hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
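The records that follow are instances of this schema, one value per field in the order listed above. As a minimal sketch of how such a split might be loaded and filtered on its quality signals (the Parquet file name below is a hypothetical placeholder; only the column names are taken from the schema):

import pandas as pd

# Hypothetical file name; the dataset's real storage layout is not shown here.
df = pd.read_parquet("data/train-00000-of-00001.parquet")

# Example filter: small Python files with mostly unique words and few duplicated 5-grams.
mask = (
    (df["lang"] == "Python")
    & (df["size"] < 50_000)
    & (df["qsc_code_frac_words_unique_quality_signal"] > 0.3)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head())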
68b58237ea36db794b699f57c65a8ae384c67d91
16,511
py
Python
hazm/WordTokenizer.py
lingwndr/hazm
2af5665dfb19225c9433b955b6a3a7d370b96c13
[ "MIT" ]
null
null
null
hazm/WordTokenizer.py
lingwndr/hazm
2af5665dfb19225c9433b955b6a3a7d370b96c13
[ "MIT" ]
null
null
null
hazm/WordTokenizer.py
lingwndr/hazm
2af5665dfb19225c9433b955b6a3a7d370b96c13
[ "MIT" ]
null
null
null
# coding: utf-8

from __future__ import unicode_literals
import re, codecs
from .utils import words_list, default_words, default_verbs
from nltk.tokenize.api import TokenizerI
import string


class WordTokenizer(TokenizerI):
    """
    >>> tokenizer = WordTokenizer()
    >>> tokenizer.tokenize('این جمله (خیلی) پیچیده نیست!!!')
    ['این', 'جمله', '(', 'خیلی', ')', 'پیچیده', 'نیست', '!!!']
    >>> tokenizer.tokenize('نسخه 0.5 در ساعت 22:00 تهران،1396')
    ['نسخه', '0.5', 'در', 'ساعت', '22:00', 'تهران', '،', '1396']
    >>> tokenizer = WordTokenizer(join_verb_parts=False)
    >>> print(' '.join(tokenizer.tokenize('سلام.')))
    سلام .
    >>> tokenizer = WordTokenizer(join_verb_parts=False, replace_links=True)
    >>> print(' '.join(tokenizer.tokenize('در قطر هک شد https://t.co/tZOurPSXzi https://t.co/vtJtwsRebP')))
    در قطر هک شد LINK LINK
    >>> tokenizer = WordTokenizer(join_verb_parts=False, replace_IDs=True, replace_numbers=True)
    >>> print(' '.join(tokenizer.tokenize('زلزله ۴.۸ ریشتری در هجدک کرمان @bourse24ir')))
    زلزله NUMF ریشتری در هجدک کرمان ID
    >>> tokenizer = WordTokenizer(join_verb_parts=False, replace_hashtags=True, replace_numbers=True, separate_emoji=True)
    >>> print(' '.join(tokenizer.tokenize('📍عرضه بلوک 17 درصدی #های_وب به قیمت')))
    📍 عرضه بلوک NUM2 درصدی TAG های وب به قیمت
    >>> tokenizer = WordTokenizer(join_verb_parts=False, separate_emoji=True)
    >>> print(' '.join(tokenizer.tokenize('دیگه میخوام ترک تحصیل کنم 😂😂😂')))
    دیگه میخوام ترک تحصیل کنم 😂 😂 😂
    """

    def __init__(self, words_file=default_words, verbs_file=default_verbs,
                 join_verb_parts=True, separate_emoji=False,
                 replace_links=False, replace_IDs=False, replace_emails=False,
                 replace_numbers=False, replace_hashtags=False):
        self._join_verb_parts = join_verb_parts
        self.separate_emoji = separate_emoji
        self.replace_links = replace_links
        self.replace_IDs = replace_IDs
        self.replace_emails = replace_emails
        self.replace_numbers = replace_numbers
        self.replace_hashtags = replace_hashtags

        # self.pattern = re.compile(r'([؟!\?]+|\d[\d\.:/\\]+|[:\.،؛»\]\)\}"«\[\(\{])')  # TODO \d
        self.pattern = re.compile(r'([!\"#$%&\'\(\)\*\+,-\./:;<=>\?@\[\\\]\^_`\{\|\}\~؟؛،»«٪]+|\d[\d\.:/\\]+)')  # TODO \d
        self.emoji_pattern = re.compile("["
"\xa9\xae\u2002\u2003\u2005\u203c\u2049\u2122\u2139\u2194\u2195\u2196\u2197\u2198\u2199\u21a9\u21aa\u231a\u231b\u23e9\u23ea\u23eb\u23ec\u23f0\u23f3\u24c2\u25aa\u25ab\u25b6\u25c0\u25fb\u25fc\u25fd\u25fe\u2600\u2601\u260e\u2611\u2614\u2615\u261d\u263a\u2648\u2649\u264a\u264b\u264c\u264d\u264e\u264f\u2650\u2651\u2652\u2653\u2660\u2663\u2665\u2666\u2668\u267b\u267f\u2693\u26a0\u26a1\u26aa\u26ab\u26bd\u26be\u26c4\u26c5\u26ce\u26d4\u26ea\u26f2\u26f3\u26f5\u26fa\u26fd\u2702\u2705\u2708\u2709\u270a\u270b\u270c\u270f\u2712\u2714\u2716\u2728\u2733\u2734\u2744\u2747\u274c\u274e\u2753\u2754\u2755\u2757\u2764\u2795\u2796\u2797\u27a1\u27b0\u2934\u2935\u2b05\u2b06\u2b07\u2b1b\u2b1c\u2b50\u2b55\u3030\u303d\u3297\u3299\U0001f004\U0001f0cf\U0001f170\U0001f171\U0001f17e\U0001f17f\U0001f18e\U0001f191\U0001f192\U0001f193\U0001f194\U0001f195\U0001f196\U0001f197\U0001f198\U0001f199\U0001f19a\U0001f1e7\U0001f1e8\U0001f1e9\U0001f1ea\U0001f1eb\U0001f1ec\U0001f1ee\U0001f1ef\U0001f1f0\U0001f1f3\U0001f1f5\U0001f1f7\U0001f1f8\U0001f1f9\U0001f1fa\U0001f201\U0001f202\U0001f21a\U0001f22f\U0001f232\U0001f233\U0001f234\U0001f235\U0001f236\U0001f237\U0001f238\U0001f239\U0001f23a\U0001f250\U0001f251\U0001f300\U0001f301\U0001f302\U0001f303\U0001f304\U0001f305\U0001f306\U0001f307\U0001f308\U0001f309\U0001f30a\U0001f30b\U0001f30c\U0001f30d\U0001f30e\U0001f30f\U0001f310\U0001f311\U0001f312\U0001f313\U0001f314\U0001f315\U0001f316\U0001f317\U0001f318\U0001f319\U0001f31a\U0001f31b\U0001f31c\U0001f31d\U0001f31e\U0001f31f\U0001f320\U0001f321\U0001f322\U0001f323\U0001f324\U0001f325\U0001f326\U0001f327\U0001f328\U0001f329\U0001f32a\U0001f32b\U0001f32c\U0001f32d\U0001f32e\U0001f32f\U0001f330\U0001f331\U0001f332\U0001f333\U0001f334\U0001f335\U0001f336\U0001f337\U0001f338\U0001f339\U0001f33a\U0001f33b\U0001f33c\U0001f33d\U0001f33e\U0001f33f\U0001f340\U0001f341\U0001f342\U0001f343\U0001f344\U0001f345\U0001f346\U0001f347\U0001f348\U0001f349\U0001f34a\U0001f34b\U0001f34c\U0001f34d\U0001f34e\U0001f34f\U0001f350\U0001f351\U0001f352\U0001f353\U0001f354\U0001f355\U0001f356\U0001f357\U0001f358\U0001f359\U0001f35a\U0001f35b\U0001f35c\U0001f35d\U0001f35e\U0001f35f\U0001f360\U0001f361\U0001f362\U0001f363\U0001f364\U0001f365\U0001f366\U0001f367\U0001f368\U0001f369\U0001f36a\U0001f36b\U0001f36c\U0001f36d\U0001f36e\U0001f36f\U0001f370\U0001f371\U0001f372\U0001f373\U0001f374\U0001f375\U0001f376\U0001f377\U0001f378\U0001f379\U0001f37a\U0001f37b\U0001f37c\U0001f37d\U0001f37e\U0001f37f\U0001f380\U0001f381\U0001f382\U0001f383\U0001f384\U0001f385\U0001f386\U0001f387\U0001f388\U0001f389\U0001f38a\U0001f38b\U0001f38c\U0001f38d\U0001f38e\U0001f38f\U0001f390\U0001f391\U0001f392\U0001f393\U0001f394\U0001f395\U0001f396\U0001f397\U0001f398\U0001f399\U0001f39a\U0001f39b\U0001f39c\U0001f39d\U0001f39e\U0001f39f\U0001f3a0\U0001f3a1\U0001f3a2\U0001f3a3\U0001f3a4\U0001f3a5\U0001f3a6\U0001f3a7\U0001f3a8\U0001f3a9\U0001f3aa\U0001f3ab\U0001f3ac\U0001f3ad\U0001f3ae\U0001f3af\U0001f3b0\U0001f3b1\U0001f3b2\U0001f3b3\U0001f3b4\U0001f3b5\U0001f3b6\U0001f3b7\U0001f3b8\U0001f3b9\U0001f3ba\U0001f3bb\U0001f3bc\U0001f3bd\U0001f3be\U0001f3bf\U0001f3c0\U0001f3c1\U0001f3c2\U0001f3c3\U0001f3c4\U0001f3c5\U0001f3c6\U0001f3c7\U0001f3c8\U0001f3c9\U0001f3ca\U0001f3cb\U0001f3cc\U0001f3cd\U0001f3ce\U0001f3cf\U0001f3d0\U0001f3d1\U0001f3d2\U0001f3d3\U0001f3d4\U0001f3d5\U0001f3d6\U0001f3d7\U0001f3d8\U0001f3d9\U0001f3da\U0001f3db\U0001f3dc\U0001f3dd\U0001f3de\U0001f3df\U0001f3e0\U0001f3e1\U0001f3e2\U0001f3e3\U0001f3e4\U0001f3e5\U0001f3e6\U0001f3e7\U0001f3e8\U0001f3e9\U0001f3ea\U0001f3eb\U0001f3ec\U00
01f3ed\U0001f3ee\U0001f3ef\U0001f3f0\U0001f3f1\U0001f3f2\U0001f3f3\U0001f3f4\U0001f3f5\U0001f3f6\U0001f3f7\U0001f3f8\U0001f3f9\U0001f3fa\U0001f3fb\U0001f3fc\U0001f3fd\U0001f3fe\U0001f3ff\U0001f400\U0001f401\U0001f402\U0001f403\U0001f404\U0001f405\U0001f406\U0001f407\U0001f408\U0001f409\U0001f40a\U0001f40b\U0001f40c\U0001f40d\U0001f40e\U0001f40f\U0001f410\U0001f411\U0001f412\U0001f413\U0001f414\U0001f415\U0001f416\U0001f417\U0001f418\U0001f419\U0001f41a\U0001f41b\U0001f41c\U0001f41d\U0001f41e\U0001f41f\U0001f420\U0001f421\U0001f422\U0001f423\U0001f424\U0001f425\U0001f426\U0001f427\U0001f428\U0001f429\U0001f42a\U0001f42b\U0001f42c\U0001f42d\U0001f42e\U0001f42f\U0001f430\U0001f431\U0001f432\U0001f433\U0001f434\U0001f435\U0001f436\U0001f437\U0001f438\U0001f439\U0001f43a\U0001f43b\U0001f43c\U0001f43d\U0001f43e\U0001f43f\U0001f440\U0001f441\U0001f442\U0001f443\U0001f444\U0001f445\U0001f446\U0001f447\U0001f448\U0001f449\U0001f44a\U0001f44b\U0001f44c\U0001f44d\U0001f44e\U0001f44f\U0001f450\U0001f451\U0001f452\U0001f453\U0001f454\U0001f455\U0001f456\U0001f457\U0001f458\U0001f459\U0001f45a\U0001f45b\U0001f45c\U0001f45d\U0001f45e\U0001f45f\U0001f460\U0001f461\U0001f462\U0001f463\U0001f464\U0001f465\U0001f466\U0001f467\U0001f468\U0001f469\U0001f46a\U0001f46b\U0001f46c\U0001f46d\U0001f46e\U0001f46f\U0001f470\U0001f471\U0001f472\U0001f473\U0001f474\U0001f475\U0001f476\U0001f477\U0001f478\U0001f479\U0001f47a\U0001f47b\U0001f47c\U0001f47d\U0001f47e\U0001f47f\U0001f480\U0001f481\U0001f482\U0001f483\U0001f484\U0001f485\U0001f486\U0001f487\U0001f488\U0001f489\U0001f48a\U0001f48b\U0001f48c\U0001f48d\U0001f48e\U0001f48f\U0001f490\U0001f491\U0001f492\U0001f493\U0001f494\U0001f495\U0001f496\U0001f497\U0001f498\U0001f499\U0001f49a\U0001f49b\U0001f49c\U0001f49d\U0001f49e\U0001f49f\U0001f4a0\U0001f4a1\U0001f4a2\U0001f4a3\U0001f4a4\U0001f4a5\U0001f4a6\U0001f4a7\U0001f4a8\U0001f4a9\U0001f4aa\U0001f4ab\U0001f4ac\U0001f4ad\U0001f4ae\U0001f4af\U0001f4b0\U0001f4b1\U0001f4b2\U0001f4b3\U0001f4b4\U0001f4b5\U0001f4b6\U0001f4b7\U0001f4b8\U0001f4b9\U0001f4ba\U0001f4bb\U0001f4bc\U0001f4bd\U0001f4be\U0001f4bf\U0001f4c0\U0001f4c1\U0001f4c2\U0001f4c3\U0001f4c4\U0001f4c5\U0001f4c6\U0001f4c7\U0001f4c8\U0001f4c9\U0001f4ca\U0001f4cb\U0001f4cc\U0001f4cd\U0001f4ce\U0001f4cf\U0001f4d0\U0001f4d1\U0001f4d2\U0001f4d3\U0001f4d4\U0001f4d5\U0001f4d6\U0001f4d7\U0001f4d8\U0001f4d9\U0001f4da\U0001f4db\U0001f4dc\U0001f4dd\U0001f4de\U0001f4df\U0001f4e0\U0001f4e1\U0001f4e2\U0001f4e3\U0001f4e4\U0001f4e5\U0001f4e6\U0001f4e7\U0001f4e8\U0001f4e9\U0001f4ea\U0001f4eb\U0001f4ec\U0001f4ed\U0001f4ee\U0001f4ef\U0001f4f0\U0001f4f1\U0001f4f2\U0001f4f3\U0001f4f4\U0001f4f5\U0001f4f6\U0001f4f7\U0001f4f8\U0001f4f9\U0001f4fa\U0001f4fb\U0001f4fc\U0001f4fd\U0001f4fe\U0001f4ff\U0001f500\U0001f501\U0001f502\U0001f503\U0001f504\U0001f505\U0001f506\U0001f507\U0001f508\U0001f509\U0001f50a\U0001f50b\U0001f50c\U0001f50d\U0001f50e\U0001f50f\U0001f510\U0001f511\U0001f512\U0001f513\U0001f514\U0001f515\U0001f516\U0001f517\U0001f518\U0001f519\U0001f51a\U0001f51b\U0001f51c\U0001f51d\U0001f51e\U0001f51f\U0001f520\U0001f521\U0001f522\U0001f523\U0001f524\U0001f525\U0001f526\U0001f527\U0001f528\U0001f529\U0001f52a\U0001f52b\U0001f52c\U0001f52d\U0001f52e\U0001f52f\U0001f530\U0001f531\U0001f532\U0001f533\U0001f534\U0001f535\U0001f536\U0001f537\U0001f538\U0001f539\U0001f53a\U0001f53b\U0001f53c\U0001f53d\U0001f53e\U0001f53f\U0001f540\U0001f541\U0001f542\U0001f543\U0001f544\U0001f545\U0001f546\U0001f547\U0001f548\U0001f549\U0001f54a\U0001f54b\U0001f54c\U0001f54d\U0001f54e\U0001f54f\U0001f55
0\U0001f551\U0001f552\U0001f553\U0001f554\U0001f555\U0001f556\U0001f557\U0001f558\U0001f559\U0001f55a\U0001f55b\U0001f55c\U0001f55d\U0001f55e\U0001f55f\U0001f560\U0001f561\U0001f562\U0001f563\U0001f564\U0001f565\U0001f566\U0001f567\U0001f568\U0001f569\U0001f56a\U0001f56b\U0001f56c\U0001f56d\U0001f56e\U0001f56f\U0001f570\U0001f571\U0001f572\U0001f573\U0001f574\U0001f575\U0001f576\U0001f577\U0001f578\U0001f579\U0001f57a\U0001f57b\U0001f57c\U0001f57d\U0001f57e\U0001f57f\U0001f580\U0001f581\U0001f582\U0001f583\U0001f584\U0001f585\U0001f586\U0001f587\U0001f588\U0001f589\U0001f58a\U0001f58b\U0001f58c\U0001f58d\U0001f58e\U0001f58f\U0001f590\U0001f591\U0001f592\U0001f593\U0001f594\U0001f595\U0001f596\U0001f597\U0001f598\U0001f599\U0001f59a\U0001f59b\U0001f59c\U0001f59d\U0001f59e\U0001f59f\U0001f5a0\U0001f5a1\U0001f5a2\U0001f5a3\U0001f5a4\U0001f5a5\U0001f5a6\U0001f5a7\U0001f5a8\U0001f5a9\U0001f5aa\U0001f5ab\U0001f5ac\U0001f5ad\U0001f5ae\U0001f5af\U0001f5b0\U0001f5b1\U0001f5b2\U0001f5b3\U0001f5b4\U0001f5b5\U0001f5b6\U0001f5b7\U0001f5b8\U0001f5b9\U0001f5ba\U0001f5bb\U0001f5bc\U0001f5bd\U0001f5be\U0001f5bf\U0001f5c0\U0001f5c1\U0001f5c2\U0001f5c3\U0001f5c4\U0001f5c5\U0001f5c6\U0001f5c7\U0001f5c8\U0001f5c9\U0001f5ca\U0001f5cb\U0001f5cc\U0001f5cd\U0001f5ce\U0001f5cf\U0001f5d0\U0001f5d1\U0001f5d2\U0001f5d3\U0001f5d4\U0001f5d5\U0001f5d6\U0001f5d7\U0001f5d8\U0001f5d9\U0001f5da\U0001f5db\U0001f5dc\U0001f5dd\U0001f5de\U0001f5df\U0001f5e0\U0001f5e1\U0001f5e2\U0001f5e3\U0001f5e4\U0001f5e5\U0001f5e6\U0001f5e7\U0001f5e8\U0001f5e9\U0001f5ea\U0001f5eb\U0001f5ec\U0001f5ed\U0001f5ee\U0001f5ef\U0001f5f0\U0001f5f1\U0001f5f2\U0001f5f3\U0001f5f4\U0001f5f5\U0001f5f6\U0001f5f7\U0001f5f8\U0001f5f9\U0001f5fa\U0001f5fb\U0001f5fc\U0001f5fd\U0001f5fe\U0001f5ff\U0001f600\U0001f601\U0001f602\U0001f603\U0001f604\U0001f605\U0001f606\U0001f607\U0001f608\U0001f609\U0001f60a\U0001f60b\U0001f60c\U0001f60d\U0001f60e\U0001f60f\U0001f610\U0001f611\U0001f612\U0001f613\U0001f614\U0001f615\U0001f616\U0001f617\U0001f618\U0001f619\U0001f61a\U0001f61b\U0001f61c\U0001f61d\U0001f61e\U0001f61f\U0001f620\U0001f621\U0001f622\U0001f623\U0001f624\U0001f625\U0001f626\U0001f627\U0001f628\U0001f629\U0001f62a\U0001f62b\U0001f62c\U0001f62d\U0001f62e\U0001f62f\U0001f630\U0001f631\U0001f632\U0001f633\U0001f634\U0001f635\U0001f636\U0001f637\U0001f638\U0001f639\U0001f63a\U0001f63b\U0001f63c\U0001f63d\U0001f63e\U0001f63f\U0001f640\U0001f641\U0001f642\U0001f643\U0001f644\U0001f645\U0001f646\U0001f647\U0001f648\U0001f649\U0001f64a\U0001f64b\U0001f64c\U0001f64d\U0001f64e\U0001f64f\U0001f680\U0001f683\U0001f684\U0001f685\U0001f687\U0001f689\U0001f68c\U0001f68f\U0001f691\U0001f692\U0001f693\U0001f695\U0001f697\U0001f699\U0001f69a\U0001f6a2\U0001f6a4\U0001f6a5\U0001f6a7\U0001f6a8\U0001f6a9\U0001f6aa\U0001f6ab\U0001f6ac\U0001f6ad\U0001f6b2\U0001f6b6\U0001f6b9\U0001f6ba\U0001f6bb\U0001f6bc\U0001f6bd\U0001f6be\U0001f6c0" # other emojis "]", flags=re.UNICODE) self.emoji_repl = r' \g<0> ' self.email_pattern = re.compile(r'[a-zA-Z0-9\._\+-]+@([a-zA-Z0-9-]+\.)+[A-Za-z]{2,}') self.email_repl = r'EMAIL' self.id_pattern = re.compile(r'([^\w\._]*)(@[\w_]+)') self.id_repl = r'\1ID' self.link_pattern = re.compile(r'((https?|ftp):\/\/)?(?<!@)([wW]{3}\.)?(([\w-]+)(\.(\w){2,})+([-\w@:%_\+\/~#?&=]+)?)') self.link_repl = r'LINK' self.number_int_pattern = re.compile(r'([^\.,\w]+)([\d۰-۹]+)([^\.,\w]+)') self.number_int_repl = lambda m: m.group(1) + 'NUM'+ str(len(m.group(2))) + m.group(3) self.number_float_pattern = re.compile(r'([^,\w]+)([\d۰-۹,]+[\.٫]{1}[\d۰-۹]+)([^,\w]+)') 
        self.number_float_repl = r'\1NUMF\3'
        self.hashtag_pattern = re.compile(r'\#([\S]+)')  # NOTE: python2.7 does not support unicodes with \w Example: r'\#([\w\_]+)'
        self.hashtag_repl = lambda m: 'TAGSTART ' + m.group(1).replace('_', ' ') + ' TAGEND'

        self.words = {item[0]: (item[1], item[2]) for item in words_list(default_words)}

        if join_verb_parts:
            self.after_verbs = set([
                'ام', 'ای', 'است', 'ایم', 'اید', 'اند',
                'بودم', 'بودی', 'بود', 'بودیم', 'بودید', 'بودند',
                'باشم', 'باشی', 'باشد', 'باشیم', 'باشید', 'باشند',
                'شده_ام', 'شده_ای', 'شده_است', 'شده_ایم', 'شده_اید', 'شده_اند',
                'شده_بودم', 'شده_بودی', 'شده_بود', 'شده_بودیم', 'شده_بودید', 'شده_بودند',
                'شده_باشم', 'شده_باشی', 'شده_باشد', 'شده_باشیم', 'شده_باشید', 'شده_باشند',
                'نشده_ام', 'نشده_ای', 'نشده_است', 'نشده_ایم', 'نشده_اید', 'نشده_اند',
                'نشده_بودم', 'نشده_بودی', 'نشده_بود', 'نشده_بودیم', 'نشده_بودید', 'نشده_بودند',
                'نشده_باشم', 'نشده_باشی', 'نشده_باشد', 'نشده_باشیم', 'نشده_باشید', 'نشده_باشند',
                'شوم', 'شوی', 'شود', 'شویم', 'شوید', 'شوند',
                'شدم', 'شدی', 'شد', 'شدیم', 'شدید', 'شدند',
                'نشوم', 'نشوی', 'نشود', 'نشویم', 'نشوید', 'نشوند',
                'نشدم', 'نشدی', 'نشد', 'نشدیم', 'نشدید', 'نشدند',
                'می‌شوم', 'می‌شوی', 'می‌شود', 'می‌شویم', 'می‌شوید', 'می‌شوند',
                'می‌شدم', 'می‌شدی', 'می‌شد', 'می‌شدیم', 'می‌شدید', 'می‌شدند',
                'نمی‌شوم', 'نمی‌شوی', 'نمی‌شود', 'نمی‌شویم', 'نمی‌شوید', 'نمی‌شوند',
                'نمی‌شدم', 'نمی‌شدی', 'نمی‌شد', 'نمی‌شدیم', 'نمی‌شدید', 'نمی‌شدند',
                'خواهم_شد', 'خواهی_شد', 'خواهد_شد', 'خواهیم_شد', 'خواهید_شد', 'خواهند_شد',
                'نخواهم_شد', 'نخواهی_شد', 'نخواهد_شد', 'نخواهیم_شد', 'نخواهید_شد', 'نخواهند_شد',
            ])

            self.before_verbs = set([
                'خواهم', 'خواهی', 'خواهد', 'خواهیم', 'خواهید', 'خواهند',
                'نخواهم', 'نخواهی', 'نخواهد', 'نخواهیم', 'نخواهید', 'نخواهند'
            ])

            with codecs.open(verbs_file, encoding='utf8') as verbs_file:
                self.verbs = list(reversed([verb.strip() for verb in verbs_file if verb]))
                self.bons = set([verb.split('#')[0] for verb in self.verbs])
                self.verbe = set([bon + 'ه' for bon in self.bons] + ['ن' + bon + 'ه' for bon in self.bons])

    def tokenize(self, text):
        if self.separate_emoji:
            text = self.emoji_pattern.sub(self.emoji_repl, text)
        if self.replace_emails:
            text = self.email_pattern.sub(self.email_repl, text)
        if self.replace_links:
            text = self.link_pattern.sub(self.link_repl, text)
        if self.replace_IDs:
            text = self.id_pattern.sub(self.id_repl, text)
        if self.replace_hashtags:
            text = self.hashtag_pattern.sub(self.hashtag_repl, text)
        if self.replace_numbers:
            text = self.number_int_pattern.sub(self.number_int_repl, text)
            text = self.number_float_pattern.sub(self.number_float_repl, text)

        text = self.pattern.sub(r' \1 ', text.replace('\n', ' ').replace('\t', ' '))

        tokens = [word for word in text.split(" ") if word]
        if self._join_verb_parts:
            tokens = self.join_verb_parts(tokens)

        return tokens

    def join_verb_parts(self, tokens):
        """
        >>> tokenizer = WordTokenizer()
        >>> tokenizer.join_verb_parts(['خواهد', 'رفت'])
        ['خواهد_رفت']
        >>> tokenizer.join_verb_parts(['رفته', 'است'])
        ['رفته_است']
        >>> tokenizer.join_verb_parts(['گفته', 'شده', 'است'])
        ['گفته_شده_است']
        >>> tokenizer.join_verb_parts(['گفته', 'خواهد', 'شد'])
        ['گفته_خواهد_شد']
        >>> tokenizer.join_verb_parts(['خسته', 'شدید'])
        ['خسته', 'شدید']
        """

        result = ['']
        for token in reversed(tokens):
            if token in self.before_verbs or (result[-1] in self.after_verbs and token in self.verbe):
                result[-1] = token + '_' + result[-1]
            else:
                result.append(token)
        return list(reversed(result[1:]))
117.099291
10,028
0.793471
2,017
16,511
6.423401
0.638076
0.010497
0.017058
0.010497
0.069003
0.039673
0.027864
0.004014
0.004014
0.004014
0
0.401893
0.052995
16,511
140
10,029
117.935714
0.423766
0.107504
0
0.025316
0
0.037975
0.758555
0.69591
0
0
0
0.007143
0
1
0.037975
false
0
0.063291
0
0.139241
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
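For this first record, qsc_code_frac_chars_dupe_5grams_quality_signal is 0.069003. A minimal sketch of one plausible way such a duplicated-n-gram signal could be computed (the dataset's exact tokenization and counting rules are assumptions, not taken from the source):

from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    # Split on whitespace; the dataset's real tokenizer is unknown.
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    # Characters attributed to n-grams that occur more than once,
    # relative to the characters in all n-gram occurrences.
    dupe_chars = sum(sum(len(w) for w in g) * c for g, c in counts.items() if c > 1)
    total_chars = sum(sum(len(w) for w in g) for g in ngrams)
    return dupe_chars / total_chars if total_chars else 0.0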
d7acf5b5786e5c233aebfbfa7c30d5010a468d91
379
py
Python
pybrain/structure/connections/__init__.py
sveilleux1/pybrain
1e1de73142c290edb84e29ca7850835f3e7bca8b
[ "BSD-3-Clause" ]
2,208
2015-01-02T02:14:41.000Z
2022-03-31T04:45:46.000Z
pybrain/structure/connections/__init__.py
sveilleux1/pybrain
1e1de73142c290edb84e29ca7850835f3e7bca8b
[ "BSD-3-Clause" ]
91
2015-01-08T16:42:16.000Z
2021-12-11T19:16:35.000Z
pybrain/structure/connections/__init__.py
sveilleux1/pybrain
1e1de73142c290edb84e29ca7850835f3e7bca8b
[ "BSD-3-Clause" ]
786
2015-01-02T15:18:20.000Z
2022-02-23T23:42:40.000Z
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.connections.identity import IdentityConnection
from pybrain.structure.connections.shared import SharedFullConnection, MotherConnection, SharedConnection
from pybrain.structure.connections.linear import LinearConnection
from pybrain.structure.connections.fullnotself import FullNotSelfConnection
75.8
105
0.899736
37
379
9.216216
0.459459
0.16129
0.293255
0.454545
0
0
0
0
0
0
0
0
0.055409
379
5
106
75.8
0.952514
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cc2acf30832ff2f621fe8ccce390cbd59bba315f
122
py
Python
eex/translators/__init__.py
dgasmith/EEX
7608c9ef25931040524c75d227f0bee18de9ddc1
[ "BSD-3-Clause" ]
7
2018-04-03T18:12:04.000Z
2020-03-27T07:52:49.000Z
eex/translators/__init__.py
dgasmith/EEX
7608c9ef25931040524c75d227f0bee18de9ddc1
[ "BSD-3-Clause" ]
43
2018-04-03T20:18:23.000Z
2018-10-16T02:28:34.000Z
eex/translators/__init__.py
dgasmith/EEX
7608c9ef25931040524c75d227f0bee18de9ddc1
[ "BSD-3-Clause" ]
3
2018-04-06T15:51:37.000Z
2018-07-31T18:53:06.000Z
""" A file that aggregates the EEX translator classes """ from . import lammps from . import amber from . import gromacs
15.25
49
0.737705
17
122
5.294118
0.764706
0.333333
0
0
0
0
0
0
0
0
0
0
0.188525
122
7
50
17.428571
0.909091
0.401639
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
042d63a5e2a64df002084e2cef91f945df84c992
128
py
Python
bootcamp/contacts/admin.py
nandkumar1996/sharebox-portal
1b4fb60c776d42271a03997ab47f4da67463ad91
[ "MIT" ]
null
null
null
bootcamp/contacts/admin.py
nandkumar1996/sharebox-portal
1b4fb60c776d42271a03997ab47f4da67463ad91
[ "MIT" ]
null
null
null
bootcamp/contacts/admin.py
nandkumar1996/sharebox-portal
1b4fb60c776d42271a03997ab47f4da67463ad91
[ "MIT" ]
null
null
null
from django.contrib import admin
from .models import Contact_form
# Register your models here.
admin.site.register(Contact_form)
32
33
0.835938
19
128
5.526316
0.631579
0.209524
0
0
0
0
0
0
0
0
0
0
0.101563
128
4
33
32
0.913043
0.203125
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
044645b7e77ca2e08c89225fceefd08f4eef8777
284
py
Python
main/repository/CarRepository.py
KesharaWaidyarathna/Lambo_ChatBot_AI
2ef599d0c5a5a9730e218d62f869e01188c60e43
[ "BSD-3-Clause" ]
null
null
null
main/repository/CarRepository.py
KesharaWaidyarathna/Lambo_ChatBot_AI
2ef599d0c5a5a9730e218d62f869e01188c60e43
[ "BSD-3-Clause" ]
null
null
null
main/repository/CarRepository.py
KesharaWaidyarathna/Lambo_ChatBot_AI
2ef599d0c5a5a9730e218d62f869e01188c60e43
[ "BSD-3-Clause" ]
null
null
null
from abc import ABC, abstractmethod


class CarRepository(ABC):
    @abstractmethod
    def get_all(self):
        pass

    @abstractmethod
    def find_by_brand(self, brand):
        pass

    @abstractmethod
    def get_min_price(self):
        pass

    @abstractmethod
    def get_max_price(self):
        pass
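CarRepository above only declares an interface. As an illustrative sketch (InMemoryCarRepository and its dict-based record shape are hypothetical, not part of the original repository), a concrete implementation might look like:

class InMemoryCarRepository(CarRepository):
    def __init__(self, cars):
        # cars: list of dicts with 'brand' and 'price' keys (assumed shape).
        self._cars = cars

    def get_all(self):
        return list(self._cars)

    def find_by_brand(self, brand):
        return [car for car in self._cars if car['brand'] == brand]

    def get_min_price(self):
        return min(car['price'] for car in self._cars)

    def get_max_price(self):
        return max(car['price'] for car in self._cars)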
17.75
40
0.711268
36
284
5.416667
0.472222
0.348718
0.307692
0.25641
0
0
0
0
0
0
0
0
0.214789
284
15
41
18.933333
0.874439
0
0
0.4
0
0
0
0
0
0
0
0
0
1
0.4
false
0.4
0.1
0
0.6
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
045bf821e75c89b1e5315c011c76569b1b106544
96
py
Python
venv/lib/python3.8/site-packages/chardet/langhungarianmodel.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/chardet/langhungarianmodel.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/chardet/langhungarianmodel.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/38/30/22/b2fa827deb3c07815ec8cfcf83d1d8dd90e7132682893e01c72ce873ac
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.416667
0
96
1
96
96
0.479167
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
f0b6bbb2efadbb4feb5814f2eddfd4f6e4d1c9bb
24
py
Python
src/graph_db/console/__init__.py
ilya16/graph-db
6b35130c3fb540f030e65cdf309419f75f94cedf
[ "MIT" ]
9
2018-04-27T07:49:08.000Z
2021-03-15T12:06:23.000Z
src/graph_db/console/__init__.py
ilya16/graph-db
6b35130c3fb540f030e65cdf309419f75f94cedf
[ "MIT" ]
14
2018-04-10T13:09:34.000Z
2018-05-07T21:40:01.000Z
src/graph_db/console/__init__.py
ilya16/graph-db
6b35130c3fb540f030e65cdf309419f75f94cedf
[ "MIT" ]
null
null
null
from .console import run
24
24
0.833333
4
24
5
1
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f0cc5954fc7814ac16604a70969becddba54d0eb
21
py
Python
libs/file/nxpy/core/file/__init__.py
nmusatti/nxpy
68568e71ee3c3ecb0b467cb8d25d76eb03c81205
[ "BSL-1.0" ]
5
2019-08-16T09:48:35.000Z
2021-03-23T09:56:44.000Z
libs/file/nxpy/core/file/__init__.py
nmusatti/nxpy
68568e71ee3c3ecb0b467cb8d25d76eb03c81205
[ "BSL-1.0" ]
1
2019-01-17T14:11:56.000Z
2019-01-18T17:56:35.000Z
libs/file/nxpy/core/file/__init__.py
nmusatti/nxpy
68568e71ee3c3ecb0b467cb8d25d76eb03c81205
[ "BSL-1.0" ]
2
2019-02-09T17:57:00.000Z
2019-08-30T08:06:01.000Z
from .file import *
10.5
20
0.666667
3
21
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.238095
21
1
21
21
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f0f1c48f08d7847d3b8f1dd3cfe2b3f53ced9bab
113
py
Python
dagology/__init__.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
5
2017-02-16T21:35:28.000Z
2020-08-09T07:33:30.000Z
dagology/__init__.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
null
null
null
dagology/__init__.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
3
2018-04-20T08:58:24.000Z
2020-04-11T02:25:56.000Z
from algorithms import *
from generators import *
from utils import *
from metrics import *
from matrix import *
18.833333
24
0.778761
15
113
5.866667
0.466667
0.454545
0
0
0
0
0
0
0
0
0
0
0.176991
113
5
25
22.6
0.946237
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f0fcd45d67fbe901875224b7c9d4a7946df088f7
10,865
py
Python
tests/test_pdp_isolate.py
antwhite/PDPbox
b022a0aabcc6dbe2440244bf48d08fbb6ecdaf2d
[ "MIT" ]
675
2017-08-08T03:37:46.000Z
2022-03-31T20:14:02.000Z
tests/test_pdp_isolate.py
antwhite/PDPbox
b022a0aabcc6dbe2440244bf48d08fbb6ecdaf2d
[ "MIT" ]
60
2017-08-02T15:59:02.000Z
2022-03-29T03:57:22.000Z
tests/test_pdp_isolate.py
antwhite/PDPbox
b022a0aabcc6dbe2440244bf48d08fbb6ecdaf2d
[ "MIT" ]
121
2017-08-08T03:37:50.000Z
2022-03-29T10:06:11.000Z
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_frame_equal
import pandas as pd
import matplotlib
from pdpbox.pdp import pdp_isolate, pdp_plot


class TestPDPIsolateBinary(object):
    def test_pdp_isolate_binary_feature(
        self, titanic_model, titanic_data, titanic_features
    ):
        # feature_type: binary
        pdp_isolate_out = pdp_isolate(
            model=titanic_model,
            dataset=titanic_data,
            model_features=titanic_features,
            feature="Sex",
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=None,
            memory_limit=0.5,
            n_jobs=1,
            predict_kwds={},
            data_transformer=None,
        )
        assert pdp_isolate_out._type == "PDPIsolate_instance"
        assert pdp_isolate_out.n_classes == 2
        assert pdp_isolate_out.which_class is None
        assert pdp_isolate_out.feature == "Sex"
        assert pdp_isolate_out.feature_type == "binary"
        assert pdp_isolate_out.percentile_info == []
        assert pdp_isolate_out.display_columns == ["Sex_0", "Sex_1"]
        assert pdp_isolate_out.hist_data is None

    def test_pdp_isolate_onehot_feature(
        self, titanic_model, titanic_data, titanic_features
    ):
        # feature_type: onehot
        pdp_isolate_out = pdp_isolate(
            model=titanic_model,
            dataset=titanic_data,
            model_features=titanic_features,
            feature=["Embarked_C", "Embarked_S", "Embarked_Q"],
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=None,
            memory_limit=0.5,
            n_jobs=1,
            predict_kwds={},
            data_transformer=None,
        )
        assert pdp_isolate_out._type == "PDPIsolate_instance"
        assert pdp_isolate_out.n_classes == 2
        assert pdp_isolate_out.which_class is None
        assert pdp_isolate_out.feature == ["Embarked_C", "Embarked_S", "Embarked_Q"]
        assert pdp_isolate_out.feature_type == "onehot"
        assert pdp_isolate_out.percentile_info == []
        assert pdp_isolate_out.display_columns == [
            "Embarked_C",
            "Embarked_S",
            "Embarked_Q",
        ]
        assert pdp_isolate_out.hist_data is None

    def test_pdp_isolate_numeric_feature(
        self, titanic_model, titanic_data, titanic_features
    ):
        # feature_type: numeric
        pdp_isolate_out = pdp_isolate(
            model=titanic_model,
            dataset=titanic_data,
            model_features=titanic_features,
            feature="Fare",
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=None,
            memory_limit=0.5,
            n_jobs=1,
            predict_kwds={},
            data_transformer=None,
        )
        assert pdp_isolate_out._type == "PDPIsolate_instance"
        assert pdp_isolate_out.n_classes == 2
        assert pdp_isolate_out.which_class is None
        assert pdp_isolate_out.feature == "Fare"
        assert pdp_isolate_out.feature_type == "numeric"
        assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]

    def test_pdp_isolate_cust_grid_points(
        self, titanic_model, titanic_data, titanic_features
    ):
        # use cust_grid_points
        pdp_isolate_out = pdp_isolate(
            model=titanic_model,
            dataset=titanic_data,
            model_features=titanic_features,
            feature="Fare",
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=range(0, 100, 5),
            memory_limit=0.5,
            n_jobs=1,
            predict_kwds={},
            data_transformer=None,
        )
        assert pdp_isolate_out._type == "PDPIsolate_instance"
        assert pdp_isolate_out.n_classes == 2
        assert pdp_isolate_out.which_class is None
        assert pdp_isolate_out.feature == "Fare"
        assert pdp_isolate_out.feature_type == "numeric"
        assert pdp_isolate_out.percentile_info == []
        assert pdp_isolate_out.display_columns == [
            "0", "5", "10", "15", "20", "25", "30", "35", "40", "45",
            "50", "55", "60", "65", "70", "75", "80", "85", "90", "95",
        ]
        assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]


class TestPDPIsolateRegression(object):
    def test_pdp_isolate_regression(self, ross_model, ross_data, ross_features):
        pdp_isolate_out = pdp_isolate(
            model=ross_model,
            dataset=ross_data,
            model_features=ross_features,
            feature="SchoolHoliday",
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=None,
            memory_limit=0.5,
            n_jobs=1,
            predict_kwds={},
            data_transformer=None,
        )
        assert pdp_isolate_out._type == "PDPIsolate_instance"
        assert pdp_isolate_out.n_classes == 0
        assert pdp_isolate_out.which_class is None
        assert pdp_isolate_out.feature == "SchoolHoliday"
        assert pdp_isolate_out.feature_type == "binary"
        assert pdp_isolate_out.percentile_info == []
        assert pdp_isolate_out.display_columns == ["SchoolHoliday_0", "SchoolHoliday_1"]
        assert pdp_isolate_out.hist_data is None

    def test_pdp_isolate_n_jobs(self, ross_model, ross_data, ross_features):
        # test n_jobs > 1
        _ = pdp_isolate(
            model=ross_model,
            dataset=ross_data,
            model_features=ross_features,
            feature="SchoolHoliday",
            num_grid_points=10,
            grid_type="percentile",
            percentile_range=None,
            grid_range=None,
            cust_grid_points=None,
            memory_limit=0.5,
            n_jobs=2,
            predict_kwds={},
            data_transformer=None,
        )


def test_pdp_isolate_multiclass(otto_model, otto_data, otto_features):
    pdp_isolate_out = pdp_isolate(
        model=otto_model,
        dataset=otto_data,
        model_features=otto_features,
        feature="feat_67",
        num_grid_points=10,
        grid_type="percentile",
        percentile_range=None,
        grid_range=None,
        cust_grid_points=None,
        memory_limit=0.5,
        n_jobs=1,
        predict_kwds={},
        data_transformer=None,
    )
    assert len(pdp_isolate_out) == 9
    assert pdp_isolate_out[0]._type == "PDPIsolate_instance"
    assert pdp_isolate_out[0].n_classes == 9
    for i in range(9):
        assert pdp_isolate_out[i].which_class == i
    assert pdp_isolate_out[0].feature == "feat_67"
    assert pdp_isolate_out[0].feature_type == "numeric"


class TestPDPPlotSingle(object):
    @pytest.fixture
    def pdp_sex(self, titanic_data, titanic_model, titanic_features):
        result = pdp_isolate(
            model=titanic_model,
            dataset=titanic_data,
            model_features=titanic_features,
            feature="Sex",
        )
        return result

    def test_pdp_plot_single_default(self, pdp_sex):
        # single chart without data dist plot
        fig, axes = pdp_plot(pdp_sex, "sex")
        assert type(fig) == matplotlib.figure.Figure
        assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
        assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot
        assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot

    def test_pdp_plot_single_distplot(self, pdp_sex):
        # single chart with data dist plot
        fig, axes = pdp_plot(pdp_sex, "sex", plot_pts_dist=True)
        assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
        assert sorted(axes["pdp_ax"].keys()) == ["_count_ax", "_pdp_ax"]
        assert type(axes["pdp_ax"]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
        assert type(axes["pdp_ax"]["_count_ax"]) == matplotlib.axes._subplots.Subplot
        assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot


class TestPDPPlotMulti(object):
    @pytest.fixture
    def pdp_feat_67_rf(self, otto_data, otto_model, otto_features):
        result = pdp_isolate(
            model=otto_model,
            dataset=otto_data,
            model_features=otto_features,
            feature="feat_67",
        )
        return result

    def test_pdp_plot_multi_default(self, pdp_feat_67_rf):
        # multi charts without data dist plot
        fig, axes = pdp_plot(
            pdp_isolate_out=pdp_feat_67_rf,
            feature_name="feat_67",
            center=True,
            x_quantile=True,
        )
        assert type(fig) == matplotlib.figure.Figure
        assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
        assert len(axes["pdp_ax"]) == 9
        assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
        assert type(axes["pdp_ax"][0]) == matplotlib.axes._subplots.Subplot

    def test_pdp_plot_multi_which_classes(self, pdp_feat_67_rf):
        # change which classes
        fig, axes = pdp_plot(
            pdp_feat_67_rf,
            "feat_67",
            center=True,
            x_quantile=True,
            ncols=2,
            which_classes=[0, 3, 7],
        )
        assert len(axes["pdp_ax"]) == 3

    def test_pdp_plot_multi_one_class(self, pdp_feat_67_rf):
        # only keep 1 class
        fig, axes = pdp_plot(
            pdp_feat_67_rf,
            "feat_67",
            center=True,
            x_quantile=True,
            ncols=2,
            which_classes=[5],
        )
        assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot

    def test_pdp_plot_multi_distplot(self, pdp_feat_67_rf):
        # multi charts with data dist plot
        fig, axes = pdp_plot(
            pdp_isolate_out=pdp_feat_67_rf,
            feature_name="feat_67",
            center=True,
            x_quantile=True,
            plot_pts_dist=True,
        )
        assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
        assert len(axes["pdp_ax"]) == 9
        assert sorted(axes["pdp_ax"][0].keys()) == ["_count_ax", "_pdp_ax"]
        assert type(axes["pdp_ax"][0]["_count_ax"]) == matplotlib.axes._subplots.Subplot
        assert type(axes["pdp_ax"][0]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
34.166667
88
0.601565
1,287
10,865
4.700855
0.109557
0.11405
0.111736
0.12876
0.876694
0.813058
0.787934
0.744628
0.744628
0.713554
0
0.018905
0.30382
10,865
317
89
34.274448
0.780936
0.025495
0
0.65704
0
0
0.068646
0
0
0
0
0
0.241877
1
0.054152
false
0
0.025271
0
0.101083
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0b164bd29023088ac7ad9fd2c87b337e6be29aa7
13,591
py
Python
checkerpy/tests/validators/one/test_justcall.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
1
2018-01-12T19:20:51.000Z
2018-01-12T19:20:51.000Z
checkerpy/tests/validators/one/test_justcall.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
null
null
null
checkerpy/tests/validators/one/test_justcall.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
null
null
null
import logging
import unittest as ut
from collections import defaultdict, deque, OrderedDict
from ....validators.one import JustCall
from ....exceptions import CallableError
from ....functional import CompositionOf


class TestJustCall(ut.TestCase):
    def test_works_with_sane_callable(self):
        inp = lambda x: x
        out = JustCall(inp)
        self.assertIs(out, inp)

    def test_error_on_unnamed_object_without_name_attr(self):
        log_msg = ['ERROR:root:Object foo of type str is not callable!']
        err_msg = 'Object foo of type str is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall('foo')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_on_named_object_without_name_attr(self):
        log_msg = ['ERROR:root:Object test of type int is not callable!']
        err_msg = 'Object test of type int is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(1, 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_on_unnamed_object_with_name_attr(self):
        class Test:
            pass
        t = Test()
        t.__name__ = 'test'
        log_msg = ['ERROR:root:Object test of type Test is not callable!']
        err_msg = 'Object test of type Test is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(t)
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_on_named_object_with_name_attr(self):
        class Test:
            pass
        t = Test()
        t.__name__ = 'test'
        log_msg = ['ERROR:root:Object name of type Test is not callable!']
        err_msg = 'Object name of type Test is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(t, 'name')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_frozenset(self):
        inp = frozenset({1, 2})
        log_msg = ['ERROR:root:Object frozenset({1, 2}) is not callable!']
        err_msg = 'Object frozenset({1, 2}) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp)
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_frozenset(self):
        inp = frozenset({1, 2})
        log_msg = ['ERROR:root:Object test of type frozenset is not callable!']
        err_msg = 'Object test of type frozenset is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp, 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_deque(self):
        inp = deque([1, 2])
        log_msg = ['ERROR:root:Object deque([1, 2]) is not callable!']
        err_msg = 'Object deque([1, 2]) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp)
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_deqeue(self):
        inp = deque([1, 2])
        log_msg = ['ERROR:root:Object test of type deque is not callable!']
        err_msg = 'Object test of type deque is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp, 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_ordereddict(self):
        inp = OrderedDict({1: 'one', 2: 'two'})
        log_msg = ["ERROR:root:Object OrderedDict([(1, 'one'),"
                   " (2, 'two')]) is not callable!"]
        err_msg = ("Object OrderedDict([(1, 'one'),"
                   " (2, 'two')]) is not callable!")
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp)
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_ordereddict(self):
        inp = OrderedDict({1: 'one', 2: 'two'})
        log_msg = ['ERROR:root:Object test of type'
                   ' OrderedDict is not callable!']
        err_msg = 'Object test of type OrderedDict is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp, 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_defaultdict(self):
        inp = defaultdict(str, {1: 'one', 2: 'two'})
        log_msg = ["ERROR:root:Object defaultdict(<class 'str'>, "
                   "{1: 'one', 2: 'two'}) is not callable!"]
        err_msg = ("Object defaultdict(<class 'str'>, "
                   "{1: 'one', 2: 'two'}) is not callable!")
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp)
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_defaultdict(self):
        inp = defaultdict(str, {1: 'one', 2: 'two'})
        log_msg = ['ERROR:root:Object test of type'
                   ' defaultdict is not callable!']
        err_msg = 'Object test of type defaultdict is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp, 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_dict_keys(self):
        inp = {1: 'one', 2: 'two'}
        log_msg = ['ERROR:root:Object dict_keys([1, 2]) is not callable!']
        err_msg = 'Object dict_keys([1, 2]) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.keys())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_dict_keys(self):
        inp = {1: 'one', 2: 'two'}
        log_msg = ['ERROR:root:Object test of type dict_keys is not callable!']
        err_msg = 'Object test of type dict_keys is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.keys(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_ordereddict_keys(self):
        inp = OrderedDict({1: 'one', 2: 'two'})
        log_msg = ['ERROR:root:Object odict_keys([1, 2]) is not callable!']
        err_msg = 'Object odict_keys([1, 2]) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.keys())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_ordereddict_keys(self):
        inp = OrderedDict({1: 'one', 2: 'two'})
        log_msg = ['ERROR:root:Object test of type'
                   ' odict_keys is not callable!']
        err_msg = 'Object test of type odict_keys is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.keys(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_dict_values(self):
        inp = {'one': 1, 'two': 2}
        log_msg = ['ERROR:root:Object dict_values([1, 2]) is not callable!']
        err_msg = 'Object dict_values([1, 2]) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.values())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_dict_values(self):
        inp = {'one': 1, 'two': 2}
        log_msg = ['ERROR:root:Object test of type'
                   ' dict_values is not callable!']
        err_msg = 'Object test of type dict_values is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.values(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_ordereddict_values(self):
        inp = OrderedDict({'one': 1, 'two': 2})
        log_msg = ['ERROR:root:Object odict_values([1, 2]) is not callable!']
        err_msg = 'Object odict_values([1, 2]) is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.values())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_ordereddict_values(self):
        inp = OrderedDict({'one': 1, 'two': 2})
        log_msg = ['ERROR:root:Object test of type'
                   ' odict_values is not callable!']
        err_msg = 'Object test of type odict_values is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.values(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_dict_items(self):
        inp = {'one': 1, 'two': 2}
        log_msg = ["ERROR:root:Object dict_items([('one', 1),"
                   " ('two', 2)]) is not callable!"]
        err_msg = ("Object dict_items([('one', 1),"
                   " ('two', 2)]) is not callable!")
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.items())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_dict_items(self):
        inp = {'one': 1, 'two': 2}
        log_msg = ['ERROR:root:Object test of type'
                   ' dict_items is not callable!']
        err_msg = 'Object test of type dict_items is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.items(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_unnamed_ordereddict_items(self):
        inp = OrderedDict({'one': 1, 'two': 2})
        log_msg = ["ERROR:root:Object odict_items([('one', 1),"
                   " ('two', 2)]) is not callable!"]
        err_msg = ("Object odict_items([('one', 1),"
                   " ('two', 2)]) is not callable!")
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.items())
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)

    def test_error_with_named_ordereddict_items(self):
        inp = OrderedDict({'one': 1, 'two': 2})
        log_msg = ['ERROR:root:Object test of type'
                   ' odict_items is not callable!']
        err_msg = 'Object test of type odict_items is not callable!'
        with self.assertLogs(level=logging.ERROR) as log:
            with self.assertRaises(CallableError) as err:
                _ = JustCall(inp.items(), 'test')
        self.assertEqual(str(err.exception), err_msg)
        self.assertEqual(log.output, log_msg)


class TestJustCallMethods(ut.TestCase):
    def test_has_attribute_o(self):
        self.assertTrue(hasattr(JustCall, 'o'))

    def test_attribute_o_is_callable(self):
        self.assertTrue(callable(JustCall.o))

    def test_o_returns_composition(self):
        def f(x):
            return x
        composition = JustCall.o(f)
        self.assertIsInstance(composition, CompositionOf)

    def test_o_raises_error_on_argument_not_callable(self):
        err_msg = ('foo must be a callable that accepts (i) a value,'
                   ' (ii) an optional name for that value, and (iii)'
                   ' any number of keyword arguments!')
        with self.assertRaises(CallableError) as err:
            _ = JustCall.o('foo')
        self.assertEqual(str(err.exception), err_msg)


if __name__ == '__main__':
    ut.main()
45.152824
79
0.619454
1,738
13,591
4.66916
0.058113
0.036969
0.076895
0.101664
0.911892
0.911892
0.907702
0.898583
0.868022
0.815527
0
0.008092
0.263483
13,591
300
80
45.303333
0.802597
0
0
0.592453
0
0
0.207785
0
0
0
0
0
0.384906
1
0.113208
false
0.007547
0.022642
0.003774
0.154717
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9bd33d4beb645fb3312356c3aa75e829aace2e80
220
py
Python
rasa/nlu/featurizers/sparse_featurizer/sparse_featurizer.py
fintzd/rasa
6359be5509c7d87cd29c2ab5149bc45e843fea85
[ "Apache-2.0" ]
9,701
2019-04-16T15:46:27.000Z
2022-03-31T11:52:18.000Z
rasa/nlu/featurizers/sparse_featurizer/sparse_featurizer.py
fintzd/rasa
6359be5509c7d87cd29c2ab5149bc45e843fea85
[ "Apache-2.0" ]
6,420
2019-04-16T15:58:22.000Z
2022-03-31T17:54:35.000Z
rasa/nlu/featurizers/sparse_featurizer/sparse_featurizer.py
fintzd/rasa
6359be5509c7d87cd29c2ab5149bc45e843fea85
[ "Apache-2.0" ]
3,063
2019-04-16T15:23:52.000Z
2022-03-31T00:01:12.000Z
from abc import ABC

import scipy.sparse

from rasa.nlu.featurizers.featurizer import Featurizer


class SparseFeaturizer(Featurizer[scipy.sparse.spmatrix], ABC):
    """Base class for all sparse featurizers."""

    pass
22
63
0.772727
28
220
6.071429
0.571429
0.105882
0
0
0
0
0
0
0
0
0
0
0.140909
220
9
64
24.444444
0.899471
0.172727
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.6
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
9bd8ba5ddf85211654f2727017b412f1c8ecf824
33
py
Python
python/djwechatlogin/wechat/qyweixin/__init__.py
yc19890920/Learn
3990e75b469225ba7b430539ef9a16abe89eb863
[ "Apache-2.0" ]
1
2021-01-11T06:30:44.000Z
2021-01-11T06:30:44.000Z
python/djwechatlogin/wechat/qyweixin/__init__.py
yc19890920/Learn
3990e75b469225ba7b430539ef9a16abe89eb863
[ "Apache-2.0" ]
23
2020-02-12T02:35:49.000Z
2022-02-11T03:45:40.000Z
python/djwechatlogin/wechat/qyweixin/__init__.py
yc19890920/Learn
3990e75b469225ba7b430539ef9a16abe89eb863
[ "Apache-2.0" ]
2
2020-04-08T15:39:46.000Z
2020-10-10T10:13:09.000Z
from .client import QiyeWeixinAPI
33
33
0.878788
4
33
7.25
1
0
0
0
0
0
0
0
0
0
0
0
0.090909
33
1
33
33
0.966667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9be86853e0eec6e76b1ece7b5f5e0defe317e984
76
py
Python
vit/formatter/status_long.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
179
2020-07-28T08:21:51.000Z
2022-03-30T21:39:37.000Z
vit/formatter/status_long.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
255
2017-02-01T11:49:12.000Z
2020-07-26T22:31:25.000Z
vit/formatter/status_long.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
26
2017-01-17T20:31:13.000Z
2020-06-17T13:09:01.000Z
from vit.formatter.status import Status

class StatusLong(Status):
    pass
15.2
39
0.776316
10
76
5.9
0.8
0
0
0
0
0
0
0
0
0
0
0
0.157895
76
4
40
19
0.921875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
9bef662a796e6f0f5600bc58f73ccd7326b47d8a
3,202
py
Python
tests/python/java_help_test.py
karpierz/jtypes.py4j
1bf48c022357c558da4d0df45fe4a0100df99a99
[ "BSD-3-Clause" ]
null
null
null
tests/python/java_help_test.py
karpierz/jtypes.py4j
1bf48c022357c558da4d0df45fe4a0100df99a99
[ "BSD-3-Clause" ]
null
null
null
tests/python/java_help_test.py
karpierz/jtypes.py4j
1bf48c022357c558da4d0df45fe4a0100df99a99
[ "BSD-3-Clause" ]
null
null
null
from __future__ import unicode_literals, absolute_import

from .java_gateway_test import gateway, example_app_process  # <AK> was: from py4j.tests.


def test_help_object():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = g.help(ex, display=False)
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" in doc
            assert "method2" in doc


def test_doc_object():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = ex.__doc__
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" in doc
            assert "getField1" in doc


def test_not_callable():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            try:
                ex()
                raise AssertionError
            except TypeError as e:
                assert "object is not callable" in str(e)


def test_help_pattern_1():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = g.help(ex, display=False, pattern="m*")
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" in doc
            assert "getField1" not in doc


def test_help_pattern_2():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = g.help(ex, display=False, pattern="getField1(*")
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" not in doc
            assert "getField1" in doc


def test_help_method():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = g.help(ex.method7, display=False)
            # Make sure multiple method7s appear (overloaded method)
            assert "method7(int)" in doc
            assert "method7(Object)" in doc
            assert "method1" not in doc


def test_doc_method():
    with example_app_process():
        with gateway() as g:
            ex = g.getNewExample()
            doc = ex.method7.__doc__
            # Make sure multiple method7s appear (overloaded method)
            assert "method7(int)" in doc
            assert "method7(Object)" in doc
            assert "method1" not in doc


def test_help_class():
    with example_app_process():
        with gateway() as g:
            clazz = g.jvm.py4j.examples.ExampleClass
            doc = g.help(clazz, display=False)
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" in doc
            assert "method2" in doc


def test_doc_class():
    with example_app_process():
        with gateway() as g:
            clazz = g.jvm.py4j.examples.ExampleClass
            doc = clazz.__doc__
            # Make sure multiple method7s appear (overloaded method)
            assert "Help on class ExampleClass in package py4j.examples" in doc
            assert "method1" in doc
            assert "method2" in doc
32.343434
89
0.592442
392
3,202
4.686224
0.158163
0.065324
0.095808
0.102885
0.843223
0.839412
0.826892
0.825259
0.801306
0.772999
0
0.016256
0.327608
3,202
98
90
32.673469
0.836972
0.05965
0
0.666667
0
0
0.166001
0
0
0
0
0
0.346667
1
0.12
false
0
0.026667
0
0.146667
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
501030c89c274cdac1a18e143ff1720918d5006b
116
py
Python
opracowanie_pytan/questions/admin.py
EricFelixLuther/quiz_app
0063ab129432678d7d9a3fa463b3657f71101fe1
[ "MIT" ]
null
null
null
opracowanie_pytan/questions/admin.py
EricFelixLuther/quiz_app
0063ab129432678d7d9a3fa463b3657f71101fe1
[ "MIT" ]
null
null
null
opracowanie_pytan/questions/admin.py
EricFelixLuther/quiz_app
0063ab129432678d7d9a3fa463b3657f71101fe1
[ "MIT" ]
null
null
null
from django.contrib import admin
from .models import Quiz_Set, Question

admin.site.register([Quiz_Set, Question])
19.333333
41
0.801724
17
116
5.352941
0.647059
0.153846
0.32967
0
0
0
0
0
0
0
0
0
0.112069
116
5
42
23.2
0.883495
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
50308051bf5908b6032dbd89c3f9583e9bcc986f
147
py
Python
src/pyotelem/plots/__init__.py
ryanjdillon/pyotelem
3c92a368c53631046ed74d073b0af498226567ad
[ "MIT" ]
null
null
null
src/pyotelem/plots/__init__.py
ryanjdillon/pyotelem
3c92a368c53631046ed74d073b0af498226567ad
[ "MIT" ]
null
null
null
src/pyotelem/plots/__init__.py
ryanjdillon/pyotelem
3c92a368c53631046ed74d073b0af498226567ad
[ "MIT" ]
null
null
null
from . import plotconfig
from . import plotglides
from . import plotdives
from . import plotdynamics
from . import plotdsp
from . import plotutils
21
26
0.795918
18
147
6.5
0.444444
0.512821
0
0
0
0
0
0
0
0
0
0
0.163265
147
6
27
24.5
0.95122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
505014d01b1044b1523c49d4603cce0fb5d27e1d
42
py
Python
falco/__init__.py
leonidprinceton/falco-python
21e84bf8052faca73cb703fa4d8682c35630ee4e
[ "Apache-2.0" ]
null
null
null
falco/__init__.py
leonidprinceton/falco-python
21e84bf8052faca73cb703fa4d8682c35630ee4e
[ "Apache-2.0" ]
null
null
null
falco/__init__.py
leonidprinceton/falco-python
21e84bf8052faca73cb703fa4d8682c35630ee4e
[ "Apache-2.0" ]
null
null
null
from .falco import *
from .utils import *
14
20
0.714286
6
42
5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.190476
42
2
21
21
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
acb28fb23262d0998acc75728289c9c9de01bf93
102
py
Python
cls/p7.py
sanchez0623/zsq.LearningPython
419df031a2a905fe7d7c2dfe14aa2f8729989a9a
[ "Apache-2.0" ]
null
null
null
cls/p7.py
sanchez0623/zsq.LearningPython
419df031a2a905fe7d7c2dfe14aa2f8729989a9a
[ "Apache-2.0" ]
null
null
null
cls/p7.py
sanchez0623/zsq.LearningPython
419df031a2a905fe7d7c2dfe14aa2f8729989a9a
[ "Apache-2.0" ]
null
null
null
# Can be used to check whether this file was run as the entry point, and to run some logic accordingly
if __name__ == '__main__':
    print('this is app')
print('this is module')
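The snippet demonstrates the standard entry-point check: run directly, both lines print; when imported, only the module-level print fires. A sketch of the importing side (hypothetical caller, assuming the file is importable as p7):

# Running `python p7.py` prints both lines;
# importing it from another script prints only the module-level line.
import p7  # prints: this is module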
17
26
0.686275
13
102
4.769231
0.769231
0.290323
0.354839
0
0
0
0
0
0
0
0
0
0.166667
102
5
27
20.4
0.729412
0.215686
0
0
0
0
0.423077
0
0
0
0
0
0
1
0
true
0
0
0
0
0.666667
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
acbbe6f47814bc2d3c6890dde8a7d1503844ddbb
34,894
py
Python
exportmodul.py
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
41b99deacc5bfee6562907de109a8ad5af917d01
[ "MIT" ]
null
null
null
exportmodul.py
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
41b99deacc5bfee6562907de109a8ad5af917d01
[ "MIT" ]
null
null
null
exportmodul.py
MaliziaGrimm/Lohnvorerfassung-50a-fuer-DATEV
41b99deacc5bfee6562907de109a8ad5af917d01
[ "MIT" ]
null
null
null
from flask import Flask
from flask import request, render_template
import os, time, csv
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Text, MetaData, Table, DATE
from sqlalchemy.sql import select, update
import datenbank_obj, funktionen, setting
import pandas as pd
import datetime


def export_steuer(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # only for the month/year selection
    return


def export_steuerli(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # possibly needed for selecting individual records;
    # currently all captured records that have not been exported yet are
    # exported, regardless of the capture month
    return


def export_steuerliste(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    # the actual Excel export and PDF print (planned)
    var_stmonat = request.form["form_stmonat"]
    var_stjahr = request.form["form_stjahr"]
    engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
    metadata = datenbank_obj.getdbmetadata(engine)
    abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
    metadata.create_all()
    # The original enumerated all 48 month/year combinations from 01/2022 to
    # 12/2025 in identical elif branches (with a note that a variable month was
    # "currently not queryable"); building the query string once is equivalent.
    gueltige_monate = ["%02d" % m for m in range(1, 13)]
    gueltige_jahre = ("2022", "2023", "2024", "2025")
    if var_stmonat in gueltige_monate and var_stjahr in gueltige_jahre:
        result = pd.read_sql(
            'SELECT * FROM abrechnungsdaten'
            ' WHERE abrechnungsdaten.abrechnungsmonat=="%s"'
            ' AND abrechnungsdaten.abrechnungsjahr=="%s" ' % (var_stmonat, var_stjahr),
            engine)
    else:
        var_version_titel = setting.Version_Titel
        var_version_program = setting.Version_Program
        var_text = "Zeitraum nicht verfügbar!"
        return render_template('/index.html', v_text=var_text,
                               v_bnr=var_beraternummer, v_mdt=var_mandantennummer,
                               v_heute="Fehler !", v_monat=var_abrmonat,
                               v_jahr=var_abrjahr,
                               v_version_program=var_version_program,
                               v_version_titel=var_version_titel)

    result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_Export_Monatsauswertung_16.csv",
                  sep=';', encoding='utf-16', index=False, mode='w')
    result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_Export_Monatsauswertung_8.csv",
                  sep=';', encoding='utf-8', index=False, mode='w')

    # create intermediate files for the agency-commission (AGP) booking lists
    result.to_csv("daten/ZW_Buchungsliste_AGP_AG.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
    result.to_csv("daten/ZW_Buchungsliste_AGP_AN.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')

    # source-file fields (shared by the three loops below):
    # 1 = record no.; 2 = BNR; 3 = Mdt; 4 = PNR; 5 = wage type; 6 = wage type text;
    # 7 = value; 8 = cost centre; 9 = cost unit; 10 = type of activity; 11 = free text;
    # 12 = booking month; 13 = booking year; 14 = % agency total; 15 = % agency employee share;
    # 16 = agency commission employee (AN); 17 = agency commission employer (AG);
    # 18 = wage type VAT deduction; 19 = VAT value; 20 = VAT account; 21 = LODAS export flag;
    # 22 = Lohn und Gehalt export flag; 23 = export repetition; 24 = export date; 25 = agency number

    # open source and target file - write the employer AGP values into a booking list
    # target layout: AGP contra account (from dict); agency (personal ledger account,
    # set to 99988 if empty); employer AGP value (negative); booking date as 01MMYYYY;
    # free text as booking text, 120 characters ?????
    filequelle = open("daten/ZW_Buchungsliste_AGP_AG.txt")
    fileziel = open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AGP_AGWerte_Buchungsliste.csv", "w", encoding='utf-8')
    AGP_Gegenkonto = funktionen.fibukonten_dic_lesen("konto_ggagp")
    for x in filequelle:
        (stelle1, stelle2, stelle3, stelle4, stelle5, stelle6, stelle7, stelle8,
         stelle9, stelle10, stelle11, stelle12, stelle13, stelle14, stelle15,
         stelle16, stelle17, stelle18, stelle19, stelle20, stelle21, stelle22,
         stelle23, stelle24, stelle25) = x.split("|")
        stelle25 = stelle25.strip()
        if str(stelle17) != "0.0" and str(stelle17) != "0":
            if stelle25 == "":
                stelle25 = "99988"
            fileziel.write(AGP_Gegenkonto+";"+stelle25+";"+stelle17+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" AGP %: "+stelle14+" davon AGP AN %: "+stelle15+" Text:"+stelle11+";0\n")
    filequelle.close()
    fileziel.close()

    # open source and target file - write the employee AGP values into a booking list
    filequelle = open("daten/ZW_Buchungsliste_AGP_AN.txt")
    fileziel = open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AGP_ANWerte_Buchungsliste.csv", "w", encoding='utf-8')
    AGP_AN_Gegenkonto = funktionen.fibukonten_dic_lesen("konto_ggagpan")
    for x in filequelle:
        (stelle1, stelle2, stelle3, stelle4, stelle5, stelle6, stelle7, stelle8,
         stelle9, stelle10, stelle11, stelle12, stelle13, stelle14, stelle15,
         stelle16, stelle17, stelle18, stelle19, stelle20, stelle21, stelle22,
         stelle23, stelle24, stelle25) = x.split("|")
        stelle25 = stelle25.strip()
        if str(stelle16) != "0.0" and str(stelle16) != "0":
            if stelle25 == "":
                stelle25 = "99988"
            fileziel.write(AGP_AN_Gegenkonto+";"+stelle25+";"+stelle16+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" AGP %: "+stelle14+" davon AGP AN %: "+stelle15+" Text:"+stelle11+";0\n")
    filequelle.close()
    fileziel.close()

    # open source and target file - write the employer VAT values into a booking list
    # target layout: VAT account and contra account (from dict); agency set to
    # "AG unbekannt" if empty; booking date as 01MMYYYY; free text as booking text
    filequelle = open("daten/ZW_Buchungsliste_AGP_AG.txt")
    fileziel = open("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_stmonat+"_"+var_stjahr+"_AG_USt_Werte_Buchungsliste.csv", "w", encoding='utf-8')
    AG_USt_konto = funktionen.fibukonten_dic_lesen("konto_ust19")
    GG_AG_USt_konto = funktionen.fibukonten_dic_lesen("konto_ggust19")
    for x in filequelle:
        (stelle1, stelle2, stelle3, stelle4, stelle5, stelle6, stelle7, stelle8,
         stelle9, stelle10, stelle11, stelle12, stelle13, stelle14, stelle15,
         stelle16, stelle17, stelle18, stelle19, stelle20, stelle21, stelle22,
         stelle23, stelle24, stelle25) = x.split("|")
        stelle25 = stelle25.strip()
        if str(stelle18) == "0" and str(stelle19) != "0" and str(stelle19) != "0.0":
            if str(stelle16) != "0.0" and str(stelle16) != "0":
                if stelle25 == "":
                    stelle25 = "AG unbekannt"
                fileziel.write(AG_USt_konto+";"+GG_AG_USt_konto+";"+stelle19+";01"+stelle12+stelle13+";"+stelle8+";"+stelle9+";PNR: "+stelle4+" Agentur: "+stelle25+" Text:"+stelle11+";0\n")
    filequelle.close()
    fileziel.close()

    # #################### PDF block - STILL OPEN, removed completely ####################

    if result.shape[0] != 0:
        var_text = "Es wurden "+str(result.shape[0])+" Datensätze in die Datei Export_Steuer exportiert. Weitere Auswertungen stehen zur Verfügung."
    else:
        var_text = "Es wurden keine Datensätze als Steuerwerte exportiert"
    return var_text, var_stmonat, var_stjahr


def export_csv(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantennummer):
    engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
    metadata = datenbank_obj.getdbmetadata(engine)
    abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
    metadata.create_all()
    # export all records as CSV
    result = pd.read_sql("SELECT * FROM abrechnungsdaten", engine)
    result.to_csv("export/"+var_beraternummer+"_"+var_mandantennummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Export.csv",
                  sep=';', encoding='utf-16', index=False, mode='w')
    if result.shape[0] != 0:
        var_text = "Es wurden "+str(result.shape[0])+" Datensätze als csv Daten exportiert."
    else:
        var_text = "Es wurden keine Datensätze als csv Daten exportiert"
    return var_text


## after many changes this probably no longer works - untested
def export_lohnundgehalt(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantenummer):
    engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
    metadata = datenbank_obj.getdbmetadata(engine)
    abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
    metadata.create_all()
    if request.method == 'POST':
        neuedatei = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", "w")
        neuedatei.write(var_beraternummer+";"+var_mandantenummer+";"+var_abrmonat+"/"+var_abrjahr+"\n")
        neuedatei.close()
        # export the wage types and net additions/deductions
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnart, abrechnungsdaten.wert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.exportlohnundgehalt=="N" ', engine)
        result.to_csv("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", sep=';', encoding='utf-8', index=False, header=False, mode='a')
        ### NEW: AGP and VAT also go into the LuG file
        # export the VAT values
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.ustwert != "0" AND abrechnungsdaten.exportlohnundgehalt=="N" ', engine)
        result.to_csv("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", sep=';', encoding='utf-8', index=False, header=False, mode='a')
        # export the agency commission into an intermediate file
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AN, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AN != "0" AND abrechnungsdaten.exportlohnundgehalt=="N" ', engine)
        result.to_csv("daten/ZW_LuG_AGP.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
        ############
        # open source and target file - insert the wage type for the AGP values
        # source-file fields: 1 = PNR; 2 = value; 3 = cost centre; 4 = cost unit
        filequelle = open("daten/ZW_LuG_AGP.txt")
        fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"LuG.txt", "a", encoding='utf-8')
        AGP_Lohnart = funktionen.lohnarten_dic_lesen("loa_nb6")
        for x in filequelle:
            stelle1, stelle2, stelle3, stelle4 = x.split("|")
            stelle4 = stelle4.strip()
            # stelle2 = stelle2.replace(".", ",")
            fileziel.write(stelle1+";"+AGP_Lohnart+";"+stelle2+";"+stelle3+";"+stelle4+"\n")
        filequelle.close()
        fileziel.close()
        hdatum = datetime.datetime.now()
        hdatum = hdatum.strftime("%d.%m.%Y")
        conn = engine.connect()
        abrechnungsdatenupdate = abrechnungsdaten.update().where(abrechnungsdaten.c.exportlohnundgehalt == "N").values(
            exportlohnundgehalt="J", exportlodas="X", exportwiederholung="X",
            abrechnungsmonat=var_abrmonat, abrechnungsjahr=var_abrjahr,
            exportdatum=str(hdatum))
        conn.execute(abrechnungsdatenupdate)
        abrechnungsdatenupdate = abrechnungsdaten.select()
        conn.execute(abrechnungsdatenupdate).fetchall()
        if result.shape[0] != 0:
            var_text = "Es wurden "+str(result.shape[0])+" Datensätze für Lohn und Gehalt exportiert."
            # advance the stored billing period (MM|YYYY) by one month
            filequelle = open("daten/abrechnungszeitraum.txt", "r", encoding='utf-8')
            for x in filequelle:
                var_abrmonat, var_abrjahr = x.split("|")
                break
            var_abrmonat = int(var_abrmonat) + 1
            if var_abrmonat < 10:
                var_abrmonat = "0" + str(var_abrmonat)
            else:
                var_abrmonat = str(var_abrmonat)
            if var_abrmonat == "13":
                var_abrmonat = "01"
                var_abrjahr = str(int(var_abrjahr) + 1)
            filequelle = open("daten/abrechnungszeitraum.txt", "w")
            filequelle.write(var_abrmonat+"|"+var_abrjahr)
            filequelle.close()
        else:
            var_text = "Es wurden keine Datensätze für Lohn und Gehalt exportiert"
    else:
        var_text = "Es werden die Datensätze der Monatsübersicht für Lohn und Gehalt exportiert"
    return var_text


### LODAS export function, current as of 2022-02-14, with AGP and VAT
### tables changed to net and gross
### NEW* 2022-04-02: VAT when the employer covers it - wage type 0 in the SQL DB
### VAT when the employee bears it: net deduction in the SQL DB
def export_lodas(var_abrmonat, var_abrjahr, var_beraternummer, var_mandantenummer):
    engine = create_engine('sqlite:///daten/abrechnungsdaten.db')
    metadata = datenbank_obj.getdbmetadata(engine)
    abrechnungsdaten = datenbank_obj.abrechnungsdaten_dbobj(metadata)
    metadata.create_all()
    if request.method == 'POST':
        if os.path.exists("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt"):
            ## open the existing file; data will be appended
            fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt", "a")
            fileziel.write("\n* Stunden zur Abrechnung von Mitarbeitern\n")
            fileziel.write("[Bewegungsdaten]\n")
        else:
            ## open a new file and write the header of the LODAS import file
            fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt", "w")
            fileziel.write("[Allgemein]\nZiel=LODAS\nVersion_SST=1.0\nBeraterNr=")
            fileziel.write(var_beraternummer)
            fileziel.write("\nMandantenNr=")
            fileziel.write(var_mandantenummer)
            fileziel.write("\nDatumsformat=JJJJ-MM-TT")
            fileziel.write("\nStringbegrenzer='")
            fileziel.write("\n\n* LEGENDE:\n* Datei erzeugt mit Tool ARMTool\n* AP: Andreé Rosenkranz; andree@rosenkranz.one\n\n")
            fileziel.write("* Satzbeschreibungen zur Übergabe von Bewegungsdaten für Mitarbeiter\n[Satzbeschreibung]\n")
            # fileziel.write("\n10;u_lod_bwd_buchung_brutto;abrechnung_zeitraum#bwd;pnr#bwd;la_eigene#bwd;brutto_fest_bez#bwd;kostenstelle#bwd;kostentraeger#bwd;")
            # fileziel.write("\n11;u_lod_bwd_buchung_netto;abrechnung_zeitraum#bwd;pnr#bwd;nba_nr#bwd;netto_betrag#bwd;")
            fileziel.write("\n10;u_lod_bwd_buchung_standard;abrechnung_zeitraum#bwd;pnr#bwd;la_eigene#bwd;bs_nr#bwd;bs_wert_butab#bwd;kostenstelle#bwd;kostentraeger#bwd;")
            fileziel.write("\n\n")
            fileziel.write("* Werte zur Abrechnung von Mitarbeitern\n\n")
            fileziel.write("[Bewegungsdaten]\n\n")
        # export the VAT values into an intermediate file
        # NEW* 2022-04-01: without employer VAT; the original ran the ustwert-based
        # query first and immediately overwrote it with the next one, so it is
        # kept here commented out:
        # result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.ustwert != "0" AND abrechnungsdaten.exportlodas=="N" ', engine)
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnartustabzug, abrechnungsdaten.ustwert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.lohnartustabzug != "0" AND abrechnungsdaten.exportlodas=="N" ', engine)
        result.to_csv("daten/ZW_Lodas_USt.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
        # export the employee agency commission into an intermediate file
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AN, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AN != "0" AND abrechnungsdaten.exportlodas=="N" ', engine)
        result.to_csv("daten/ZW_Lodas_AGP_AN.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
        # export the employer agency commission into an intermediate file
        # (written but, as in the original, not read again below)
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.agenturprovwert_AG, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.agenturprovwert_AG != "0" AND abrechnungsdaten.exportlodas=="N" ', engine)
        result.to_csv("daten/ZW_Lodas_AGP_AG.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
        # export the wage types and net additions/deductions
        result = pd.read_sql('SELECT abrechnungsdaten.PNR, abrechnungsdaten.lohnart, abrechnungsdaten.wert, abrechnungsdaten.kostenstelle, abrechnungsdaten.kostentraeger FROM abrechnungsdaten WHERE abrechnungsdaten.exportlodas=="N" ', engine)
        result.to_csv("daten/ZW_Lodas.txt", sep='|', encoding='utf-8', index=False, header=False, mode='w')
        ############
        # open source and target file - AGP values
        # source-file fields: 1 = PNR; 2 = value; 3 = cost centre; 4 = cost unit
        filequelle = open("daten/ZW_Lodas_AGP_AN.txt")
        fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt", "a", encoding='utf-8')
        AGP_Lohnart = funktionen.lohnarten_dic_lesen("loa_nb6")
        for x in filequelle:
            stelle1, stelle2, stelle3, stelle4 = x.split("|")
            stelle4 = stelle4.strip()
            var_bs = "3"
            stelle2 = stelle2.replace(".", ",")
            # fileziel.write("11;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+AGP_Lohnart+";"+stelle2+";"+stelle3+";"+stelle4+";\n")
            fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+AGP_Lohnart+";"+var_bs+";"+stelle2+";"+stelle3+";"+stelle4+";\n")
        filequelle.close()
        fileziel.close()
        ############
        # open source and target file - VAT values
        # source-file fields: 1 = PNR; 2 = wage type; 3 = value; 4 = cost centre; 5 = cost unit
        # fixed: the original opened "daten/ZW_Lodas_Ust.txt", which does not match
        # the "daten/ZW_Lodas_USt.txt" written above
        filequelle = open("daten/ZW_Lodas_USt.txt")
        fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt", "a", encoding='utf-8')
        for x in filequelle:
            stelle1, stelle2, stelle3, stelle4, stelle5 = x.split("|")
            stelle5 = stelle5.strip()
            if int(stelle2) > 8999:
                var_bs = "3"
                var_sa = "11"
            else:
                var_bs = "2"
                var_sa = "10"
            stelle3 = stelle3.replace(".", ",")
            # fileziel.write(var_sa+";"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
            fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+var_bs+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
        filequelle.close()
        fileziel.close()
        # open source and target file - wage types and net additions/deductions
        # source-file fields: 1 = PNR; 2 = wage type; 3 = value; 4 = cost centre; 5 = cost unit
        filequelle = open("daten/ZW_Lodas.txt")
        fileziel = open("export/"+var_beraternummer+"_"+var_mandantenummer+"_"+var_abrmonat+"_"+var_abrjahr+"_Lodas.txt", "a", encoding='utf-8')
        for x in filequelle:
            stelle1, stelle2, stelle3, stelle4, stelle5 = x.split("|")
            stelle5 = stelle5.strip()
            if int(stelle2) > 8999:
                var_bs = "3"
                var_sa = "11"
            else:
                var_bs = "2"
                var_sa = "10"
            stelle3 = stelle3.replace(".", ",")
            # fileziel.write(var_sa+";"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
            fileziel.write("10;"+var_abrjahr+"-"+var_abrmonat+"-01;"+stelle1+";"+stelle2+";"+var_bs+";"+stelle3+";"+stelle4+";"+stelle5+";\n")
        fileziel.write("\n\n[Hinweisdaten]\n\nDaten uebernommen aus Erfassungstool ARMTool\nfuer die korrekte Berechnung saemtlicher Werte ist allein der Anwender verantwortlich!\n")
        # close the files
        filequelle.close()
        fileziel.close()
        ######################
        hdatum = datetime.datetime.now()
        hdatum = hdatum.strftime("%d.%m.%Y")
        conn = engine.connect()
        abrechnungsdatenupdate = abrechnungsdaten.update().where(abrechnungsdaten.c.exportlodas == "N").values(
            exportlohnundgehalt="X", exportlodas="J", exportwiederholung="X",
            abrechnungsmonat=var_abrmonat, abrechnungsjahr=var_abrjahr,
            exportdatum=str(hdatum))
        conn.execute(abrechnungsdatenupdate)
        abrechnungsdatenupdate = abrechnungsdaten.select()
        conn.execute(abrechnungsdatenupdate).fetchall()
        if result.shape[0] != 0:
            var_text = "Es wurden "+str(result.shape[0])+" Datensätze für Lodas exportiert."
            # advance the stored billing period (MM|YYYY) by one month
            filequelle = open("daten/abrechnungszeitraum.txt", "r", encoding='utf-8')
            for x in filequelle:
                var_abrmonat, var_abrjahr = x.split("|")
                break
            var_abrmonat = int(var_abrmonat) + 1
            if var_abrmonat < 10:
                var_abrmonat = "0" + str(var_abrmonat)
            else:
                var_abrmonat = str(var_abrmonat)
            if var_abrmonat == "13":
                var_abrmonat = "01"
                var_abrjahr = str(int(var_abrjahr) + 1)
            filequelle = open("daten/abrechnungszeitraum.txt", "w")
            filequelle.write(var_abrmonat+"|"+var_abrjahr)
            filequelle.close()
        else:
            var_text = "Es wurden keine Datensätze für Lodas exportiert"
    else:
        var_text = "Es werden die Datensätze der Monatsübersicht für Lodas exportiert"
    return var_text
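The period-rollover block is duplicated verbatim in export_lohnundgehalt and export_lodas; a small helper (hypothetical, not in the original file) would keep the two copies in sync:

def naechster_abrechnungszeitraum(monat, jahr):
    # Advance an MM|YYYY billing period by one month, wrapping December into
    # January of the next year; equivalent to the duplicated inline blocks.
    monat = int(monat) + 1
    if monat > 12:
        monat = 1
        jahr = int(jahr) + 1
    return "%02d" % monat, str(jahr)

# usage, mirroring the original file handling:
# monat, jahr = naechster_abrechnungszeitraum(var_abrmonat, var_abrjahr)
# open("daten/abrechnungszeitraum.txt", "w").write(monat + "|" + jahr)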
70.635628
316
0.681865
3,853
34,894
6.021023
0.105892
0.053407
0.030001
0.037502
0.876546
0.855425
0.844088
0.828398
0.740032
0.728092
0
0.040863
0.187167
34,894
494
317
70.635628
0.777069
0.155671
0
0.451429
0
0.028571
0.360318
0.198617
0
0
0
0
0
1
0.017143
false
0.014286
0.028571
0.005714
0.065714
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
accb96f14eb03fa437b6008fa217177f8b6d2a1b
12,768
py
Python
tests/test_misc.py
oolorg/opencenter
689805f663ce9332b2502f98c384a7b4d9d46ce4
[ "Apache-2.0" ]
null
null
null
tests/test_misc.py
oolorg/opencenter
689805f663ce9332b2502f98c384a7b4d9d46ce4
[ "Apache-2.0" ]
null
null
null
tests/test_misc.py
oolorg/opencenter
689805f663ce9332b2502f98c384a7b4d9d46ce4
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##############################################################################

import opencenter.webapp.utility

from util import OpenCenterTestCase
from opencenter.db import api as db_api
from opencenter.db import exceptions as exc


class MiscDBAPITests(OpenCenterTestCase):
    def __init__(self, *args, **kwargs):
        super(MiscDBAPITests, self).__init__(*args, **kwargs)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_call_undefined_model(self):
        api = db_api.api_from_models()
        with self.assertRaises(KeyError):
            api._call_model('get_all', 'fakemodel')

    def test_call_bad_model_function(self):
        api = db_api.api_from_models()
        with self.assertRaises(ValueError):
            api._call_model('bad_function', 'nodes')

    def test_bad_concrete_expression_syntax(self):
        api = db_api.api_from_models()
        with self.assertRaises(SyntaxError):
            api.concrete_expression("foo not in 'bar'")

    def test_bad_regularize_expression_syntax(self):
        api = db_api.api_from_models()
        with self.assertRaises(SyntaxError):
            api.regularize_expression("foo not in 'bar'")

    def test_delete_nonexistant_node(self):
        api = db_api.api_from_models()
        with self.assertRaises(exc.IdNotFound):
            api.node_delete_by_id(99)


class MiscTests(OpenCenterTestCase):
    def __init__(self, *args, **kwargs):
        super(MiscTests, self).__init__(*args, **kwargs)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _build_node_tree(self):
        """Build the container/node tree shared by the expansion tests.

        The original repeated this setup verbatim in each test:
        container1 -> {container2a, container2b, node1}
        container2a -> {container3a, node2a}
        container2b -> {node2b}
        container3a -> {node3a}
        """
        nodes = {}
        for name in ('container1', 'container2a', 'container2b', 'container3a',
                     'node1', 'node2a', 'node2b', 'node3a'):
            nodes[name] = self._model_create('nodes', name=name)
        parents = {'container2a': 'container1', 'container2b': 'container1',
                   'container3a': 'container2a', 'node1': 'container1',
                   'node2a': 'container2a', 'node2b': 'container2b',
                   'node3a': 'container3a'}
        for child, parent in parents.items():
            self._model_create('facts', node_id=nodes[child]['id'],
                               key='parent_id', value=nodes[parent]['id'])
        for name in ('container1', 'container2a', 'container2b', 'container3a'):
            self._model_create('facts', node_id=nodes[name]['id'],
                               key='backends', value=['container', 'node'])
        for name in ('node1', 'node2a', 'node2b', 'node3a'):
            self._model_create('facts', node_id=nodes[name]['id'],
                               key='backends', value=['node'])
        return nodes

    def test_node_expansion(self):
        nodes = self._build_node_tree()
        nodelist = opencenter.webapp.utility.expand_nodelist(
            [nodes['container1']['id']])
        self.logger.debug('Expanded nodelist: %s' % nodelist)
        # node list should contain ids of node1, node2a, node2b, and node3a
        self.assertEquals(len(nodelist), 4)
        self.assertTrue(nodes['node1']['id'] in nodelist)
        self._clean_table('nodes')
        self._clean_table('facts')

    def test_get_direct_children(self):
        nodes = self._build_node_tree()
        nodelist = opencenter.webapp.utility.get_direct_children(
            nodes['container1']['id'])
        self.logger.debug('Expanded nodelist: %s' % nodelist)
        # nodelist should contain full records for node1, container2a, and
        # container2b
        node_ids = [n['id'] for n in nodelist]
        self.assertEquals(len(nodelist), 3)
        self.assertTrue(nodes['node1']['id'] in node_ids and
                        nodes['container2a']['id'] in node_ids and
                        nodes['container2b']['id'] in node_ids)
        self._clean_table('nodes')
        self._clean_table('facts')

    def test_full_node_expansion(self):
        nodes = self._build_node_tree()
        nodelist = opencenter.webapp.utility.fully_expand_nodelist(
            [nodes['container1']['id']])
        self.logger.debug('Expanded nodelist: %s' % nodelist)
        # node list should contain ids of container1, container2a,
        # container2b, container3a, node1, node2a, node2b, and node3a
        self.assertEquals(len(nodelist), 8)
        self.assertTrue(nodes['node1']['id'] in nodelist and
                        nodes['container3a']['id'] in nodelist and
                        nodes['container1']['id'] in nodelist)
        self._clean_table('nodes')
        self._clean_table('facts')

    def test_unprovisioned_container(self):
        n = opencenter.webapp.utility.unprovisioned_container()
        self.assertTrue(n is not None)
        n2 = opencenter.webapp.utility.unprovisioned_container()
        self.assertTrue(n['id'] == n2['id'])
        self._clean_table('nodes')
        self._clean_table('facts')
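For intuition about what these tests assert: expand_nodelist descends the parent_id facts and collects the leaves. A simplified, self-contained version of that traversal (an illustration only, not OpenCenter's actual implementation):

def expand_nodelist(root_ids, children_of):
    """Return ids of all leaf nodes reachable from root_ids.

    children_of maps a node id to the ids whose parent_id fact points at it;
    for this sketch, a node with no children counts as a leaf.
    """
    result, stack = [], list(root_ids)
    while stack:
        node = stack.pop()
        kids = children_of.get(node, [])
        if kids:
            stack.extend(kids)      # containers: keep descending
        else:
            result.append(node)     # leaves: collect
    return result

# With the tree from _build_node_tree, expanding [container1] yields the four
# leaf nodes node1, node2a, node2b, node3a -- matching test_node_expansion.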
47.641791
79
0.560229
1,345
12,768
5.09368
0.142007
0.090644
0.151073
0.131368
0.772442
0.755364
0.748066
0.727047
0.703985
0.680922
0
0.01762
0.297697
12,768
267
80
47.820225
0.746403
0.108004
0
0.779904
0
0
0.129787
0
0
0
0
0
0.062201
1
0.07177
false
0.019139
0.019139
0
0.100478
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
acf19a94af3403cf983799b3db1b3c3003808497
8,026
py
Python
tests/test_schema_editor_partitioning.py
adamchainz/django-postgres-extra
c11dbb5b75e16f7bd8fd336cc051806cf587269f
[ "MIT" ]
529
2017-03-20T08:16:30.000Z
2022-03-31T13:23:09.000Z
tests/test_schema_editor_partitioning.py
adamchainz/django-postgres-extra
c11dbb5b75e16f7bd8fd336cc051806cf587269f
[ "MIT" ]
137
2017-06-08T07:59:22.000Z
2022-02-07T08:34:38.000Z
tests/test_schema_editor_partitioning.py
adamchainz/django-postgres-extra
c11dbb5b75e16f7bd8fd336cc051806cf587269f
[ "MIT" ]
67
2017-06-21T10:01:13.000Z
2022-02-24T21:23:24.000Z
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models

from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.types import PostgresPartitioningMethod

from . import db_introspection
from .fake_model import define_fake_partitioned_model


def test_schema_editor_create_delete_partitioned_model_range():
    """Tests whether creating a partitioned model and adding a range
    partition to it using the :see:PostgresSchemaEditor works."""

    method = PostgresPartitioningMethod.RANGE
    key = ["timestamp"]

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": method, "key": key},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_range_partition(model, "pt1", "2019-01-01", "2019-02-01")

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_pt1"

    schema_editor.delete_partitioned_model(model)

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table

    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0


def test_schema_editor_create_delete_partitioned_model_list():
    """Tests whether creating a partitioned model and adding a list
    partition to it using the :see:PostgresSchemaEditor works."""

    method = PostgresPartitioningMethod.LIST
    key = ["category"]

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "category": models.TextField()},
        {"method": method, "key": key},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_list_partition(model, "pt1", ["car", "boat"])

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_pt1"

    schema_editor.delete_partitioned_model(model)

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table

    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0


def test_schema_editor_create_delete_partitioned_model_default():
    """Tests whether creating a partitioned model and adding a default
    partition to it using the :see:PostgresSchemaEditor works."""

    method = PostgresPartitioningMethod.LIST
    key = ["category"]

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "category": models.TextField()},
        {"method": method, "key": key},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_default_partition(model, "default")

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_default"

    schema_editor.delete_partitioned_model(model)

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table

    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0


def test_schema_editor_create_partitioned_model_no_method():
    """Tests whether it is possible to create a partitioned model without
    explicitly setting a partitioning method.

    The default is "range", so setting one explicitly should not be needed.
    """

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"key": ["timestamp"]},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)

    pt = db_introspection.get_partitioned_table(model._meta.db_table)
    assert pt.method == PostgresPartitioningMethod.RANGE
    assert len(pt.partitions) == 0


def test_schema_editor_create_partitioned_model_no_key():
    """Tests whether trying to create a partitioned model without a
    partitioning key raises :see:ImproperlyConfigured, as it is not possible
    to create a partitioned model without one and we cannot have a sane
    default."""

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": PostgresPartitioningMethod.RANGE},
    )

    schema_editor = PostgresSchemaEditor(connection)
    with pytest.raises(ImproperlyConfigured):
        schema_editor.create_partitioned_model(model)


def test_schema_editor_add_range_partition():
    """Tests whether adding a range partition works."""

    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"key": ["timestamp"]},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_range_partition(
        model,
        name="mypartition",
        from_values="2019-1-1",
        to_values="2019-2-1",
        comment="test",
    )

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"

    schema_editor.delete_partition(model, "mypartition")

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0


def test_schema_editor_add_list_partition():
    """Tests whether adding a list partition works."""

    model = define_fake_partitioned_model(
        {"name": models.TextField()},
        {"method": PostgresPartitioningMethod.LIST, "key": ["name"]},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_list_partition(
        model, name="mypartition", values=["1"], comment="test"
    )

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"

    schema_editor.delete_partition(model, "mypartition")

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0


@pytest.mark.parametrize(
    "method,key",
    [
        (PostgresPartitioningMethod.RANGE, ["timestamp"]),
        (PostgresPartitioningMethod.LIST, ["name"]),
    ],
)
def test_schema_editor_add_default_partition(method, key):
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": method, "key": key},
    )

    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_default_partition(
        model, name="mypartition", comment="test"
    )

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"

    schema_editor.delete_partition(model, "mypartition")

    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0
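In application code, the partitioned table exercised by these tests would normally be declared as a model on psqlextra's partitioned base class; a sketch under that assumption (the Event class and its fields are illustrative, not part of this test module):

from django.db import models
from psqlextra.models import PostgresPartitionedModel
from psqlextra.types import PostgresPartitioningMethod


class Event(PostgresPartitionedModel):
    # method/key mirror the options passed to define_fake_partitioned_model above
    class PartitioningMeta:
        method = PostgresPartitioningMethod.RANGE
        key = ["timestamp"]

    name = models.TextField()
    timestamp = models.DateTimeField()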
33.722689
79
0.72614
931
8,026
5.988185
0.103115
0.077489
0.049327
0.071749
0.824395
0.800538
0.784395
0.770045
0.761614
0.736323
0
0.008246
0.168951
8,026
237
80
33.864979
0.827586
0.104909
0
0.621795
0
0
0.072886
0.014352
0
0
0
0
0.224359
1
0.051282
false
0
0.044872
0
0.096154
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c59836a2ffdf0622a76e8b5a84d3bca8344c4304
9,434
py
Python
tests/contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/test_michelson_coding_KT1G39.py
juztin/pytezos-1
7e608ff599d934bdcf129e47db43dbdb8fef9027
[ "MIT" ]
1
2021-05-20T16:52:08.000Z
2021-05-20T16:52:08.000Z
tests/contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/test_michelson_coding_KT1G39.py
juztin/pytezos-1
7e608ff599d934bdcf129e47db43dbdb8fef9027
[ "MIT" ]
1
2020-12-30T16:44:56.000Z
2020-12-30T16:44:56.000Z
tests/contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/test_michelson_coding_KT1G39.py
juztin/pytezos-1
7e608ff599d934bdcf129e47db43dbdb8fef9027
[ "MIT" ]
1
2022-03-20T19:01:00.000Z
2022-03-20T19:01:00.000Z
from unittest import TestCase

from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson


class MichelsonCodingTestKT1G39(TestCase):

    def setUp(self):
        self.maxDiff = None

    def test_michelson_parse_code_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_code_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_code_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_storage_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_storage_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_storage_KT1G39(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_ong4Gv(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_ong4Gv(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_ong4Gv(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_ooqEHd(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_ooqEHd(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_ooqEHd(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_onynir(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_onynir(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_onynir(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_onn4pk(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_onn4pk(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_onn4pk(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_ooYJ85(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_ooYJ85(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_ooYJ85(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_ooDRnz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_ooDRnz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_ooDRnz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    def test_michelson_parse_parameter_oophVz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.tz'))
        self.assertEqual(expected, actual)

    def test_michelson_format_parameter_oophVz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.tz')
        actual = micheline_to_michelson(get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json'),
            inline=True)
        self.assertEqual(expected, actual)

    def test_michelson_inverse_parameter_oophVz(self):
        expected = get_data(
            path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)
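Every "inverse" test above asserts the same round-trip invariant. A minimal sketch of that invariant in isolation, using the same two pytezos helpers the file imports; the tiny Micheline expression is illustrative only:

# Round-trip sketch of the invariant asserted by the inverse tests above,
# assuming the same pytezos helpers; the sample expression is illustrative.
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson

expr = {'prim': 'Pair', 'args': [{'int': '1'}, {'string': 'foo'}]}
# formatting to Michelson text and parsing back should be the identity
assert michelson_to_micheline(micheline_to_michelson(expr)) == expr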
46.935323
89
0.734683
880
9,434
7.563636
0.05
0.048377
0.074369
0.135216
0.963341
0.963341
0.963341
0.963341
0.947416
0.947416
0
0.063908
0.190587
9,434
200
90
47.17
0.807753
0
0
0.639053
0
0
0.316833
0.316833
0
0
0
0
0.159763
1
0.16568
false
0
0.023669
0
0.195266
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c5a74bb24e4878f4ed607358e360e12802ff3da1
126
py
Python
one_utils/__init__.py
rentainhe/OneUtils
f7c4e28dd04958c51f08073946d35748aa9c1b4d
[ "MIT" ]
null
null
null
one_utils/__init__.py
rentainhe/OneUtils
f7c4e28dd04958c51f08073946d35748aa9c1b4d
[ "MIT" ]
null
null
null
one_utils/__init__.py
rentainhe/OneUtils
f7c4e28dd04958c51f08073946d35748aa9c1b4d
[ "MIT" ]
null
null
null
from .weight_transfer import convert_torch_to_flow
from .torch_eval import eval_torch_acc
from .flow_eval import eval_flow_acc
42
50
0.888889
22
126
4.636364
0.454545
0.196078
0.27451
0
0
0
0
0
0
0
0
0
0.087302
126
3
51
42
0.886957
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
68134d07aebd4fa7f58e6936211ee2f23674c91c
91
py
Python
app/mentors/__init__.py
hack4impact/ican
0bf7697476a5fe88a2274e3524a7d6455957fe28
[ "MIT" ]
13
2015-03-13T21:39:00.000Z
2017-02-01T01:45:41.000Z
app/mentors/__init__.py
hack4impact/ican
0bf7697476a5fe88a2274e3524a7d6455957fe28
[ "MIT" ]
1
2016-01-22T20:08:13.000Z
2016-01-22T20:08:13.000Z
app/mentors/__init__.py
hack4impact/ican
0bf7697476a5fe88a2274e3524a7d6455957fe28
[ "MIT" ]
2
2015-08-26T00:56:17.000Z
2018-10-19T12:12:35.000Z
from flask import Blueprint

mentors = Blueprint('mentors', __name__)

# imported last so the views module can attach routes to the blueprint
from . import views
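A blueprint like this only takes effect once it is registered on an application. A minimal sketch, assuming the app/mentors package above; the application factory shown here is hypothetical:

# Hypothetical registration sketch for the blueprint above.
from flask import Flask
from app.mentors import mentors

app = Flask(__name__)
app.register_blueprint(mentors, url_prefix='/mentors')  # routes from views now live under /mentors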
15.166667
40
0.769231
11
91
6
0.636364
0.484848
0
0
0
0
0
0
0
0
0
0
0.153846
91
5
41
18.2
0.857143
0
0
0
0
0
0.076923
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
a870bed10551d9c31763faf54c019eb9cf1a96f3
14,746
py
Python
src/NISysServer.py
HymanMurphy/meta-labview
95f01bf65853028d83b0874eaf82e77b02558745
[ "MIT" ]
null
null
null
src/NISysServer.py
HymanMurphy/meta-labview
95f01bf65853028d83b0874eaf82e77b02558745
[ "MIT" ]
null
null
null
src/NISysServer.py
HymanMurphy/meta-labview
95f01bf65853028d83b0874eaf82e77b02558745
[ "MIT" ]
null
null
null
#! /usr/bin/python
# Copyright 2016 National Instruments

# This server emulates the NI Service Locator and the NI System Web Server
# Primarily the purpose of this emulator is to publish a web service to
# reboot the target from the LabVIEW project.

import BaseHTTPServer
from SocketServer import ThreadingMixIn
import urlparse
import os
import socket
import threading
import time

HOST_NAME = ''
PORT_NUMBER = 3580
RESTART_MAX_RETRIES = 3
RESTART_RETRY_DELAY = 1


def getIP():
    retVal = socket.gethostbyname(socket.getfqdn())
    if retVal.startswith("127."):
        # this returned localhost's IP
        # try an alternative that requires an internet connection
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('8.8.8.8', 80))
            retVal = s.getsockname()[0]
            s.close()
        except:
            # if all else fails, fall back to the hostname
            retVal = socket.getfqdn()
    return retVal


def restartLV():
    # Some early versions of systemd (v44) don't consistently restart
    # services, so retry a few times if the restart fails.
    print "Restarting LabVIEW now..."
    retries = 0
    while retries < RESTART_MAX_RETRIES:
        retval = os.system("/bin/systemctl restart labview.service")
        if retval == 0:
            print "Restart successful"
            return
        else:
            retries = retries + 1
            print "Restart failed; retry %d" % retries
            time.sleep(RESTART_RETRY_DELAY)


class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    def do_GET(s):
        ppath = urlparse.urlparse(s.path)
        query = urlparse.parse_qs(ppath.query)
        if s.path.find("National%20Instruments%2FWeb%20Servers%2FNI%20System%20Web%20Server%2Fhttp") >= 0:
            # Service Locator for System Web Server; redirect to same port
            s.send_response(200)
            s.send_header("Content-type", "text/html")
            s.end_headers()
            s.wfile.write("Mapping=" + str(PORT_NUMBER) + "\r\n")
        elif ppath.path == '/login' and 'username' in query:
            # login challenge
            s.send_response(403)
            s.send_header("X-NI-AUTH-PARAMS", "N=1,s=n7gxGBi085pJ+upFcfxEvQ==,B=ro8BaR4PUaUUcGsQZvFeE8Gbav1iYBFX3+37bGNJUCPcOSvuzle9y5EErTu4F2/Ry5GhmaYHCYo9sBbqa9HAJFk+TMc641aZlnsUG+fojWPdef98Lnis8kuXqfl5GTKgM9PS4CF+4AJ2MM59HQW6+Qm/mCZLDJhMPWr+efFmEvI=,ss=")
            s.end_headers()
        elif ppath.path == '/logout':
            # logout call
            s.send_response(200)
            s.send_header("Content-type", "text/html")
            s.send_header("Set-Cookie", "_appwebSessionId_=; Path=/; Expires=Thu, 01 Jan 1970 00:00:00 GMT")
            s.end_headers()
            s.wfile.write("User admin logged out.")
        elif ppath.path == 'deletetree':
            # call to service locator to remove a service
            # this happens when LV daemon shuts down
            # since the daemon might not be running when the
            # response is sent, just close the connection
            print "GET deletetree received"
            s.wfile.close()
        elif ppath.path == 'publish':
            # call to service locator to add a service
            # this happens when LV daemon starts
            s.send_response(200)
            s.end_headers()
        else:
            s.send_error(404)

    def do_POST(s):
        ppath = urlparse.urlparse(s.path)
        query = urlparse.parse_qs(ppath.query)
        if ppath.path == '/login':
            # actual login, happens after login challenge
            s.send_response(200)
            s.send_header("Content-type", "text/xml")
            s.send_header("Set-Cookie", "_appwebSessionId_=Zoz4eDPybs#qoUb9za2m0Q!!; Path=/")
            s.end_headers()
            loginxmldata = "<?xml version='1.0' encoding='UTF-8'?><Permissions><Permission><Name>GetDB</Name><BuiltIn>false</BuiltIn><ID>0</ID></Permission><Permission><Name>SetDB</Name><BuiltIn>false</BuiltIn><ID>1</ID></Permission><Permission><Name>FSRead</Name><BuiltIn>false</BuiltIn><ID>2</ID></Permission><Permission><Name>FSWrite</Name><BuiltIn>false</BuiltIn><ID>3</ID></Permission><Permission><Name>SSLAdminModifyCerts</Name><BuiltIn>false</BuiltIn><ID>4</ID></Permission><Permission><Name>SSLAdminReadCerts</Name><BuiltIn>false</BuiltIn><ID>5</ID></Permission><Permission><Name>NIWebCer</Name><BuiltIn>false</BuiltIn><ID>6</ID></Permission><Permission><Name>GetWSAPIKey</Name><BuiltIn>false</BuiltIn><ID>7</ID></Permission><Permission><Name>ManageWS</Name><BuiltIn>false</BuiltIn><ID>8</ID></Permission><Permission><Name>SetWSAPIKey</Name><BuiltIn>false</BuiltIn><ID>9</ID></Permission><Permission><Name>WIFConfigureAppServer</Name><BuiltIn>false</BuiltIn><ID>10</ID></Permission><Permission><Name>GetSystemConfiguration</Name><BuiltIn>false</BuiltIn><ID>11</ID></Permission><Permission><Name>SetSystemConfiguration</Name><BuiltIn>false</BuiltIn><ID>12</ID></Permission><Permission><Name>FirmwareUpdate</Name><BuiltIn>false</BuiltIn><ID>13</ID></Permission><Permission><Name>Reboot</Name><BuiltIn>false</BuiltIn><ID>14</ID></Permission><Permission><Name>RemoteShell</Name><BuiltIn>false</BuiltIn><ID>15</ID></Permission><Permission><Name>SetRTLockPassword</Name><BuiltIn>false</BuiltIn><ID>16</ID></Permission><Permission><Name>ViewConsoleOutput</Name><BuiltIn>false</BuiltIn><ID>17</ID></Permission><Permission><Name>GetSyslog</Name><BuiltIn>false</BuiltIn><ID>18</ID></Permission><Permission><Name>ManageExtensions</Name><BuiltIn>false</BuiltIn><ID>19</ID></Permission></Permissions>"
            s.wfile.write(loginxmldata)
        elif ppath.path == '/rtexecsvc/RebootEx':
            # reboot call
            # there is form encoded data as part of this call
            # we could parse this using cgi.FieldStorage
            # details here: https://pymotw.com/2/BaseHTTPServer/
            s.send_response(202)
            s.send_header("Content-type", "text/plain")
            s.end_headers()
            s.wfile.write("Rebooting in 0 seconds")
            # spawn a daemon thread to do the reboot so the
            # HTTP Handler can send its response immediately
            t = threading.Thread(target=restartLV)
            t.setDaemon(True)
            t.start()
        elif ppath.path == '/nisysapi/server':
            # handle a request for system information
            # there can be many more requests to sysapi server
            # but for now just assume it's the most common case
            # this type of request has url-encoded form data like this:
            # Version=00010001&Plugins=nisyscfg%2cNetworkConfig&response_encoding=UTF-8&Function=SearchForItemsAndProperties&FilterMode=1&NbrBags=0&
            s.send_response(200)
            s.send_header("Content-type", "text/xml; charset=utf-8")
            s.end_headers()
            ipAddr = getIP()
            hostname = socket.gethostname()
            sysapixmldata = "<?xml version='1.0' encoding='utf-8'?><NISysAPI_Results hr='0' version='00010001'><PropertyBags><PropertyBag><Property tag='1000000' type='6'>//localhost/nisyscfg/usb0</Property><Property tag='1001000' type='1'>0</Property><Property tag='1009000' type='1'>0</Property><Property tag='100D000' type='3'>0</Property><Property tag='101C000' type='2'>1</Property><Property tag='101D000' type='6'>usb0</Property><Property tag='101E000' type='6'>nisyscfg</Property><Property tag='101F000' type='6'>Ethernet Adapter usb0</Property><Property tag='1020000' type='1'>0</Property><Property tag='1022000' type='5'>{00000000-0000-0000-0000-000000000000}</Property><Property tag='1028000' type='1'>0</Property><Property tag='102A000' type='2'>1000</Property><Property tag='1037000' type='3'>983040</Property><Property tag='1038000' type='1'>1</Property><Property tag='1039000' type='2'>1</Property><Property tag='103A000' type='3'>5</Property><Property tag='1054000' type='1'>0</Property><Property tag='D102000' type='3'>2</Property><Property tag='D103000' type='3'>2</Property><Property tag='D104000' type='6'>00:80:2F:21:2F:99</Property><Property tag='D105000' type='3'>8</Property><Property tag='D106000' type='3'>8</Property><Property tag='D107000' type='6'>0.0.0.0</Property><Property tag='D108000' type='3'>1</Property><Property tag='D109000' type='6'>0.0.0.0</Property><Property tag='D10A000' type='6'>0.0.0.0</Property><Property tag='D10B000' type='6'>0.0.0.0</Property><Property tag='D10F000' type='3'>1</Property><Property tag='D110000' type='3'>1</Property><Property tag='D111000' type='3'>1</Property><Property tag='D119000' type='3'>1</Property><Property tag='D11A000' type='3'>1</Property><Property tag='D126000' type='1'>0</Property><Property tag='D12C000' type='3'>1</Property></PropertyBag><PropertyBag><Property tag='1000000' type='6'>//localhost/nisyscfg/eth0</Property><Property tag='1001000' type='1'>0</Property><Property tag='1009000' type='1'>0</Property><Property tag='100D000' type='3'>0</Property><Property tag='101C000' type='2'>1</Property><Property tag='101D000' type='6'>eth0</Property><Property tag='101E000' type='6'>nisyscfg</Property><Property tag='101F000' type='6'>Ethernet Adapter eth0</Property><Property tag='1020000' type='1'>0</Property><Property tag='1022000' type='5'>{00000000-0000-0000-0000-000000000000}</Property><Property tag='1028000' type='1'>0</Property><Property tag='102A000' type='2'>1000</Property><Property tag='1037000' type='3'>983040</Property><Property tag='1038000' type='1'>1</Property><Property tag='1039000' type='2'>1</Property><Property tag='103A000' type='3'>5</Property><Property tag='1054000' type='1'>0</Property><Property tag='D102000' type='3'>2</Property><Property tag='D103000' type='3'>2</Property><Property tag='D104000' type='6'>00:80:2F:21:2F:97</Property><Property tag='D105000' type='3'>2</Property><Property tag='D106000' type='3'>15</Property><Property tag='D107000' type='6'>%s</Property><Property tag='D108000' type='3'>1</Property><Property tag='D109000' type='6'>255.255.254.0</Property><Property tag='D10A000' type='6'>10.2.106.1</Property><Property tag='D10B000' type='6'>130.164.12.8</Property><Property tag='D10F000' type='3'>1</Property><Property tag='D110000' type='3'>95</Property><Property tag='D111000' type='3'>64</Property><Property tag='D119000' type='3'>1</Property><Property tag='D11A000' type='3'>1</Property><Property tag='D126000' type='1'>1</Property><Property tag='D12C000' type='3'>1</Property></PropertyBag><PropertyBag><Property tag='1000000' type='6'>//localhost/nisyscfg/eth1</Property><Property tag='1001000' type='1'>0</Property><Property tag='1009000' type='1'>0</Property><Property tag='100D000' type='3'>0</Property><Property tag='101C000' type='2'>1</Property><Property tag='101D000' type='6'>eth1</Property><Property tag='101E000' type='6'>nisyscfg</Property><Property tag='101F000' type='6'>Ethernet Adapter eth1</Property><Property tag='1020000' type='1'>0</Property><Property tag='1022000' type='5'>{00000000-0000-0000-0000-000000000000}</Property><Property tag='1028000' type='1'>0</Property><Property tag='102A000' type='2'>1000</Property><Property tag='1037000' type='3'>983040</Property><Property tag='1038000' type='1'>1</Property><Property tag='1039000' type='2'>1</Property><Property tag='103A000' type='3'>5</Property><Property tag='1054000' type='1'>0</Property><Property tag='D102000' type='3'>2</Property><Property tag='D103000' type='3'>3</Property><Property tag='D104000' type='6'>00:80:2F:21:2F:98</Property><Property tag='D105000' type='3'>2</Property><Property tag='D106000' type='3'>15</Property><Property tag='D107000' type='6'>0.0.0.0</Property><Property tag='D108000' type='3'>1</Property><Property tag='D109000' type='6'>0.0.0.0</Property><Property tag='D10A000' type='6'>0.0.0.0</Property><Property tag='D10B000' type='6'>0.0.0.0</Property><Property tag='D10F000' type='3'>1</Property><Property tag='D110000' type='3'>95</Property><Property tag='D111000' type='3'>0</Property><Property tag='D119000' type='3'>1</Property><Property tag='D11A000' type='3'>1</Property><Property tag='D126000' type='1'>0</Property><Property tag='D12C000' type='3'>1</Property></PropertyBag><PropertyBag><Property tag='1000000' type='6'>//localhost/nisyscfg/system</Property><Property tag='1001000' type='1'>1</Property><Property tag='1002000' type='3'>0</Property><Property tag='1004000' type='6'>National Instruments</Property><Property tag='1005000' type='3'>30549</Property><Property tag='1006000' type='6'>LINX Target</Property><Property tag='1007000' type='6'>01A549AB</Property><Property tag='1008000' type='1'>1</Property><Property tag='1009000' type='1'>0</Property><Property tag='100D000' type='3'>0</Property><Property tag='101C000' type='2'>1</Property><Property tag='101D000' type='6'>system</Property><Property tag='101E000' type='6'>nisyscfg</Property><Property tag='101F000' type='6'>%s</Property><Property tag='1020000' type='1'>0</Property><Property tag='1022000' type='5'>{00000000-0000-0000-0000-000000000000}</Property><Property tag='1024000' type='2'>1</Property><Property tag='1028000' type='1'>0</Property><Property tag='102A000' type='2'>1000</Property><Property tag='102F000' type='6'>3.0.0f0</Property><Property tag='1033000' type='6'>00:80:2F:21:2F:97</Property><Property tag='1037000' type='3'>983040</Property><Property tag='1038000' type='1'>1</Property><Property tag='1039000' type='2'>1</Property><Property tag='103A000' type='3'>4</Property><Property tag='103C000' type='6'>cRIO</Property><Property tag='103D000' type='6' /><Property tag='104A000' type='1'>1</Property><Property tag='104B000' type='6'>*.cfg</Property><Property tag='104C000' type='2'>0</Property><Property tag='104E000' type='6'>NI-Linux x64</Property><Property tag='104F000' type='6'>3.14.40-rt37-3.0.0f1</Property><Property tag='1050000' type='6'>NI Linux Real-Time x64 3.14.40-rt37-3.0.0f1</Property><Property tag='1051000' type='7'>9B613800 E2CD41C9 D246A77B 0</Property><Property tag='1052000' type='1'>0</Property><Property tag='1053000' type='6'>Running</Property><Property tag='1054000' type='1'>0</Property><Property tag='1058000' type='2'>2</Property><Property tag='5105000' type='1'>0</Property><Property tag='D11D000' type='6'>en</Property><Property tag='D11E000' type='6'>en</Property><Property tag='D120000' type='6'>CUT0</Property><Property tag='D122000' type='4'>3522608.000000</Property><Property tag='D123000' type='4'>3070516.000000</Property><Property tag='D129000' type='1'>1</Property><Property tag='D12B000' type='2'>0</Property><Property tag='D14E000' type='6'>UTC</Property><Property tag='D159000' type='6' /><Property tag='D15A000' type='6' /><Property tag='D15B000' type='6' /><Property tag='D15C000' type='6' /><Property tag='D15D000' type='1'>1</Property><Property tag='D15E000' type='1'>0</Property><Property tag='D15F000' type='1'>0</Property><Property tag='13000000' type='1'>1</Property><Property tag='14000000' type='1'>0</Property><Property tag='14002000' type='1'>0</Property><Property tag='14004000' type='1'>0</Property></PropertyBag></PropertyBags></NISysAPI_Results>"
            s.wfile.write(sysapixmldata % (ipAddr, hostname))
        else:
            s.send_error(404)


class ThreadedHTTPServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """ Handle requests in a separate thread. """


if __name__ == '__main__':
    httpd = ThreadedHTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
    try:
        print "Starting NISysServer..."
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
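A hypothetical smoke test for the emulator above, written in the same Python 2 dialect as the server (urllib2 issues a POST when a data argument is given); run NISysServer.py first, and note that port 3580 matches PORT_NUMBER:

# Hypothetical smoke test (Python 2, like the server); not part of the file.
import urllib2

body = "Version=00010001&Function=SearchForItemsAndProperties"
resp = urllib2.urlopen('http://localhost:3580/nisysapi/server', body)
# response begins with the canned "<?xml version='1.0' encoding='utf-8'?>" payload
print resp.read()[:80]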
104.58156
8,135
0.728401
2,147
14,746
4.974849
0.210992
0.164779
0.268608
0.084262
0.555191
0.499485
0.430952
0.417658
0.412883
0.412883
0
0.131476
0.077241
14,746
140
8,136
105.328571
0.653487
0.0984
0
0.255102
0
0.030612
0.812391
0.564264
0
0
0
0
0
0
null
null
0.020408
0.071429
null
null
0.05102
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
a888291e96acc1d22e969452666d06adf0abe928
17,070
py
Python
nexxT/tests/core/test_FilterExceptions.py
pfrydlewicz/nexxT
33616dbeee448c59201aa3009d637fe6b8d2b39c
[ "Apache-2.0" ]
5
2020-05-03T10:52:14.000Z
2022-03-02T10:32:33.000Z
nexxT/tests/core/test_FilterExceptions.py
pfrydlewicz/nexxT
33616dbeee448c59201aa3009d637fe6b8d2b39c
[ "Apache-2.0" ]
32
2020-05-18T15:49:00.000Z
2022-02-22T20:10:56.000Z
nexxT/tests/core/test_FilterExceptions.py
pfrydlewicz/nexxT
33616dbeee448c59201aa3009d637fe6b8d2b39c
[ "Apache-2.0" ]
2
2020-03-21T15:04:46.000Z
2021-03-01T15:42:49.000Z
# SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#

import json
import logging
from pathlib import Path
import pytest
import pytestqt
from PySide2.QtCore import QCoreApplication, QTimer
from nexxT.interface import FilterState, Services
from nexxT.core.ConfigFiles import ConfigFileLoader
from nexxT.core.Application import Application
from nexxT.core.Configuration import Configuration
import nexxT


def setup():
    global app
    app = QCoreApplication.instance()
    if app is None:
        app = QCoreApplication()


def exception_setup(python, thread, where, activeTime_s):
    logging.getLogger(__name__).info("------------------------------------------------------")
    logging.getLogger(__name__).info("Starting exception_setup %d %s %s %f", python, thread, where, activeTime_s)
    from nexxT.services.ConsoleLogger import ConsoleLogger
    logger = ConsoleLogger()
    Services.addService("Logging", logger)

    class LogCollector(logging.StreamHandler):
        def __init__(self):
            super().__init__()
            self.logs = []

        def emit(self, record):
            self.logs.append(record)

    # avoid warning flood about service profiling not found
    Services.addService("Profiling", None)
    collector = LogCollector()
    logging.getLogger().addHandler(collector)
    try:
        t = QTimer()
        t.setSingleShot(True)
        # timeout if test case hangs
        t2 = QTimer()
        t2.start((activeTime_s + 3)*1000)
        try:
            test_json = Path(__file__).parent / "test_except_constr.json"
            with test_json.open("r", encoding='utf-8') as fp:
                cfg = json.load(fp)
            if nexxT.useCImpl and not python:
                cfg["composite_filters"][0]["nodes"][2]["library"] = "binary://../binary/${NEXXT_PLATFORM}/${NEXXT_VARIANT}/test_plugins"
            cfg["composite_filters"][0]["nodes"][2]["thread"] = thread
            cfg["composite_filters"][0]["nodes"][2]["properties"]["whereToThrow"] = where
            mod_json = Path(__file__).parent / "test_except_constr_tmp.json"
            with mod_json.open("w", encoding="utf-8") as fp:
                json.dump(cfg, fp)
            config = Configuration()
            ConfigFileLoader.load(config, mod_json)
            config.activate("testApp")
            app.processEvents()
            aa = Application.activeApplication
            init = True

            def timeout():
                nonlocal init
                if init:
                    init = False
                    aa.stop()
                    aa.close()
                    aa.deinit()
                else:
                    app.exit(0)

            def timeout2():
                print("Application timeout hit!")
                nonlocal init
                if init:
                    init = False
                    aa.stop()
                    aa.close()
                    aa.deinit()
                else:
                    print("application exit!")
                    app.exit(1)

            t2.timeout.connect(timeout2)
            t.timeout.connect(timeout)

            def state_changed(state):
                if state == FilterState.ACTIVE:
                    t.setSingleShot(True)
                    t.start(activeTime_s*1000)
                elif not init and state == FilterState.CONSTRUCTED:
                    t.start(1000)

            aa.stateChanged.connect(state_changed)
            aa.init()
            aa.open()
            aa.start()
            app.exec_()
        finally:
            del t
            del t2
    finally:
        logging.getLogger().removeHandler(collector)
        Services.removeAll()
    return collector.logs


@pytest.mark.qt_no_exception_capture
def test_exception_python_main_none():
    logs = exception_setup(True, "main", "nowhere", 2)


# ---------------
# port exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_port():
    logs = exception_setup(True, "main", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Uncaught exception" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_port():
    logs = exception_setup(True, "thread-source", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Uncaught exception" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_port():
    logs = exception_setup(True, "compute", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Uncaught exception" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_port():
    logs = exception_setup(False, "main", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Unexpected exception during onPortDataChanged from filter filter: exception in port" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_port():
    logs = exception_setup(False, "thread-source", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Unexpected exception during onPortDataChanged from filter filter: exception in port" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_port():
    logs = exception_setup(False, "compute", "port", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) > 0
    assert all(e == "Unexpected exception during onPortDataChanged from filter filter: exception in port" for e in errors)


# ---------------
# init exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_init():
    logs = exception_setup(True, "main", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_init():
    logs = exception_setup(True, "thread-source", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_init():
    logs = exception_setup(True, "compute", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_init():
    logs = exception_setup(False, "main", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_init():
    logs = exception_setup(False, "thread-source", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_init():
    logs = exception_setup(False, "compute", "init", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation INITIALIZING of filter filter" for e in errors)


# ----------------
# start exceptions
# ----------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_start():
    logs = exception_setup(True, "main", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_start():
    logs = exception_setup(True, "thread-source", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_start():
    logs = exception_setup(True, "compute", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_start():
    logs = exception_setup(False, "main", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_start():
    logs = exception_setup(False, "thread-source", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_start():
    logs = exception_setup(False, "compute", "start", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STARTING of filter filter" for e in errors)


# ---------------
# stop exceptions
# ---------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_stop():
    logs = exception_setup(True, "main", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_stop():
    logs = exception_setup(True, "thread-source", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_stop():
    logs = exception_setup(True, "compute", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_stop():
    logs = exception_setup(False, "main", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_stop():
    logs = exception_setup(False, "thread-source", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_stop():
    logs = exception_setup(False, "compute", "stop", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert len(errors) == 1
    assert all(e == "Exception while executing operation STOPPING of filter filter" for e in errors)


# -----------------
# deinit exceptions
# -----------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_deinit():
    logs = exception_setup(True, "main", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_deinit():
    logs = exception_setup(True, "thread-source", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_deinit():
    logs = exception_setup(True, "compute", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_deinit():
    logs = exception_setup(False, "main", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_deinit():
    logs = exception_setup(False, "thread-source", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_deinit():
    logs = exception_setup(False, "compute", "deinit", 2)
    errors = [r.message for r in logs if r.levelno >= logging.ERROR]
    assert 1 <= len(errors) <= 3
    assert all(e == "Exception while executing operation DEINITIALIZING of filter filter" for e in errors)


# ----------------------
# constructor exceptions
# ----------------------

@pytest.mark.qt_no_exception_capture
def test_exception_python_main_constr():
    try:
        logs = exception_setup(True, "main", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


@pytest.mark.qt_no_exception_capture
def test_exception_python_source_constr():
    try:
        logs = exception_setup(True, "thread-source", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


@pytest.mark.qt_no_exception_capture
def test_exception_python_compute_constr():
    try:
        logs = exception_setup(True, "compute", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_main_constr():
    try:
        logs = exception_setup(False, "main", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_source_constr():
    try:
        logs = exception_setup(False, "thread-source", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


@pytest.mark.qt_no_exception_capture
@pytest.mark.skipif(not nexxT.useCImpl, reason="python only test")
def test_exception_c_compute_constr():
    try:
        logs = exception_setup(False, "compute", "constructor", 2)
        exception = False
    except Exception as e:
        exception = True
    assert exception


if __name__ == "__main__":
    test_exception_python_compute_constr()
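The LogCollector pattern used in exception_setup (a logging handler that stores records instead of printing them) is self-contained and works outside the Qt machinery; a minimal standard-library sketch:

# Standalone sketch of the log-collecting handler pattern used above;
# standard library only. getMessage() is used instead of record.message,
# since record.message is only set once a formatter has run.
import logging

class Collector(logging.Handler):
    def __init__(self):
        super().__init__()
        self.records = []

    def emit(self, record):
        self.records.append(record)

collector = Collector()
logging.getLogger().addHandler(collector)
logging.getLogger().error("Uncaught exception")
errors = [r.getMessage() for r in collector.records if r.levelno >= logging.ERROR]
assert errors == ["Uncaught exception"]
logging.getLogger().removeHandler(collector)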
40.450237
137
0.680668
2,295
17,070
4.90719
0.083224
0.048837
0.039425
0.045995
0.840881
0.775706
0.732019
0.725981
0.725981
0.725981
0
0.008441
0.201875
17,070
422
138
40.450237
0.818188
0.03017
0
0.56686
0
0
0.182033
0.010284
0
0
0
0
0.19186
1
0.127907
false
0
0.034884
0
0.168605
0.005814
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7666b804a192f3e0e222333d8e9adf5cc845e304
40
py
Python
autovirt/equipment/interface/__init__.py
xlam/autovirt
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
[ "MIT" ]
null
null
null
autovirt/equipment/interface/__init__.py
xlam/autovirt
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
[ "MIT" ]
null
null
null
autovirt/equipment/interface/__init__.py
xlam/autovirt
a19f9237c8b1123ce4f4b8b396dc88122019d4f8
[ "MIT" ]
null
null
null
from .equipment import EquipmentGateway
20
39
0.875
4
40
8.75
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.972222
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
76a04a572cbe5aca4df86af3511450fb71cac79d
61
py
Python
gbp/factors/__init__.py
joeaortiz/gbp
5670a498950bfa948da502b2381899ab46f61021
[ "MIT" ]
50
2020-03-10T08:49:45.000Z
2022-03-24T01:50:24.000Z
gbp/factors/__init__.py
joeaortiz/gbp
5670a498950bfa948da502b2381899ab46f61021
[ "MIT" ]
1
2022-03-21T02:36:36.000Z
2022-03-21T03:03:38.000Z
gbp/factors/__init__.py
joeaortiz/gbp
5670a498950bfa948da502b2381899ab46f61021
[ "MIT" ]
11
2020-04-24T16:29:48.000Z
2022-03-09T07:39:30.000Z
from . import reprojection
from . import linear_displacement
20.333333
33
0.836066
7
61
7.142857
0.714286
0.4
0
0
0
0
0
0
0
0
0
0
0.131148
61
2
34
30.5
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4f23c2c570563ceee52ae6d6ee808ff4e355098f
1,520
py
Python
mrp_system/migrations/0017_auto_20181206_1348.py
mgeorge8/django_time
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
[ "MIT" ]
1
2018-11-09T02:09:14.000Z
2018-11-09T02:09:14.000Z
mrp_system/migrations/0017_auto_20181206_1348.py
mgeorge8/django_time
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
[ "MIT" ]
null
null
null
mrp_system/migrations/0017_auto_20181206_1348.py
mgeorge8/django_time
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
[ "MIT" ]
null
null
null
# Generated by Django 2.1.2 on 2018-12-06 20:48

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('mrp_system', '0016_auto_20181206_1258'),
    ]

    operations = [
        migrations.AddField(
            model_name='part',
            name='char3',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='part',
            name='char4',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='part',
            name='char5',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='part',
            name='char6',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='part',
            name='char7',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='part',
            name='char8',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='part',
            name='char1',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='part',
            name='char2',
            field=models.CharField(blank=True, max_length=30),
        ),
    ]
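For orientation, the migration above corresponds to model fields of this shape; a hypothetical sketch, since the actual Part model in mrp_system is not part of this file and only the fields touched by the migration are shown:

# Hypothetical sketch of the Part model state implied by the migration above.
from django.db import models

class Part(models.Model):
    char1 = models.CharField(blank=True, max_length=30)  # altered by this migration
    char2 = models.CharField(blank=True, max_length=30)  # altered by this migration
    char3 = models.CharField(blank=True, max_length=30)  # added by this migration
    char4 = models.CharField(blank=True, max_length=30)
    char5 = models.CharField(blank=True, max_length=30)
    char6 = models.CharField(blank=True, max_length=30)
    char7 = models.CharField(blank=True, max_length=30)
    char8 = models.CharField(blank=True, max_length=30)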
28.148148
62
0.536842
150
1,520
5.306667
0.306667
0.090452
0.130653
0.170854
0.758794
0.758794
0.714824
0.714824
0.664573
0.664573
0
0.05489
0.340789
1,520
53
63
28.679245
0.739521
0.029605
0
0.680851
1
0
0.071283
0.015614
0
0
0
0
0
1
0
false
0
0.021277
0
0.085106
0
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
4f4f31924f56be00269c564b63d2ad09b40bbc17
110
py
Python
trading_bots/contrib/exchanges/__init__.py
budacom/trading-bots
9ac362cc21ce185e7b974bf9bcc7480ff9c6b2aa
[ "MIT" ]
21
2018-08-10T16:45:21.000Z
2022-01-25T13:04:07.000Z
trading_bots/contrib/clients/__init__.py
rob-Hitchens/trading-bots
16d53be0c32b45bee0520d8192629ade09727e24
[ "MIT" ]
6
2018-07-18T15:34:32.000Z
2021-02-02T21:59:04.000Z
trading_bots/contrib/clients/__init__.py
rob-Hitchens/trading-bots
16d53be0c32b45bee0520d8192629ade09727e24
[ "MIT" ]
10
2018-10-24T22:14:10.000Z
2022-02-08T17:21:47.000Z
from .base import *
from .bitfinex import *
from .bitstamp import *
from .buda import *
from .kraken import *
18.333333
23
0.727273
15
110
5.333333
0.466667
0.5
0
0
0
0
0
0
0
0
0
0
0.181818
110
5
24
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8c4780d9b158823d7c627047195e010e48dcedae
150
py
Python
ambra_sdk/service/entrypoints/dictionary.py
dyens/sdk-python
24bf05268af2832c70120b84fd53bf44862cffec
[ "Apache-2.0" ]
null
null
null
ambra_sdk/service/entrypoints/dictionary.py
dyens/sdk-python
24bf05268af2832c70120b84fd53bf44862cffec
[ "Apache-2.0" ]
null
null
null
ambra_sdk/service/entrypoints/dictionary.py
dyens/sdk-python
24bf05268af2832c70120b84fd53bf44862cffec
[ "Apache-2.0" ]
null
null
null
from ambra_sdk.service.entrypoints.generated.dictionary import \
    Dictionary as GDictionary


class Dictionary(GDictionary):
    """Dictionary."""
21.428571
64
0.766667
15
150
7.6
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.133333
150
6
65
25
0.876923
0.073333
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8c5347793e9338547be2cf79e7330e0c8ee8d564
32
py
Python
bundle/__init__.py
davidbrochart/bundle
9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0
[ "MIT" ]
4
2018-09-15T08:30:14.000Z
2019-03-11T20:56:25.000Z
bundle/__init__.py
davidbrochart/bundle
9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0
[ "MIT" ]
null
null
null
bundle/__init__.py
davidbrochart/bundle
9e6a14fa48e4d22a2cbc8239b13e600a86c5e5b0
[ "MIT" ]
1
2022-03-14T02:01:16.000Z
2022-03-14T02:01:16.000Z
from .scheduler import evaluate
16
31
0.84375
4
32
6.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.964286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4fdd63c3f67079f0814eff038b93bf1473f4163e
144
py
Python
04 Print and Input Functions/printCalculation.py
Himanshu44626748/Learn-Python
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
[ "MIT" ]
2
2020-03-16T14:57:44.000Z
2020-11-29T07:45:54.000Z
04 Print and Input Functions/printCalculation.py
Himanshu44626748/Learn-Python
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
[ "MIT" ]
null
null
null
04 Print and Input Functions/printCalculation.py
Himanshu44626748/Learn-Python
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
[ "MIT" ]
1
2020-08-13T07:59:02.000Z
2020-08-13T07:59:02.000Z
a, b = 10, 20
print(a+b)
# Output: 30
print(a*b)
# Output: 200
print(b/a)
# Output: 2.0
print(b%a)
# Output: 0
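A natural follow-on for the same lesson, added here for illustration (not part of the original exercise), contrasting true division with floor division and showing formatted output:

# Follow-on illustration with the same a and b (not in the original file).
a, b = 10, 20
print(b // a)                # Output: 2 (floor division, unlike b/a which gives 2.0)
print(f"{a} + {b} = {a+b}")  # Output: 10 + 20 = 30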
16
32
0.458333
25
144
2.64
0.4
0.090909
0.212121
0.393939
0
0
0
0
0
0
0
0.133333
0.375
144
9
33
16
0.6
0.284722
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0.8
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
8b36f0623512d4378bba3cf002df40184df3e5ac
70
py
Python
hrl/goal_hrl/common/vec_env/__init__.py
guojm14/HRL
b011fa65a82a861a89979257ed63ed3341b01b24
[ "MIT" ]
5
2021-07-23T09:50:35.000Z
2022-01-03T07:44:43.000Z
hrl/goal_hrl/common/vec_env/__init__.py
guojm14/HRL
b011fa65a82a861a89979257ed63ed3341b01b24
[ "MIT" ]
null
null
null
hrl/goal_hrl/common/vec_env/__init__.py
guojm14/HRL
b011fa65a82a861a89979257ed63ed3341b01b24
[ "MIT" ]
null
null
null
from hrl.goal_hrl.common.vec_env.subproc_vec_env import SubprocVecEnv
35
69
0.885714
12
70
4.833333
0.75
0.206897
0
0
0
0
0
0
0
0
0
0
0.057143
70
1
70
70
0.878788
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8c6f0d36df5bf5ad459a164ef0e969fc7a63885a
43
py
Python
tfbox/metrics/__init__.py
brookisme/tfbox
4d4883e5a998367504db72c95ca14488cee9dd6e
[ "MIT" ]
null
null
null
tfbox/metrics/__init__.py
brookisme/tfbox
4d4883e5a998367504db72c95ca14488cee9dd6e
[ "MIT" ]
null
null
null
tfbox/metrics/__init__.py
brookisme/tfbox
4d4883e5a998367504db72c95ca14488cee9dd6e
[ "MIT" ]
null
null
null
from .weighted import get, weighted, subset
43
43
0.813953
6
43
5.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.921053
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
508c9db5c5845f54f992bce891f3e16b2041e0a6
68
py
Python
scripts/train.py
sytelus/axformer
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
[ "MIT" ]
null
null
null
scripts/train.py
sytelus/axformer
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
[ "MIT" ]
null
null
null
scripts/train.py
sytelus/axformer
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
[ "MIT" ]
null
null
null
from axformer import trainer


def main():
    trainer.train()


if __name__ == "__main__":  # entry point, so the script does something when run directly
    main()
11.333333
29
0.647059
8
68
5.5
0.875
0
0
0
0
0
0
0
0
0
0
0
0.264706
68
6
30
11.333333
0.88
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
50cad6e323d0cb8dbdb52bf37a7b5552a0b0dd61
7,220
py
Python
tests/contrib/tornado/test_executor_decorator.py
lucien2k/dd-trace-py
bb2f5a260ed2fc63e8e4120e2294428f3ec2cf1d
[ "BSD-3-Clause" ]
null
null
null
tests/contrib/tornado/test_executor_decorator.py
lucien2k/dd-trace-py
bb2f5a260ed2fc63e8e4120e2294428f3ec2cf1d
[ "BSD-3-Clause" ]
null
null
null
tests/contrib/tornado/test_executor_decorator.py
lucien2k/dd-trace-py
bb2f5a260ed2fc63e8e4120e2294428f3ec2cf1d
[ "BSD-3-Clause" ]
1
2021-01-24T13:44:57.000Z
2021-01-24T13:44:57.000Z
import time
import unittest

from nose.tools import eq_, ok_
from tornado import version_info

from .utils import TornadoTestCase


class TestTornadoExecutor(TornadoTestCase):
    """
    Ensure that Tornado web handlers are properly traced even if
    ``@run_on_executor`` decorator is used.
    """
    def test_on_executor_handler(self):
        # it should trace a handler that uses @run_on_executor
        response = self.fetch('/executor_handler/')
        eq_(200, response.code)

        traces = self.tracer.writer.pop_traces()
        eq_(2, len(traces))
        eq_(1, len(traces[0]))
        eq_(1, len(traces[1]))

        # this trace yields the execution of the thread
        request_span = traces[1][0]
        eq_('tornado-web', request_span.service)
        eq_('tornado.request', request_span.name)
        eq_('http', request_span.span_type)
        eq_('tests.contrib.tornado.web.app.ExecutorHandler', request_span.resource)
        eq_('GET', request_span.get_tag('http.method'))
        eq_('200', request_span.get_tag('http.status_code'))
        eq_('/executor_handler/', request_span.get_tag('http.url'))
        eq_(0, request_span.error)
        ok_(request_span.duration >= 0.05)

        # this trace is executed in a different thread
        executor_span = traces[0][0]
        eq_('tornado-web', executor_span.service)
        eq_('tornado.executor.with', executor_span.name)
        eq_(executor_span.parent_id, request_span.span_id)
        eq_(0, executor_span.error)
        ok_(executor_span.duration >= 0.05)

    def test_on_delayed_executor_handler(self):
        # it should trace a handler that uses @run_on_executor but that doesn't
        # wait for its termination
        response = self.fetch('/executor_delayed_handler/')
        eq_(200, response.code)
        # timeout for the background thread execution
        time.sleep(0.1)

        traces = self.tracer.writer.pop_traces()
        eq_(2, len(traces))
        eq_(1, len(traces[0]))
        eq_(1, len(traces[1]))

        # order the `traces` list to have deterministic results
        # (required only for this special use case)
        traces.sort(key=lambda x: x[0].name, reverse=True)

        # this trace yields the execution of the thread
        request_span = traces[0][0]
        eq_('tornado-web', request_span.service)
        eq_('tornado.request', request_span.name)
        eq_('http', request_span.span_type)
        eq_('tests.contrib.tornado.web.app.ExecutorDelayedHandler', request_span.resource)
        eq_('GET', request_span.get_tag('http.method'))
        eq_('200', request_span.get_tag('http.status_code'))
        eq_('/executor_delayed_handler/', request_span.get_tag('http.url'))
        eq_(0, request_span.error)

        # this trace is executed in a different thread
        executor_span = traces[1][0]
        eq_('tornado-web', executor_span.service)
        eq_('tornado.executor.with', executor_span.name)
        eq_(executor_span.parent_id, request_span.span_id)
        eq_(0, executor_span.error)
        ok_(executor_span.duration >= 0.05)

    def test_on_executor_exception_handler(self):
        # it should trace a handler that uses @run_on_executor
        response = self.fetch('/executor_exception/')
        eq_(500, response.code)

        traces = self.tracer.writer.pop_traces()
        eq_(2, len(traces))
        eq_(1, len(traces[0]))
        eq_(1, len(traces[1]))

        # this trace yields the execution of the thread
        request_span = traces[1][0]
        eq_('tornado-web', request_span.service)
        eq_('tornado.request', request_span.name)
        eq_('http', request_span.span_type)
        eq_('tests.contrib.tornado.web.app.ExecutorExceptionHandler', request_span.resource)
        eq_('GET', request_span.get_tag('http.method'))
        eq_('500', request_span.get_tag('http.status_code'))
        eq_('/executor_exception/', request_span.get_tag('http.url'))
        eq_(1, request_span.error)
        eq_('Ouch!', request_span.get_tag('error.msg'))
        ok_('Exception: Ouch!' in request_span.get_tag('error.stack'))

        # this trace is executed in a different thread
        executor_span = traces[0][0]
        eq_('tornado-web', executor_span.service)
        eq_('tornado.executor.with', executor_span.name)
        eq_(executor_span.parent_id, request_span.span_id)
        eq_(1, executor_span.error)
        eq_('Ouch!', executor_span.get_tag('error.msg'))
        ok_('Exception: Ouch!' in executor_span.get_tag('error.stack'))

    @unittest.skipIf(
        (version_info[0], version_info[1]) in [(4, 0), (4, 1)],
        reason='Custom kwargs are available only for Tornado 4.2+',
    )
    def test_on_executor_custom_kwarg(self):
        # it should trace a handler that uses @run_on_executor
        # with the `executor` kwarg
        response = self.fetch('/executor_custom_handler/')
        eq_(200, response.code)

        traces = self.tracer.writer.pop_traces()
        eq_(2, len(traces))
        eq_(1, len(traces[0]))
        eq_(1, len(traces[1]))

        # this trace yields the execution of the thread
        request_span = traces[1][0]
        eq_('tornado-web', request_span.service)
        eq_('tornado.request', request_span.name)
        eq_('http', request_span.span_type)
        eq_('tests.contrib.tornado.web.app.ExecutorCustomHandler', request_span.resource)
        eq_('GET', request_span.get_tag('http.method'))
        eq_('200', request_span.get_tag('http.status_code'))
        eq_('/executor_custom_handler/', request_span.get_tag('http.url'))
        eq_(0, request_span.error)
        ok_(request_span.duration >= 0.05)

        # this trace is executed in a different thread
        executor_span = traces[0][0]
        eq_('tornado-web', executor_span.service)
        eq_('tornado.executor.with', executor_span.name)
        eq_(executor_span.parent_id, request_span.span_id)
        eq_(0, executor_span.error)
        ok_(executor_span.duration >= 0.05)

    @unittest.skipIf(
        (version_info[0], version_info[1]) in [(4, 0), (4, 1)],
        reason='Custom kwargs are available only for Tornado 4.2+',
    )
    def test_on_executor_custom_args_kwarg(self):
        # it should raise an exception if the decorator is used improperly
        response = self.fetch('/executor_custom_args_handler/')
        eq_(500, response.code)

        traces = self.tracer.writer.pop_traces()
        eq_(1, len(traces))
        eq_(1, len(traces[0]))

        # this trace yields the execution of the thread
        request_span = traces[0][0]
        eq_('tornado-web', request_span.service)
        eq_('tornado.request', request_span.name)
        eq_('http', request_span.span_type)
        eq_('tests.contrib.tornado.web.app.ExecutorCustomArgsHandler', request_span.resource)
        eq_('GET', request_span.get_tag('http.method'))
        eq_('500', request_span.get_tag('http.status_code'))
        eq_('/executor_custom_args_handler/', request_span.get_tag('http.url'))
        eq_(1, request_span.error)
        eq_('cannot combine positional and keyword args', request_span.get_tag('error.msg'))
        ok_('ValueError' in request_span.get_tag('error.stack'))
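The handlers exercised above live in the test fixture app and are not shown in this file. A minimal sketch of what a ``@run_on_executor`` handler looks like in Tornado 4.x; the names are illustrative, not the fixture's actual code:

# Illustrative Tornado 4.x handler using @run_on_executor; a sketch,
# not the fixture app's actual ExecutorHandler.
import time
from concurrent.futures import ThreadPoolExecutor

from tornado import gen, web
from tornado.concurrent import run_on_executor


class ExecutorHandler(web.RequestHandler):
    executor = ThreadPoolExecutor(max_workers=4)  # run_on_executor uses self.executor by default

    @run_on_executor
    def blocking_work(self):
        time.sleep(0.05)  # runs on the pool, off the IOLoop thread

    @gen.coroutine
    def get(self):
        yield self.blocking_work()
        self.write('OK')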
41.257143
93
0.652355
973
7,220
4.568345
0.139774
0.136108
0.047244
0.072666
0.820247
0.792576
0.792576
0.767154
0.765579
0.749831
0
0.020324
0.22313
7,220
174
94
41.494253
0.772152
0.13795
0
0.690476
0
0
0.198836
0.081313
0
0
0
0
0
1
0.039683
false
0
0.039683
0
0.087302
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
50d008ee22dcc9887e25b13da82f5dfaff8a91d9
26
py
Python
mpids/MPIcollections/__init__.py
edgargabriel/mpids
170f402ecea5af0db4eee39e8d426884dce12ad6
[ "BSD-2-Clause" ]
1
2020-01-22T03:27:31.000Z
2020-01-22T03:27:31.000Z
mpids/MPIcollections/__init__.py
jrodgers01d/mpids
f771b1d25eba5f5dc8e30e5d86ee0251775b9da1
[ "BSD-2-Clause" ]
1
2020-05-04T20:25:55.000Z
2020-05-04T20:25:55.000Z
mpids/MPIcollections/__init__.py
jrodgers01d/mpids
f771b1d25eba5f5dc8e30e5d86ee0251775b9da1
[ "BSD-2-Clause" ]
2
2019-04-08T03:01:31.000Z
2020-04-27T15:56:28.000Z
from .MPICounter import *
13
25
0.769231
3
26
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.153846
26
1
26
26
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e85c6043c07f057ceb5ca9f161f3b5ca0838a4cd
417
py
Python
pyquil/quantum_processor/transformers/__init__.py
stjordanis/pyquil
36987ecb78d5dc85d299dd62395b7669a1cedd5a
[ "Apache-2.0" ]
677
2017-01-09T23:20:22.000Z
2018-11-26T10:57:49.000Z
pyquil/quantum_processor/transformers/__init__.py
stjordanis/pyquil
36987ecb78d5dc85d299dd62395b7669a1cedd5a
[ "Apache-2.0" ]
574
2018-11-28T05:38:40.000Z
2022-03-23T20:38:28.000Z
pyquil/quantum_processor/transformers/__init__.py
stjordanis/pyquil
36987ecb78d5dc85d299dd62395b7669a1cedd5a
[ "Apache-2.0" ]
202
2018-11-30T06:36:28.000Z
2022-03-29T15:38:18.000Z
from pyquil.quantum_processor.transformers.qcs_isa_to_compiler_isa import (
    qcs_isa_to_compiler_isa,
    QCSISAParseError,
)
from pyquil.quantum_processor.transformers.qcs_isa_to_graph import qcs_isa_to_graph
from pyquil.quantum_processor.transformers.compiler_isa_to_graph import compiler_isa_to_graph
from pyquil.quantum_processor.transformers.graph_to_compiler_isa import graph_to_compiler_isa, GraphGateError
52.125
109
0.892086
60
417
5.7
0.216667
0.087719
0.19883
0.304094
0.637427
0.549708
0.549708
0.549708
0
0
0
0
0.067146
417
7
110
59.571429
0.879177
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.571429
0
0.571429
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e88a035f2403f13e0046bcaed45885b7525143eb
25
py
Python
philander/__init__.py
pfirsich/philander
35ff4fa750739d270554328f81185e3ece023a98
[ "MIT" ]
1
2018-12-18T17:41:21.000Z
2018-12-18T17:41:21.000Z
philander/__init__.py
pfirsich/philander
35ff4fa750739d270554328f81185e3ece023a98
[ "MIT" ]
6
2018-07-04T20:38:32.000Z
2018-07-10T19:21:17.000Z
philander/__init__.py
pfirsich/philander
35ff4fa750739d270554328f81185e3ece023a98
[ "MIT" ]
1
2018-07-01T15:45:14.000Z
2018-07-01T15:45:14.000Z
from .philander import *
12.5
24
0.76
3
25
6.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e8b3ecc850245cc0b4ac7d4e4d0ca6ab4e186a83
198
py
Python
workshop_schedules/tools.py
WWU-AMM/workshop_schedules
696545c42956154bf7c14fafeb8d5860725c22d4
[ "BSD-3-Clause" ]
null
null
null
workshop_schedules/tools.py
WWU-AMM/workshop_schedules
696545c42956154bf7c14fafeb8d5860725c22d4
[ "BSD-3-Clause" ]
4
2022-03-11T14:23:31.000Z
2022-03-15T10:30:22.000Z
workshop_schedules/tools.py
WWU-AMM/workshop_schedules
696545c42956154bf7c14fafeb8d5860725c22d4
[ "BSD-3-Clause" ]
null
null
null
import humanfriendly
import datetime


def duration_to_date(duration: str) -> datetime.timedelta:
    seconds = humanfriendly.parse_timespan(duration)
    return datetime.timedelta(seconds=seconds)
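A quick usage sketch for the helper above, assuming the humanfriendly package is installed (parse_timespan accepts strings like "5m" or "1.5h" and returns seconds):

# Usage sketch for duration_to_date above; assumes humanfriendly is installed.
import datetime

assert duration_to_date("5m") == datetime.timedelta(seconds=300)
assert duration_to_date("1h") == datetime.timedelta(seconds=3600)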
24.75
58
0.80303
22
198
7.090909
0.590909
0.217949
0.307692
0
0
0
0
0
0
0
0
0
0.121212
198
7
59
28.285714
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
e8d729e714a67703db457b772ce2b1d27a6e0ab5
18,184
py
Python
api/vm/backup/views.py
erigones/esdc-ce
2e39211a8f5132d66e574d3a657906c7d3c406fe
[ "Apache-2.0" ]
97
2016-11-15T14:44:23.000Z
2022-03-13T18:09:15.000Z
api/vm/backup/views.py
erigones/esdc-ce
2e39211a8f5132d66e574d3a657906c7d3c406fe
[ "Apache-2.0" ]
334
2016-11-17T19:56:57.000Z
2022-03-18T10:45:53.000Z
api/vm/backup/views.py
erigones/esdc-ce
2e39211a8f5132d66e574d3a657906c7d3c406fe
[ "Apache-2.0" ]
33
2017-01-02T16:04:13.000Z
2022-02-07T19:20:24.000Z
from vms.models import BackupDefine
from api.decorators import api_view, request_data, setting_required
from api.permissions import IsAdminOrReadOnly
from api.utils.db import get_object
from api.vm.utils import get_vm, get_vms
from api.vm.snapshot.utils import get_disk_id, filter_disk_id
from api.vm.backup.utils import output_extended_backup_count
from api.vm.backup.vm_define_backup import BackupDefineView
from api.vm.backup.vm_backup import VmBackup
from api.vm.backup.vm_backup_list import VmBackupList

__all__ = ('vm_define_backup_list_all', 'vm_define_backup_list', 'vm_define_backup', 'vm_backup_list', 'vm_backup')


#: vm_status: GET:
@api_view(('GET',))
@request_data(permissions=(IsAdminOrReadOnly,))  # get_vms() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup_list_all(request, data=None):
    """
    List (:http:get:`GET </vm/define/backup>`) all backup definitions for all VMs.

    .. http:get:: /vm/define/backup

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg data.full: Return list of objects with all backup definition details (default: false)
        :type data.full: boolean
        :arg data.extended: Include total number of backups for each backup definition (default: false)
        :type data.extended: boolean
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``hostname``, ``created`` (default: ``hostname,-created``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
    """
    extra = output_extended_backup_count(request, data)
    # TODO: check indexes
    bkp_define = BackupDefine.objects.select_related('vm', 'vm__dc', 'node', 'zpool', 'periodic_task',
                                                     'periodic_task__crontab')\
                                     .filter(vm__in=get_vms(request)).order_by(*BackupDefineView.get_order_by(data))

    if extra:
        bkp_define = bkp_define.extra(extra)

    return BackupDefineView(request, data=data).get(None, bkp_define, many=True, extended=bool(extra))


#: vm_status: GET:
@api_view(('GET',))
@request_data(permissions=(IsAdminOrReadOnly,))  # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup_list(request, hostname_or_uuid, data=None):
    """
    List (:http:get:`GET </vm/(hostname_or_uuid)/define/backup>`) all VM backup definitions.

    .. http:get:: /vm/(hostname_or_uuid)/define/backup

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg data.full: Return list of objects with all backup definition details (default: false)
        :type data.full: boolean
        :arg data.disk_id: Filter by disk number/ID
        :type data.disk_id: integer
        :arg data.extended: Include total number of backups for each backup definition (default: false)
        :type data.extended: boolean
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``created`` (default: ``-created``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found
        :status 412: Invalid disk_id
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True, sr=('node', 'owner'))
    query_filter = {'vm': vm}
    query_filter = filter_disk_id(vm, query_filter, data)

    extra = output_extended_backup_count(request, data)
    # TODO: check indexes
    bkp_define = BackupDefine.objects.select_related('vm', 'vm__dc', 'node', 'zpool', 'periodic_task',
                                                     'periodic_task__crontab')\
                                     .filter(**query_filter).order_by(*BackupDefineView.get_order_by(data))

    if extra:
        bkp_define = bkp_define.extra(extra)

    return BackupDefineView(request, data=data).get(vm, bkp_define, many=True, extended=bool(extra))


#: vm_status: GET:
#: vm_status: POST: running, stopped, stopping
#: vm_status: PUT: running, stopped, stopping
#: vm_status:DELETE: running, stopped, stopping
@api_view(('GET', 'POST', 'PUT', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,))  # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup(request, hostname_or_uuid, bkpdef, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`) or
    update (:http:put:`PUT </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`)
    a VM backup definition and schedule.

    .. http:get:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.extended: Include total number of backups (default: false)
        :type data.extended: boolean
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name (predefined: hourly, daily, weekly, monthly)
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.type: **required** - Backup type (1 - dataset, 2 - file) (default: 1)
        :type data.type: integer
        :arg data.node: **required** - Name of the backup node
        :type data.node: string
        :arg data.zpool: **required** - The zpool used on the backup node (default: zones)
        :type data.zpool: string
        :arg data.schedule: **required** - Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: **required** - Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule (default: true)
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2, 3 - xz) (default: 0)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes (default: null => no limit)
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before creating backup snapshot (requires QEMU Guest Agent) (default: false)
        :type data.fsfreeze: boolean
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Backup definition already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:put:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2, 3 - xz)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:delete:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    extra = output_extended_backup_count(request, data)

    define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id},
                        sr=('vm', 'vm__dc', 'node', 'periodic_task', 'periodic_task__crontab'),
                        extra={'select': extra})

    return BackupDefineView(request, data=data).response(vm, define, extended=bool(extra))


#: vm_status: GET:
@api_view(('GET', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,))  # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_backup_list(request, hostname_or_uuid, data=None):
    """
    List (:http:get:`GET </vm/(hostname_or_uuid)/backup>`) all VM backups.
    Delete (:http:delete:`DELETE </vm/(hostname_or_uuid)/backup>`) VM backups specified by the list (data.bkpnames).

    .. http:get:: /vm/(hostname_or_uuid)/backup

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Original server hostname or uuid
        :type hostname_or_uuid: string
        :arg data.full: Return list of objects with all backup details (default: false)
        :type data.full: boolean
        :arg data.disk_id: Filter by original disk number/ID
        :type data.disk_id: integer
        :arg data.define: Filter by backup definition
        :type data.define: string
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``size``, ``time``, ``created`` (default: ``-created``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 412: Invalid disk_id

    .. http:delete:: /vm/(hostname_or_uuid)/backup

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-yes|
        :arg hostname_or_uuid: **required** - Original server hostname or uuid
        :type hostname_or_uuid: string
        :arg data.bkpnames: **required** - List of backups to be deleted
        :type data.bkpnames: array
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Backup not found
        :status 412: Invalid bkpnames
        :status 417: VM backup status is not OK
        :status 423: Node is not operational / VM is not operational
    """
    return VmBackupList(request, hostname_or_uuid, data).response()


#: vm_status: GET:
#: vm_status: POST: running, stopped, stopping
#: vm_status: PUT: stopped
#: vm_status:DELETE: running, stopped, stopping
@api_view(('GET', 'POST', 'PUT', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,))  # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_backup(request, hostname_or_uuid, bkpname, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/backup/(bkpname)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/backup/(bkpdef)>`),
    delete (:http:delete:`DELETE </vm/(hostname_or_uuid)/backup/(bkpname)>`) or
    restore (:http:put:`PUT </vm/(hostname_or_uuid)/backup/(bkpname)>`)
    a backup of VM's disk.

    .. http:get:: /vm/(hostname_or_uuid)/backup/(bkpname)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Original server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpname: **required** - Backup name
        :type bkpname: string
        :arg data.disk_id: **required** - Original disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Backup not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpname.bkpdef: **required** - Backup definition name
        :type bkpname.bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.note: Backup comment
        :type data.note: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Backup already exists
        :status 412: Invalid disk_id
        :status 417: DC backup size limit reached
        :status 423: Node is not operational / VM is not operational
        :status 428: VM is not installed

    .. http:put:: /vm/(hostname_or_uuid)/backup/(bkpname)

        ..
warning:: A backup restore will restore disk data from the backup into target disk; \ All data created after the backup (including all existing snapshots) on target server will be lost! :DC-bound?: * |dc-yes| :Permissions: * |Admin| :Asynchronous?: * |async-yes| - Restore backup :arg hostname_or_uuid: **required** - Original server hostname or uuid :type hostname_or_uuid: string :arg bkpname: **required** - Backup name :type bkpname: string :arg data.disk_id: Original disk number/ID (default: 1) :type data.disk_id: integer :arg data.target_hostname_or_uuid: **required** - Target server hostname or uuid :type data.target_hostname_or_uuid: string :arg data.target_disk_id: **required** - Target disk number/ID :type data.target_disk_id: integer :arg data.force: Force restore and delete existing snapshots and backups (default: true) :type data.force: boolean :status 200: SUCCESS :status 201: PENDING :status 400: FAILURE :status 403: Forbidden :status 404: Backup not found :status 409: VM has pending tasks :status 412: Invalid disk_id / Invalid target_disk_id :status 417: VM backup status is not OK / VM has snapshots (force=false) :status 423: Node is not operational / VM is not operational / VM is not stopped / VM is locked or has slave VMs :status 428: VM brand mismatch / Disk size mismatch / Not enough free space on target storage .. http:put:: /vm/(hostname_or_uuid)/backup/(bkpname) :DC-bound?: * |dc-yes| :Permissions: * |Admin| :Asynchronous?: * |async-no| - Update backup note :arg hostname_or_uuid: **required** - Original server hostname or uuid :type hostname_or_uuid: string :arg bkpname: **required** - Backup name :type bkpname: string :arg data.note: **required** - Backup comment (change note instead of restore if specified) :type data.note: string :status 200: SUCCESS :status 400: FAILURE :status 403: Forbidden :status 404: Backup not found .. http:delete:: /vm/(hostname_or_uuid)/backup/(bkpname) :DC-bound?: * |dc-yes| :Permissions: * |Admin| :Asynchronous?: * |async-yes| :arg hostname_or_uuid: **required** - Original server hostname or uuid :type hostname_or_uuid: string :arg bkpname: **required** - Backup name :type bkpname: string :arg data.disk_id: **required** - Original disk number/ID (default: 1) :type data.disk_id: integer :status 200: SUCCESS :status 201: PENDING :status 400: FAILURE :status 403: Forbidden :status 404: Backup not found :status 412: Invalid disk_id :status 417: VM backup status is not OK :status 423: Node is not operational / VM is not operational """ return VmBackup(request, hostname_or_uuid, bkpname, data).response()
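# --- Editor's example (not part of the source file) ---
# A minimal client-side sketch of the documented POST /vm/.../define/backup/(bkpdef)
# endpoint. BASE_URL, the auth header name, the hostname and the node name are
# illustrative assumptions; only the path and payload fields come from the
# docstrings above.
import requests

BASE_URL = 'https://api.example.com'  # assumed API root
TOKEN = 'secret'                      # assumed API key

resp = requests.post(
    BASE_URL + '/vm/web01.example.com/define/backup/daily',
    headers={'ES-API-KEY': TOKEN},    # header name is an assumption
    json={
        'disk_id': 1,
        'node': 'backup-node01',      # assumed backup node name
        'schedule': '30 4 * * 6',     # CRON example taken from the docstring
        'retention': 14,
    },
)
print(resp.status_code, resp.json())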
41.047404
120
0.629015
2,256
18,184
4.926418
0.104167
0.062984
0.088177
0.033111
0.812939
0.783876
0.759672
0.745366
0.709735
0.6912
0
0.018624
0.255884
18,184
442
121
41.140271
0.802749
0.735757
0
0.446429
0
0
0.119565
0.062071
0
0
0
0.004525
0
1
0.089286
false
0
0.178571
0
0.357143
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2cd9f25cb8ff7e2c71115be3b3ab7878bac0bbd0
258
py
Python
ca_ab_strathcona_county/people.py
dcycle/scrapers-ca
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
[ "MIT" ]
null
null
null
ca_ab_strathcona_county/people.py
dcycle/scrapers-ca
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
[ "MIT" ]
null
null
null
ca_ab_strathcona_county/people.py
dcycle/scrapers-ca
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
[ "MIT" ]
null
null
null
from utils import CSVScraper


class StrathconaCountyPersonScraper(CSVScraper):
    # https://data.strathcona.ca/County-Government/County-Council-2013-2017/suw8-zxcy
    csv_url = 'https://data.strathcona.ca/api/views/suw8-zxcy/rows.csv?accessType=DOWNLOAD'
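# --- Editor's example (not part of the source file) ---
# Illustrative only: peek at the open-data CSV that csv_url points to.
# CSVScraper (imported above from utils, internals not shown in this file)
# presumably does the real download and parsing.
import csv
import io

import requests

url = 'https://data.strathcona.ca/api/views/suw8-zxcy/rows.csv?accessType=DOWNLOAD'
reader = csv.DictReader(io.StringIO(requests.get(url).text))
print(next(reader))  # one dict per council member, keyed by the CSV header row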
36.857143
91
0.790698
33
258
6.151515
0.727273
0.08867
0.187192
0.206897
0
0
0
0
0
0
0
0.042194
0.081395
258
6
92
43
0.814346
0.306202
0
0
0
0.333333
0.423729
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
2cfa27a05981b7c009928431c6a875a2341d7f53
11,694
py
Python
spair/visualizer.py
51616/split
58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19
[ "MIT" ]
18
2020-01-19T10:21:16.000Z
2022-03-13T04:58:39.000Z
spair/visualizer.py
51616/split
58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19
[ "MIT" ]
2
2020-01-29T05:58:30.000Z
2020-11-13T17:41:29.000Z
spair/visualizer.py
51616/split
58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19
[ "MIT" ]
6
2020-02-21T09:45:03.000Z
2021-11-25T12:29:21.000Z
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import tensorflow as tf
import warnings

# plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
mpl.use('agg')
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['savefig.dpi'] = 300
warnings.filterwarnings("ignore", module="matplotlib")


def reconstruction_test(model, test_dataset, filename=None, filepath=None, label=True, n=10):
    # Get a batch of test images
    test_ds = test_dataset.take(n).shuffle(n, seed=1)
    for test_data in test_ds:
        if label:
            images = test_data[0]
        else:
            images = test_data
        x_test = images[:n]
        break
    h, w, channel = x_test.shape[1:4]
    channel = min(3, channel)
    (x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
     z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
     all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm,
     obj_bbox_mask, *more_outputs) = model(x_test)
    num_cells = z_where.shape[1]*z_where.shape[2]
    f, ax = plt.subplots(1, 3)
    ax[0].set_xticks(np.arange(0, h*n, w))
    ax[0].set_yticks(np.arange(0, h*(num_cells+2), w))
    ax[1].set_xticks(np.arange(0, h*n, w))
    ax[1].set_yticks(np.arange(0, h*(num_cells+2), w))
    ax[2].set_xticks(np.arange(0, h*n, w))
    ax[2].set_yticks(np.arange(0, h*(num_cells+2), w))
    # num_channel = x_recon.shape[-1]
    obj_recon = obj_full_recon_unnorm[:, :, :, :, :channel]
    obj_alpha = obj_full_recon_unnorm[:, :, :, :, channel:]
    z_depth = tf.reshape(z_depth, [n, num_cells, 1, 1, 1])
    z_pres = tf.reshape(tf.round(tf.sigmoid(z_pres_logits)), [n, num_cells, 1, 1, 1])
    canvas = np.empty((h*(num_cells+2), w*n, channel))
    canvas_weighted = np.empty((h*(num_cells+2), w*n, channel))
    canvas_weights_only = np.empty((h*(num_cells+2), w*n, channel))  # only weights of that part
    for i in range(n):
        canvas_weights_only[0:h, i*w:(i+1)*w, :] = canvas_weighted[0:h, i*w:(i+1)*w, :] = canvas[0:h, i*w:(i+1)*w, :] = images[i, :, :, :3]
        canvas_weights_only[h:h*2, i*w:(i+1)*w, :] = canvas_weighted[h:h*2, i*w:(i+1)*w, :] = canvas[h:h*2, i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h, w, channel))
        canvas[h*2:, i*w:(i+1)*w, :] = obj_recon[i].numpy().reshape((num_cells*h, w, channel))
        canvas_weighted[h*2:, i*w:(i+1)*w, :] = (obj_recon[i]*obj_alpha[i]*z_pres[i]*tf.nn.sigmoid(-z_depth[i])).numpy().reshape((num_cells*h, w, channel))
        canvas_weights_only[h*2:, i*w:(i+1)*w, 0] = (tf.ones(shape=obj_alpha[i].shape)*z_pres[i]).numpy().reshape((num_cells*h, w))  # *tf.nn.sigmoid(-z_depth[i])
    ax[0].imshow(np.squeeze(canvas), cmap='gray')
    ax[0].set_title('reconstruction')
    ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    ax[1].imshow(np.squeeze(canvas_weighted), cmap='gray')
    ax[1].set_title('reconstruction weighted')
    ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    ax[2].imshow(np.squeeze(canvas_weights_only), cmap='inferno')
    ax[2].set_title('weights')
    ax[2].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[2].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    if filename is None:
        plt.savefig(filepath + 'x_reconstrcution_test_spair.png')
    else:
        plt.savefig(filepath + 'x_reconstrcution_test' + filename + '.png', dpi=300)
    # plt.close()
    return plt


def reconstruction_bbox(model, test_dataset, filename=None, filepath=None, label=True, n=10):
    # Get a batch of test images
    test_ds = test_dataset.take(n).shuffle(n, seed=1)
    for test_data in test_ds:
        if label:
            images = test_data[0]
        else:
            images = test_data
        x_test = images[:n]
        break
    h, w, channel = x_test.shape[1:4]
    channel = min(3, channel)
    (x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
     z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
     all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm,
     obj_bbox_mask, *more_outputs) = model(x_test)
    num_cells = z_where.shape[1]*z_where.shape[2]
    # f, ax = plt.subplots(1, 1)
    # ax[0].set_xticks(np.arange(0, h*n, w))
    # ax[0].set_yticks(np.arange(0, h*(num_cells+2), w))
    # num_channel = x_recon.shape[-1]
    # print(obj_bbox_mask.numpy())
    z_pres = tf.reshape(tf.round(tf.sigmoid(z_pres_logits)), [n, num_cells, 1])
    colors = tf.constant([[1.0, 1.0, 1.0, 1.0]])
    obj_bbox_mask = obj_bbox_mask * z_pres
    x_recon_w_bbox = tf.image.draw_bounding_boxes(x_recon, obj_bbox_mask, colors)
    img_w_bbox = tf.image.draw_bounding_boxes(x_test[:, :, :, :3], obj_bbox_mask, colors)
    canvas = np.empty((h*3, w*n, channel))
    for i in range(n):
        canvas[0:h, i*w:(i+1)*w, :] = images[i, :, :, :3]
        canvas[h:h*2, i*w:(i+1)*w, :] = img_w_bbox[i].numpy().reshape((h, w, channel))
        # canvas[h*2:h*3, i*w:(i+1)*w, :] = x_recon[i].numpy().reshape((h, w, channel))
        canvas[h*2:h*3, i*w:(i+1)*w, :] = x_recon_w_bbox[i].numpy().reshape((h, w, channel))
    # ax[0].imshow(np.squeeze(canvas), cmap='gray')
    # ax[0].set_title('reconstruction')
    # ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
    # ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    plt.imshow(canvas)
    if filename is None:
        plt.savefig(filepath + 'x_reconstrcution_bbox.png')
    else:
        plt.savefig(filepath + 'x_reconstrcution_bbox' + filename + '.png', dpi=300)
    # plt.close()
    return plt


def glimpses_reconstruction_test(model, test_dataset, filename=None, filepath=None, label=True, n=10):
    # Glimpses
    for test_data in test_dataset:
        if label:
            images = test_data[0]
        else:
            images = test_data
        x_test = images[:n]
        break
    h, w, channel = x_test.shape[1:4]
    channel = min(3, channel)
    (x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
     z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
     all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm,
     obj_bbox_mask, *more_outputs) = model(x_test)
    num_cells = z_where.shape[1]*z_where.shape[2]
    object_size = obj_recon_alpha.shape[2]
    f, ax = plt.subplots(1, 3)
    ax[0].set_xticks(np.arange(0, object_size*n, object_size))
    ax[0].set_yticks(np.arange(0, object_size*num_cells, object_size))
    ax[1].set_xticks(np.arange(0, object_size*n, object_size))
    ax[1].set_yticks(np.arange(0, object_size*num_cells, object_size))
    ax[2].set_xticks(np.arange(0, object_size*n, object_size))
    ax[2].set_yticks(np.arange(0, object_size*num_cells, object_size))
    # plot glimpses
    canvas_glimpses = np.empty((object_size*num_cells, object_size*n, channel))
    canvas_glimpses_recon = np.empty((object_size*num_cells, object_size*n, channel))
    canvas_glimpses_alpha = np.zeros((object_size*num_cells, object_size*n))
    for i in range(n):
        canvas_glimpses[:, i*object_size:(i+1)*object_size, :] = all_glimpses[i].numpy().reshape((num_cells*object_size, object_size, channel))
        canvas_glimpses_recon[:, i*object_size:(i+1)*object_size, :] = obj_recon_unnorm[i].numpy().reshape((num_cells*object_size, object_size, channel))
        canvas_glimpses_alpha[:, i*object_size:(i+1)*object_size] = obj_recon_alpha[i].numpy().reshape((num_cells*object_size, object_size))
    ax[0].imshow(np.squeeze(canvas_glimpses), cmap='gray')
    ax[0].set_title('Glimpses')
    ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    ax[1].imshow(np.squeeze(canvas_glimpses_recon), cmap='gray')
    ax[1].set_title('Glimpses reconstruction')
    ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    ax[2].imshow(np.squeeze(canvas_glimpses_alpha), cmap='viridis')  # cmap='gray'
    ax[2].set_title('Glimpses alpha')
    ax[2].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[2].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    if filename is None:
        plt.savefig(filepath + 'glimpses.png')
    else:
        plt.savefig(filepath + 'glimpses' + filename + '.png', dpi=300)
    # plt.close()
    return plt


def glimpses_local_reconstruction_test(model, test_dataset, filename=None, filepath=None, label=True, n=10):
    # Glimpses
    for test_data in test_dataset:
        if label:
            images = test_data[0]
        else:
            images = test_data
        x_test = images[:n]
        break
    h, w, channel = x_test.shape[1:4]
    channel = min(3, channel)
    (x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma,
     z_depth, z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid,
     all_glimpses, obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm,
     obj_bbox_mask, z_bg, z_bg_mean, z_bg_sig, x_hat_recon, z_l, z_l_mean, z_l_sig, x_hat) = model(x_test)
    num_cells = z_where.shape[1]*z_where.shape[2]
    object_size = obj_recon_alpha.shape[2]
    f, ax = plt.subplots(1, 2)
    ax[0].set_xticks(np.arange(0, object_size*n, object_size))
    ax[0].set_yticks(np.arange(0, object_size*num_cells, object_size))
    ax[1].set_xticks(np.arange(0, object_size*n, object_size))
    ax[1].set_yticks(np.arange(0, object_size*num_cells, object_size))
    # plot glimpses
    canvas_glimpses = np.empty((object_size*num_cells, object_size*n, channel))
    canvas_glimpses_recon = np.empty((object_size*num_cells, object_size*n, channel))
    for i in range(n):
        canvas_glimpses[:, i*object_size:(i+1)*object_size, :] = x_hat[i].numpy().reshape((num_cells*object_size, object_size, channel))
        canvas_glimpses_recon[:, i*object_size:(i+1)*object_size, :] = x_hat_recon[i].numpy().reshape((num_cells*object_size, object_size, channel))
    ax[0].imshow(np.squeeze(canvas_glimpses), cmap='gray')
    ax[0].set_title('Glimpses')
    ax[0].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[0].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    ax[1].imshow(np.squeeze(canvas_glimpses_recon), cmap='gray')
    ax[1].set_title('Glimpses reconstruction')
    ax[1].grid(b=True, which='major', color='#ffffff', linestyle='-')
    ax[1].tick_params(top=False, bottom=False, left=False, right=False, labelleft=False, labelbottom=False)
    if filename is None:
        plt.savefig(filepath + 'glimpses_local.png')
    else:
        plt.savefig(filepath + 'glimpses_local' + filename + '.png', dpi=300)
    # plt.close()
    return plt


def x_hat_reconstruction_test(model, test_dataset, filename=None, filepath=None, label=True, n=10):
    for test_data in test_dataset:
        if label:
            images = test_data[0]
        else:
            images = test_data
        x_test = images[:n]
        break
    h, w, channel = x_test.shape[1:4]
    channel = min(3, channel)
    (x_recon, z_what, z_what_mean, z_what_sigma, z_where, z_where_mean, z_where_sigma, z_depth,
     z_depth_mean, z_depth_sigma, z_pres, z_pres_logits, z_pres_pre_sigmoid, all_glimpses,
     obj_recon_unnorm, obj_recon_alpha, obj_full_recon_unnorm, obj_bbox_mask,
     *_, x_hat_recon, z_l, z_l_mean, z_l_sig) = model(x_test)
    canvas_x_hat = np.empty((h*2, w*n, channel))
    for i in range(n):
        canvas_x_hat[0:h, i*w:(i+1)*w, :] = x_hat_recon[i].numpy().reshape((h, w, channel))
        canvas_x_hat[h:h*2, i*w:(i+1)*w, :] = images[i, :, :, 3:]
    plt.figure(figsize=(2*n, 2))
    plt.imshow(canvas_x_hat)
    if filename is None:
        plt.savefig(filepath + 'x_hat_reconstrcution_test_lg_vae.png')
    else:
        plt.savefig(filepath + 'x_hat_reconstrcution_test' + filename + '.png')
    plt.close()
    return canvas_x_hat
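# --- Editor's example (not part of the source file) ---
# Every plotting helper above uses the same canvas-tiling pattern: allocate one
# large array, then paste each image of the batch into its own column slot.
# A minimal self-contained sketch of just that pattern, with made-up shapes:
import numpy as np

n, h, w, channel = 4, 32, 32, 3            # illustrative batch and image sizes
images = np.random.rand(n, h, w, channel)  # stand-in for a batch of test images

canvas = np.empty((h, w * n, channel))     # one row of n tiles
for i in range(n):
    canvas[:, i * w:(i + 1) * w, :] = images[i]  # paste image i into column i

assert canvas.shape == (h, w * n, channel)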
40.888112
171
0.71712
2,062
11,694
3.821532
0.07323
0.06599
0.020558
0.007614
0.893528
0.883629
0.859264
0.830203
0.80533
0.747462
0
0.020161
0.109287
11,694
285
172
41.031579
0.736367
0.07243
0
0.61194
0
0
0.049261
0.014695
0
0
0
0
0
1
0.024876
false
0
0.024876
0
0.074627
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fa2b5cb5ed4980f194e240be2b75a587b0badc44
283
py
Python
python/graphscope/nx/tests/algorithms/forward/traversal/test_bfs.py
LI-Mingyu/GraphScope-MY
942060983d3f7f8d3a3377467386e27aba285b33
[ "Apache-2.0" ]
1,521
2020-10-28T03:20:24.000Z
2022-03-31T12:42:51.000Z
python/graphscope/nx/tests/algorithms/forward/traversal/test_bfs.py
LI-Mingyu/GraphScope-MY
942060983d3f7f8d3a3377467386e27aba285b33
[ "Apache-2.0" ]
850
2020-12-15T03:17:32.000Z
2022-03-31T11:40:13.000Z
python/graphscope/nx/tests/algorithms/forward/traversal/test_bfs.py
LI-Mingyu/GraphScope-MY
942060983d3f7f8d3a3377467386e27aba285b33
[ "Apache-2.0" ]
180
2020-11-10T03:43:21.000Z
2022-03-28T11:13:31.000Z
import networkx.algorithms.traversal.tests.test_bfs
import pytest

from graphscope.nx.utils.compat import import_as_graphscope_nx

import_as_graphscope_nx(networkx.algorithms.traversal.tests.test_bfs,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
35.375
81
0.798587
35
283
6.2
0.514286
0.165899
0.248848
0.294931
0.359447
0.359447
0
0
0
0
0
0
0.127208
283
7
82
40.428571
0.878543
0
0
0
0
0
0.063604
0
0
0
0
0
0
1
0
true
0
0.8
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fa2ca703fbc1609eec9fc81c111d891211d5bb82
1,889
py
Python
hypha/public/partner/admin_view.py
maxpearl/hypha
e181ebadfb744aab34617bb766e746368d6f2de0
[ "BSD-3-Clause" ]
20
2021-04-08T16:38:49.000Z
2022-02-09T20:05:57.000Z
hypha/public/partner/admin_view.py
maxpearl/hypha
e181ebadfb744aab34617bb766e746368d6f2de0
[ "BSD-3-Clause" ]
1,098
2017-12-15T11:23:03.000Z
2020-01-24T07:58:07.000Z
hypha/public/partner/admin_view.py
maxpearl/hypha
e181ebadfb744aab34617bb766e746368d6f2de0
[ "BSD-3-Clause" ]
17
2020-02-07T14:55:54.000Z
2021-04-04T19:32:38.000Z
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.contrib.modeladmin.views import CreateView, EditView

from .models import InvestmentCategorySettings


class CreateInvestmentView(CreateView):
    def get_form_kwargs(self):
        kwargs = super(CreateInvestmentView, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs

    def get_context_data(self):
        context = super(CreateInvestmentView, self).get_context_data()
        ics = InvestmentCategorySettings.for_request(self.request)
        categories = ics.categories.all()
        for category in categories:
            field_name = category.name.lower().replace(' ', '_')
            field_panel = FieldPanel(field_name).bind_to(
                model=self.model,
                instance=context['edit_handler'].instance,
                request=context['edit_handler'].request,
                form=context['form']
            )
            context['edit_handler'].children.append(field_panel)
        return context


class EditInvestmentView(EditView):
    def get_form_kwargs(self):
        kwargs = super(EditInvestmentView, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs

    def get_context_data(self):
        context = super(EditInvestmentView, self).get_context_data()
        ics = InvestmentCategorySettings.for_request(self.request)
        categories = ics.categories.all()
        for category in categories:
            field_name = category.name.lower().replace(' ', '_')
            field_panel = FieldPanel(field_name).bind_to(
                model=self.model,
                instance=context['edit_handler'].instance,
                request=context['edit_handler'].request,
                form=context['form']
            )
            context['edit_handler'].children.append(field_panel)
        return context
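# --- Editor's example (not part of the source file) ---
# Both views duplicate get_form_kwargs() and get_context_data() verbatim, so the
# shared logic could live in a mixin. A possible refactor sketch, intended to be
# behaviour-preserving (the mixin name is made up):
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.contrib.modeladmin.views import CreateView, EditView

from .models import InvestmentCategorySettings


class InvestmentFormMixin:
    """Shared request-aware form kwargs and dynamic category panels."""

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs

    def get_context_data(self):
        context = super().get_context_data()
        ics = InvestmentCategorySettings.for_request(self.request)
        for category in ics.categories.all():
            field_name = category.name.lower().replace(' ', '_')
            context['edit_handler'].children.append(
                FieldPanel(field_name).bind_to(
                    model=self.model,
                    instance=context['edit_handler'].instance,
                    request=context['edit_handler'].request,
                    form=context['form'],
                )
            )
        return context


class CreateInvestmentView(InvestmentFormMixin, CreateView):
    pass


class EditInvestmentView(InvestmentFormMixin, EditView):
    pass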
38.55102
70
0.649021
189
1,889
6.291005
0.238095
0.055509
0.090833
0.026913
0.760303
0.760303
0.760303
0.708158
0.708158
0.708158
0
0
0.253044
1,889
48
71
39.354167
0.842665
0
0
0.731707
0
0
0.051879
0
0
0
0
0
0
1
0.097561
false
0
0.073171
0
0.317073
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fa3b6f0ab4dcefd81c358fdcd55e34ceaf514f49
162
py
Python
pilot/views.py
MbuguaM/RideAlong
730ee29aebdd8ab9b2d4639ec6ba5dcccb03ee28
[ "MIT" ]
null
null
null
pilot/views.py
MbuguaM/RideAlong
730ee29aebdd8ab9b2d4639ec6ba5dcccb03ee28
[ "MIT" ]
null
null
null
pilot/views.py
MbuguaM/RideAlong
730ee29aebdd8ab9b2d4639ec6ba5dcccb03ee28
[ "MIT" ]
null
null
null
from django.shortcuts import render
from . import views


# Create your views here.
def landing(request):
    return render(request, 'main_templates/landing.html')
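# --- Editor's example (not part of the source file) ---
# A hypothetical urls.py showing how the landing view would typically be wired
# up; the URL name and module layout are assumptions, not from the source.
from django.urls import path

from . import views

urlpatterns = [
    path('', views.landing, name='landing'),
]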
23.142857
56
0.777778
22
162
5.681818
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.135802
162
7
56
23.142857
0.892857
0.141975
0
0
0
0
0.195652
0.195652
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
fa5a33d45536a357637fce4cce7a1621253fea29
18
py
Python
godaddypy/utils/__init__.py
avitko001c/godaddypy
c5bd91e414cb4831e57fa3bf310d639df29ed4e7
[ "BSD-3-Clause" ]
null
null
null
godaddypy/utils/__init__.py
avitko001c/godaddypy
c5bd91e414cb4831e57fa3bf310d639df29ed4e7
[ "BSD-3-Clause" ]
null
null
null
godaddypy/utils/__init__.py
avitko001c/godaddypy
c5bd91e414cb4831e57fa3bf310d639df29ed4e7
[ "BSD-3-Clause" ]
null
null
null
from . import six
9
17
0.722222
3
18
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.222222
18
1
18
18
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d71838ac9b546defde18727cfc9154d2fb062abe
180
py
Python
modelator_py/apalache/__init__.py
informalsystems/modelator-py
d66464096c022799e680e6201590a2ead69be32d
[ "Apache-2.0" ]
null
null
null
modelator_py/apalache/__init__.py
informalsystems/modelator-py
d66464096c022799e680e6201590a2ead69be32d
[ "Apache-2.0" ]
3
2022-03-30T16:01:49.000Z
2022-03-31T13:40:03.000Z
modelator_py/apalache/__init__.py
informalsystems/modelator-py
d66464096c022799e680e6201590a2ead69be32d
[ "Apache-2.0" ]
null
null
null
from .args import ApalacheArgs
from .pure import PureCmd as ApalachePureCmd
from .pure import apalache_pure
from .raw import RawCmd as ApalacheRawCmd
from .raw import apalache_raw
30
44
0.838889
26
180
5.730769
0.461538
0.107383
0.187919
0
0
0
0
0
0
0
0
0
0.133333
180
5
45
36
0.955128
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d72f7b18ff94314420fae214f2f7eb843142fb79
28,238
py
Python
linebot/connect_data_to_db.py
ThebiggunSeeoil/VIS-MASTER
a54a5f321cfe8b258bacc25458490c5b154edf19
[ "MIT" ]
null
null
null
linebot/connect_data_to_db.py
ThebiggunSeeoil/VIS-MASTER
a54a5f321cfe8b258bacc25458490c5b154edf19
[ "MIT" ]
null
null
null
linebot/connect_data_to_db.py
ThebiggunSeeoil/VIS-MASTER
a54a5f321cfe8b258bacc25458490c5b154edf19
[ "MIT" ]
null
null
null
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django.http import JsonResponse
from django import template
import json
import datetime
from django.utils import timezone
from dateutil.relativedelta import relativedelta, SA, TH
from app.models import Team, Site, Nozzle, Status, Status_Error_logger, VIS_ip_address, Setup_Config
from django.db.models import OuterRef, Subquery, Count, Min
from linebot.creating_flex_messages import *


class connect_data_to_db():

    def prepare_nozzle(GET_VIS_DATA, GET_VIS_DATA_ALL, NOZZLE):
        vis_check = []   # stores seen name_id values so nothing is recorded twice
        vis_result = []  # rows built from the site table (by name_id), each given a 'Unit_log_address': [] slot that will hold the nozzle data
        for data in GET_VIS_DATA:
            # print(data)
            # If this name_id is not in vis_check yet, build its record below once,
            # then remember the name_id so later loop rounds do not repeat the work.
            if data['name_id'] not in vis_check:
                vis_check.append(data['name_id'])  # remember this name_id
                # build the record that will be kept in vis_result
                data = {'name_id': data['name_id'], 'log_address_check': [], 'pump_log_address_check': [],
                        'nozzle_data_check': [], 'log_address_count': [], 'pump_log_address_count': [],
                        'nozzle_data_count': [], 'site_name': data['site__station_name'],
                        'station_ip': data['site__station_ip'],
                        'station_monitor_device': data['site__station_monitor_device'],
                        'MWGT_status': data['MWGT_status'], 'VIS_status': data['VIS_status'],
                        'NOZZLE_status_check': data['NOZZLE_status_check'],
                        'BATTERY_status_check': data['BATTERY_status_check'],
                        'VIS_last_time': data['VIS_last_time'], 'Unit_log_address': []}
                vis_result.append(data)  # keep the record in vis_result for use by the other functions

        # for vis_1 in vis_result:
        #     print('vis 1 ', vis_1)

        for name_id in vis_result:
            for data in NOZZLE:
                if data['site_id'] == name_id['name_id']:
                    name_id['nozzle_data_check'].append(data['nozzle_num'])
                    if data['pump_log_address'] not in name_id['pump_log_address_check']:
                        name_id['pump_log_address_check'].append(data['pump_log_address'])
                    if data['log_address'] not in name_id['log_address_check']:
                        name_id['log_address_check'].append(data['log_address'])

        for count in vis_result:
            count_log = len(count['pump_log_address_check'])
            count_num = len(count['nozzle_data_check'])
            count_log_main = len(count['log_address_check'])
            count['pump_log_address_count'] = count_log
            count['nozzle_data_count'] = count_num
            count['log_address_count'] = count_log_main

        GET_VIS_DATA_ALL_CHECK_STORE = []  # stores seen Unit_log_address keys so nothing is recorded twice
        for Unit_check in vis_result:
            for GET_VIS_DATA_ALL_CHECK in GET_VIS_DATA_ALL:
                log_check = str(GET_VIS_DATA_ALL_CHECK['name_id']) + str(GET_VIS_DATA_ALL_CHECK['Unit_log_address'])
                if GET_VIS_DATA_ALL_CHECK['name_id'] == Unit_check['name_id']:
                    if log_check not in GET_VIS_DATA_ALL_CHECK_STORE:
                        GET_VIS_DATA_ALL_CHECK_STORE.append(log_check)
                        value = {'Unit_log_address': GET_VIS_DATA_ALL_CHECK['Unit_log_address'],
                                 'DataUnitMap_IP': GET_VIS_DATA_ALL_CHECK['DataUnitMap_IP'], 'nozzle': []}
                        Unit_check['Unit_log_address'].append(value)

        GET_NOZZLE_CHECK_STORE = []  # stores seen Unit_log_address keys so nothing is recorded twice
        for nozzle_check in vis_result:
            for GET_VIS_DATA_ALL_CHECK in GET_VIS_DATA_ALL:
                if GET_VIS_DATA_ALL_CHECK['name_id'] == nozzle_check['name_id']:
                    log_check = str(GET_VIS_DATA_ALL_CHECK['name_id']) + str(GET_VIS_DATA_ALL_CHECK['Unit_log_address'])
                    value = {'Unit_log_address': GET_VIS_DATA_ALL_CHECK['Unit_log_address'], 'nozzle': []}
                    for nozzle_loop in nozzle_check['Unit_log_address']:
                        if nozzle_loop['Unit_log_address'] == GET_VIS_DATA_ALL_CHECK['Unit_log_address']:
                            nozzle_loop['nozzle'].append(GET_VIS_DATA_ALL_CHECK)
        # print(vis_result)
        return (vis_result)

    def RequestDataDBByUserRequestByIpAddress(user_type, ip_address_request):
        data = []
        data_site_name_id = Status.objects.values('name_id', 'site__station_name', 'site__station_ip',
                                                  'site__station_monitor_device', 'MWGT_status', 'VIS_status',
                                                  'NOZZLE_status_check', 'BATTERY_status_check', 'VIS_last_time',
                                                  'Unit_log_address') \
            .annotate(dcount=Count('Unit_log_address')) \
            .filter(site__station_active=True, site__station_ip=ip_address_request).order_by('name_id')
        data_status = Status.objects.values().filter(site__station_active=True, site__station_ip=ip_address_request)
        nozzle_count = Nozzle.objects.values().filter(site__station_active=True, site__station_ip=ip_address_request)
        results = connect_data_to_db.prepare_nozzle(data_site_name_id, data_status, nozzle_count)
        return creating_flex_messages.CreateFormDetailByIpAddress(results)

    def different_time_calculate(TimeZone, TimeCalculate):
        # print(TimeCalculate)
        # TimeCalculateDetail = TimeCalculate[1].MWGT_last_time
        # print('TimeCalculateDetail', TimeCalculate)
        different_time = relativedelta(TimeZone, TimeCalculate)  # difference between now and MWGT_last_time
        day_loss = different_time.days         # days component of different_time
        hours_loss = different_time.hours      # hours component of different_time
        minutes_loss = different_time.minutes  # minutes component of different_time
        hours_count = TimeZone - TimeCalculate
        hours_def = hours_count.total_seconds()
        hours_deff = (hours_def/60)/60  # the whole time difference expressed in hours
        # print(hours_deff)
        # datetime_now = datetime.datetime.now().strftime("%d-%m-%y %H:%M")
        # MWGT_last_time = TimeCalculate.strftime("%d-%m-%y %H:%M")  # format the datetime
        # print('TimeCalculateDetail', TimeCalculate)
        # print('different_time', different_time)
        # print('day_loss', day_loss)
        # print('hours_loss', hours_loss)
        # print('minutes_loss', minutes_loss)
        # print('datetime_now', datetime_now)
        # print('MWGT_last_time', MWGT_last_time)
        return day_loss, hours_loss, minutes_loss, hours_deff

    def RequestDataDBForMGR():
        dt = datetime.datetime.now().strftime("%d-%m-%d %H:%M")
        VIS_SUM_OFFLINE = Status.objects.filter(VIS_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        MWGT_SUM_OFFLINE = Status.objects.filter(MWGT_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        TOTAL_SITE_ACTIVE = Site.objects.filter(station_active=True).values('station_ip').annotate(dcount=Count('station_ip')).count()
        NOZZLE_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline', site__station_active=True).count()
        BATTERY_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True).count()
        return creating_flex_messages.CreateFormAllStatusForMGR(dt, VIS_SUM_OFFLINE, MWGT_SUM_OFFLINE, NOZZLE_OFFLINE, BATTERY_OFFLINE, TOTAL_SITE_ACTIVE)

    def RequestAllDataForTechnician(user_type, message):
        dt = datetime.datetime.now().strftime("%d-%m-%d %H:%M")
        VIS_SUM_OFFLINE = Status.objects.filter(VIS_status='offline', site__station_active=True, site__team_support=user_type.if_technician).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        MWGT_SUM_OFFLINE = Status.objects.filter(MWGT_status='offline', site__station_active=True, site__team_support=user_type.if_technician).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        TOTAL_SITE_ACTIVE = Site.objects.filter(station_active=True, team_support=user_type.if_technician).values('station_ip').annotate(dcount=Count('station_ip')).count()
        NOZZLE_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline', site__station_active=True, site__team_support=user_type.if_technician).count()
        BATTERY_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True, site__team_support=user_type.if_technician).count()
        return creating_flex_messages.CreateFormAllStatusForFirstLevel(dt, VIS_SUM_OFFLINE, MWGT_SUM_OFFLINE, NOZZLE_OFFLINE, BATTERY_OFFLINE, TOTAL_SITE_ACTIVE, user_type)

    def RequestAllDataForAllUser(user_type, message):
        dt = datetime.datetime.now().strftime("%d-%m-%d %H:%M")
        VIS_SUM_OFFLINE = Status.objects.filter(VIS_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        MWGT_SUM_OFFLINE = Status.objects.filter(MWGT_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        TOTAL_SITE_ACTIVE = Site.objects.filter(station_active=True).values('station_ip').annotate(dcount=Count('station_ip')).count()
        NOZZLE_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline', site__station_active=True).count()
        BATTERY_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True).count()
        return creating_flex_messages.CreateFormAllStatusForFirstLevel(dt, VIS_SUM_OFFLINE, MWGT_SUM_OFFLINE, NOZZLE_OFFLINE, BATTERY_OFFLINE, TOTAL_SITE_ACTIVE, user_type)

    def RequestDataDBForTechnician(user_type, message):
        VIS_SUM_OFFLINE = Status.objects.filter(VIS_status='offline', site__station_active=True, site__team_support=user_type.if_technician).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        MWGT_SUM_OFFLINE = Status.objects.filter(MWGT_status='offline', site__station_active=True, site__team_support=user_type.if_technician).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        if message in ('nozzle_status', 'battery_status'):
            TOTAL_SITE_ACTIVE = Nozzle.objects.filter(site__station_active=True, active_nozzle=True, site__team_support=user_type.if_technician).values('id').count()
        if message not in ('nozzle_status', 'battery_status'):
            TOTAL_SITE_ACTIVE = Site.objects.filter(station_active=True, team_support=user_type.if_technician).values('station_ip').annotate(dcount=Count('station_ip')).count()
        # MWGT_LAST_OFFLINE = Status.objects.filter(MWGT_status='offline',site__station_active=True).latest('Timestramp')
        NOZZLE_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline', site__station_active=True, site__team_support=user_type.if_technician).count()
        # NOZZLE_LAST_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline',site__station_active=True).latest('Timestramp')
        BATTERY_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True, site__team_support=user_type.if_technician).count()
        # BATTERY_LAST_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True).latest('Timestramp')
        GET_VIS_DATA = Status.objects.select_related('site').filter(VIS_status='offline', site__station_active=True, site__team_support=user_type.if_technician)
        GET_MWGT_DATA = Status.objects.select_related('site').filter(MWGT_status='offline', site__station_active=True, site__team_support=user_type.if_technician)
        GET_NOZZLE_DATA = Status.objects.select_related('site').filter(NOZZLE_status_check='offline', site__station_active=True, site__team_support=user_type.if_technician)
        GET_BATTERY_DATA = Status.objects.select_related('site').filter(BATTERY_status_check='low', site__station_active=True, site__team_support=user_type.if_technician)
        STATUS_CONFIG = Setup_Config.objects.values()
        for setup_config in STATUS_CONFIG:
            time_alert_alarm_hours = setup_config['time_alert_alarm_hours']
            time_alert_warning_hours = setup_config['time_alert_warning_hours']
            battery_level_alarm_volt = setup_config['battery_level_alarm_volt']
            battery_level_low_volt = setup_config['battery_level_low_volt']
            battery_level_failed_volt = setup_config['battery_level_failed_volt']
        data_store = []
        vis_check = []
        mwgt_check = []
        vis_result = []
        mwgt_result = []
        nozzle_result = []
        battery_result = []
        for data in GET_VIS_DATA:
            if data.DataUnitMap_IP not in vis_check:
                vis_check.append(data.DataUnitMap_IP)
                # vis_check2.append(data)
                time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.VIS_last_time)
                vis_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'VIS',
                                   'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                   'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                   'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                   'TEAM_ID': data.site.team_support.team, 'TEAM_NAME': data.site.team_support.team_name,
                                   'VIS_last_time': data.VIS_last_time, 'TIME_UPDATE': timezone.now()})
        for data in GET_MWGT_DATA:
            if data.DataUnitMap_IP not in mwgt_check:
                mwgt_check.append(data.DataUnitMap_IP)
                # vis_check2.append(data)
                time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
                mwgt_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'MWGT',
                                    'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                    'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                    'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                    'TEAM_ID': data.site.team_support.team, 'TEAM_NAME': data.site.team_support.team_name,
                                    'DataUnitMap_IP': data.DataUnitMap_IP, 'MWGT_last_time': data.MWGT_last_time,
                                    'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', mwgt_result)
        for data in GET_NOZZLE_DATA:
            time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
            # print('time_def_check', time_def_check)
            # print('time', data.MWGT_last_time)
            nozzle_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'NOZZLE',
                                  'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                  'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                  'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                  'TEAM_ID': data.site.team_support.team, 'VIS_last_time': data.VIS_last_time,
                                  'Unit_log_address': data.Unit_log_address, 'TEAM_NAME': data.site.team_support.team_name,
                                  'NOZZLE_pump_log_address': data.NOZZLE_pump_log_address, 'NOZZLE_num': data.NOZZLE_num,
                                  'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', nozzle_result)
        for data in GET_BATTERY_DATA:
            time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
            battery_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'BATT',
                                   'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                   'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                   'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                   'TEAM_ID': data.site.team_support.team, 'BATTERY_status_check': data.BATTERY_status_check,
                                   'NOZZLE_SN': data.NOZZLE_SN, 'NOZZLE_Battery_Status_Volts': data.NOZZLE_Battery_Status_Volts,
                                   'TEAM_NAME': data.site.team_support.team_name,
                                   'NOZZLE_pump_log_address': data.NOZZLE_pump_log_address, 'NOZZLE_num': data.NOZZLE_num,
                                   'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', battery_result)
        data = {'user_type': user_type, 'TIME_UPDATE': timezone.now(), 'VIS_SUM_OFFLINE': VIS_SUM_OFFLINE,
                'MWGT_SUM_OFFLINE': MWGT_SUM_OFFLINE, 'TOTAL_SITE_ACTIVE': TOTAL_SITE_ACTIVE,
                'NOZZLE_OFFLINE': NOZZLE_OFFLINE, 'BATTERY_OFFLINE': BATTERY_OFFLINE,
                'VIS_DETAIL': vis_result, 'MWTG_DETAIL': mwgt_result, 'NOZZLE_DETAIL': nozzle_result,
                'BATTERY_DETAIL': battery_result, 'time_alert_alarm_hours': time_alert_alarm_hours,
                'time_alert_warning_hours': time_alert_warning_hours, 'battery_level_alarm_volt': battery_level_alarm_volt,
                'battery_level_low_volt': battery_level_low_volt, 'battery_level_failed_volt': battery_level_failed_volt}
        if message == 'vis_status':
            return creating_flex_messages.CreateFormVisFlexMessageDetail(data, user_type)
        elif message == 'mwgt_status':
            return creating_flex_messages.CreateFormMwgtFlexMessageDetail(data, user_type)
        elif message == 'nozzle_status':
            return creating_flex_messages.CreateFormNozzleFlexMessageDetail(data, user_type)
        elif message == 'battery_status':
            return creating_flex_messages.CreateFormBatteryFlexMessageDetail(data, user_type)

    # checks the latest recorded VIS status so it can be replied for the VIS device to continue
    def RequestLastVisStatusRecord(name_id):
        # for vis_check in (payload):  # Loop each nozzle for update into database
        name_id = name_id['events'][0]['name_id']
        try:
            vis_last_status = Status.objects.filter(name_id=name_id).values('VIS_status').distinct().first()
            if vis_last_status != None:
                return vis_last_status['VIS_status']  # found: this station was recorded before
            else:
                vis_last_status = 'not_found'
                return vis_last_status  # not found: a new station that has never sent data in
            # for i in vis_last_status:
            #     print(i)
            # return vis_last_status['VIS_status']  # latest VIS status, replied so the VIS device can continue
        except Status.DoesNotExist:
            print('Cannot send battery status back to device')

    def RequestDataDBForAllUser(user_type, message):
        VIS_SUM_OFFLINE = Status.objects.filter(VIS_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        MWGT_SUM_OFFLINE = Status.objects.filter(MWGT_status='offline', site__station_active=True).values('DataUnitMap_IP').annotate(dcount=Count('DataUnitMap_IP')).count()
        if message in ('nozzle_status', 'battery_status'):
            TOTAL_SITE_ACTIVE = Nozzle.objects.filter(site__station_active=True, active_nozzle=True).values('id').count()
        if message not in ('nozzle_status', 'battery_status'):
            TOTAL_SITE_ACTIVE = Site.objects.filter(station_active=True).values('station_ip').annotate(dcount=Count('station_ip')).count()
        # MWGT_LAST_OFFLINE = Status.objects.filter(MWGT_status='offline',site__station_active=True).latest('Timestramp')
        # MWGT_LAST_OFFLINE = Status.objects.filter(MWGT_status='offline',site__station_active=True).latest('Timestramp')
        NOZZLE_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline', site__station_active=True).count()
        # NOZZLE_LAST_OFFLINE = Status.objects.filter(NOZZLE_status_check='offline',site__station_active=True).latest('Timestramp')
        BATTERY_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True).count()
        # BATTERY_LAST_OFFLINE = Status.objects.filter(BATTERY_status_check='low', site__station_active=True).latest('Timestramp')
        GET_VIS_DATA = Status.objects.select_related('site').filter(VIS_status='offline', site__station_active=True)
        GET_MWGT_DATA = Status.objects.select_related('site').filter(MWGT_status='offline', site__station_active=True)
        GET_NOZZLE_DATA = Status.objects.select_related('site').filter(NOZZLE_status_check='offline', site__station_active=True)
        GET_BATTERY_DATA = Status.objects.select_related('site').filter(BATTERY_status_check='low', site__station_active=True)
        STATUS_CONFIG = Setup_Config.objects.values()
        for setup_config in STATUS_CONFIG:
            time_alert_alarm_hours = setup_config['time_alert_alarm_hours']
            time_alert_warning_hours = setup_config['time_alert_warning_hours']
            battery_level_alarm_volt = setup_config['battery_level_alarm_volt']
            battery_level_low_volt = setup_config['battery_level_low_volt']
            battery_level_failed_volt = setup_config['battery_level_failed_volt']
        data_store = []
        vis_check = []
        mwgt_check = []
        vis_result = []
        mwgt_result = []
        nozzle_result = []
        battery_result = []
        for data in GET_VIS_DATA:
            if data.DataUnitMap_IP not in vis_check:
                vis_check.append(data.DataUnitMap_IP)
                # vis_check2.append(data)
                time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.VIS_last_time)
                vis_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'VIS',
                                   'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                   'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                   'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                   'TEAM_ID': data.site.team_support.team, 'TEAM_NAME': data.site.team_support.team_name,
                                   'VIS_last_time': data.VIS_last_time, 'TIME_UPDATE': timezone.now()})
        for data in GET_MWGT_DATA:
            if data.DataUnitMap_IP not in mwgt_check:
                mwgt_check.append(data.DataUnitMap_IP)
                # vis_check2.append(data)
                time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
                mwgt_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'MWGT',
                                    'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                    'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                    'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                    'TEAM_ID': data.site.team_support.team, 'DataUnitMap_IP': data.DataUnitMap_IP,
                                    'MWGT_last_time': data.MWGT_last_time, 'TEAM_NAME': data.site.team_support.team_name,
                                    'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', mwgt_result)
        for data in GET_NOZZLE_DATA:
            time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
            # print('time_def_check', time_def_check)
            # print('time', data.MWGT_last_time)
            nozzle_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'NOZZLE',
                                  'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                  'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                  'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts,
                                  'TEAM_ID': data.site.team_support.team, 'VIS_last_time': data.VIS_last_time,
                                  'Unit_log_address': data.Unit_log_address, 'TEAM_NAME': data.site.team_support.team_name,
                                  'NOZZLE_pump_log_address': data.NOZZLE_pump_log_address, 'NOZZLE_num': data.NOZZLE_num,
                                  'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', nozzle_result)
        for data in GET_BATTERY_DATA:
            time_def_check = connect_data_to_db.different_time_calculate(timezone.now(), data.MWGT_last_time)
            battery_result.append({'name': data.site, 'ip_address': data.site.station_ip, 'type': 'BATT',
                                   'NOZZLE_Last_conn': data.NOZZLE_Last_conn,
                                   'time_dif': {'day': time_def_check[0], 'hour': time_def_check[1], 'minutes': time_def_check[2], 'hours_deff': time_def_check[3]},
                                   'NOZZLE_Battery_Status': data.NOZZLE_Battery_Status_Volts, 'NOZZLE_SN': data.NOZZLE_SN,
                                   'TEAM_ID': data.site.team_support.team, 'BATTERY_status_check': data.BATTERY_status_check,
                                   'NOZZLE_Battery_Status_Volts': data.NOZZLE_Battery_Status_Volts,
                                   'TEAM_NAME': data.site.team_support.team_name,
                                   'NOZZLE_pump_log_address': data.NOZZLE_pump_log_address, 'NOZZLE_num': data.NOZZLE_num,
                                   'TIME_UPDATE': timezone.now()})
        # print('mwgt_result', battery_result)
        data = {'user_type': user_type, 'TIME_UPDATE': timezone.now(), 'VIS_SUM_OFFLINE': VIS_SUM_OFFLINE,
                'MWGT_SUM_OFFLINE': MWGT_SUM_OFFLINE, 'TOTAL_SITE_ACTIVE': TOTAL_SITE_ACTIVE,
                'NOZZLE_OFFLINE': NOZZLE_OFFLINE, 'BATTERY_OFFLINE': BATTERY_OFFLINE,
                'VIS_DETAIL': vis_result, 'MWTG_DETAIL': mwgt_result, 'NOZZLE_DETAIL': nozzle_result,
                'BATTERY_DETAIL': battery_result, 'time_alert_alarm_hours': time_alert_alarm_hours,
                'time_alert_warning_hours': time_alert_warning_hours, 'battery_level_alarm_volt': battery_level_alarm_volt,
                'battery_level_low_volt': battery_level_low_volt, 'battery_level_failed_volt': battery_level_failed_volt}
        if message == 'vis_status':
            return creating_flex_messages.CreateFormVisFlexMessageDetail(data, user_type)
        elif message == 'mwgt_status':
            return creating_flex_messages.CreateFormMwgtFlexMessageDetail(data, user_type)
        elif message == 'nozzle_status':
            return creating_flex_messages.CreateFormNozzleFlexMessageDetail(data, user_type)
        elif message == 'battery_status':
            return creating_flex_messages.CreateFormBatteryFlexMessageDetail(data, user_type)
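# --- Editor's example (not part of the source file) ---
# different_time_calculate() above leans entirely on dateutil.relativedelta plus
# timedelta.total_seconds(). A minimal self-contained sketch of that calculation
# with made-up timestamps:
import datetime

from dateutil.relativedelta import relativedelta

now = datetime.datetime(2020, 11, 12, 10, 30)
last_seen = datetime.datetime(2020, 11, 10, 7, 0)

diff = relativedelta(now, last_seen)                       # calendar-aware difference
hours_total = (now - last_seen).total_seconds() / 60 / 60  # total gap in hours

print(diff.days, diff.hours, diff.minutes)  # 2 3 30
print(hours_total)                          # 51.5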
88.520376
373
0.673206
3,772
28,238
4.698568
0.08245
0.035378
0.043164
0.047396
0.808554
0.788354
0.762794
0.760537
0.753089
0.750324
0
0.002168
0.216092
28,238
319
374
88.520376
0.789167
0.11141
0
0.569853
0
0
0.164869
0.039629
0
0
0
0
0
1
0.033088
false
0
0.051471
0
0.147059
0.003676
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
d74863532d38a1e387a56975c7d04bb6c95af90e
42
py
Python
backend/addons/sale_invoice_line_note/tests/__init__.py
maherjaballi/odoo-react-cicd
e99f0e3216094818d94e99df19da9626afe7f9d8
[ "MIT" ]
null
null
null
backend/addons/sale_invoice_line_note/tests/__init__.py
maherjaballi/odoo-react-cicd
e99f0e3216094818d94e99df19da9626afe7f9d8
[ "MIT" ]
null
null
null
backend/addons/sale_invoice_line_note/tests/__init__.py
maherjaballi/odoo-react-cicd
e99f0e3216094818d94e99df19da9626afe7f9d8
[ "MIT" ]
null
null
null
from . import test_sale_invoice_line_note
21
41
0.880952
7
42
4.714286
1
0
0
0
0
0
0
0
0
0
0
0
0.095238
42
1
42
42
0.868421
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d77b31bfb7cead15620838ff992e0c280e6b43f5
1,174
py
Python
users/migrations/0012_auto_20201111_2258.py
linikerunk/tcc-people-analytics
fdda975682d5299c8384e31ebb974dc085330875
[ "MIT" ]
null
null
null
users/migrations/0012_auto_20201111_2258.py
linikerunk/tcc-people-analytics
fdda975682d5299c8384e31ebb974dc085330875
[ "MIT" ]
1
2020-10-11T10:09:39.000Z
2020-10-11T10:09:39.000Z
users/migrations/0012_auto_20201111_2258.py
linikerunk/TCC_PeopleAnalytics
fdda975682d5299c8384e31ebb974dc085330875
[ "MIT" ]
null
null
null
# Generated by Django 2.2.5 on 2020-11-12 01:58

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0011_auto_20201024_1533'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='costcenter',
            name='active',
        ),
        migrations.RemoveField(
            model_name='costcenter',
            name='created',
        ),
        migrations.RemoveField(
            model_name='costcenter',
            name='modified',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='active',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='created',
        ),
        migrations.RemoveField(
            model_name='employee',
            name='modified',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='active',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='created',
        ),
        migrations.RemoveField(
            model_name='unity',
            name='modified',
        ),
    ]
23.48
47
0.504259
91
1,174
6.373626
0.362637
0.325862
0.403448
0.465517
0.741379
0.741379
0
0
0
0
0
0.042759
0.382453
1,174
49
48
23.959184
0.757241
0.03833
0
0.837209
1
0
0.14197
0.020408
0
0
0
0
0
1
0
false
0
0.023256
0
0.093023
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
d79431e424f8b8617e779e1b152bc295a29010bc
91
py
Python
TornadoAPI/models/customers.py
darkwind/WebTemplate
7ed4f32393eb2df4a7b7fc0034c0dcebb9cc5173
[ "MIT" ]
null
null
null
TornadoAPI/models/customers.py
darkwind/WebTemplate
7ed4f32393eb2df4a7b7fc0034c0dcebb9cc5173
[ "MIT" ]
null
null
null
TornadoAPI/models/customers.py
darkwind/WebTemplate
7ed4f32393eb2df4a7b7fc0034c0dcebb9cc5173
[ "MIT" ]
null
null
null
from sqlalchemy import Column, BigInteger, String
from tornado_sqlalchemy import SQLAlchemy
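# --- Editor's example (not part of the source file) ---
# These two imports suggest a declarative model was intended here. A hedged
# sketch of what such a model might look like; the database URL, table name and
# columns are made up, and the wrapper's exact API may differ between
# tornado-sqlalchemy versions.
from sqlalchemy import Column, BigInteger, String
from tornado_sqlalchemy import SQLAlchemy

db = SQLAlchemy(url='sqlite://')  # assumed in-memory URL for illustration


class Customer(db.Model):
    __tablename__ = 'customers'

    id = Column(BigInteger, primary_key=True)
    name = Column(String(255))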
45.5
49
0.879121
11
91
7.181818
0.636364
0.405063
0
0
0
0
0
0
0
0
0
0
0.098901
91
2
50
45.5
0.963415
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ad4fb54bf50fc4ad6a5dd39c22fd69d632b8b7af
48,730
py
Python
solvers.py
acer8/Navier_Stokes_2D
2884ee81239a9b43d422b78fc1cc9bf24cae89f7
[ "MIT" ]
3
2018-03-06T11:50:34.000Z
2022-03-16T00:14:45.000Z
solvers.py
acer8/Navier_Stokes_2D
2884ee81239a9b43d422b78fc1cc9bf24cae89f7
[ "MIT" ]
null
null
null
solvers.py
acer8/Navier_Stokes_2D
2884ee81239a9b43d422b78fc1cc9bf24cae89f7
[ "MIT" ]
4
2015-09-03T02:12:31.000Z
2018-11-30T11:43:36.000Z
# -*- coding: utf-8 -*-
"""
This file contains the iterative numerical solvers which use Projection methods
"""
from __future__ import division

import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse.linalg import LinearOperator
import scipy.sparse
import scipy.sparse.linalg as slg
from pyamg import smoothed_aggregation_solver
from matplotlib import cm
import time
import sys
import copy

import structure3

__all__ = ['LinearSystem_solver', 'Gauge_method', 'Alg1', 'Error']


class LinearSystem_solver():
    '''this class contains the linear system solvers for both velocity and pressure;
    it returns the linear system in Scipy sparse matrix form and linear operator form'''

    def __init__(self, Re, mesh, integration_method='Riemann'):
        self.mesh = mesh
        self.Re = Re
        self.integration_method = integration_method

    # Linear systems for velocities (in the form of sparse matrices)
    # It can be used for both intermediate velocity fields (u*) and Gauge variables (m)
    # It returns both the sparse matrix system A and its linear operator
    def Linsys_velocity_matrix(self, velocity):
        m = self.mesh.m
        n = self.mesh.n
        dt = self.mesh.dt
        dx = self.mesh.dx
        dy = self.mesh.dy
        Re = self.Re
        # for square domain only, lx = ly and dx = dy = dh
        dh = dx
        a = dt/(2*Re*dh**2)
        b = (Re*dh**2)/dt + 2
        # Dirichlet boundary condition is applied
        if velocity == "u":
            # construct matrix A: Au = rhs
            # A is symmetric and positive definite with dimension NxN
            N = m*(n-1)
            # block matrix
            maindiag = np.zeros(n-1)
            maindiag[:] = 2*b
            sidediag = np.zeros(n-2)
            sidediag[:] = -1
            B = scipy.sparse.diags([maindiag, sidediag, sidediag], [0, -1, 1])
            A1 = scipy.sparse.kron(scipy.sparse.eye(m, m), B)
            md = np.zeros(N)
            md[0:n-1] = 3.0
            md[-(n-1):] = 3.0
            sdl = -np.ones(N-(n-1))
            sdl[-(n-1):] = -2.0
            sdu = sdl[::-1]
            sdll = np.zeros((n-2)*(n-1))
            sdll[-(n-1):] = 0.2
            sduu = sdll[::-1]
            A2 = scipy.sparse.diags([md, sdl, sdu, sdll, sduu], [0, -(n-1), n-1, -2*(n-1), 2*(n-1)])
            A = scipy.sparse.csc_matrix((A1+A2)*a)
            # print(np.linalg.cond(np.matrix(A.todense())), "condition number velocity")
            A_linop = scipy.sparse.linalg.aslinearoperator(A)
            return [A, A_linop]
        elif velocity == "v":
            # construct A: Av = rhs
            N = (m-1)*n
            # block matrix
            maindiag = np.zeros(n)
            maindiag[:] = 2*b
            maindiag[0] = 2*b+3
            maindiag[-1] = 2*b+3
            sidediagl = -np.ones(n-1)
            sidediagl[-1] = -2.0
            sidediagu = sidediagl[::-1]
            sdl = np.zeros(n-2)
            sdl[-1] = 0.2
            sdu = sdl[::-1]
            B = scipy.sparse.diags([maindiag, sidediagl, sidediagu, sdl, sdu], [0, -1, 1, -2, 2])
            A1 = scipy.sparse.kron(scipy.sparse.eye(m-1, m-1), B)
            sd = -np.ones(N-n)
            A2 = scipy.sparse.diags([sd, sd], [-n, n])
            A = scipy.sparse.csc_matrix((A1+A2)*a)
            # print(np.linalg.cond(np.matrix(A.todense())), "condition number velocity")
            A_linop = scipy.sparse.linalg.aslinearoperator(A)
            return [A, A_linop]

    # the linear system solver for velocity fields (using Biconjugate gradient method)
    # returns VelocityField instances (only interior points are calculated)
    # ALuv = [A, A_linop]: contains the linear system in the sparse matrix and linear operator form
    # rhsuv = [rhsu, rhsv]: right hand side of u and v velocities (they need to be boundary corrected)
    def Linsys_velocity_solver(self, ALuv, rhsuv, tol=1e-12):
        m = self.mesh.m
        n = self.mesh.n
        dx = self.mesh.dx
        dy = self.mesh.dy
        # for square domain only, lx = ly and dx = dy = dh
        dh = dx
        uvl = []
        # only solving the interior points, rhsuv needs to be boundary corrected
        # solve for u and v sequentially
        for i in range(2):
            # for u
            if i == 0:
                N = m*(n-1)
                row = m
                col = n-1
            # for v
            else:
                N = (m-1)*n
                row = m-1
                col = n
            # convert rhs into vector (m*(n-1))
            rhs = rhsuv.get_uv()[i]
            rhs = rhs.reshape(N)
            AL = ALuv[i]
            A = AL[0]
            A_linop = AL[1]
            u = scipy.sparse.linalg.bicg(A=A_linop, b=rhs, tol=tol)
            u = u[0].reshape(row, col)
            uvl.append(u)
            AL = []
            rhs = 0
            row = 0
            col = 0
        # uvstar: u* the intermediate velocity field in the form of VelocityField object
        # note that this is the same as the Gauge variable (m) in the Gauge method
        uvstar = structure3.VelocityField(uvl[0], uvl[1], self.mesh)
        return uvstar

    # the Pressure Poisson linear system
    # returns the Poisson pressure matrix A, preconditioner and its linear operators (if applicable)
    def Poisson_pressure_matrix(self, solve_method):
        m = self.mesh.m
        n = self.mesh.n
        dx = self.mesh.dx
        dy = self.mesh.dy
        # for square domain only, lx = ly and dx = dy = dh
        dh = dx
        # construct matrix A: Ap = rhs, p is pressure (with interior points)
        # Neumann boundary condition is applied
        # A is negative definite so use -A which is positive definite
        # block matrix
        maindiag = np.ones(n)
        maindiag[1:n-1] = (2*maindiag[1:n-1])
        sidediag = np.ones(n-1)
        B = scipy.sparse.diags([maindiag/(dh**2), -sidediag/(dh**2), -sidediag/(dh**2)], [0, -1, 1])
        A1 = scipy.sparse.kron(scipy.sparse.eye(m, n), B)
        A2 = scipy.sparse.kron(B, scipy.sparse.eye(m, n))
        A = A1+A2
        A = scipy.sparse.csc_matrix(A)
        # add the zero integral constraint
        # integration matrix
        C = self.mesh.integrate(integration_method=self.integration_method)
        A = scipy.sparse.hstack([A, scipy.sparse.csc_matrix(np.matrix(C).T)])
        # add one zero column to make sure A is square
        C = np.append(C, 0)
        A = scipy.sparse.vstack([A, scipy.sparse.csc_matrix(C)])
        A = scipy.sparse.csc_matrix(A)
        # print(np.linalg.cond(A), 'condition number of the Poisson pressure linear system solver')
        # Biconjugate gradient method
        if solve_method == "ILU":
            A_linop = scipy.sparse.linalg.aslinearoperator(A)
            # MMD_AT_PLUS_A, MMD_ATA, COLAMD define different types of preconditioners
            # for more detail, see scipy.sparse.linalg.spilu documentation
            A_ILU = slg.spilu(A, permc_spec='MMD_AT_PLUS_A')
            # A_ILU = slg.spilu(A, permc_spec='MMD_ATA')
            # A_ILU = slg.spilu(A, permc_spec='COLAMD')
            M = slg.LinearOperator(shape=(m*n+1, m*n+1), matvec=A_ILU.solve)
            return [A_linop, M, A]
        # direct solve
        elif solve_method == "DIR":
            return A

    # Solves the Pressure Poisson problem using either Biconjugate gradient method
    # (with ILU factorisation preconditioner) or direct solve
    def Poisson_pressure_solver(self, rhs, solve_method, precd_AL, tol=1e-12):
        m = self.mesh.m
        n = self.mesh.n
        dt = self.mesh.dt
        dx = self.mesh.dx
        dy = self.mesh.dy
        # for square domain only, lx = ly and dx = dy = dh
        dh = dx
        # convert rhs into vector (m*n)
        rhs = rhs.get_value()
        rhs = (-rhs).reshape(m*n)
        # add the zero integration constraint to the right hand side
        rhs = np.hstack([rhs, np.zeros(1)])
        N = m*n
        # Biconjugate gradient method
        if solve_method == "ILU":
            # use Incomplete LU to find a preconditioner
            A_linop = precd_AL[0]
            M = precd_AL[1]
            A = precd_AL[2]
            p = scipy.sparse.linalg.bicgstab(A=A_linop, b=rhs, tol=tol, maxiter=N, M=M)[0]
            Ap = A*np.matrix(np.ravel(p)).T
            r = rhs - np.array(Ap.T)
            print(np.max(np.abs(r)), "residual")
            print(p[-1], 'lambda constant')
            p = p[:-1]
            p = p.reshape(m, n)
            p = structure3.CentredPotential(p, self.mesh)
            print(self.mesh.integrate(p, self.integration_method), 'integral of phi')
            # returns p (phi) variable in the form of CentredPotential object
            return p
        # direct solve
        elif solve_method == "DIR":
            A = precd_AL
            p = scipy.sparse.linalg.spsolve(A=A, b=rhs)
            Ap = A*np.matrix(np.ravel(p)).T
            r = rhs - np.array(Ap.T)
            print(np.max(np.abs(r)), "residual")
            print(p[-1], 'lambda constant')
            p = p[:-1]
p = p.reshape(m,n) print np.sum(p), 'integral of phi' p = structure3.CentredPotential(p, self.mesh) # returns p (phi) variable in the form of CentredPotential object return p # below constructs the 4 different Projection method solvers (Gauge, Alg 1, Alg 2, Alg 3) class Gauge_method(): '''This class constructs the Gauge method solver''' def __init__(self, Re, mesh): self.Re = Re self.n = mesh.n self.m = mesh.m self.xu = mesh.xu self.yu = mesh.yu self.xv = mesh.xv self.yv = mesh.yv self.gds = mesh.gds self.sdomain = mesh.sdomain self.tdomain = mesh.tdomain self.Tn = mesh.Tn self.t0 = mesh.tdomain[0] self.dt = mesh.dt self.dx = mesh.dx self.dy = mesh.dy self.mesh = mesh # initial set up def setup(self, InCond_uv_init, Boundary_uv_type, solve_method='ILU', integration_method='Riemann'): ## InCond_uv: specifies the velocity initial condition linsys_solver = LinearSystem_solver(self.Re, self.mesh, integration_method) phi_mat = linsys_solver.Poisson_pressure_matrix(solve_method) m1_mat = linsys_solver.Linsys_velocity_matrix("u") m2_mat = linsys_solver.Linsys_velocity_matrix("v") InCond_uvcmp = structure3.VelocityComplete(self.mesh, InCond_uv_init, 0).complete(Boundary_uv_type) uv_cmp = copy.copy(InCond_uvcmp) mn_cmp = copy.copy(uv_cmp) initial_setup_parameters = [phi_mat, m1_mat, m2_mat, InCond_uvcmp, uv_cmp, mn_cmp, integration_method, solve_method] return initial_setup_parameters def iterative_solver(self, Boundary_uv_type, Tn, initial_setup_parameters): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt Re = self.Re phi_mat = initial_setup_parameters[0] m1_mat = initial_setup_parameters[1] m2_mat = initial_setup_parameters[2] # uvold_cmp: u and v velocity fields at time n-1 # cmp: in the completed format (interior + boundary + ghost nodes) uvold_cmp = initial_setup_parameters[3] # uv_cmp: u and v at time n uv_cmp = initial_setup_parameters[4] # Gauge variable at time n (in the completed format) mn_cmp = initial_setup_parameters[5] integration_method = initial_setup_parameters[6] solve_method = initial_setup_parameters[7] # int: interior points only mn_int = structure3.VelocityField(mn_cmp.get_int_uv()[0], mn_cmp.get_int_uv()[1], self.mesh) # phiold: phi variable at time n-1 phiold = np.zeros((m,n)) phiold_cmp = structure3.CentredPotential(phiold, self.mesh).complete() # phin_cmp: phi variable at time n phin_cmp = np.copy(phiold_cmp) print Tn, "number of iterations" # main iterative solver test_problem_name = Boundary_uv_type for t in xrange(Tn): forcing_term = structure3.Forcing_term(self.mesh, test_problem_name, t+0.5).select_forcing_term() convc_uv = uv_cmp.non_linear_convection() preconvc_uv = uvold_cmp.non_linear_convection() diff_mn = mn_cmp.diffusion() if Boundary_uv_type == 'periodic_forcing_1': # Stokes problem rhs_mstar = mn_int + dt*((1.0/(2*Re))*diff_mn + forcing_term) elif Boundary_uv_type == 'periodic_forcing_2': # Stokes problem rhs_mstar = mn_int + dt*((1.0/(2*Re))*diff_mn + forcing_term) else: # full Navier Stokes problem rhs_mstar = mn_int + dt*(-1.5*convc_uv + 0.5*preconvc_uv + (1.0/(2*Re))*diff_mn + forcing_term) # calculate the approximation to phi at time n+1 gradphiuv = self.gradphi_app(phiold_cmp, phin_cmp) # boundary correction step rhs_mstarcd = self.correct_boundary(rhs_mstar, t+1, Boundary_uv_type, gradphiuv) # solving for the Gauge variable m Linsys_solve = LinearSystem_solver(Re, self.mesh) mstar = Linsys_solve.Linsys_velocity_solver([m1_mat,m2_mat], rhs_mstarcd) mstarcmp1, uvbnd_value = structure3.VelocityComplete(self.mesh, [mstar.get_uv()[0], 
mstar.get_uv()[1]], t+1).complete(Boundary_uv_type, return_bnd=True) div_mstar = mstarcmp1.divergence() # solving for the phi variable phi = Linsys_solve.Poisson_pressure_solver(div_mstar, solve_method, phi_mat) print solve_method if t == 0: #div_mn = np.zeros((m,n)) div_mn = div_mstar else: div_mn = mn_cmp.divergence() phiacd = phi - phin_cmp[1:m+1,1:n+1] # pressure correction step p = phiacd/dt - 1.0/(2*Re)*(div_mstar+div_mn) print self.mesh.integrate(p, integration_method), 'integral of p' gradp = p.gradient() phiold_cmp = np.copy(phin_cmp) phin_cmp = np.copy(phi.complete()) # velocity update step gradphi = phi.gradient() uvn_int = mstar - gradphi uvold_cmp = copy.copy(uv_cmp) uv_cmp = structure3.VelocityComplete(self.mesh, [uvn_int.get_uv()[0], uvn_int.get_uv()[1]], t+1).complete(Boundary_uv_type) # complete mstar mn_cmp = self.complete_mstar(mstar, uvbnd_value, phin_cmp) mn_int = structure3.VelocityField(mn_cmp.get_int_uv()[0], mn_cmp.get_int_uv()[1], self.mesh) print "iteration "+str(t) return uv_cmp, p, gradp ## this function calculates graident of phi at time n+1 # using second order approximation to gradient of phi^(n+1). Used in correcting m* # phi^{n+1} appro 2*phi^n - phi^{n-1} def gradphi_app(self, phiold_cmp, phin_cmp): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt phiapp_cmp = 2*phin_cmp - phiold_cmp gradphiu = (phiapp_cmp[:,1:n+2] - phiapp_cmp[:,0:n+1])/dx gradphiv = (phiapp_cmp[1:m+2,:] - phiapp_cmp[0:m+1,:])/dy # obtain gradphiu North and South boundary by cubic interpolation gradphiuN = 5.0/16*(gradphiu[0,:] +3*gradphiu[1,:] - gradphiu[2,:]+0.2*gradphiu[3,:]) gradphiuS = 5.0/16*(gradphiu[-1,:] +3*gradphiu[-2,:] - gradphiu[-3,:]+0.2*gradphiu[-4,:]) gradphiu[0,:] = gradphiuN gradphiu[-1,:] = gradphiuS # obtain gradphiv West and East boundary by cubic interpolation gradphivW = 5.0/16*(gradphiv[:,0] +3*gradphiv[:,1] - gradphiv[:,2]+0.2*gradphiv[:,3]) gradphivE = 5.0/16*(gradphiv[:,-1] +3*gradphiv[:,-2] - gradphiv[:,-3]+0.2*gradphiv[:,-4]) gradphiv[:,0] = gradphivW gradphiv[:,-1] = gradphivE return [gradphiu, gradphiv] # boundary correction used in solving for Gauge variable def correct_boundary(self, rhs_mstar, t, Boundary_type, gradphiuv): # rhsuv is a VelocityField object with dimension interior u and v [(m*(n-1), (m-1)*n)] n = self.n m = self.m Re = self.Re dx = self.dx dy = self.dy dt = self.dt lam = dt/(2.0*Re) VC = structure3.VelocityComplete(self.mesh, [rhs_mstar.get_uv()[0], rhs_mstar.get_uv()[1]], t) gradphiu = gradphiuv[0] gradphiv = gradphiuv[1] if Boundary_type == "driven_cavity": uN = VC.bnd_driven_cavity('u')['N'] uS = VC.bnd_driven_cavity('u')['S'] uW = VC.bnd_driven_cavity('u')['W'] uE = VC.bnd_driven_cavity('u')['E'] vN = VC.bnd_driven_cavity('v')['N'] vS = VC.bnd_driven_cavity('v')['S'] vW = VC.bnd_driven_cavity('v')['W'] vE = VC.bnd_driven_cavity('v')['E'] elif Boundary_type == "Taylor": uN = VC.bnd_Taylor('u')['N'][1:n] uS = VC.bnd_Taylor('u')['S'][1:n] uW = VC.bnd_Taylor('u')['W'] uE = VC.bnd_Taylor('u')['E'] vN = VC.bnd_Taylor('v')['N'] vS = VC.bnd_Taylor('v')['S'] vW = VC.bnd_Taylor('v')['W'][1:m] vE = VC.bnd_Taylor('v')['E'][1:m] elif Boundary_type == "periodic_forcing_1": uN = VC.bnd_forcing_1('u')['N'][1:n] uS = VC.bnd_forcing_1('u')['S'][1:n] uW = VC.bnd_forcing_1('u')['W'] uE = VC.bnd_forcing_1('u')['E'] vN = VC.bnd_forcing_1('v')['N'] vS = VC.bnd_forcing_1('v')['S'] vW = VC.bnd_forcing_1('v')['W'][1:m] vE = VC.bnd_forcing_1('v')['E'][1:m] elif Boundary_type == "periodic_forcing_2": uN = VC.bnd_forcing_2('u')['N'][1:n] uS = 
VC.bnd_forcing_2('u')['S'][1:n] uW = VC.bnd_forcing_2('u')['W'] uE = VC.bnd_forcing_2('u')['E'] vN = VC.bnd_forcing_2('v')['N'] vS = VC.bnd_forcing_2('v')['S'] vW = VC.bnd_forcing_2('v')['W'][1:m] vE = VC.bnd_forcing_2('v')['E'][1:m] gradphiuW = gradphiu[1:m+1,0] gradphiuE = gradphiu[1:m+1,-1] gradphiuN = gradphiu[0,1:n] gradphiuS = gradphiu[-1,1:n] # North and South boundary uNbc = uN + gradphiuN uSbc = uS + gradphiuS resu1 = np.zeros((m,n-1)) resu2 = np.zeros((m,n-1)) resu1[0,:] = (16.0/5)*(uNbc)*(lam/(dy**2)) resu1[-1,:] = (16.0/5)*(uSbc)*(lam/(dy**2)) # West and East boundary uWbc = uW uEbc = uE resu2[:,0] = (uWbc)*(lam/(dx**2)) resu2[:,-1] = (uEbc)*(lam/(dx**2)) resu = resu1+resu2 resv1 = np.zeros((m-1,n)) resv2 = np.zeros((m-1,n)) gradphivN = gradphiv[0,1:n+1] gradphivS = gradphiv[-1,1:n+1] gradphivW = gradphiv[1:m,0] gradphivE = gradphiv[1:m,-1] # North and South boundary vNbc = vN vSbc = vS resv2[0,:] = vNbc*(lam/(dy**2)) resv2[-1,:] = vSbc*(lam/(dy**2)) # West and East boundary vWbc = vW + gradphivW vEbc = vE + gradphivE resv1[:,0] = (16.0/5)*vWbc*(lam/(dx**2)) resv1[:,-1] = (16.0/5)*vEbc*(lam/(dx**2)) resv = resv1+resv2 rhs_mstarcd = rhs_mstar + [resu, resv] return rhs_mstarcd # completing the Gauge variable at time n+1 def complete_mstar(self, mstar_int, uvbnd_value, phiacd_cmp): # complete m* using phi^(n+1) n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt uN, uS, uW, uE = uvbnd_value[0] vN, vS, vW, vE = uvbnd_value[1] m1star_cmp = np.zeros((m+2,n+1)) m2star_cmp = np.zeros((m+1,n+2)) m1star_cmp[1:m+1,1:n] = mstar_int.get_uv()[0] m2star_cmp[1:m,1:n+1] = mstar_int.get_uv()[1] m1star_cmp[1:m+1,0] = uW m1star_cmp[1:m+1,-1] = uE m2star_cmp[0,1:n+1] = vN m2star_cmp[-1,1:n+1] = vS gdphi_cmpu = (phiacd_cmp[:,1:n+2] - phiacd_cmp[:,0:n+1])/dx gdphi_cmpuN = 5.0/16*(gdphi_cmpu[0,:] +3*gdphi_cmpu[1,:] - gdphi_cmpu[2,:]+0.2*gdphi_cmpu[3,:]) gdphi_cmpuS = 5.0/16*(gdphi_cmpu[-1,:] +3*gdphi_cmpu[-2,:] - gdphi_cmpu[-3,:]+0.2*gdphi_cmpu[-4,:]) # use phi^{n+1} just computed m1starN = uN + gdphi_cmpuN m1starS = uS + gdphi_cmpuS m1star_cmp[0,:] = (16.0/5)*m1starN - 3*m1star_cmp[1,:] + m1star_cmp[2,:] - 0.2*m1star_cmp[3,:] m1star_cmp[-1,:] = (16.0/5)*m1starS - 3*m1star_cmp[-2,:] + m1star_cmp[-3,:] - 0.2*m1star_cmp[-4,:] gdphi_cmpv = (phiacd_cmp[1:m+2,:] - phiacd_cmp[0:m+1,:])/dy gdphi_cmpvW = 5.0/16*(gdphi_cmpv[:,0] +3*gdphi_cmpv[:,1] - gdphi_cmpv[:,2]+0.2*gdphi_cmpv[:,3]) gdphi_cmpvE = 5.0/16*(gdphi_cmpv[:,-1] +3*gdphi_cmpv[:,-2] - gdphi_cmpv[:,-3]+0.2*gdphi_cmpv[:,-4]) m2starW = vW + gdphi_cmpvW m2starE = vE + gdphi_cmpvE m2star_cmp[:,0] = (16.0/5)*m2starW - 3*m2star_cmp[:,1] + m2star_cmp[:,2] - 0.2*m2star_cmp[:,3] m2star_cmp[:,-1] = (16.0/5)*m2starE - 3*m2star_cmp[:,-2] + m2star_cmp[:,-3] - 0.2*m2star_cmp[:,-4] return structure3.VelocityField(m1star_cmp, m2star_cmp, self.mesh) class Alg1_method(): '''This class constructs the Alg 1 method solver Note that this solver is inherently first order accurate in time for the pressure variable because its pressure update formula limits the accuracy''' def __init__(self, Re, mesh): self.Re = Re self.n = mesh.n self.m = mesh.m self.xu = mesh.xu self.yu = mesh.yu self.xv = mesh.xv self.yv = mesh.yv self.gds = mesh.gds self.sdomain = mesh.sdomain self.tdomain = mesh.tdomain self.Tn = mesh.Tn self.t0 = mesh.tdomain[0] self.dt = mesh.dt self.dx = mesh.dx self.dy = mesh.dy self.mesh = mesh # initial set up def setup(self, InCond, Boundary_uv_type, solve_method='ILU', integration_method='Riemann'): ## InCond_uv: specifies the velocity initial condition 
linsys_solver = LinearSystem_solver(self.Re, self.mesh, integration_method) phi_mat = linsys_solver.Poisson_pressure_matrix(solve_method) u_mat = linsys_solver.Linsys_velocity_matrix("u") v_mat = linsys_solver.Linsys_velocity_matrix("v") InCond_uvcmp = structure3.VelocityComplete(self.mesh, InCond[0], 0).complete(Boundary_uv_type) uvn_cmp = copy.copy(InCond_uvcmp) InCond_p = structure3.CentredPotential(InCond[1], self.mesh) initial_setup_parameters = [phi_mat, u_mat, v_mat, InCond_uvcmp, uvn_cmp, InCond_p, integration_method, solve_method] return initial_setup_parameters def iterative_solver(self, Boundary_uv_type, Tn, initial_setup_parameters): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt Re = self.Re phi_mat = initial_setup_parameters[0] u_mat = initial_setup_parameters[1] v_mat = initial_setup_parameters[2] # uvold_cmp: u and v velocity fields at time n-1 # cmp: in the completed format (interior + boundary + ghost nodes) uvold_cmp = initial_setup_parameters[3] # uvn_cmp: u and v at time n uvn_cmp = initial_setup_parameters[4] pold = initial_setup_parameters[5] integration_method = initial_setup_parameters[6] solve_method = initial_setup_parameters[7] pn = copy.copy(pold) print Tn, "number of iterations" # main iterative solver test_problem_name = Boundary_uv_type for t in xrange(Tn): forcing_term = structure3.Forcing_term(self.mesh,test_problem_name,t+0.5).select_forcing_term() convc_uv = uvn_cmp.non_linear_convection() preconvc_uv = uvold_cmp.non_linear_convection() diff_uvn = uvn_cmp.diffusion() gradp_uvn = pn.gradient() uvn_int = structure3.VelocityField(uvn_cmp.get_int_uv()[0], uvn_cmp.get_int_uv()[1], self.mesh) if Boundary_uv_type == 'periodic_forcing_1': # Stokes problem rhs_uvstar = uvn_int + dt*(- gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) elif Boundary_uv_type == 'periodic_forcing_2': # Stokes problem rhs_uvstar = uvn_int + dt*(- gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) else: # full Navier Stokes problem rhs_uvstar = uvn_int + dt*(-1.5*convc_uv + 0.5*preconvc_uv - gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) # boundary correction step rhs_uvstarcd = self.correct_boundary(rhs_uvstar, t+1, Boundary_uv_type) # solving for the intermediate velocity variable uv* Linsys_solve = LinearSystem_solver(Re, self.mesh) uvstar = Linsys_solve.Linsys_velocity_solver([u_mat,v_mat], rhs_uvstarcd) uvstarcmp, uvbnd_value = structure3.VelocityComplete(self.mesh, [uvstar.get_uv()[0], uvstar.get_uv()[1]], t+1).complete(Boundary_uv_type, return_bnd=True) div_uvstar = uvstarcmp.divergence() # solving for the phi variable phi = Linsys_solve.Poisson_pressure_solver(div_uvstar/dt, solve_method, phi_mat) # pressure correction step # note this formula makes the perssure variable first order accurate in time p = pn + phi print self.mesh.integrate(p, integration_method), 'integral of p' gradp = p.gradient() pold = copy.copy(pn) pn = copy.copy(p) # velocity update step gradphi = phi.gradient() uvn_int = uvstar - dt*gradphi uvold_cmp = copy.copy(uvn_cmp) uvn_cmp = structure3.VelocityComplete(self.mesh, [uvn_int.get_uv()[0], uvn_int.get_uv()[1]], t+1).complete(Boundary_uv_type) print "iteration "+str(t) return uvn_cmp, p, gradp # boundary correction def correct_boundary(self, rhs_uvstar, t, Boundary_type): # rhsuv is a VelocityField object with dimension interior u and v [(m*(n-1), (m-1)*n)] n = self.n m = self.m Re = self.Re dx = self.dx dy = self.dy dt = self.dt lam = dt/(2.0*Re) VC = structure3.VelocityComplete(self.mesh, [rhs_uvstar.get_uv()[0], rhs_uvstar.get_uv()[1]], 
t) if Boundary_type == "driven_cavity": uN = VC.bnd_driven_cavity('u')['N'] uS = VC.bnd_driven_cavity('u')['S'] uW = VC.bnd_driven_cavity('u')['W'] uE = VC.bnd_driven_cavity('u')['E'] vN = VC.bnd_driven_cavity('v')['N'] vS = VC.bnd_driven_cavity('v')['S'] vW = VC.bnd_driven_cavity('v')['W'] vE = VC.bnd_driven_cavity('v')['E'] elif Boundary_type == "Taylor": uN = VC.bnd_Taylor('u')['N'][1:n] uS = VC.bnd_Taylor('u')['S'][1:n] uW = VC.bnd_Taylor('u')['W'] uE = VC.bnd_Taylor('u')['E'] vN = VC.bnd_Taylor('v')['N'] vS = VC.bnd_Taylor('v')['S'] vW = VC.bnd_Taylor('v')['W'][1:m] vE = VC.bnd_Taylor('v')['E'][1:m] elif Boundary_type == "periodic_forcing_1": uN = VC.bnd_forcing_1('u')['N'][1:n] uS = VC.bnd_forcing_1('u')['S'][1:n] uW = VC.bnd_forcing_1('u')['W'] uE = VC.bnd_forcing_1('u')['E'] vN = VC.bnd_forcing_1('v')['N'] vS = VC.bnd_forcing_1('v')['S'] vW = VC.bnd_forcing_1('v')['W'][1:m] vE = VC.bnd_forcing_1('v')['E'][1:m] elif Boundary_type == "periodic_forcing_2": uN = VC.bnd_forcing_2('u')['N'][1:n] uS = VC.bnd_forcing_2('u')['S'][1:n] uW = VC.bnd_forcing_2('u')['W'] uE = VC.bnd_forcing_2('u')['E'] vN = VC.bnd_forcing_2('v')['N'] vS = VC.bnd_forcing_2('v')['S'] vW = VC.bnd_forcing_2('v')['W'][1:m] vE = VC.bnd_forcing_2('v')['E'][1:m] # North and South boundary resu1 = np.zeros((m,n-1)) resu2 = np.zeros((m,n-1)) resu1[0,:] = (16.0/5)*uN*(lam/(dy**2)) resu1[-1,:] = (16.0/5)*uS*(lam/(dy**2)) # West and East boundary resu2[:,0] = uW*(lam/(dx**2)) resu2[:,-1] = uE*(lam/(dx**2)) resu = resu1+resu2 resv1 = np.zeros((m-1,n)) resv2 = np.zeros((m-1,n)) # North and South boundary resv2[0,:] = vN*(lam/(dy**2)) resv2[-1,:] = vS*(lam/(dy**2)) # West and East boundary resv1[:,0] = (16.0/5)*vW*(lam/(dx**2)) resv1[:,-1] = (16.0/5)*vE*(lam/(dx**2)) resv = resv1+resv2 rhs_uvstarcd = rhs_uvstar + [resu, resv] return rhs_uvstarcd class Alg2_method(): '''This class constructs the Alg 2 method solver''' def __init__(self, Re, mesh): self.Re = Re self.n = mesh.n self.m = mesh.m self.xu = mesh.xu self.yu = mesh.yu self.xv = mesh.xv self.yv = mesh.yv self.gds = mesh.gds self.sdomain = mesh.sdomain self.tdomain = mesh.tdomain self.Tn = mesh.Tn self.t0 = mesh.tdomain[0] self.dt = mesh.dt self.dx = mesh.dx self.dy = mesh.dy self.mesh = mesh # initial set up def setup(self, InCond, Boundary_uv_type, solve_method='ILU', integration_method='Riemann'): ## InCond_uv: specifies the velocity initial condition linsys_solver = LinearSystem_solver(self.Re, self.mesh, integration_method) phi_mat = linsys_solver.Poisson_pressure_matrix(solve_method) u_mat = linsys_solver.Linsys_velocity_matrix("u") v_mat = linsys_solver.Linsys_velocity_matrix("v") InCond_uvcmp = structure3.VelocityComplete(self.mesh, InCond[0], 0).complete(Boundary_uv_type) uvn_cmp = copy.copy(InCond_uvcmp) InCond_p = structure3.CentredPotential(InCond[1], self.mesh) initial_setup_parameters = [phi_mat, u_mat, v_mat, InCond_uvcmp, uvn_cmp, InCond_p, integration_method, solve_method] return initial_setup_parameters def iterative_solver(self, Boundary_uv_type, Tn, initial_setup_parameters): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt Re = self.Re phi_mat = initial_setup_parameters[0] u_mat = initial_setup_parameters[1] v_mat = initial_setup_parameters[2] # uvold_cmp: u and v velocity fields at time n-1 # cmp: in the completed format (interior + boundary + ghost nodes) uvold_cmp = initial_setup_parameters[3] # uvn_cmp: u and v at time n uvn_cmp = initial_setup_parameters[4] pold = initial_setup_parameters[5] integration_method = 
initial_setup_parameters[6] solve_method = initial_setup_parameters[7] pn = copy.copy(pold) print Tn, "number of iterations" # main iterative solver test_problem_name = Boundary_uv_type for t in xrange(Tn): forcing_term = structure3.Forcing_term(self.mesh,test_problem_name,t+0.5).select_forcing_term() convc_uv = uvn_cmp.non_linear_convection() preconvc_uv = uvold_cmp.non_linear_convection() diff_uvn = uvn_cmp.diffusion() gradp_uvn = pn.gradient() uvn_int = structure3.VelocityField(uvn_cmp.get_int_uv()[0], uvn_cmp.get_int_uv()[1], self.mesh) if Boundary_uv_type == 'periodic_forcing_1': # Stokes problem rhs_uvstar = uvn_int + dt*(- gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) elif Boundary_uv_type == 'periodic_forcing_2': # Stokes problem rhs_uvstar = uvn_int + dt*(- gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) else: # full Navier Stokes problem rhs_uvstar = uvn_int + dt*(-1.5*convc_uv + 0.5*preconvc_uv - gradp_uvn + (1.0/(2*Re))*diff_uvn + forcing_term) # boundary correction step rhs_uvstarcd = self.correct_boundary(rhs_uvstar, t+1, Boundary_uv_type) # solving for the intermediate velocity variable uv* Linsys_solve = LinearSystem_solver(Re, self.mesh) uvstar = Linsys_solve.Linsys_velocity_solver([u_mat,v_mat], rhs_uvstarcd) uvstarcmp, uvbnd_value = structure3.VelocityComplete(self.mesh, [uvstar.get_uv()[0], uvstar.get_uv()[1]], t+1).complete(Boundary_uv_type, return_bnd=True) div_uvstar = uvstarcmp.divergence() # solving for the phi variable phi = Linsys_solve.Poisson_pressure_solver(div_uvstar/dt, solve_method, phi_mat) # pressure correction step p = pn + phi - div_uvstar/(2*Re) print self.mesh.integrate(p, integration_method), 'integral of p' gradp = p.gradient() pold = copy.copy(pn) pn = copy.copy(p) # velocity update stemp gradphi = phi.gradient() uvn_int = uvstar - dt*gradphi uvold_cmp = copy.copy(uvn_cmp) uvn_cmp = structure3.VelocityComplete(self.mesh, [uvn_int.get_uv()[0], uvn_int.get_uv()[1]], t+1).complete(Boundary_uv_type) print "iteration "+str(t) return uvn_cmp, p, gradp # boundary correction def correct_boundary(self, rhs_uvstar, t, Boundary_type): # rhsuv is a VelocityField object with dimension interior u and v [(m*(n-1), (m-1)*n)] n = self.n m = self.m Re = self.Re dx = self.dx dy = self.dy dt = self.dt lam = dt/(2.0*Re) VC = structure3.VelocityComplete(self.mesh, [rhs_uvstar.get_uv()[0], rhs_uvstar.get_uv()[1]], t) if Boundary_type == "driven_cavity": uN = VC.bnd_driven_cavity('u')['N'] uS = VC.bnd_driven_cavity('u')['S'] uW = VC.bnd_driven_cavity('u')['W'] uE = VC.bnd_driven_cavity('u')['E'] vN = VC.bnd_driven_cavity('v')['N'] vS = VC.bnd_driven_cavity('v')['S'] vW = VC.bnd_driven_cavity('v')['W'] vE = VC.bnd_driven_cavity('v')['E'] elif Boundary_type == "Taylor": uN = VC.bnd_Taylor('u')['N'][1:n] uS = VC.bnd_Taylor('u')['S'][1:n] uW = VC.bnd_Taylor('u')['W'] uE = VC.bnd_Taylor('u')['E'] vN = VC.bnd_Taylor('v')['N'] vS = VC.bnd_Taylor('v')['S'] vW = VC.bnd_Taylor('v')['W'][1:m] vE = VC.bnd_Taylor('v')['E'][1:m] elif Boundary_type == "periodic_forcing_1": uN = VC.bnd_forcing_1('u')['N'][1:n] uS = VC.bnd_forcing_1('u')['S'][1:n] uW = VC.bnd_forcing_1('u')['W'] uE = VC.bnd_forcing_1('u')['E'] vN = VC.bnd_forcing_1('v')['N'] vS = VC.bnd_forcing_1('v')['S'] vW = VC.bnd_forcing_1('v')['W'][1:m] vE = VC.bnd_forcing_1('v')['E'][1:m] elif Boundary_type == "periodic_forcing_2": uN = VC.bnd_forcing_2('u')['N'][1:n] uS = VC.bnd_forcing_2('u')['S'][1:n] uW = VC.bnd_forcing_2('u')['W'] uE = VC.bnd_forcing_2('u')['E'] vN = VC.bnd_forcing_2('v')['N'] vS = 
VC.bnd_forcing_2('v')['S'] vW = VC.bnd_forcing_2('v')['W'][1:m] vE = VC.bnd_forcing_2('v')['E'][1:m] # North and South boundary resu1 = np.zeros((m,n-1)) resu2 = np.zeros((m,n-1)) resu1[0,:] = (16.0/5)*uN*(lam/(dy**2)) resu1[-1,:] = (16.0/5)*uS*(lam/(dy**2)) # West and East boundary resu2[:,0] = uW*(lam/(dx**2)) resu2[:,-1] = uE*(lam/(dx**2)) resu = resu1+resu2 resv1 = np.zeros((m-1,n)) resv2 = np.zeros((m-1,n)) # North and South boundary resv2[0,:] = vN*(lam/(dy**2)) resv2[-1,:] = vS*(lam/(dy**2)) # West and East boundary resv1[:,0] = (16.0/5)*vW*(lam/(dx**2)) resv1[:,-1] = (16.0/5)*vE*(lam/(dx**2)) resv = resv1+resv2 rhs_uvstarcd = rhs_uvstar + [resu, resv] return rhs_uvstarcd class Alg3_method(): '''This class constructs the Alg2 method (pressure free) solver''' def __init__(self, Re, mesh): self.Re = Re self.n = mesh.n self.m = mesh.m self.xu = mesh.xu self.yu = mesh.yu self.xv = mesh.xv self.yv = mesh.yv self.gds = mesh.gds self.sdomain = mesh.sdomain self.tdomain = mesh.tdomain self.Tn = mesh.Tn self.t0 = mesh.tdomain[0] self.dt = mesh.dt self.dx = mesh.dx self.dy = mesh.dy self.mesh = mesh # initial set up def setup(self, InCond_uv_init, Boundary_uv_type, solve_method='ILU', integration_method='Riemann'): ## InCond_uv: specifies the velocity initial condition linsys_solver = LinearSystem_solver(self.Re, self.mesh) phi_mat = linsys_solver.Poisson_pressure_matrix(solve_method) u_mat = linsys_solver.Linsys_velocity_matrix("u") v_mat = linsys_solver.Linsys_velocity_matrix("v") InCond_uvcmp = structure3.VelocityComplete(self.mesh, InCond_uv_init, 0).complete(Boundary_uv_type) uv_cmp = copy.copy(InCond_uvcmp) initial_setup_parameters = [phi_mat, u_mat, v_mat, InCond_uvcmp, uv_cmp, integration_method, solve_method] return initial_setup_parameters def iterative_solver(self, Boundary_uv_type, Tn, initial_setup_parameters): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt Re = self.Re phi_mat = initial_setup_parameters[0] u_mat = initial_setup_parameters[1] v_mat = initial_setup_parameters[2] # uvold_cmp: u and v velocity fields at time n-1 # cmp: in the completed format (interior + boundary + ghost nodes) uvold_cmp = initial_setup_parameters[3] # uvn_cmp: u and v at time n uvn_cmp = initial_setup_parameters[4] integration_method = initial_setup_parameters[5] solve_method = initial_setup_parameters[6] # int: interior points only uvn_int = structure3.VelocityField(uvn_cmp.get_int_uv()[0], uvn_cmp.get_int_uv()[1], self.mesh) # phiold: phi variable at time n-1 phiold = np.zeros((m,n)) phiold_cmp = structure3.CentredPotential(phiold, self.mesh).complete() # phin_cmp: phi variable at time n phin_cmp = np.copy(phiold_cmp) print Tn, "number of iterations" # main iterative solver test_problem_name = Boundary_uv_type for t in xrange(Tn): forcing_term = structure3.Forcing_term(self.mesh,test_problem_name,t+0.5).select_forcing_term() convc_uv = uvn_cmp.non_linear_convection() preconvc_uv = uvold_cmp.non_linear_convection() diff_uvn = uvn_cmp.diffusion() if Boundary_uv_type == 'periodic_forcing_1': # Stokes problem rhs_uvstar = uvn_int + dt*((1.0/(2*Re))*diff_uvn + forcing_term) elif Boundary_uv_type == 'periodic_forcing_2': # Stokes problem rhs_uvstar = uvn_int + dt*((1.0/(2*Re))*diff_uvn + forcing_term) else: # full Navier Stokes problem rhs_uvstar = uvn_int + dt*(-1.5*convc_uv + 0.5*preconvc_uv + (1.0/(2*Re))*diff_uvn + forcing_term) # calculate the approximation to phi at time n+1 gradphiuv = self.gradphi_app(phiold_cmp, phin_cmp) # boundary correction step rhs_uvstarcd = 
self.correct_boundary(rhs_uvstar, t+1, Boundary_uv_type, gradphiuv) # solving for the intermediate velocity variable uv* Linsys_solve = LinearSystem_solver(Re, self.mesh) uvstar = Linsys_solve.Linsys_velocity_solver([u_mat,v_mat], rhs_uvstarcd) uvstarcmp = structure3.VelocityComplete(self.mesh, [uvstar.get_uv()[0], uvstar.get_uv()[1]], t+1).complete(Boundary_uv_type) div_uvstar = uvstarcmp.divergence() # solving for the phi variable phi = Linsys_solve.Poisson_pressure_solver(div_uvstar/dt, solve_method, phi_mat) # pressure correction step p = phi - div_uvstar/(2*Re) print self.mesh.integrate(p, integration_method), 'integral of p' gradp = p.gradient() phiold_cmp = np.copy(phin_cmp) phin_cmp = np.copy(phi.complete()) # velocity update stemp gradphi = phi.gradient() uvn_int = uvstar - dt*gradphi uvold_cmp = copy.copy(uvn_cmp) uvn_cmp = structure3.VelocityComplete(self.mesh, [uvn_int.get_uv()[0], uvn_int.get_uv()[1]], t+1).complete(Boundary_uv_type) print "iteration "+str(t) #break return uvn_cmp, p, gradp ## this function calculates graident of phi at time n+1 # using second order approximation to gradient of phi^(n+1). Used in correcting uv* # phi^{n+1} appro 2*phi^n - phi^{n-1} def gradphi_app(self, phiold_cmp, phin_cmp): n = self.n m = self.m dx = self.dx dy = self.dy dt = self.dt phiapp_cmp = 2*phin_cmp - phiold_cmp gradphiu = (phiapp_cmp[:,1:n+2] - phiapp_cmp[:,0:n+1])/dx gradphiv = (phiapp_cmp[1:m+2,:] - phiapp_cmp[0:m+1,:])/dy # obtain gradphiu North and South boundary by cubic interpolation gradphiuN = 5.0/16*(gradphiu[0,:] +3*gradphiu[1,:] - gradphiu[2,:]+0.2*gradphiu[3,:]) gradphiuS = 5.0/16*(gradphiu[-1,:] +3*gradphiu[-2,:] - gradphiu[-3,:]+0.2*gradphiu[-4,:]) gradphiu[0,:] = gradphiuN gradphiu[-1,:] = gradphiuS # obtain gradphiv West and East boundary by cubic interpolation gradphivW = 5.0/16*(gradphiv[:,0] +3*gradphiv[:,1] - gradphiv[:,2]+0.2*gradphiv[:,3]) gradphivE = 5.0/16*(gradphiv[:,-1] +3*gradphiv[:,-2] - gradphiv[:,-3]+0.2*gradphiv[:,-4]) gradphiv[:,0] = gradphivW gradphiv[:,-1] = gradphivE return [gradphiu, gradphiv] # boundary correction used in solving for the intermediate velocity field (uv*) def correct_boundary(self, rhs_uvstar, t, Boundary_type, gradphiuv): # rhsuv is a VelocityField object with dimension interior u and v [(m*(n-1), (m-1)*n)] n = self.n m = self.m Re = self.Re dx = self.dx dy = self.dy dt = self.dt lam = dt/(2.0*Re) VC = structure3.VelocityComplete(self.mesh, [rhs_uvstar.get_uv()[0], rhs_uvstar.get_uv()[1]], t) gradphiu = gradphiuv[0] gradphiv = gradphiuv[1] if Boundary_type == "driven_cavity": uN = VC.bnd_driven_cavity('u')['N'] uS = VC.bnd_driven_cavity('u')['S'] uW = VC.bnd_driven_cavity('u')['W'] uE = VC.bnd_driven_cavity('u')['E'] vN = VC.bnd_driven_cavity('v')['N'] vS = VC.bnd_driven_cavity('v')['S'] vW = VC.bnd_driven_cavity('v')['W'] vE = VC.bnd_driven_cavity('v')['E'] elif Boundary_type == "Taylor": uN = VC.bnd_Taylor('u')['N'][1:n] uS = VC.bnd_Taylor('u')['S'][1:n] uW = VC.bnd_Taylor('u')['W'] uE = VC.bnd_Taylor('u')['E'] vN = VC.bnd_Taylor('v')['N'] vS = VC.bnd_Taylor('v')['S'] vW = VC.bnd_Taylor('v')['W'][1:m] vE = VC.bnd_Taylor('v')['E'][1:m] elif Boundary_type == "periodic_forcing_1": uN = VC.bnd_forcing_1('u')['N'][1:n] uS = VC.bnd_forcing_1('u')['S'][1:n] uW = VC.bnd_forcing_1('u')['W'] uE = VC.bnd_forcing_1('u')['E'] vN = VC.bnd_forcing_1('v')['N'] vS = VC.bnd_forcing_1('v')['S'] vW = VC.bnd_forcing_1('v')['W'][1:m] vE = VC.bnd_forcing_1('v')['E'][1:m] elif Boundary_type == "periodic_forcing_2": uN = VC.bnd_forcing_2('u')['N'][1:n] 
uS = VC.bnd_forcing_2('u')['S'][1:n] uW = VC.bnd_forcing_2('u')['W'] uE = VC.bnd_forcing_2('u')['E'] vN = VC.bnd_forcing_2('v')['N'] vS = VC.bnd_forcing_2('v')['S'] vW = VC.bnd_forcing_2('v')['W'][1:m] vE = VC.bnd_forcing_2('v')['E'][1:m] gradphiuW = gradphiu[1:m+1,0] gradphiuE = gradphiu[1:m+1,-1] gradphiuN = gradphiu[0,1:n] gradphiuS = gradphiu[-1,1:n] # North and South boundary uNbc = uN + dt*gradphiuN uSbc = uS + dt*gradphiuS resu1 = np.zeros((m,n-1)) resu2 = np.zeros((m,n-1)) resu1[0,:] = (16.0/5)*(uNbc)*(lam/(dy**2)) resu1[-1,:] = (16.0/5)*(uSbc)*(lam/(dy**2)) # West and East boundary uWbc = uW uEbc = uE resu2[:,0] = (uWbc)*(lam/(dx**2)) resu2[:,-1] = (uEbc)*(lam/(dx**2)) resu = resu1+resu2 resv1 = np.zeros((m-1,n)) resv2 = np.zeros((m-1,n)) gradphivN = gradphiv[0,1:n+1] gradphivS = gradphiv[-1,1:n+1] gradphivW = gradphiv[1:m,0] gradphivE = gradphiv[1:m,-1] # North and South boundary vNbc = vN vSbc = vS resv2[0,:] = vNbc*(lam/(dy**2)) resv2[-1,:] = vSbc*(lam/(dy**2)) # West and East boundary vWbc = vW + dt*gradphivW vEbc = vE + dt*gradphivE resv1[:,0] = (16.0/5)*vWbc*(lam/(dx**2)) resv1[:,-1] = (16.0/5)*vEbc*(lam/(dx**2)) resv = resv1+resv2 rhs_uvstarcd = rhs_uvstar + [resu, resv] return rhs_uvstarcd class Error(): ''' This class calculates the error norms for the solver by comparing the numerical and analyticalsolutions''' def __init__(self, uv_cmp, uv_exact_bnd, p, p_exact, gradp, gradp_exact, div_uv, mesh): self.mesh = mesh self.uv_cmp = uv_cmp self.uv_bnd = uv_cmp.get_bnd_uv() self.uv_exact_bnd = uv_exact_bnd self.p_exact = p_exact self.p = p self.gradp = gradp self.gradp_exact = gradp_exact self.div_uv = div_uv def velocity_error(self): n = self.mesh.n m = self.mesh.m # m: row, n: col uebnd = self.uv_bnd[0] - self.uv_exact_bnd.get_uv()[0] vebnd = self.uv_bnd[1] - self.uv_exact_bnd.get_uv()[1] L1 = [] L2 = [] Linf = [] for x in [uebnd, vebnd]: xv = np.ravel(x) a=sum(abs(xv[:])**2)/(m**2) # L2x = np.sqrt(sum(xv[:]**2))/(m**2) Linfx = abs(xv[:]).max() L1x = sum(abs(xv[:]))/(m**2) L1.append(L1x) L2x = np.sqrt(a) L2.append(L2x) Linf.append(Linfx) ubnderror = {'L1': L1[0], 'L2': L2[0], 'Linf': Linf[0]} vbnderror = {'L1': L1[1], 'L2': L2[1], 'Linf': Linf[1]} return ubnderror, vbnderror def pressure_error(self): n = self.mesh.n m = self.mesh.m perror = self.p - self.p_exact pv = np.ravel(perror.get_value()) a=sum(abs(pv[:])**2)/(m**2) # L2p = np.sqrt(sum(pv[:]**2))/(m**2) Linfp = abs(pv[:]).max() L1p = sum(abs(pv[:]))/(m**2) L2p = np.sqrt(a) perror_dict = {'L1': L1p, 'L2': L2p, 'Linf': Linfp} return perror_dict def pressure_gradient_error(self): n = self.mesh.n m = self.mesh.m gradp_error = self.gradp - self.gradp_exact gradpu_error, gradpv_error = gradp_error.get_uv() gradpu_errorv = np.ravel(gradpu_error) gradpv_errorv = np.ravel(gradpv_error) gradperror_list = [] for gradpe in [gradpu_errorv, gradpv_errorv]: a=sum(abs(gradpe[:])**2)/(m**2) Linfp = abs(gradpe[:]).max() L1p = sum(abs(gradpe[:]))/(m**2) L2p = np.sqrt(a) gradperror_dict = {'L1': L1p, 'L2': L2p, 'Linf': Linfp} gradperror_list.append(gradperror_dict) avg_gradp_error_dict = {'L1': (gradperror_list[0]['L1']+gradperror_list[1]['L1'])/2, 'L2': (gradperror_list[0]['L2']+gradperror_list[1]['L2'])/2, 'Linf': (gradperror_list[0]['Linf']+gradperror_list[1]['Linf'])/2} return gradperror_list[0], gradperror_list[1], avg_gradp_error_dict
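The Poisson_pressure_solver above pairs scipy's spilu incomplete factorisation with bicgstab. Below is a minimal standalone sketch of that ILU-preconditioned solve on a small 1-D Poisson matrix; the matrix and its size are illustrative stand-ins, and only the SciPy calls mirror the solver.

# Standalone sketch of the ILU-preconditioned BiCGSTAB solve used in
# Poisson_pressure_solver above. The 1-D Poisson matrix and its size are
# illustrative; only the SciPy calls mirror the solver.
import numpy as np
import scipy.sparse
import scipy.sparse.linalg as slg

n = 50
main = 2.0 * np.ones(n)
side = -np.ones(n - 1)
A = scipy.sparse.csc_matrix(scipy.sparse.diags([main, side, side], [0, -1, 1]))
rhs = np.ones(n)

A_ILU = slg.spilu(A, permc_spec='MMD_AT_PLUS_A')    # incomplete LU factorisation
M = slg.LinearOperator((n, n), matvec=A_ILU.solve)  # preconditioner as a linear operator
p, info = slg.bicgstab(A, rhs, tol=1e-12, M=M)
print(info, np.max(np.abs(A.dot(p) - rhs)))         # info == 0 on convergence; max residual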
40.507066
213
0.566099
6,959
48,730
3.787182
0.065814
0.024284
0.029141
0.020641
0.767558
0.736672
0.71569
0.703775
0.697856
0.694897
0
0.031814
0.293679
48,730
1,202
214
40.540765
0.733897
0.134435
0
0.741203
0
0
0.025346
0
0
0
0
0
0
0
null
null
0
0.013621
null
null
0.021566
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
ad7238a181b0b4c915c6513ec5fcbec043d0a020
4,318
py
Python
test/test_budget_update_run.py
usmanwardag/dollar_bot
75b02701932b932ae447edf3495acc6dbb886b2b
[ "MIT" ]
null
null
null
test/test_budget_update_run.py
usmanwardag/dollar_bot
75b02701932b932ae447edf3495acc6dbb886b2b
[ "MIT" ]
40
2021-11-20T01:28:17.000Z
2021-12-05T20:52:32.000Z
test/test_budget_update_run.py
usmanwardag/dollar_bot
75b02701932b932ae447edf3495acc6dbb886b2b
[ "MIT" ]
null
null
null
from code import budget_update
import mock
from mock import ANY
from mock.mock import patch
from telebot import types


@patch("telebot.telebot")
def test_run_overall_budget_overall_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.isOverallBudgetAvailable.return_value = True
    budget_update.update_overall_budget = mock.Mock(return_value=True)
    message = create_message("hello from testing")
    budget_update.run(message, mc)
    assert budget_update.update_overall_budget.called


@patch("telebot.telebot")
def test_run_overall_budget_category_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.isOverallBudgetAvailable.return_value = False
    budget_update.helper.isCategoryBudgetAvailable.return_value = True
    budget_update.update_category_budget = mock.Mock(return_value=True)
    message = create_message("hello from testing")
    budget_update.run(message, mc)
    assert budget_update.update_category_budget.called


@patch("telebot.telebot")
def test_run_overall_budget_new_budget_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mc.reply_to.return_value = True
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.isOverallBudgetAvailable.return_value = False
    budget_update.helper.isCategoryBudgetAvailable.return_value = False
    message = create_message("hello from testing")
    budget_update.run(message, mc)
    assert mc.reply_to.called
    mc.reply_to.assert_called_with(message, "Select Budget Type", reply_markup=ANY)


@patch("telebot.telebot")
def test_post_type_selection_failing_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mc.send_message.return_value = True
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.getBudgetTypes.return_value = {}
    budget_update.helper.throw_exception.return_value = True
    # budget_update.update_overall_budget = mock.Mock(return_value=True)
    message = create_message("hello from testing")
    budget_update.post_type_selection(message, mc)
    assert mc.send_message.called
    assert budget_update.helper.throw_exception.called


@patch("telebot.telebot")
def test_post_type_selection_overall_budget_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.getBudgetTypes.return_value = {
        "overall": "Overall Budget",
        "category": "Category-Wise Budget",
    }
    budget_update.update_overall_budget = mock.Mock(return_value=True)
    message = create_message("Overall Budget")
    budget_update.post_type_selection(message, mc)
    assert budget_update.update_overall_budget.called


@patch("telebot.telebot")
def test_post_type_selection_categorywise_budget_case(mock_telebot, mocker):
    mc = mock_telebot.return_value
    mocker.patch.object(budget_update, "helper")
    budget_update.helper.getBudgetTypes.return_value = {
        "overall": "Overall Budget",
        "category": "Category-Wise Budget",
    }
    budget_update.update_category_budget = mock.Mock(return_value=True)
    message = create_message("Category-Wise Budget")
    budget_update.post_type_selection(message, mc)
    assert budget_update.update_category_budget.called


@patch("telebot.telebot")
def test_post_option_selection_working(mock_telebot, mocker):
    mc = mock_telebot.return_value
    budget_update.update_category_budget = mock.Mock(return_value=True)
    message = create_message("Continue")
    budget_update.post_option_selection(message, mc)
    assert budget_update.update_category_budget.called


@patch("telebot.telebot")
def test_post_option_selection_nonworking(mock_telebot, mocker):
    mc = mock_telebot.return_value
    budget_update.update_category_budget = mock.Mock(return_value=True)
    message = create_message("Randomtext")
    budget_update.post_option_selection(message, mc)
    assert budget_update.update_category_budget.called is False


def create_message(text):
    # Minimal telebot Message stub carrying only the fields the handlers read.
    params = {"messagebody": text}
    chat = types.User(11, False, "test")
    message = types.Message(1, None, None, chat, "text", params, "")
    message.text = text
    return message
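All of the tests above follow the same arrange/act/assert shape: patch the bot class, stub the helper module, call the handler, then assert on the mocks. A distilled, self-contained sketch of that pattern follows; the notify function and its collaborators are hypothetical stand-ins, not part of dollar_bot.

# Distilled sketch of the mocking pattern above; `notify` and its
# collaborators are hypothetical stand-ins, not dollar_bot's API.
from unittest import mock

def notify(message, bot, helper):
    if helper.is_available():
        bot.reply_to(message, "ok")

def test_notify_replies_when_available():
    bot = mock.Mock()
    helper = mock.Mock()
    helper.is_available.return_value = True
    notify("hello", bot, helper)
    bot.reply_to.assert_called_with("hello", "ok")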
34
83
0.774896
552
4,318
5.742754
0.11413
0.143849
0.090852
0.055521
0.852366
0.828391
0.827129
0.827129
0.790221
0.769401
0
0.000803
0.134785
4,318
126
84
34.269841
0.847698
0.015285
0
0.588889
0
0
0.097647
0
0
0
0
0
0.111111
1
0.1
false
0
0.055556
0
0.166667
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ad78968dd7b42c2c7d9ef0ec372c00b1a0311894
186
py
Python
python/launch_agent.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
null
null
null
python/launch_agent.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
null
null
null
python/launch_agent.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
2
2021-09-21T21:19:11.000Z
2021-09-24T23:11:35.000Z
import logging

from Remote.agent import serve
from Agents.ExpectedSarsaLambda import ExpectedSarsaTileCodingContinuing

logging.basicConfig()
serve(ExpectedSarsaTileCodingContinuing())
23.25
72
0.876344
16
186
10.1875
0.625
0
0
0
0
0
0
0
0
0
0
0
0.075269
186
7
73
26.571429
0.947674
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d109ef06e4f5740b721feae97143276a6f96ad28
117
py
Python
myenv/lib/python2.7/site-packages/materializecssform/config.py
dkumarlinux/saleor
e3a852fed7da38e4141b0755bd282012f508c7b9
[ "BSD-3-Clause" ]
null
null
null
myenv/lib/python2.7/site-packages/materializecssform/config.py
dkumarlinux/saleor
e3a852fed7da38e4141b0755bd282012f508c7b9
[ "BSD-3-Clause" ]
2
2022-02-10T16:51:56.000Z
2022-02-10T18:23:52.000Z
myenv/lib/python2.7/site-packages/materializecssform/config.py
dkumarlinux/saleor
e3a852fed7da38e4141b0755bd282012f508c7b9
[ "BSD-3-Clause" ]
null
null
null
from django.conf import settings

MATERIALIZECSS_COLUMN_COUNT = getattr(settings, 'MATERIALIZECSS_COLUMN_COUNT', 12)
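The getattr(settings, NAME, default) idiom above lets a project override the column count from its own settings module while keeping a sane default. A minimal demo, assuming Django is installed; settings.configure() is used only so the snippet runs outside a full project, where the value would normally live in settings.py.

# Minimal demo of the override idiom; settings.configure() is only for
# standalone scripts -- a real project would set the value in settings.py.
from django.conf import settings

settings.configure(MATERIALIZECSS_COLUMN_COUNT=6)
print(getattr(settings, 'MATERIALIZECSS_COLUMN_COUNT', 12))  # -> 6 (overridden)
print(getattr(settings, 'SOME_UNSET_OPTION', 12))            # -> 12 (default)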
29.25
82
0.846154
14
117
6.785714
0.714286
0.463158
0.589474
0.694737
0
0
0
0
0
0
0
0.018692
0.08547
117
3
83
39
0.869159
0
0
0
0
0
0.230769
0.230769
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
d16c317cb76a761f635d7d8c0b45d64b498b5053
605
py
Python
examples/simple/ajax.py
peiwei/django-dajaxice
bf41c7623804856326b09f0724b4cb7d14440d7e
[ "BSD-3-Clause" ]
60
2015-01-09T23:02:52.000Z
2021-03-27T13:46:55.000Z
examples/simple/ajax.py
peiwei/django-dajaxice
bf41c7623804856326b09f0724b4cb7d14440d7e
[ "BSD-3-Clause" ]
15
2015-02-19T15:06:15.000Z
2017-10-27T15:06:47.000Z
examples/simple/ajax.py
peiwei/django-dajaxice
bf41c7623804856326b09f0724b4cb7d14440d7e
[ "BSD-3-Clause" ]
55
2015-01-02T22:27:13.000Z
2021-04-27T19:34:15.000Z
import json

from dajaxice.decorators import dajaxice_register


@dajaxice_register(method='GET')
@dajaxice_register(method='POST', name='other_post')
def hello(request):
    return json.dumps({'message': 'hello'})


@dajaxice_register(method='GET')
@dajaxice_register(method='POST', name="more.complex.bye")
def bye(request):
    raise Exception("PUMMMM")
    # Unreachable: the raise above always fires; kept to show the intended payload.
    return json.dumps({'message': 'bye'})


@dajaxice_register
def lol(request):
    return json.dumps({'message': 'lol'})


@dajaxice_register(method='GET')
def get_args(request, foo):
    return json.dumps({'message': 'hello get args %s' % foo})
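For intuition, stacking the decorator as above amounts to recording the same function in a registry under two different (name, method) keys. A toy sketch of that mechanism follows; it is illustrative only and not dajaxice's actual implementation.

# Toy registry decorator, illustrative only -- not dajaxice's internals.
REGISTRY = {}

def register(method='POST', name=None):
    def decorator(fn):
        # Key by exposed name (defaulting to the function name) and HTTP method.
        REGISTRY[(name or fn.__name__, method)] = fn
        return fn
    return decorator

@register(method='GET')
@register(method='POST', name='other_post')
def hello(request):
    return '{"message": "hello"}'

print(sorted(REGISTRY))  # [('hello', 'GET'), ('other_post', 'POST')]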
22.407407
61
0.715702
78
605
5.435897
0.346154
0.264151
0.259434
0.207547
0.471698
0.259434
0.259434
0.259434
0.259434
0
0
0
0.117355
605
26
62
23.269231
0.794007
0
0
0.176471
0
0
0.173554
0
0
0
0
0
0
1
0.235294
false
0
0.117647
0.176471
0.588235
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
0f13cceace5f4dbb6077b315c5d629cb4c51fe6f
21
py
Python
test/login.py
longkangzhi/test008
a2914fabd6d36bbad6599824a28b5d36f6589a12
[ "MIT" ]
null
null
null
test/login.py
longkangzhi/test008
a2914fabd6d36bbad6599824a28b5d36f6589a12
[ "MIT" ]
null
null
null
test/login.py
longkangzhi/test008
a2914fabd6d36bbad6599824a28b5d36f6589a12
[ "MIT" ]
null
null
null
a = 10
b = 20
c = 30
5.25
6
0.428571
6
21
1.5
1
0
0
0
0
0
0
0
0
0
0
0.5
0.428571
21
3
7
7
0.25
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0f1e9f0d12d86230a7b95ac7c1308310168bfd95
7,135
py
Python
tests/test_integration/test_pre_commit_hooks.py
mondeja/md2po
063ed45c613c98d82f7955fe9c7e2deabe109c2e
[ "BSD-3-Clause" ]
null
null
null
tests/test_integration/test_pre_commit_hooks.py
mondeja/md2po
063ed45c613c98d82f7955fe9c7e2deabe109c2e
[ "BSD-3-Clause" ]
17
2020-08-19T11:34:56.000Z
2020-09-19T14:25:29.000Z
tests/test_integration/test_pre_commit_hooks.py
mondeja/md2po
063ed45c613c98d82f7955fe9c7e2deabe109c2e
[ "BSD-3-Clause" ]
null
null
null
import os import subprocess def pre_commit_run_all_files(cwd=os.getcwd()): return subprocess.run( ['pre-commit', 'run', '--all-files'], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) def test_md2po_pre_commit_hook(tmp_dir, git_init, git_add_commit): with tmp_dir([ ( '.pre-commit-config.yaml', '''repos: - repo: https://github.com/mondeja/mdpo rev: master hooks: - id: md2po files: ^README\\.md args: - --po-filepath - README.po ''', ), ('README.md', '# Foo\n'), ('README.po', '#\nmsgid ""\nmsgstr ""\n\nmsgid "Foo"\nmsgstr ""\n'), ]) as (filesdir, _, readme_md_path, readme_po_path): # first execution, is updated proc = git_init(cwd=filesdir) assert proc.returncode == 0 git_add_commit('First commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 0 assert proc.stdout.decode('utf-8').splitlines()[-1].endswith('Passed') # second execution, is outdated with open(readme_md_path, 'a') as f: f.write('\nbar\n') git_add_commit('Second commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode != 0 assert proc.stdout.decode('utf-8').splitlines()[-1] == ( '- files were modified by this hook' ) with open(readme_po_path) as f: assert f.read() == '''# msgid "" msgstr "" msgid "Foo" msgstr "" #: README.md:block 2 (paragraph) msgid "bar" msgstr "" ''' def test_po2md_pre_commit_hook(tmp_dir, git_init, git_add_commit): with tmp_dir([ ( '.pre-commit-config.yaml', '''repos: - repo: https://github.com/mondeja/mdpo rev: master hooks: - id: po2md files: ^README\\.md args: - -p - README.po - -s - README.es.md ''', ), ('README.md', '# Foo\n'), ('README.es.md', '# Foo es\n'), ( 'README.po', '''# msgid "" msgstr "" msgid "Foo" msgstr "Foo es" ''', ), ]) as ( filesdir, _, readme_src_md_path, readme_dst_md_path, readme_po_path, ): # first execution, is updated proc = git_init(cwd=filesdir) assert proc.returncode == 0 git_add_commit('First commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 0 assert proc.stdout.decode('utf-8').splitlines()[-1].endswith('Passed') # second execution, is outdated with open(readme_src_md_path, 'a') as f: f.write('\nbar\n') with open(readme_po_path, 'a') as f: f.write('\nmsgid "bar"\nmsgstr "bar es"\n') git_add_commit('Second commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode != 0 assert proc.stdout.decode('utf-8').splitlines()[-1] == ( '- files were modified by this hook' ) with open(readme_dst_md_path) as f: assert f.read() == '''# Foo es bar es ''' def test_mdpo2html_pre_commit_hook(tmp_dir, git_init, git_add_commit): with tmp_dir([ ( '.pre-commit-config.yaml', '''repos: - repo: https://github.com/mondeja/mdpo rev: master hooks: - id: mdpo2html files: ^README\\.html args: - -p - README.po - -s - README.es.html ''', ), ('README.html', '<h1>Foo</h1>\n'), ('README.es.html', '<h1>Foo es</h1>\n'), ( 'README.po', '''# msgid "" msgstr "" msgid "Foo" msgstr "Foo es" ''', ), ]) as ( filesdir, _, readme_html_path, readme_html_es_path, readme_po_path, ): # first execution, is updated proc = git_init(cwd=filesdir) assert proc.returncode == 0 git_add_commit('First commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 0 assert proc.stdout.decode('utf-8').splitlines()[-1].endswith('Passed') with open(readme_html_es_path) as f: assert f.read() == '<h1>Foo es</h1>\n' # second execution, is outdated with open(readme_html_path, 'a') as f: f.write('\n<p>bar</p>\n') with open(readme_po_path, 'a') as f: 
f.write('\nmsgid "bar"\nmsgstr "bar es"\n') git_add_commit('Second commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode != 0 assert proc.stdout.decode('utf-8').splitlines()[-1] == ( '- files were modified by this hook' ) with open(readme_html_es_path) as f: assert f.read() == '<h1>Foo es</h1>\n\n<p>bar es</p>\n' def test_md2po2md_pre_commit_hook(tmp_dir, git_init, git_add_commit): with tmp_dir({ '.pre-commit-config.yaml': '''repos: - repo: https://github.com/mondeja/mdpo rev: master hooks: - id: md2po2md files: ^README\\.md args: - -l - es - -o - locale/{lang} - --no-location ''', 'README.md': '# Foo\n', }) as filesdir: # first execution, files don't exist proc = git_init(cwd=filesdir) assert proc.returncode == 0 git_add_commit('First commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 1 assert proc.stdout.decode('utf-8').splitlines()[-1] == '- exit code: 1' locale_dir = os.path.join(filesdir, 'locale') assert os.path.isdir(locale_dir) locale_es_dir = os.path.join(locale_dir, 'es') assert os.path.isdir(locale_es_dir) readme_md_es_path = os.path.join(locale_es_dir, 'README.md') readme_po_es_path = os.path.join(locale_es_dir, 'README.md.po') assert os.path.isfile(readme_md_es_path) assert os.path.isfile(readme_po_es_path) with open(readme_po_es_path) as f: assert f.read() == '''# msgid "" msgstr "" msgid "Foo" msgstr "" ''' with open(readme_md_es_path) as f: assert f.read() == '# Foo\n' # second execution, translation with open(readme_po_es_path, 'w') as f: f.write('''# msgid "" msgstr "" msgid "Foo" msgstr "Foo es" ''') git_add_commit('Second commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 1 assert proc.stdout.decode('utf-8').splitlines()[-1] == ( '- files were modified by this hook' ) with open(readme_md_es_path) as f: assert f.read() == '# Foo es\n' # third execution, is updated git_add_commit('Third commit', cwd=filesdir) proc = pre_commit_run_all_files(cwd=filesdir) assert proc.returncode == 0 assert proc.stdout.decode('utf-8').splitlines()[-1].endswith('Passed')
26.722846
79
0.565102
922
7,135
4.181128
0.118221
0.062776
0.040467
0.070817
0.814527
0.774578
0.749157
0.719326
0.71284
0.702464
0
0.010059
0.289418
7,135
266
80
26.823308
0.750296
0.037281
0
0.642512
0
0
0.293046
0.013413
0
0
0
0
0.15942
1
0.024155
false
0.019324
0.009662
0.004831
0.038647
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0f427ef393aecfeca9a37b787d450fb8edf406bf
28
py
Python
testpackage/setup.py
HENRYMARTIN5/SolutionPackages
74fef25b1e3615792adc8e8bae22d709ae013b0d
[ "MIT" ]
1
2022-01-02T13:47:33.000Z
2022-01-02T13:47:33.000Z
testpackage/setup.py
HENRYMARTIN5/SolutionPackages
74fef25b1e3615792adc8e8bae22d709ae013b0d
[ "MIT" ]
null
null
null
testpackage/setup.py
HENRYMARTIN5/SolutionPackages
74fef25b1e3615792adc8e8bae22d709ae013b0d
[ "MIT" ]
null
null
null
print("I'm the setup file!")
28
28
0.678571
6
28
3.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.107143
28
1
28
28
0.76
0
0
0
0
0
0.655172
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
0f66a6d414ef7a347febfcd491ff160392917216
19,928
py
Python
analysis/encoding.py
BZSROCKETS/cemrl
499939535794ee027f08b8a4133eefd0bb7abe14
[ "MIT" ]
null
null
null
analysis/encoding.py
BZSROCKETS/cemrl
499939535794ee027f08b8a4133eefd0bb7abe14
[ "MIT" ]
null
null
null
analysis/encoding.py
BZSROCKETS/cemrl
499939535794ee027f08b8a4133eefd0bb7abe14
[ "MIT" ]
null
null
null
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import colorsys

plt.rc('text', usetex=True)
plt.rc('font', family='serif')

number2name_ml10 = {
    0: 'reach-v1',
    1: 'push-v1',
    2: 'pick-place-v1',
    3: 'door-open-v1',
    4: 'drawer-close-v1',
    5: 'button-press-topdown-v1',
    6: 'peg-insert-side-v1',
    7: 'window-open-v1',
    8: 'sweep-v1',
    9: 'basketball-v1',
    10: 'drawer-open-v1',
    11: 'door-close-v1',
    12: 'shelf-place-v1',
    13: 'sweep-into-v1',
    14: 'lever-pull-v1'}

number2name_cheetah_multi_task = {
    1: 'velocity',
    2: 'goal direction',
    3: 'goal',
    4: 'rollover',
    5: 'stand-up'}

number2name = number2name_cheetah_multi_task
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']


def plot_encodings_split_with_rewards(epoch, exp_directory, save=False, normalize=False, legend=False,
                                      raw_task_names=False):
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    #rewards_per_base_task = [sum([encoding_storage[base][key]['reward_mean'] / len(list(encoding_storage[base].keys())) for key in encoding_storage[base].keys()]) for base in base_tasks]
    if len(base_tasks) == 15:
        figsize = (20, 5)
    elif len(base_tasks) == 10:
        figsize = (15, 5)
    elif len(base_tasks) in (1, 3):
        figsize = (7, 5)
    else:
        figsize = None
    fig, axes_tuple = plt.subplots(nrows=3, ncols=len(base_tasks), sharex='col', sharey='row',
                                   gridspec_kw={'height_ratios': [3, 1, 1]}, figsize=figsize)
    if len(axes_tuple.shape) == 1:
        axes_tuple = np.expand_dims(axes_tuple, 1)
    latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]

    # Normalization over all base tasks, per latent dimension
    if normalize:
        normalizer = []
        for dim in range(latent_dim):
            temp_dict = {}
            for element in ['mean', 'std']:
                values = np.array([a[element][dim] for base in base_tasks
                                   for a in list(encoding_storage[base].values())])
                temp_dict[element] = dict(mean=values.mean(), std=values.std())
            normalizer.append(temp_dict)

    for i, base in enumerate(base_tasks):
        # encodings
        #target_values = np.array([encoding_storage[base][key]['target'][2] for key in encoding_storage[base].keys()])
        #sort_indices = np.argsort(target_values)
        for dim in range(latent_dim):
            x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])
            y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])
            # Normalize
            if normalize:
                x_values = (x_values - normalizer[dim]['mean']['mean']) / (normalizer[dim]['mean']['std'] + 1e-9)
                y_values = (y_values - normalizer[dim]['std']['mean']) / (normalizer[dim]['std']['std'] + 1e-9)
            label_string = "Encoding $z_" + str(dim) + "$"
            axes_tuple[0][i].errorbar(np.array(list(encoding_storage[base].keys())), x_values,
                                      yerr=y_values, fmt=".", label=label_string)
        if axes_tuple.shape[1] > 1:
            if raw_task_names:
                split_name = number2name[base]
            else:
                # Strip the version suffix and break long task names over two lines
                nameWithoutVersion = '-'.join(number2name[base].split('-')[:-1])
                if len(nameWithoutVersion.split('-')) > 2:
                    split_name = ('-'.join(nameWithoutVersion.split('-')[:2]) + " \n "
                                  + '-'.join(nameWithoutVersion.split('-')[2:]))
                else:
                    split_name = nameWithoutVersion
            axes_tuple[0][i].set_title(split_name)
        else:
            axes_tuple[0][i].set_title("Epoch " + str(epoch), fontsize=14)
        # rewards
        axes_tuple[2][i].bar(np.array(list(encoding_storage[base].keys())),
                             [encoding_storage[base][key]['reward_mean'] for key in encoding_storage[base].keys()],
                             width=0.01, align='center')
        # base task encodings
        axes_tuple[1][i].plot(list(encoding_storage[base].keys()),
                              [np.argmax(a['base']) for a in list(encoding_storage[base].values())],
                              'x', label="Base encoding $\mathbf{y}$")
        axes_tuple[1][i].set_xlabel("Specification", fontsize=12)
        axes_tuple[1][i].set_yticks(np.arange(-1, len(base_tasks), 1), minor=True)
        axes_tuple[1][0].set_ylim(-1, 10)  #len(base_tasks)
        axes_tuple[0][i].grid()
        axes_tuple[1][i].grid(which='minor')
        axes_tuple[1][i].grid(which='major')
        axes_tuple[2][i].grid()
    axes_tuple[0][0].set_ylabel('Encoding $\mathbf{z}$', fontsize=12)
    axes_tuple[1][0].set_ylabel('Base task \n encoding $\mathbf{y}$', fontsize=12)
    axes_tuple[2][0].set_ylabel('Average \n reward $R$', fontsize=12)
    if legend:
        axes_tuple[0][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
        axes_tuple[1][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
    if save:
        plt.tight_layout()
        fig.savefig(exp_directory + "/encoding_epoch_" + str(epoch) + ("_normalized" if normalize else "")
                    + "_with_rewards" + ".pdf", format="pdf")
    fig.show()
    print("Created plot")


def plot_encodings_split_with_rewards_cheetah(epoch, exp_directory, save=False, normalize=False, legend=False):
    """Same as plot_encodings_split_with_rewards, but titles each column with the raw task name.

    The original file contained a near-verbatim copy of the function above whose only effective
    difference was overriding the computed title with number2name[base]; the copy is folded into
    the raw_task_names flag.
    """
    plot_encodings_split_with_rewards(epoch, exp_directory, save=save, normalize=normalize,
                                      legend=legend, raw_task_names=True)


def plot_encodings_split(epoch, exp_directory, save=False, normalize=False, legend=False):
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    if len(base_tasks) == 10:
        figsize = (15, 5)
    elif len(base_tasks) in (1, 3):
        figsize = (7, 5)
    else:
        figsize = None
    fig, axes_tuple = plt.subplots(nrows=2, ncols=len(base_tasks), sharex='col', sharey='row',
                                   gridspec_kw={'height_ratios': [3, 1]}, figsize=figsize)
    if len(axes_tuple.shape) == 1:
        axes_tuple = np.expand_dims(axes_tuple, 1)
    latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
    base_task_encodings = [np.argmax(a['base']) for base in base_tasks
                           for a in list(encoding_storage[base].values())]

    # Normalization over all base tasks, per latent dimension
    if normalize:
        normalizer = []
        for dim in range(latent_dim):
            temp_dict = {}
            for element in ['mean', 'std']:
                values = np.array([a[element][dim] for base in base_tasks
                                   for a in list(encoding_storage[base].values())])
                temp_dict[element] = dict(mean=values.mean(), std=values.std())
            normalizer.append(temp_dict)

    fontsize = 26
    for i, base in enumerate(base_tasks):
        # encodings
        for dim in range(latent_dim):
            x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])
            y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])
            # Normalize
            if normalize:
                x_values = (x_values - normalizer[dim]['mean']['mean']) / (normalizer[dim]['mean']['std'] + 1e-9)
                y_values = (y_values - normalizer[dim]['std']['mean']) / (normalizer[dim]['std']['std'] + 1e-9)
            label_string = "Encoding $z_" + str(dim) + "$"
            # 2 classes: capsize=3, elinewidth=3, capthick=3, markersize=9
            # more classes: capsize=2, elinewidth=2, capthick=2, markersize=7
            axes_tuple[0][i].errorbar(np.array(list(encoding_storage[base].keys())), x_values, yerr=y_values,
                                      fmt="d", color='tab:green', label=label_string, capsize=2, elinewidth=2,
                                      capthick=2, markersize=7, markerfacecolor='yellow', markeredgecolor='black')
        if axes_tuple.shape[1] > 1:
            # Titles use the raw task name (the original also computed a version-stripped
            # variant here, then unconditionally overwrote it with number2name[base]).
            axes_tuple[0][i].set_title(number2name[base], fontsize=fontsize)
        else:
            axes_tuple[0][i].set_title("Epoch " + str(epoch), fontsize=fontsize)
        # base task encodings
        axes_tuple[1][i].plot(list(encoding_storage[base].keys()),
                              [np.argmax(task['base']) for task in list(encoding_storage[base].values())],
                              'd', color='yellow', markersize=7, markerfacecolor='yellow',
                              markeredgecolor='black')  # markersize=7 for multiple tasks, 9 for two
        axes_tuple[1][i].set_xlabel("Specification", fontsize=fontsize)
        axes_tuple[1][i].set_ylim(-1, np.max(base_task_encodings) + 1)
        axes_tuple[0][i].tick_params(axis="x", labelsize=fontsize)
        axes_tuple[0][i].tick_params(axis="y", labelsize=fontsize)
        axes_tuple[1][i].tick_params(axis="x", labelsize=fontsize)
        axes_tuple[1][i].tick_params(axis="y", labelsize=fontsize)
        axes_tuple[1][i].set_yticks(np.arange(-1, np.max(base_task_encodings) + 2, 1))
        axes_tuple[0][i].grid(b=True, which='major', alpha=1)
        axes_tuple[1][i].grid(which='minor')
        axes_tuple[1][i].grid(which='major')
    axes_tuple[0][0].set_ylabel('Encoding $\mathbf{z}$', fontsize=fontsize)
    axes_tuple[1][0].set_ylabel('Encoding $\mathbf{y}$', fontsize=fontsize)
    plt.tight_layout()
    if legend:
        axes_tuple[0][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True,
                                 fontsize=fontsize)
        axes_tuple[1][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True,
                                 fontsize=fontsize)
    plt.subplots_adjust(wspace=0.15, hspace=0.15)
    plt.grid(b=True, which='major', alpha=1)
    if save:
        fig.savefig(exp_directory + "/encoding_epoch_" + str(epoch) + ("_normalized" if normalize else "") + ".pdf",
                    format="pdf", bbox_inches="tight")
    plt.show()
    print(exp_directory)
    print("Created plot")


def plot_encodings(epoch, exp_directory, save=False, normalize=False):
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    # The original created a default-size figure first and immediately replaced it;
    # only the sized figure is kept.
    fig, axes_tuple = plt.subplots(ncols=len(base_tasks), sharey='row', figsize=(15, 3))
    #fig.suptitle("Epoch " + str(epoch), fontsize="x-large")
    if len(base_tasks) == 1:
        axes_tuple = [axes_tuple]
    latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
    for i, base in enumerate(base_tasks):
        for dim in range(latent_dim):
            x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])
            y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])
            # Normalize per base task
            if normalize:
                x_values = (x_values - x_values.mean()) / (x_values.std() + 1e-9)
                y_values = (y_values - y_values.mean()) / (y_values.std() + 1e-9)
            axes_tuple[i].errorbar(list(encoding_storage[base].keys()), x_values, yerr=y_values,
                                   fmt=".", label="Encoding $\mathbf{z}$")
        axes_tuple[i].plot(list(encoding_storage[base].keys()),
                           [np.argmax(a['base']) for a in list(encoding_storage[base].values())],
                           'x', label="Class encoding $\mathbf{y}$")
        axes_tuple[i].set_title("Base Task " + str(i))
        axes_tuple[i].grid()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
    if save:
        fig.savefig(exp_directory + "/encoding_epoch" + str(epoch) + ("_normalized" if normalize else "") + ".pdf",
                    dpi=300, format="pdf")
    plt.show()
    print("Created plot")


def plot_encodings_2D(epoch, exp_directory):
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    fig, ax = plt.subplots()
    for i, base in enumerate(base_tasks):
        specification = np.array(list(encoding_storage[base].keys()))
        means1 = [a['mean'][0] for a in list(encoding_storage[base].values())]
        means2 = [a['mean'][1] for a in list(encoding_storage[base].values())]
        # Bug fix: the original read 'mean' here as well; the error bars and the
        # ellipse extents are clearly meant to show the standard deviations.
        vars1 = [a['std'][0] for a in list(encoding_storage[base].values())]
        vars2 = [a['std'][1] for a in list(encoding_storage[base].values())]
        points = ax.scatter(means1, means2, c=specification, cmap='autumn', zorder=0)
        ax.errorbar(means1, means2, xerr=np.array(vars1) / 2, yerr=np.array(vars2) / 2,
                    alpha=0.2, fmt="o", color="black", zorder=-2)
        for j in range(len(encoding_storage[base])):
            #color = np.expand_dims(np.array(colorsys.hsv_to_rgb(hue[j], 1, 1)), 0)
            e = Ellipse((means1[j], means2[j]), vars1[j], vars2[j], fill=False, zorder=-1)
            ax.add_artist(e)
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.2)
            #e.set_color(color[j])
    fig.colorbar(points)
    plt.show()


if __name__ == "__main__":
    #plot_encodings_split(0, "/path/to/exp", save=False, normalize=False)
    pass
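All of the plotting helpers above read the same pickled structure: a dict keyed by base-task id, whose values map a task specification to a per-task dict with 'mean', 'std', 'base' and 'reward_mean' entries. As a minimal sketch (with invented shapes and values; the experiment code that actually writes these files is not part of this record), a compatible encoding_<epoch>.p could be produced like this:

# Hypothetical sketch of the file format consumed by the plotting functions.
# Field names ('mean', 'std', 'base', 'reward_mean') are taken from the code
# above; dimensions and values are illustrative only. Note plt.rc('text',
# usetex=True) above also requires a working LaTeX install at plot time.
import pickle
import numpy as np

latent_dim, n_base_tasks = 2, 3
encoding_storage = {
    base: {
        spec: {
            'mean': np.random.randn(latent_dim),         # posterior mean of z
            'std': np.abs(np.random.randn(latent_dim)),  # posterior std of z
            'base': np.eye(n_base_tasks)[base - 1],      # base-task belief y (argmax'ed when plotted)
            'reward_mean': float(np.random.rand()),      # average rollout reward
        }
        for spec in np.linspace(-1.0, 1.0, 5).round(2)
    }
    for base in (1, 2, 3)  # keys must exist in number2name for the column titles
}
with open("encoding_0.p", "wb") as f:
    pickle.dump(encoding_storage, f)
# plot_encodings_split(0, ".")  # would then read ./encoding_0.p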
54.898072
275
0.616519
2,767
19,928
4.266715
0.096856
0.068609
0.102998
0.072082
0.858631
0.842114
0.817127
0.789175
0.777655
0.763934
0
0.024059
0.209504
19,928
363
276
54.898072
0.725386
0.151696
0
0.661922
0
0
0.086247
0.001365
0.010676
0
0
0
0
1
0.017794
false
0.003559
0.021352
0
0.039146
0.017794
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7e274ec4396cf4dd7ed91c1ed6dba12f09f63f2f
174
py
Python
sql_connectors/__init__.py
andmatt/sql_connectors
f2680ae7e847fc94064d506bf8da9a3b1ef68f43
[ "MIT" ]
8
2018-03-29T17:20:17.000Z
2020-10-24T21:12:10.000Z
sql_connectors/__init__.py
andmatt/sql_connectors
f2680ae7e847fc94064d506bf8da9a3b1ef68f43
[ "MIT" ]
3
2018-10-17T19:59:23.000Z
2019-01-14T18:41:01.000Z
sql_connectors/__init__.py
andmatt/sql_connectors
f2680ae7e847fc94064d506bf8da9a3b1ef68f43
[ "MIT" ]
5
2018-10-15T20:07:59.000Z
2019-10-10T14:12:21.000Z
# -*- coding: utf-8 -*-
from ._version import __version__, __version_info__
from .storage import LocalStorage

__all__ = ["__version__", "__version_info__", "LocalStorage"]
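For orientation, the only public surface this __init__ exposes is the version metadata and the re-exported LocalStorage class; a hypothetical consumer touches just the package root (LocalStorage's constructor is not shown in this record, so it is only referenced, not instantiated):

# Hypothetical usage of the re-exports above.
import sql_connectors

print(sql_connectors.__version__)
storage_cls = sql_connectors.LocalStorage  # re-exported from sql_connectors.storage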
24.857143
61
0.747126
18
174
5.944444
0.555556
0.261682
0.336449
0
0
0
0
0
0
0
0
0.006536
0.12069
174
6
62
29
0.69281
0.12069
0
0
0
0
0.258278
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
7e2c6268f71a969ef5c7bbf91fba49d974fd0f66
34
py
Python
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_13_absolute_from_import.py
JetBrains-Research/Lupa
c105487621564c60cae17395bf32eb40868ceb89
[ "Apache-2.0" ]
16
2022-01-11T00:32:20.000Z
2022-03-25T21:40:52.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_13_absolute_from_import.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
12
2021-07-05T11:42:01.000Z
2021-12-23T07:57:54.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_13_absolute_from_import.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
3
2021-09-10T13:21:54.000Z
2021-11-23T11:37:55.000Z
from src.tasks.task1 import utils
17
33
0.823529
6
34
4.666667
1
0
0
0
0
0
0
0
0
0
0
0.033333
0.117647
34
1
34
34
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7e67f73b709257d530869cc72715eb7c4be3f518
200
py
Python
lldb/packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
medismailben/llvm-project
e334a839032fe500c3bba22bf976ab7af13ce1c1
[ "Apache-2.0" ]
765
2015-12-03T16:44:59.000Z
2022-03-07T12:41:10.000Z
packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
DalavanCloud/lldb
e913eaf2468290fb94c767d474d611b41a84dd69
[ "Apache-2.0" ]
1,815
2015-12-11T23:56:05.000Z
2020-01-10T19:28:43.000Z
packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
DalavanCloud/lldb
e913eaf2468290fb94c767d474d611b41a84dd69
[ "Apache-2.0" ]
284
2015-12-03T16:47:25.000Z
2022-03-12T05:39:48.000Z
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators

lldbinline.MakeInlineTest(
    __file__, globals(),
    [
        decorators.expectedFailureAll(
            compiler="gcc")])
25
38
0.72
18
200
7.777778
0.666667
0.185714
0.242857
0.328571
0
0
0
0
0
0
0
0
0.195
200
7
39
28.571429
0.869565
0
0
0
0
0
0.015
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
7e7d7b00a4dfbc24dac95b85c60686ccdb54f2a9
33
py
Python
dateparse/__init__.py
tobiasli/dateparse
7ba61ea6a1ac0c98c7b7c69bd50c889fd33a6d29
[ "MIT" ]
null
null
null
dateparse/__init__.py
tobiasli/dateparse
7ba61ea6a1ac0c98c7b7c69bd50c889fd33a6d29
[ "MIT" ]
2
2015-11-15T21:09:30.000Z
2019-10-26T21:06:45.000Z
dateparse/__init__.py
tobiasli/dateparse
7ba61ea6a1ac0c98c7b7c69bd50c889fd33a6d29
[ "MIT" ]
null
null
null
from dateparse.dateparse import *
33
33
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.090909
33
1
33
33
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7eae4b6e82a862485ccc6089efc8ef1a41a449ab
21,921
py
Python
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/vfi.py
kecorbin/genielibs
5d3951b8911013691822e73e9c3d0f557ca10f43
[ "Apache-2.0" ]
null
null
null
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/vfi.py
kecorbin/genielibs
5d3951b8911013691822e73e9c3d0f557ca10f43
[ "Apache-2.0" ]
null
null
null
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/vfi.py
kecorbin/genielibs
5d3951b8911013691822e73e9c3d0f557ca10f43
[ "Apache-2.0" ]
null
null
null
from abc import ABC
import warnings
import contextlib

from genie.conf.base.attributes import UnsupportedAttributeWarning,\
    AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig

from genie.libs.conf.l2vpn.pseudowire import PseudowireNeighbor,\
    PseudowireIPv4Neighbor


class Vfi(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 (config-l2vpn-bg-bd-vfi)
        if attributes.value('virtual', force=True):
            title = attributes.format('access-vfi {name}', force=True)
        else:
            title = attributes.format('vfi {name}', force=True)
        with configurations.submode_context(title):
            if unconfig and attributes.iswildcard:
                configurations.submode_unconfig()

            sub, attributes2 = attributes.namespace('autodiscovery_bgp')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            sub, attributes2 = attributes.namespace('multicast_p2mp')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-vfi-pw)
            for sub, attributes2 in attributes.mapping_values('neighbor_attr',
                                                              keys=self.pseudowire_neighbors, sort=True):
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / shutdown
            if attributes.value('shutdown'):
                configurations.append_line('shutdown')

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / vpn-id 1
            configurations.append_line(attributes.format('vpn-id {vpn_id}'))

        return CliConfig(device=self.device, unconfig=unconfig, cli_config=configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class AutodiscoveryBgpAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp (config-l2vpn-bg-bd-vfi-ad)
        with configurations.submode_context('autodiscovery bgp'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / control-word
            if attributes.value('control_word'):
                configurations.append_line('control-word')

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / rd 1.2.3.4:1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / rd 100000:200
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / rd 100:200000
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / rd auto
            configurations.append_line(attributes.format('rd {rd}'))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-policy export <rtepol>
            configurations.append_line(attributes.format('route-policy {export_route_policy}'))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target 1.2.3.4:1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target 100000:200
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target 100:200000
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export 1.2.3.4:1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export 100000:200
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export 100:200000
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export import 1.2.3.4:1 (bug)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export import 100000:200 (bug)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target export import 100:200000 (bug)
            both_route_targets = set(self.export_route_targets) & set(self.import_route_targets)
            for v, attributes2 in attributes.sequence_values('export_route_targets', sort=True):
                if v in both_route_targets:
                    cfg = 'route-target {}'.format(v.route_target)
                else:
                    cfg = 'route-target export {}'.format(v.route_target)
                if v.stitching:
                    # Bug fix: the original called warning.warn(...) (an undefined name;
                    # the imported module is 'warnings') with message and category swapped.
                    warnings.warn('route-target export/import stitching',
                                  UnsupportedAttributeWarning)
                configurations.append_line(cfg)

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target import 1.2.3.4:1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target import 100000:200
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / route-target import 100:200000
            for v, attributes2 in attributes.sequence_values('import_route_targets', sort=True):
                if v not in both_route_targets:
                    cfg = 'route-target import {}'.format(v.route_target)
                    if v.stitching:
                        # Bug fix: same warning.warn misspelling / argument swap as above.
                        warnings.warn('route-target export/import stitching',
                                      UnsupportedAttributeWarning)
                    configurations.append_line(cfg)

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp (config-l2vpn-bg-bd-vfi-ad-sig)
            sub, attributes2 = attributes.namespace('signaling_protocol_bgp')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp (config-l2vpn-bg-bd-vfi-ad-sig)
            sub, attributes2 = attributes.namespace('signaling_protocol_ldp')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / table-policy <rtepol>
            configurations.append_line(attributes.format('table-policy {table_policy}'))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class SignalingProtocolBgpAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp (config-l2vpn-bg-bd-vfi-ad-sig)
        with configurations.submode_context('signaling-protocol bgp'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label both
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label both static
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label receive
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label receive static
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label transmit
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / load-balancing flow-label transmit static

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / ve-id 1
            configurations.append_line(attributes.format('ve-id {ve_id}'))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol bgp / ve-range 11
            configurations.append_line(attributes.format('ve-range {ve_range}'))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class SignalingProtocolLdpAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp (config-l2vpn-bg-bd-vfi-ad-sig)
        with configurations.submode_context('signaling-protocol ldp'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label both
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label both static
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label receive
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label receive static
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label transmit
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / load-balancing flow-label transmit static

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / vpls-id 1.2.3.4:1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / autodiscovery bgp / signaling-protocol ldp / vpls-id 100:200000
            configurations.append_line(attributes.format('vpls-id {vpls_id}'))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class MulticastP2mpAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp (config-l2vpn-bg-bd-vfi-p2mp)
        with configurations.submode_context('multicast p2mp'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp / signaling-protocol bgp (config-l2vpn-bg-bd-vfi-p2mp-bgp)
            #sub, attributes2 = attributes.namespace('signaling_protocol_bgp')
            #if sub is not None:
            #    configurations.append_block(
            #        sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp / transport rsvp-te (config-l2vpn-bg-bd-vfi-p2mp-te)
            sub, attributes2 = attributes.namespace('transport_rsvp_te')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


# Variant used under 'multicast p2mp'. Note it carries the same class name as the
# 'autodiscovery bgp' helper above; at module scope this second definition shadows
# the first (in the packaged source these live in different namespaces).
class SignalingProtocolBgpAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp / signaling-protocol bgp (config-l2vpn-bg-bd-vfi-ad-sig)
        with configurations.submode_context('signaling-protocol bgp'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class TransportRsvpTeAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp / transport rsvp-te (config-l2vpn-bg-bd-vfi-p2mp-te)
        with configurations.submode_context('transport rsvp-te'):
            if not attributes.value('enabled', force=True):
                configurations.submode_cancel()

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / multicast p2mp / transport rsvp-te / attribute-set p2mp-te someword4
            configurations.append_line(attributes.format('attribute-set p2mp-te {attribute_set_p2mp_te}'))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)


class NeighborAttributes(ABC):

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        if isinstance(self.neighbor, PseudowireIPv4Neighbor):
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-vfi-pw)
            assert self.ip is not None
            assert self.pw_id is not None
            nbr_ctx = attributes.format('neighbor {ip} pw-id {pw_id}', force=True)
        else:
            raise ValueError(self.neighbor)
        assert nbr_ctx
        with configurations.submode_context(nbr_ctx):

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 none
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 snoop profile someword4
            v = attributes.value('dhcp_ipv4_snooping_profile')
            if v is not None:
                if v is False:
                    configurations.append_line('dhcp ipv4 none')
                else:
                    configurations.append_line('dhcp ipv4 snoop profile {}'.format(v))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / igmp snooping profile someword4
            v = attributes.value('igmp_snooping_profile')
            if v is not None:
                if v is False:
                    pass
                else:
                    configurations.append_line('igmp snooping profile {}'.format(v))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / mld snooping profile someword4
            v = attributes.value('mld_snooping_profile')
            if v is not None:
                if v is False:
                    pass
                else:
                    configurations.append_line('mld snooping profile {}'.format(v))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / mpls static label local 16 remote 16
            remote_label = attributes.value('mpls_static_label')
            if remote_label is not None:
                local_label = self.neighbor_attr[self.local_neighbor].mpls_static_label
                if local_label is None:
                    warnings.warn(
                        'neighbor {!r} mpls_static_label missing'.format(self.local_neighbor),
                        UnsupportedAttributeWarning)
                else:
                    configurations.append_line('mpls static label local {} remote {}'.
                                               format(local_label, remote_label))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / pw-class someword4
            v = attributes.value('pw_class')
            if v is not None:
                configurations.append_line('pw-class {}'.format(v.device_attr[self.device].name))

            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 / neighbor 1.2.3.4 pw-id 1 / static-mac-address aaaa.bbbb.cccc
            configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))

        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
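Taken together, these builders emit nested CLI submodes via submode_context/append_line. For a VFI with a vpn-id and one IPv4 pseudowire neighbor, the rendered IOS XR fragment would be expected to look roughly like the snippet below. This is an illustrative sketch of the output shape inferred from the append_line calls and the placeholder names in the file's own comments, not captured output from genie; the enclosing l2vpn/bridge-domain context is produced by parent builders that are not part of this file.

vfi someword3
 vpn-id 1
 neighbor 1.2.3.4 pw-id 1
  pw-class someword4
  static-mac-address aaaa.bbbb.cccc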
62.991379
189
0.62748
2,304
21,921
5.90625
0.072049
0.042622
0.068195
0.089506
0.838257
0.80754
0.786008
0.76749
0.76749
0.76749
0
0.026655
0.291456
21,921
347
190
63.172911
0.849472
0.377355
0
0.619048
0
0
0.074652
0.009939
0
0
0
0
0.090476
1
0.07619
false
0.009524
0.057143
0.038095
0.247619
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0e1803846cc2a25ea7d3543b56fc66b7cd327f16
2,091
py
Python
diceThing.py
HappyNapalm/dungeon_crawl_python
d53a0936292b40bf098076f4fe27bcfaeb713591
[ "Unlicense" ]
null
null
null
diceThing.py
HappyNapalm/dungeon_crawl_python
d53a0936292b40bf098076f4fe27bcfaeb713591
[ "Unlicense" ]
null
null
null
diceThing.py
HappyNapalm/dungeon_crawl_python
d53a0936292b40bf098076f4fe27bcfaeb713591
[ "Unlicense" ]
null
null
null
from random import randint

while (True):
    # Roll the six ability scores.
    stats = [0, 1, 2, 3, 4, 5]
    i = 0
    while (i < 6):
        a = 1
        j = 0
        while (j < 4):
            a = a + randint(1, 6)
            # Bug fix: the original condition was 'a>8&a<18', which parses as
            # 'a > (8 & a) < 18' because '&' binds tighter than the comparisons.
            if (a > 8 and a < 18):
                j = j + 1
        a = a - 3
        stats[i] = a
        i = i + 1
    print("\nWhat is your race Adventurer?\n")
    race = input()
    race_parse = race.lower()  # Bug fix: str has .lower(), not .lowercase()
    if (race_parse == "human"):
        print("Str :", stats[0])
        print("Dex :", stats[1])
        print("Con :", stats[2])
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5])
    elif (race_parse == "dwarf"):
        print("Str :", stats[0])
        print("Dex :", stats[1])
        print("Con :", stats[2] + 2)
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5] - 2)
    elif (race_parse == "elf"):
        print("Str :", stats[0])
        print("Dex :", stats[1] + 2)
        print("Con :", stats[2] - 2)
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5])
    elif (race_parse == "half-elf"):
        print("Str :", stats[0])
        print("Dex :", stats[1])
        print("Con :", stats[2])
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5])
    elif (race_parse == "half-orc"):
        print("Str :", stats[0] + 2)
        print("Dex :", stats[1])
        print("Con :", stats[2])
        print("Int :", stats[3] - 2)
        print("Wis :", stats[4])
        print("Cha :", stats[5] - 2)
    elif (race_parse == "halfling"):
        print("Str :", stats[0] - 2)
        print("Dex :", stats[1] + 2)
        print("Con :", stats[2])
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5])
    elif (race_parse == "gnome"):  # Bug fix: the original compared the unnormalized 'race' here
        print("Str :", stats[0] - 2)
        print("Dex :", stats[1] + 2)
        print("Con :", stats[2])
        print("Int :", stats[3])
        print("Wis :", stats[4])
        print("Cha :", stats[5])
    else:
        print("Speak up Champion!")
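Since the racial adjustments in the elif chain above are pure data, a lookup table collapses the seven near-identical print blocks. This is a hypothetical refactor sketch, not the author's code; the modifier values are copied directly from the original branches.

# Hypothetical data-driven replacement for the elif chain above.
RACIAL_MODIFIERS = {
    "human": (0, 0, 0, 0, 0, 0),
    "dwarf": (0, 0, 2, 0, 0, -2),
    "elf": (0, 2, -2, 0, 0, 0),
    "half-elf": (0, 0, 0, 0, 0, 0),
    "half-orc": (2, 0, 0, -2, 0, -2),
    "halfling": (-2, 2, 0, 0, 0, 0),
    "gnome": (-2, 2, 0, 0, 0, 0),
}
ABILITIES = ("Str", "Dex", "Con", "Int", "Wis", "Cha")

def print_stats(stats, race_parse):
    mods = RACIAL_MODIFIERS.get(race_parse)
    if mods is None:
        print("Speak up Champion!")
        return
    for name, base, mod in zip(ABILITIES, stats, mods):
        print(name, ":", base + mod)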
28.643836
47
0.436633
273
2,091
3.318681
0.175824
0.092715
0.100442
0.108168
0.756071
0.756071
0.756071
0.756071
0.756071
0.700883
0
0.05244
0.343376
2,091
72
48
29.041667
0.607429
0
0
0.558824
0
0
0.150074
0
0
0
0
0
0
1
0
false
0
0.014706
0
0.014706
0.647059
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
0e31435f187ca1450633d3c8e3ea5d1d2e5a66be
43
py
Python
signalr/transports/__init__.py
talboren/signalr-client-py
bc41ab6602348258140372a5d78dc0e4f8f6205d
[ "Apache-2.0" ]
58
2015-08-28T18:45:54.000Z
2022-01-21T17:53:43.000Z
signalr/transports/__init__.py
talboren/signalr-client-py
bc41ab6602348258140372a5d78dc0e4f8f6205d
[ "Apache-2.0" ]
48
2015-08-29T18:19:59.000Z
2021-07-13T07:32:40.000Z
signalr/transports/__init__.py
talboren/signalr-client-py
bc41ab6602348258140372a5d78dc0e4f8f6205d
[ "Apache-2.0" ]
67
2015-08-28T22:44:47.000Z
2022-03-03T12:37:14.000Z
from ._auto_transport import AutoTransport
21.5
42
0.883721
5
43
7.2
1
0
0
0
0
0
0
0
0
0
0
0
0.093023
43
1
43
43
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0e34295d5e20de58ffba5123aa3b88f9ea34f5d2
68,075
py
Python
tests/001_theoretical/test_011_list_blueprint.py
vitlabuda/datalidator
539063a98990c6be165baeff6c2a74ac2fd7a130
[ "BSD-3-Clause" ]
null
null
null
tests/001_theoretical/test_011_list_blueprint.py
vitlabuda/datalidator
539063a98990c6be165baeff6c2a74ac2fd7a130
[ "BSD-3-Clause" ]
null
null
null
tests/001_theoretical/test_011_list_blueprint.py
vitlabuda/datalidator
539063a98990c6be165baeff6c2a74ac2fd7a130
[ "BSD-3-Clause" ]
null
null
null
#!/bin/false

# Copyright (c) 2022 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
#  following conditions are met:
#  1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
#     disclaimer.
#  2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
#     following disclaimer in the documentation and/or other materials provided with the distribution.
#  3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
#     products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
#  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import os.path
import sys

if "DATALIDATOR_TESTS_AUTOPATH" in os.environ:
    __TESTS_DIR = os.path.dirname(os.path.realpath(__file__))
    __MODULE_DIR = os.path.realpath(os.path.join(__TESTS_DIR, "../.."))
    if __TESTS_DIR not in sys.path:
        sys.path.insert(0, __TESTS_DIR)
    if __MODULE_DIR not in sys.path:
        sys.path.insert(0, __MODULE_DIR)

from typing import Iterable, Tuple, Any
import theoretical_testutils
import pytest
import string
import datetime
import ipaddress
import urllib.parse
import uuid
from datalidator.blueprints.ParsingMode import ParsingMode
from datalidator.blueprints.impl.ListBlueprint import ListBlueprint
from datalidator.blueprints.impl.IntegerBlueprint import IntegerBlueprint
from datalidator.blueprints.impl.StringBlueprint import StringBlueprint
from datalidator.blueprints.impl.GenericBlueprint import GenericBlueprint
from datalidator.blueprints.exc.InputDataNotConvertibleExc import InputDataNotConvertibleExc
from datalidator.blueprints.exc.InputDataTypeNotInAllowlistExc import InputDataTypeNotInAllowlistExc
from datalidator.blueprints.exc.InputDataTypeInBlocklistExc import InputDataTypeInBlocklistExc
from datalidator.filters.impl.ListDeduplicateItemsFilter import ListDeduplicateItemsFilter
from datalidator.filters.impl.ListSortFilter import ListSortFilter
from datalidator.filters.exc.SortingFailedInFilterExc import SortingFailedInFilterExc
from datalidator.validators.impl.SequenceContainsItemValidator import SequenceContainsItemValidator
from datalidator.validators.impl.SequenceHasAllItemsUniqueValidator import SequenceHasAllItemsUniqueValidator
from datalidator.validators.impl.SequenceIsNotEmptyValidator import SequenceIsNotEmptyValidator
from datalidator.validators.impl.SequenceMaximumLengthValidator import SequenceMaximumLengthValidator
from datalidator.validators.impl.SequenceMinimumLengthValidator import SequenceMinimumLengthValidator
from datalidator.validators.impl.IntegerIsPositiveValidator import IntegerIsPositiveValidator
from datalidator.validators.impl.NumberMaximumValueValidator import NumberMaximumValueValidator
from datalidator.validators.exc.DataValidationFailedExc import DataValidationFailedExc
from datalidator.validators.exc.err.InvalidValidatorConfigError import InvalidValidatorConfigError


# Some input collections (e.g. sets) are unordered!
def ignore_order_of_output_list(expected_output_list: list):  # DP: Factory
    return lambda output: (output.__class__ is list) and (sorted(output) == sorted(expected_output_list))


def exception_raising_comparison_key_extraction_function(item):  # noqa
    raise theoretical_testutils.TestException()


class IterableObject:
    def __init__(self, iter_: Iterable[Any]):
        self.__seq: Tuple[Any, ...] = tuple(iter_)

    def __iter__(self):
        for item in self.__seq:
            yield item


class ExceptionRaisingIterableObject:
    def __init__(self, raise_: bool):
        self.__raise: bool = raise_

    def __iter__(self):
        yield -123
        if self.__raise:
            raise theoretical_testutils.TestException()


class CustomTestListItem:
    def __init__(self, id_: int, name: str):
        self.__id: int = id_
        self.__name: str = name

    def get_id(self) -> int:
        return self.__id

    def get_name(self) -> str:
        return self.__name

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.__id == other.get_id()) and (self.__name == other.get_name())
        return NotImplemented


__LIST_BLUEPRINT_TEST_SUITE = (
    (ListBlueprint(item_blueprint=IntegerBlueprint(), parsing_mode=ParsingMode.MODE_LOOSE), (
        ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], [789, -123, 2, 4, 456, -888222, 1, 0]),
        ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), [789, -123, 2, 4, 456, -888222, 1, 0]),
        ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (
            {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
            ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])
        ),
        ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], [2, 2, 2, 2, 2, 0, 0]),
        ("1234567890", [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]),
        (b"\x00\x00\x00\x00", [0, 0, 0, 0]),
        (b"abcdef", [97, 98, 99, 100, 101, 102]),  # list(bytes) returns a list of integers (ASCII values)!
        (bytearray(b"abcdef"), [97, 98, 99, 100, 101, 102]),  # list(bytes) returns a list of integers (ASCII values)!
        (range(5, 15), [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
        (sorted((100, 5, 849, 2, -456, 999)), [-456, 2, 5, 100, 849, 999]),
        (sorted("18754522"), [1, 2, 2, 4, 5, 5, 7, 8]),
        (sorted(b"cabfdeee"), [97, 98, 99, 100, 101, 101, 101, 102]),
        (sorted(bytearray(b"cabfdeee")), [97, 98, 99, 100, 101, 101, 101, 102]),
        ((i * i for i in range(10)), [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]),
        (map(lambda x: x + "000", ("1", "2", "3")), [1000, 2000, 3000]),
        (map(lambda x: x ** 2, range(5)), [0, 1, 4, 9, 16]),
        (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), [123, 789456, 9]),
        (IterableObject([]), []),
        (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), [-555, 2, 1, 123000, 999]),
        (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list([-789, 0, 5])),
        (IterableObject(range(1, 10, 2)), [1, 3, 5, 7, 9]),
        (IterableObject("886644"), [8, 8, 6, 6, 4, 4]),
        (IterableObject(b"abc"), [97, 98, 99]),
        (IterableObject(bytearray(b"abc")), [97, 98, 99]),
        (ExceptionRaisingIterableObject(raise_=False), [-123]),
        ([], []),
        (tuple(), []),
        (set(), []),
        (dict(), []),
        ("", []),
        (b"", []),
        (("abc" for _ in range(0)), []),
        (("abc" for _ in range(1)), InputDataNotConvertibleExc),
        ((theoretical_testutils.EmptyObject() for _ in range(0)), []),
        ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
        (map(lambda x: str(x) + "t", (1, 2, 3)), InputDataNotConvertibleExc),
        (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
        ([789, float("inf"), True], InputDataNotConvertibleExc),
        ([789, float("-inf"), True], InputDataNotConvertibleExc),
        ([789, float("nan"), True], InputDataNotConvertibleExc),
        ([789, "", True], InputDataNotConvertibleExc),
        ((789, "", True), InputDataNotConvertibleExc),
        ({789, "", True}, InputDataNotConvertibleExc),
        ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataNotConvertibleExc),
        ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], InputDataTypeNotInAllowlistExc),
        ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
        ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], InputDataTypeNotInAllowlistExc),
        ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
        ("123a456", InputDataNotConvertibleExc),
        ("-123", InputDataNotConvertibleExc),
        ("123_000", InputDataNotConvertibleExc),
        ("hello", InputDataNotConvertibleExc),
        (None, InputDataNotConvertibleExc),
        (False, InputDataNotConvertibleExc),
        (True, InputDataNotConvertibleExc),
        (-123, InputDataNotConvertibleExc),
        (0, InputDataNotConvertibleExc),
        (123, InputDataNotConvertibleExc),
        (-123.5, InputDataNotConvertibleExc),
        (-0.0, InputDataNotConvertibleExc),
        (0.0, InputDataNotConvertibleExc),
        (123.5, InputDataNotConvertibleExc),
        (float("inf"), InputDataNotConvertibleExc),
        (float("nan"), InputDataNotConvertibleExc),
        (int, InputDataNotConvertibleExc),
        (theoretical_testutils.EmptyObject, InputDataNotConvertibleExc),
        (datetime.datetime.now(), InputDataNotConvertibleExc),
        (datetime.datetime.now().date(), InputDataNotConvertibleExc),
        (datetime.datetime.now().time(), InputDataNotConvertibleExc),
        (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc),
        (ipaddress.ip_address("::1"), InputDataNotConvertibleExc),
        (ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc),  # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
        (ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc),  # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
        (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), InputDataNotConvertibleExc),
        (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc),
        (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc),
        (IterableObject([1, "", 3]), InputDataNotConvertibleExc),
        (IterableObject([1, "hello", 3]), InputDataNotConvertibleExc),
        (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc),
        (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc),
    )),
    (ListBlueprint(item_blueprint=IntegerBlueprint(), parsing_mode=ParsingMode.MODE_RATIONAL), (
        ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], [789, -123, 2, 4, 456, -888222, 1, 0]),
        ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), [789, -123, 2, 4, 456, -888222, 1, 0]),
        ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (
            {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
            InputDataTypeInBlocklistExc
        ),
        ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], [2, 2, 2, 2, 2, 0, 0]),
        ("1234567890", InputDataTypeInBlocklistExc),
        (b"\x00\x00\x00\x00", InputDataTypeInBlocklistExc),
        (b"abcdef", InputDataTypeInBlocklistExc),
        (bytearray(b"abcdef"), InputDataTypeInBlocklistExc),
        (range(5, 15), [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
        (sorted((100, 5, 849, 2, -456, 999)), [-456, 2, 5, 100, 849, 999]),
        (sorted("18754522"), [1, 2, 2, 4, 5, 5, 7, 8]),
        (sorted(b"cabfdeee"), [97, 98, 99, 100, 101, 101, 101, 102]),
        (sorted(bytearray(b"cabfdeee")), [97, 98, 99, 100, 101, 101, 101, 102]),
        ((i * i for i in range(10)), [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]),
        (map(lambda x: x + "000", ("1", "2", "3")), [1000, 2000, 3000]),
        (map(lambda x: x ** 2, range(5)), [0, 1, 4, 9, 16]),
        (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), [123, 789456, 9]),
        (IterableObject([]), []),
        (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), [-555, 2, 1, 123000, 999]),
        (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list([-789, 0, 5])),  # The blueprint only sees 'IterableObject', not 'dict', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
        (IterableObject(range(1, 10, 2)), [1, 3, 5, 7, 9]),
        (IterableObject("886644"), [8, 8, 6, 6, 4, 4]),  # The blueprint only sees 'IterableObject', not 'str', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
        (IterableObject(b"abc"), [97, 98, 99]),  # The blueprint only sees 'IterableObject', not 'bytes', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
        (IterableObject(bytearray(b"abc")), [97, 98, 99]),  # The blueprint only sees 'IterableObject', not 'bytearray', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
        (ExceptionRaisingIterableObject(raise_=False), [-123]),
        ([], []),
        (tuple(), []),
        (set(), []),
        (dict(), InputDataTypeInBlocklistExc),
        ("", InputDataTypeInBlocklistExc),
        (b"", InputDataTypeInBlocklistExc),
        (("abc" for _ in range(0)), []),
        (("abc" for _ in range(1)), InputDataNotConvertibleExc),
        ((theoretical_testutils.EmptyObject() for _ in range(0)), []),
        ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
        (map(lambda x: str(x) + "t", (1, 2, 3)), InputDataNotConvertibleExc),
        (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
        ([789, float("inf"), True], InputDataNotConvertibleExc),
        ([789, float("-inf"), True], InputDataNotConvertibleExc),
        ([789, float("nan"), True], InputDataNotConvertibleExc),
        ([789, "", True], InputDataNotConvertibleExc),
        ((789, "", True), InputDataNotConvertibleExc),
        ({789, "", True}, InputDataNotConvertibleExc),
        ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeInBlocklistExc),
        ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], InputDataTypeNotInAllowlistExc),
        ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
        ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], InputDataTypeNotInAllowlistExc),
        ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
        ("123a456", InputDataTypeInBlocklistExc),
        ("-123", InputDataTypeInBlocklistExc),
        ("123_000", InputDataTypeInBlocklistExc),
        ("hello", InputDataTypeInBlocklistExc),
        (None, InputDataNotConvertibleExc),
        (False, InputDataNotConvertibleExc),
        (True, InputDataNotConvertibleExc),
        (-123, InputDataNotConvertibleExc),
        (0, InputDataNotConvertibleExc),
        (123, InputDataNotConvertibleExc),
        (-123.5, InputDataNotConvertibleExc),
        (-0.0, InputDataNotConvertibleExc),
        (0.0, InputDataNotConvertibleExc),
        (123.5, InputDataNotConvertibleExc),
        (float("inf"), InputDataNotConvertibleExc),
        (float("nan"), InputDataNotConvertibleExc),
        (int, InputDataNotConvertibleExc),
        (theoretical_testutils.EmptyObject, InputDataNotConvertibleExc),
        (datetime.datetime.now(), InputDataNotConvertibleExc),
        (datetime.datetime.now().date(), InputDataNotConvertibleExc),
        (datetime.datetime.now().time(), InputDataNotConvertibleExc),
        (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc),
        (ipaddress.ip_address("::1"), InputDataNotConvertibleExc),
        (ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc),  # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
        (ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc),  # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
        (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), InputDataNotConvertibleExc),
        (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc),
        (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc),
        (IterableObject([1, "", 3]), InputDataNotConvertibleExc),
        (IterableObject([1, "hello", 3]), InputDataNotConvertibleExc),
        (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc),
        (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc),
    )),
    (ListBlueprint(item_blueprint=IntegerBlueprint(), parsing_mode=ParsingMode.MODE_STRICT), (
        ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], [789, -123, 2, 4, 456, -888222, 1, 0]),
        ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), [789, -123, 2, 4, 456, -888222, 1, 0]),
        ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
        (
            {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
            InputDataTypeNotInAllowlistExc
        ),
        ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], [2, 2, 2, 2, 2, 0, 0]),
        ("1234567890", InputDataTypeNotInAllowlistExc),
        (b"\x00\x00\x00\x00", InputDataTypeNotInAllowlistExc),
        (b"abcdef", InputDataTypeNotInAllowlistExc),
        (bytearray(b"abcdef"), InputDataTypeNotInAllowlistExc),
        (range(5, 15), InputDataTypeNotInAllowlistExc),
        (sorted((100, 5, 849, 2, -456, 999)), [-456, 2, 5, 100, 849, 999]),  # sorted() returns a list object no matter what its input iterable was!
        (sorted("18754522"), [1, 2, 2, 4, 5, 5, 7, 8]),  # sorted() returns a list object no matter what its input iterable was!
        (sorted(b"cabfdeee"), [97, 98, 99, 100, 101, 101, 101, 102]),  # sorted() returns a list object no matter what its input iterable was!
        (sorted(bytearray(b"cabfdeee")), [97, 98, 99, 100, 101, 101, 101, 102]),  # sorted() returns a list object no matter what its input iterable was!
        ((i * i for i in range(10)), InputDataTypeNotInAllowlistExc),
        (map(lambda x: x + "000", ("1", "2", "3")), InputDataTypeNotInAllowlistExc),
        (map(lambda x: x ** 2, range(5)), InputDataTypeNotInAllowlistExc),
        (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), InputDataTypeNotInAllowlistExc),
        (IterableObject([]), InputDataTypeNotInAllowlistExc),
        (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), InputDataTypeNotInAllowlistExc),
        (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), InputDataTypeNotInAllowlistExc),
        (IterableObject(range(1, 10, 2)), InputDataTypeNotInAllowlistExc),
        (IterableObject("886644"), InputDataTypeNotInAllowlistExc),
        (IterableObject(b"abc"), InputDataTypeNotInAllowlistExc),
        (IterableObject(bytearray(b"abc")), InputDataTypeNotInAllowlistExc),
        (ExceptionRaisingIterableObject(raise_=False), InputDataTypeNotInAllowlistExc),
        ([], []),
        (tuple(), []),
        (set(), []),
        (dict(), InputDataTypeNotInAllowlistExc),
        ("", InputDataTypeNotInAllowlistExc),
        (b"", InputDataTypeNotInAllowlistExc),
        (("abc" for _ in range(0)), InputDataTypeNotInAllowlistExc),
        (("abc" for _ in range(1)), InputDataTypeNotInAllowlistExc),
        ((theoretical_testutils.EmptyObject() for _ in range(0)), InputDataTypeNotInAllowlistExc),
        ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
        (map(lambda x: str(x) + "t", (1, 2, 3)), InputDataTypeNotInAllowlistExc),
        (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
        ([789, float("inf"), True], InputDataNotConvertibleExc),
        ([789, float("-inf"), True], InputDataNotConvertibleExc),
        ([789, float("nan"), True], InputDataNotConvertibleExc),
        ([789, "", True], InputDataNotConvertibleExc),
        ((789, "", True), InputDataNotConvertibleExc),
        ({789, "", True}, InputDataNotConvertibleExc),
        ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeNotInAllowlistExc),
        ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], InputDataTypeNotInAllowlistExc),
        ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
        ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], InputDataTypeNotInAllowlistExc),
        ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
        ("123a456", InputDataTypeNotInAllowlistExc),
        ("-123", InputDataTypeNotInAllowlistExc),
        ("123_000", InputDataTypeNotInAllowlistExc),
        ("hello", InputDataTypeNotInAllowlistExc),
        (None, InputDataTypeNotInAllowlistExc),
        (False, InputDataTypeNotInAllowlistExc),
        (True, InputDataTypeNotInAllowlistExc),
        (-123, InputDataTypeNotInAllowlistExc),
        (0, InputDataTypeNotInAllowlistExc),
        (123, InputDataTypeNotInAllowlistExc),
        (-123.5, InputDataTypeNotInAllowlistExc),
        (-0.0, InputDataTypeNotInAllowlistExc),
        (0.0, InputDataTypeNotInAllowlistExc),
        (123.5, InputDataTypeNotInAllowlistExc),
        (float("inf"), InputDataTypeNotInAllowlistExc),
        (float("nan"), InputDataTypeNotInAllowlistExc),
        (int, InputDataTypeNotInAllowlistExc),
        (theoretical_testutils.EmptyObject, InputDataTypeNotInAllowlistExc),
        (datetime.datetime.now(), InputDataTypeNotInAllowlistExc),
        (datetime.datetime.now().date(), InputDataTypeNotInAllowlistExc),
        (datetime.datetime.now().time(), InputDataTypeNotInAllowlistExc),
        (ipaddress.ip_address("127.0.0.1"), InputDataTypeNotInAllowlistExc),
        (ipaddress.ip_address("::1"), InputDataTypeNotInAllowlistExc),
        (ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc),
        (ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc),
        (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), InputDataNotConvertibleExc),  # ParseResult is a subclass of tuple!!!
        (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataTypeNotInAllowlistExc),
        (theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, "", 3]), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, "hello", 3]), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc),
        (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc),
        (ExceptionRaisingIterableObject(raise_=True), InputDataTypeNotInAllowlistExc),
    )),
    (ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_LOOSE), (
        ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]),
        ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]),
        ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])),
        (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])),
        (
            {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
            ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])
        ),
        ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], ["2.001", "2.499", "2.5", "2.501", "2.999", "0.0", "-0.0"]),
        ("1234567890", ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]),
        (b"\x00\x00\x00\x00", ["0", "0", "0", "0"]),
        (b"abcdef", ["97", "98", "99", "100", "101", "102"]),  # list(bytes) returns a list of integers (ASCII values)!
        (bytearray(b"abcdef"), ["97", "98", "99", "100", "101", "102"]),  # list(bytes) returns a list of integers (ASCII values)!
        (range(5, 15), ["5", "6", "7", "8", "9", "10", "11", "12", "13", "14"]),
        (sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]),
        (sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]),
        (sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]),
        (sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]),
        ((i * i for i in range(10)), ["0", "1", "4", "9", "16", "25", "36", "49", "64", "81"]),
        (map(lambda x: x + "000", ("1", "2", "3")), ["1000", "2000", "3000"]),
        (map(lambda x: x ** 2, range(5)), ["0", "1", "4", "9", "16"]),
        (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), ["123", "789456", "\r\n9\t"]),
        (IterableObject([]), []),
        (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), ["-555", "2.999", "True", "\v+123_000\f", "999"]),
        (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list(["-789", "False", "5.5"])),
        (IterableObject(range(1, 10, 2)), ["1", "3", "5", "7", "9"]),
        (IterableObject("886644"), ["8", "8", "6", "6", "4", "4"]),
        (IterableObject(b"abc"), ["97", "98", "99"]),
        (IterableObject(bytearray(b"abc")), ["97", "98", "99"]),
        (ExceptionRaisingIterableObject(raise_=False), ["-123"]),
        ([], []),
        (tuple(), []),
        (set(), []),
        (dict(), []),
        ("", []),
        (b"", []),
        (("abc" for _ in range(0)), []),
        (("abc" for _ in range(1)), ["abc"]),
        ((theoretical_testutils.EmptyObject() for _ in range(0)), []),
        ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
        (map(lambda x: str(x) + "t", (1, 2, 3)), ["1t", "2t", "3t"]),
        (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
        ([789, float("inf"), True], ["789", "inf", "True"]),
        ([789, float("-inf"), True], ["789", "-inf", "True"]),
        ([789, float("nan"), True], ["789", "nan", "True"]),
        ([789, "", True], ["789", "", "True"]),
        ((789, "", True), ["789", "", "True"]),
        ({789, "", True}, ignore_order_of_output_list(["789", "", "True"])),
        ([789, "Hello World!", True], ["789", "Hello World!", "True"]),
        ((789, "Hello World!", True), ["789", "Hello World!", "True"]),
        ({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])),
        ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, ignore_order_of_output_list(["789", "", "True"])),
        ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]),
        ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
        ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]),
        ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
        ("123a456", ["1", "2", "3", "a", "4", "5", "6"]),
        ("-123", ["-", "1", "2", "3"]),
        ("123_000", ["1", "2", "3", "_", "0", "0", "0"]),
        ("hello", ["h", "e", "l", "l", "o"]),
        (None, InputDataNotConvertibleExc),
        (False, InputDataNotConvertibleExc),
        (True, InputDataNotConvertibleExc),
        (-123, InputDataNotConvertibleExc),
        (0, InputDataNotConvertibleExc),
        (123, InputDataNotConvertibleExc),
        (-123.5, InputDataNotConvertibleExc),
        (-0.0, InputDataNotConvertibleExc),
        (0.0, InputDataNotConvertibleExc),
        (123.5, InputDataNotConvertibleExc),
        (float("inf"), InputDataNotConvertibleExc),
        (float("nan"), InputDataNotConvertibleExc),
        (int, InputDataNotConvertibleExc),
        (theoretical_testutils.EmptyObject, InputDataNotConvertibleExc),
        (datetime.datetime.now(), InputDataNotConvertibleExc),
(datetime.datetime.now().date(), InputDataNotConvertibleExc), (datetime.datetime.now().time(), InputDataNotConvertibleExc), (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc), (ipaddress.ip_address("::1"), InputDataNotConvertibleExc), (ipaddress.ip_network("127.0.0.0/30"), ["127.0.0.0", "127.0.0.1", "127.0.0.2", "127.0.0.3"]), (ipaddress.ip_network("2001:db8::/126"), ["2001:db8::", "2001:db8::1", "2001:db8::2", "2001:db8::3"]), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), (IterableObject([1, "", 3]), ["1", "", "3"]), (IterableObject([1, "hello", 3]), ["1", "hello", "3"]), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_RATIONAL), ( ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), ( {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"}, InputDataTypeInBlocklistExc ), ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], ["2.001", "2.499", "2.5", "2.501", "2.999", "0.0", "-0.0"]), ("1234567890", InputDataTypeInBlocklistExc), (b"\x00\x00\x00\x00", InputDataTypeInBlocklistExc), (b"abcdef", InputDataTypeInBlocklistExc), # list(bytes) returns a list of integers (ASCII values)! (bytearray(b"abcdef"), InputDataTypeInBlocklistExc), # list(bytes) returns a list of integers (ASCII values)! 
(range(5, 15), ["5", "6", "7", "8", "9", "10", "11", "12", "13", "14"]), (sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]), (sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]), (sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]), (sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]), ((i * i for i in range(10)), ["0", "1", "4", "9", "16", "25", "36", "49", "64", "81"]), (map(lambda x: x + "000", ("1", "2", "3")), ["1000", "2000", "3000"]), (map(lambda x: x ** 2, range(5)), ["0", "1", "4", "9", "16"]), (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), ["123", "789456", "\r\n9\t"]), (IterableObject([]), []), (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), ["-555", "2.999", "True", "\v+123_000\f", "999"]), (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list(["-789", "False", "5.5"])), (IterableObject(range(1, 10, 2)), ["1", "3", "5", "7", "9"]), (IterableObject("886644"), ["8", "8", "6", "6", "4", "4"]), (IterableObject(b"abc"), ["97", "98", "99"]), (IterableObject(bytearray(b"abc")), ["97", "98", "99"]), (ExceptionRaisingIterableObject(raise_=False), ["-123"]), ([], []), (tuple(), []), (set(), []), (dict(), InputDataTypeInBlocklistExc), ("", InputDataTypeInBlocklistExc), (b"", InputDataTypeInBlocklistExc), (("abc" for _ in range(0)), []), (("abc" for _ in range(1)), ["abc"]), ((theoretical_testutils.EmptyObject() for _ in range(0)), []), ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc), (map(lambda x: str(x) + "t", (1, 2, 3)), ["1t", "2t", "3t"]), (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc), ([789, float("inf"), True], ["789", "inf", "True"]), ([789, float("-inf"), True], ["789", "-inf", "True"]), ([789, float("nan"), True], ["789", "nan", "True"]), ([789, "", True], ["789", "", "True"]), ((789, "", True), ["789", "", "True"]), ({789, "", True}, ignore_order_of_output_list(["789", "", "True"])), ([789, "Hello World!", True], ["789", "Hello World!", "True"]), ((789, "Hello World!", True), ["789", "Hello World!", "True"]), ({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])), ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeInBlocklistExc), ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]), ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc), ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), ("123a456", InputDataTypeInBlocklistExc), ("-123", InputDataTypeInBlocklistExc), ("123_000", InputDataTypeInBlocklistExc), ("hello", InputDataTypeInBlocklistExc), (None, InputDataNotConvertibleExc), (False, InputDataNotConvertibleExc), (True, InputDataNotConvertibleExc), (-123, InputDataNotConvertibleExc), (0, InputDataNotConvertibleExc), (123, InputDataNotConvertibleExc), (-123.5, InputDataNotConvertibleExc), (-0.0, InputDataNotConvertibleExc), (0.0, InputDataNotConvertibleExc), (123.5, InputDataNotConvertibleExc), (float("inf"), InputDataNotConvertibleExc), (float("nan"), InputDataNotConvertibleExc), (int, InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject, InputDataNotConvertibleExc), (datetime.datetime.now(), 
InputDataNotConvertibleExc), (datetime.datetime.now().date(), InputDataNotConvertibleExc), (datetime.datetime.now().time(), InputDataNotConvertibleExc), (ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc), (ipaddress.ip_address("::1"), InputDataNotConvertibleExc), (ipaddress.ip_network("127.0.0.0/30"), ["127.0.0.0", "127.0.0.1", "127.0.0.2", "127.0.0.3"]), (ipaddress.ip_network("2001:db8::/126"), ["2001:db8::", "2001:db8::1", "2001:db8::2", "2001:db8::3"]), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), (IterableObject([1, "", 3]), ["1", "", "3"]), (IterableObject([1, "hello", 3]), ["1", "hello", "3"]), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_STRICT), ( ([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]), ({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), (frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])), ( {789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"}, InputDataTypeNotInAllowlistExc ), ([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], ["2.001", "2.499", "2.5", "2.501", "2.999", "0.0", "-0.0"]), ("1234567890", InputDataTypeNotInAllowlistExc), (b"\x00\x00\x00\x00", InputDataTypeNotInAllowlistExc), (b"abcdef", InputDataTypeNotInAllowlistExc), # list(bytes) returns a list of integers (ASCII values)! (bytearray(b"abcdef"), InputDataTypeNotInAllowlistExc), # list(bytes) returns a list of integers (ASCII values)! 
(range(5, 15), InputDataTypeNotInAllowlistExc), (sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]), (sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]), (sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]), (sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]), ((i * i for i in range(10)), InputDataTypeNotInAllowlistExc), (map(lambda x: x + "000", ("1", "2", "3")), InputDataTypeNotInAllowlistExc), (map(lambda x: x ** 2, range(5)), InputDataTypeNotInAllowlistExc), (filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), InputDataTypeNotInAllowlistExc), (IterableObject([]), InputDataTypeNotInAllowlistExc), (IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), InputDataTypeNotInAllowlistExc), (IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), InputDataTypeNotInAllowlistExc), (IterableObject(range(1, 10, 2)), InputDataTypeNotInAllowlistExc), (IterableObject("886644"), InputDataTypeNotInAllowlistExc), (IterableObject(b"abc"), InputDataTypeNotInAllowlistExc), (IterableObject(bytearray(b"abc")), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=False), InputDataTypeNotInAllowlistExc), ([], []), (tuple(), []), (set(), []), (dict(), InputDataTypeNotInAllowlistExc), ("", InputDataTypeNotInAllowlistExc), (b"", InputDataTypeNotInAllowlistExc), (("abc" for _ in range(0)), InputDataTypeNotInAllowlistExc), (("abc" for _ in range(1)), InputDataTypeNotInAllowlistExc), ((theoretical_testutils.EmptyObject() for _ in range(0)), InputDataTypeNotInAllowlistExc), ((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc), (map(lambda x: str(x) + "t", (1, 2, 3)), InputDataTypeNotInAllowlistExc), (map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc), ([789, float("inf"), True], ["789", "inf", "True"]), ([789, float("-inf"), True], ["789", "-inf", "True"]), ([789, float("nan"), True], ["789", "nan", "True"]), ([789, "", True], ["789", "", "True"]), ((789, "", True), ["789", "", "True"]), ({789, "", True}, ignore_order_of_output_list(["789", "", "True"])), ([789, "Hello World!", True], ["789", "Hello World!", "True"]), ((789, "Hello World!", True), ["789", "Hello World!", "True"]), ({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])), ({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeNotInAllowlistExc), ([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]), ([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc), ([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), ("123a456", InputDataTypeNotInAllowlistExc), ("-123", InputDataTypeNotInAllowlistExc), ("123_000", InputDataTypeNotInAllowlistExc), ("hello", InputDataTypeNotInAllowlistExc), (None, InputDataTypeNotInAllowlistExc), (False, InputDataTypeNotInAllowlistExc), (True, InputDataTypeNotInAllowlistExc), (-123, InputDataTypeNotInAllowlistExc), (0, InputDataTypeNotInAllowlistExc), (123, InputDataTypeNotInAllowlistExc), (-123.5, InputDataTypeNotInAllowlistExc), (-0.0, InputDataTypeNotInAllowlistExc), (0.0, InputDataTypeNotInAllowlistExc), (123.5, InputDataTypeNotInAllowlistExc), (float("inf"), InputDataTypeNotInAllowlistExc), (float("nan"), 
InputDataTypeNotInAllowlistExc), (int, InputDataTypeNotInAllowlistExc), (theoretical_testutils.EmptyObject, InputDataTypeNotInAllowlistExc), (datetime.datetime.now(), InputDataTypeNotInAllowlistExc), (datetime.datetime.now().date(), InputDataTypeNotInAllowlistExc), (datetime.datetime.now().time(), InputDataTypeNotInAllowlistExc), (ipaddress.ip_address("127.0.0.1"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_address("::1"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc), (ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc), (urllib.parse.urlparse("https://www.google.cz/test?abc=def"), ["https", "www.google.cz", "/test", "", "abc=def", ""]), # ParseResult is a subclass of tuple!!! (uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataTypeNotInAllowlistExc), (theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc), (IterableObject([1, "", 3]), InputDataTypeNotInAllowlistExc), (IterableObject([1, "hello", 3]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc), (IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc), (ExceptionRaisingIterableObject(raise_=True), InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListDeduplicateItemsFilter(),)), ( (["1", 2, 3.1], [1, 2, 3]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (["1", True, 2.9, "\r\n2\t", "\v\f 3 ", 3], [1, 2, 3]), ((float(i % 2) for i in range(20)), [0, 1]), ([1, 2, 2, 2, 3, 3, 4], [1, 2, 3, 4]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListSortFilter(None, reverse_order=False),)), ( ([], []), ([123], [123]), ([100, True, -100, "\r\n000_3 ", 0, 2.999, 4, "6", 5], [-100, 0, 1, 2, 3, 4, 5, 6, 100]), (range(10, 0, -1), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), ([1, 1, 2, 1, 3, 5, 4, 4, 5, 2, 3], [1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), ([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]), ((str(i) for i in range(10)), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), filters=(ListSortFilter(None, reverse_order=True),)), ( ([], []), ([123], [123]), ([100, True, -100, "\r\n000_3 ", 0, 2.999, 4, "6", 5], [100, 6, 5, 4, 3, 2, 1, 0, -100]), (range(10, 0, -1), [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), ([1, 1, 2, 1, 3, 5, 4, 4, 5, 2, 3], [5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 1]), ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]), ((str(i) for i in range(10)), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(lambda item: item.get_id(), reverse_order=False),)), ( ([], []), ( [CustomTestListItem(3, "a"), CustomTestListItem(1, "c"), CustomTestListItem(2, "b")], [CustomTestListItem(1, "c"), CustomTestListItem(2, "b"), CustomTestListItem(3, "a")] ), ( (CustomTestListItem(i, string.ascii_uppercase[-((i % 3) + 1)] * 3) for i in range(5)), [CustomTestListItem(0, "ZZZ"), CustomTestListItem(1, "YYY"), CustomTestListItem(2, "XXX"), CustomTestListItem(3, "ZZZ"), CustomTestListItem(4, "YYY")] ), (theoretical_testutils.EmptyObject(), 
InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(lambda item: item.get_id(), reverse_order=True),)), ( ([], []), ( [CustomTestListItem(3, "a"), CustomTestListItem(1, "c"), CustomTestListItem(2, "b")], [CustomTestListItem(3, "a"), CustomTestListItem(2, "b"), CustomTestListItem(1, "c")] ), ( (CustomTestListItem(i, string.ascii_uppercase[-((i % 3) + 1)] * 3) for i in range(5)), [CustomTestListItem(4, "YYY"), CustomTestListItem(3, "ZZZ"), CustomTestListItem(2, "XXX"), CustomTestListItem(1, "YYY"), CustomTestListItem(0, "ZZZ")] ), ([theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), # sorted()'s implementation detail - this raises exception there, but it does not below! ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(),)), ( ([], []), ([789], [789]), ([3, 1, 2], [1, 2, 3]), ([1, 3, 2.5, 2, 1.5, 3.5], [1, 1.5, 2, 2.5, 3, 3.5]), ([theoretical_testutils.EmptyObject()], [theoretical_testutils.EmptyObject()]), # sorted()'s implementation detail - this doesn't raise exception there, but it does above! ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(lambda item: theoretical_testutils.EmptyObject()),)), ( ([], []), ([789], [789]), # sorted()'s implementation detail - this doesn't raise exception there, but it does above! ([3, 1, 2], SortingFailedInFilterExc), ([1, 3, 2.5, 2, 1.5, 3.5], SortingFailedInFilterExc), ([theoretical_testutils.EmptyObject()], [theoretical_testutils.EmptyObject()]), # sorted()'s implementation detail - this doesn't raise exception there, but it does above! ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(lambda: 1),)), ( ([], []), ([789], SortingFailedInFilterExc), # sorted()'s implementation detail - this raises exception there, but it doesn't above! ([3, 1, 2], SortingFailedInFilterExc), ([1, 3, 2.5, 2, 1.5, 3.5], SortingFailedInFilterExc), ([theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), # sorted()'s implementation detail - this raises exception there, but it doesn't above! ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=GenericBlueprint(), filters=(ListSortFilter(exception_raising_comparison_key_extraction_function),)), ( ([], []), ([789], SortingFailedInFilterExc), # sorted()'s implementation detail - this raises exception there, but it doesn't above! 
([3, 1, 2], SortingFailedInFilterExc), ([1, 3, 2.5, 2, 1.5, 3.5], SortingFailedInFilterExc), ([theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), # sorted()'s implementation detail - this raises exception there, but it doesn't above! ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], SortingFailedInFilterExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceContainsItemValidator(5, negate=False),)), ( ([True, "\r\n2 ", 3.9, 4, "\t 5\v \f "], [1, 2, 3, 4, 5]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), ([False, False, 4, 5.5, "\r 5\v", 5, 777], [0, 0, 4, 5, 5, 5, 777]), ([True, 2, 3.8, "\n 4\t", "6", 789], DataValidationFailedExc), (filter(lambda x: (x % 5) != 0, range(15)), DataValidationFailedExc), ([1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 777], DataValidationFailedExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceContainsItemValidator(5, negate=True),)), ( ([True, "\r\n2 ", 3.9, 4, "\t 5\v \f "], DataValidationFailedExc), (range(10), DataValidationFailedExc), ([False, False, 4, 5.5, "\r 5\v", 5, 777], DataValidationFailedExc), ([True, 2, 3.8, "\n 4\t", "6", 789], [1, 2, 3, 4, 6, 789]), (filter(lambda x: (x % 5) != 0, range(15)), [1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14]), ([1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 777], [1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 777]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceHasAllItemsUniqueValidator(),)), ( (["1", 2, 3.1], [1, 2, 3]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (["1", True, 2.9, "\r\n2\t", "\v\f 3 ", 3], DataValidationFailedExc), ((float(i % 2) for i in range(20)), DataValidationFailedExc), ([1, 2, 2, 2, 3, 3, 4], DataValidationFailedExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceIsNotEmptyValidator(negate=False),)), ( ([True], [1]), ([True, 2.9, "3", 4, "\n\r 5\t \v"], [1, 2, 3, 4, 5]), (range(1), [0]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), ([1, 2, 3], [1, 2, 3]), ([], DataValidationFailedExc), (range(0), DataValidationFailedExc), (filter(lambda x: x > 100, range(20)), DataValidationFailedExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceIsNotEmptyValidator(negate=True),)), ( ([True], DataValidationFailedExc), ([True, 2.9, "3", 4, "\n\r 5\t \v"], DataValidationFailedExc), (range(1), DataValidationFailedExc), (range(10), DataValidationFailedExc), ([1, 2, 3], DataValidationFailedExc), ([], []), (range(0), []), (filter(lambda x: x > 100, range(20)), []), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceMaximumLengthValidator(3),)), ( ([], []), (range(0), []), ([True], [1]), (range(1), [0]), ([True, 2.9], [1, 2]), (range(2), [0, 1]), ([True, 2.9, "\r\n003 
\t"], [1, 2, 3]), (map(lambda x: float(x ** 2), range(3)), [0, 1, 4]), ([True, 2.9, "\r\n003 \t", 4], DataValidationFailedExc), (map(lambda x: float(x ** 2), range(4)), DataValidationFailedExc), ([True, 2.9, "\r\n003 \t", 4, "\v 000_005 "], DataValidationFailedExc), (map(lambda x: float(x ** 2), range(5)), DataValidationFailedExc), (range(10), DataValidationFailedExc), (map(lambda x: "\n\r000_000_000_" + str(x) + " \f\f\v", range(10)), DataValidationFailedExc), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint(item_blueprint=IntegerBlueprint(), validators=(SequenceMinimumLengthValidator(3),)), ( ([], DataValidationFailedExc), (range(0), DataValidationFailedExc), ([True], DataValidationFailedExc), (range(1), DataValidationFailedExc), ([True, 2.9], DataValidationFailedExc), (range(2), DataValidationFailedExc), ([True, 2.9, "\r\n003 \t"], [1, 2, 3]), (map(lambda x: float(x ** 2), range(3)), [0, 1, 4]), ([True, 2.9, "\r\n003 \t", 4], [1, 2, 3, 4]), (map(lambda x: float(x ** 2), range(4)), [0, 1, 4, 9]), ([True, 2.9, "\r\n003 \t", 4, "\v 000_005 "], [1, 2, 3, 4, 5]), (map(lambda x: float(x ** 2), range(5)), [0, 1, 4, 9, 16]), (range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (map(lambda x: "\n\r000_000_000_" + str(x) + " \f\f\v", range(10)), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), )), (ListBlueprint( item_blueprint=ListBlueprint(item_blueprint=IntegerBlueprint()) ), ( (1, InputDataNotConvertibleExc), ([1, 2, 3], InputDataNotConvertibleExc), ([[1, 2, 3], [1, 2], 1], InputDataNotConvertibleExc), ([[1, 2, 3], [1, 2], [1]], [[1, 2, 3], [1, 2], [1]]), ([(1, 2, 3), (1, 2), (1,)], [[1, 2, 3], [1, 2], [1]]), (((1, 2, 3), (1, 2), (1,)), [[1, 2, 3], [1, 2], [1]]), ([], []), ((), []), ([[]], [[]]), ([()], [[]]), (([],), [[]]), (((),), [[]]), (theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc), ([theoretical_testutils.EmptyObject()], InputDataNotConvertibleExc), ([[theoretical_testutils.EmptyObject()]], InputDataTypeNotInAllowlistExc), )), (ListBlueprint( # "Real use case" simulation - a list of IDs received from a client. 
item_blueprint=IntegerBlueprint( parsing_mode=ParsingMode.MODE_STRICT, validators=(IntegerIsPositiveValidator(), NumberMaximumValueValidator(2**31 - 1)) ), filters=(ListDeduplicateItemsFilter(), ListSortFilter()), validators=(SequenceIsNotEmptyValidator(), SequenceMaximumLengthValidator(5)), parsing_mode=ParsingMode.MODE_STRICT ), ( (range(3), InputDataTypeNotInAllowlistExc), ((i ** 2 for i in range(4)), InputDataTypeNotInAllowlistExc), (map(lambda x: x ** 2, range(4)), InputDataTypeNotInAllowlistExc), ("123", InputDataTypeNotInAllowlistExc), (b'abcd', InputDataTypeNotInAllowlistExc), (bytearray(b'xyz'), InputDataTypeNotInAllowlistExc), (dict(), InputDataTypeNotInAllowlistExc), ({1: 2, 3: 4}, InputDataTypeNotInAllowlistExc), (123, InputDataTypeNotInAllowlistExc), (123.456, InputDataTypeNotInAllowlistExc), (True, InputDataTypeNotInAllowlistExc), (float("inf"), InputDataTypeNotInAllowlistExc), (float("nan"), InputDataTypeNotInAllowlistExc), (None, InputDataTypeNotInAllowlistExc), (theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc), (object(), InputDataTypeNotInAllowlistExc), ([1, 2.0, 3], InputDataTypeNotInAllowlistExc), ([1, "2", 3], InputDataTypeNotInAllowlistExc), ([1, None, 3], InputDataTypeNotInAllowlistExc), ([1, theoretical_testutils.EmptyObject(), 3], InputDataTypeNotInAllowlistExc), (["123"], InputDataTypeNotInAllowlistExc), ([None], InputDataTypeNotInAllowlistExc), ([[], []], InputDataTypeNotInAllowlistExc), ([{}, {}], InputDataTypeNotInAllowlistExc), ([theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc), ([1, 0, 2], DataValidationFailedExc), ([-100], DataValidationFailedExc), ([1, 2**31, 2], DataValidationFailedExc), ([2**64], DataValidationFailedExc), ([], DataValidationFailedExc), ([1, 2, 3, 4, 5, 6], DataValidationFailedExc), (list(range(10, 100)), DataValidationFailedExc), ([1, 2, 2, 3], [1, 2, 3]), ([1] * 50, [1]), ([1, 2, 3, 4, 5] * 20, [1, 2, 3, 4, 5]), ([3], [3]), ([5, 1, 2, 4, 3], [1, 2, 3, 4, 5]), ([3, 3, 1, 2, 1, 3, 2, 1, 1, 1, 2, 3, 2, 1, 1, 2, 3, 2], [1, 2, 3]), ((1, 2.0, 3), InputDataTypeNotInAllowlistExc), ((1, "2", 3), InputDataTypeNotInAllowlistExc), ((1, None, 3), InputDataTypeNotInAllowlistExc), ((1, theoretical_testutils.EmptyObject(), 3), InputDataTypeNotInAllowlistExc), (("123",), InputDataTypeNotInAllowlistExc), ((None,), InputDataTypeNotInAllowlistExc), (([], []), InputDataTypeNotInAllowlistExc), (({}, {}), InputDataTypeNotInAllowlistExc), ((theoretical_testutils.EmptyObject(), theoretical_testutils.EmptyObject()), InputDataTypeNotInAllowlistExc), ((1, 0, 2), DataValidationFailedExc), ((-100,), DataValidationFailedExc), ((1, 2**31, 2), DataValidationFailedExc), ((2**64,), DataValidationFailedExc), ((), DataValidationFailedExc), ((1, 2, 3, 4, 5, 6), DataValidationFailedExc), (tuple(range(10, 100)), DataValidationFailedExc), ((1, 2, 2, 3), [1, 2, 3]), ((1,) * 50, [1]), ((1, 2, 3, 4, 5) * 20, [1, 2, 3, 4, 5]), ((3,), [3]), ((5, 1, 2, 4, 3), [1, 2, 3, 4, 5]), ((3, 3, 1, 2, 1, 3, 2, 1, 1, 1, 2, 3, 2, 1, 1, 2, 3, 2), [1, 2, 3]), ({1, 2.0, 3}, InputDataTypeNotInAllowlistExc), ({1, "2", 3}, InputDataTypeNotInAllowlistExc), ({1, None, 3}, InputDataTypeNotInAllowlistExc), ({1, object(), 3}, InputDataTypeNotInAllowlistExc), ({"123"}, InputDataTypeNotInAllowlistExc), ({None}, InputDataTypeNotInAllowlistExc), ({tuple(), tuple()}, InputDataTypeNotInAllowlistExc), ({frozenset(), frozenset()}, InputDataTypeNotInAllowlistExc), ({object(), object()}, InputDataTypeNotInAllowlistExc), ({1, 0, 2}, 
DataValidationFailedExc), ({-100}, DataValidationFailedExc), ({1, 2**31, 2}, DataValidationFailedExc), ({2**64}, DataValidationFailedExc), (set(), DataValidationFailedExc), ({1, 2, 3, 4, 5, 6}, DataValidationFailedExc), (set(range(10, 100)), DataValidationFailedExc), ({1, 2, 2, 3}, [1, 2, 3]), ({3}, [3]), ({5, 1, 2, 4, 3}, [1, 2, 3, 4, 5]), ({3, 3, 1, 2, 1, 3, 2, 1, 1, 1, 2, 3, 2, 1, 1, 2, 3, 2}, [1, 2, 3]), (frozenset([1, 2.0, 3]), InputDataTypeNotInAllowlistExc), (frozenset([1, "2", 3]), InputDataTypeNotInAllowlistExc), (frozenset([1, None, 3]), InputDataTypeNotInAllowlistExc), (frozenset([1, object(), 3]), InputDataTypeNotInAllowlistExc), (frozenset(["123"]), InputDataTypeNotInAllowlistExc), (frozenset([None]), InputDataTypeNotInAllowlistExc), (frozenset([tuple(), tuple()]), InputDataTypeNotInAllowlistExc), (frozenset([frozenset(), frozenset()]), InputDataTypeNotInAllowlistExc), (frozenset([object(), object()]), InputDataTypeNotInAllowlistExc), (frozenset([1, 0, 2]), DataValidationFailedExc), (frozenset([-100]), DataValidationFailedExc), (frozenset([1, 2**31, 2]), DataValidationFailedExc), (frozenset([2**64]), DataValidationFailedExc), (frozenset(), DataValidationFailedExc), (frozenset([1, 2, 3, 4, 5, 6]), DataValidationFailedExc), (frozenset(range(10, 100)), DataValidationFailedExc), (frozenset([1, 2, 2, 3]), [1, 2, 3]), (frozenset([3]), [3]), (frozenset([5, 1, 2, 4, 3]), [1, 2, 3, 4, 5]), (frozenset([3, 3, 1, 2, 1, 3, 2, 1, 1, 1, 2, 3, 2, 1, 1, 2, 3, 2]), [1, 2, 3]), )), ) @pytest.mark.parametrize(("blueprint", "input_", "output"), theoretical_testutils.test_function_parameter_generator(__LIST_BLUEPRINT_TEST_SUITE)) def test_list_blueprint(blueprint, input_, output): theoretical_testutils.perform_test(blueprint, input_, output) def test_list_blueprint_default_parsing_mode(): assert ListBlueprint(item_blueprint=IntegerBlueprint()).get_parsing_mode() == ParsingMode.MODE_RATIONAL def test_list_blueprint_item_blueprint(): item_bp = IntegerBlueprint() assert ListBlueprint(item_blueprint=item_bp).get_item_blueprint() is item_bp def test_list_blueprint_filter_and_validator_sequences(): filter_seq = ( ListDeduplicateItemsFilter(), ListSortFilter() ) validator_seq = ( SequenceContainsItemValidator("???"), SequenceHasAllItemsUniqueValidator(), SequenceIsNotEmptyValidator(), SequenceMaximumLengthValidator(100), SequenceMinimumLengthValidator(50) ) list_bp = ListBlueprint(GenericBlueprint(), filters=filter_seq, validators=validator_seq) assert (list_bp.get_filters() == filter_seq) and (list_bp.get_validators() == validator_seq) def test_list_sort_filter_default_instance_attributes(): instance = ListSortFilter() assert (instance.get_comparison_key_extraction_function() is None) and (instance.is_order_reversed() is False) def test_list_sort_filter_instance_attributes(): def __extraction_func(item): return item instance = ListSortFilter(comparison_key_extraction_function=__extraction_func, reverse_order=True) assert (instance.get_comparison_key_extraction_function() is __extraction_func) and (instance.is_order_reversed() is True) def test_sequence_contains_item_validator_default_negation(): assert SequenceContainsItemValidator("!!!").is_negated() is False def test_sequence_contains_item_validator_checked_item(): item = theoretical_testutils.EmptyObject() assert SequenceContainsItemValidator(item).get_checked_item() is item def test_sequence_is_not_empty_validator_default_negation(): assert SequenceIsNotEmptyValidator().is_negated() is False @pytest.mark.parametrize("length", (0, 1, 100, 1000, 
1_000_000, 1_000_000_000_000_000)) def test_sequence_maximum_length_validator_maximum_acceptable_length(length): assert SequenceMaximumLengthValidator(length).get_maximum_acceptable_length() == length @pytest.mark.parametrize("length", (-1, -100, -100_000_000_000_000)) def test_sequence_maximum_length_validator_invalid_maximum_acceptable_length(length): with pytest.raises(InvalidValidatorConfigError): SequenceMaximumLengthValidator(length) @pytest.mark.parametrize("length", (0, 1, 100, 1000, 1_000_000, 1_000_000_000_000_000)) def test_sequence_minimum_length_validator_minimum_acceptable_length(length): assert SequenceMinimumLengthValidator(length).get_minimum_acceptable_length() == length @pytest.mark.parametrize("length", (-1, -100, -100_000_000_000_000)) def test_sequence_minimum_length_validator_invalid_minimum_acceptable_length(length): with pytest.raises(InvalidValidatorConfigError): SequenceMinimumLengthValidator(length)
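Editor's note: the suite above pins the three parsing modes down case by case. As a rough orientation only, a hypothetical sketch (not the tested library's ListBlueprint/IntegerBlueprint implementation) of the pipeline the MODE_RATIONAL integer-list cases encode: reject str/bytes/dict inputs outright, materialize any other iterable, and coerce each allowlisted item to int.

    def parse_int_list_rational(data):
        # Hypothetical sketch of the behaviour the MODE_RATIONAL cases encode.
        if isinstance(data, (str, bytes, bytearray, dict)):
            raise TypeError("input data type is blocklisted in rational mode")
        try:
            items = list(data)  # tuples, sets, generators, custom iterables all pass
        except TypeError:
            raise ValueError("input data is not convertible to a list")
        out = []
        for item in items:
            if not isinstance(item, (bool, int, float, str)):
                raise TypeError("item type not in allowlist")
            # int() truncates floats and tolerates whitespace and "_" in strings
            out.append(int(item))
        return out

    print(parse_int_list_rational((789, -123, 2.999, "\r\n-888_222 \t", True)))
    # [789, -123, 2, -888222, 1]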
65.205939
380
0.625075
7,291
68,075
5.735976
0.058565
0.06743
0.100811
0.008226
0.822673
0.801463
0.788814
0.771646
0.756701
0.750747
0
0.095392
0.187073
68,075
1,043
381
65.268456
0.660318
0.068483
0
0.663918
0
0
0.082907
0.004009
0
0
0
0
0.010309
1
0.024742
false
0
0.031959
0.004124
0.065979
0.045361
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0e59f7b6bc63b08ce066e99e011691eb71388805
25
py
Python
notest.py
jainankit/public-test
fc29a02b03187c107c816f70ed1802d008cb538a
[ "MIT" ]
null
null
null
notest.py
jainankit/public-test
fc29a02b03187c107c816f70ed1802d008cb538a
[ "MIT" ]
null
null
null
notest.py
jainankit/public-test
fc29a02b03187c107c816f70ed1802d008cb538a
[ "MIT" ]
null
null
null
print("no") assert False
8.333333
12
0.72
4
25
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
2
13
12.5
0.818182
0
0
0
0
0
0.08
0
0
0
0
0
0.5
1
0
true
0
0
0
0
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
1
0
6
adc524cc2436d88f40e4aab609c1cb7bb8e25273
114
py
Python
amfe/linalg/__init__.py
ma-kast/AMfe
99686cc313fb8904a093fb42e6cf0b38f8cfd791
[ "BSD-3-Clause" ]
null
null
null
amfe/linalg/__init__.py
ma-kast/AMfe
99686cc313fb8904a093fb42e6cf0b38f8cfd791
[ "BSD-3-Clause" ]
null
null
null
amfe/linalg/__init__.py
ma-kast/AMfe
99686cc313fb8904a093fb42e6cf0b38f8cfd791
[ "BSD-3-Clause" ]
null
null
null
from .linearsolvers import * from .eigen import * from .norms import * from .orth import * from .MKLutils import *
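Editor's note: this __init__.py flattens the subpackage by star-importing every submodule, so callers can pull names directly from the package. A hedged sketch of the companion convention (toy names, not AMfe's actual API): each submodule typically declares __all__ so the star import re-exports only its intended surface.

    # pkg/norms.py, an illustrative module (toy names, not AMfe's actual API):
    __all__ = ["vector_norm"]       # "from pkg.norms import *" exports only this name

    def vector_norm(values):
        return sum(v * v for v in values) ** 0.5

    def _helper(values):            # leading underscore: never star-exported anyway
        return list(values)

    # pkg/__init__.py would then read, mirroring the line above:
    #     from .norms import *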
22.8
28
0.745614
15
114
5.666667
0.466667
0.470588
0
0
0
0
0
0
0
0
0
0
0.166667
114
5
29
22.8
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bc2ce608f338e5a06805c76fbadf9c5c9c60296d
5,442
py
Python
sources/nitram_micro_mono.py
nitram509/nitram-micro-font
03f2e78373de790a89f2b364da2b53837ad45c4c
[ "MIT" ]
10
2017-04-16T18:26:26.000Z
2022-03-08T18:05:39.000Z
sources/nitram_micro_mono.py
nitram509/nitram-micro-font
03f2e78373de790a89f2b364da2b53837ad45c4c
[ "MIT" ]
null
null
null
sources/nitram_micro_mono.py
nitram509/nitram-micro-font
03f2e78373de790a89f2b364da2b53837ad45c4c
[ "MIT" ]
1
2019-03-23T16:15:06.000Z
2019-03-23T16:15:06.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- nitram_micro_mono_CP437 = [ 0, 0, 0, 0, 0, 10, 0, 4, 17, 14, 10, 0, 0, 14, 17, 27, 31, 31, 14, 4, 0, 0, 0, 0, 0, 0, 4, 10, 4, 14, 4, 14, 14, 4, 14, 0, 14, 14, 14, 0, 0, 0, 0, 0, 0, 0, 4, 10, 4, 0, 0, 0, 0, 0, 0, 30, 28, 31, 21, 7, 5, 13, 31, 12, 4, 20, 22, 31, 6, 4, 15, 10, 10, 10, 5, 21, 14, 27, 14, 21, 4, 12, 28, 12, 4, 4, 6, 7, 6, 4, 4, 14, 4, 14, 4, 10, 10, 10, 0, 10, 12, 11, 10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 31, 31, 0, 0, 0, 0, 0, 4, 14, 21, 4, 4, 4, 4, 21, 14, 4, 4, 8, 31, 8, 4, 4, 2, 31, 2, 4, 0, 2, 2, 30, 0, 0, 14, 14, 14, 0, 4, 14, 31, 0, 0, 0, 0, 31, 14, 4, 0, 0, 0, 0, 0, 4, 4, 4, 0, 4, 10, 10, 0, 0, 0, 10, 31, 10, 31, 10, 31, 5, 31, 20, 31, 17, 8, 4, 2, 17, 6, 9, 22, 9, 22, 8, 4, 0, 0, 0, 8, 4, 4, 4, 8, 2, 4, 4, 4, 2, 21, 14, 31, 14, 21, 0, 4, 14, 4, 0, 0, 0, 0, 4, 2, 0, 0, 14, 0, 0, 0, 0, 0, 0, 2, 8, 4, 4, 4, 2, 14, 25, 21, 19, 14, 4, 6, 4, 4, 14, 14, 8, 14, 2, 14, 14, 8, 12, 8, 14, 2, 2, 10, 14, 8, 14, 2, 14, 8, 14, 6, 2, 14, 10, 14, 14, 8, 12, 8, 8, 14, 10, 14, 10, 14, 14, 10, 14, 8, 14, 0, 4, 0, 4, 0, 0, 4, 0, 4, 2, 8, 4, 2, 4, 8, 0, 14, 0, 14, 0, 2, 4, 8, 4, 2, 14, 17, 12, 0, 4, 14, 9, 5, 1, 14, 6, 9, 17, 31, 17, 7, 9, 15, 17, 15, 14, 17, 1, 17, 14, 15, 25, 17, 17, 15, 31, 1, 15, 1, 31, 31, 1, 15, 1, 1, 14, 1, 25, 17, 14, 9, 17, 31, 17, 17, 14, 4, 4, 4, 14, 12, 8, 8, 10, 14, 9, 5, 3, 5, 9, 1, 1, 1, 1, 15, 17, 27, 21, 17, 17, 17, 19, 21, 25, 17, 14, 25, 17, 17, 14, 7, 9, 7, 1, 1, 14, 17, 17, 25, 30, 7, 9, 7, 5, 9, 30, 1, 14, 16, 15, 31, 4, 4, 4, 4, 9, 17, 17, 17, 14, 10, 10, 10, 10, 4, 9, 17, 21, 21, 10, 17, 10, 4, 10, 17, 17, 10, 4, 4, 4, 31, 8, 4, 2, 31, 12, 4, 4, 4, 12, 2, 4, 4, 4, 8, 6, 4, 4, 4, 6, 4, 10, 0, 0, 0, 0, 0, 0, 0, 14, 4, 8, 0, 0, 0, 6, 9, 17, 31, 17, 7, 9, 15, 17, 15, 14, 17, 1, 17, 14, 15, 25, 17, 17, 15, 31, 1, 15, 1, 31, 31, 1, 15, 1, 1, 14, 1, 25, 17, 14, 9, 17, 31, 17, 17, 14, 4, 4, 4, 14, 12, 8, 8, 10, 14, 18, 10, 6, 10, 18, 1, 1, 1, 1, 15, 17, 27, 21, 17, 17, 17, 19, 21, 25, 17, 14, 25, 17, 17, 14, 7, 9, 7, 1, 1, 14, 17, 17, 25, 30, 7, 9, 7, 5, 9, 30, 1, 14, 16, 15, 31, 4, 4, 4, 4, 9, 17, 17, 17, 14, 10, 10, 10, 10, 4, 9, 17, 21, 21, 10, 17, 10, 4, 10, 17, 17, 10, 4, 4, 4, 31, 8, 4, 2, 31, 12, 4, 2, 4, 12, 4, 4, 4, 4, 4, 6, 4, 8, 4, 6, 10, 5, 0, 0, 0, 0, 4, 10, 10, 14, 0, 0, 0, 0, 0, 10, 0, 10, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 14, 10, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 17, 17, 17, 31, 0, 14, 10, 14, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 14, 10, 14, 0, 0, 0, 0, 0, 0, 10, 0, 14, 10, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 14, 10, 14, 0, 0, 0, 0, 0, 3, 25, 11, 9, 11, 28, 23, 21, 21, 29, 0, 3, 1, 1, 1, 10, 0, 14, 10, 14, 10, 0, 10, 10, 14, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 6, 17, 14, 0, 0, 28, 4, 4, 0, 0, 7, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 4, 4, 4, 18, 9, 18, 4, 4, 9, 18, 9, 4, 0, 10, 0, 10, 0, 10, 21, 10, 21, 10, 21, 10, 21, 10, 21, 4, 4, 4, 4, 4, 4, 4, 7, 4, 4, 4, 7, 4, 7, 4, 10, 10, 11, 10, 10, 0, 0, 15, 10, 10, 0, 7, 4, 7, 4, 10, 11, 8, 11, 10, 10, 10, 10, 10, 10, 0, 15, 8, 11, 10, 10, 11, 8, 15, 0, 10, 10, 15, 0, 0, 4, 7, 4, 7, 0, 0, 0, 7, 4, 4, 4, 4, 28, 0, 0, 4, 4, 31, 0, 0, 0, 0, 31, 4, 4, 4, 4, 28, 4, 4, 0, 0, 31, 0, 0, 4, 4, 31, 4, 4, 4, 28, 4, 28, 4, 10, 10, 26, 10, 10, 10, 26, 2, 30, 0, 0, 30, 2, 26, 10, 10, 27, 0, 31, 0, 0, 31, 0, 27, 10, 10, 26, 
2, 26, 10, 0, 31, 0, 31, 0, 10, 27, 0, 27, 10, 4, 31, 0, 31, 0, 10, 10, 31, 0, 0, 0, 31, 0, 31, 4, 0, 0, 31, 10, 10, 10, 10, 30, 0, 0, 4, 28, 4, 28, 0, 0, 28, 4, 28, 4, 0, 0, 30, 10, 10, 10, 10, 31, 10, 10, 4, 31, 4, 31, 4, 4, 4, 7, 0, 0, 0, 0, 28, 4, 4, 31, 31, 31, 31, 31, 0, 0, 31, 31, 31, 3, 3, 3, 3, 3, 24, 24, 24, 24, 24, 31, 31, 31, 0, 0, 0, 0, 0, 0, 0, 6, 9, 13, 17, 13, 0, 0, 0, 0, 0, 14, 17, 17, 17, 14, 0, 4, 10, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 4, 10, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 31, 14, 0, 16, 14, 10, 14, 1, 12, 2, 14, 2, 12, 6, 9, 9, 9, 9, 14, 0, 14, 0, 14, 4, 14, 4, 0, 14, 2, 4, 8, 4, 14, 8, 4, 2, 4, 14, 8, 20, 4, 4, 4, 4, 4, 4, 5, 2, 4, 0, 14, 0, 4, 10, 5, 0, 10, 5, 4, 14, 4, 0, 0, 0, 14, 14, 14, 0, 0, 0, 4, 0, 0, 24, 8, 11, 10, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
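Editor's note: the table reads most naturally as five consecutive 5-bit row masks per glyph (each value 0 to 31). A hedged rendering sketch; the bit order within a row is an assumption, though the symmetric smiley at glyph index 1 (rows 10, 0, 4, 17, 14) comes out the same either way.

    def render_glyph(font, index):
        # Assumes 5 rows per glyph, each row a 5-bit mask; bit order is a guess.
        rows = font[index * 5:(index + 1) * 5]
        return "\n".join(
            "".join("#" if row & (1 << bit) else "." for bit in range(5))
            for row in rows
        )

    smiley = [10, 0, 4, 17, 14]  # glyph index 1 copied from the table above
    print(render_glyph(smiley, 0))
    # .#.#.
    # .....
    # ..#..
    # #...#
    # .###.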
20.850575
27
0.336641
1,290
5,442
1.417829
0.031783
0.311646
0.367414
0.398032
0.609623
0.498633
0.463641
0.43357
0.393658
0.364133
0
0.57257
0.425211
5,442
261
28
20.850575
0.012148
0.006983
0
0.46124
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cb25f140aac484165158e63270c573c7a5cdc0a9
1,293
py
Python
tests/test_build_url.py
shubham-surya/core-lib
543db80706746a937e5ed16bd50f2de8d58b32e4
[ "MIT" ]
null
null
null
tests/test_build_url.py
shubham-surya/core-lib
543db80706746a937e5ed16bd50f2de8d58b32e4
[ "MIT" ]
9
2021-03-11T02:29:17.000Z
2022-03-22T19:01:18.000Z
tests/test_build_url.py
shubham-surya/core-lib
543db80706746a937e5ed16bd50f2de8d58b32e4
[ "MIT" ]
2
2022-01-27T11:19:00.000Z
2022-02-11T11:33:09.000Z
import unittest from core_lib.data_layers.data.data_helpers import build_url class TestBuildUrl(unittest.TestCase): def test_build_url(self): self.assertEqual(build_url(host="some_domain.com"), "some_domain.com") self.assertEqual(build_url(protocol="http", host="some_domain.com"), "http://some_domain.com") self.assertEqual(build_url(protocol="http", host="some_domain.com", username="shay"), "http://shay@some_domain.com") self.assertEqual(build_url(protocol="http", host="some_domain.com", username="shay", password="pass"), "http://shay:pass@some_domain.com") self.assertEqual(build_url(protocol="http", host="some_domain.com", username="shay", password="pass", port=80), "http://shay:pass@some_domain.com:80") self.assertEqual(build_url(protocol="http", host="some_domain.com", username="shay", password="pass", port=80, path="x/y/z"), "http://shay:pass@some_domain.com:80/x/y/z") params = { "protocol": "http", "host": "some_domain.com", "username": "shay", "password": "pass", "port": 80, "path": "/x/y/z", "file": "file.foo" } self.assertEqual(build_url(**params), "http://shay:pass@some_domain.com:80/x/y/z/file.foo")
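Editor's note: the assertions pin down build_url's observable behaviour completely, so it can be reconstructed from the tests alone. A hedged sketch (the real helper lives in core_lib.data_layers.data.data_helpers and may differ internally):

    def build_url(protocol=None, host=None, username=None, password=None,
                  port=None, path=None, file=None):
        # Reconstructed from the assertions; every component except host is optional.
        url = ""
        if protocol:
            url += protocol + "://"
        if username:
            url += username
            if password:
                url += ":" + password
            url += "@"
        url += host
        if port:
            url += ":" + str(port)
        if path:
            url += "/" + path.lstrip("/")   # "x/y/z" and "/x/y/z" both yield "/x/y/z"
        if file:
            url += "/" + file
        return url

    assert build_url(host="some_domain.com") == "some_domain.com"
    assert build_url(protocol="http", host="some_domain.com", username="shay",
                     password="pass", port=80, path="x/y/z",
                     file="file.foo") == "http://shay:pass@some_domain.com:80/x/y/z/file.foo"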
47.888889
178
0.640371
174
1,293
4.603448
0.206897
0.174782
0.227216
0.200999
0.724095
0.724095
0.709114
0.675406
0.675406
0.675406
0
0.011215
0.172467
1,293
26
179
49.730769
0.737383
0
0
0
0
0.05
0.344934
0
0
0
0
0
0.35
1
0.05
false
0.25
0.1
0
0.2
0
0
0
0
null
0
1
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
cb2fc7da7e1ba7a4229cfe15412a78f918bd9743
36
py
Python
Minimal-For-Cpanel/passenger_wsgi.py
DhirajBajracharya/minimal-django
2e3ede1dcff96b7188021fa0aab69f5ba8ee65ab
[ "MIT" ]
null
null
null
Minimal-For-Cpanel/passenger_wsgi.py
DhirajBajracharya/minimal-django
2e3ede1dcff96b7188021fa0aab69f5ba8ee65ab
[ "MIT" ]
null
null
null
Minimal-For-Cpanel/passenger_wsgi.py
DhirajBajracharya/minimal-django
2e3ede1dcff96b7188021fa0aab69f5ba8ee65ab
[ "MIT" ]
null
null
null
from minimal.wsgi import application
36
36
0.888889
5
36
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.083333
36
1
36
36
0.969697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cb64c743f614117d6bf9f0ddf779af40fc03cbaa
275
py
Python
Test.py
gemirson/DevOps
b4270c34d7923b2e7c2cf60288f9a133143a9e3a
[ "Apache-2.0" ]
null
null
null
Test.py
gemirson/DevOps
b4270c34d7923b2e7c2cf60288f9a133143a9e3a
[ "Apache-2.0" ]
7
2019-07-31T22:52:14.000Z
2019-08-01T01:03:49.000Z
Test.py
gemirson/DevOps
b4270c34d7923b2e7c2cf60288f9a133143a9e3a
[ "Apache-2.0" ]
null
null
null
import pytest from DevOps import Sum from DevOps import Sub from DevOps import Mul from DevOps import Div def test_somar(): assert Sum(2,4)==6 def test_sub(): assert Sub(2,4)==-2 def test_mul(): assert Mul(2,4)==8 def test_div(): assert Div(2,4)==0.5
19.642857
29
0.665455
51
275
3.509804
0.352941
0.223464
0.357542
0
0
0
0
0
0
0
0
0.059908
0.210909
275
14
29
19.642857
0.764977
0
0
0
0
0
0
0
0
0
0
0
0.307692
1
0.307692
true
0
0.384615
0
0.692308
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
cbba487b4dbc6b551001a062edb021a86097ca46
68
py
Python
Chapter 01/Chap01_Example1.1.py
Anancha/Programming-Techniques-using-Python
e80c329d2a27383909d358741a5cab03cb22fd8b
[ "MIT" ]
null
null
null
Chapter 01/Chap01_Example1.1.py
Anancha/Programming-Techniques-using-Python
e80c329d2a27383909d358741a5cab03cb22fd8b
[ "MIT" ]
null
null
null
Chapter 01/Chap01_Example1.1.py
Anancha/Programming-Techniques-using-Python
e80c329d2a27383909d358741a5cab03cb22fd8b
[ "MIT" ]
null
null
null
a=10 print(type(a)) a='Python' print(type(a)) a=False print(type(a))
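Editor's note: for reference, the output this example produces; the name a is rebound to objects of different types rather than the variable itself being retyped.

    a = 10
    print(type(a))    # <class 'int'>
    a = 'Python'
    print(type(a))    # <class 'str'>
    a = False
    print(type(a))    # <class 'bool'>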
11.333333
14
0.676471
15
68
3.066667
0.4
0.586957
0.652174
0.478261
0
0
0
0
0
0
0
0.031746
0.073529
68
6
15
11.333333
0.698413
0
0
0.5
0
0
0.086957
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
cbda601f4da1ace2e8493ec89d4493fb32518836
68
py
Python
py/is_prime.py
iaueos/lang
038278a02ae48f283ebb392828b94aab4e49104d
[ "MIT" ]
null
null
null
py/is_prime.py
iaueos/lang
038278a02ae48f283ebb392828b94aab4e49104d
[ "MIT" ]
null
null
null
py/is_prime.py
iaueos/lang
038278a02ae48f283ebb392828b94aab4e49104d
[ "MIT" ]
null
null
null
import re def is_prime(n): return not re.match(r'^.?$|^(..+?)\1+$', '1'*n)
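Editor's note: the original file used re without importing it; the import is added above. The one-liner tests primality in unary: '1' * n writes n as a string of n ones, ^.?$ matches lengths 0 and 1 (not prime), and ^(..+?)\1+$ matches any length that factors as d * k with d >= 2 and k >= 2, i.e. every composite. A short demonstration:

    import re

    def is_prime(n):
        return not re.match(r'^.?$|^(..+?)\1+$', '1' * n)

    print([n for n in range(20) if is_prime(n)])
    # [2, 3, 5, 7, 11, 13, 17, 19]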
34
51
0.470588
12
68
2.583333
0.833333
0
0
0
0
0
0
0
0
0
0
0.033898
0.132353
68
2
51
34
0.491525
0
0
0
0
0
0.246377
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
1dc6d8b8f4296951f195a745633f6e0071c92803
22,522
py
Python
pyspatialopt/analysis/pyqgis_analysis.py
giserh/pyspatialopt
86fed48b8fa258be05b008538289577dbcc5e9f1
[ "MIT" ]
55
2016-07-18T20:09:43.000Z
2022-01-26T19:33:09.000Z
pyspatialopt/analysis/pyqgis_analysis.py
giserh/pyspatialopt
86fed48b8fa258be05b008538289577dbcc5e9f1
[ "MIT" ]
6
2016-11-07T03:42:46.000Z
2019-08-15T18:48:47.000Z
pyspatialopt/analysis/pyqgis_analysis.py
giserh/pyspatialopt
86fed48b8fa258be05b008538289577dbcc5e9f1
[ "MIT" ]
18
2016-09-01T22:18:56.000Z
2022-01-27T18:15:50.000Z
# -*- coding: UTF-8 -*- import logging import math import os import qgis import qgis.core import qgis.utils from pyspatialopt import version def generate_query(unique_ids, unique_field_name, wrap_values_in_quotes=False): """ Generates a select or definition query that can be applied to the input layers :param unique_ids: (list) A list of ids to query :param unique_field_name: (string) The name of the field that the ids correspond to :param wrap_values_in_quotes: (bool) Should the ids be wrapped in quotes (if unique_field_name is string) :return: (string) A query string that can be applied to a layer """ if unique_ids: if wrap_values_in_quotes: query = "{} in (-1,{})".format(unique_field_name, ",".join("'{0}'".format(w) for w in unique_ids)) else: query = "{} in (-1,{})".format(unique_field_name, ",".join(unique_ids)) else: query = "{} in (-1)".format(unique_field_name) return query def reset_layers(*args): """ Clears the selection and definition query applied to the layers :param args: (Feature Layers) The feature layers to reset :return: """ for layer in args: layer.setSubsetString("") layer.removeSelection() def generate_serviceable_demand(dl, dl_demand_field, dl_id_field, *args): """ Finds the total serviceable coverage when 2 facility layers are used Merges polygons & dissolves them to form one big area of total coverage Then intersects with demand layer :param dl: (Feature Layer) The demand polygon or point layer :param dl_demand_field: (string) The field representing demand :param dl_id_field: (string) The name of the unique field for the demand layer :param args: (Feature Layer) The facility layers to use :return: (dictionary) A dictionary of similar format to the coverage format """ # Reset layers, then check parameters so we get useful exceptions and messages reset_layers(dl) reset_layers(*args) if dl.wkbType() not in [qgis.utils.QGis.WKBPoint, qgis.utils.QGis.WKBPolygon]: raise TypeError("Demand layer must have polygon or point geometry") dl_field_names = [field.name() for field in dl.pendingFields()] if dl_demand_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_demand_field)) if dl_id_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_id_field)) logging.getLogger().info("Initializing output...") if dl.wkbType() == qgis.utils.QGis.WKBPolygon: output = { "version": version.__version__, "demand": {}, "type": { "mode": "serviceableDemand", "type": "partial"} } else: output = { "version": version.__version__, "demand": {}, "type": { "mode": "serviceableDemand", "type": "binary"} } # Merge all of facility layers together logging.getLogger().info("Combining facilities...") dissolved_geom = None for layer in args: for feature in layer.getFeatures(): if dissolved_geom is None: dissolved_geom = feature.geometry() dissolved_geom = dissolved_geom.combine(feature.geometry()) logging.getLogger().info("Determining possible service coverage for each demand unit...") for feature in dl.getFeatures(): if dl.wkbType() == qgis.utils.QGis.WKBPolygon: if dissolved_geom.intersects(feature.geometry()): intersected = dissolved_geom.intersection(feature.geometry()) if intersected.area() > 0: serviceable_demand = math.ceil(float(intersected.area() / feature.geometry().area()) * feature[ dl_demand_field]) else: serviceable_demand = 0.0 else: serviceable_demand = feature[dl_demand_field] else: if dissolved_geom.contains(feature.geometry()): serviceable_demand = 
feature[dl_demand_field] else: serviceable_demand = 0.0 # Make sure serviceable is less than or equal to demand, floating point issues output["demand"][str(feature[dl_id_field])] = {"serviceableDemand": 0} if serviceable_demand < feature[dl_demand_field]: output["demand"][str(feature[dl_id_field])]["serviceableDemand"] = serviceable_demand else: output["demand"][str(feature[dl_id_field])]["serviceableDemand"] = feature[dl_demand_field] logging.getLogger().info("Serviceable demand successfully created.") reset_layers(dl) reset_layers(*args) return output def generate_binary_coverage(dl, fl, dl_demand_field, dl_id_field, fl_id_field, fl_variable_name=None): """ Generates a dictionary representing the binary coverage of a facility to demand points :param dl: (Feature Layer) The demand polygon or point layer :param fl: (Feature Layer) The facility service area polygon layer :param dl_demand_field: (string) The name of the field in the demand layer that describes the demand :param dl_id_field: (string) The name of the unique identifying field on the demand layer :param fl_id_field: (string) The name of the unique identifying field on the facility layer :param fl_variable_name: (string) The name to use to represent the facility variable :return: (dictionary) A nested dictionary storing the coverage relationships """ # Check parameters so we get useful exceptions and messages if dl.wkbType() not in [qgis.utils.QGis.WKBPoint, qgis.utils.QGis.WKBPolygon]: raise TypeError("Demand layer must have polygon or point geometry") if fl.wkbType() != qgis.utils.QGis.WKBPolygon: raise TypeError("Facility service area layer must have polygon geometry") dl_field_names = [field.name() for field in dl.pendingFields()] if dl_demand_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_demand_field)) if dl_id_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_id_field)) fl_field_names = [field.name() for field in fl.pendingFields()] if fl_id_field not in fl_field_names: raise ValueError("'{}' field not found in facility service area layer".format(fl_id_field)) reset_layers(dl, fl) if fl_variable_name is None: fl_variable_name = os.path.basename(os.path.abspath(fl.dataProvider().dataSourceUri())).split(".")[0] logging.getLogger().info("Initializing facilities in output...") output = { "version": version.__version__, "type": { "mode": "coverage", "type": "binary", }, "demand": {}, "totalDemand": 0.0, "totalServiceableDemand": 0.0, "facilities": {fl_variable_name: []} } # List all of the facilities logging.getLogger().info("Initializing facilities in output...") for feature in fl.getFeatures(): output["facilities"][fl_variable_name].append(str(feature[fl_id_field])) # Build empty data structure logging.getLogger().info("Initializing demand in output...") for feature in dl.getFeatures(): output["demand"][str(feature[dl_id_field])] = { "area": round(feature.geometry().area()), "demand": round(feature[dl_demand_field]), "serviceableDemand": 0.0, "coverage": {fl_variable_name: {}} } logging.getLogger().info("Determining binary coverage for each demand unit...") for feature in fl.getFeatures(): if dl.wkbType() == qgis.utils.QGis.WKBPoint: geom = feature.geometry() for dl_p in dl.getFeatures(): geom2 = dl_p.geometry() if geom.intersects(geom2): output["demand"][str(dl_p[dl_id_field])]["serviceableDemand"] = \ output["demand"][str(dl_p[dl_id_field])]["demand"] output["demand"][str(dl_p[dl_id_field])]["coverage"][fl_variable_name][ 
str(feature[fl_id_field])] = 1 else: geom = feature.geometry() for dl_p in dl.getFeatures(): geom2 = dl_p.geometry() if geom.contains(geom2): output["demand"][str(dl_p[dl_id_field])]["serviceableDemand"] = \ output["demand"][str(dl_p[dl_id_field])]["demand"] output["demand"][str(dl_p[dl_id_field])]["coverage"][fl_variable_name][ str(feature[fl_id_field])] = 1 for feature in dl.getFeatures(): output["totalServiceableDemand"] += output["demand"][str(feature[dl_id_field])]["serviceableDemand"] output["totalDemand"] += feature[dl_demand_field] logging.getLogger().info("Binary coverage successfully generated.") reset_layers(dl, fl) return output def generate_partial_coverage(dl, fl, dl_demand_field, dl_id_field, fl_id_field, fl_variable_name=None): """ Generates a dictionary representing the partial coverage (based on area) of a facility to demand areas :param dl: (Feature Layer) The demand polygon layer :param fl: (Feature Layer) The facility service area polygon layer :param dl_demand_field: (string) The name of the field in the demand layer that describes the demand :param dl_id_field: (string) The name of the unique identifying field on the demand layer :param fl_id_field: (string) The name of the unique identifying field on the facility layer :param fl_variable_name: (string) The name to use to represent the facility variable :return: (dictionary) A nested dictionary storing the coverage relationships """ # Reset DF # Check parameters so we get useful exceptions and messages if dl.wkbType() != qgis.utils.QGis.WKBPolygon: raise TypeError("Demand layer must have polygon geometry") if fl.wkbType() != qgis.utils.QGis.WKBPolygon: raise TypeError("Facility service area layer must have polygon geometry") dl_field_names = [field.name() for field in dl.pendingFields()] if dl_demand_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_demand_field)) if dl_id_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_id_field)) fl_field_names = [field.name() for field in fl.pendingFields()] if fl_id_field not in fl_field_names: raise ValueError("'{}' field not found in facility service area layer".format(fl_id_field)) reset_layers(dl, fl) # If no facility layer name provided, use the name of the feature class/shapefile if fl_variable_name is None: fl_variable_name = os.path.basename(os.path.abspath(fl.dataProvider().dataSourceUri())).split(".")[0] # Create the initial data structure logging.getLogger().info("Initializing facilities in output...") output = { "version": version.__version__, "type": { "mode": "coverage", "type": "partial", }, "demand": {}, "totalDemand": 0.0, "totalServiceableDemand": 0.0, "facilities": {fl_variable_name: []} } # List all of the facilities for feature in fl.getFeatures(): output["facilities"][fl_variable_name].append(str(feature[fl_id_field])) # Build empty data structure logging.getLogger().info("Initializing demand in output...") for feature in dl.getFeatures(): output["demand"][str(feature[dl_id_field])] = { "area": round(feature.geometry().area()), "demand": round(feature[dl_demand_field]), "serviceableDemand": 0.0, "coverage": {fl_variable_name: {}} } # Dissolve all facility service areas so we can find the total serviceable area logging.getLogger().info("Combining facilities...") dissolved_geom = None for feature in fl.getFeatures(): if dissolved_geom is None: dissolved_geom = feature.geometry() dissolved_geom = dissolved_geom.combine(feature.geometry()) # Iterate over each intersected 
polygon and areal interpolate the demand that is covered logging.getLogger().info("Determining partial coverage for each demand unit...") for feature in dl.getFeatures(): intersected = dissolved_geom.intersection(feature.geometry()) if intersected.area() > 0: serviceable_demand = math.ceil(float(intersected.area() / feature.geometry().area()) * feature[dl_demand_field]) else: serviceable_demand = 0.0 # Make sure serviceable is less than or equal to demand, floating point issues if serviceable_demand < output["demand"][str(feature[dl_id_field])]["demand"]: output["demand"][str(feature[dl_id_field])]["serviceableDemand"] = serviceable_demand else: output["demand"][str(feature[dl_id_field])]["serviceableDemand"] = \ output["demand"][str(feature[dl_id_field])]["demand"] for feature2 in fl.getFeatures(): intersected_fd = feature.geometry().intersection(feature2.geometry()) if intersected_fd.area() > 0: demand = math.ceil(float(intersected_fd.area() / feature.geometry().area()) * feature[dl_demand_field]) if demand < output["demand"][feature[str(dl_id_field)]]["serviceableDemand"]: output["demand"][str(feature[dl_id_field])]["coverage"][fl_variable_name] \ [str(feature2[fl_id_field])] = demand else: output["demand"][str(feature[dl_id_field])]["coverage"][fl_variable_name][ str(feature2[fl_id_field])] = output["demand"][str(feature[dl_id_field])]["serviceableDemand"] for feature in dl.getFeatures(): output["totalServiceableDemand"] += output["demand"][str(feature[dl_id_field])]["serviceableDemand"] output["totalDemand"] += feature[dl_demand_field] logging.getLogger().info("Partial coverage successfully generated.") reset_layers(dl, fl) return output def generate_traumah_coverage(dl, dl_service_area, tc_layer, ad_layer, dl_demand_field, air_distance_threshold, dl_id_field="FID", tc_layer_id_field="FID", ad_layer_id_field="FID"): """ Generates a coverage model for the TRAUMAH model. 
The traumah model uses trauma centers (TC), air depots (AD), and demand :param dl: (Feature Layer) The demand point layer :param dl_service_area (Feature Layer) The demand service area (generally derived from street network) :param tc_layer: (Feature Layer) The Trauma Center point layer :param ad_layer: (Feature Layer) The Air Depot point layer :param dl_demand_field: (string) The attribute that represents the demand in the demand layer :param air_distance_threshold: (float) The maximum total distance a helicopter can fly :param dl_id_field: (string) The attribute that represents unique ids for the demand layers :param tc_layer_id_field: (string) The attribute that represents unique ids for the trauma center layers :param ad_layer_id_field: (string) The attribute that represents unique ids for the air depot layers :return: (dictionary) A nested dictionary storing the coverage relationships """ if dl.wkbType() != qgis.utils.QGis.WKBPoint: raise TypeError("Demand layer must have point geometry") if dl_service_area.wkbType() != qgis.utils.QGis.WKBPolygon: raise TypeError("Demand layer must have polygon geometry") if tc_layer.wkbType() != qgis.utils.QGis.WKBPoint: raise TypeError("Trauma center layer must have point geometry") dl_field_names = [field.name() for field in dl.pendingFields()] if dl_demand_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_demand_field)) if dl_id_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_id_field)) tc_layer_field_names = [field.name() for field in tc_layer.pendingFields()] if tc_layer_id_field not in tc_layer_field_names: raise ValueError("'{}' field not found in trauma center layer".format(tc_layer_id_field)) ad_layer_field_names = [field.name() for field in ad_layer.pendingFields()] if ad_layer_id_field not in ad_layer_field_names: raise ValueError("'{}' field not found in trauma center layer".format(ad_layer_id_field)) reset_layers(dl, dl_service_area, ad_layer, tc_layer) ad_variable_name = "AirDepot" tc_variable_name = "TraumaCenter" ad_tc_variable_name = "ADTCPair" logging.getLogger().info("Initializing facilities in output...") output = { "version": version.__version__, "type": { "mode": "coverage", "type": "traumah", }, "demand": {}, "totalDemand": 0.0, "totalServiceableDemand": 0.0, "facilities": {ad_variable_name: [], tc_variable_name: []} } # List all of the facilities logging.getLogger().info("Initializing facilities in output...") for feature in ad_layer.getFeatures(): output["facilities"][ad_variable_name].append(str(feature[ad_layer_id_field])) for feature in tc_layer.getFeatures(): output["facilities"][tc_variable_name].append(str(feature[tc_layer_id_field])) # Build empty data structure logging.getLogger().info("Initializing demand in output...") for feature in dl.getFeatures(): output["demand"][str(feature[dl_id_field])] = { "area": round(feature.geometry().area()), "demand": round(feature[dl_demand_field]), "serviceableDemand": 0.0, "coverage": {tc_variable_name: [], ad_tc_variable_name: []} } logging.getLogger().info("Determining binary coverage (using ground transport service area) for each demand unit...") for feature in tc_layer.getFeatures(): geom = feature.geometry() for dl_p in dl_service_area.getFeatures(): geom2 = dl_p.geometry() if geom2.intersects(geom): output["demand"][str(dl_p[dl_id_field])]["coverage"][tc_variable_name].append({ tc_variable_name: str(feature[tc_layer_id_field]) }) logging.getLogger().info("Determining binary coverage 
(using air transportation) for each demand unit...") for d in dl.getFeatures(): geom = d.geometry() distances = {} for t in tc_layer.getFeatures(): geom2 = t.geometry() distances[t[tc_layer_id_field]] = geom.distance(geom2) for a in ad_layer.getFeatures(): geom2 = a.geometry() distance = geom2.distance(geom) for k, v in distances.items(): if distance + v <= air_distance_threshold: output["demand"][str(d[dl_id_field])]["coverage"][ad_tc_variable_name].append({ tc_variable_name: str(k), ad_variable_name: str(a[ad_layer_id_field]) }) logging.getLogger().info("Binary traumah coverage successfully generated.") reset_layers(dl, tc_layer, ad_layer) return output def get_covered_demand(dl, dl_demand_field, mode, *args): """ Finds to total serviceable coverage when 2 facility layers are used Merges polygons & dissolves them to form one big area of total coverage Then intersects with demand layer :param dl: (Feature Layer) The demand polygon or point layer :param dl_demand_field: (string) The field representing demand :param mode: (string) ['binary', 'partial'] The type of coverage to use :param args: (Feature Layer) The facility layers to use :return: (dictionary) A dictionary of similar format to the coverage format """ # Reset DF # Check parameters so we get useful exceptions and messages reset_layers(dl) # Check parameters so we get useful exceptions and messages if mode not in ['binary', 'partial']: raise ValueError("'{}' is not a valid mode").format(mode) if dl.wkbType() not in [qgis.utils.QGis.WKBPoint, qgis.utils.QGis.WKBPolygon]: raise TypeError("Demand layer must have polygon or point geometry") dl_field_names = [field.name() for field in dl.pendingFields()] if dl_demand_field not in dl_field_names: raise ValueError("'{}' field not found in demand layer".format(dl_demand_field)) # Merge all of facility layers together logging.getLogger().info("Combining facilities...") dissolved_geom = None for layer in args: for feature in layer.getFeatures(): if dissolved_geom is None: dissolved_geom = feature.geometry() dissolved_geom = dissolved_geom.combine(feature.geometry()) total_coverage = 0 logging.getLogger().info("Determining possible service coverage for each demand unit...") for feature in dl.getFeatures(): if dl.wkbType() == qgis.utils.QGis.WKBPolygon and mode == "partial": if dissolved_geom.intersects(feature.geometry()): intersected = dissolved_geom.intersection(feature.geometry()) if intersected.area() > 0: serviceable_demand = float(intersected.area() / feature.geometry().area()) * feature[ dl_demand_field] else: serviceable_demand = 0.0 else: serviceable_demand = feature[dl_demand_field] else: if dissolved_geom.contains(feature.geometry()): serviceable_demand = feature[dl_demand_field] else: serviceable_demand = 0.0 # Make sure serviceable is less than or equal to demand, floating point issues if serviceable_demand < feature[dl_demand_field]: total_coverage += serviceable_demand else: total_coverage += feature[dl_demand_field] logging.getLogger().info("Covered demand is: {}".format(total_coverage)) reset_layers(dl) return total_coverage
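A minimal usage sketch of the entry points above. The layer variables and field names here are illustrative, not part of the module; it assumes a QGIS 2.x Python session where demand_layer and service_area_layer are already loaded as vector layers.

# Hypothetical example: build a binary coverage dictionary, then apply a
# definition query back to the facility layer for a subset of facilities.
coverage = generate_binary_coverage(demand_layer, service_area_layer,
                                    "Population", "GEOID", "ORIG_ID",
                                    fl_variable_name="facility")
chosen_ids = coverage["facilities"]["facility"][:5]  # e.g. first five facilities
service_area_layer.setSubsetString(generate_query(chosen_ids, "ORIG_ID"))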
50.611236
181
0.658423
2,816
22,522
5.088778
0.083807
0.032729
0.025122
0.023726
0.800768
0.781577
0.756664
0.727495
0.688207
0.664131
0
0.003426
0.23537
22,522
444
182
50.725225
0.828698
0.21943
0
0.644776
1
0
0.171008
0.006368
0
0
0
0
0
1
0.020896
false
0
0.020896
0
0.059701
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1dd3e9ff2d5ee7ec519703b04b09b9da47b11501
106
py
Python
pypeloton/__init__.py
raman325/pypeloton
b653bd6ae1d28ad2dde420a9f369467c2aabe6a2
[ "MIT" ]
2
2021-03-13T21:14:27.000Z
2022-01-03T01:43:06.000Z
pypeloton/__init__.py
raman325/pypeloton
b653bd6ae1d28ad2dde420a9f369467c2aabe6a2
[ "MIT" ]
null
null
null
pypeloton/__init__.py
raman325/pypeloton
b653bd6ae1d28ad2dde420a9f369467c2aabe6a2
[ "MIT" ]
null
null
null
from .pypeloton import Peloton, PelotonAsync  # noqa: F401
from .version import __version__  # noqa: F401
35.333333
58
0.773585
13
106
6
0.615385
0.205128
0
0
0
0
0
0
0
0
0
0.067416
0.160377
106
2
59
53
0.808989
0.198113
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
380b1f5ee0c93c1b372642b2b6e290154ac067f2
313
py
Python
distributed/diagnostics/__init__.py
met-office-lab/distributed
46e31cadd55456bbd0b85a01f040d1eb33ee587f
[ "BSD-3-Clause" ]
1
2019-01-02T20:00:52.000Z
2019-01-02T20:00:52.000Z
distributed/diagnostics/__init__.py
met-office-lab/distributed
46e31cadd55456bbd0b85a01f040d1eb33ee587f
[ "BSD-3-Clause" ]
null
null
null
distributed/diagnostics/__init__.py
met-office-lab/distributed
46e31cadd55456bbd0b85a01f040d1eb33ee587f
[ "BSD-3-Clause" ]
1
2021-10-11T13:46:48.000Z
2021-10-11T13:46:48.000Z
from __future__ import print_function, division, absolute_import

from ..utils import ignoring

with ignoring(ImportError):
    from .progressbar import progress
with ignoring(ImportError):
    from .resource_monitor import Occupancy
with ignoring(ImportError):
    from .scheduler_widgets import scheduler_status
31.3
64
0.817891
36
313
6.861111
0.527778
0.145749
0.279352
0.327935
0
0
0
0
0
0
0
0
0.13099
313
9
65
34.777778
0.908088
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.125
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
381557ea05a9ac279507edff1e74462a50219760
125
py
Python
wechat/basic/singals.py
yasongxu/wechat
99cc499a045a3705711740882cdc38d55416fbb5
[ "MIT" ]
45
2017-06-21T10:35:53.000Z
2022-03-30T09:43:09.000Z
wechat/basic/singals.py
yasongxu/wechat
99cc499a045a3705711740882cdc38d55416fbb5
[ "MIT" ]
3
2017-11-12T13:07:08.000Z
2021-06-10T18:39:25.000Z
wechat/basic/singals.py
yasongxu/wechat
99cc499a045a3705711740882cdc38d55416fbb5
[ "MIT" ]
15
2017-06-22T00:45:09.000Z
2021-03-19T07:02:58.000Z
from django.dispatch import Signal

handler_add = Signal(providing_args=["user"])
view_init = Signal(providing_args=["user"])
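A sketch of how such signals are typically consumed. The receiver and the send() call below are illustrative and not part of this module; note that providing_args is purely documentary in Django and was removed in Django 3.1.

from django.dispatch import receiver

@receiver(handler_add)
def on_handler_add(sender, user=None, **kwargs):
    # Illustrative receiver: runs whenever handler_add is sent
    print("handler added for", user)

# Hypothetical sender somewhere in application code:
# handler_add.send(sender=None, user=some_user)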
31.25
45
0.784
17
125
5.529412
0.705882
0.319149
0.404255
0.489362
0
0
0
0
0
0
0
0
0.08
125
4
46
31.25
0.817391
0
0
0
0
0
0.063492
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
69a9f64da81d628b0661b842afcf27d30953cab1
148
py
Python
tests/unit/exceptions/test_rpgtk_base_exception.py
is-gabs/rpgtk
11f657ed52374b0d9a106f5e0f9b433441141f6d
[ "MIT" ]
2
2022-02-18T01:22:11.000Z
2022-03-02T02:32:19.000Z
tests/unit/exceptions/test_rpgtk_base_exception.py
is-gabs/rpgtk
11f657ed52374b0d9a106f5e0f9b433441141f6d
[ "MIT" ]
null
null
null
tests/unit/exceptions/test_rpgtk_base_exception.py
is-gabs/rpgtk
11f657ed52374b0d9a106f5e0f9b433441141f6d
[ "MIT" ]
null
null
null
from rpgtk.exceptions import RPGTKBaseException


def test_should_extends_exception():
    assert issubclass(RPGTKBaseException, Exception) is True
24.666667
60
0.837838
16
148
7.5625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.114865
148
5
61
29.6
0.923664
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
69bdf9f9e7abd700a603f084e58be842b02d91b1
128
py
Python
core/Lib/re.py
dillionhacker/python222
205414c33fba8166167fd8a6a03eda1a68f16316
[ "Apache-2.0" ]
1
2022-03-17T13:55:02.000Z
2022-03-17T13:55:02.000Z
core/Lib/re.py
tuankien2601/python222
205414c33fba8166167fd8a6a03eda1a68f16316
[ "Apache-2.0" ]
null
null
null
core/Lib/re.py
tuankien2601/python222
205414c33fba8166167fd8a6a03eda1a68f16316
[ "Apache-2.0" ]
null
null
null
# Portions Copyright (c) 2005 Nokia Corporation
# Minimal "re" compatibility wrapper

from sre import *
from sre import __all__
21.333333
48
0.78125
17
128
5.647059
0.823529
0.145833
0.270833
0
0
0
0
0
0
0
0
0.037383
0.164063
128
5
49
25.6
0.859813
0.625
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
69c7143aad57f1f67447043e4808dc60fce50dae
106
py
Python
dataflowfx/processing/__init__.py
nullptrninja/py-data-workflow
bba717f1c891e87561df5018333a261c5b72cdb5
[ "MIT" ]
1
2021-05-09T02:17:43.000Z
2021-05-09T02:17:43.000Z
dataflowfx/processing/__init__.py
nullptrninja/py-data-workflow
bba717f1c891e87561df5018333a261c5b72cdb5
[ "MIT" ]
null
null
null
dataflowfx/processing/__init__.py
nullptrninja/py-data-workflow
bba717f1c891e87561df5018333a261c5b72cdb5
[ "MIT" ]
null
null
null
from dataflowfx.processing.dataProcessor import *
from dataflowfx.processing.dataProcessingGroup import *
35.333333
55
0.867925
10
106
9.2
0.6
0.304348
0.521739
0
0
0
0
0
0
0
0
0
0.075472
106
2
56
53
0.938776
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
69f6b0c0b7f4e2f931d1123531021cc73a05382e
77
py
Python
Maya/cicd/python/libMayaExtended/libMayaExtended/mayaSceneApi.py
Mu-L/Exporters
235ad02230791351d7a0440d9568641d28e2e77e
[ "Apache-2.0" ]
445
2017-10-18T01:54:00.000Z
2022-03-31T16:27:54.000Z
Maya/cicd/python/libMayaExtended/libMayaExtended/mayaSceneApi.py
Mu-L/Exporters
235ad02230791351d7a0440d9568641d28e2e77e
[ "Apache-2.0" ]
646
2017-10-16T00:46:17.000Z
2022-03-31T17:40:36.000Z
Maya/cicd/python/libMayaExtended/libMayaExtended/mayaSceneApi.py
Mu-L/Exporters
235ad02230791351d7a0440d9568641d28e2e77e
[ "Apache-2.0" ]
313
2017-10-15T09:20:45.000Z
2022-03-31T09:11:34.000Z
import maya.OpenMaya as OpenMaya
import maya.OpenMayaRender as OpenMayaRender
38.5
44
0.883117
10
77
6.8
0.5
0.294118
0
0
0
0
0
0
0
0
0
0
0.090909
77
2
44
38.5
0.971429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
384d7e0e7e24ecb0b97965b628833d405e541a33
28,503
py
Python
nflwin/tests/test_preprocessing.py
ryanfox/NFLWin
25967e7c11f7283289851912c5cc97a3a48394ab
[ "MIT" ]
15
2016-09-12T16:16:54.000Z
2021-12-28T03:28:50.000Z
nflwin/tests/test_preprocessing.py
ryanfox/NFLWin
25967e7c11f7283289851912c5cc97a3a48394ab
[ "MIT" ]
12
2016-06-10T01:52:49.000Z
2019-10-18T00:51:12.000Z
nflwin/tests/test_preprocessing.py
ryanfox/NFLWin
25967e7c11f7283289851912c5cc97a3a48394ab
[ "MIT" ]
8
2017-05-21T17:04:01.000Z
2021-12-28T03:27:34.000Z
from __future__ import print_function, division

import numpy as np
import pandas as pd
import pytest

from sklearn.utils.validation import NotFittedError
from sklearn.pipeline import Pipeline

from nflwin import preprocessing


class TestPipelines(object):
    """Testing if pipelining cleaning steps works."""

    def test_map_to_int_to_onehot(self):
        fit_df = pd.DataFrame({"quarter": ["Q1", "Q1", "Q1", "Q2", "Q2"]})
        transform_df = fit_df.copy()
        mti = preprocessing.MapToInt("quarter", copy=True)
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["quarter"], copy=True)
        pipe = Pipeline(steps=[("one", mti), ("two", ohe)])
        pipe.fit(fit_df)
        output_df = pipe.transform(transform_df)
        expected_df = pd.DataFrame({"onehot_col1": [1.0, 1, 1, 0, 0],
                                    "onehot_col2": [0.0, 0, 0, 1, 1]})
        pd.util.testing.assert_frame_equal(output_df, expected_df)


class TestComputeElapsedTime(object):
    """Testing if we can properly map quarters and time elapsed to a total time elapsed."""

    def test_bad_quarter_colname_produces_error(self):
        input_df = pd.DataFrame({"blahblahblah": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_bad_time_elapsed_colname_produces_error(self):
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "blahblahblah": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_preexisting_output_colname_produces_error(self):
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40],
                                 "total_time_elapsed": [0, 0, 0, 0, 0]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               total_time_colname="total_time_elapsed")
        cet.fit(input_df)
        with pytest.raises(KeyError):
            cet.transform(input_df)

    def test_incomplete_quarter_mapping(self):
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT1"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               quarter_to_second_mapping={"Q1": 0, "Q2": 900,
                                                                          "Q4": 2700, "OT1": 3600})
        cet.fit(input_df)
        with pytest.raises(TypeError):
            cet.transform(input_df)

    def test_simple_working_case(self):
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed")
        cet.fit(input_df)
        transformed_df = cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 900, 1850, 3550, 3640]})
        pd.util.testing.assert_frame_equal(transformed_df, expected_df)

    def test_inplace_transform(self):
        input_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed", copy=False)
        cet.fit(input_df)
        cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q3", "Q4", "OT"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 900, 1850, 3550, 3640]})
        pd.util.testing.assert_frame_equal(input_df, expected_df)

    def test_custom_mapping(self):
        input_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"],
                                 "time_elapsed": [200, 0, 50, 850, 40]})
        cet = preprocessing.ComputeElapsedTime("quarter", "time_elapsed",
                                               quarter_to_second_mapping={"quarter1": 0, "Q2": 500,
                                                                          "Q3": 1800, "Q4": 2700,
                                                                          "OT1": 3600})
        cet.fit(input_df)
        transformed_df = cet.transform(input_df)
        expected_df = pd.DataFrame({"quarter": ["quarter1", "Q2", "Q3", "Q4", "OT1"],
                                    "time_elapsed": [200, 0, 50, 850, 40],
                                    "total_elapsed_time": [200, 500, 1850, 3550, 3640]})
        pd.util.testing.assert_frame_equal(transformed_df, expected_df)


class TestComputeIfOffenseIsHome(object):
    """Testing if we can correctly compute if the offense is the home team."""

    def test_bad_offense_colname_produces_error(self):
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "blahblahblah": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_bad_home_team_colname_produces_error(self):
        input_df = pd.DataFrame({"blahblahblah": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_existing_offense_home_team_colname_produces_error(self):
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="home_team")
        ciow.fit(input_df)
        with pytest.raises(KeyError):
            ciow.transform(input_df)

    def test_correct_answer_with_copy(self):
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        expected_input_df = input_df.copy()
        expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                                "offense_team": ["a", "b", "a"],
                                                "offense_home_team": [True, False, True]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="offense_home_team",
                                                    copy=True)
        transformed_df = ciow.transform(input_df)
        pd.util.testing.assert_frame_equal(input_df.sort_index(axis=1),
                                           expected_input_df.sort_index(axis=1))
        pd.util.testing.assert_frame_equal(transformed_df.sort_index(axis=1),
                                           expected_transformed_df.sort_index(axis=1))

    def test_correct_answer_without_copy(self):
        input_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                 "offense_team": ["a", "b", "a"]})
        expected_transformed_df = pd.DataFrame({"home_team": ["a", "a", "a"],
                                                "offense_team": ["a", "b", "a"],
                                                "offense_home_team": [True, False, True]})
        ciow = preprocessing.ComputeIfOffenseIsHome("offense_team", "home_team",
                                                    offense_home_team_colname="offense_home_team",
                                                    copy=False)
        ciow.transform(input_df)
        pd.util.testing.assert_frame_equal(input_df.sort_index(axis=1),
                                           expected_transformed_df.sort_index(axis=1))


class TestMapToInt(object):
    """Testing if the integer mapper works."""

    def test_fit_bad_colname_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("blahblahblah")
        with pytest.raises(KeyError):
            mti.fit(input_df)

    def test_mapping_without_nans(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        expected_output = {"one": 0, "two": 1, "four": 2, "six": 3}
        assert mti.mapping == expected_output

    def test_mapping_with_nans(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", np.nan, "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        expected_output = {"one": 0, "two": 1, "four": 2, "six": 3}
        assert mti.mapping == expected_output

    def test_transform_before_fit_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        with pytest.raises(NotFittedError):
            mti.transform(input_df)

    def test_transform_bad_colname_produces_error(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transform_df = pd.DataFrame({"blahblahblah": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        with pytest.raises(KeyError):
            mti.transform(transform_df)

    def test_transform_without_nans(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transformed_df = mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]})
        pd.util.testing.assert_frame_equal(transformed_df, expected_df)

    def test_transform_with_nans(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", np.nan, "one"]})
        mti = preprocessing.MapToInt("one")
        mti.fit(input_df)
        transformed_df = mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, np.nan, 0]})
        pd.util.testing.assert_frame_equal(transformed_df, expected_df)

    def test_transform_inplace(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        mti = preprocessing.MapToInt("one", copy=False)
        mti.fit(input_df)
        mti.transform(input_df)
        expected_df = pd.DataFrame({"one": [0, 1, 0, 2, 3, 1, 0, 0]})
        pd.util.testing.assert_frame_equal(input_df, expected_df)

    def test_transform_copy(self):
        input_df = pd.DataFrame({"one": ["one", "two", "one", "four", "six", "two", "one", "one"]})
        expected_df = input_df.copy()
        mti = preprocessing.MapToInt("one", copy=True)
        mti.fit(input_df)
        transformed_data = mti.transform(input_df)
        pd.util.testing.assert_frame_equal(input_df, expected_df)


class TestOneHotEncoderFromDataFrame(object):
    """Testing if the one-hot encoder wrapper works."""

    def setup_method(self, method):
        self.data = pd.DataFrame({"one": [1, 2, 3, 1],
                                  "two": [2, 2, 2, 5],
                                  "three": [0, 5, 0, 5]})
        self.data = self.data[["one", "two", "three"]]

    def test_correct_dtype_passed(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(dtype=np.int)
        assert ohe.dtype == np.int

    def test_correct_handle_unknown_string_passed(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(handle_unknown="ignore")
        assert ohe.handle_unknown == "ignore"

    def test_encode_all_columns(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names="all")
        ohe.fit(self.data)
        transformed_data = ohe.transform(self.data)
        expected_data = pd.DataFrame({"onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 1, 1, 0],
                                      "onehot_col5": [0., 0, 0, 1],
                                      "onehot_col6": [1., 0, 1, 0],
                                      "onehot_col7": [0., 1, 0, 1]})
        pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                           expected_data.sort_index(axis=1))

    def test_encode_some_columns(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"])
        ohe.fit(self.data)
        transformed_data = ohe.transform(self.data)
        expected_data = pd.DataFrame({"two": [2, 2, 2, 5],
                                      "onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 0, 1, 0],
                                      "onehot_col5": [0., 1, 0, 1]})
        pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                           expected_data.sort_index(axis=1))

    def test_copy_data_works(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=True)
        ohe.fit(self.data)
        transformed_data = ohe.transform(self.data)
        expected_data = pd.DataFrame({"one": [1, 2, 3, 1],
                                      "two": [2, 2, 2, 5],
                                      "three": [0, 5, 0, 5]})
        pd.util.testing.assert_frame_equal(self.data.sort_index(axis=1),
                                           expected_data.sort_index(axis=1))

    def test_inplace_transform_works(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=False)
        data = self.data.copy()
        ohe.fit(self.data)
        ohe.transform(self.data)
        expected_data = pd.DataFrame({"two": [2, 2, 2, 5],
                                      "onehot_col1": [1., 0, 0, 1],
                                      "onehot_col2": [0., 1, 0, 0],
                                      "onehot_col3": [0., 0, 1, 0],
                                      "onehot_col4": [1., 0, 1, 0],
                                      "onehot_col5": [0., 1, 0, 1]})
        pd.util.testing.assert_frame_equal(self.data.sort_index(axis=1),
                                           expected_data.sort_index(axis=1))

    def test_encoding_subset_columns(self):
        ohe = preprocessing.OneHotEncoderFromDataFrame(categorical_feature_names=["one", "three"],
                                                       copy=True)
        shifted_data = self.data[2:]
        ohe.fit(shifted_data)
        transformed_data = ohe.transform(shifted_data)
        self.data = pd.DataFrame({"one": [1, 2, 3, 1],
                                  "two": [2, 2, 2, 5],
                                  "three": [0, 5, 0, 5]})
        expected_data = pd.DataFrame({"two": [2, 5],
                                      "onehot_col1": [0., 1],
                                      "onehot_col2": [1., 0],
                                      "onehot_col3": [1., 0],
                                      "onehot_col4": [0., 1]},
                                     index=[2, 3])
        print(transformed_data)
        print(expected_data)
        pd.util.testing.assert_frame_equal(transformed_data.sort_index(axis=1),
                                           expected_data.sort_index(axis=1))


class TestCreateScoreDifferential(object):
    """Testing if score differentials are properly created."""

    def test_bad_home_score_colname(self):
        csd = preprocessing.CreateScoreDifferential("badcol", "away_score", "offense_home")
        data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                             "away_score": [10, 0, 5, 15],
                             "offense_home": [True, True, True, True]})
        with pytest.raises(KeyError):
            csd.transform(data)

    def test_bad_away_score_colname(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "badcol", "offense_home")
        data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                             "away_score": [10, 0, 5, 15],
                             "offense_home": [True, True, True, True]})
        with pytest.raises(KeyError):
            csd.fit(data)
            csd.transform(data)

    def test_bad_offense_home_colname(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "badcol")
        data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                             "away_score": [10, 0, 5, 15],
                             "offense_home": [True, True, True, True]})
        with pytest.raises(KeyError):
            csd.fit(data)
            csd.transform(data)

    def test_differential_column_already_exists(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="used_col")
        data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                             "away_score": [10, 0, 5, 15],
                             "offense_home": [True, True, True, True],
                             "used_col": [0, 0, 0, 0]})
        with pytest.raises(KeyError):
            csd.fit(data)
            csd.transform(data)

    def test_differential_works_offense_is_home(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="score_diff")
        input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                   "away_score": [10, 0, 5, 15],
                                   "offense_home": [True, True, True, True]})
        expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                      "away_score": [10, 0, 5, 15],
                                      "offense_home": [True, True, True, True],
                                      "score_diff": [-9, 2, -2, -11]})
        csd.fit(input_data)
        transformed_data = csd.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1),
                                           transformed_data.sort_index(axis=1))

    def test_differential_works_offense_is_away(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="score_diff")
        input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                   "away_score": [10, 0, 5, 15],
                                   "offense_home": [False, False, False, False]})
        expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                      "away_score": [10, 0, 5, 15],
                                      "offense_home": [False, False, False, False],
                                      "score_diff": [9, -2, 2, 11]})
        csd.fit(input_data)
        transformed_data = csd.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1),
                                           transformed_data.sort_index(axis=1))

    def test_differential_works_offense_is_mix(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="score_diff")
        input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                   "away_score": [10, 0, 5, 15],
                                   "offense_home": [True, True, False, False]})
        expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                      "away_score": [10, 0, 5, 15],
                                      "offense_home": [True, True, False, False],
                                      "score_diff": [-9, 2, 2, 11]})
        csd.fit(input_data)
        transformed_data = csd.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1),
                                           transformed_data.sort_index(axis=1))

    def test_differential_with_copied_data(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="score_diff",
                                                    copy=True)
        input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                   "away_score": [10, 0, 5, 15],
                                   "offense_home": [True, True, True, True]})
        expected_input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                            "away_score": [10, 0, 5, 15],
                                            "offense_home": [True, True, True, True]})
        expected_transformed_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                                  "away_score": [10, 0, 5, 15],
                                                  "offense_home": [True, True, True, True],
                                                  "score_diff": [-9, 2, -2, -11]})
        csd.fit(input_data)
        transformed_data = csd.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_input_data.sort_index(axis=1),
                                           input_data.sort_index(axis=1))
        pd.util.testing.assert_frame_equal(expected_transformed_data.sort_index(axis=1),
                                           transformed_data.sort_index(axis=1))

    def test_differential_with_inplace_data(self):
        csd = preprocessing.CreateScoreDifferential("home_score", "away_score", "offense_home",
                                                    score_differential_colname="score_diff",
                                                    copy=False)
        input_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                   "away_score": [10, 0, 5, 15],
                                   "offense_home": [True, True, True, True]})
        expected_data = pd.DataFrame({"home_score": [1, 2, 3, 4],
                                      "away_score": [10, 0, 5, 15],
                                      "offense_home": [True, True, True, True],
                                      "score_diff": [-9, 2, -2, -11]})
        csd.fit(input_data)
        csd.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_data.sort_index(axis=1),
                                           input_data.sort_index(axis=1))


class TestCheckColumnNames(object):
    """Testing whether column names are properly checked."""

    def test_transform_called_before_fit(self):
        ccn = preprocessing.CheckColumnNames()
        data = pd.DataFrame()
        with pytest.raises(NotFittedError):
            ccn.transform(data)

    def test_transform_data_has_wrong_columns(self):
        ccn = preprocessing.CheckColumnNames()
        input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4]})
        ccn.fit(input_data)
        test_data = pd.DataFrame({"one": [1, 2], "three": [3, 4]})
        with pytest.raises(KeyError):
            ccn.transform(test_data)

    def test_transform_reorders_columns(self):
        ccn = preprocessing.CheckColumnNames()
        input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4], "three": [5, 6]})
        test_data = pd.DataFrame({"one": [7, 8], "two": [9, 10], "three": [11, 12]})
        expected_data = test_data.copy()
        # Ensure columns are in a particular order:
        input_data = input_data[["one", "two", "three"]]
        test_data = test_data[["two", "one", "three"]]
        expected_data = expected_data[["one", "two", "three"]]
        with pytest.raises(AssertionError):
            pd.util.testing.assert_frame_equal(test_data, expected_data)
        ccn.fit(input_data)
        pd.util.testing.assert_frame_equal(ccn.transform(test_data), expected_data)

    def test_transform_drops_unnecessary_columns(self):
        ccn = preprocessing.CheckColumnNames()
        input_data = pd.DataFrame({"one": [1, 2], "two": [3, 4], "three": [5, 6]})
        test_data = pd.DataFrame({"one": [7, 8], "two": [9, 10], "three": [11, 12], "four": [13, 14]})
        expected_data = pd.DataFrame({"one": [7, 8], "two": [9, 10], "three": [11, 12]})
        # Ensure columns are in a particular order:
        input_data = input_data[["one", "two", "three"]]
        expected_data = expected_data[["one", "two", "three"]]
        ccn.fit(input_data)
        pd.util.testing.assert_frame_equal(ccn.transform(test_data), expected_data)

    def test_transform_with_user_specified_colums(self):
        ccn = preprocessing.CheckColumnNames(column_names=["c", "b", "a"])
        input_data = pd.DataFrame({"e": [-2, -1, 0],
                                   "a": [1, 2, 3],
                                   "b": [4, 5, 6],
                                   "c": [7, 8, 9],
                                   "d": [10, 11, 12]})
        expected_data = pd.DataFrame({"c": [7, 8, 9], "b": [4, 5, 6], "a": [1, 2, 3]})
        expected_data = expected_data[["c", "b", "a"]]
        transformed_data = ccn.transform(input_data)
        pd.util.testing.assert_frame_equal(expected_data, transformed_data)
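A compact sketch of the fit/transform pattern these tests exercise, using only behaviour the tests themselves pin down (MapToInt assigns integers in order of first appearance); the column values are illustrative:

import pandas as pd
from nflwin import preprocessing

df = pd.DataFrame({"quarter": ["Q1", "Q2", "Q2", "OT"]})
mti = preprocessing.MapToInt("quarter", copy=True)
mti.fit(df)                  # learns {"Q1": 0, "Q2": 1, "OT": 2}
encoded = mti.transform(df)  # integer-encoded copy of the frame
print(mti.mapping)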
49.83042
121
0.486861
2,916
28,503
4.526749
0.075446
0.03447
0.031515
0.029697
0.804318
0.772424
0.742045
0.730909
0.709167
0.704167
0
0.041517
0.383118
28,503
571
122
49.917688
0.709208
0.016279
0
0.647436
0
0
0.096029
0
0
0
0
0
0.066239
1
0.094017
false
0.004274
0.014957
0
0.123932
0.00641
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
38817cda082f7a2430e3d6ecaa0bd99892992d00
94
py
Python
week1/increment.py
rghvat/Competitive-Programmer-Core-Skills
5e6a834d5a283855f788627d96647786d39108d0
[ "Apache-2.0" ]
null
null
null
week1/increment.py
rghvat/Competitive-Programmer-Core-Skills
5e6a834d5a283855f788627d96647786d39108d0
[ "Apache-2.0" ]
null
null
null
week1/increment.py
rghvat/Competitive-Programmer-Core-Skills
5e6a834d5a283855f788627d96647786d39108d0
[ "Apache-2.0" ]
null
null
null
from math import log, floor


def no_decimal_digit(num):
    # Number of decimal digits of (num + 1): floor(log10(num + 1)) + 1
    return floor(log(num + 1, 10)) + 1
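A quick sanity check of the formula, worked by hand: floor(log10(num + 1)) + 1 is the decimal digit count of num + 1, which is consistent with the file name increment.py:

assert no_decimal_digit(9) == 2     # 9 + 1 = 10   -> 2 digits
assert no_decimal_digit(99) == 3    # 99 + 1 = 100 -> 3 digits
assert no_decimal_digit(100) == 3   # 100 + 1 = 101 -> 3 digits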
15.666667
36
0.691489
17
94
3.705882
0.764706
0
0
0
0
0
0
0
0
0
0
0.052632
0.191489
94
5
37
18.8
0.776316
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
38aae2b9900549f32ff47692dd888719d1c2b6a1
115
py
Python
src/app/utils.py
jmfer1/flagsmith-api
e7de1b5ebbcb58197e9545dc760c4b80c86a6836
[ "BSD-3-Clause" ]
1
2021-01-06T17:32:26.000Z
2021-01-06T17:32:26.000Z
src/app/utils.py
agiannelli/flagsmith-api
e7de1b5ebbcb58197e9545dc760c4b80c86a6836
[ "BSD-3-Clause" ]
null
null
null
src/app/utils.py
agiannelli/flagsmith-api
e7de1b5ebbcb58197e9545dc760c4b80c86a6836
[ "BSD-3-Clause" ]
null
null
null
import shortuuid


def create_hash():
    """Helper function to create a short hash"""
    return shortuuid.uuid()
16.428571
48
0.695652
15
115
5.266667
0.8
0
0
0
0
0
0
0
0
0
0
0
0.2
115
6
49
19.166667
0.858696
0.330435
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
38c56a18363635e0785391215c9ad6872be28e88
71
py
Python
extensions/config_extension.py
chonhan/flask_restapi_clean_architecture
7cc460386d1f70e88234ab31f291131290485e05
[ "MIT" ]
29
2020-06-11T10:15:12.000Z
2022-03-26T06:49:48.000Z
{{cookiecutter.project_name}}/extensions/config_extension.py
sarimurrab/cookiecutter-space
4ba9da5c0c16c902dc737951bc84c21671a24091
[ "MIT" ]
2
2021-03-20T04:01:53.000Z
2021-03-20T04:02:06.000Z
{{cookiecutter.project_name}}/extensions/config_extension.py
sarimurrab/cookiecutter-space
4ba9da5c0c16c902dc737951bc84c21671a24091
[ "MIT" ]
13
2020-12-24T14:33:05.000Z
2022-03-26T13:26:51.000Z
from config import configurations


def register_config(app):
    pass
11.833333
33
0.774648
9
71
6
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.183099
71
5
34
14.2
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
1
0
1
0
0
6
38e18301ce69520be06208111547c3e763e0f68d
187,275
py
Python
Library/dformpy/dformpy.py
MostaphaG/Summer_project-df
fec6d2335928fc5ccb833eabb4962e45566681fd
[ "MIT" ]
1
2022-02-14T07:42:58.000Z
2022-02-14T07:42:58.000Z
Library/dformpy/dformpy.py
MostaphaG/Summer_project-df
fec6d2335928fc5ccb833eabb4962e45566681fd
[ "MIT" ]
null
null
null
Library/dformpy/dformpy.py
MostaphaG/Summer_project-df
fec6d2335928fc5ccb833eabb4962e45566681fd
[ "MIT" ]
null
null
null
# Differential form python module attempt - 1 # import needed modules import numpy as np import matplotlib.pyplot as plt from matplotlib.lines import Line2D from sympy import diff, simplify from sympy.parsing.sympy_parser import parse_expr from math import isnan from matplotlib import patches as patch from matplotlib import cm # input many numpy functions to deal with user input from numpy import sin, cos, tan, sqrt, log, arctan, arcsin, arccos, tanh from numpy import sinh, cosh, arcsinh, arccosh, arctanh, exp, pi, e # define function that sets the recursion constant for the loop to plot stacks # pre-define the displacements from mid point needed # c is the Truth value from parity (odd or even number n) def G(s, n, c): ''' G(s, n, c) Defines coefficints needed to displace stack sheets along direction perp. to form, depending on how many are to be plotted. Parameters: -------- s - det. number of sheets to draw n - which sheet is sequence one is to be drawn now c - int bool, as 0 or 1, defines parity of n Returns: -------- Coefficient to fractional sheet displacement. ''' if c == 0: return ((2*s + 1)/(2*(n-1))) else: return (s/(n-1)) # define a function that will analytically find the 2-form from given expressions # in a given number of dimensions and in terms of given coordinate symbols def find_2_form(expressions, coords, xg, yg, zg=None, m=2): ''' find_2_form(expressions, coords, xg, yg, zg=None, m=2) Finds the analytical 2 form using sympy experssion handling. Parameters: --------------- expressions - list of sympy experssions for the 1 form scaling fucntions coords - list of coordinate names as strings, that were used in experssions xg, yg - grids zg - possible grid m - number of dimensions Returns: --------------- result - analytical, unformatted 2-form equation ''' # define a sympy expression for string 0 sympy_expr_zero = parse_expr('0*x', evaluate=False) # set up an array to store derrivatives. 
ext_ds = np.empty((m, m), dtype='object') # set up an array to store the results # in 2D only dx^dy, in 3D (m=3) (in order): dx^dy, dx^dz, dy^dz result = np.empty((int((m-1)*m/2), 1), dtype='object') for i in range(int((m-1)*m/2)): result[i] = str(result[i]) # loop over differentiating each, when differentiating w.r.t its coord, set to 0 for coord_index in range(len(coords)): # loop over differentiating each component: for comp_index in range(len(expressions)): # when equal set to 0, when not-differentiate: if comp_index == coord_index: ext_ds[comp_index, coord_index] = str(sympy_expr_zero) elif comp_index != coord_index: ext_ds[comp_index, coord_index] = str(diff(expressions[comp_index], coords[coord_index])) # change the signs for wedges in wrong order if comp_index < coord_index: ext_ds[comp_index, coord_index] = ' - (' + str(ext_ds[comp_index, coord_index]) + ')' elif comp_index > coord_index: ext_ds[comp_index, coord_index] = ' + ' + str(ext_ds[comp_index, coord_index]) # merge the results into a 2-form (for 2-form on R^2, the result is a single component (dx^xy)) # do so by adding opposite elements along the diagonal ( / ) components of ext_ds # this includes taking elemets with switched i and j # set up a variable to count pairs (pairs because we are forming 2-forms): pair = 0 # loop over opposing elements (matching elementary 2-forms) for i in range(1, m): for j in range(i): # initially clear the element from its Nonetype placeholder result[pair, 0] = '' # extract opposing elements temp = ext_ds[i, j] temp1 = ext_ds[j, i] # check these against zero entries: if (temp == '0') or (temp == '-(0)') or (temp == '0*x'): pass else: result[pair, 0] += temp if (temp1 == '0') or (temp1 == '-(0)') or (temp1 == '0*x'): pass else: result[pair, 0] += temp1 # update the result row counter pair += 1 return result # # create a local result, that will be used to evaluate the resulting string # # and format it # loc_res = result + '' # # # format string in each result row # for d in range(pair): # # format the result to be 'python understood' to be able to use the eval() # loc_res[d, 0] = loc_res[d, 0].replace('x', 'xg') # loc_res[d, 0] = loc_res[d, 0].replace('y', 'yg') # loc_res[d, 0] = loc_res[d, 0].replace('z', 'zg') # # # check against constant result, to be of correct shape before eval is used # if loc_res[d, 0].find('x') & loc_res[d, 0].find('y') == -1: # loc_res[d, 0] = '(' + str(loc_res[d, 0]) + ')* np.ones(np.shape(xg))' # if loc_res[d, 0].find('x') & loc_res[d, 0].find('y') == -1: # loc_res[d, 0] = '(' + str(loc_res[d, 0]) + ')* np.ones(np.shape(yg))' # # # set up a vector to store the 2-form numerically, from xg and yg and possibly further # # Note - need pt_den being supplied m times. # # not overall generalised, as not needed past m=3. # if m == 2: # form_2 = np.empty((1, pt_den, pt_den)) # form_2[0, :, :] = eval(loc_res[0, 0]) # elif m == 3: # form_2 = np.empty((3, pt_den, pt_den, pt_den)) # for d in range(3): # form_2[d, :, :, :] = eval(loc_res[d, 0]) # # # return useful findings to the user # return form_2, result, ext_ds # %% ''' function to create a 1-form object and define methods for it ''' # define the 1-form object and all its methods class form_1(): ''' form_1(xg, yg, F_x, F_y, F_x_eqn=None, F_y_eqn=None) Defines a 1-form object and returns it to user. 
Parameters: --------------- xg - grid of x values (2D numpy.ndarray) yg - grid of y values (2D numpy.ndarray) F_x - grid of dx form components (2D numpy.ndarray) F_y - grid of dy form components (2D numpy.ndarray) Optional: F_x_eqn - expression for dx form component f(x,y) (string) F_y_eqn - expression for dy form component f(x,y) (string) Instance variables: --------------- xg, yg, F_x, F_y s_max - int - maximum number of sheets per stack s_min - int - minimum number of sheets per stack pt_den - int - number of points on grids, extracted from grids, assumes square grid fract - float/int - length of sheet in stack as fraction of whole plot size scale - float/int - constant multpilier to change scaling w_head - float/int - width of arrowghead on stack as size of sheet h_head - float/int - height of arrowghead on stack as size of sheet arrowheads - bool - determines of arrowheads showld be drawn on stacks color - str - colour to draw stacks with, can be Hex when using '#FFFFFF' logarithmic_scale_bool - bool - determines if log scaling is used delta_factor - float/int - determined size of blank boarder in figure as fraction of whole plot size Methods: --------------- give_eqn return_string colour arrow_heads head_width head_height log_scaling max_sheets sheet_size surround_space set_density plot ext_d num_ext_d hodge wedge_analytical wedge_num zoom interior_d contravariant ''' def __init__(self, xg, yg, F_x, F_y, F_x_eqn=None, F_y_eqn=None): self.xg = xg self.yg = yg self.F_x = F_x self.F_y = F_y self.s_max = 6 self.s_min = 1 self.fract = 0.05 self.scale = 1 self.w_head = 1/8 self.h_head = 1/4 self.arrowheads = True self.color = '#8B14F3' self.logarithmic_scale_bool = 0 self.delta_factor = 10 # define equations if given: # user must change to access some methods, will indicate when needed # Note, the string must be given with x and y as variables if F_x_eqn is not None: self.form_1_str_x = str(simplify(F_x_eqn)) else: self.form_1_str_x = None if F_y_eqn is not None: self.form_1_str_y = str(simplify(F_y_eqn)) else: self.form_1_str_y = None # ##################################################################### # write customising methods # ##################################################################### # define a mehtod to allow user to supply the string equation # of the 1-form def give_eqn(self, equation_str_x, equation_str_y): ''' give_eqn(equation_str_x, equation_str_y) This must be the equation of the supplied numerical 1-form in terms of variables x and y. 
All formatting is as in numpy, but no library calling in string exception - exponential - call it as e**(expression) it re-evaluates the numerical values to match the new equations A warning is shown if any differences are detected, not rigorous though Will often show for most minor changes Has to be given, for some methods to be computable Methods will indicate when needed Parameters: --------------- equation_str_x - string of the dx component, with x and y as variables equation_str_y - string of the dy component, with x and y as variables Returns: None ''' # set equation parameters to simplified inputs self.form_1_str_x = str(simplify(equation_str_x)) self.form_1_str_y = str(simplify(equation_str_y)) # make the values match automatically to limit how often mismatch occurs # substitute these into the equation, but keep it local: str_x = self.form_1_str_x + '' str_y = self.form_1_str_y + '' str_x = str_x.replace('x', '(self.xg)') str_x = str_x.replace('y', '(self.yg)') str_y = str_y.replace('x', '(self.xg)') str_y = str_y.replace('y', '(self.yg)') # check against constant forms, to have correct shape if str_x.find('x') & str_x.find('y') == -1: str_x = '(' + str(str_x) + ')* np.ones(np.shape(self.xg))' if str_y.find('x') & str_y.find('y') == -1: str_y = '(' + str(str_y) + ')* np.ones(np.shape(self.yg))' # re-evaluate the 2-form numerically, warn user if changed # if not ((self.F_x is eval(str_x)) and (self.F_y is eval(str_y))): # print('Warning: Equations did not exactly match numerical values, and these were change to agree with equations') # evaluate formatted equations and save self.F_x = eval(str_x) self.F_y = eval(str_y) # deifne a function to return the string equations to the user def return_string(self): ''' Returns unformatted strings for component equations back to user Done in case user wants to access strings that got here by ext. alg. Parmateres: None Returns: None ''' return self.form_1_str_x, self.form_1_str_y # change colour def colour(self, color): ''' Changes the colour that stacks plot in Note, not strictly needed, can change it by instance.color(colour) Parmaeters: ------------- color - string - string to define a colour of stacks can be any matplotlib understood colour or Hex in #FFFFFF Returns: None ''' self.color = str(color) # change arrowsheads def arrow_heads(self): ''' Changes the boolean that determines if arrowheads are plotted on stacks. Whenever it is called, it changes that boolean to opposite The form object is initialised with this as True Note, not strictly needed, can change it by instance.arrowheads(bool) Parmaeters: None Returns: None ''' self.arrowheads = not self.arrowheads # change w_head def head_width(self, wide): ''' Sets the width of the arrowhead on a stacks to the desired float as a fraction of the stack length in the direction perp. to form Note, not strictly needed, can change it by instance.w_head(width) Parmaeters: --------------- wide - float/int - Sets the width as a fraction of the stack length Returns: None ''' self.w_head = float(wide) # change h_head def head_height(self, high): ''' Sets the height of the arrowhead on a stacks to the desired float as a fraction of the stack length in the direction parall. to form Note, not strictly needed, can change it by instance.h_head(height) Parmaeters: --------------- high - float/int - Sets the height as a fraction of the stack length Returns: None ''' self.h_head = float(high) # change boolean that det. 
if to sclae logarithmically def log_scaling(self): ''' Changes the boolean that determines if scaling is logarithmic Whenever it is called, it changes that boolean to opposite The form object is initialised with this as False Note, not strictly needed, can change it by instance.logarithmic_scale_bool(bool) Parmaeters: None Returns: None ''' self.logarithmic_scale_bool = not self.logarithmic_scale_bool # self.base = base # define methods to change s_max def max_sheets(self, maximum): ''' Changes maximum number of sheets to draw on a stack. These still scale relative to max magnitude. Note, not strictly needed, can change it by instance.s_max(maximum) Parmaeters: --------------- maximum - int - Max number of sheets to plot per stack Returns: None ''' self.s_max = maximum # define method to change fraction of sheetsize w.r.t graph size: def sheet_size(self, fraction): ''' Changes the size of stack in direction perp. to form. It is done in in terms of the fraction of plot size Note, not strictly needed, can change it by instance.fract(fraction) Parmaeters: --------------- fraction - float/int - size of stack in terms of the fraction of plot size Returns: None ''' self.fract = fraction # define a method to change spare spacing around figure def surround_space(self, delta_denominator): ''' Sets the extra blank space around the domain of grids in axis Note, not strictly needed, can change it by instance.delta_factor(delta_denominator) Parmaeters: --------------- delta_denominator - float/int - denominator or fraction to use eg. supplying 3 will make the white space 1/3 of the width of the domain of the grid. Returns: None ''' self.delta_factor = delta_denominator # define a method to change the density of grids in same range # requires string input of 1-form: def set_density(self, points_number): ''' Changes the desnity of points in the same range to the input value Requires the string equation to be supplied to not 'extrapolate' Only creates 2 axis with same number of points each cannot be used for any custom grids Parameters: -------------- points_number - new number of points to use per axis Returns: None ''' if self.form_1_str_x == None or self.form_1_str_y == None: # Error raise ValueError('Error: You need to supply the 1-form equation to do this, see \'give_eqn\' method') else: # redefine the grids x = np.linspace(self.xg[0,0], self.xg[0, -1], points_number) y = np.linspace(self.yg[0,0], self.yg[-1, 0], points_number) self.xg, self.yg = np.meshgrid(x, y) # substitute these into the equation, but keep it local: str_x = self.form_1_str_x + '' str_y = self.form_1_str_y + '' str_x = str_x.replace('x', '(self.xg)') str_x = str_x.replace('y', '(self.yg)') str_y = str_y.replace('x', '(self.xg)') str_y = str_y.replace('y', '(self.yg)') # check against constant forms, to have correct array shape if str_x.find('x') & str_x.find('y') == -1: str_x = '(' + str(str_x) + ')* np.ones(np.shape(self.xg))' if str_y.find('x') & str_y.find('y') == -1: str_y = '(' + str(str_y) + ')* np.ones(np.shape(self.yg))' # re-evaluate the 1-form numerically self.F_x = eval(str_x) self.F_y = eval(str_y) # ##################################################################### # More useful methods (plotting, zooming and ext. alg.) 
    # #####################################################################
    # define a function that will use the set-up 1-form and plot it
    def plot(self, axis):
        '''
        plot(axis)

        Uses the attributes of the object, as set originally and as
        customised with methods, to create a plot of the 1-form.

        Parameters:
        -------------
        axis - matplotlib axes that the plot is to be put on

        Returns: None
        '''

        # get the lengths of x and y from their grids
        x_len = len(self.xg[:, 0])
        y_len = len(self.yg[0, :])

        # Extract L from the x and y grids
        Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
        Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
        L = 0.5*(Lx + Ly)  # average, needed for stack sizes only
        x0 = self.xg[0, 0] + Lx
        y0 = self.yg[0, 0] + Ly

        # reset axis limits
        ax_Lx = Lx + Lx/self.delta_factor
        ax_Ly = Ly + Ly/self.delta_factor
        axis.set_xlim(-ax_Lx + x0, ax_Lx + x0)
        axis.set_ylim(-ax_Ly + y0, ax_Ly + y0)

        # find the distance between neighbouring points on the grid,
        # for drawing extra artefacts
        dist_points = self.xg[0, 1] - self.xg[0, 0]

        # define an empty array of magnitudes, to then fill with integer rel. mags
        R_int = np.zeros(shape=((x_len), (y_len)))

        # #########################################################################
        # get variables needed for the initial stack plot
        # #########################################################################

        # set all insignificant values to zero:
        self.F_x[np.abs(self.F_x) < 1e-15] = 0
        self.F_y[np.abs(self.F_y) < 1e-15] = 0  # (previously zeroed F_x twice)

        # find the arrow length corresponding to each point and store in mag array
        mag = np.sqrt(self.F_x**2 + self.F_y**2)

        # find direction of each arrow
        angles = np.arctan2(self.F_y, self.F_x)  # theta defined from positive x axis ccw

        # find regions ON GRID that are nan or inf as a bool array;
        # set those points to zero in mag and mark them on the axis
        isnan_arr = np.isnan(mag)
        for i in range(x_len):
            for j in range(y_len):
                if isnan_arr[i, j]:
                    # colour this region as a shaded square
                    rect = patch.Rectangle((self.xg[i, j] - dist_points/2, self.yg[i, j] - dist_points/2), dist_points, dist_points, color='#B5B5B5')
                    axis.add_patch(rect)
                    mag[i, j] = 0
                if abs(mag[i, j]) == np.inf or abs(mag[i, j]) > 1e15:
                    # colour this point as a big red dot
                    circ = patch.Circle((self.xg[i, j], self.yg[i, j]), L*self.fract/3, color='red')
                    axis.add_patch(circ)
                    mag[i, j] = 0

        # #########################################################################
        # use the direction of arrows to define stack properties
        # #########################################################################

        # define length of sheet as a fraction of total graph scale;
        # this also sets the max, total height of the stack (along its direction)
        s_L = self.fract * L

        # #########################################################################
        # define stack based on geometrical arguments:
        # sheets perp. to hypothetical arrow, shifted along it,
        # their density prop. to mag, + arrowhead on top
        # #########################################################################

        # find the maximum magnitude for scaling
        max_size = np.max(mag)

        # set relative scaling, linear or logarithmic
        if self.logarithmic_scale_bool:
            mag1 = mag + 1
            logmag1 = np.log(mag1)
            R = logmag1/np.max(logmag1)  # Re-assign R
        else:
            R = mag/max_size

        # define trigonometric shifts
        I_sin = np.sin(angles)
        I_cos = np.cos(angles)

        # precalculate heavy operations:
        # define the points that set out a line of the stack sheet (middle line)
        A_x = self.xg + (s_L/2)*I_sin
        A_y = self.yg - (s_L/2)*I_cos
        B_x = self.xg - (s_L/2)*I_sin
        B_y = self.yg + (s_L/2)*I_cos

        # define points of stack arrowheads as arrays for all stacks
        p_sh1x = self.xg + (s_L/2)*I_cos + (s_L*self.w_head)*I_sin
        p_sh1y = self.yg + (s_L/2)*I_sin - (s_L*self.w_head)*I_cos
        p_sh2x = self.xg + (s_L/2)*I_cos - (s_L*self.w_head)*I_sin
        p_sh2y = self.yg + (s_L/2)*I_sin + (s_L*self.w_head)*I_cos
        p_sh3x = self.xg + (s_L*0.5 + s_L*self.h_head)*I_cos
        p_sh3y = self.yg + (s_L*0.5 + s_L*self.h_head)*I_sin

        # special case, when there is only 1 line in the stack plot:
        P_sh1x = self.xg + (s_L*self.w_head)*I_sin
        P_sh1y = self.yg - (s_L*self.w_head)*I_cos
        P_sh2x = self.xg - (s_L*self.w_head)*I_sin
        P_sh2y = self.yg + (s_L*self.w_head)*I_cos
        P_sh3x = self.xg + (s_L*self.h_head)*I_cos
        P_sh3y = self.yg + (s_L*self.h_head)*I_sin

        # array of number of sheets for each stack
        for i in range(self.s_max - self.s_min + 1):
            t = self.s_max - i
            R_int[R <= t/self.s_max] = t

        # loop over each coordinate, plotting
        for i in range(x_len):
            for j in range(y_len):
                # variable for the currently considered magnitude, as it is
                # reused; avoids extracting from R many times
                n = R_int[i, j]

                # do not plot anything if magnitude is exactly zero
                if mag[i, j] == 0:
                    continue

                # deal with even numbers of sheets from magnitudes:
                if n % 2 == 0:
                    # parameter to loop over in the recursion equation
                    s = 0
                    # points for sheets required for the given magnitude;
                    # from these, define all the needed lines and plot them
                    while s <= 0.5*(n-2):  # maximum set by equations (documentation)
                        # define all the points for the 2 currently looped +- sheets
                        Ax1 = A_x[i, j] + G(s, n, 0)*s_L*I_cos[i, j]
                        Ay1 = A_y[i, j] + G(s, n, 0)*s_L*I_sin[i, j]
                        Bx1 = B_x[i, j] + G(s, n, 0)*s_L*I_cos[i, j]
                        By1 = B_y[i, j] + G(s, n, 0)*s_L*I_sin[i, j]
                        Ax2 = A_x[i, j] - G(s, n, 0)*s_L*I_cos[i, j]
                        Ay2 = A_y[i, j] - G(s, n, 0)*s_L*I_sin[i, j]
                        Bx2 = B_x[i, j] - G(s, n, 0)*s_L*I_cos[i, j]
                        By2 = B_y[i, j] - G(s, n, 0)*s_L*I_sin[i, j]
                        # from these, define the 2 lines for this run
                        axis.add_line(Line2D((Ax1, Bx1), (Ay1, By1), linewidth=1, color=self.color))
                        axis.add_line(Line2D((Ax2, Bx2), (Ay2, By2), linewidth=1, color=self.color))
                        # update parameter to repeat and draw all needed sheets
                        s += 1
                # deal with odd numbers of sheets:
                else:
                    # Add the centre line for odd numbers of sheets
                    axis.add_line(Line2D((A_x[i, j], B_x[i, j]), (A_y[i, j], B_y[i, j]), linewidth=1, color=self.color))
                    # then loop over the remaining lines as per the recursion formula:
                    s = 1  # exclude the already completed s = 0
                    # define all remaining sheets for the magnitude:
                    while s <= 0.5*(n-1):  # maximum set by equations (documentation)
                        # define all the points for the current +- displacement
                        Ax1 = A_x[i, j] + G(s, n, 1)*s_L*I_cos[i, j]
                        Ay1 = A_y[i, j] + G(s, n, 1)*s_L*I_sin[i, j]
                        Bx1 = B_x[i, j] + G(s, n, 1)*s_L*I_cos[i, j]
                        By1 = B_y[i, j] + G(s, n, 1)*s_L*I_sin[i, j]
                        Ax2 = A_x[i, j] - G(s, n, 1)*s_L*I_cos[i, j]
                        Ay2 = A_y[i, j] - G(s, n, 1)*s_L*I_sin[i, j]
                        Bx2 = B_x[i, j] - G(s, n, 1)*s_L*I_cos[i, j]
                        By2 = B_y[i, j] - G(s, n, 1)*s_L*I_sin[i, j]
                        # from these, define the 2 displaced lines
                        axis.add_line(Line2D((Ax1, Bx1), (Ay1, By1), linewidth=1, color=self.color))
                        axis.add_line(Line2D((Ax2, Bx2), (Ay2, By2), linewidth=1, color=self.color))
                        # update parameter
                        s += 1

                # deal with arrowheads
                if self.arrowheads:
                    # on the central sheet for n = 1, or on the top sheet for n > 1
                    if n > 1:
                        axis.add_line(Line2D((p_sh1x[i, j], p_sh3x[i, j]), (p_sh1y[i, j], p_sh3y[i, j]), linewidth=1, color=self.color))
                        axis.add_line(Line2D((p_sh2x[i, j], p_sh3x[i, j]), (p_sh2y[i, j], p_sh3y[i, j]), linewidth=1, color=self.color))
                    else:
                        # when only 1 sheet is drawn
                        axis.add_line(Line2D((P_sh1x[i, j], P_sh3x[i, j]), (P_sh1y[i, j], P_sh3y[i, j]), linewidth=1, color=self.color))
                        axis.add_line(Line2D((P_sh2x[i, j], P_sh3x[i, j]), (P_sh2y[i, j], P_sh3y[i, j]), linewidth=1, color=self.color))
                else:
                    pass

    # method to find its exterior derivative
    def ext_d(self):
        '''
        ext_d()

        Computes the exterior derivative and returns it
        as the 2-form object
        '''
        if self.form_1_str_x is None or self.form_1_str_y is None:
            # ERROR
            raise ValueError('Error: You need to supply the 1-form equations to do this, look at \'give_eqn\' method')
        else:
            # the strings have been correctly given, compute the
            # exterior derivative:
            # get the input strings of the x and y components
            x_comp_str = self.form_1_str_x
            y_comp_str = self.form_1_str_y
            # set up sympy expressions from the component strings
            sympy_expr_x = parse_expr(x_comp_str, evaluate=False)
            sympy_expr_y = parse_expr(y_comp_str, evaluate=False)
            # combine the 2 into an array:
            expressions = np.array([sympy_expr_x, sympy_expr_y])
            # set up an array of coordinates that need to be used (in standard order)
            coords = ['x', 'y']
            # set up dimensionality
            m = 2
            # from these, get the 2-form
            result = find_2_form(expressions, coords, self.xg, self.yg, zg=None, m=m)
            # format and evaluate:
            # get the string of this new 2-form
            form_2_str = str(simplify(result[0][0]))
            # keep a local, unformatted version of this
            # to supply to form_2
            form_2_str_loc = form_2_str*1
            # numerically evaluate it, careful about constants;
            # to evaluate it, make sure to use grids
            form_2_str = form_2_str.replace('x', '(self.xg)')
            form_2_str = form_2_str.replace('y', '(self.yg)')
            if form_2_str.find('x') == -1 and form_2_str.find('y') == -1:
                form_2_str = '(' + str(form_2_str) + ')* np.ones(np.shape(self.xg))'
            # evaluate, set up the new object and return
            form_2_result = eval(form_2_str)
            result_form = form_2(self.xg, self.yg, form_2_result, form_2_str_loc)
            # return it to the user
            return result_form

    # define a function to compute the numerical-only exterior derivative
    def num_ext_d(self):
        '''
        Takes in no arguments

        Computes the exterior derivative numerically only.
        The equations do not need to be given;
        if given, they do not get passed onto the 2-form object anyway,
        NUMERICAL ONLY, they will be lost!
        Returns the 2-form object
        '''
        # get the x and y coordinate arrays (for grid spacing):
        dx = self.xg[0, :]
        dy = self.yg[:, 0]

        # copy F_x and F_y locally
        fx = self.F_x + np.zeros(np.shape(self.xg))
        fy = self.F_y + np.zeros(np.shape(self.xg))

        # clean up F_x and F_y from nan etc.
        for i in range(len(self.xg[:, 0])):
            for j in range(len(self.yg[0, :])):
                # correct for ill-defined values
                if isnan(fx[i, j]):
                    fx[i, j] = 0
                if isnan(fy[i, j]):
                    fy[i, j] = 0
                if abs(fx[i, j]) == np.inf or abs(fx[i, j]) > 1e15:
                    fx[i, j] = 1e10
                if abs(fy[i, j]) == np.inf or abs(fy[i, j]) > 1e15:
                    fy[i, j] = 1e10

        # Calculate derivatives as needed, using numpy gradient.
        # Note: axis 0 of the meshgrid arrays runs along y and axis 1 along x,
        # so the y coordinates must be supplied first (the previous argument
        # order was only correct for square grids with equal spacing).
        dy_F_x, _ = np.gradient(fx, dy, dx)
        _, dx_F_y = np.gradient(fy, dy, dx)

        # from these, get the 2-form
        form_2_result = dx_F_y - dy_F_x

        # return the 2-form object to the user
        result_form = form_2(self.xg, self.yg, form_2_result)
        return result_form

    # define a method to Hodge it
    def hodge(self, keep_object=False):
        '''
        hodge(keep_object=False)

        Parameters:
        -------------
        keep_object - determines if the result should be returned as a new
        1-form or if the current one is to be changed. Default is False.
        When False, a new object is created;
        when True, the object acted on is modified.

        Calculates the Hodge on R^2 by the standard definition:
        dx -> dy and dy -> -dx

        Does so analytically, using the equations provided in the instance.

        Returns: 1-form if keep_object is False, else returns nothing
        '''
        # check for equations:
        if self.form_1_str_x is None or self.form_1_str_y is None:
            # ERROR
            raise TypeError('Error: You need to supply the 1-form equation to do this, look at \'give_eqn\' method')
        else:
            # some equations are there, compute the Hodge on these:
            new_str_x = '-(' + self.form_1_str_y + ')'
            new_str_y = self.form_1_str_x
            # from these, get numerical solutions, evaluated on local
            # strings changed to relate to the self grids;
            # need to supply these unformatted, so save those:
            form_1_x_unformated, form_1_y_unformated = new_str_x*1, new_str_y*1
            # from these strings, get the numerical 1-form:
            new_str_x = new_str_x.replace('x', '(self.xg)')
            new_str_x = new_str_x.replace('y', '(self.yg)')
            new_str_y = new_str_y.replace('x', '(self.xg)')
            new_str_y = new_str_y.replace('y', '(self.yg)')
            # correct for constants
            if new_str_x.find('x') == -1 and new_str_x.find('y') == -1:
                new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))'
            if new_str_y.find('x') == -1 and new_str_y.find('y') == -1:
                new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))'
            # evaluate
            form_1_x = eval(new_str_x)
            form_1_y = eval(new_str_y)
            # depending on keep_object, return:
            if keep_object:
                self.F_x = form_1_x
                self.F_y = form_1_y
                self.form_1_str_x = form_1_x_unformated
                self.form_1_str_y = form_1_y_unformated
            elif not keep_object:
                new_object = form_1(self.xg, self.yg, form_1_x, form_1_y, F_x_eqn=form_1_x_unformated, F_y_eqn=form_1_y_unformated)
                return new_object
            else:
                raise ValueError('Error, invalid input for \'keep_object\'')

    def num_hodge(self, keep_object=False):
        '''
        num_hodge(keep_object=False)

        Parameters:
        -------------
        keep_object - determines if the result should be returned as a new
        1-form or if the current one is to be changed. Default is False.
        When False, a new object is created;
        when True, the object acted on is modified.

        Calculates the Hodge on R^2 by the standard definition:
        dx -> dy and dy -> -dx

        Does so numerically, using only the component arrays.
        If equations have been previously provided, this method will lose them.

        Returns: 1-form if keep_object is False, else returns nothing
        '''
        # check if equations have been given:
        # if they have, doing it only numerically would create
        # a mismatch, warn the user
        if self.form_1_str_x is not None or self.form_1_str_y is not None:
            print('Warning: You supplied equations; doing it numerically only will result in a mismatch between numerical values and equations')
        # now complete the process numerically, save as instructed
        # check keep_object:
        if keep_object:
            # change the object's own properties accordingly
            new_x = -self.F_y
            new_y = self.F_x
            self.F_x = new_x
            self.F_y = new_y
        elif not keep_object:
            # pass these in to the object to create a new one:
            # N.B. no equations to supply
            new_object = form_1(self.xg, self.yg, -self.F_y, self.F_x)
            # return the new one to the user:
            return new_object
        else:
            raise ValueError('Error, invalid input for \'keep_object\'')

    # define a function to compute the wedge product of two 1-forms
    def wedge(self, form_second, degree=1, keep_object=False):
        '''
        wedge(form_second, degree=1, keep_object=False)

        Parameters:
        ----------------
        form_second - the form to wedge the 1-form with.
        Can be supplied as a DFormPy instance, a tuple of equations,
        or a single string equation, depending on what form is to be wedged.
        To wedge with a 1-form, supply a 1-form instance, or a tuple of
        component equations as strings in terms of x and y.
        To wedge with a 0-form or a 2-form, supply the corresponding
        instance or a single equation. When using equations, provide the
        parameter 'degree' to distinguish between them.

        degree - default is 1. Only used when a single string is supplied
        as form_second, to distinguish between a 0-form and a 2-form:
        for a 0-form, degree=0; for a 2-form, degree=2.
        Determines what form is to be wedged with the given 1-form.

        keep_object - bool - default=False - only used when the 1-form is
        wedged with a 0-form. If False, a new object is created as a result
        of the wedge; if True, the 1-form acted on is modified to be the
        result of the wedge. To do so here, strings for the form must be
        supplied.

        Computes the wedge product using strings, ANALYTICALLY

        Returns:
        --------------
        Wedged with a 0-form, returns a 1-form object if keep_object is
        False (default), and returns nothing when it is True.
        Wedged with a 1-form, returns a 2-form instance.
        Wedged with a 2-form, the operation makes a 3-form, which on R^2
        is always zero; only a message displays.
        '''
        # test if equations were given first:
        if self.form_1_str_x is None or self.form_1_str_y is None:
            raise ValueError('Error: You need to supply the 1-form equation to do this, look at \'give_eqn\' method')

        # set up a variable to store the order of the supplied form, initially assume 1-form
        order = 1

        # get the needed second-object strings, depending on input
        if isinstance(form_second, tuple):
            # if equations were given here take these, if numerical grids were given - error!
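            # Worked example of the analytic 1-form /\ 1-form result computed
            # below (illustrative): for w = x dx + y dy and n = -y dx + x dy,
            # w /\ n = (x*x - y*(-y)) dx/\dy = (x**2 + y**2) dx/\dy,
            # i.e. the dx/\dy coefficient is f*m - g*h when
            # w = f dx + g dy and n = h dx + m dy.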
# check size , should be a 1-form if len(form_second) == 2: # 1-form/\1-form, check if strings supplied if isinstance(form_second[0], str) and isinstance(form_second[1], str): to_wedge_x_2_str = form_second[0] to_wedge_y_2_str = form_second[1] order = 1 else: raise ValueError('for analytical calulation, supply 1-form equations as strings') else: raise ValueError('too many or too little equations given in tuple') elif isinstance(form_second, str): # single string, could be 0-form or 2-form, check given degree: if degree == 0: to_wedge_0_form_str = form_second order = 0 elif degree == 2: # Error, gives 3 form = 0 on R2 order = None print('This operation makes a 3-form, which on R^2 is always = zero') else: raise ValueError('not possible digree given or supplied one string for a 1-form') else: # object supplied, get numericals checking which object is given: if isinstance(form_second, form_1): if form_second.form_1_str_x is None or form_second.form_1_str_y is None: raise ValueError('supplied 1-form instance must contain equations for analytical calculation') else: to_wedge_x_2_str = form_second.form_1_str_x to_wedge_y_2_str = form_second.form_1_str_y order = 1 elif isinstance(form_second, form_0): if form_second.form_0_str is None: raise ValueError('supplied 0-form instance must contain equations for analytical calculation') else: to_wedge_0_form_str = form_second.form_0_str order = 0 elif isinstance(form_second, form_2): order = None print('This operation makes a 3-form, which on R^2 is always = zero') else: raise TypeError('Supplied form to wedge with is not recognised') # Deal with 1-form/\1-form: if order == 1: # first, mathematically: 2-form = f*m - g*h form_2_str = str(simplify( '(' + self.form_1_str_x + ')*(' + to_wedge_y_2_str + ')' + ' - (' + self.form_1_str_y + ')*(' + to_wedge_x_2_str + ')' )) # keep it as it is locally to supply it to object maker later form_2_str_loc = form_2_str + '' # format it to be in terms of grids and: # check against constant and zero 2-forms being supplied # get the numerical evaluation of it form_2_str = form_2_str.replace('x', 'self.xg') form_2_str = form_2_str.replace('y', 'self.yg') if form_2_str.find('x') & form_2_str.find('y') == -1: form_2_str = '(' + str(form_2_str) + ')* np.ones(np.shape(self.xg))' # evaluate it numerically on the grid supplied form_2_result = eval(form_2_str) # create a 2-form object from this; to return and do so ret_object = form_2(self.xg, self.yg, form_2_result, form_2_str_loc) return ret_object elif order == 0: # first, find the result of the 1-form: new_str_x = str(simplify('(' + self.form_1_str_x + ')*(' + to_wedge_0_form_str + ')')) new_str_y = str(simplify('(' + self.form_1_str_y + ')*(' + to_wedge_0_form_str + ')')) # keep it as it is locally to supply it to object maker later form_1_str_x_loc = new_str_x + '' form_1_str_y_loc = new_str_y + '' # format it to be in terms of grids and: # check against constant and zero 1-forms being supplied # get the numerical evaluation of it new_str_x = new_str_x.replace('x', '(self.xg)') new_str_x = new_str_x.replace('y', '(self.yg)') new_str_y = new_str_y.replace('x', '(self.xg)') new_str_y = new_str_y.replace('y', '(self.yg)') if new_str_x.find('x') & new_str_x.find('y') == -1: new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))' if new_str_y.find('x') & new_str_y.find('y') == -1: new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))' form_1_x = eval(new_str_x) form_1_y = eval(new_str_y) # depending on keep_object, return: if keep_object: self.F_x = 
form_1_x self.F_y = form_1_y self.form_1_str_x = form_1_str_x_loc self.form_1_str_y = form_1_str_y_loc elif not keep_object: new_object = form_1(self.xg, self.yg, form_1_x, form_1_y, F_x_eqn=form_1_str_x_loc, F_y_eqn=form_1_str_y_loc) # return the new one to the user: return new_object else: raise ValueError('Error, Invalid input for \'keep_object\'') elif order is None: # made a form that is always zero on R2, no need to make it # Warning already shown, when degree was set pass else: # should never happen, but in case raise ValueError('Variable change during code running, look at \'order\' parameter') # define a method for numerical wedge product def num_wedge(self, form_second, degree=1, keep_object=False): ''' num_wedge(form_second, degree=1, keep_object=False) Parameters: ---------------- form_second - the form to wedge the 1-form with. Can be supplied as a DFormPy instance, a tuple of grids of same size and dimensions as this 1-form, or a single grid of scaling function values depending on what form is to be wedged. To wedge with 1-form, supply 1-form instance, or tuple of component grids of same size as 1-form acted on. To wedge with 0-form or 2-form, supply corresponding instances or a single grid. When using grids, to distinguish between them, provide parmater 'degree'. degree - default is 1. Only used when a single grid is supplied as form_second, to distinguish betwen 0-form and 2-form for 0-form, degree=0, for 2-form, degree=2. Determines what form is to be wegded with the given 1-form. keep_object - bool -default=False - only used when 1-form is wedged with a 0-form. If False, a new object is created as a result of the wedge. If True, the 1-form acted on is modified to be the result of the wedge. Computes the Wedge product numerically Returns: -------------- Wedged with 0-form returns a 1-form object if keep_object is False (default), and returns nothing when it is True Wedged with a 1-form, returns a 2-form instance Wedged with a 2-form, operation makes a 3-form, which on R^2 is always = zero, only message displays. ''' # test if equations were given first: if isinstance(self.form_1_str_x, str) or isinstance(self.form_1_str_y, str): print('The first 1-form you are completing the wedge with has equations supplied, these will be lost') # set up variable to store order of supplied form, initially assume 1-form order = 1 # get needed second obejct grids dep. on input if isinstance(form_second, tuple): # check size to see what it is to be wedged with. 
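            # Illustrative call patterns for this method (assuming f1 and g1
            # are form_1 instances on the same grids, and zero_form_grid is a
            # numpy array matching self.xg in shape; names are made up here):
            #   f2 = f1.num_wedge(g1)                 # 1-form /\ 1-form -> 2-form
            #   f1.num_wedge(zero_form_grid, degree=0,
            #                keep_object=True)        # scales f1 in place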
# tuple should only be length 2 --> 1-form/\1-form if len(form_second) == 2: # 1-form/\1-form, extract components # if numerical grids were given, take these, if equations, change to values on grids: if isinstance(form_second[0], str) and isinstance(form_second[1], str): new_str_x = form_second[0].replace('x', '(self.xg)') new_str_x = new_str_x.replace('y', '(self.yg)') new_str_y = form_second[1].replace('x', '(self.xg)') new_str_y = new_str_y.replace('y', '(self.yg)') if new_str_x.find('x') & new_str_x.find('y') == -1: new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))' if new_str_y.find('x') & new_str_y.find('y') == -1: new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))' f12_x = eval(new_str_x) f12_y = eval(new_str_y) order = 1 elif isinstance(form_second[0], np.ndarray) and isinstance(form_second[1], np.ndarray): f12_x = form_second[0] f12_y = form_second[1] order = 1 else: raise ValueError('Not recognised input tuple') else: raise ValueError('too many or too little equations given in tuple') elif isinstance(form_second, np.ndarray): # check degree: if degree == 0: to_wedge_0_form = form_second order = 0 elif degree == 1: raise ValueError('for degree 1, supply a 1-form, not a single grid') elif degree == 2: # Error, gives 3 form = 0 on R2 order = None print('This operation makes a 3-form, which on R^2 is always = zero') elif isinstance(form_second, str): # single string, could be 0-form or 2-form, check given degree: if degree == 0: str_0_form = form_second.replace('x', '(self.xg)') str_0_form = str_0_form.replace('y', '(self.yg)') if str_0_form.find('x') & str_0_form.find('y') == -1: str_0_form = '(' + str(str_0_form) + ')* np.ones(np.shape(self.xg))' to_wedge_0_form = eval(str_0_form) order = 0 elif degree == 2: # Error, gives 3 form = 0 on R2 order = None print('This operation makes a 3-form, which on R^2 is always = zero') else: raise ValueError('not possible digree given or supplied one string for a 1-form') # object supplied, get grids checking which object is given: elif isinstance(form_second, form_1): f12_x = form_second.F_x f12_y = form_second.F_y order = 1 elif isinstance(form_second, form_0): to_wedge_0_form = form_second.form_0 order = 0 elif isinstance(form_second, form_2): order = None print('This operation makes a 3-form, which on R^2 is always = zero') else: raise TypeError('Supplied form to wedge with is not recognised') # USe given inputs to evaluate the result: # Deal with 1-form/\1-form: if order == 1: # from these get the numerical 2-form result = self.F_x * f12_y - self.F_y * f12_x # return it to user: ret_object = form_2(self.xg, self.yg, result) return ret_object elif order == 0: # first, find the result of the 1-form new_form_1_x = to_wedge_0_form * self.F_x new_form_1_y = to_wedge_0_form * self.F_y # depending on keep_object, return: if keep_object: self.F_x = new_form_1_x self.F_y = new_form_1_y elif not keep_object: new_object = form_1(self.xg, self.yg, new_form_1_x, new_form_1_y) # return the new one to the user: return new_object else: raise ValueError('Error, Invalid input for \'keep_object\'') elif order is None: # made a form that is always zero on R2, no need to make it # Warning already shown, when degree was set pass else: # should never happen, but in case raise ValueError('Variable change during code running, look at \'order\' parameter') def zoom(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3): ''' zoom(target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3) Parameters: -------------- 
Create a new window which displays the field zoomed at a certain point User gives arguments Target: Determines the zoom location, coordinates mag: +ve float, determines zooming amount dpd: +int, determines how many points on each axis inset - bool - determines if zoom is to plotted as an inset if True, need to also give axis on which to plot axis - matplotlib axes instance - on it, the instance will plot. insize - float - size of inset as fraction of total figure returns: -------------- if inset is False, returns the zoomed in insatnce as a 0-form object if inset if True, returns the inset axis, with the plot on them on top of the given axis and the 0-form instance ''' # Requires user to provide eqn of the 1-form they are zooming on. if self.form_1_str_x == None or self.form_1_str_y == None: # ERROR raise TypeError('No equation provided, see \'give_eqn\' method') else: # Zoom must be one or greater if mag < 1: raise ValueError('Mag must be greater than one') else: if insize > 1 or insize < 0: raise ValueError('Insize must be +ve and less than one') else: # If no inset, set the size of the zoom axis to allow normal plotting if inset == False: insize = 1 # Target coordinates x_m = target[0] y_m = target[1] # Get the size of the original VF Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0]) Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0]) # Zoom axis range d_range_x = insize*Lx/mag d_range_y = insize*Ly/mag # Set up zoom window grids dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd) dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd) dxg, dyg = np.meshgrid(dx, dy) # Create variables for the user provided equation strings u_str = self.form_1_str_x v_str = self.form_1_str_y # Check if the equations provided contain x and y terms if u_str.find('x') & u_str.find('y') == -1: u_str = '(' + str(u_str) + ')* np.ones(np.shape(dxg))' else: u_str = u_str.replace('x', 'dxg') u_str = u_str.replace('y', 'dyg') if v_str.find('x') & v_str.find('y') == -1: v_str = '(' + str(v_str) + ')* np.ones(np.shape(dyg))' else: v_str = v_str.replace('x', 'dxg') v_str = v_str.replace('y', 'dyg') # Generate arrays for the components of the zoom field u_zoom = eval(u_str) v_zoom = eval(v_str) # crate the zoomed in form zoom_form = form_1(dxg, dyg, u_zoom, v_zoom, self.form_1_str_x, self.form_1_str_y) zoom_form.sheet_size(1/dpd) q = 1 xi = (x_m - self.xg[0,0])/(2*Lx) yi = (y_m - self.yg[0,0])/(2*Ly) if inset == True: if axis != None: # Create inset axis in the current axis. zoom_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize]) zoom_form.plot(zoom_inset_ax) # return the zoomed on axis # also return zoomed in form in case user wants that. return zoom_inset_ax, zoom_form else: raise ValueError('Cannot inset without supplied axis') else: # inset is false, just return the new zoomed in instance return zoom_form # define a mehtod to evaluate the interior derivative of the 1-form # with respect to a given vector field object or without. def interior_d(self, vector_field=None): ''' interior_d(vector_field=None) Computes the interior derivative of the 1-form Parameters: ------------------ Vector_field = vector field object of DFormPy library to do the derivative with respect to, needs equations to work with nuymerical_only being False. Can also supply equations in a tuple: (eqn_x, eqn_y). If using numerical only, can supply object or tuple of numpy arrays (array_x, atrray_y). 
If nothing is supplied for it, it assumes F_x = 1 and F_y = 1, with correct form and shape Does no analytically using equations provided in instance Returns 0-form object ''' # test if equations were given first: if self.form_1_str_x == None or self.form_1_str_y == None: # ERROR raise ValueError('Error: You need to supply the 1-form equations to do this, look at \'give_eqn\' method') # if the vector field was supplied, extract its equations, if possible if vector_field is None: # if none was given, do it with respect to uniform 1, 1 vf_x_str = '1' vf_y_str = '1' elif type(vector_field) == tuple: # if equations were given, take these, is numericals were given here, break! if type(vector_field[0]) == str: vf_x_str = vector_field[0] vf_y_str = vector_field[1] else: raise ValueError('for analytical result, supply VF equations') else: if vector_field.str_x == None or vector_field.str_y == None: # ERROR raise ValueError('Error: You need to supply the VF equations to do this, look at \'give_eqn\' method') else: vf_x_str = str(simplify(vector_field.str_x)) vf_y_str = str(simplify(vector_field.str_y)) # combine them correctly with the 1-form strings: zero_form_str = str(simplify('(' + self.form_1_str_x + ')*(' + vf_x_str + ')' + ' + (' + self.form_1_str_y + ')*(' + vf_y_str + ')')) # keep an unformatted version to supply to the 0-form zero_form_str_unformatted = zero_form_str + '' # format the expression to be evluated zero_form_str = zero_form_str.replace('x', 'self.xg') zero_form_str = zero_form_str.replace('y', 'self.yg') # check against constants in the expression to be evaluated if zero_form_str.find('x') & zero_form_str.find('y') == -1: zero_form_str = '(' + str(zero_form_str) + ')* np.ones(np.shape(self.xg))' else: pass # evaulate the numerical zero form: zero_form_result = eval(zero_form_str) # return it, with equations, to user, depending on their figure # preferances result_form = form_0(self.xg, self.yg, zero_form_result, zero_form_str_unformatted) # return it to the user return result_form # numerical interior derivaitve def num_interior_d(self, vector_field=None): ''' num_interior_d(self, vector_field=None) Computes the interior derivative of the 1-form Parameters: -------------- Vector_field = vector field object of DFormPy library to do the derivative with respect to, needs equations to work with nuymerical_only being False. Can also supply equations in a tuple: (eqn_x, eqn_y). If using numerical only, can supply object or tuple of numpy arrays (array_x, atrray_y). 
If nothing is supplied for it, it assumes F_x = 1 and F_y = 1, with correct form and shape Does no numerically using arrays provided in instance If equations were proivided, this method will lose them Returns 0-form object ''' # check if equations have been given: # if they have, doing it only numerically would create # a mismatch, Warn user if self.form_1_str_x == None or self.form_1_str_y == None: pass else: # equations have been given, a mismatch may occur # warn the user print('Warning: You supplied equations, doing it numerically only will not pass equations to the 0-form and these will be lost') # Take the vector field components, checking what was input if vector_field is None: # if none was given, do it with respect to uniform 1, 1 vf_x = np.ones(np.shape(self.xg)) vf_y = np.ones(np.shape(self.yg)) elif type(vector_field) == tuple: # if numerical grids were given, take these # if equations were given here, evaulate them to grids if type(vector_field[0]) == str: new_str_x = vector_field[0].replace('x', '(self.xg)') new_str_x = new_str_x.replace('y', '(self.yg)') new_str_y = vector_field[1].replace('x', '(self.xg)') new_str_y = new_str_y.replace('y', '(self.yg)') if new_str_x.find('x') & new_str_x.find('y') == -1: new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))' if new_str_y.find('x') & new_str_y.find('y') == -1: new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))' vf_x = eval(new_str_x) vf_y = eval(new_str_y) else: vf_x = vector_field[0] vf_y = vector_field[1] else: # extract needed properties from the object supplied vf_x = vector_field.F_x vf_y = vector_field.F_y # Complete the interior derivative 1-form --> 0-form: zero_form_result = self.F_x * vf_x + self.F_y * vf_y # supply these to the 0-form object creator result_form = form_0(self.xg, self.yg, zero_form_result) # return it to the user return result_form # define a method to change a supplied Vector filed to the 1-form def contravariant(self, g=[['1', '0'], ['0', '1']]): ''' contravariant(g=[['1', '0'], ['0', '1']]) Passes in everything it can (all it has been supplied) to the VF object. Works via the ('inverse') metric on R2 Can supply the metric in as equations or as evaluated arrays Format of the metric is a list of numpy arrays 0th array is the top row, its 0th component is 11, 1st is 12 1st array is the botton row, its 0th comp is 21 and 1st is 22. 
Note, if it is supplied as arrays, they must come from numpy grids via meshgrid, if it is supplied as strings, needs to be in terms of x and y, and contain no special funtions, apart from ones imported here automatically and listed in the documentation Returns a single object (VF object) ''' # extract what is needed form the metric depending on what the user # supplied # check if it has string components if type(g[0][0]) == str and type(g[0][1]) == str and type(g[1][0]) == str and type(g[1][1]) == str: # deal with supplied string metric # need to format it, correct it for constants and evaluate it's numerical equivalent str_comp_00 = g[0][0] + '' str_comp_01 = g[0][1] + '' str_comp_10 = g[1][0] + '' str_comp_11 = g[1][1] + '' str_comp_00 = str_comp_00.replace('x', '(self.xg)') str_comp_00 = str_comp_00.replace('y', '(self.yg)') str_comp_01 = str_comp_01.replace('x', '(self.xg)') str_comp_01 = str_comp_01.replace('y', '(self.yg)') str_comp_10 = str_comp_10.replace('x', '(self.xg)') str_comp_10 = str_comp_10.replace('y', '(self.yg)') str_comp_11 = str_comp_11.replace('x', '(self.xg)') str_comp_11 = str_comp_11.replace('y', '(self.yg)') # check against constant form components: if str_comp_00.find('x') & str_comp_00.find('y') == -1: str_comp_00 = '(' + str(str_comp_00) + ')* np.ones(np.shape(self.xg))' if str_comp_01.find('x') & str_comp_01.find('y') == -1: str_comp_01 = '(' + str(str_comp_01) + ')* np.ones(np.shape(self.yg))' if str_comp_10.find('x') & str_comp_10.find('y') == -1: str_comp_10 = '(' + str(str_comp_10) + ')* np.ones(np.shape(self.yg))' if str_comp_11.find('x') & str_comp_11.find('y') == -1: str_comp_11 = '(' + str(str_comp_11) + ')* np.ones(np.shape(self.yg))' # evaluate the components numerically, inputting them into a # stored numerical metric comp_00 = eval(str_comp_00) comp_01 = eval(str_comp_01) comp_10 = eval(str_comp_10) comp_11 = eval(str_comp_11) g_num = [[comp_00, comp_01], [comp_10, comp_11]] # set up a dummy variable to store the fact that numericals were given # not to check again later analytics = True elif type(g[0][0]) == np.ndarray and type(g[0][1]) == np.ndarray and type(g[1][0]) == np.ndarray and type(g[1][1]) == np.ndarray: # deal with the metric being supplied as components # if the user has 1-form equations, warn that these # will be lost, due to numerical calculations if self.form_1_str_x == None and self.form_1_str_y == None: pass else: print('The 1-form has equations, but the metric does not, these will be lost and the resulting VF will only have numerical values, not equations supplied') # No need to do anythng more to the metric # just rename the metric here g_num = g # set up the dummy variable analytics = False else: # Inconsistant metric components raise TypeError('Metric components are inconcisstant') # from 1-form components, get VF components by the metric # first, do so numerically, as this must always happen form_x = self.F_x * g_num[0][0] + self.F_y * g_num[0][1] form_y = self.F_y * g_num[1][1] + self.F_x * g_num[1][0] # if the equations were given, evaluate these analytically too: # only if vector filed originally has equations if analytics: if self.form_1_str_x == None and self.form_1_str_y == None: print('You supplied the metric as equations (or it was default), but did not give 1-form equations, therefore only numericals will be completed') analytics = False else: x_str_form = '(' + self.form_1_str_x + ')*(' + g[0][0] + ') + (' + self.form_1_str_y + ')*(' + g[0][1] + ')' y_str_form = '(' + self.form_1_str_y + ')*(' + g[1][1] + ') + (' + 
self.form_1_str_x + ')*(' + g[1][0] + ')' # simplify them x_str_form = str(simplify(x_str_form)) y_str_form = str(simplify(y_str_form)) else: pass # based on what was given into the Vector field, return a 1-form object with these parameters if analytics: result_field = vector_field(self.xg, self.yg, form_x, form_y, x_str_form, y_str_form) elif not analytics: result_field = vector_field(self.xg, self.yg, form_x, form_y) # return the found object return result_field # %% ''' function to create a 2-form object and define methods for it ''' # define 2-form object that can be customised and plotted class form_2(): ''' defines a 2-form object and returns it to user Takes 3 arguments basic, these are the 2 grids in 2D, which muse be square and of equal sizes. Then 1 argument for the dx^dy component based on the same grids. Also takes in an equation which is needed for some operaions Takes in a figure if one is to be supplied. Can take axis for subplots in The subplots only occur if subplots input is set to True, default is False ''' # set up all variables def __init__(self, xg, yg, form2, form_2_eq=None): self.xg = xg self.yg = yg self.form_2 = form2 self.s_max = 6 self.s_min = 2 self.pt_den_x = len(xg[0, :]) self.pt_den_y = len(yg[:, 0]) self.fract_x = 2/((self.pt_den_x - 1)) self.fract_y = 2/((self.pt_den_y - 1)) self.colour_list = ['red', 'blue', 'grey'] self.logarithmic_scale_bool = 0 # self.base = 10 self.delta_factor = 10 if form_2_eq is not None: self.form_2_str = str(simplify(form_2_eq)) # to start with, user must change to access some methods # Note, the string must be given with x and y as variables else: self.form_2_str = None # ##################################################################### # Define basic methods to customise this object # ##################################################################### # define a mehtod to allow user to supply the string equation # of the 2-form def give_eqn(self, equation_str): ''' Takes in 1-argument, string This must be the equation of the supplied numerical 0-form It must be in terms of x and y. Has to be given, for some methods to be calculatable. ''' self.form_2_str = equation_str # update the numerical values to always match string = self.form_2_str + '' string = string.replace('x', '(self.xg)') string = string.replace('y', '(self.yg)') # correct for consatnt form before evaluating if string.find('x') & string.find('y') == -1: string = '(' + str(string) + ')* np.ones(np.shape(self.xg))' else: pass # re-evaluate the 2-form numerically self.form_2 = eval(string) # deifne a function to return the string equation to the user def return_string(self): ''' Takes in no arguments, returns the unformatted string back to user This is done in case user wants to access strings that got here not by input but by ext. alg. ''' return self.form_2_str # change colour list def colours(self, colour_list): ''' Takes input of a list of three string. String must be formatted as to be accepted by maplotlib colors changes the colours for 2-form orientation. Order: [clockwise, counterclosckwise, zero] ''' # make sure input was a list of strings: if not(isinstance(colour_list[0], str) and isinstance(colour_list[1], str) and isinstance(colour_list[2], str)): raise TypeError('Wrongly formatted string list, chech required inputs') # change stored colour list self.colour_list = colour_list # change boolean that det. 
whether to scale logarithmically
    def log_scaling(self):
        '''
        Takes no arguments

        Changes the boolean that determines if scaling is logarithmic.
        Whenever it is called, it flips that boolean to its opposite.
        The form object is initialised with this as False.
        '''
        self.logarithmic_scale_bool = not self.logarithmic_scale_bool
        # self.base = base

    # define methods to change s_max
    def max_sheets(self, maximum):
        '''
        Takes one argument, must be int

        Changes the maximum number of sheets to draw on a stack.
        These still scale relative to the maximum magnitude.
        '''
        self.s_max = maximum

    # define a method to change spare spacing around figure
    def surround_space(self, delta_denominator):
        '''
        Takes in one argument, float or int

        Sets the extra blank space around the domain of grids in the axis.
        The input number defines the denominator of the fraction to use,
        e.g. supplying 3 will make the white space 1/3 of the width of the
        domain of the grid.
        '''
        self.delta_factor = delta_denominator

    # define a method to change the density of grids in the same range
    # requires the string equation of the 2-form:
    def set_density2(self, points_number_x, points_number_y):
        '''
        Changes the number of points on the grids to those given,
        if equations have been supplied.

        Parameters:
        -------------
        points_number_x - int - number of points to put along the x axis
        points_number_y - int - number of points to put along the y axis

        Returns: None
        '''
        if self.form_2_str is None:
            # Error
            raise TypeError('Error: You need to supply the 2-form equation to do this, look at \'give_eqn\' method')
        else:
            # redefine the grids
            x = np.linspace(self.xg[0, 0], self.xg[0, -1], points_number_x)
            y = np.linspace(self.yg[0, 0], self.yg[-1, 0], points_number_y)
            self.xg, self.yg = np.meshgrid(x, y)
            # based on these, change the other, dependent variables
            self.pt_den_x = len(self.xg[0, :])
            self.pt_den_y = len(self.yg[:, 0])
            self.fract_x = 2/(self.pt_den_x - 1)
            self.fract_y = 2/(self.pt_den_y - 1)
            # substitute these into the equation, but keep it local:
            str_2 = self.form_2_str + ''
            str_2 = str_2.replace('x', '(self.xg)')
            str_2 = str_2.replace('y', '(self.yg)')
            # correct for a constant form before evaluating
            if str_2.find('x') == -1 and str_2.find('y') == -1:
                str_2 = '(' + str(str_2) + ')* np.ones(np.shape(self.xg))'
            else:
                pass
            # re-evaluate the 2-form numerically
            self.form_2 = eval(str_2)

    # #####################################################################
    # Write more complicated methods that will use this form object,
    # e.g. plot, exterior derivative, Hodge etc.
    # #####################################################################

    # define a function to plot the set-up 2-form
    # originally form_2_components_plot
    def plot(self, axis):
        '''
        Finalises the plotting.

        Takes one input:
        axis - matplotlib axes that the 2-form plot is drawn on.
Uses the attribues of the object as set originally and as customised with methods to create a plot of the 2-form ''' form2 = self.form_2 * 1 # from self, get 2-form too # set all insignificant values to zero: form2[np.abs(form2) < 1e-12] = 0 # get the lengths of x and y from their grids x_len = len(self.xg[0, :]) y_len = len(self.yg[:, 0]) # Extract L from the x and y grids Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0]) Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0]) L = 0.5*(Lx + Ly) x0 = self.xg[0, 0] + Lx y0 = self.yg[0, 0] + Ly # reset axis limits ax_Lx = Lx + Lx/self.delta_factor ax_Ly = Ly + Ly/self.delta_factor axis.set_xlim(-ax_Lx + x0, ax_Lx + x0) axis.set_ylim(-ax_Ly + y0, ax_Ly + y0) # get the signs of the input 2-form form_2_sgn = np.sign(form2) # define an empty array of magnitudes, to then fill with integer rel. mags R_int = np.zeros(shape=((y_len), (x_len))) # ######################################################################### # get variables needed for the initial, simplified stack plot # ######################################################################### # set up directions angles =[0*np.ones(np.shape(form2)), (np.pi/2)*np.ones(np.shape(form2))] # deal with sinularities that appear on evaluated points isnan_arr = np.isnan(form2) for i in range(y_len): for j in range(x_len): # set to zero points that are not defined or inf if isnan_arr[i, j] or abs(form2[i, j]) == np.inf or abs(form2[i, j]) > 1e15: # colour this region as a red dot, not square to # not confuse with nigh mag 2-forms in stacks. or worse, in # blocks circ = patch.Circle((self.xg[i, j], self.yg[i, j]), L*(self.fract_x + self.fract_y)/6, color='red') axis.add_patch(circ) form2[i, j] = 0 # ALso, since we got this lop anyway # correct for singularities in planar form 2: # set to zero points that are not defined or inf if isnan(form2[i, j]) is True: form_2_sgn[i, j] = 0 # ######################################################################### # use the the direction of arrows to define stack properties # ######################################################################### # set up the max, total height of stack (along arrow) s_L_x = self.fract_x*Lx s_L_y = self.fract_y*Ly # ######################################################################### # define the stacks based on geometrical arguments # to be perp. to arrow. shifted parallel to it, their density porp to mag # of the arrow and with an arrowhead on top. 
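        # Worked example of the sheet-count bucketing carried out below
        # (illustrative numbers): with s_max = 6, a point whose relative
        # magnitude R is 0.55 receives the smallest t with R <= t/6,
        # i.e. t = 4 sheets; a point at R = 1 receives the full s_max = 6.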
# ######################################################################### # find the maximum magnitude for scaling mag = abs(form2) max_size = np.max(mag) # careful with singularities, else ---> nan if self.logarithmic_scale_bool: # Add 1 to each magnitude mag1 = mag + 1 # Calculate the appropriate scaling factor # a = max_size**(1/self.s_max) # a = self.base # Take log(base=a) of mag1 logmag1 = np.log(mag1) # Re-assign R R = logmag1/np.max(logmag1) else: # find the relative magnitude of vectors to maximum, as an array R = mag/max_size # if self.logarithmic_scale_bool: # mag1 = mag + 1 # form_2_norm = form2/mag1 # logmag = np.log10(mag1) # form2 = form2_norm*logmag # mag = np.abs(form2) # max_size = np.max(mag) # Now, for both values of theta, complete plotting: for theta in angles: # define tigonometirc shifts I_sin = np.sin(theta) I_cos = np.cos(theta) # define the points that set out a line of the stack sheet (middle line) A_x = self.xg + (s_L_x/2)*I_sin A_y = self.yg - (s_L_y/2)*I_cos B_x = self.xg - (s_L_x/2)*I_sin B_y = self.yg + (s_L_y/2)*I_cos for i in range(self.s_max - self.s_min + 1): t = self.s_max - i R_int[R <= t/self.s_max] = t # loop over each arrow coordinate in x and y for i in range(y_len): for j in range(x_len): # define it for all magnitudes. Separately for odd and even corr. number of sheets: # Label each element with the number of stacks required: linear scaling if form_2_sgn[i, j] == +1: color_index = 0 elif form_2_sgn[i, j] == -1: color_index = 1 else: color_index = 2 # # linear scaling # for t in range(self.s_min, self.s_max+2): # if (t-2)/self.s_max <= R[i, j] <= (t-1)/self.s_max: # R_int[i, j] = t # set a varible for current considered magnitude as it is reused # avoids extracting from R many times. n = R_int[i, j] # deal with even number of sheets from magnitudes: if n % 2 == 0: # define a parameter to loop over in the recursion equation s = 0 # Define the points for sheets required for the given magnitude # from these define all the needed lines and plot them while s <= 0.5*(n-2): # maximum set by equations (documentation) # define all the points for the 2 currently looped +- sheets in while loop Ax1 = A_x[i, j] + G(s, n, 0)*s_L_x*I_cos[i, j] Ay1 = A_y[i, j] + G(s, n, 0)*s_L_y*I_sin[i, j] Bx1 = B_x[i, j] + G(s, n, 0)*s_L_x*I_cos[i, j] By1 = B_y[i, j] + G(s, n, 0)*s_L_y*I_sin[i, j] Ax2 = A_x[i, j] - G(s, n, 0)*s_L_x*I_cos[i, j] Ay2 = A_y[i, j] - G(s, n, 0)*s_L_y*I_sin[i, j] Bx2 = B_x[i, j] - G(s, n, 0)*s_L_x*I_cos[i, j] By2 = B_y[i, j] - G(s, n, 0)*s_L_y*I_sin[i, j] # from these, define the 2 lines, for this run axis.add_line(Line2D((Ax1, Bx1), (Ay1, By1), linewidth=0.5, color=self.colour_list[color_index])) axis.add_line(Line2D((Ax2, Bx2), (Ay2, By2), linewidth=0.7, color=self.colour_list[color_index])) # update parameter to reapet and draw all needed arrows s += 1 # deal with the odd number of stacks: else: # Add the centre line for odd numbers of stacks axis.add_line(Line2D((A_x[i, j], B_x[i, j]), (A_y[i, j], B_y[i, j]), linewidth=0.7, color=self.colour_list[color_index])) # then loop over the remaining lines as per the recursion formula: s = 1 # change the looping parametr to exclude already completed 0 (corr. 
to middle sheet here) # define all remaining sheets for the magnitude: while s <= 0.5*(n-1): # maximum set by equations (documentation) # define all the points for the current +- displacement in while loop Ax1 = A_x[i, j] + G(s, n, 1)*s_L_x*I_cos[i, j] Ay1 = A_y[i, j] + G(s, n, 1)*s_L_y*I_sin[i, j] Bx1 = B_x[i, j] + G(s, n, 1)*s_L_x*I_cos[i, j] By1 = B_y[i, j] + G(s, n, 1)*s_L_y*I_sin[i, j] Ax2 = A_x[i, j] - G(s, n, 1)*s_L_x*I_cos[i, j] Ay2 = A_y[i, j] - G(s, n, 1)*s_L_y*I_sin[i, j] Bx2 = B_x[i, j] - G(s, n, 1)*s_L_x*I_cos[i, j] By2 = B_y[i, j] - G(s, n, 1)*s_L_y*I_sin[i, j] # from these, define the 2 displaced lines axis.add_line(Line2D((Ax1, Bx1), (Ay1, By1), linewidth=0.7, color=self.colour_list[color_index])) axis.add_line(Line2D((Ax2, Bx2), (Ay2, By2), linewidth=0.7, color=self.colour_list[color_index])) # change the parameter to loop over all changes in displacement for current magnitude s += 1 def ext_d(self): ''' No inputs, no outputs, exterior derivative of a 2-form gives a 3-form, which on R2 is always =0 ''' print('This operation makes a 3-form, which on R^2 is always = zero') # define a fucntion to Hodge the 2-form (into a 0-form) def num_hodge(self): ''' Takes in no arguments Does the hodge numerically based on instance provieded arrays If equations were provided, it will lose them. It calulates the Hodge on R^2 by the standard definition: *(dx^dy) = 1 returns a 0-form ''' # check if equations have been given: # if they have, doing it only numerically would create # a mismatch, avoid that if self.form_2_str != None: # equations have been given, a mismatch may occur # warn the user print('Warning: You supplied equations, doing it numerically only will lose these') # now complete the process numerically # pass these in to the object to create a new one: new_object = form_0(self.xg, self.yg, self.form_2) # N.B no equations to supply # return the new one to the user: return new_object def hodge(self): ''' Takes in no arguments Does the hodge analuically based on instance provieded equations changes the equations AND the numerical answers It calulates the Hodge on R^2 by the standard definition: *(dx^dy) = 1 returns a 0-form ''' # can only be done if equations have been given, check: if self.form_2_str != None: # some equations are there, compute the Hodge on these: # Note: Upto user to make sure their equations match their # numerical input, unless using give eqn, then its updates # numerical values to match # get numerical solutions, evaulated on local # strings changed to relate to the self grids # need to uspply these unformatted, so save those: form_0_str_unformated = self.form_2_str + '' string_0_form = self.form_2_str # formated # from these strings, get the numerical 0-form: string_0_form = string_0_form.replace('x', '(self.xg)') string_0_form = string_0_form.replace('y', '(self.yg)') # correct for constant forms if string_0_form.find('x') & string_0_form.find('y') == -1: string_0_form = '(' + str(string_0_form) + ')* np.ones(np.shape(self.xg))' # evaulated numerically form_0_result = eval(string_0_form) # return object, depending on option for figure passage: # pass these in to the object to create a new one: new_object = form_0(self.xg, self.yg, form_0_result, form_0_eqn=form_0_str_unformated) # return the new one to the user: return new_object else: # ERROR raise TypeError('You need to supply the 2-form equation to do this, look at \'give_eqn\' method') # define a method to create a zoomed in 2-form def zoom(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, 
insize=0.3): ''' Creates a new window which displays the 2-form zoomed at a certain point User gives arguments: Target: Determines the zoom location, coordinates mag: +ve float, determines zooming amount dpd: +int, determines how many points on each axis inset - bool - determies if the zoom is plotted on the parent axis as an inset axis - matplotlib axis, only supply if inset is True, plots intset on these insize - float - size of inset as fraction of total figure returns: -------------- if inset is False, returns the zoomed in insatnce as a 0-form object if inset if True, returns the inset axis, with the plot on them on top of the given axis and the 0-form instance ''' # Requires user to provide eqn of the 1-form they are zooming on. if self.form_2_str == None: # ERROR raise TypeError('No equation provided') else: # Zoom must be one or greater if mag < 1: raise ValueError('Mag must be greater than one') else: if insize > 1 or insize < 0: raise ValueError('Insize must be +ve and less than one') else: # If no inset, set the size of the zoom axis to allow normal plotting if inset == False: insize = 1 # Target coordinates x_m = target[0] y_m = target[1] # Get the size of the original VF Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0]) Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0]) # Zoom axis range d_range_x = insize*Lx/mag d_range_y = insize*Ly/mag # Set up zoom window grids dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd) dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd) dxg, dyg = np.meshgrid(dx, dy) # Create variables for the user provided equation strings zoom_str = self.form_2_str + '' # Check if the equations provided contain x and y terms if zoom_str.find('x') & zoom_str.find('y') == -1: zoom_str = '(' + str(zoom_str) + ')* np.ones(np.shape(dxg))' else: zoom_str = zoom_str.replace('x', '(dxg)') zoom_str = zoom_str.replace('y', '(dyg)') # Generate arrays for the components of the zoom field zoom_2form = eval(zoom_str) # from that create 2-form instance zoomform2 = form_2(dxg, dyg, zoom_2form, self.form_2_str) q = 1 xi = (x_m - self.xg[0,0])/(2*Lx) yi = (y_m - self.yg[0,0])/(2*Ly) if inset == True: if axis != None: # Create inset axis in the current axis. zoom_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize]) zoomform2.plot(zoom_inset_ax) # return the zoomed on axis # also return zoomed in form in case user wants that. return zoom_inset_ax, zoomform2 else: raise ValueError('Cannot inset without supplied axis') else: # inset is false, just return the new zoomed in instance return zoomform2 # define a mehtod to evaluate the interior derivative of the 2-form # with respect to a given vector field object or without. def interior_d(self, vector_field=None): ''' Computes the interior derivative of the 2-form Takes in: -- Vector_field = vector field object of DFormPy library to do the derivative with respect to, needs equations to work with nuymerical_only being False. Can also supply equations in a tuple: (eqn_x, eqn_y). If using numerical only, can supply object or tuple of numpy arrays (array_x, atrray_y). 
If nothing is supplied for it, it assumes F_x = 1 and F_y = 1, with correct form and shape Does no analytically via equations in instance Returns: 0-form ''' # test if the equation was given first: if self.form_2_str == None: # ERROR raise ValueError('Error: You need to supply the 2-form equations to do this, look at \'give_eqn\' method') # if the vector field was supplied, extract its equations, if possible if vector_field is None: # if none was given, do it with respect to uniform 1, 1 vf_x_str = '1' vf_y_str = '1' elif type(vector_field) == tuple: # if equations were given, take these, is numericals were given here, break! if type(vector_field[0]) == str: vf_x_str = vector_field[0] vf_y_str = vector_field[1] else: raise ValueError('for analytical result, supply VF equations') else: if vector_field.str_x == None or vector_field.str_y == None: # ERROR raise ValueError('Error: You need to supply the VF equations to do this, look at \'give_eqn\' method') else: vf_x_str = str(simplify(vector_field.str_x)) vf_y_str = str(simplify(vector_field.str_y)) # define strings of the resulting 1-form components u_str = str(simplify('-(' + self.form_2_str + ')*(' + vf_y_str + ')' )) v_str = str(simplify( '(' + self.form_2_str + ')*(' + vf_x_str + ')' )) # keep an unformatted version to supply to the 1-form u_str_unformatted = u_str + '' v_str_unformatted = v_str + '' u_str = u_str.replace('x', '(self.xg)') u_str = u_str.replace('y', '(self.yg)') v_str = v_str.replace('x', '(self.xg)') v_str = v_str.replace('y', '(self.yg)') if u_str.find('x') & u_str.find('y') == -1: u_str = '(' + str(u_str) + ')* np.ones(np.shape(self.xg))' if v_str.find('x') & v_str.find('y') == -1: v_str = '(' + str(v_str) + ')* np.ones(np.shape(self.yg))' # evaulate the numerical 1-form components form: form_x = eval(u_str) form_y = eval(v_str) # create the object to return result_form = form_1(self.xg, self.yg, form_x, form_y, u_str_unformatted, v_str_unformatted) # return it to the user return result_form def num_interior_d(self, vector_field=None): ''' Computes the interior derivative of the 2-form Takes in: -- Vector_field = vector field object of DFormPy library to do the derivative with respect to, needs equations to work with nuymerical_only being False. Can also supply equations in a tuple: (eqn_x, eqn_y). If using numerical only, can supply object or tuple of numpy arrays (array_x, atrray_y). If nothing is supplied for it, it assumes F_x = 1 and F_y = 1, with correct form and shape Does no numerically via arrays in instance If equations were provided, these will be lost Returns: 0-form ''' # check if equations have been given: # if they have, doing it only numerically would create # a mismatch, avoid that if self.form_2_str == None: pass else: # equations have been given, a mismatch may occur # warn the user print('Warning: You supplied equations, doing it numerically only will not pass equations to the 1-form and these will be lost') # now complete the process numerically save as instructed # Take the vector field components, checking what was input! if vector_field is None: # if none was given, do it with respect to uniform 1, 1 vf_x = np.ones(np.shape(self.xg)) vf_y = np.ones(np.shape(self.xg)) elif type(vector_field) == tuple: # if equations were given, take these, is numericals were given here, break! 
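            # Worked example of what this method computes (illustrative): for
            # the 2-form w = f dx/\dy and VF V = (V_x, V_y), the interior
            # derivative is i_V w = -f*V_y dx + f*V_x dy, which is exactly
            # the form_x / form_y assignment carried out below.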
if type(vector_field[0]) == str: raise ValueError('for numerical calulation, supply VF arrays, not equations') else: vf_x = vector_field[0] vf_y = vector_field[1] else: # extract needed properties from the object supplied vf_x = vector_field.F_x vf_y = vector_field.F_y # Complete the interior derivative 2-form --> 1-form: form_x = -self.form_2 * vf_y form_y = self.form_2 * vf_x # supply these to the 1-form object creator result_form = form_1(self.xg, self.yg, form_x, form_y) # return it to the user return result_form # define a fucntion to compute a wedge product def wedge(self, form_second, degree=0, keep_object=False): ''' Parameters: ---------------- form_second - the form to wedge the 2-form with. Can be supplied as a DFormPy instance, a tuple of equations, or a single string equation depending on what form is to be wedged. To wedge with 1-form, supply 1-form instance, or tuple of component equations as strings in terms of x and y. To wedge with 0-form or 2-form, supply corresponding instances or a single equation. When using equations, to distinguish between them, provide parmater 'degree'. degree - default is 0. Only used when a single string is supplied as form_second, to distinguish betwen 0-form and 2-form for 0-form, degree=0, for 2-form, degree=2. Determines what form is to be wegded with the given 2-form. keep_object - bool - default=False - only used when 2-form is wedged with a 0-form. If False, a new object is created as a result of the wedge. If True, the 1-form acted on is modified to be the result of the wedge. To do so here, strings for the form must be supplied. Computes the Wedge product using strings, ANALYTICALLY Returns: -------------- Wedged with 0-form returns a 2-form object if keep_object is False (default), and returns nothing when it is True Wedged with a 1-form, operation makes a 3-form, which on R^2 is always = zero, only message displays. Wedged with a 2-form, operation makes a 4-form, which on R^2 is always = zero, only message displays. ''' # test if equations were given first: if self.form_2_str == None: raise ValueError('Error: You need to supply the 2-form equation to do this, look at \'give_eqn\' method') # set up variable to store order of supplied form, initially assume 1-form order = 0 # get needed second obejct strings dep. on input if isinstance(form_second, tuple): # if equations were given here take these, if numerical grids were given - error! 
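            # Illustrative example of the analytic result computed further
            # below: wedging the 2-form x*y dx/\dy with the 0-form x gives
            # (x**2)*y dx/\dy, since a 0-form simply scales the dx/\dy
            # coefficient.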
    # define a method to compute a wedge product
    def wedge(self, form_second, degree=0, keep_object=False):
        '''
        Parameters:
        ----------------
        form_second - the form to wedge the 2-form with.
            Can be supplied as a DFormPy instance, a tuple of equations,
            or a single string equation depending on what form is to be
            wedged.
            To wedge with 1-form, supply 1-form instance, or tuple of
            component equations as strings in terms of x and y.
            To wedge with 0-form or 2-form, supply corresponding
            instances or a single equation. When using equations, to
            distinguish between them, provide parameter 'degree'.
        degree - default is 0. Only used when a single string is
            supplied as form_second, to distinguish between 0-form and
            2-form: for 0-form, degree=0, for 2-form, degree=2.
            Determines what form is to be wedged with the given 2-form.
        keep_object - bool - default=False - only used when 2-form is
            wedged with a 0-form. If False, a new object is created as
            a result of the wedge. If True, the 2-form acted on is
            modified to be the result of the wedge. To do so here,
            strings for the form must be supplied.
        
        Computes the Wedge product using strings, ANALYTICALLY
        
        Returns:
        --------------
        Wedged with 0-form, returns a 2-form object if keep_object is
            False (default), and returns nothing when it is True.
        Wedged with a 1-form, the operation makes a 3-form, which on R^2
            is always zero; only a message displays.
        Wedged with a 2-form, the operation makes a 4-form, which on R^2
            is always zero; only a message displays.
        '''
        # test if equations were given first:
        if self.form_2_str is None:
            raise ValueError('Error: You need to supply the 2-form equation to do this, look at \'give_eqn\' method')
        # set up variable to store order of supplied form
        order = 0
        # get needed second object strings depending on input
        if isinstance(form_second, tuple):
            # if equations were given here take these, if numerical grids were given - error!
            # check size, should be a 1-form
            if len(form_second) == 2:
                # 2-form/\1-form attempt, gives zero on R^2
                if isinstance(form_second[0], str) and isinstance(form_second[1], str):
                    order = None
                    print('This operation makes a 3-form, which on R^2 is always = zero')
                else:
                    raise ValueError('for analytical calculation, supply 1-form equations as strings')
            else:
                raise ValueError('too many or too few equations given in tuple')
        elif isinstance(form_second, str):
            # single string, could be 0-form or 2-form, check given degree:
            if degree == 0:
                to_wedge_0_form_str = form_second
                order = 0
            elif degree == 2:
                # gives a 4-form = 0 on R^2
                order = None
                print('This operation makes a 4-form, which on R^2 is always = zero')
            else:
                raise ValueError('no possible degree given, or supplied one string for a 1-form')
        else:
            # object supplied, check which object is given:
            if isinstance(form_second, form_1):
                print('This operation makes a 3-form, which on R^2 is always = zero')
                order = None
            elif isinstance(form_second, form_0):
                if form_second.form_0_str is None:
                    raise ValueError('supplied 0-form instance must contain equations for analytical calculation')
                else:
                    to_wedge_0_form_str = form_second.form_0_str
                    order = 0
            elif isinstance(form_second, form_2):
                order = None
                print('This operation makes a 4-form, which on R^2 is always = zero')
            else:
                raise TypeError('Supplied form to wedge with is not recognised')
        # Deal with 2-form/\0-form:
        if order == 0:
            # multiply the scalar function into the 2-form component
            form_2_str = str(simplify('(' + self.form_2_str + ')*(' + to_wedge_0_form_str + ')'))
            # keep an unformatted copy to supply to the object maker later
            form_2_str_loc = form_2_str + ''
            # format it to be in terms of grids,
            # check against constant and zero 2-forms being supplied,
            # then get the numerical evaluation of it
            form_2_str = form_2_str.replace('x', 'self.xg')
            form_2_str = form_2_str.replace('y', 'self.yg')
            if form_2_str.find('x') == -1 and form_2_str.find('y') == -1:
                form_2_str = '(' + str(form_2_str) + ')* np.ones(np.shape(self.xg))'
            # evaluate it numerically on the grid supplied
            form_2_result = eval(form_2_str)
            # depending on keep_object, return:
            if keep_object:
                self.form_2 = form_2_result
                self.form_2_str = form_2_str_loc
            elif not keep_object:
                new_object = form_2(self.xg, self.yg, form_2_result, form_2_eq=form_2_str_loc)
                # return the new one to the user:
                return new_object
            else:
                raise ValueError('Error, Invalid input for \'keep_object\'')
        elif order is None:
            # made a form that is always zero on R^2, no need to make it;
            # warning already shown when degree was set
            pass
        else:
            # should never happen, but just in case
            raise ValueError('Variable change during code running, look at \'order\' parameter')
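    # Degree bookkeeping note: a wedge of a k-form with an l-form is a
    # (k+l)-form, and on R^2 every form of degree above 2 vanishes
    # identically, since there are only two independent differentials.
    # Hence 2-form/\1-form (a 3-form) and 2-form/\2-form (a 4-form) only
    # print a message in wedge above and in num_wedge below, and no
    # object is created.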
    # define a method for numerical wedge product
    def num_wedge(self, form_second, degree=0, keep_object=False):
        '''
        Parameters:
        ----------------
        form_second - the form to wedge the 2-form with.
            Can be supplied as a DFormPy instance, a tuple of component
            grids, or a single grid depending on what form is to be
            wedged.
            To wedge with 1-form, supply 1-form instance, or tuple of
            component grids.
            To wedge with 0-form or 2-form, supply corresponding
            instances or a single grid. When using grids, to distinguish
            between them, provide parameter 'degree'.
        degree - default is 0. Only used when a single grid is supplied
            as form_second, to distinguish between 0-form and 2-form.
            For 0-form, degree=0 and for 2-form, degree=2.
            Determines what form is to be wedged with the given 2-form.
        keep_object - bool - default=False - only used when 2-form is
            wedged with a 0-form. If False, a new object is created as
            a result of the wedge. If True, the 2-form acted on is
            modified to be the result of the wedge.
        
        Computes the Wedge product numerically
        
        Returns:
        --------------
        Wedged with 0-form, returns a 2-form object if keep_object is
            False (default), and returns nothing when it is True.
        Wedged with a 1-form, the operation makes a 3-form, which on R^2
            is always zero; only a message displays.
        Wedged with a 2-form, the operation makes a 4-form, which on R^2
            is always zero; only a message displays.
        '''
        # if equations were given, warn user that they will be lost:
        if self.form_2_str is not None:
            print('The 2-form you are completing the wedge with has equations supplied, these will be lost')
        # set up variable to store order of supplied form
        order = 0
        # get needed second object grids depending on input
        if isinstance(form_second, tuple):
            # check size to see what it is to be wedged with;
            # tuple should only be length 2 --> 2-form/\1-form
            if len(form_second) == 2:
                # 2-form/\1-form attempt, gives zero on R^2
                order = None
                print('This operation makes a 3-form, which on R^2 is always = zero')
            else:
                raise ValueError('too many or too few grids given in tuple')
        elif isinstance(form_second, np.ndarray):
            # check degree:
            if degree == 0:
                to_wedge_0_form = form_second
                order = 0
            elif degree == 1:
                raise ValueError('for degree 1, supply a 1-form, not a single grid')
            elif degree == 2:
                # gives a 4-form = 0 on R^2
                order = None
                print('This operation makes a 4-form, which on R^2 is always = zero')
            else:
                raise ValueError('no possible degree given')
        elif isinstance(form_second, str):
            # single string, could be 0-form or 2-form, check given degree:
            if degree == 0:
                str_0_form = form_second.replace('x', '(self.xg)')
                str_0_form = str_0_form.replace('y', '(self.yg)')
                if str_0_form.find('x') == -1 and str_0_form.find('y') == -1:
                    str_0_form = '(' + str(str_0_form) + ')* np.ones(np.shape(self.xg))'
                to_wedge_0_form = eval(str_0_form)
                order = 0
            elif degree == 1:
                raise ValueError('for degree 1, supply a 1-form, not a single equation')
            elif degree == 2:
                # gives a 4-form = 0 on R^2
                order = None
                print('This operation makes a 4-form, which on R^2 is always = zero')
            else:
                raise ValueError('no possible degree given')
        # object supplied, get grids checking which object is given:
        elif isinstance(form_second, form_1):
            # gives a 3-form = 0 on R^2
            order = None
            print('This operation makes a 3-form, which on R^2 is always = zero')
        elif isinstance(form_second, form_0):
            to_wedge_0_form = form_second.form_0
            order = 0
        elif isinstance(form_second, form_2):
            order = None
            print('This operation makes a 4-form, which on R^2 is always = zero')
        else:
            raise TypeError('Supplied form to wedge with is not recognised')
        # Use given inputs to evaluate the result:
        if order == 0:
            # depending on keep_object, return:
            if keep_object:
                self.form_2 = to_wedge_0_form * self.form_2
            elif not keep_object:
                new_object = form_2(self.xg, self.yg, to_wedge_0_form * self.form_2)
                # return the new one to the user:
                return new_object
            else:
                raise ValueError('Error, Invalid input for \'keep_object\'')
        elif order is None:
            # made a form that is always zero on R^2, no need to make it;
            # warning already shown when degree was set
            pass
        else:
            # should never happen, but just in case
            raise ValueError('Variable change during code running, look at \'order\' parameter')
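# ----------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library): wedging the
# 2-form above with a 0-form, analytically and numerically. Constructor
# signatures follow the calls made inside this module.
#
#   import numpy as np
#   x = np.linspace(-2, 2, 21)
#   xg, yg = np.meshgrid(x, x)
#   w2 = form_2(xg, yg, xg*yg, form_2_eq='x*y')   # w2 = x*y dx/\dy
#   w2b = w2.wedge('x**2', degree=0)              # analytic: x**3*y dx/\dy
#   w2c = w2.num_wedge(xg**2, degree=0)           # numeric counterpart
# ----------------------------------------------------------------------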
# %%

'''
function to create a 0-form object and define methods for it
'''

# define a class that will set up a 0-form object that can be customised and plotted
class form_0():
    '''
    form_0(xg, yg, form_0, form_0_eqn=None)
    
    Defines a 0-form object and returns it to user.
    
    Parameters:
    ---------------
    xg - grid of x values (2D numpy.ndarray)
    yg - grid of y values (2D numpy.ndarray)
    form_0 - scalar form grid (2D numpy.ndarray)
    Optional:
    form_0_eqn - expression for scalar form f(x,y) (string)
    
    Instance variables:
    ---------------
    xg, yg, form_0, form_0_str
    pt_den_x, pt_den_y - int - number of points on grids, extracted from grids
    logarithmic_scale_bool - bool - determines if log scaling is used
    N - int - base for log scaling
    delta_factor - float/int - determines size of blank border in figure as fraction of whole plot size
    inline_bool - bool - if labels on contours are put on contour lines
    denser - int, default is 1 - if equations are given, increases density of contour lines
    lines - int - number of contour lines to draw
    fontsize - int - size of contour labels
    cmap - matplotlib colourmap - colour mapping to use
    
    Methods:
    ---------------
    give_eqn
    return_string
    surround_space
    density_increase
    levels
    log_scaling
    fonts_size
    labels
    set_density
    plot
    ext_d
    num_ext_d
    num_hodge
    hodge
    wedge
    num_wedge
    '''
    
    # set up all initial, default variables
    def __init__(self, xg, yg, form_0, form_0_eqn=None):
        self.xg = xg
        self.yg = yg
        self.form_0 = form_0
        self.pt_den_x = len(xg[0, :])
        self.pt_den_y = len(xg[:, 0])
        self.delta_factor = 10
        self.denser = 1
        self.lines = 15
        self.fontsize = 7
        self.inline_bool = True
        # Log scaling parameters
        self.logarithmic_scale_bool = 0
        self.N = 30
        if form_0_eqn is not None:
            self.form_0_str = str(simplify(form_0_eqn))  # user must change to access some methods
        else:
            self.form_0_str = None
        # Note, the string must be given with x and y as variables
        self.cmap = cm.viridis
    
    # #####################################################################
    # Define basic methods to customise this object
    # #####################################################################
    
    # define a method to allow user to supply the string equation
    # of the 0-form
    def give_eqn(self, equation_str):
        '''
        Allows user to supply equation to instance, if not initially done so
        
        Parameters:
        ------------
        equation_str - str - equation of the supplied numerical 0-form
        It must be in terms of x and y.
        Has to be given for some methods to be computable.
        
        Returns: None
        '''
        self.form_0_str = equation_str
        # update the numerical values to always match
        string = self.form_0_str + ''
        # Check if the equation provided contains x and y terms
        # and format it to be evaluated
        if string.find('x') == -1 and string.find('y') == -1:
            string = '(' + str(string) + ')* np.ones(np.shape(self.xg))'
        else:
            string = string.replace('x', '(self.xg)')
            string = string.replace('y', '(self.yg)')
        # re-evaluate the 0-form numerically, preventing mismatch
        self.form_0 = eval(string)
    
    # define a method to return the string equation to the user
    def return_string(self):
        '''
        Takes in no arguments, returns the unformatted string back to user
        This is done in case user wants to access strings
        that got here not by input but by ext. alg.
        '''
        return self.form_0_str
    # define a method to change spare spacing around figure
    def surround_space(self, delta_denominator):
        '''
        Takes in one argument, float or int
        Sets the extra blank space around the domain of grids in axis
        The input number defines the denominator of the fraction to use:
        e.g. supplying 3 will make the white space 1/3 of the width
        of the domain of the grid.
        '''
        self.delta_factor = delta_denominator
    
    def density_increase(self, factor):
        '''
        Takes 1 float/int argument
        Sets increase in density between form grids and contour grids
        Needed if this was accessed by other forms via ext. alg. methods
        Note, this cannot be set to anything but 1 if the 0-form
        equation is not also supplied correctly as a string.
        '''
        self.denser = factor
    
    def levels(self, values):
        '''
        Takes 1 argument: values - int or list
        if int: changes number of contour lines that get drawn;
                the values are set automatically by matplotlib
        if list: sets values to draw level lines at (ascending order);
                supplied to matplotlib contour plot via levels
        '''
        if isinstance(values, int) or isinstance(values, list):
            self.lines = values
        else:
            raise TypeError('Require input to be integer or list, if you used a numpy array try: list(your_array)')
    
    def log_scaling(self):
        '''
        Changes bool for log scaling
        Default = False
        Changes to the other option each time it is called
        '''
        self.logarithmic_scale_bool = not self.logarithmic_scale_bool
    
    def fonts_size(self, size):
        '''
        Takes 1 float/int argument
        Changes fontsize for contour labels
        '''
        self.fontsize = size
    
    def labels(self):
        '''
        Takes no arguments
        Determines if height labels are put on contours
        Starts off as True, calling changes it each time
        '''
        self.inline_bool = not self.inline_bool
    
    # define a method to change the density of grids in same range;
    # requires string input of the 0-form.
    # Technically not so needed here, as all plotting is done with denser,
    # which is similar enough. But it might be useful to change
    # the grids as stored and not just locally for a plot
    def set_density(self, points_number):
        '''
        set_density(points_number)
        
        Changes the density of points in the same range to the input value
        Requires the string equation to be supplied
        Only creates grids with the same number of points on each axis.
        
        Parameters:
        ---------------
        points_number - number of points to evaluate on
        
        Returns: none
        '''
        if self.form_0_str is None:
            raise TypeError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
        else:
            # redefine the grids
            x = np.linspace(self.xg[0, 0], self.xg[0, -1], points_number)
            y = np.linspace(self.yg[0, 0], self.yg[-1, 0], points_number)
            self.xg, self.yg = np.meshgrid(x, y)
            # based on these, change other dependent variables
            self.pt_den_x = len(self.xg[0, :])
            self.pt_den_y = len(self.yg[:, 0])
            # substitute these into the equation, but keep it local
            str_0 = self.form_0_str + ''
            str_0 = str_0.replace('x', '(self.xg)')
            str_0 = str_0.replace('y', '(self.yg)')
            # correct for constant forms
            if str_0.find('x') == -1 and str_0.find('y') == -1:
                str_0 = '(' + str(str_0) + ')* np.ones(np.shape(self.xg))'
            # re-evaluate the 0-form numerically
            self.form_0 = eval(str_0)
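    # ------------------------------------------------------------------
    # Customisation sketch (illustrative only): the methods above are
    # intended to be called before plot() below, e.g.:
    #
    #   f0 = form_0(xg, yg, xg**2 + yg**2, form_0_eqn='x**2 + y**2')
    #   f0.levels(25)            # draw 25 contour lines
    #   f0.density_increase(3)   # finer contour grid (needs the equation)
    #   f0.log_scaling()         # toggle logarithmic scaling
    # ------------------------------------------------------------------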
    # #####################################################################
    # Write more useful methods: plot, exterior derivative, Hodge etc.
    # #####################################################################
    
    # define a method to plot the 0-form
    def plot(self, axis):
        '''
        Finalises the plotting
        Uses the attributes of the object as set originally
        and as customised with methods
        to create a contour plot of the 0-form.
        
        Parameters:
        -------------
        axis - matplotlib axis that 0-form will be plotted on
        '''
        # Extract L from the x and y grids
        Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
        Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
        x0 = self.xg[0, 0] + Lx
        y0 = self.yg[0, 0] + Ly
        # reset axis limits
        ax_Lx = Lx + Lx/self.delta_factor
        ax_Ly = Ly + Ly/self.delta_factor
        axis.set_xlim(-ax_Lx + x0, ax_Lx + x0)
        axis.set_ylim(-ax_Ly + y0, ax_Ly + y0)
        # check requests as to density of lines
        if self.denser != 1:
            if self.form_0_str is None:
                # This cannot be done if a string has not been supplied
                raise TypeError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
            else:
                # get the supplied form as a string
                zero_form_str = str(simplify(self.form_0_str))
                # set up grids for contours
                contour_x = np.linspace(self.xg[0, 0], self.xg[0, -1], self.pt_den_x*self.denser)
                contour_y = np.linspace(self.yg[0, 0], self.yg[-1, 0], self.pt_den_y*self.denser)
                contour_x_grid, contour_y_grid = np.meshgrid(contour_x, contour_y)
                # format the given string
                zero_form_str = zero_form_str.replace('x', 'contour_x_grid')
                zero_form_str = zero_form_str.replace('y', 'contour_y_grid')
                # evaluate, bearing in mind constant forms
                if zero_form_str.find('contour_x_grid') == -1 and zero_form_str.find('contour_y_grid') == -1:
                    form_0_contour = eval(zero_form_str)*np.ones(np.shape(contour_x_grid))
                else:
                    form_0_contour = eval(zero_form_str)
                form_0 = form_0_contour
                xg = contour_x_grid
                yg = contour_y_grid
        else:
            form_0 = self.form_0
            xg = self.xg
            yg = self.yg
        # set all insignificant values to zero:
        form_0[np.abs(form_0) < 1e-15] = 0
        # deal with singularities that appear at evaluated points
        isnan_arr = np.isnan(form_0)
        for i in range(len(xg[0, :])):
            for j in range(len(yg[:, 0])):
                # set to zero points that are not defined or inf
                if isnan_arr[j, i] or abs(form_0[j, i]) == np.inf or abs(form_0[j, i]) > 1e15:
                    # colour this region as a red dot, not a square, so as
                    # not to confuse it with high-magnitude 2-forms in
                    # stacks or blocks
                    circ = patch.Circle((xg[j, i], yg[j, i]), Lx*0.05/3, color='red')
                    axis.add_patch(circ)
                    form_0[j, i] = 0
        if self.logarithmic_scale_bool:
            mag1 = np.abs(form_0) + 1
            form_0_norm = form_0/(mag1)
            logmag = np.log10(mag1)
            form_0 = form_0_norm*logmag
        
        CS = axis.contour(xg, yg, form_0, levels=self.lines, cmap=self.cmap)
        axis.clabel(CS, inline=self.inline_bool, fontsize=self.fontsize)
    
    # define a method to compute the exterior derivative
    def ext_d(self):
        '''
        Takes in no arguments
        Computes the exterior derivative and returns it
        as the 1-form object
        
        Returns 1-form object
        '''
        # first make sure that the string has been supplied
        if self.form_0_str is None:
            raise TypeError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
        else:
            # can compute the exterior derivative:
            form_0_str = str(simplify(self.form_0_str))
            # from this, need derivatives, so set it up as a SymPy object
            sympy_expr_form_0 = parse_expr(form_0_str, evaluate=False)
            # set up an array of coordinates that need to be used (in standard order)
            coords = ['x', 'y']
            # from these, find the derivatives
            form_1_x_str = str(diff(sympy_expr_form_0, coords[0]))
            form_1_y_str = str(diff(sympy_expr_form_0, coords[1]))
            # need to supply these unformatted, so save those:
            form_1_x_unformated, form_1_y_unformated = form_1_x_str*1, form_1_y_str*1
            # from these strings, get the numerical 1-form:
            form_1_x_str = form_1_x_str.replace('x', '(self.xg)')
            form_1_x_str = form_1_x_str.replace('y', '(self.yg)')
            form_1_y_str = form_1_y_str.replace('x', '(self.xg)')
            form_1_y_str = form_1_y_str.replace('y', '(self.yg)')
            if form_1_x_str.find('x') == -1 and form_1_x_str.find('y') == -1:
                form_1_x_str = '(' + str(form_1_x_str) + ')* np.ones(np.shape(self.xg))'
            if form_1_y_str.find('x') == -1 and form_1_y_str.find('y') == -1:
                form_1_y_str = '(' + str(form_1_y_str) + ')* np.ones(np.shape(self.yg))'
            form_1_x = eval(form_1_x_str)
            form_1_y = eval(form_1_y_str)
            # supply these to the 1-form object function and return object
            result_1_form = form_1(self.xg, self.yg, form_1_x, form_1_y, form_1_x_unformated, form_1_y_unformated)
            return result_1_form
    
    # define a method to complete the exterior derivative numerically
    def num_ext_d(self, edge_order=1):
        '''
        Takes in 1 argument:
        -- edge_order: determines order, same as in numpy gradient {1 or 2}
        
        Returns 1 object - 1-form
        
        Computes the exterior derivative numerically only.
        The equations do not need to be given;
        if given, they do not get passed onto the 1-form object anyway.
        NUMERICAL ONLY
        '''
        # get the grid spacing, so the gradient is taken with respect
        # to x and y, not the array index
        dx = self.xg[0, 1] - self.xg[0, 0]
        dy = self.yg[1, 0] - self.yg[0, 0]
        # from numpy gradient, get the gradient arrays
        fy, fx = np.gradient(self.form_0, dy, dx, edge_order=edge_order)
        # supply these to the 1-form object function
        result_1_form = form_1(self.xg, self.yg, fx, fy)
        # return the new object to user
        return result_1_form
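    # ------------------------------------------------------------------
    # Usage sketch (illustrative only): exterior derivative of a 0-form.
    # For f = x**2 + y**2 the analytic result is df = 2x dx + 2y dy, and
    # num_ext_d should approximate the same components via np.gradient.
    #
    #   f0 = form_0(xg, yg, xg**2 + yg**2, form_0_eqn='x**2 + y**2')
    #   df = f0.ext_d()                  # 1-form with strings '2*x', '2*y'
    #   df_num = f0.num_ext_d(edge_order=2)
    # ------------------------------------------------------------------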
    # define methods for the Hodge of a 0-form
    def num_hodge(self):
        '''
        Takes in no arguments
        
        Calculates the Hodge on R^2 by the standard definition:
        1* = (dx^dy)
        
        Does so numerically via instance-provided arrays
        IF equations were given, this method will lose them
        
        Returns a 2-form
        '''
        # check if equations have been given: if they have, doing it
        # only numerically would create a mismatch, so warn the user
        if self.form_0_str is not None:
            print('Warning: You supplied equations, doing it numerically only will lose these')
        # now complete the process numerically,
        # pass the arrays in to the object to create a new one and return
        new_object = form_2(self.xg, self.yg, self.form_0)  # N.B. no equations to supply
        return new_object
    
    def hodge(self):
        '''
        Takes in no arguments
        
        Calculates the Hodge on R^2 by the standard definition:
        1* = (dx^dy)
        
        Does so analytically via instance-provided equations
        Changes the equations AND the numerical answers
        
        Returns a 2-form
        '''
        # can only be done if equations have been given, check:
        if self.form_0_str is not None:
            # some equations are there, compute the Hodge on these:
            # keep an unformatted string to supply to the new object,
            # and format a local copy to relate to the self grids
            form_2_str_unformated = self.form_0_str + ''
            string_2_form = self.form_0_str  # to be formatted
            # from these strings, get the numerical 2-form:
            string_2_form = string_2_form.replace('x', '(self.xg)')
            string_2_form = string_2_form.replace('y', '(self.yg)')
            if string_2_form.find('x') == -1 and string_2_form.find('y') == -1:
                string_2_form = '(' + str(string_2_form) + ')* np.ones(np.shape(self.xg))'
            # evaluate numerically
            form_2_result = eval(string_2_form)
            # create and return object
            new_object = form_2(self.xg, self.yg, form_2_result, form_2_eq=form_2_str_unformated)
            return new_object
        else:
            raise TypeError('You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
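    # ------------------------------------------------------------------
    # Usage sketch (illustrative only): the Hodge star of a 0-form on R^2
    # just multiplies the volume form, f |--> f dx/\dy, so both methods
    # return a 2-form with the same component grid:
    #
    #   f0 = form_0(xg, yg, xg*yg, form_0_eqn='x*y')
    #   w2 = f0.hodge()        # analytic, keeps the string 'x*y'
    #   w2n = f0.num_hodge()   # numerical, no string attached
    # ------------------------------------------------------------------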
    # define a method to compute a wedge product
    def wedge(self, form_second, degree=0, keep_object=False):
        '''
        Parameters:
        ----------------
        form_second - the form to wedge the 0-form with.
            Can be supplied as a DFormPy instance, a tuple of equations,
            or a single string equation depending on what form is to be
            wedged.
            To wedge with 1-form, supply 1-form instance, or tuple of
            component equations as strings in terms of x and y.
            To wedge with 0-form or 2-form, supply corresponding
            instances or a single equation. When using equations, to
            distinguish between them, provide parameter 'degree'.
        degree - default is 0. Only used when a single string is
            supplied as form_second, to distinguish between 0-form and
            2-form: for 0-form, degree=0, for 2-form, degree=2.
            Determines what form is to be wedged with the given 0-form.
        keep_object - bool - default=False - only needed for
            0-form /\ 0-form. If False, a new object is created as a
            result of the wedge. If True, the 0-form acted on is
            modified to be the result of the wedge. To do so here,
            strings for the form must be supplied.
        
        Computes the Wedge product using strings, ANALYTICALLY
        
        Returns:
        --------------
        Wedged with 0-form, returns a 0-form object if keep_object is
            False (default), and returns nothing when it is True.
        Wedged with a 1-form, returns a 1-form instance.
        Wedged with a 2-form, returns a 2-form instance.
        '''
        # test if equations were given first:
        if self.form_0_str is None:
            raise ValueError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
        # set up variable to store order of supplied form
        order = 0
        # get needed second object strings depending on input
        if isinstance(form_second, tuple):
            # if equations were given here take these, if numerical grids were given - error!
            # check size, should be a 1-form
            if len(form_second) == 2:
                # 0-form/\1-form, check if strings were supplied
                if isinstance(form_second[0], str) and isinstance(form_second[1], str):
                    to_wedge_x_2_str = form_second[0]
                    to_wedge_y_2_str = form_second[1]
                    order = 1
                else:
                    raise ValueError('for analytical calculation, supply 1-form equations as strings')
            else:
                raise ValueError('too many or too few equations given in tuple')
        elif isinstance(form_second, str):
            # single string, could be 0-form or 2-form, check given degree:
            if degree == 0:
                to_wedge_0_form_str = form_second
                order = 0
            elif degree == 2:
                to_wedge_2_form_str = form_second
                order = 2
            else:
                raise ValueError('no possible degree given, or supplied one string for a 1-form')
        else:
            # object supplied, check which object is given:
            if isinstance(form_second, form_1):
                if form_second.form_1_str_x is None or form_second.form_1_str_y is None:
                    raise ValueError('supplied 1-form instance must contain equations for analytical calculation')
                else:
                    to_wedge_x_2_str = form_second.form_1_str_x
                    to_wedge_y_2_str = form_second.form_1_str_y
                    order = 1
            elif isinstance(form_second, form_0):
                if form_second.form_0_str is None:
                    raise ValueError('supplied 0-form instance must contain equations for analytical calculation')
                else:
                    to_wedge_0_form_str = form_second.form_0_str
                    order = 0
            elif isinstance(form_second, form_2):
                if form_second.form_2_str is None:
                    raise ValueError('supplied 2-form instance must contain equations for analytical calculation')
                else:
                    to_wedge_2_form_str = form_second.form_2_str
                    order = 2
            else:
                raise TypeError('Supplied form to wedge with is not recognised')
        # Deal with 0-form/\1-form:
        if order == 1:
            # first, find the equations of the resulting 1-form:
            new_str_x = str(simplify('(' + self.form_0_str + ')*(' + to_wedge_x_2_str + ')'))
            new_str_y = str(simplify('(' + self.form_0_str + ')*(' + to_wedge_y_2_str + ')'))
            # keep unformatted copies to supply to the object maker later
            form_1_str_x_loc = new_str_x + ''
            form_1_str_y_loc = new_str_y + ''
            # format them to be in terms of grids,
            # check against constant and zero 1-forms being supplied,
            # then get the numerical evaluation of them
            new_str_x = new_str_x.replace('x', '(self.xg)')
            new_str_x = new_str_x.replace('y', '(self.yg)')
            new_str_y = new_str_y.replace('x', '(self.xg)')
            new_str_y = new_str_y.replace('y', '(self.yg)')
            if new_str_x.find('x') == -1 and new_str_x.find('y') == -1:
                new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))'
            if new_str_y.find('x') == -1 and new_str_y.find('y') == -1:
                new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))'
            form_1_x = eval(new_str_x)
            form_1_y = eval(new_str_y)
            # return the new one to the user:
            new_object = form_1(self.xg, self.yg, form_1_x, form_1_y, F_x_eqn=form_1_str_x_loc, F_y_eqn=form_1_str_y_loc)
            return new_object
        elif order == 0:
            form_0_str = str(simplify('(' + self.form_0_str + ')*(' + to_wedge_0_form_str + ')'))
            # keep an unformatted copy to supply to the object maker later
            form_0_str_loc = form_0_str + ''
            # format it to be in terms of grids,
            # check against constant and zero 0-forms being supplied,
            # then get the numerical evaluation of it
            form_0_str = form_0_str.replace('x', 'self.xg')
            form_0_str = form_0_str.replace('y', 'self.yg')
            if form_0_str.find('x') == -1 and form_0_str.find('y') == -1:
                form_0_str = '(' + str(form_0_str) + ')* np.ones(np.shape(self.xg))'
            # evaluate it numerically on the grid supplied
            form_0_result = eval(form_0_str)
            # depending on keep_object, return:
            if keep_object:
                self.form_0 = form_0_result
                self.form_0_str = form_0_str_loc
            elif not keep_object:
                new_object = form_0(self.xg, self.yg, form_0_result, form_0_str_loc)
                # return the new one to the user:
                return new_object
            else:
                raise ValueError('Error, Invalid input for \'keep_object\'')
        elif order == 2:
            form_2_str = str(simplify('(' + self.form_0_str + ')*(' + to_wedge_2_form_str + ')'))
            # keep an unformatted copy to supply to the object maker later
            form_2_str_loc = form_2_str + ''
            # format it to be in terms of grids,
            # check against constant and zero 2-forms being supplied,
            # then get the numerical evaluation of it
            form_2_str = form_2_str.replace('x', 'self.xg')
            form_2_str = form_2_str.replace('y', 'self.yg')
            if form_2_str.find('x') == -1 and form_2_str.find('y') == -1:
                form_2_str = '(' + str(form_2_str) + ')* np.ones(np.shape(self.xg))'
            # evaluate it numerically on the grid supplied
            form_2_result = eval(form_2_str)
            # create new instance and return to user
            new_object = form_2(self.xg, self.yg, form_2_result, form_2_str_loc)
            return new_object
        else:
            # should never happen, but just in case
            raise ValueError('Variable change during code running, look at \'order\' parameter')
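    # ------------------------------------------------------------------
    # Usage sketch (illustrative only): 0-form wedges do not vanish on
    # R^2, since 0 + k <= 2 for k = 0, 1, 2; the result keeps the degree
    # of the second form. For example, with f = x*y:
    #
    #   f0 = form_0(xg, yg, xg*yg, form_0_eqn='x*y')
    #   w1 = f0.wedge(('1', 'x'))      # 1-form: x*y dx + x**2*y dy
    #   w0 = f0.wedge('x', degree=0)   # 0-form: x**2*y
    # ------------------------------------------------------------------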
    # define a method for numerical wedge product
    def num_wedge(self, form_second, degree=0, keep_object=False):
        '''
        Parameters:
        ----------------
        form_second - the form to wedge the 0-form with.
            Can be supplied as a DFormPy instance, a tuple of grids of
            the same size and dimensions as this 0-form, or a single
            grid of scaling function values, depending on what form is
            to be wedged.
            To wedge with 1-form, supply 1-form instance, or tuple of
            component grids of the same size as the 0-form acted on.
            To wedge with 0-form or 2-form, supply corresponding
            instances or a single grid. When using grids, to
            distinguish between them, provide parameter 'degree'.
        degree - default is 0. Only used when a single grid is supplied
            as form_second, to distinguish between 0-form and 2-form:
            for 0-form, degree=0, for 2-form, degree=2.
            Determines what form is to be wedged with the given 0-form.
        keep_object - bool - default=False - only used when 0-form is
            wedged with a 0-form. If False, a new object is created as
            a result of the wedge. If True, the 0-form acted on is
            modified to be the result of the wedge.
        
        Computes the Wedge product numerically
        
        Returns:
        --------------
        Wedged with 0-form, returns a 0-form object if keep_object is
            False (default), and returns nothing when it is True.
        Wedged with a 1-form, returns a 1-form instance.
        Wedged with a 2-form, returns a 2-form instance.
        '''
        # if equations were given, warn user that they will be lost:
        if self.form_0_str is not None:
            print('The 0-form you are completing the wedge with has equations supplied, these will be lost')
        # set up variable to store order of supplied form
        order = 0
        # get needed second object grids depending on input
        if isinstance(form_second, tuple):
            # check size to see what it is to be wedged with;
            # tuple should only be length 2 --> 0-form/\1-form
            if len(form_second) == 2:
                # 0-form/\1-form, extract components.
                # If equations were given, change to values on grids;
                # if numerical grids were given, take these directly:
                if isinstance(form_second[0], str) and isinstance(form_second[1], str):
                    new_str_x = form_second[0].replace('x', '(self.xg)')
                    new_str_x = new_str_x.replace('y', '(self.yg)')
                    new_str_y = form_second[1].replace('x', '(self.xg)')
                    new_str_y = new_str_y.replace('y', '(self.yg)')
                    if new_str_x.find('x') == -1 and new_str_x.find('y') == -1:
                        new_str_x = '(' + str(new_str_x) + ')* np.ones(np.shape(self.xg))'
                    if new_str_y.find('x') == -1 and new_str_y.find('y') == -1:
                        new_str_y = '(' + str(new_str_y) + ')* np.ones(np.shape(self.yg))'
                    f12_x = eval(new_str_x)
                    f12_y = eval(new_str_y)
                    order = 1
                elif isinstance(form_second[0], np.ndarray) and isinstance(form_second[1], np.ndarray):
                    f12_x = form_second[0]
                    f12_y = form_second[1]
                    order = 1
                else:
                    raise ValueError('Not recognised input tuple')
            else:
                raise ValueError('too many or too few components given in tuple')
        elif isinstance(form_second, np.ndarray):
            # check degree:
            if degree == 0:
                to_wedge_0_form = form_second
                order = 0
            elif degree == 1:
                raise ValueError('for degree 1, supply a 1-form, not a single grid')
            elif degree == 2:
                to_wedge_2_form = form_second
                order = 2
            else:
                raise ValueError('no possible degree given')
        elif isinstance(form_second, str):
            # single string, could be 0-form or 2-form, check given degree:
            if degree == 0:
                str_0_form = form_second.replace('x', '(self.xg)')
                str_0_form = str_0_form.replace('y', '(self.yg)')
                if str_0_form.find('x') == -1 and str_0_form.find('y') == -1:
                    str_0_form = '(' + str(str_0_form) + ')* np.ones(np.shape(self.xg))'
                to_wedge_0_form = eval(str_0_form)
                order = 0
            elif degree == 2:
                str_2_form = form_second.replace('x', '(self.xg)')
                str_2_form = str_2_form.replace('y', '(self.yg)')
                if str_2_form.find('x') == -1 and str_2_form.find('y') == -1:
                    str_2_form = '(' + str(str_2_form) + ')* np.ones(np.shape(self.xg))'
                to_wedge_2_form = eval(str_2_form)
                order = 2
            else:
                raise ValueError('no possible degree given, or supplied one string for a 1-form')
        # object supplied, get grids checking which object is given:
        elif isinstance(form_second, form_1):
            f12_x = form_second.F_x
            f12_y = form_second.F_y
            order = 1
        elif isinstance(form_second, form_0):
            to_wedge_0_form = form_second.form_0
            order = 0
        elif isinstance(form_second, form_2):
            order = 2
            to_wedge_2_form = form_second.form_2
        else:
            raise TypeError('Supplied form to wedge with is not recognised')
        # Use given inputs to evaluate the result:
        # Deal with 0-form/\1-form:
        if order == 1:
            # find the components of the resulting 1-form
            new_form_1_x = self.form_0 * f12_x
            new_form_1_y = self.form_0 * f12_y
            # create instance and return
            new_object = form_1(self.xg, self.yg, new_form_1_x, new_form_1_y)
            return new_object
        elif order == 0:
            # from these, get the numerical 0-form
            form_0_result = self.form_0 * to_wedge_0_form
            # depending on keep_object, return:
            if keep_object:
                self.form_0 = form_0_result
            elif not keep_object:
                new_object = form_0(self.xg, self.yg, form_0_result)
                # return the new one to the user:
                return new_object
            else:
                raise ValueError('Error, Invalid input for \'keep_object\'')
        elif order == 2:
            # from these, get the numerical 2-form
            form_2_result = self.form_0 * to_wedge_2_form
            # create instance and return
            new_object = form_2(self.xg, self.yg, form_2_result)
            return new_object
        else:
            # should never happen, but just in case
            raise ValueError('Variable change during code running, look at \'order\' parameter')
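# ----------------------------------------------------------------------
# End-to-end sketch for the 0-form class above (illustrative only):
# construct from grids, attach an equation, then contour-plot on a
# supplied matplotlib axis. Assumes matplotlib.pyplot is available.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(-2, 2, 31)
#   xg, yg = np.meshgrid(x, x)
#   f0 = form_0(xg, yg, xg**2 - yg**2)
#   f0.give_eqn('x**2 - y**2')   # enables ext_d, set_density, denser plots
#   fig, ax = plt.subplots()
#   f0.plot(ax)
#   plt.show()
# ----------------------------------------------------------------------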
# %%

'''
function to create a vector field object and define methods for it
'''

# define a class that will set up a vector field object that can be customised and plotted
class vector_field():
    '''
    Defines a vector field object and returns it to user.
    
    Takes 4 to 6 arguments: the two 2D grids, which must be square and
    of equal sizes, then 2 arguments for the i and j components based on
    the same grids, then optionally 2 equation strings, for x and y
    (not always needed, but required for some methods).
    '''
    
    # set up all variables
    def __init__(self, xg, yg, F_x, F_y, F_x_eqn=None, F_y_eqn=None):
        self.xg = xg
        self.yg = yg
        self.F_x = F_x
        self.F_y = F_y
        self.pt_den = len(xg[:, 0])  # assume square grids
        self.orientation = 'mid'
        self.scale = 1
        self.color = 'black'
        self.logarithmic_scale_bool = 0
        self.scale_bool = True
        self.delta_factor = 10
        if F_x_eqn is not None:
            self.str_x = str(simplify(F_x_eqn))  # user must change to access some methods
            # Note, the string must be given with x and y as variables
        else:
            self.str_x = None
        if F_y_eqn is not None:
            self.str_y = str(simplify(F_y_eqn))
        else:
            self.str_y = None
    
    # #####################################################################
    # write some methods that will allow the user to change some of the
    # above variables
    # #####################################################################
    
    # define a method to supply the string equations to the instance
    def give_eqn(self, equation_str_x, equation_str_y):
        '''
        Takes in 2 arguments, strings
        These must be the equations of the supplied numerical vector
        field, in terms of x and y.
        Have to be given for some methods to be computable.
        '''
        # set equation parameters to simplified inputs
        self.str_x = str(simplify(equation_str_x))
        self.str_y = str(simplify(equation_str_y))
        # make the values match automatically to limit how often mismatch occurs;
        # substitute these into the equations, but keep them local
        str_x = self.str_x + ''
        str_y = self.str_y + ''
        str_x = str_x.replace('x', '(self.xg)')
        str_x = str_x.replace('y', '(self.yg)')
        str_y = str_y.replace('x', '(self.xg)')
        str_y = str_y.replace('y', '(self.yg)')
        # check against constant field components:
        if str_x.find('x') == -1 and str_x.find('y') == -1:
            str_x = '(' + str(str_x) + ')* np.ones(np.shape(self.xg))'
        if str_y.find('x') == -1 and str_y.find('y') == -1:
            str_y = '(' + str(str_y) + ')* np.ones(np.shape(self.yg))'
        # re-evaluate the field numerically
        self.F_x = eval(str_x)
        self.F_y = eval(str_y)
    
    def return_string(self):
        '''
        Takes in no arguments, returns the unformatted strings back to user
        This is done in case user wants to access strings
        that got here not by input but by ext. alg.
        '''
        return self.str_x, self.str_y
    # define a method to change figure size
    def fig_size(self, n, m):
        '''
        Takes two inputs, float or int numbers, sets the figure size to
        these dimensions in inches. Uses set_size_inches from matplotlib,
        so can just use that on the attribute figure; this method is here
        just for easier naming.
        '''
        self.figure.set_size_inches(n, m)
    
    # change colour
    def colour(self, color):
        '''
        Takes input of a single string. String must be formatted
        as to be accepted by matplotlib colors.
        Changes the colour of plotted arrows.
        '''
        self.color = str(color)
    
    # change orientation:
    def orient(self, string):
        '''
        Takes one input; needs to be a string understood by matplotlib
        quiver to orient arrows, e.g. 'tip', 'tail', 'mid'.
        Orients arrows on the quiver plot accordingly.
        '''
        self.orientation = str(string)
    
    # change boolean that determines if scaling is logarithmic
    def log_scaling(self):
        '''
        Takes no arguments
        Changes the boolean that determines if scaling is logarithmic
        Whenever it is called, it changes that boolean to the opposite
        The object is initialised with this as False
        '''
        self.logarithmic_scale_bool = not self.logarithmic_scale_bool
    
    # define a method to change the bool that determines if arrows autoscale
    def autoscale(self):
        '''
        Takes no arguments
        Changes the boolean that determines if arrows are autoscaled
        Whenever it is called, it changes that boolean to the opposite
        The object is initialised with this as False
        '''
        self.scale_bool = not self.scale_bool
    
    # define a method to change spare spacing around figure
    def surround_space(self, delta_denominator):
        '''
        Takes in one argument, float or int
        Sets the extra blank space around the domain of grids in axis
        The input number defines the denominator of the fraction to use:
        e.g. supplying 3 will make the white space 1/3 of the width
        of the domain of the grid.
        '''
        self.delta_factor = delta_denominator
    
    # define a method to change the density of grids in same range;
    # requires string input of the vector field
    def set_density(self, points_number):
        '''
        Changes the density of points in the same range to the input value
        Requires the string equations to be supplied
        Only creates grids with the same number of points on each axis.
        
        Parameters:
        ---------------
        points_number - number of points to evaluate on
        
        Returns: None
        '''
        if self.str_x is None or self.str_y is None:
            raise ValueError('Error: You need to supply the VF equations to do this, look at \'give_eqn\' method')
        else:
            # redefine the grids
            x = np.linspace(self.xg[0, 0], self.xg[0, -1], points_number)
            y = np.linspace(self.yg[0, 0], self.yg[-1, 0], points_number)
            self.xg, self.yg = np.meshgrid(x, y)
            # based on these, change other dependent variables
            self.pt_den = len(self.xg[:, 0])
            # substitute these into the equations, but keep them local
            str_x_l = self.str_x + ''
            str_y_l = self.str_y + ''
            str_x_l = str_x_l.replace('x', '(self.xg)')
            str_x_l = str_x_l.replace('y', '(self.yg)')
            str_y_l = str_y_l.replace('x', '(self.xg)')
            str_y_l = str_y_l.replace('y', '(self.yg)')
            # check against constant field components:
            if str_x_l.find('x') == -1 and str_x_l.find('y') == -1:
                str_x_l = '(' + str(str_x_l) + ')* np.ones(np.shape(self.xg))'
            if str_y_l.find('x') == -1 and str_y_l.find('y') == -1:
                str_y_l = '(' + str(str_y_l) + ')* np.ones(np.shape(self.yg))'
            # re-evaluate the field numerically
            self.F_x = eval(str_x_l)
            self.F_y = eval(str_y_l)
    # define a method to plot the vector field using quiver
    def plot(self, axis):
        '''
        Finalises the plotting
        Uses the attributes of the object as set originally
        and as customised with methods
        to create a quiver plot of the VF.
        
        Takes in 1 argument:
        --- axis - matplotlib axes instance, plots on these
        
        No Returns
        '''
        # get the lengths of x and y from their grids
        x_len = len(self.xg[:, 0])
        y_len = len(self.yg[0, :])
        # Extract L from the x and y grids
        Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
        Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
        L = 0.5*(Lx + Ly)
        x0 = self.xg[0, 0] + Lx
        y0 = self.yg[0, 0] + Ly
        # reset axis limits
        ax_Lx = Lx + Lx/self.delta_factor
        ax_Ly = Ly + Ly/self.delta_factor
        axis.set_xlim(-ax_Lx + x0, ax_Lx + x0)
        axis.set_ylim(-ax_Ly + y0, ax_Ly + y0)
        # for arrows to work with nans and infs, make local copies of
        # F_x and F_y so that the originals are not altered globally
        F_x_local = self.F_x * 1
        F_y_local = self.F_y * 1
        # prevent any magnitudes from being inf or nan;
        # need to do it to u and v, not just mag.
        # find the distance between neighbouring points on the grid
        dist_points = self.xg[0, 1] - self.xg[0, 0]
        # deal with infs and nans in the components
        isnan_arrx = np.isnan(F_x_local)
        isnan_arry = np.isnan(F_y_local)
        for i in range(x_len):
            for j in range(y_len):
                # set to zero points that are not defined
                if isnan_arrx[i, j] or isnan_arry[i, j]:
                    # colour this region as a shaded square
                    rect = patch.Rectangle((self.xg[i, j] - dist_points/2, self.yg[i, j] - dist_points/2), dist_points, dist_points, color='#B5B5B5')
                    axis.add_patch(rect)
                    F_x_local[i, j] = F_y_local[i, j] = 0
                if abs(F_x_local[i, j]) == np.inf or abs(F_y_local[i, j]) == np.inf or abs(F_y_local[i, j]) > 1e15 or abs(F_x_local[i, j]) > 1e15:
                    # colour this point as a big red dot
                    circ = patch.Circle((self.xg[i, j], self.yg[i, j]), Lx*0.05/3, color='red')
                    axis.add_patch(circ)
                    F_x_local[i, j] = F_y_local[i, j] = 0
        # set all insignificant values to zero:
        F_x_local[np.abs(F_x_local) < 1e-15] = 0
        F_y_local[np.abs(F_y_local) < 1e-15] = 0
        # find the magnitude corresponding to each point and store in mag array
        mag = np.sqrt(F_x_local**2 + F_y_local**2)
        # find the maximum magnitude for scaling
        max_size = np.max(mag)  # careful with singularities, else ---> nan
        # Rescale components if log scaling is selected
        if self.logarithmic_scale_bool:
            mag1 = mag + 1
            unorm = F_x_local/mag1
            vnorm = F_y_local/mag1
            logmag = np.log10(mag1)
            F_x_local = unorm*logmag
            F_y_local = vnorm*logmag
            mag = np.sqrt(F_x_local**2 + F_y_local**2)
            max_size = np.max(mag)
        # deal with requested autoscaling
        if self.scale_bool is False:
            ScaleFactor = self.scale
        elif self.scale_bool is True:
            ScaleFactor = max_size/(0.9*(2*Lx/self.pt_den))
        # plot using matplotlib quiver
        axis.quiver(self.xg, self.yg, F_x_local, F_y_local, pivot=self.orientation, scale=ScaleFactor, scale_units='xy', color=self.color)
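    # ------------------------------------------------------------------
    # Usage sketch (illustrative only): build and plot a vector field.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   x = np.linspace(-2, 2, 21)
    #   xg, yg = np.meshgrid(x, x)
    #   vf = vector_field(xg, yg, yg, -xg, F_x_eqn='y', F_y_eqn='-x')
    #   fig, ax = plt.subplots()
    #   vf.plot(ax)          # quiver plot of the rotational field
    #   plt.show()
    # ------------------------------------------------------------------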
    def zoom(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3):
        '''
        Creates a new window which displays the field zoomed in at a
        certain point.
        
        User gives arguments:
        target - determines the zoom location, coordinates
        mag - +ve float, determines zooming amount
        dpd - +ve int, determines how many points on each axis
        inset - bool - if True, zoomed field is plotted on given axis
        axis - matplotlib axes instance - axis to plot on if inset is True
        insize - float - size of inset as fraction of total figure
        
        Returns:
        --------
        if inset is False: zoomed-in VF object
        if inset is True: inset axis and zoomed-in VF object, in this order.
        '''
        # Requires user to provide equations of the field they are zooming on.
        if self.str_x is None or self.str_y is None:
            raise TypeError('No equation provided')
        else:
            # Zoom must be one or greater
            if mag < 1:
                raise ValueError('Zoom must be greater than one')
            else:
                if insize > 1 or insize < 0:
                    raise ValueError('Insize must be +ve and less than one')
                else:
                    # If no inset, set the size of the zoom axis to allow normal plotting
                    if inset == False:
                        insize = 1
                    # Target coordinates
                    x_m = target[0]
                    y_m = target[1]
                    # Get the size of the original VF
                    Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
                    Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
                    # Zoom axis range
                    d_range_x = insize*Lx/mag
                    d_range_y = insize*Ly/mag
                    # Set up zoom window grids
                    dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd)
                    dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd)
                    dxg, dyg = np.meshgrid(dx, dy)
                    # Create variables for the user-provided equation strings
                    u_str = self.str_x
                    v_str = self.str_y
                    # Check if the equations provided contain x and y terms
                    if u_str.find('x') == -1 and u_str.find('y') == -1:
                        u_str = '(' + str(u_str) + ')* np.ones(np.shape(dxg))'
                    else:
                        u_str = u_str.replace('x', 'dxg')
                        u_str = u_str.replace('y', 'dyg')
                    if v_str.find('x') == -1 and v_str.find('y') == -1:
                        v_str = '(' + str(v_str) + ')* np.ones(np.shape(dyg))'
                    else:
                        v_str = v_str.replace('x', 'dxg')
                        v_str = v_str.replace('y', 'dyg')
                    # Generate arrays for the components of the zoom field
                    u_zoom = eval(u_str)
                    v_zoom = eval(v_str)
                    # from that, create a VF instance
                    zoom_vf = vector_field(dxg, dyg, u_zoom, v_zoom, self.str_x, self.str_y)
                    # Coordinates for plotting the inset axis
                    q = 1
                    xi = (q*x_m - self.xg[0, 0])/(2*Lx)
                    yi = (q*y_m - self.yg[0, 0])/(2*Ly)
                    # depending on preferences, return to user and plot
                    if inset == True:
                        if axis is not None:
                            # Create inset axis in the current axis, plot the
                            # zoomed field on it, and return the axis too,
                            # in case the user wants that.
                            zoom_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize])
                            zoom_vf.plot(zoom_inset_ax)
                            return zoom_inset_ax, zoom_vf
                        else:
                            raise ValueError('Cannot inset without supplied axis')
                    else:
                        # inset is False, just return the new zoomed-in instance
                        return zoom_vf
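    # ------------------------------------------------------------------
    # Usage sketch (illustrative only): zooming needs the equations, so
    # either construct the field with them or call give_eqn first.
    #
    #   zoomed = vf.zoom(target=[1, 0], mag=4, dpd=7, inset=False)
    #   ax_in, zoomed = vf.zoom(target=[1, 0], mag=4, dpd=7,
    #                           inset=True, axis=ax, insize=0.3)
    # ------------------------------------------------------------------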
    def deriv(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3):
        '''
        Creates a new vector field object at a target location,
        showing the derivative field at this point.
        
        User gives arguments:
        target - derivative plot location
        mag - magnification level
        dpd - new plot point density
        inset - bool - if True, field derivative is plotted on given axis
        axis - matplotlib axes instance - axis to plot on if inset is True
        insize - float - size of inset as fraction of total figure
        
        Returns:
        --------
        if inset is False: deriv VF object
        if inset is True: inset axis and deriv VF object, in this order.
        '''
        if self.str_x is None or self.str_y is None:
            raise TypeError('No equation provided')
        else:
            # Zoom must be one or greater
            if mag < 1:
                raise ValueError('Zoom must be greater than one')
            else:
                if insize > 1 or insize < 0:
                    raise ValueError('Insize must be +ve and less than one')
                else:
                    # If no inset, set the size of the zoom axis to allow normal plotting
                    if inset == False:
                        insize = 1
                    # Target coordinates
                    x_m = target[0]
                    y_m = target[1]
                    # Get the size of the original VF
                    Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
                    Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
                    # Zoom axis range
                    d_range_x = insize*Lx/mag
                    d_range_y = insize*Ly/mag
                    # Set up zoom window grids
                    dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd)
                    dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd)
                    dxg, dyg = np.meshgrid(dx, dy)
                    # Create variables for the user-provided equation strings
                    u_str = self.str_x
                    v_str = self.str_y
                    # Create strings to evaluate the field at the target location
                    u_str_point = u_str.replace('x', 'x_m')
                    u_str_point = u_str_point.replace('y', 'y_m')
                    v_str_point = v_str.replace('x', 'x_m')
                    v_str_point = v_str_point.replace('y', 'y_m')
                    # Check if the equations provided contain x and y terms
                    if u_str.find('x') == -1 and u_str.find('y') == -1:
                        u_str_grid = '(' + str(u_str) + ')* np.ones(np.shape(dxg))'
                    else:
                        u_str_grid = u_str.replace('x', 'dxg')
                        u_str_grid = u_str_grid.replace('y', 'dyg')
                    if v_str.find('x') == -1 and v_str.find('y') == -1:
                        v_str_grid = '(' + str(v_str) + ')* np.ones(np.shape(dyg))'
                    else:
                        v_str_grid = v_str.replace('x', 'dxg')
                        v_str_grid = v_str_grid.replace('y', 'dyg')
                    # Generate arrays for the components of the derivative field
                    U = eval(u_str_grid) - eval(u_str_point)
                    V = eval(v_str_grid) - eval(v_str_point)
                    # from that, create a VF instance
                    deriv_vf = vector_field(dxg, dyg, U, V, self.str_x, self.str_y)
                    # Coordinates for plotting the inset axis
                    q = 1
                    xi = (q*x_m - self.xg[0, 0])/(2*Lx)
                    yi = (q*y_m - self.yg[0, 0])/(2*Ly)
                    # depending on preferences, return to user and plot
                    if inset == True:
                        if axis is not None:
                            # Create inset axis in the current axis, plot the
                            # derivative field on it, and return the axis too,
                            # in case the user wants that.
                            deriv_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize])
                            deriv_vf.plot(deriv_inset_ax)
                            return deriv_inset_ax, deriv_vf
                        else:
                            raise ValueError('Cannot inset without supplied axis')
                    else:
                        # inset is False, just return the new instance
                        return deriv_vf
    def div(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3):
        '''
        Creates a new vector field object at a target location,
        showing the divergence of the field at this point.
        
        User gives arguments:
        target - plot location
        mag - magnification level
        dpd - new plot point density
        inset - bool - if True, field div is plotted on given axis
        axis - matplotlib axes instance - axis to plot on if inset is True
        insize - float - size of inset as fraction of total figure
        
        Returns:
        --------
        if inset is False: div VF object
        if inset is True: inset axis and div VF object, in this order.
        '''
        if self.str_x is None or self.str_y is None:
            raise TypeError('No equation provided')
        else:
            # Zoom must be one or greater
            if mag < 1:
                raise ValueError('Zoom must be greater than one')
            else:
                if insize > 1 or insize < 0:
                    raise ValueError('Insize must be +ve and less than one')
                else:
                    # If no inset, set the size of the zoom axis to allow normal plotting
                    if inset == False:
                        insize = 1
                    # Target coordinates
                    x_m = target[0]
                    y_m = target[1]
                    # Get the size of the original VF
                    Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
                    Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
                    # Zoom axis range
                    d_range_x = insize*Lx/mag
                    d_range_y = insize*Ly/mag
                    # Set up zoom window grids
                    dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd)
                    dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd)
                    dxg, dyg = np.meshgrid(dx, dy)
                    # Create variables for the user-provided equation strings
                    u_str = self.str_x
                    v_str = self.str_y
                    # Create strings to evaluate the field at the target location
                    u_str_point = u_str.replace('x', 'x_m')
                    u_str_point = u_str_point.replace('y', 'y_m')
                    v_str_point = v_str.replace('x', 'x_m')
                    v_str_point = v_str_point.replace('y', 'y_m')
                    # Check if the equations provided contain x and y terms
                    if u_str.find('x') == -1 and u_str.find('y') == -1:
                        u_str_grid = '(' + str(u_str) + ')* np.ones(np.shape(dxg))'
                    else:
                        u_str_grid = u_str.replace('x', 'dxg')
                        u_str_grid = u_str_grid.replace('y', 'dyg')
                    if v_str.find('x') == -1 and v_str.find('y') == -1:
                        v_str_grid = '(' + str(v_str) + ')* np.ones(np.shape(dyg))'
                    else:
                        v_str_grid = v_str.replace('x', 'dxg')
                        v_str_grid = v_str_grid.replace('y', 'dyg')
                    # Generate arrays for the components of the derivative field
                    U = eval(u_str_grid) - eval(u_str_point)
                    V = eval(v_str_grid) - eval(v_str_point)
                    # =========================================================
                    # Geometric Divergence Method - See Documentation
                    # =========================================================
                    U_div = np.zeros(shape=(dpd, dpd))
                    V_div = np.zeros(shape=(dpd, dpd))
                    # Looping constant
                    N = dpd - 1
                    # get number of points in quadrant
                    if dpd % 2 == 1:
                        quad_x = int(dpd/2)
                        quad_y = int((dpd+1)/2)
                    else:
                        quad_x = int(dpd/2)
                        quad_y = int(dpd/2)
                    for i in range(quad_x):
                        # get the l number, for projection of j on radial / i on tangent
                        l = i - 0.5*N
                        # INNER LOOP
                        for j in range(quad_y):
                            # get the k number of projection: i on radial / j on tangent
                            k = j - 0.5*N
                            # get the commuting parts of V and W for each square corner
                            # (x and y components of the subtracted field)
                            U_comm_1 = 0.25*(2*U[i, j] + V[j, N-i] - V[N-j, i])
                            U_comm_2 = 0.25*(2*U[j, N-i] + V[N-i, N-j] - V[i, j])
                            U_comm_3 = 0.25*(2*U[N-i, N-j] + V[N-j, i] - V[j, N-i])
                            U_comm_4 = 0.25*(2*U[N-j, i] + V[i, j] - V[N-i, N-j])
                            V_comm_1 = 0.25*(2*V[i, j] - U[j, N-i] + U[N-j, i])
                            V_comm_2 = 0.25*(2*V[j, N-i] - U[N-i, N-j] + U[i, j])
                            V_comm_3 = 0.25*(2*V[N-i, N-j] - U[N-j, i] + U[j, N-i])
                            V_comm_4 = 0.25*(2*V[N-j, i] - U[i, j] + U[N-i, N-j])
                            # get a normalisation factor from l and k
                            A = k**2 + l**2
                            U_div[i, j] = (U_comm_1*k + V_comm_1*l)*k/A
                            V_div[i, j] = (U_comm_1*k + V_comm_1*l)*l/A
                            U_div[j, N-i] = (U_comm_2*l + V_comm_2*(-k))*l/A
                            V_div[j, N-i] = (U_comm_2*l + V_comm_2*(-k))*(-k)/A
                            U_div[N-i, N-j] = (U_comm_3*(-k) + V_comm_3*(-l))*(-k)/A
                            V_div[N-i, N-j] = (U_comm_3*(-k) + V_comm_3*(-l))*(-l)/A
                            U_div[N-j, i] = (U_comm_4*(-l) + V_comm_4*k)*(-l)/A
                            V_div[N-j, i] = (U_comm_4*(-l) + V_comm_4*k)*k/A
                    # from that, create a VF instance
                    div_vf = vector_field(dxg, dyg, U_div, V_div, self.str_x, self.str_y)
                    # Coordinates for plotting the inset axis
                    q = 1
                    xi = (q*x_m - self.xg[0, 0])/(2*Lx)
                    yi = (q*y_m - self.yg[0, 0])/(2*Ly)
                    # depending on preferences, return to user and plot
                    if inset == True:
                        if axis is not None:
                            # Create inset axis in the current axis, plot the
                            # divergence field on it, and return the axis too,
                            # in case the user wants that.
                            div_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize])
                            div_vf.plot(div_inset_ax)
                            return div_inset_ax, div_vf
                        else:
                            raise ValueError('Cannot inset without supplied axis')
                    else:
                        # inset is False, just return the new instance
                        return div_vf
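    # ------------------------------------------------------------------
    # Note on the geometric method used above and in curl() below: the
    # field around the target is first reduced to its "commuting" part
    # at four symmetric corners of the window, then projected onto the
    # radial direction (for div) or the tangential direction (for curl),
    # using k and l as the components of the corner position and
    # A = k**2 + l**2 as the normalisation.
    # ------------------------------------------------------------------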
    def curl(self, target=[0, 0], mag=2, dpd=9, inset=True, axis=None, insize=0.3):
        '''
        Creates a new vector field object at a target location,
        showing local rotation (curl).
        
        User gives arguments:
        target - plot location
        mag - magnification level
        dpd - new plot point density
        inset - bool - if True, field curl is plotted on given axis
        axis - matplotlib axes instance - axis to plot on if inset is True
        insize - float - size of inset as fraction of total figure
        
        Returns:
        --------
        if inset is False: curl VF object
        if inset is True: inset axis and curl VF object, in this order.
        '''
        if self.str_x is None or self.str_y is None:
            raise TypeError('No equation provided')
        else:
            # Zoom must be one or greater
            if mag < 1:
                raise ValueError('Zoom must be greater than one')
            else:
                if insize > 1 or insize < 0:
                    raise ValueError('Insize must be +ve and less than one')
                else:
                    # If no inset, set the size of the zoom axis to allow normal plotting
                    if inset == False:
                        insize = 1
                    # Target coordinates
                    x_m = target[0]
                    y_m = target[1]
                    # Get the size of the original VF
                    Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
                    Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
                    # Zoom axis range
                    d_range_x = insize*Lx/mag
                    d_range_y = insize*Ly/mag
                    # Set up zoom window grids
                    dx = np.linspace(-d_range_x + x_m, d_range_x + x_m, dpd)
                    dy = np.linspace(-d_range_y + y_m, d_range_y + y_m, dpd)
                    dxg, dyg = np.meshgrid(dx, dy)
                    # Create variables for the user-provided equation strings
                    u_str = self.str_x
                    v_str = self.str_y
                    # Create strings to evaluate the field at the target location
                    u_str_point = u_str.replace('x', 'x_m')
                    u_str_point = u_str_point.replace('y', 'y_m')
                    v_str_point = v_str.replace('x', 'x_m')
                    v_str_point = v_str_point.replace('y', 'y_m')
                    # Check if the equations provided contain x and y terms
                    if u_str.find('x') == -1 and u_str.find('y') == -1:
                        u_str_grid = '(' + str(u_str) + ')* np.ones(np.shape(dxg))'
                    else:
                        u_str_grid = u_str.replace('x', 'dxg')
                        u_str_grid = u_str_grid.replace('y', 'dyg')
                    if v_str.find('x') == -1 and v_str.find('y') == -1:
                        v_str_grid = '(' + str(v_str) + ')* np.ones(np.shape(dyg))'
                    else:
                        v_str_grid = v_str.replace('x', 'dxg')
                        v_str_grid = v_str_grid.replace('y', 'dyg')
                    # Generate arrays for the components of the derivative field
                    U = eval(u_str_grid) - eval(u_str_point)
                    V = eval(v_str_grid) - eval(v_str_point)
                    # =========================================================
                    # Geometric Curl Method - See Documentation
                    # =========================================================
                    U_curl = np.zeros(shape=(dpd, dpd))
                    V_curl = np.zeros(shape=(dpd, dpd))
                    # Looping constant
                    N = dpd - 1
                    # Quadrant points
                    if dpd % 2 == 1:
                        quad_x = int(dpd/2)
                        quad_y = int((dpd+1)/2)
                    else:
                        quad_x = int(dpd/2)
                        quad_y = int(dpd/2)
                    for i in range(quad_x):
                        # get the l number, for projection of j on radial / i on tangent
                        l = i - 0.5*N
                        # INNER LOOP
                        for j in range(quad_y):
                            # get the k number of projection: i on radial / j on tangent
                            k = j - 0.5*N
                            # get the commuting parts of V and W for each square corner
                            # (x and y components of the subtracted field)
                            U_comm_1 = 0.25*(2*U[i, j] + V[j, N-i] - V[N-j, i])
                            U_comm_2 = 0.25*(2*U[j, N-i] + V[N-i, N-j] - V[i, j])
                            U_comm_3 = 0.25*(2*U[N-i, N-j] + V[N-j, i] - V[j, N-i])
                            U_comm_4 = 0.25*(2*U[N-j, i] + V[i, j] - V[N-i, N-j])
                            V_comm_1 = 0.25*(2*V[i, j] - U[j, N-i] + U[N-j, i])
                            V_comm_2 = 0.25*(2*V[j, N-i] - U[N-i, N-j] + U[i, j])
                            V_comm_3 = 0.25*(2*V[N-i, N-j] - U[N-j, i] + U[j, N-i])
                            V_comm_4 = 0.25*(2*V[N-j, i] - U[i, j] + U[N-i, N-j])
                            # get a normalisation factor from l and k
                            A = k**2 + l**2
                            U_curl[i, j] = (U_comm_1*l + V_comm_1*(-k))*l/A
                            V_curl[i, j] = (U_comm_1*l + V_comm_1*(-k))*(-k)/A
                            U_curl[j, N-i] = (U_comm_2*(-k) + V_comm_2*(-l))*(-k)/A
                            V_curl[j, N-i] = (U_comm_2*(-k) + V_comm_2*(-l))*(-l)/A
                            U_curl[N-i, N-j] = (U_comm_3*(-l) + V_comm_3*k)*(-l)/A
                            V_curl[N-i, N-j] = (U_comm_3*(-l) + V_comm_3*k)*k/A
                            U_curl[N-j, i] = (U_comm_4*k + V_comm_4*l)*k/A
                            V_curl[N-j, i] = (U_comm_4*k + V_comm_4*l)*l/A
                    # from that, create a VF instance
                    curl_vf = vector_field(dxg, dyg, U_curl, V_curl, self.str_x, self.str_y)
                    # Coordinates for plotting the inset axis
                    q = 1
                    xi = (q*x_m - self.xg[0, 0])/(2*Lx)
                    yi = (q*y_m - self.yg[0, 0])/(2*Ly)
                    # depending on preferences, return to user and plot
                    if inset == True:
                        if axis is not None:
                            # Create inset axis in the current axis, plot the
                            # curl field on it, and return the axis too,
                            # in case the user wants that.
                            curl_inset_ax = axis.inset_axes([(xi - 0.5*insize), (yi - 0.5*insize), insize, insize])
                            curl_vf.plot(curl_inset_ax)
                            return curl_inset_ax, curl_vf
                        else:
                            raise ValueError('Cannot inset without supplied axis')
                    else:
                        # inset is False, just return the new instance
                        return curl_vf
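    # ------------------------------------------------------------------
    # Reasoning note for covariant() below: lowering an index with the
    # metric g turns V = (F_x, F_y) into the 1-form with components
    #     w_x = g_11*F_x + g_12*F_y,   w_y = g_21*F_x + g_22*F_y,
    # which is exactly how form_x and form_y are assembled in the code.
    # ------------------------------------------------------------------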
Returns a single object (1-form object) ''' # extract what is needed form the metric depending on what the user # supplied # check if its has string components if type(g[0][0]) == str and type(g[0][1]) == str and type(g[1][0]) == str and type(g[1][1]) == str: # deal with supplied string metric # need to format it, correct it for constants and evaluate it's numerical equivalent str_comp_00 = g[0][0] + '' str_comp_01 = g[0][1] + '' str_comp_10 = g[1][0] + '' str_comp_11 = g[1][1] + '' str_comp_00 = str_comp_00.replace('x', '(self.xg)') str_comp_00 = str_comp_00.replace('y', '(self.yg)') str_comp_01 = str_comp_01.replace('x', '(self.xg)') str_comp_01 = str_comp_01.replace('y', '(self.yg)') str_comp_10 = str_comp_10.replace('x', '(self.xg)') str_comp_10 = str_comp_10.replace('y', '(self.yg)') str_comp_11 = str_comp_11.replace('x', '(self.xg)') str_comp_11 = str_comp_11.replace('y', '(self.yg)') # check against constant form components: if str_comp_00.find('x') & str_comp_00.find('y') == -1: str_comp_00 = '(' + str(str_comp_00) + ')* np.ones(np.shape(self.xg))' if str_comp_01.find('x') & str_comp_01.find('y') == -1: str_comp_01 = '(' + str(str_comp_01) + ')* np.ones(np.shape(self.yg))' if str_comp_10.find('x') & str_comp_10.find('y') == -1: str_comp_10 = '(' + str(str_comp_10) + ')* np.ones(np.shape(self.yg))' if str_comp_11.find('x') & str_comp_11.find('y') == -1: str_comp_11 = '(' + str(str_comp_11) + ')* np.ones(np.shape(self.yg))' # evaluate the components numerically, inputting them into a # store numerical metric comp_00 = eval(str_comp_00) comp_01 = eval(str_comp_01) comp_10 = eval(str_comp_10) comp_11 = eval(str_comp_11) g_num = [[comp_00, comp_01], [comp_10, comp_11]] # set up a dummy variable to store the fact that numericals were given # not to check again later analytics = True elif type(g[0][0]) == np.ndarray and type(g[0][1]) == np.ndarray and type(g[1][0]) == np.ndarray and type(g[1][1]) == np.ndarray: # deal with the metric being supplied as components # if the user has vector field equations, warn that these can't # be passed anymore, because we don't have equations for this # metric if self.str_x == None and self.str_y == None: pass else: print('The Vector field has equations, but the metric does not, these will be lost and the resulting 1-form will only have numerical values, not equations supplied') # No need to do anythng more to the metric, upto the user to make sure its # correctly sized, as with other code in this library # just rename the metric here g_num = g # set up a dummy variable to store the fact that numericals were # not given, not to check again later analytics = False else: # Inconsistant metric components raise TypeError('Metric components are inconsistent') # from vector field components, get 1-form components by the metric # first, do so numerically, as this must always happen form_x = self.F_x * g_num[0][0] + self.F_y * g_num[0][1] form_y = self.F_y * g_num[1][1] + self.F_x * g_num[1][0] # if the equations were given, evaluate these analytically too: # only if vector file doriginally has equations if analytics: if self.str_x == None and self.str_y == None: print('You supplied the metric as equations (or it was default), but did not give VF equations, therefore only numericals will be completed') analytics = False else: x_str_form = '(' + self.str_x + ')*(' + g[0][0] + ') + (' + self.str_y + ')*(' + g[0][1] + ')' y_str_form = '(' + self.str_y + ')*(' + g[1][1] + ') + (' + self.str_x + ')*(' + g[1][0] + ')' # simplify them x_str_form = 
str(simplify(x_str_form)) y_str_form = str(simplify(y_str_form)) else: pass # based on what was given into the Vector field # return a 1-form object with these parameters if analytics: result_form = form_1(self.xg, self.yg, form_x, form_y, x_str_form, y_str_form) elif not analytics: result_form = form_1(self.xg, self.yg, form_x, form_y) # return the found object return result_form
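A minimal usage sketch for the methods above, assuming numpy and matplotlib are imported under their usual aliases and that the vector_field constructor takes (xg, yg, F_x, F_y, str_x, str_y), as the vector_field(dxg, dyg, U_curl, V_curl, ...) call inside curl implies; the example field is a rigid rotation, whose curl is uniform:

import numpy as np
import matplotlib.pyplot as plt

# grid and a rigid-rotation field F = (-y, x)
v = np.linspace(-5, 5, 21)
xg, yg = np.meshgrid(v, v)
vf = vector_field(xg, yg, -yg, xg, '-y', 'x')

fig, ax = plt.subplots()
vf.plot(ax)  # plot(axis), the signature used by curl() above

# inset a local-curl plot at (1, 1), then lower the index with the default metric
curl_ax, curl_vf = vf.curl(target=(1, 1), mag=2, dpd=9, inset=True, axis=ax)
one_form = vf.covariant()  # identity metric by default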
44.83481
189
0.522499
25,569
187,275
3.6784
0.039227
0.012376
0.00672
0.009399
0.812223
0.788056
0.762751
0.741263
0.723879
0.701849
0
0.022455
0.37863
187,275
4,176
190
44.845546
0.78579
0.367294
0
0.680682
0
0.003409
0.093544
0.01378
0
0
0
0
0
1
0.042614
false
0.011364
0.005682
0
0.077273
0.015341
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
38e8f66a1fc1b29210c87a2a1d25cbc48f598413
213
py
Python
seahub/share/settings.py
evrimguner/seahub
65d5d4fd78fee3cca9fe86b4fe3c9b0240d2e1e8
[ "Apache-2.0" ]
1
2019-06-25T06:52:58.000Z
2019-06-25T06:52:58.000Z
seahub/share/settings.py
vigossjjj/seahub
9960918b7689c9011129a57436400aed4f545546
[ "Apache-2.0" ]
null
null
null
seahub/share/settings.py
vigossjjj/seahub
9960918b7689c9011129a57436400aed4f545546
[ "Apache-2.0" ]
null
null
null
from django.conf import settings

ANONYMOUS_SHARE_COOKIE_TIMEOUT = getattr(settings, 'ANONYMOUS_SHARE_COOKIE_TIMEOUT', 24*60*60)
ANONYMOUS_SHARE_LINK_TIMEOUT = getattr(settings, 'ANONYMOUS_SHARE_LINK_TIMEOUT', 2)
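These module-level getattr calls implement Django's soft-settings pattern: a deployment may define either name in its project settings to override the default, and this module falls back otherwise. A minimal sketch of an override, with hypothetical values (the unit of ANONYMOUS_SHARE_LINK_TIMEOUT is whatever the consuming code assumes; the default here is 2):

# in the project's settings.py (hypothetical override values)
ANONYMOUS_SHARE_COOKIE_TIMEOUT = 60*60  # shorten the cookie lifetime to one hour
ANONYMOUS_SHARE_LINK_TIMEOUT = 7        # extend the link timeout from its default of 2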
42.6
94
0.849765
29
213
5.827586
0.482759
0.331361
0.390533
0.331361
0.668639
0
0
0
0
0
0
0.035354
0.070423
213
4
95
53.25
0.818182
0
0
0
0
0
0.2723
0.2723
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
2a3609dc2e61df5b5400711506d7fbe384658f44
1,116
py
Python
backend/Backendapi/douban/models.py
f0rdream/SkyRead
798b4dd35b7e6be41e5fed4537d3f6034d20494e
[ "MIT" ]
null
null
null
backend/Backendapi/douban/models.py
f0rdream/SkyRead
798b4dd35b7e6be41e5fed4537d3f6034d20494e
[ "MIT" ]
null
null
null
backend/Backendapi/douban/models.py
f0rdream/SkyRead
798b4dd35b7e6be41e5fed4537d3f6034d20494e
[ "MIT" ]
null
null
null
# coding:utf-8
from django.db import models


# isbn13:9787111013853
class Comment(models.Model):
    """Comment model"""
    isbn13 = models.CharField(max_length=200, default=None)
    author = models.CharField(max_length=200, null=True, blank=True, default=None)
    time = models.CharField(max_length=200, null=True, blank=True, default=None)
    star = models.IntegerField(default=None)
    vote = models.CharField(max_length=200, null=True, blank=True, default=None)
    content = models.TextField(default=None)

    def __unicode__(self):
        return self.isbn13


class Reading(models.Model):
    """Reading-guide model"""
    isbn13 = models.CharField(max_length=200, default=None)
    title = models.TextField(default=None)
    note = models.TextField(default=None)

    def __unicode__(self):
        return self.title


class Review(models.Model):
    """Book review model"""
    isbn13 = models.CharField(max_length=200, default=None)
    title = models.TextField(default=None)
    author = models.TextField(default=None)
    content = models.TextField(default=None)

    def __unicode__(self):
        return self.title
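A minimal ORM usage sketch for these models, with hypothetical data; it assumes the app is installed and migrations have been applied:

# create and query a comment for the ISBN noted in the header comment
Comment.objects.create(isbn13='9787111013853', author='reader', time='2017-01-01',
                       star=5, vote='12', content='...')
comments = Comment.objects.filter(isbn13='9787111013853')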
26.571429
79
0.692652
138
1,116
5.471014
0.289855
0.189404
0.143046
0.190728
0.72053
0.72053
0.72053
0.72053
0.662252
0.662252
0
0.046154
0.184588
1,116
41
80
27.219512
0.783516
0.043907
0
0.521739
0
0
0
0
0
0
0
0
0
1
0.130435
false
0
0.043478
0.130435
1
0
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
6
aa666c11ebc99dc4d53c1def96bb77a4dbcac853
26
py
Python
autodiff/__init__.py
zhuzilin/autodiff
56e167eabf3e870d1a4c9b6ee6583720c2626e3a
[ "MIT" ]
25
2020-12-17T12:50:04.000Z
2021-02-23T03:03:39.000Z
autodiff/__init__.py
zhuzilin/autodiff
56e167eabf3e870d1a4c9b6ee6583720c2626e3a
[ "MIT" ]
5
2021-02-02T22:47:50.000Z
2022-03-12T00:34:44.000Z
autodiff/__init__.py
zhuzilin/autodiff
56e167eabf3e870d1a4c9b6ee6583720c2626e3a
[ "MIT" ]
30
2020-12-31T13:31:56.000Z
2021-01-31T07:12:24.000Z
from .tensor import Tensor
26
26
0.846154
4
26
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2afd5ab82731221d92339edf6fde3ff0042c7012
86
py
Python
pytgcalls/types/list.py
fadhil-riyanto/radiovc
fe02a01ce10b93775fce8c569f6062d71b07b4d4
[ "MIT" ]
null
null
null
pytgcalls/types/list.py
fadhil-riyanto/radiovc
fe02a01ce10b93775fce8c569f6062d71b07b4d4
[ "MIT" ]
null
null
null
pytgcalls/types/list.py
fadhil-riyanto/radiovc
fe02a01ce10b93775fce8c569f6062d71b07b4d4
[ "MIT" ]
null
null
null
from pytgcalls.types.py_object import PyObject


class List(list, PyObject):
    pass
14.333333
46
0.767442
12
86
5.416667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.162791
86
5
47
17.2
0.902778
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
6318dba21bac96bc536640404505201535501232
103
py
Python
chronon/core/__init__.py
McLarenAppliedTechnologies/chronon
f38307c4341f61c4896fb778692a0916876b998b
[ "MIT" ]
2
2020-12-14T11:58:24.000Z
2021-08-09T22:33:26.000Z
chronon/core/__init__.py
McLarenAppliedTechnologies/chronon
f38307c4341f61c4896fb778692a0916876b998b
[ "MIT" ]
1
2021-02-03T15:41:27.000Z
2021-02-03T15:41:27.000Z
chronon/core/__init__.py
McLarenAppliedTechnologies/chronon
f38307c4341f61c4896fb778692a0916876b998b
[ "MIT" ]
null
null
null
# flake8: noqa
from .event import *
from .process import *
from .resource import *
from .user import *
17.166667
23
0.718447
14
103
5.285714
0.571429
0.405405
0
0
0
0
0
0
0
0
0
0.011905
0.184466
103
5
24
20.6
0.869048
0.116505
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
63288f0a4de57c3c22459a686b55b3f613827cf3
57
py
Python
examples/pkg1/test_mod2.py
altendky/pytest-monitor
0cf89d0093e6b01788067e146c5b4b9c72d34e0c
[ "MIT" ]
136
2020-02-13T09:47:49.000Z
2022-03-29T10:36:18.000Z
examples/pkg1/test_mod2.py
altendky/pytest-monitor
0cf89d0093e6b01788067e146c5b4b9c72d34e0c
[ "MIT" ]
42
2020-03-07T14:24:13.000Z
2022-03-18T13:59:38.000Z
examples/pkg1/test_mod2.py
altendky/pytest-monitor
0cf89d0093e6b01788067e146c5b4b9c72d34e0c
[ "MIT" ]
29
2020-02-25T19:09:23.000Z
2022-03-21T12:26:22.000Z
import time

def test_sleep_400ms():
    time.sleep(0.4)
11.4
23
0.701754
10
57
3.8
0.8
0
0
0
0
0
0
0
0
0
0
0.106383
0.175439
57
4
24
14.25
0.702128
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
2d607a044b164e1e96c79932eb74219cdf781c9f
24
py
Python
src/commit/__init__.py
easygittool/EasyGitTool
55ce8aaa6756715e864afdfb3b420d62eef84437
[ "Apache-2.0" ]
1
2019-02-09T11:18:29.000Z
2019-02-09T11:18:29.000Z
src/commit/__init__.py
easygittool/EasyGitTool
55ce8aaa6756715e864afdfb3b420d62eef84437
[ "Apache-2.0" ]
null
null
null
src/commit/__init__.py
easygittool/EasyGitTool
55ce8aaa6756715e864afdfb3b420d62eef84437
[ "Apache-2.0" ]
null
null
null
from src.commit import *
24
24
0.791667
4
24
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2da212e68ea17ed3faba3550b1d035e2ad8e8130
28
py
Python
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/ApplicationModel/AppService/__init__.py
Carlosgm02/UWP-Languages
b5653c8f452b204645e3b6276caa95de2432f77e
[ "MIT" ]
6
2019-10-30T08:41:15.000Z
2021-02-24T09:20:46.000Z
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/ApplicationModel/AppService/__init__.py
carlosgm02/uwp-languages
b5653c8f452b204645e3b6276caa95de2432f77e
[ "MIT" ]
null
null
null
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/ApplicationModel/AppService/__init__.py
carlosgm02/uwp-languages
b5653c8f452b204645e3b6276caa95de2432f77e
[ "MIT" ]
null
null
null
from _app_service import *
14
27
0.785714
4
28
5
1
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
1
28
28
0.869565
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6