Column schema for the records that follow (column name and value type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
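A dump in this shape is easier to query programmatically than to read row by row. Below is a minimal sketch of loading it and filtering on a few of the columns above; the Parquet format and the file name `code_sample.parquet` are assumptions, so substitute whatever container the export actually uses.

```python
# Minimal sketch: load the dump and filter on a few quality signals.
# Assumption: the rows are stored as Parquet in "code_sample.parquet"
# (hypothetical name) with the columns listed in the schema above.
import pandas as pd

df = pd.read_parquet("code_sample.parquet")

# Keep modestly sized Python files with mostly alphanumeric content and
# little line-level duplication; the thresholds are illustrative only.
mask = (
    (df["lang"] == "Python")
    & (df["size"].between(200, 100_000))
    & (df["alphanum_fraction"] > 0.4)
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.6)
)
print(f"kept {mask.sum()} of {len(df)} records")
```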
Record 1: ironicclient/tests/unit/v1/test_allocation.py (repo ljmcgann/python-ironicclient)

| Field | Value |
|---|---|
| hexsha | f8626522d55b3754f7c28ddbfd44245ded575b28 |
| size | 11,950 |
| ext | py |
| lang | Python |
| max_stars_repo_path | ironicclient/tests/unit/v1/test_allocation.py |
| max_stars_repo_name | ljmcgann/python-ironicclient |
| max_stars_repo_head_hexsha | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 41 |
| max_stars_repo_stars_event_min_datetime | 2015-01-29T20:10:48.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-01-26T10:04:28.000Z |
| max_issues_repo_path | ironicclient/tests/unit/v1/test_allocation.py |
| max_issues_repo_name | ljmcgann/python-ironicclient |
| max_issues_repo_head_hexsha | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | ironicclient/tests/unit/v1/test_allocation.py |
| max_forks_repo_name | ljmcgann/python-ironicclient |
| max_forks_repo_head_hexsha | a5485dc29fe551e4cb5feaad52cd93d67b0ab53e |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 46 |
| max_forks_repo_forks_event_min_datetime | 2015-01-19T17:46:52.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-12-19T01:22:47.000Z |

content (Python source):

```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import testtools
from ironicclient import exc
from ironicclient.tests.unit import utils
import ironicclient.v1.allocation
ALLOCATION = {'uuid': '11111111-2222-3333-4444-555555555555',
'name': 'Allocation-name',
'owner': None,
'state': 'active',
'node_uuid': '66666666-7777-8888-9999-000000000000',
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
ALLOCATION2 = {'uuid': '55555555-4444-3333-2222-111111111111',
'name': 'Allocation2-name',
'owner': 'fake-owner',
'state': 'allocating',
'node_uuid': None,
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
CREATE_ALLOCATION = copy.deepcopy(ALLOCATION)
for field in ('state', 'node_uuid', 'last_error'):
del CREATE_ALLOCATION[field]
fake_responses = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION, ALLOCATION2]},
),
'POST': (
{},
CREATE_ALLOCATION,
),
},
'/v1/allocations/%s' % ALLOCATION['uuid']:
{
'GET': (
{},
ALLOCATION,
),
'DELETE': (
{},
None,
),
},
'/v1/allocations/?node=%s' % ALLOCATION['node_uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION]},
),
},
'/v1/allocations/?owner=%s' % ALLOCATION2['owner']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]},
),
},
}
fake_responses_pagination = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION],
"next": "http://127.0.0.1:6385/v1/allocations/?limit=1"}
),
},
'/v1/allocations/?limit=1':
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
'/v1/allocations/?marker=%s' % ALLOCATION['uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
}
fake_responses_sorting = {
'/v1/allocations/?sort_key=updated_at':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
'/v1/allocations/?sort_dir=desc':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
}
class AllocationManagerTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerTest, self).setUp()
self.api = utils.FakeAPI(fake_responses)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list(self):
allocations = self.mgr.list()
expect = [
('GET', '/v1/allocations', {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_by_node(self):
allocations = self.mgr.list(node=ALLOCATION['node_uuid'])
expect = [
('GET', '/v1/allocations/?node=%s' % ALLOCATION['node_uuid'], {},
None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_by_owner(self):
allocations = self.mgr.list(owner=ALLOCATION2['owner'])
expect = [
('GET', '/v1/allocations/?owner=%s' % ALLOCATION2['owner'], {},
None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION, ALLOCATION2]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_show(self):
allocation = self.mgr.get(ALLOCATION['uuid'])
expect = [
('GET', '/v1/allocations/%s' % ALLOCATION['uuid'], {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(ALLOCATION['uuid'], allocation.uuid)
self.assertEqual(ALLOCATION['name'], allocation.name)
self.assertEqual(ALLOCATION['owner'], allocation.owner)
self.assertEqual(ALLOCATION['node_uuid'], allocation.node_uuid)
self.assertEqual(ALLOCATION['state'], allocation.state)
self.assertEqual(ALLOCATION['resource_class'],
allocation.resource_class)
expected_resp = ({}, ALLOCATION,)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/%s'
% ALLOCATION['uuid']]['GET'])
def test_create(self):
allocation = self.mgr.create(**CREATE_ALLOCATION)
expect = [
('POST', '/v1/allocations', {}, CREATE_ALLOCATION),
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(allocation)
self.assertIn(
ALLOCATION,
self.api.responses['/v1/allocations']['GET'][1]['allocations'])
def test_delete(self):
allocation = self.mgr.delete(allocation_id=ALLOCATION['uuid'])
expect = [
('DELETE', '/v1/allocations/%s' % ALLOCATION['uuid'], {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(allocation)
expected_resp = ({}, ALLOCATION,)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/%s'
% ALLOCATION['uuid']]['GET'])
class AllocationManagerPaginationTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerPaginationTest, self).setUp()
self.api = utils.FakeAPI(fake_responses_pagination)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list_limit(self):
allocations = self.mgr.list(limit=1)
expect = [
('GET', '/v1/allocations/?limit=1', {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_marker(self):
allocations = self.mgr.list(marker=ALLOCATION['uuid'])
expect = [
('GET', '/v1/allocations/?marker=%s' % ALLOCATION['uuid'],
{}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(1, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
def test_allocations_list_pagination_no_limit(self):
allocations = self.mgr.list(limit=0)
expect = [
('GET', '/v1/allocations', {}, None),
('GET', '/v1/allocations/?limit=1', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = (
{}, {"next": "http://127.0.0.1:6385/v1/allocations/?limit=1",
"allocations": [ALLOCATION]},)
self.assertEqual(expected_resp,
self.api.responses['/v1/allocations']['GET'])
class AllocationManagerSortingTest(testtools.TestCase):
def setUp(self):
super(AllocationManagerSortingTest, self).setUp()
self.api = utils.FakeAPI(fake_responses_sorting)
self.mgr = ironicclient.v1.allocation.AllocationManager(self.api)
def test_allocations_list_sort_key(self):
allocations = self.mgr.list(sort_key='updated_at')
expect = [
('GET', '/v1/allocations/?sort_key=updated_at', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION2, ALLOCATION]},)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/?sort_key=updated_at']['GET'])
def test_allocations_list_sort_dir(self):
allocations = self.mgr.list(sort_dir='desc')
expect = [
('GET', '/v1/allocations/?sort_dir=desc', {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(2, len(allocations))
expected_resp = ({}, {"allocations": [ALLOCATION2, ALLOCATION]},)
self.assertEqual(
expected_resp,
self.api.responses['/v1/allocations/?sort_dir=desc']['GET'])
@mock.patch('time.sleep', autospec=True)
@mock.patch('ironicclient.v1.allocation.AllocationManager.get', autospec=True)
class AllocationWaitTest(testtools.TestCase):
def setUp(self):
super(AllocationWaitTest, self).setUp()
self.mgr = ironicclient.v1.allocation.AllocationManager(mock.Mock())
def _fake_allocation(self, state, error=None):
return mock.Mock(state=state, last_error=error)
def test_success(self, mock_get, mock_sleep):
allocations = [
self._fake_allocation('allocating'),
self._fake_allocation('allocating'),
self._fake_allocation('active'),
]
mock_get.side_effect = allocations
result = self.mgr.wait('alloc1')
self.assertIs(result, allocations[2])
self.assertEqual(3, mock_get.call_count)
self.assertEqual(2, mock_sleep.call_count)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
def test_error(self, mock_get, mock_sleep):
allocations = [
self._fake_allocation('allocating'),
self._fake_allocation('error'),
]
mock_get.side_effect = allocations
self.assertRaises(exc.StateTransitionFailed,
self.mgr.wait, 'alloc1')
self.assertEqual(2, mock_get.call_count)
self.assertEqual(1, mock_sleep.call_count)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
def test_timeout(self, mock_get, mock_sleep):
mock_get.return_value = self._fake_allocation('allocating')
self.assertRaises(exc.StateTransitionTimeout,
self.mgr.wait, 'alloc1', timeout=0.001)
mock_get.assert_called_with(
self.mgr, 'alloc1', os_ironic_api_version=None,
global_request_id=None)
```

Remaining columns for this record, in the column order of the schema above:

avg_line_length: 33.194444, max_line_length: 78, alphanum_fraction: 0.573138

qsc_*_quality_signal columns (qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal):
1,159 | 11,950 | 5.771355 | 0.155306 | 0.087457 | 0.039468 | 0.041112 | 0.6418 | 0.561519 | 0.471221 | 0.433548 | 0.415159 | 0.415159 | 0 | 0.026983 | 0.283598 | 11,950 | 359 | 79 | 33.286908 | 0.754351 | 0.04569 | 0 | 0.493243 | 0 | 0 | 0.16033 | 0.050575 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.064189 | false | 0 | 0.02027 | 0.003378 | 0.101351 | 0

qsc_* columns without the _quality_signal suffix (qsc_code_num_words through qsc_codepython_frac_lines_print):
0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 1, hits: 0
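As a rough cross-check of the simpler statistics above (avg_line_length, max_line_length, alphanum_fraction and the whitespace fraction), the sketch below recomputes them from a record's content string. The exact definitions behind the stored values are not documented here, so details such as newline handling and what counts as alphanumeric are assumptions.

```python
# Recompute a few simple signals from a record's `content` string.
# The precise upstream definitions (newline handling, alphanumeric test)
# are assumptions; results may differ slightly from the stored values.
def simple_signals(content: str) -> dict:
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines] or [0]
    n_chars = max(len(content), 1)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": sum(c.isalnum() for c in content) / n_chars,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        "num_lines": len(lines),
    }

# Usage: print(simple_signals(open("test_allocation.py").read()))
```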
Record 2: api/routes/auth.py (repo rit-sse/api)

| Field | Value |
|---|---|
| hexsha | f8629eacf541222ae1970586720f609c2d762f08 |
| size | 1,105 |
| ext | py |
| lang | Python |
| max_stars_repo_path | api/routes/auth.py |
| max_stars_repo_name | rit-sse/api |
| max_stars_repo_head_hexsha | 4dbd04db98284225510d9ae8249514be80d4706a |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2015-07-17T19:20:45.000Z |
| max_stars_repo_stars_event_max_datetime | 2015-07-17T19:20:45.000Z |
| max_issues_repo_path | api/routes/auth.py |
| max_issues_repo_name | rit-sse/api |
| max_issues_repo_head_hexsha | 4dbd04db98284225510d9ae8249514be80d4706a |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 33 |
| max_issues_repo_issues_event_min_datetime | 2015-07-18T02:31:51.000Z |
| max_issues_repo_issues_event_max_datetime | 2015-08-04T02:07:41.000Z |
| max_forks_repo_path | api/routes/auth.py |
| max_forks_repo_name | rit-sse/api |
| max_forks_repo_head_hexsha | 4dbd04db98284225510d9ae8249514be80d4706a |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 7 |
| max_forks_repo_forks_event_min_datetime | 2015-07-17T16:29:18.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-08-31T01:03:53.000Z |

content (Python source):

```python
from flask import session, redirect, url_for
from flask.json import jsonify
from api import app, oauth
from api import models
@app.route("/api/v2/login")
def _get_api_v2_login():
redirect_uri = url_for("_get_api_v2_redirect", _external=True)
return oauth.google.authorize_redirect(redirect_uri)
@app.route("/api/v2/redirect")
def _get_api_v2_redirect():
token = oauth.google.authorize_access_token()
user = oauth.google.parse_id_token(token)
session["user"] = user
return redirect("/api/v2/whoami")
@app.route("/api/v2/logout")
def _get_api_v2_logout():
session.pop("user", None)
return redirect("/")
@app.route("/api/v2/whoami")
def _get_api_v2_whoami():
if not "user" in session:
return jsonify({"error": "not logged in"})
return jsonify(
{
"google": session["user"],
"officer": models.Officer.is_officer(session["user"]["email"]),
"primary": models.Officer.is_primary_officer(session["user"]["email"]),
"rit_student": session["user"]["email"].split("@")[1] == "g.rit.edu",
}
)
```

Remaining columns for this record, in the column order of the schema above:

avg_line_length: 27.625, max_line_length: 83, alphanum_fraction: 0.656109

qsc_*_quality_signal columns (qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal):
148 | 1,105 | 4.668919 | 0.324324 | 0.072359 | 0.057887 | 0.075253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012182 | 0.182805 | 1,105 | 39 | 84 | 28.333333 | 0.753045 | 0 | 0 | 0 | 0 | 0 | 0.175566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.433333 | 0

qsc_* columns without the _quality_signal suffix (qsc_code_num_words through qsc_codepython_frac_lines_print):
0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 1, hits: 0
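qsc_code_frac_chars_comments_quality_signal is 0 for this record and higher for the comment-heavy files elsewhere in the dump (0.04569 for Record 1, 0.294258 for Record 3). One plausible way to approximate a comment-character fraction for Python content is sketched below using the standard tokenize module; whether the upstream pipeline counts docstrings, or normalises by total characters in the same way, is an assumption.

```python
# Sketch: fraction of characters that sit inside '#' comments, in the
# spirit of qsc_code_frac_chars_comments. Docstring handling and the
# exact normalisation used upstream are assumptions.
import io
import tokenize

def frac_chars_comments(source: str) -> float:
    comment_chars = 0
    try:
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.COMMENT:
                comment_chars += len(tok.string)
    except (tokenize.TokenError, SyntaxError):
        pass  # keep whatever was counted before the tokenizer gave up
    return comment_chars / max(len(source), 1)
```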
Record 3: mybatis/column_generator.py (repo xliangwu/com.caveup.machine_learn)

| Field | Value |
|---|---|
| hexsha | f863fdd49bdc9fc91c5a6863a1a6f2c9cb1fed2c |
| size | 418 |
| ext | py |
| lang | Python |
| max_stars_repo_path | mybatis/column_generator.py |
| max_stars_repo_name | xliangwu/com.caveup.machine_learn |
| max_stars_repo_head_hexsha | 793131c4767f45d468a813752c07d02f623a7b99 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2018-09-19T06:27:14.000Z |
| max_stars_repo_stars_event_max_datetime | 2018-09-19T06:27:14.000Z |
| max_issues_repo_path | mybatis/column_generator.py |
| max_issues_repo_name | xliangwu/com.caveup.machine_learn |
| max_issues_repo_head_hexsha | 793131c4767f45d468a813752c07d02f623a7b99 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | mybatis/column_generator.py |
| max_forks_repo_name | xliangwu/com.caveup.machine_learn |
| max_forks_repo_head_hexsha | 793131c4767f45d468a813752c07d02f623a7b99 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content (Python source):

```python
def column_generator():
with open('columns.csv', encoding='utf-8') as f:
for line in f:
keyword = line.strip('\n')
# <columnOverride column="tid" property="tid"/>
# print(r'<columnOverride column="{}" property="{}"/>'.format(keyword,keyword))
print(r'<ignoreColumn column="{}"/>'.format(keyword, keyword))
if __name__ == '__main__':
column_generator()
```

Remaining columns for this record, in the column order of the schema above:

avg_line_length: 34.833333, max_line_length: 91, alphanum_fraction: 0.586124

qsc_*_quality_signal columns (qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal):
45 | 418 | 5.222222 | 0.622222 | 0.12766 | 0.170213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003106 | 0.229665 | 418 | 11 | 92 | 38 | 0.726708 | 0.294258 | 0 | 0 | 0 | 0 | 0.181507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.142857 | 0.142857

qsc_* columns without the _quality_signal suffix (qsc_code_num_words through qsc_codepython_frac_lines_print):
0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 1, hits: 0
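The qsc_codepython_* columns are Python specific. qsc_codepython_cate_ast_quality_signal is 1 for every complete record shown here, which is consistent with a simple "does the file parse" check, and the frac_lines_* columns look like per-line ratios. A hedged sketch of how such signals could be derived is below; the exact matching rules used by the pipeline are assumptions.

```python
# Sketch of Python-specific signals: whether the source parses
# (cf. qsc_codepython_cate_ast) plus import/print line ratios.
# The upstream matching rules are assumptions, not the pipeline's code.
import ast

def python_signals(source: str) -> dict:
    try:
        ast.parse(source)
        parses = 1
    except (SyntaxError, ValueError):
        parses = 0
    lines = [ln.strip() for ln in source.splitlines() if ln.strip()]
    n = max(len(lines), 1)
    return {
        "cate_ast": parses,
        "frac_lines_import": sum(ln.startswith(("import ", "from ")) for ln in lines) / n,
        "frac_lines_print": sum(ln.startswith("print(") for ln in lines) / n,
    }
```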
Record 4: ttslab/voices/afrikaans_default.py (repo jkleczar/ttslab)

| Field | Value |
|---|---|
| hexsha | f86413e599720995225d5a002a0228bfbc9b7ed7 |
| size | 22,250 |
| ext | py |
| lang | Python |
| max_stars_repo_path | ttslab/voices/afrikaans_default.py |
| max_stars_repo_name | jkleczar/ttslab |
| max_stars_repo_head_hexsha | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | ttslab/voices/afrikaans_default.py |
| max_issues_repo_name | jkleczar/ttslab |
| max_issues_repo_head_hexsha | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | ttslab/voices/afrikaans_default.py |
| max_forks_repo_name | jkleczar/ttslab |
| max_forks_repo_head_hexsha | 33fe0c3f88c1533816b2602b52e4162760d9c5f0 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2019-02-25T10:27:41.000Z |
| max_forks_repo_forks_event_max_datetime | 2019-02-25T10:27:41.000Z |

content (Python source):

```python
# -*- coding: utf-8 -*-
""" This file contains language-specific implementation for an
Afrikaans voice.
The idea is that this file contains subclassed Voice and Phoneset
implementations. This package ttslab/voices may then also contain
speaker specific implementations e.g. "afrikaans_SPEAKER.py"
"""
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "Daniel van Niekerk"
__email__ = "dvn.demitasse@gmail.com"
import re
from collections import OrderedDict
from .. phoneset import Phoneset
from .. defaultvoice import LwaziHTSVoice, LwaziPromHTSVoice
from .. synthesizer_htsme import SynthesizerHTSME
import ttslab.hts_labels_prom as hts_labels_prom
class LwaziAfrikaansPhoneset(Phoneset):
""" The clusters and syllabification are ripped from the English
implementation and should be revisited...
"""
def __init__(self):
#Phoneset.__init__(self)
#syllable_clusters are processed in order, thus a list, not a set...
self.features = {"name": "Lwazi Afrikaans Phoneset",
"syllable_clusters": ["VCV", "VCCV", "VCCCV", "VCCCCV",
"VCGV", "VCCGV", "VCCCGV", "VV"],
"wellformed_plosive_clusters": [["p","l"], ["b","l"], ["k","l"], ["g","l"], ["p","r"],
["b","r"], ["t","r"], ["d","r"], ["k","r"], ["g","r"],
["t","w"], ["d","w"], ["g","w"], ["k","w"]],
"wellformed_fricative_clusters": [["f","l"], ["f","r"], ["f","j"], ["ʃ","j"]],
"wellformed_other_clusters": [["m","j"], ["n","j"]],
"wellformed_s_clusters": [["s","p"], ["s","t"], ["s","k"], ["s","m"], ["s","n"],
["s","f"], ["s","w"], ["s","l"], ["s","p","l"],
["s","p","r"], ["s","t","r"], ["s","k","l"],
["s","k","r"], ["s","k","w"]]
}
self.features["wellformed_clusters"] = (self.features["wellformed_plosive_clusters"] +
self.features["wellformed_fricative_clusters"] +
self.features["wellformed_other_clusters"] +
self.features["wellformed_s_clusters"])
self.features["silence_phone"] = "pau"
self.features["closure_phone"] = "paucl"
self.phones = {"pau" : set(["pause"]),
"paucl" : set(["closure"]),
"ʔ" : set(["glottal-stop"]),
"ə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_central"]),
"əi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_back"]),
"ai" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"œ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front", "articulation_rounded"]),
"əu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"œy" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ŋ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_velar", "voiced"]),
"ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ɔi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
"ʒ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar", "voiced"]),
"æ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ɑː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_low", "position_back"]),
"ɑːi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"iə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front"]),
"øː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front", "articulation_rounded"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"ɦ" : set(["consonant", "manner_fricative", "place_glottal", "voiced"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"iu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"uə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_back", "articulation_rounded"]),
"uəi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"p" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial"]),
"r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"tʃ" : set(["class_consonantal", "consonant", "manner_affricate", "place_alveolar"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
"ui" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"v" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental", "voiced"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"]),
"x" : set(["class_consonantal", "consonant", "manner_fricative", "place_velar"]),
"y" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"z" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar", "voiced"])
}
self.map = {"pau":"pau",
"paucl":"paucl",
"ʔ":"paugs",
"ə":"q", #sin
"əi":"qi", #wyn
"a":"a", #man
"ai":"ai", #katjie
"ɛ":"E", #ken
"œ":"qoeq", #mus
"əu":"qu", #bou
"œy":"qoeqy", #huis
"ŋ":"N", #sing
"ɔ":"O", #son
"ɔi":"Oi", #potjie
"ʃ":"S", #chef
"ʒ":"Z", #mirage
"æ":"qaeq", #ek
"ɑː":"AA", #aan
"ɑːi":"AAi", #saai
"b":"b",
"d":"d",
"iə":"iq", #seer
"øː":"qooq", #seun
"f":"f",
"g":"g",
"ɦ":"hq",
"i":"i", #sien
"iu":"iu", #meeu
"j":"j",
"k":"k",
"l":"l",
"m":"m",
"n":"n",
"uə":"uq", #room
"uəi":"uqi", #rooi
"p":"p",
"r":"r",
"s":"s",
"t":"t",
"tʃ":"tS", #tjek
"u":"u", #boek
"ui":"ui", #boei
"v":"v", #wens
"w":"w", #twee
"x":"x", #gee
"y":"y", #muur
"z":"z",
"xxx":"xxx"
}
def is_plosive(self, phonename):
return "manner_plosive" in self.phones[phonename]
def is_voiced(self, phonename):
return ("voiced" in self.phones[phonename] or
"vowel" in self.phones[phonename])
def is_obstruent(self, phonename):
return ("class_consonantal" in self.phones[phonename] and
"class_sonorant" not in self.phones[phonename] and
"class_syllabic" not in self.phones[phonename])
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_glide(self, phonename):
return "manner_glide" in self.phones[phonename]
def is_liquid(self, phonename):
return "manner_liquid" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def is_fricative(self, phonename):
return "manner_fricative" in self.phones[phonename]
def is_nasal(self, phonename):
return "manner_nasal" in self.phones[phonename]
def sonority_level(self, phonename):
""" Assigns levels of sonority to phones based on their nature...
"""
if self.is_vowel(phonename):
if "height_low" in self.phones[phonename]:
return 9
if "height_mid" in self.phones[phonename]:
return 8
if "height_high" in self.phones[phonename]:
return 7
if self.is_liquid(phonename):
return 6
if self.is_nasal(phonename):
return 5
if self.is_fricative(phonename):
if self.is_voiced(phonename):
return 4
else:
return 3
if self.is_plosive(phonename):
if self.is_voiced(phonename):
return 2
else:
return 1
return 0
def _process_cluster(self, cluster, phonelist, match):
""" Break cluster into syllables according to the rules defined by
T.A. Hall, "English syllabification as the interaction of
markedness constraints" in Studia Linguistica, vol. 60, 2006,
pp. 1-33
Need to refactor the if statements to make clearer/simpler...
Implementation for English... needs to be revisited...
"""
phonecluster = phonelist[match.start() : match.end()]
if cluster == "VCV":
#always split -> V.CV:
return "V.CV"
if cluster == "VCCV":
CC = phonecluster[1:3]
#if CC cluster is Tautosyllabic -> V.CCV:
if ((CC in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0])) or
(CC[0] == "s" and
self.is_plosive(CC[1]) and
not self.is_voiced(CC[1]))):
return "V.CCV"
#if CC cluster is Heterosyllabic -> VC.CV:
if ((self.sonority_level(CC[1]) < self.sonority_level(CC[0])) or
(self.sonority_level(CC[1]) == self.sonority_level(CC[0])) or
(CC not in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0]))):
return "VC.CV"
if cluster == "VCCCV":
CCC = phonecluster[1:4]
C2C3 = CCC[1:]
#if CCC are all obstruents -> VC.CCV:
if all([self.is_obstruent(C) for C in CCC]):
return "VC.CCV"
#if C2C3 are wellformed onsets -> VC.CCV:
if C2C3 in self.features["wellformed_clusters"]:
return "VC.CCV"
else:
return "VCC.CV"
if cluster == "VCCCCV":
#always split -> VC.CCCV:
return "VC.CCCV"
if cluster == "VCGV":
CG = phonecluster[1:3]
if not self.is_plosive(CG[0]): #C not a stop
return "VC.GV"
else:
if CG not in self.features["wellformed_clusters"]: #C a stop and CG not wellformed
return "VC.GV"
else:
return "V.CGV" #C a stop and CG wellformed
if cluster == "VCCGV":
CCG = phonecluster[1:4]
if CCG[0] == "s":
return "V.CCGV"
else:
return "VC.CGV"
if cluster == "VCCCGV":
return "VC.CCGV"
if cluster == "VV": #not described in the Hall paper...
return "V.V"
def syllabify(self, phonelist):
""" Classes:
C -> Consonant,
V -> Short/Long Vowel/Syllabic sonorant/Diphthong
G -> Glide
"""
#make a copy (to be edited internally)
plist = list(phonelist)
#first construct string representing relevant classes...
classstr = ""
for phone in plist:
if self.is_vowel(phone):
classstr += "V"
elif self.is_glide(phone):
classstr += "G"
else:
classstr += "C"
#Begin Aby's hacks:
# - Change the last phoneclass under certain conditions..
try:
if (self.is_syllabicconsonant(plist[-1]) and
self.is_obstruent(plist[-2])):
classstr = classstr[:-1] + "V"
if (self.is_syllabicconsonant(plist[-1]) and
self.is_nasal(plist[-2])):
classstr = classstr[:-1] + "V"
except IndexError:
pass
#End Aby's hacks...
#find syllable_clusters in order and apply syllabification
#process on each...this should be redone... FIXME!!!
for cluster in self.features["syllable_clusters"]:
match = re.search(cluster, classstr)
while match:
#syllabify cluster
clustersylstr = self._process_cluster(cluster, plist, match)
#update classstr...
start, end = match.span()
classstr = clustersylstr.join([classstr[:start], classstr[end:]])
plist = (plist[:match.start() + clustersylstr.index(".")] +
[""] + plist[match.start() + clustersylstr.index("."):])
#next match...
match = re.search(cluster, classstr)
sylls = [[]]
index = 0
for char in classstr:
if char != ".":
sylls[-1].append(phonelist[index])
index += 1
else:
sylls.append([])
return sylls
class LwaziAfrikaans_simpleGPOS_HTSVoice(LwaziPromHTSVoice):
""" GPOS from Festival English example...
"""
PREPOSITIONS = ["in", "van", "vir", "op", "daardie", "met",
"by", "vanaf", "as", "teen", "voor", "onder",
"na", "oor", "terwyl", "sonder", "dat", "deur",
"tussen", "per", "af", "langs", "hierdie", "naas"]
DETERMINERS = ["die", "n", "geen", "nie", "elke", "nog", "al",
"enige", "beide", "baie"]
MODAL = ["sal", "wil", "mag", "sou", "wou", "moet", "wees"]
CONJUNCTIONS = ["en", "maar", "omdat", "want", "of"]
INTERROGATIVE_PRONOUNS = ["wie", "wat", "watter", "waar", "hoe", "wanneer", "hoekom"]
PERSONAL_PRONOUNS = ["haar", "sy", "hulle", "hul", "ons", "syne", "myne", "hare"]
AUXILIARY_VERBS = ["is", "het"]
GPOS = dict([(word, "prep") for word in PREPOSITIONS] +
[(word, "det") for word in DETERMINERS] +
[(word, "md") for word in MODAL] +
[(word, "cc") for word in CONJUNCTIONS] +
[(word, "wp") for word in INTERROGATIVE_PRONOUNS] +
[(word, "pps") for word in PERSONAL_PRONOUNS] +
[(word, "aux") for word in AUXILIARY_VERBS])
def __init__(self, phoneset, g2p, pronundict, pronunaddendum, synthesizer):
LwaziHTSVoice.__init__(self,
phoneset=phoneset,
g2p=g2p,
pronundict=pronundict,
pronunaddendum=pronunaddendum,
synthesizer=synthesizer)
self.processes = {"text-to-words": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None)]),
"text-to-segments": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None)]),
"text-to-label": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_only")]),
"text-to-wave": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_and_synth")]),
"utt-to-label": OrderedDict([("synthesizer", "label_only")]),
"utt-to-wave": OrderedDict([("synthesizer", "label_and_synth")])}
def gpos(self, utt, processname):
word_rel = utt.get_relation("Word")
for word_item in word_rel:
if word_item["name"] in self.GPOS:
word_item["gpos"] = "nc"
else:
word_item["gpos"] = "c"
return utt
class SynthesizerHTSME_Prominence(SynthesizerHTSME):
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_prom.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_prom.p(phone_item),
hts_labels_prom.a(phone_item),
hts_labels_prom.b(phone_item),
hts_labels_prom.c(phone_item),
hts_labels_prom.d(phone_item),
hts_labels_prom.e(phone_item),
hts_labels_prom.f(phone_item),
hts_labels_prom.g(phone_item),
hts_labels_prom.h(phone_item),
hts_labels_prom.i(phone_item),
hts_labels_prom.j(phone_item)]
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
```

Remaining columns for this record, in the column order of the schema above:

avg_line_length: 52.352941, max_line_length: 177, alphanum_fraction: 0.465573

qsc_*_quality_signal columns (qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal):
2,048 | 22,250 | 4.884766 | 0.23291 | 0.034386 | 0.046381 | 0.056677 | 0.423531 | 0.367053 | 0.328669 | 0.30068 | 0.223011 | 0.162535 | 0 | 0.004858 | 0.389438 | 22,250 | 424 | 178 | 52.476415 | 0.731542 | 0.078876 | 0 | 0.119883 | 0 | 0 | 0.233684 | 0.011215 | 0 | 0 | 0 | 0.002358 | 0 | 1 | 0.046784 | false | 0.002924 | 0.020468 | 0.026316 | 0.204678 | 0.002924

qsc_* columns without the _quality_signal suffix (qsc_code_num_words through qsc_codepython_frac_lines_print):
0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 1, hits: 0
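Records 1 and 4 score much higher than the others on the duplication signals (qsc_code_frac_chars_dupe_5grams_quality_signal of 0.6418 and 0.423531, qsc_code_frac_lines_dupe_lines_quality_signal of 0.493243 and 0.119883), which fits their repetitive fixture dictionaries and phone-feature tables. A simple line-level duplication measure in the same spirit is sketched below; the stored signal's precise definition is an assumption.

```python
# Sketch of a line-duplication measure in the spirit of
# qsc_code_frac_lines_dupe_lines: the share of non-empty lines whose
# exact (stripped) text occurs more than once. Whitespace stripping and
# the treatment of blank lines are assumptions.
from collections import Counter

def frac_lines_dupe_lines(source: str) -> float:
    lines = [ln.strip() for ln in source.splitlines() if ln.strip()]
    if not lines:
        return 0.0
    counts = Counter(lines)
    return sum(1 for ln in lines if counts[ln] > 1) / len(lines)
```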
Record 5: scripts/Iodide/project_misc.py (repo tsherwen/sparse2spatial)

| Field | Value |
|---|---|
| hexsha | f865843e860d96b7840567719ae0919a197d73ae |
| size | 144,813 |
| ext | py |
| lang | Python |
| max_stars_repo_path | scripts/Iodide/project_misc.py |
| max_stars_repo_name | tsherwen/sparse2spatial |
| max_stars_repo_head_hexsha | 6f5240c7641ad7a894476672b78c8184c514bf87 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2020-01-14T21:40:29.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-01-14T21:40:29.000Z |
| max_issues_repo_path | scripts/Iodide/project_misc.py |
| max_issues_repo_name | tsherwen/sparse2spatial |
| max_issues_repo_head_hexsha | 6f5240c7641ad7a894476672b78c8184c514bf87 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | scripts/Iodide/project_misc.py |
| max_forks_repo_name | tsherwen/sparse2spatial |
| max_forks_repo_head_hexsha | 6f5240c7641ad7a894476672b78c8184c514bf87 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content (Python source):
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains analysis done for the Ocean iodide (Oi!) project
This includes presentation at conferences etc...
"""
import numpy as np
import pandas as pd
import sparse2spatial as s2s
import sparse2spatial.utils as utils
import matplotlib
import matplotlib.pyplot as plt
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# Get iodide specific functions
import observations as obs
def main():
"""
Run various misc. scripted tasks linked to the "iodide in the ocean" project
"""
pass
# ---- ----- ----- ----- ----- ----- ----- ----- -----
# ----- ----- Misc (associated iodide project tasks)
# These include getting CTM (GEOS-Chem) output for Anoop/Sawalha/TropMet
# --- Make planeflight files for cruise
# mk_pf_files4Iodide_cruise()
# mk_pf_files4Iodide_cruise(mk_column_output_files=True)
# Test the input files for these cruises?
# test_input_files4Iodide_cruise_with_plots()
# Test output files for cruises
# TEST_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False)
# Get numbers for data paper (data descriptor paper)
# get_numbers_for_data_paper()
# Get Longhurst province labelled NetCDF for res
# add_LonghurstProvince2NetCDF(res='4x5', ExStr='TEST_VI' )
# add_LonghurstProvince2NetCDF(res='2x2.5', ExStr='TEST_V' )
# add_LonghurstProvince2NetCDF(res='0.125x0.125', ExStr='TEST_VIII' )
# Add Longhurst Province to a lower res NetCDF file
# folder = './'
# filename = 'Oi_prj_output_iodide_field_1x1_deg_0_5_centre.nc'
# filename = 'Oi_prj_output_iodide_field_0_5x0_5_deg_centre.nc'
# ds = xr.open_dataset(folder+filename)
# add_LonghurstProvince2NetCDF(ds=ds, res='0.5x0.5', ExStr='TEST_VIII')
# process this to csv files for Indian' sea-surface paper
# ---------------------------------------------------------------------------
# ---------- Functions to produce output for Iodide obs. paper -------------
# ---------------------------------------------------------------------------
def get_PDF_of_iodide_exploring_data_rootset(show_plot=False,
ext_str=None):
""" Get PDF of plots exploring the iodide dataset """
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
#
if ext_str == 'Open_ocean':
# Kludge data
# Kludge_tinel_data=True
# if Kludge_tinel_data:
# new_Data = [ 'He_2014', 'He_2013']
# new_Data += ['Chance_2018_'+i for i in 'I', 'II', 'III']
# df.loc[ df['Data_Key'].isin(new_Data), 'Coastal'] = False
# only take data flagged open ocean
df = df.loc[df[u'Coastal'] == 0.0, :]
elif ext_str == 'Coastal':
df = df.loc[df[u'Coastal'] == 1.0, :]
elif ext_str == 'all':
print('Using entire dataset')
else:
print('Need to set region of data to explore - currently', ext_str)
sys.exit()
# setup PDF
savetitle = 'Oi_prj_data_root_exploration_{}'.format(ext_str)
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
# current_palette = sns.color_palette()
current_palette = sns.color_palette("colorblind")
# --- --- --- --- --- --- --- ---
# ---- Add in extra varibles
# iodide / iodate
I_div_IO3_var = 'I$^{-}$/IO$_{3}^{-}$ (ratio)'
df[I_div_IO3_var] = df['Iodide'] / df['Iodate']
# total iodide
I_plus_IO3 = 'I$^{-}$+IO$_{3}^{-}$'
df[I_plus_IO3] = df['Iodide'] + df['Iodate']
# --- Add ocean basin to dataframe
area_var = 'Region'
df[area_var] = None
# setup a dummy column
# --- --- --- --- --- --- --- ---
# --- Plot dataset locations
sns.reset_orig()
# Get lats, lons and size of dataset
lats = df['Latitude'].values
lons = df['Longitude'].values
N_size = df.shape[0]
if ext_str == 'Open_ocean':
title = 'Iodide data (Open Ocean) explored in PDF (N={})'
else:
title = 'Iodide data (all) explored in this PDF (N={})'
# plot up
AC.plot_lons_lats_spatial_on_map(lats=lats, lons=lons,
title=title.format(N_size),
split_title_if_too_long=False,
f_size=10)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
# --- iodide to iodide ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# plot up with no limits
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(ext_str))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5,
for ylimit in ylimits:
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(ext_str, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
# TODO - update to use proper definitions
# for southern ocean use the files below
# for rest https://www.nodc.noaa.gov/woce/woce_v3/wocedata_1/woce-uot/summary/bound.htm
#
# --- iodide to iodide ratio ( split by region )
# Between 120E and -80E its Pacific
upper_val = 120
lower_val = -80
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).any(axis=1))
varname = 'Pacific Ocean ({} to {}{})'.format(upper_val, lower_val, unit)
df.loc[bool, area_var] = varname
# Between -80E and 30E its Atlantic
upper_val = -80
lower_val = 30
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Atlantic Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
# Between 30E and 120E its Indian
upper_val = 30
lower_val = 120
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Indian Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
# if latitude below 60S, overwrite to be Southern ocean
varname = 'Southern Ocean'
df.loc[df['Latitude'] < -60, area_var] = varname
# --- --- --- --- --- --- --- ---
# --- locations of data
sns.reset_orig()
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# locations ?
lons = df_tmp[u'Longitude'].tolist()
lats = df_tmp[u'Latitude'].tolist()
# Now plot
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats)
# fig=fig, ax=ax , color='blue', label=label, alpha=alpha,
# window=window, axis_titles=axis_titles, return_axis=True,
# p_size=p_size)
plt.title('{} ({})'.format(var_, ext_str))
if show_plot:
plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- --- --- --- --- --- --- ---
# --- iodide to iodide ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5
for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(var_, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
# --- iodide + iodide
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
plt.title(I_plus_IO3 + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
# ylimits = 1.5, 0.75, 0.5
# for ylimit in ylimits:
# df.plot(kind='scatter', y=I_plus_IO3, x='Latitude' )
# # beautify
# title= ' ({}, y axis limited to {})'.format(var_, ylimit)
# plt.title( I_plus_IO3 + title )
# plt.ylim(-0.05, ylimit )
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# if show_plot: plt.show()
# plt.close()
# plot up with limits on y
ylimits = [100, 600]
# for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
title = ' ({}, y axis={}-{})'.format(var_, ylimits[0], ylimits[1])
plt.title(I_plus_IO3 + title)
plt.ylim(ylimits[0], ylimits[1])
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
# ---------------------------------------------------------------------------
# ---------- Funcs. to process iodine obs/external data --------------------
# ---------------------------------------------------------------------------
def check_points_for_cruises(target='Iodide', verbose=False, debug=False):
"""
Check the cruise points for the new data (Tinel, He, etc...)
"""
# Get the observational data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# And the metadata
metadata_df = obs.get_iodide_obs_metadata()
# Only consider new datasets
new_cruises = metadata_df[metadata_df['In Chance2014?'] == 'N']
df = df[df['Data_Key'].isin(new_cruises['Data_Key'].tolist())]
# Strings to format printing
ptr_str_I = '- '*5 + 'Cruise: {:<20}'
ptr_str_II = '(Source: {:<20}, Location: {:<15}, N: {}, N(Iodide): {})'
# Print by cruise
for data_key in set(df['Data_Key']):
df_m_tmp = metadata_df[metadata_df['Data_Key'] == data_key]
df_tmp = df[df['Data_Key'] == data_key]
# Extract metadata
Cruise = df_m_tmp['Cruise'].values[0]
Source = df_m_tmp['Source'].values[0]
Location = df_m_tmp['Location'].values[0]
#
N = df_tmp.shape[0]
N_I = df_tmp[target].dropna().shape[0]
print(ptr_str_I.format(Cruise))
print(ptr_str_II.format(Source, Location, N, N_I))
# Points for all cruises
N = df.shape[0]
N_I = df[target].dropna().shape[0]
print(ptr_str_I.format('ALL new data'))
print(ptr_str_II.format('', '', N, N_I))
def plot_threshold_plus_SD_spatially(var=None, value=None, std=None, res='4x5',
fillcontinents=True, show_plot=False,
dpi=320, save2png=True,
verbose=True, debug=False):
"""
Plot up the spatial extent of a input variable value + Std. Dev.
"""
# - Local variables
# Get the core input variables
data_root = utils.get_file_locations('data_root')
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
ds = xr.open_dataset(data_root + filename)
# make sure the dataset has units
ds = add_units2ds(ds)
# Use appropriate plotting settings for resolution
if res == '0.125x0.125':
centre = True
else:
centre = False
# Get data
arr = ds[var].mean(dim='time').values
# colour in values above and below threshold (works)
arr[arr >= value] = 1
arr[arr >= value-std] = 0.5
arr[(arr != 1) & (arr != 0.5)] = 0.01
# Get units from dataset
units = ds[var].units
# Plot up
title_str = "'{}' ({}) threshold Value ({}) + \n Standard deviation ({})"
title = title_str.format(var, units, value, std)
if var == 'WOA_TEMP_K':
title += ' (in degC={}, std={})'.format(value-273.15, std)
# Plot using AC_tools
AC.plot_spatial_figure(arr,
# extend=extend,
# fixcb=fixcb, nticks=nticks, \
res=res, show=False, title=title, \
fillcontinents=fillcontinents, centre=centre, units=units,
# f_size=f_size,
no_cb=False)
# Use a tight layout
plt.tight_layout()
# Now save or show
if show_plot:
plt.show()
savetitle = 'Oi_prj_threshold_std_4_var_{}_{}'.format(var, res)
if save2png:
plt.savefig(savetitle+'.png', dpi=dpi)
plt.close()
# ---------------------------------------------------------------------------
# -------------- Reproduction of Chance et al (2014) figures ----------------
# ---------------------------------------------------------------------------
def plot_up_iodide_vs_latitude(show_plot=True):
"""
Reproduce Fig. 3 in Chance et al (2014)
Notes
----
- figure captions:
Variation of sea-surface iodide concentration with latitude for entire
data set (open diamonds) and open ocean data only (filled diamonds).
For clarity, one exceptionally high coastal iodide value (700 nM, 58.25N)
has been omitted.
"""
# - Get data
df = get_core_Chance2014_obs()
# Select data of interest
# ( later add a color selection based on coastal values here? )
vars = ['Iodide', 'Latitude']
print(df)
# and select coastal/open ocean
df_coastal = df[df['Coastal'] == True][vars]
df_open_ocean = df[~(df['Coastal'] == True)][vars]
# - Now plot Obs.
# plot coastal
ax = df_coastal.plot(kind='scatter', x='Latitude', y='Iodide', marker='D',
color='blue', alpha=0.1,
# markerfacecolor="None", **kwds )
)
# plot open ocean
ax = df_open_ocean.plot(kind='scatter', x='Latitude', y='Iodide',
marker='D', color='blue', alpha=0.5, ax=ax,
# markerfacecolor="None", **kwds )
)
# Update aesthetics of plot
plt.ylabel('[Iodide], nM')
plt.xlabel('Latitude, $^{o}$N')
plt.ylim(-5, 500)
plt.xlim(-80, 80)
# save or show?
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_Nitrate(show_plot=True):
"""
Reproduc Fig. 11 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed ( ) and
climatological ( ) nitrate concentration obtained from the World
Ocean Atlas as described in the text for all data (A) and nitrate
concentrations below 2 mM (B) and above 2 mM (C). Dashed lines in B
and C show the relationships between iodide and nitrate adapted from
Campos et al.41 by Ganzeveld et al.27
"""
# - location of data to plot
df = obs.get_processed_df_obs_mod()
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations below 2 mM
df_tmp = df[df['Nitrate'] < 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations above 2 mM
df_tmp = df[df['Nitrate'] > 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k'),
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_SST(show_plot=True):
"""
Reproduc Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed sea surface
temperature ( ) and climatological sea surface temperature ( ) values
obtained from the World Ocean Atlas as described in the text.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Temperature', y='Iodide', marker='D',
color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Sea surface temperature (SST), $^{o}$C')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_salinity(show_plot=True):
"""
Reproduc Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed salinity ( , ) and
climatological salinity ( ) values obtained from the World Ocean Atlas as
described in the text for: (A) all data; (B) samples with salinity greater
than 30, shown in shaded area in (A). Note samples with salinity less than
30 have been excluded from further analysis and are not shown in Fig. 8–11.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
# df = df[ ~(df['Coastal']==True) ]
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] < 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] > 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(29, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
def plot_pair_grid(df=None, vars_list=None):
"""
Make a basic pair plot to test the data
"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from itertools import cycle
# make a kde plot
def make_kde(*args, **kwargs):
sns.kdeplot(*args, cmap=next(make_kde.cmap_cycle), **kwargs)
# define colormap to cycle
make_kde.cmap_cycle = cycle(('Blues_r', 'Greens_r', 'Reds_r', 'Purples_r'))
# Plot a pair plot
pg = sns.PairGrid(data, vars=vars_list)
# ---------------------------------------------------------------------------
# ---------------- New plotting of iodine obs/external data -----------------
# ---------------------------------------------------------------------------
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None,
res='0.125x0.125',
dpi=320):
"""
Analyse the gridded data for the Arctic and Antarctic
"""
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# Get input variables
if isinstance(dsA, type(None)):
filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
# folder = '/shared/earth_home/ts551/labbook/Python_progs/'
folder = '/shared/earth_home/ts551/data/iodide/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# ds = xr.open_dataset( filename )
# variables to consider
vars2analyse = list(dsA.data_vars)
# Add LWI to array - NOTE: 1 = water in Nature run LWI files !
# ( The above comment is not correct! why is this written here? )
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
# updates dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
# - setup booleans for the data
# now loop and extract variablesl
vars2use = [
'WOA_Nitrate',
# 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# setup PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space_PERTURBED'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
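    # (Each regional DataFrame holds 1-D arrays of ocean-only gridbox values,
    #  flattened over the 12 months, so the regions differ in sample size.)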
    # - plot up the PDF distribution of each of the variables.
    datasets = sorted(dfs.keys())
    for var2use in vars2use:
        print(var2use)
        # set a single axis to use.
        fig, ax = plt.subplots()
        for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
        # TODO: also plot up the perturbed ancillaries here (not yet implemented)
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
    # - Save entire PDF
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None,
res='0.125x0.125',
dpi=320):
"""
Analyse the input data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# get input variables
    if isinstance(dsA, type(None)):
        folder = '/shared/earth_home/ts551/data/iodide/'
        filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
        dsA = xr.open_dataset(folder + filename)
# variables to consider
vars2analyse = list(dsA.data_vars)
    # Add LWI to array - NOTE: in the Nature run LWI files, 0 = water and
    # 1 = land (this is how the values are used below)
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
    # Update dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
    # - setup booleans for the data
    # now loop and extract variables
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
            # get value for <= -65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
            # get value for all locations
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
# - Loop regions and plot PDFs of variables of interest
# vars2use = dfs[ dfs.keys()[0] ].columns
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][vars2use]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df)
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up the PDF distribution of each of the variables.
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
    # - Plot up the number of oceanic gridboxes in each latitude band
    # Plot up number of samples for the South pole
ds = dsA.sel(lat=(dsA['lat'] <= -65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
plt.xlabel('Latitude $^{\circ}$N')
plt.title('Number of gridboxes for Antarctic (<= -65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up number of samples for North pole
ds = dsA.sel(lat=(dsA['lat'] >= 65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
plt.xlabel('Latitude $^{\circ}$N')
    plt.title('Number of gridboxes for Arctic (>= 65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
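# A minimal usage sketch (assumption: the feature-variable NetCDF exists at the
# hard-coded folder above and AC_tools/utils are configured for this machine):
#     explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(res='0.125x0.125')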
def explore_observational_data_in_Arctic_parameter_space(RFR_dict=None,
plt_up_locs4var_conds=False,
testset='Test set (strat. 20%)',
dpi=320):
"""
    Analyse the input observational data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
df = RFR_dict['df']
# Set splits in data to look at
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
    # Get all the data above 65 N that is not in the test set (i.e. training data)
bool_ = dfs['>=65N'][testset] == False
dfs['>=65N (training)'] = dfs['>=65N'].loc[bool_, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
    # Get all the data below 65 S that is not in the test set (i.e. training data)
bool_ = dfs['<=65S'][testset] == False
dfs['<=65S (training)'] = dfs['<=65S'].loc[bool_, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# - Loop regions and plot pairplots of variables of interest
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_obs_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df[vars2use])
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
# Loop by dataset (region) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
        # Plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
        # Plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
df = RFR_dict['df']
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# plot up the PDF distribution of each of the variables.
datasets = sorted(dfs.keys())
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
if plt_up_locs4var_conds:
df = RFR_dict['df']
dfs = {}
        # Subset the data by nitrate thresholds (>= and <= cutoffs)
        dfs['Nitrate >=15'] = df.loc[df['WOA_Nitrate'] >= 15, :]
        dfs['Nitrate >=10'] = df.loc[df['WOA_Nitrate'] >= 10, :]
        for threshold in (15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1):
            var_ = 'Nitrate <={}'.format(threshold)
            dfs[var_] = df.loc[df['WOA_Nitrate'] <= threshold, :]
# Loop by dataset (nitrate values) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
            # Plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
            # Plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
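# A minimal usage sketch (assumption: RFR_dict is the project's model/data
# dictionary and contains the processed obs. DataFrame under the 'df' key):
#     explore_observational_data_in_Arctic_parameter_space(RFR_dict=RFR_dict)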
def Driver2analyse_new_data_vs_existing_data():
"""
Driver to plot up all options for old vs. new analysis plots
"""
regions = 'all', 'coastal', 'noncoastal'
for limit_to_400nM in True, False:
for region in regions:
analyse_new_data_vs_existing_data(region=region,
limit_to_400nM=limit_to_400nM)
def analyse_new_data_vs_existing_data(limit_to_400nM=True, region='all',
                                      dpi=320):
    """
    Build a set of analysis plots exploring the difference between new and
    existing datasets
    """
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
    # Setup PDF
savetitle = 'Oi_prj_new_vs_existing_datasets'
if limit_to_400nM:
# Exclude v. high values (N=7 - in final dataset)
pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
savetitle += '_limited_to_400nM'
if region == 'all':
savetitle += '_all'
elif region == 'coastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 1, :]
savetitle += '_{}'.format(region)
elif region == 'noncoastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 0, :]
savetitle += '_{}'.format(region)
else:
sys.exit()
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# - Plot up new data ( ~timeseries? )
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
    # - Plot up new data (PDFs of iodide, salinity, temperature and depth)
    vars2plot = ['Iodide', u'WOA_Salinity', 'WOA_TEMP', u'Depth_GEBCO']
    for var2plot in vars2plot:
        for dataset in New_datasets:
            # Select new dataset
            tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
            Cruise = tmp_df['Cruise'].values[0]
            # - Plot up PDF plots for the dataset
            # Plot the whole dataset
            obs_arr = pro_df[var2plot].values
            ax = sns.distplot(obs_arr, axlabel=axlabel,
                              color='k', label='Whole dataset')
            # Plot just the new data
            ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
                              color='red', ax=ax)
            # Force y axis extent to be correct
            ax.autoscale()
            # Beautify
            title = "PDF of '{}' {} data ({}) at obs. locations"
            plt.title(title.format(dataset, var2plot, axlabel))
            plt.legend()
            # Save to PDF and close plot
            AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
            plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_diagnostic_plots_analysis4observations(inc_all_extract_vars=False,
include_hexbin_plots=False,
model_name='TEMP+DEPTH+SAL',
show_plot=False, dpi=320):
"""
Produce a PDF of comparisons of observations in dataset inventory
"""
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
    # Setup PDF
savetitle = 'Oi_prj_obs_plots'
if inc_all_extract_vars:
savetitle += '_all_extract_vars'
include_hexbin_plots = True
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
LOCAL_model_name = 'RFR({})'.format(model_name)
pro_df[LOCAL_model_name] = get_model_predictions4obs_point(pro_df,
model_name=model_name)
    # Exclude v. high values (N=7 in the final dataset; N=4 in the initial dataset)
    pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
# Add coastal flag to data
coastal_flag = 'coastal_flagged'
pro_df = get_coastal_flag(df=pro_df, coastal_flag=coastal_flag)
non_coastal_df = pro_df.loc[pro_df['coastal_flagged'] == 0]
dfs = {'Open-Ocean': non_coastal_df, 'All': pro_df}
# TODO ... add test dataset in here
# Get the point data for params...
point_ars_dict = {}
for key_ in dfs.keys():
point_ars_dict[key_] = {
'Obs.': dfs[key_]['Iodide'].values,
'MacDonald et al (2014)': dfs[key_]['MacDonald2014_iodide'].values,
'Chance et al (2014)': dfs[key_][u'Chance2014_STTxx2_I'].values,
            'Chance et al (2014) - Multivariate': dfs[key_][
u'Chance2014_Multivariate'
].values,
LOCAL_model_name: dfs[key_][LOCAL_model_name],
}
point_ars_dict = point_ars_dict['Open-Ocean']
parm_name_dict = {
'MacDonald et al (2014)': 'MacDonald2014_iodide',
'Chance et al (2014)': u'Chance2014_STTxx2_I',
        'Chance et al (2014) - Multivariate': u'Chance2014_Multivariate',
LOCAL_model_name: LOCAL_model_name,
}
point_data_names = sorted(point_ars_dict.keys())
point_data_names.pop(point_data_names.index('Obs.'))
param_names = point_data_names
# setup color dictionary
current_palette = sns.color_palette("colorblind")
colour_dict = dict(zip(param_names, current_palette[:len(param_names)]))
colour_dict['Obs.'] = 'K'
# --- Plot up locations of old and new data
import seaborn as sns
sns.reset_orig()
plot_up_data_locations_OLD_and_new(save_plot=False, show_plot=False)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against coastal data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
# just non-coastal
for param_name in sorted(parm_name_dict.keys()):
Y = non_coastal_df[parm_name_dict[param_name]].values
X = non_coastal_df['Iodide'].values
title = 'Regression plot of Open-ocean [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against all data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for param_name in point_data_names:
Y = point_ars_dict[param_name]
title = 'Regression plot of all [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---- Plot up new data
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
# plt.legend()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up Salinity
# var2plot = 'WOA_Salinity'
# for dataset in New_datasets:
# tmp_df = pro_df.loc[ pro_df['Data_Key'] == dataset ]
# tmp_df[var2plot].plot()
# ax= plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
# plt.xlabel( 'Obs #')
# plt.ylabel( 'PSU' )
# plt.title( '{} during cruise from {}'.format( var2plot, dataset ) )
# plt.legend()
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
    # ---- Plot up key comparisons for coastal and non-coastal data
for key_ in sorted(dfs.keys()):
# --- Ln(Iodide) vs. T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Ln(Iodide) vs. 1/T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP_K'
X = 1 / dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, '1/'+xlabel, key_)
plt.title(title)
        plt.xlabel('1/' + xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
        # --- Ln(Iodide) vs. Salinity
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_Salinity'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---
if inc_all_extract_vars:
for key_ in sorted(dfs.keys()):
            # List extracted variables
            extracted_vars = [
                u'WOA_TEMP', u'WOA_Nitrate', u'WOA_Salinity',
                u'WOA_Dissolved_O2', u'WOA_Phosphate', u'WOA_Silicate',
                u'Depth_GEBCO', u'SeaWIFs_ChlrA',
                u'WOA_MLDpt', u'WOA_MLDpt_max', u'WOA_MLDpt_sum',
                u'WOA_MLDpd', u'WOA_MLDpd_max', u'WOA_MLDpd_sum',
                u'WOA_MLDvd', u'WOA_MLDvd_max', u'WOA_MLDvd_sum',
                u'DOC', u'DOCaccum', u'Prod', u'SWrad',
            ]
            # Loop extracted variables and plot
            for var_ in extracted_vars:
ylabel = var_
xlabel = 'Iodide'
tmp_df = dfs[key_][[xlabel, ylabel]]
# Kludge to remove '--' from MLD columns
for col in tmp_df.columns:
bool_ = [i == '--' for i in tmp_df[col].values]
tmp_df.loc[bool_, :] = np.NaN
if tmp_df[col].dtype == 'O':
tmp_df[col] = pd.to_numeric(tmp_df[col].values,
errors='coerce')
print(var_, tmp_df.min(), tmp_df.max())
                # Plot up
                ax = sns.regplot(x=xlabel, y=ylabel, data=tmp_df)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- Plot up Just observations and predicted values from models as PDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
    # Force y axis extent to be correct
ax.autoscale()
# Beautify
plt.title('PDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up Just observations and predicted values from models as CDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
    # Force y axis extent to be correct
ax.autoscale()
# Beautify
plt.title('CDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up parameterisations as regression
# import seaborn as sns; sns.set(color_codes=True)
# sns.set_context("paper")
# xlabel = 'Obs.'
# X = point_ars_dict[xlabel]
# for point_name in point_data_names:
# title = 'Regression plot of [I$^{-}_{aq}$] (nM) '
# title = title + '{} vs {} parameterisation'.format(xlabel, point_name )
# Y = point_ars_dict[point_name]
# ax = sns.regplot(x=X, y=Y )
# # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# # title=None, add_ODR_trendline2plot=True)
# plt.title(title)
# plt.xlabel(xlabel)
# plt.ylabel(point_name)
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
# --- Plot up parameterisations as hexbin plot
if include_hexbin_plots:
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for point_name in point_data_names:
title = 'Hexbin of [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel,
point_name)
Y = point_ars_dict[point_name]
get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name,
log=False, title=title, add_ODR_trendline2plot=True)
# plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
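# A minimal usage sketch (assumption: the obs. inputs and the named RFR model
# are available via the project's helper functions):
#     get_diagnostic_plots_analysis4observations(model_name='TEMP+DEPTH+SAL')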
def plot_PDF_iodide_obs_mod(bins=10):
"""
plot up PDF of predicted values vs. observations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
print(df.shape)
df = df[~(df['Coastal'] == True)]
# Salinity greater than 30
# df = df[ (df['Salinity'] > 30 ) ]
print(df.shape)
# Plot up data
    # MacDonald et al (2014) values
ax = sns.distplot(df['MacDonald2014_iodide'],
label='MacDonald2014_iodide', bins=bins)
# Chance et al 2014 values
ax = sns.distplot(df['Chance2014_STTxx2_I'],
label='Chance2014_STTxx2_I', bins=bins)
# Iodide obs.
ax = sns.distplot(df['Iodide'], label='Iodide, nM', bins=bins)
# Update aesthetics and show plot?
plt.xlim(-50, 400)
plt.legend(loc='upper right')
plt.show()
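# A minimal usage sketch (assumes 'Iodine_obs_WOA.csv' is present under the
# 'data_root' location known to utils.get_file_locations):
#     plot_PDF_iodide_obs_mod(bins=25)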
def plt_predicted_iodide_vs_obs_Q1_Q3(dpi=320, show_plot=False,
limit_to_400nM=False, inc_iodide=False):
"""
Plot predicted iodide on a latitudinal basis
NOTES
     - This is the obs.-location equivalent of the plot produced to show
       predicted values for all global locations
       (Oi_prj_global_predicted_vals_vs_lat)
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Local variables
# sub select variables of interest.
params2plot = [
'Chance2014_STTxx2_I', 'MacDonald2014_iodide',
]
# Set names to overwrite variables with
rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)',
u'MacDonald2014_iodide': 'MacDonald et al. (2014)',
'RFR(Ensemble)': 'RFR(Ensemble)',
'Iodide': 'Obs.',
# u'Chance2014_Multivariate': 'Chance et al. (2014) (Multi)',
}
# filename to save values
filename = 'Oi_prj_global_predicted_vals_vs_lat_only_obs_locs'
# include iodide observations too?
if inc_iodide:
params2plot += ['Iodide']
filename += '_inc_iodide'
CB_color_cycle = AC.get_CB_color_cycle()
color_d = dict(zip(params2plot, CB_color_cycle))
    # Limit to values below 400 nM?
if limit_to_400nM:
df = df.loc[df['Iodide'] < 400, :]
filename += '_limited_400nM'
# - Process data
# Add binned mean
# bins = np.arange(-70, 70, 10 )
bins = np.arange(-80, 90, 10)
# groups = df.groupby( np.digitize(df[u'Latitude'], bins) )
groups = df.groupby(pd.cut(df['Latitude'], bins))
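    # (pd.cut assigns each observation to a 10 degree latitude band; .describe()
    #  below then provides N, mean and the quartiles used for the shading.)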
    # Take summary statistics of groups
# groups_avg = groups.mean()
groups_des = groups.describe().unstack()
# - setup plotting
fig, ax = plt.subplots(dpi=dpi)
# - Plot up
X = groups_des['Latitude']['mean'].values # groups_des.index
# X =bins
print(groups_des)
# plot groups
for var_ in params2plot:
# Get quartiles
Q1 = groups_des[var_]['25%'].values
Q3 = groups_des[var_]['75%'].values
# Add median
ax.plot(X, groups_des[var_]['50%'].values,
color=color_d[var_], label=rename_titles[var_])
# add shading for Q1/Q3
ax.fill_between(X, Q1, Q3, alpha=0.2, color=color_d[var_])
# - Plot observations
# Highlight coastal obs
tmp_df = df.loc[df['Coastal'] == True, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='none', s=3,
label='Coastal obs.')
# non-coastal obs
tmp_df = df.loc[df['Coastal'] == False, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='k', s=3,
label='Non-coastal obs.')
# - Beautify
# Add legend
plt.legend()
# Limit plotted y axis extent
plt.ylim(-20, 420)
plt.ylabel('[I$^{-}_{aq}$] (nM)')
plt.xlabel('Latitude ($^{\\rm o}$N)')
plt.savefig(filename, dpi=dpi)
if show_plot:
plt.show()
plt.close()
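# A minimal usage sketch (assumes 'Iodine_obs_WOA.csv' is present under
# 'data_root'):
#     plt_predicted_iodide_vs_obs_Q1_Q3(limit_to_400nM=True, inc_iodide=True)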
def plot_up_data_locations_OLD_and_new(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
figsize = (11, 5)
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
p_size = 25
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
    # Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
m = AC.plot_lons_lats_spatial_on_map(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha,
window=window, axis_titles=axis_titles,
return_axis=True, p_size=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
m.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right')
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def plot_up_data_locations_OLD_and_new_CARTOPY(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
# figsize = (11, 5)
figsize = (11*2, 5*2)
fig = plt.figure(figsize=figsize, dpi=dpi)
# fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
fig, ax = None, None
p_size = 15
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
    # Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
ax = plot_lons_lats_spatial_on_map_CARTOPY(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha, dpi=dpi,
# window=window, axis_titles=axis_titles,
# return_axis=True,
# add_detailed_map=True,
add_background_image=False,
add_gridlines=False,
s=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
ax.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label, zorder=1000)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right', prop={'size': 6})
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def map_plot_of_locations_of_obs():
"""
Plot up locations of observations of data to double check
"""
import matplotlib.pyplot as plt
# - Settings
plot_all_as_one_plot = True
show = True
# - Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# only consider non-coastal locations
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
    df = df[df['Salinity'] > 30.0]  # select locations with salinity > 30
print(df.shape)
# Get coordinate values
all_lats = df['Latitude'].values
all_lons = df['Longitude'].values
# Get sub lists of unique identifiers for datasets
datasets = list(set(df['Data_Key']))
n_datasets = len(datasets)
# - Setup plot
#
f_size = 10
marker = 'o'
p_size = 75
dpi = 600
c_list = AC.color_list(int(n_datasets*1.25))
print(c_list, len(c_list))
# plot up white background
arr = np.zeros((72, 46))
vmin, vmax = 0, 0
# - just plot up all sites to test
if plot_all_as_one_plot:
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi,
facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[
vmin, vmax], ax=ax1, no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Scatter plot of points.
m.scatter(all_lons, all_lats, edgecolors=c_list[1], c=c_list[1],
marker=marker, s=p_size, alpha=1,)
# Save and show?
plt.savefig('Iodide_dataset_locations.png', dpi=dpi, transparent=True)
if show:
plt.show()
else:
chunksize = 5
chunked_list = AC.chunks(datasets, chunksize)
counter = 0
for n_chunk_, chunk_ in enumerate(chunked_list):
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi, facecolor='w',
edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[vmin, vmax], ax=ax1,
no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Loop all datasets
for n_dataset_, dataset_ in enumerate(chunk_):
print(n_chunk_, counter, dataset_, c_list[counter])
#
df_sub = df[df['Data_Key'] == dataset_]
lats = df_sub['Latitude'].values
lons = df_sub['Longitude'].values
# Plot up and save.
color = c_list[n_chunk_::chunksize][n_dataset_]
m.scatter(lons, lats, edgecolors=color, c=color,
marker=marker, s=p_size, alpha=.5, label=dataset_)
# add one to counter
counter += 1
plt.legend()
# save chunk...
plt.savefig('Iodide_datasets_{}.png'.format(n_chunk_), dpi=dpi,
transparent=True)
if show:
plt.show()
def plot_up_parameterisations(df=None, save2pdf=True, show=False):
"""
Plot up parameterisations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Consider both Chance and MacDonald parameterisations
params = [i for i in df.columns if ('Mac' in i)]
params += [i for i in df.columns if ('Chance' in i)]
# get details of parameterisations
# filename='Chance_2014_Table2_PROCESSED_17_04_19.csv'
filename = 'Chance_2014_Table2_PROCESSED.csv'
folder = utils.get_file_locations('data_root')
param_df = pd.read_csv(folder+filename)
# only consider non-coastal locations?
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
    df = df[df['Salinity'] > 30.0]  # select locations with salinity > 30
print(df.shape)
# df = df[ df['Iodide'] < 300 ]
# Setup pdf
if save2pdf:
dpi = 320
savetitle = 'Chance2014_params_vs_recomputed_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Loop parameterisations
# for param in params[:2]: # Only loop two if debugging
for param in params:
# Get meta data for parameter
sub_df = param_df[param_df['TMS ID'] == param]
# Setup a new figure
fig = plt.figure()
# Extract Iodide and param data...
# Take logs of data?
iodide_var = 'Iodide'
try:
print(sub_df['ln(iodide)'].values[0])
if sub_df['ln(iodide)'].values[0] == 'yes':
iodide_var = 'ln(Iodide)'
print('Using log values for ', param)
else:
print('Not using log values for ', param)
except:
print('FAILED to try and use log data for ', param)
X = df[iodide_var].values
# And parameter data?
Y = df[param].values
# Remove nans...
tmp_df = pd.DataFrame(np.array([X, Y]).T, columns=['X', 'Y'])
print(tmp_df.shape)
tmp_df = tmp_df.dropna()
print(tmp_df.shape)
X = tmp_df['X'].values
Y = tmp_df['Y'].values
# PLOT UP as X vs. Y scatter...
title = '{} ({})'.format(param, sub_df['Independent variable'].values)
ax = mk_X_Y_scatter_plot_param_vs_iodide(X=X, Y=Y, title=title,
iodide_var=iodide_var)
# Add Chance2014's R^2 to plot...
try:
R2 = str(sub_df['R2'].values[0])
c = str(sub_df['c'].values[0])
m = str(sub_df['m'].values[0])
eqn = 'y={}x+{}'.format(m, c)
print(R2, c, m, eqn)
alt_text = 'Chance et al (2014) R$^2$'+':{} ({})'.format(R2, eqn)
ax.annotate(alt_text, xy=(0.5, 0.90), textcoords='axes fraction',
fontsize=10)
except:
print('FAILED to get Chance et al values for', param)
# plt.text( 0.75, 0.8, alt_text, ha='center', va='center')
# show/save?
if save2pdf:
# Save out figure
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
del fig
# save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
plt.close("all")
def mk_X_Y_scatter_plot_param_vs_iodide(X=None, Y=None, iodide_var=None,
title=None):
"""
    Plot up an X vs. Y plot for a parameterisation of iodide (Y) against observed iodide (X)
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Plot up
plt.scatter(X, Y, marker='+', alpha=0.5)
plt.title(title)
plt.ylabel('Param. [Iodide], nM')
plt.xlabel('Obs. [{}], nM'.format(iodide_var))
# Add a trendline
ax = plt.gca()
AC.Trendline(ax, X=X, Y=Y, color='green')
# Adjust x and y axis limits
round_max_X = AC.myround(max(X), 50, round_up=True)
round_max_Y = AC.myround(max(Y), 50, round_up=True)
if iodide_var == 'ln(Iodide)':
round_max_X = AC.myround(max(X), 5, round_up=True)
round_max_Y = AC.myround(max(Y), 5, round_up=True)
plt.xlim(-(round_max_X/40), round_max_X)
plt.ylim(-(round_max_Y/40), round_max_Y)
# Add an N value to plot
alt_text = '(N={})'.format(len(X))
ax.annotate(alt_text, xy=(0.8, 0.10),
textcoords='axes fraction', fontsize=10)
return ax
def compare_obs_ancillaries_with_extracted_values_WINDOW(dpi=320, df=None):
"""
Plot up a window plot of the observed vs. climatological ancillaries
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=0.75)
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
# units dict?
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# Colors to use
CB_color_cycle = AC.get_CB_color_cycle()
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# setup plot
fig = plt.figure(dpi=dpi, figsize=(5, 7.35))
    # - 1st plot pair: Salinity (all data, then >= 30 PSU)
    # - All data
var2plot = 'WOA_Salinity'
plot_n = 1
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# Plot up the data as a scatter
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# Label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# Title the plots
title = 'Salinity (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# Add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
    # - Only data with salinity >= 30 PSU
var2plot = 'WOA_Salinity'
plot_n = 2
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
    # Select only data with salinity >= 30 PSU
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] >= 30, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
    title = 'Salinity ($\geq$ 30, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = 29
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# --- Loop and plot
for n_var2plot, var2plot in enumerate(['WOA_TEMP', 'WOA_Nitrate', ]):
plot_n = 2 + 1 + n_var2plot
color = CB_color_cycle[plot_n]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
title = '{} ({})'.format(obs_var_dict[var2plot], units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add a line for orthogonal distance regression (ODR)
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
    # --- 2nd plot pair: Chlorophyll-a (all data, then <= 5 mg m$^{-3}$)
    # - All data
var2plot = 'SeaWIFs_ChlrA'
plot_n = 5
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
title = 'ChlrA (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
    # - Only data with ChlrA <= 5 mg m$^{-3}$
var2plot = 'SeaWIFs_ChlrA'
plot_n = 6
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
    # Select only data with ChlrA <= 5 mg m$^{-3}$
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] <= 5, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
units = units_dict[var2plot]
title = 'ChlrA ($\leq$5 {})'.format(units)
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# -- adjust figure and save
# Adjust plot
left = 0.075
right = 0.975
wspace = 0.05
hspace = 0.175
top = 0.95
bottom = 0.075
fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
wspace=wspace, hspace=hspace)
# Save
filename = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params_WINDOW'
plt.savefig(filename, dpi=dpi)
def compare_obs_ancillaries_with_extracted_values(df=None, save2pdf=True,
show=False, dpi=320):
"""
Some species in the dataframe have observed as well as climatology values.
For these species, plot up X/Y and latitudinal comparisons
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
# Dict of units for variables
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# sort dataframe by latitude
# df = df.sort_values('Latitude', axis=0, ascending=True)
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# Setup pdf
if save2pdf:
savetitle = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Get variables and confirm which datasets are being used for plot
dfs = {}
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# drop nans...
index2use = df[[obs_var_dict[key_], key_]].dropna().index
dfs[key_] = df.loc[index2use, :]
# Check which datasets are being used
ptr_str = 'For variable: {} (#={})- using: {} \n'
for key_ in vars_sorted:
datasets = list(set(dfs[key_]['Data_Key']))
dataset_str = ', '.join(datasets)
print(ptr_str.format(key_, len(datasets), dataset_str))
# - Loop variables and plot as a scatter plot...
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# new figure
fig = plt.figure()
# drop nans...
df_tmp = df[[obs_var_dict[key_], key_]].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Plot up
sns.regplot(x=obs_var_dict[key_], y=key_, data=df_tmp)
# Add title
plt.title('X-Y plot of {} (N={})'.format(obs_var_dict[key_], N_))
plt.ylabel('Extracted ({}, {})'.format(key_, units_dict[key_]))
plt.xlabel('Obs. ({}, {})'.format(
obs_var_dict[key_], units_dict[key_]))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# - Loop variables and plot verus lat (with difference)
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# New figure
fig = plt.figure()
# Drop nans...
df_tmp = df[[obs_var_dict[key_], key_, 'Latitude']].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Get data to analyse
obs = df_tmp[obs_var_dict[key_]].values
climate = df_tmp[key_].values
X = df_tmp['Latitude'].values
# Plot up
plt.scatter(X, obs, label=obs_var_dict[key_], color='red',
marker="o")
plt.scatter(X, climate, label=key_, color='blue',
marker="o")
plt.scatter(X, climate-obs, label='diff', color='green',
marker="o")
        # Aesthetics of the plot
plt.legend()
plt.xlim(-90, 90)
plt.ylabel('{} ({})'.format(obs_var_dict[key_], units_dict[key_]))
plt.xlabel('Latitude ($^{o}$N)')
plt.title('{} (N={}) vs. latitude'.format(obs_var_dict[key_], N_))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# Save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def plot_up_lat_STT_var(restrict_data_max=True, restrict_min_salinity=True):
"""
    Plot up a "pretty" plot of SST vs. latitude, with scatter size and colour by iodide.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
# Add modulus
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local vars
X_varname = "Latitude (Modulus)"
Y_varname = "WOA_TEMP"
S_varname = 'Iodide'
S_label = S_varname
C_varname = S_varname
# - plot
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.show()
def plot_up_lat_varI_varII(restrict_data_max=True, restrict_min_salinity=True):
"""
Plot up a "pretty" plot of STT vs Lat, with scatter sizes and color by var.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local variables
    # Variables to plot (hardwired here)
varI = 'Iodide'
varII = "WOA_TEMP"
# name local vars
X_varname = "Latitude (Modulus)"
Y_varname = varI
S_varname = varII
S_label = S_varname
C_varname = S_varname
# - plot up
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.ylim(-5, 500)
plt.show()
def plot_chance_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up chance et al (2014) param vs. data in DataFrame
"""
    # Only include finite data points for temperature
    # (NOTE: this cuts the dataset down to roughly a third of the obs. data)
    df = df[np.isfinite(df[X_var])]
    # Add a variable for the temperature-squared ($^{o}$C$^{2}$) fit
    Xvar2plot = X_var+'($^{2}$)'
    df[Xvar2plot] = df[X_var].loc[:].values**2
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
# Plot up
df.plot(kind='scatter', x=Xvar2plot, y=Y_var, ax=ax)
# Add a line of best fit reported param.
actual_data = df[Xvar2plot].values
test_data = np.linspace(AC.myround(actual_data.min()),
AC.myround(actual_data.max()), 20)
m = 0.225
c = 19.0
plt.plot(test_data, ((test_data*m)+c), color='green', ls='--',
label='Chance et al (2014) param.')
# Limit axis to data
plt.xlim(-50, AC.myround(df[Xvar2plot].values.max(), 1000))
plt.ylim(-20, AC.myround(df[Y_var].values.max(), 50, round_up=True))
# Add title and axis labels
N = actual_data.shape[0]
title = 'Linear param vs. {} (N={})'.format(data_str, N)
plt.title(title)
plt.xlabel(X_var + ' ($^{o}$C$^{2}$)')
plt.ylabel(Y_var + ' (nM)')
plt.legend(loc='upper left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'Chance_param_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
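# --- Illustrative sketch (not part of the original code) ---------------------
# A minimal standalone version of the Chance et al. (2014) linear fit plotted
# in plot_chance_param() above: iodide (nM) = 0.225 * SST(deg C)**2 + 19.0.
# The coefficients are the m and c values used above; the helper name is
# illustrative only.
def _chance2014_iodide_sketch(sst_degC):
    """Linear (T**2) iodide estimate in nM from SST in deg C (illustrative)."""
    sst_degC = np.asarray(sst_degC, dtype=float)
    return (sst_degC**2) * 0.225 + 19.0
# e.g. _chance2014_iodide_sketch(20.) -> 0.225*400 + 19 = 109 nM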
def plot_macdonald_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up MacDonald et al (2014) param vs. data in DataFrame
"""
    # Only include finite data points for temperature
    # (NOTE: this cuts the dataset down to roughly a third of the obs. data)
    df = df[np.isfinite(df[X_var])]
    # Add a variable for 1/T (temperature converted to Kelvin)
    Xvar2plot = '1/'+X_var
    df[Xvar2plot] = 1. / (df[X_var].loc[:].values+273.15)
Y_var2plot = 'ln({})'.format(Y_var)
df[Y_var2plot] = np.log(df[Y_var].values)
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind='scatter', x=Xvar2plot, y=Y_var2plot, ax=ax)
# Add a line of best fit reported param.
# (run some numbers through this equation... )
actual_data = df[X_var].values + 273.15
test_data = np.linspace(actual_data.min(), actual_data.max(), 20)
test_data_Y = 1.46E6*(np.exp((-9134./test_data))) * 1E9
plt.plot(1./test_data, np.log(test_data_Y),
color='green', ls='--', label='MacDonald et al (2014) param.')
# Limit axis to data
plt.xlim(df[Xvar2plot].values.min()-0.000025,
df[Xvar2plot].values.max()+0.000025)
plt.ylim(0, 7)
# Add title and axis labels
N = actual_data.shape[0]
title = 'Arrhenius param vs. {} (N={})'.format(data_str, N)
plt.title(title)
    plt.xlabel(Xvar2plot + ' (K$^{-1}$)')
plt.ylabel(Y_var2plot + ' (nM)')
plt.legend(loc='lower left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'MacDonald_parameterisation_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
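# --- Illustrative sketch (not part of the original code) ---------------------
# A minimal standalone version of the MacDonald et al. (2014) Arrhenius-type
# fit plotted in plot_macdonald_param() above:
#   iodide (nM) = 1.46E6 * exp(-9134 / T[K]) * 1E9
# The constants are those used in the function above; the helper name is
# illustrative only.
def _macdonald2014_iodide_sketch(sst_degC):
    """Arrhenius-type iodide estimate in nM from SST in deg C (illustrative)."""
    T_K = np.asarray(sst_degC, dtype=float) + 273.15
    return 1.46E6 * np.exp(-9134.0 / T_K) * 1E9
# e.g. _macdonald2014_iodide_sketch(20.) is roughly 43 nM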
def plot_current_parameterisations():
"""
Plot up a comparison of Chance et al 2014 and MacDonald et al 2014 params.
"""
# - Get obs and processed data
# get raw obs
raw_df = get_core_Chance2014_obs()
    # Only consider iodide values above 30 nM
raw_df = raw_df[raw_df['Iodide'] > 30.]
# - get processed obs.
pro_df = obs.get_processed_df_obs_mod()
restrict_data_max, restrict_min_salinity = True, True
if restrict_data_max:
# pro_df = pro_df[ pro_df['Iodide'] < 450. ] # used for July Oi! mtg.
# restrict below 400 (per. com. RJC)
pro_df = pro_df[pro_df['Iodide'] < 400.]
if restrict_min_salinity:
pro_df = pro_df[pro_df['WOA_Salinity'] > 30.]
# - Plots with raw obs.
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot up Chance
# plot_chance_param(df=raw_df.copy())
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=raw_df.copy())
    # - Plots with extracted variables
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot_chance_param(df=pro_df.copy(), data_str='Extracted data',
# X_var='WOA_TEMP')
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=pro_df.copy(), data_str='Extracted data',
X_var='WOA_TEMP')
# ---------------------------------------------------------------------------
# ---------------- Misc. Support for iodide project ------------------------
# ---------------------------------------------------------------------------
def explore_diferences_for_Skagerak():
"""
Explore how the Skagerak data differs from the dataset as a whole
"""
# - Get the observations and model output
folder = utils.get_file_locations('data_root')
filename = 'Iodine_obs_WOA_v8_5_1_ENSEMBLE_csv__avg_nSkag_nOutliers.csv'
dfA = pd.read_csv(folder+filename, encoding='utf-8')
# - Local variables
diffvar = 'Salinity diff'
ds_str = 'Truesdale_2003_I'
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There is also 'Nitrite' and 'Ammonium'
}
    # - Analysis / updates to DataFrames
    # Difference between the WOA salinity and the in-situ 'Salinity' column
    # (the in-situ column from obs_var_dict is assumed to be the intended one)
    dfA[diffvar] = dfA['WOA_Salinity'].values - dfA['Salinity'].values
# - Get just the Skagerak dataset
df = dfA.loc[dfA['Data_Key'] == ds_str]
prt_str = 'The general stats on the Skagerak dataset ({}) are: '
print(prt_str.format(ds_str))
# general stats on the iodide numbers
stats = df['Iodide'].describe()
for idx in stats.index.tolist():
vals = stats[stats.index == idx].values[0]
print('{:<10}: {:<10}'.format(idx, vals))
# - stats on the in-situ data
print('\n')
prt_str = 'The stats on the Skagerak ({}) in-situ ancillary obs. are: '
print(prt_str.format(ds_str))
# which in-situ variables are there
vals = df[obs_var_dict.values()].count()
prt_str = "for in-situ variable '{:<15}' there are N={} values"
for idx in vals.index.tolist():
vals2prt = vals[vals.index == idx].values[0]
print(prt_str.format(idx, vals2prt))
def check_numbers4old_chance_and_new_chance():
"""
Do checks on which datasets have changed between versions
"""
# - Get all observational data
NIU, md_df = obs.get_iodide_obs()
folder = '/work/home/ts551/data/iodide/'
filename = 'Iodide_data_above_20m_v8_5_1.csv'
df = pd.read_csv(folder+filename)
df = df[np.isfinite(df['Iodide'])] # remove NaNs
verOrig = 'v8.5.1'
NOrig = df.shape[0]
# Add the is chance flag to the dataset
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
# Where are the new iodide data points
newLODds = set(df.loc[df['ErrorFlag'] == 7]['Data_Key'])
prt_str = 'The new datasets from ErrorFlag 7 are in: {}'
print(prt_str.format(' , '.join(newLODds)))
# Versions with a different number of iodide values
filename = 'Iodide_data_above_20m_v8_2.csv'
df2 = pd.read_csv(folder + filename)
df2 = convert_old_Data_Key_names2new(df2) # Use data descriptor names
df2 = df2[np.isfinite(df2['Iodide'])] # remove NaNs
ver = '8.2'
prt_str = 'Version {} of the data - N={} (vs {} N={})'
print(prt_str.format(ver, df2.shape[0], verOrig, NOrig))
# Do analysis by dataset
for ds in list(set(md_df['Data_Key'])):
N0 = df.loc[df['Data_Key'] == ds, :].shape[0]
N1 = df2.loc[df2['Data_Key'] == ds, :].shape[0]
IsChance = list(set(df.loc[df['Data_Key'] == ds, ChanceStr]))[0]
prt_str = "DS: '{}' (Chance2014={}) has changed by {} to {} ({} vs. {})"
if N0 != N1:
print(prt_str.format(ds, IsChance, N0-N1, N0, verOrig, ver))
def get_numbers_for_data_paper():
"""
Get various numbers/analysis for data descriptor paper
"""
# - Get the full iodide sea-surface dataset
filename = 'Iodide_data_above_20m.csv'
folder = utils.get_file_locations('s2s_root')+'/Iodide/inputs/'
df = pd.read_csv(folder + filename, encoding='utf-8')
# Exclude non finite data points.
df = df.loc[np.isfinite(df['Iodide']), :]
# Save the full data set as .csv for use in Data Descriptor paper
cols2use = [
u'Data_Key', u'Data_Key_ID', 'Latitude', u'Longitude',
# u'\xce\xb4Iodide',
'Year',
# u'Month (Orig.)', # This is RAW data, therefore Month is observation one
u'Month',
'Day',
'Iodide', u'δIodide',
'ErrorFlag', 'Method', 'Coastal', u'LocatorFlag',
]
df = df[cols2use]
# Map references to final .csv from metadata
md_df = obs.get_iodide_obs_metadata()
col2use = u'Reference'
Data_keys = set(df['Data_Key'].values)
for Data_key in Data_keys:
# Get ref for dataset from metadata
bool_ = md_df[u'Data_Key'] == Data_key
REF = md_df.loc[bool_, :][col2use].values[0].strip()
# Add to main data array
bool_ = df[u'Data_Key'] == Data_key
df.loc[bool_, col2use] = REF
    # Round the reported values for output
df['Iodide'] = df['Iodide'].round(1).values
df[u'δIodide'] = df[u'δIodide'].round(1).values
df[u'Longitude'] = df[u'Longitude'].round(6).values
df[u'Latitude'] = df[u'Latitude'].round(6).values
# Now lock in values by settings to strings.
df[cols2use] = df[cols2use].astype(str)
# save the resultant file out
filename = 'Oi_prj_Iodide_obs_surface4DataDescriptorPaper.csv'
df.to_csv(filename, encoding='utf-8')
# Get number of samples of iodide per dataset
md_df = obs.get_iodide_obs_metadata()
md_df.index = md_df['Data_Key']
s = pd.Series()
Data_Keys = md_df['Data_Key']
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
s[Data_Key] = df_tmp.shape[0]
md_df['n'] = s
md_df.index = np.arange(md_df.shape[0])
md_df.to_csv('Oi_prj_metadata_with_n.csv', encoding='utf-8')
# Check sum for assignment?
prt_str = '# Assigned values ({}) should equal original DataFrame size:{}'
print(prt_str.format(md_df['n'].sum(), str(df.shape[0])))
# Get number of samples of iodide per obs. technique
Methods = set(df['Method'])
s_ds = pd.Series()
s_n = pd.Series()
for Method in Methods:
df_tmp = df.loc[df['Method'] == Method]
s_n[Method] = df_tmp.shape[0]
s_ds[Method] = len(set(df_tmp['Data_Key']))
# Combine and save
dfS = pd.DataFrame()
dfS['N'] = s_n
dfS['datasets'] = s_ds
dfS.index.name = 'Method'
# Reset index
index2use = [str(i) for i in sorted(pd.to_numeric(dfS.index))]
dfS = dfS.reindex(index2use)
dfS.to_csv('Oi_prj_num_in_Methods.csv', encoding='utf-8')
# Check sum on assignment of methods
prt_str = '# Assigned methods ({}) should equal original DataFrame size:{}'
print(prt_str.format(dfS['N'].sum(), str(df.shape[0])))
prt_str = '# Assigned datasets ({}) should equal # datasets: {}'
print(prt_str.format(dfS['datasets'].sum(), len(set(df['Data_Key']))))
# Check which methods are assign to each dataset
dfD = pd.DataFrame(index=sorted(set(df['Method'].values)))
S = []
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
methods_ = set(df_tmp['Method'].values)
dfD[Data_Key] = pd.Series(dict(zip(methods_, len(methods_)*[True])))
# Do any datasets have more than one method?
print('These datasets have more than one method: ')
print(dfD.sum(axis=0)[dfD.sum(axis=0) > 1])
def mk_PDF_plot_for_Data_descriptor_paper():
"""
Make a PDF plot for the data descriptor paper
"""
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# df = df.loc[df['Iodide'] <400, : ]
# split data into all, Coastal and Non-Coastal
dfs = {}
dfs['All'] = df.copy()
dfs['Coastal'] = df.loc[df['Coastal'] == 1, :]
dfs['Non-coastal'] = df.loc[df['Coastal'] != 1, :]
# if hist=True, use a count instead of density
hist = False
# Loop and plot
axlabel = '[I$^{-}_{aq}$] (nM)'
fig, ax = plt.subplots()
vars2plot = dfs.keys()
for key in vars2plot:
sns.distplot(dfs[key]['Iodide'].values, ax=ax,
axlabel=axlabel, label=key, hist=hist)
# force y axis extend to be correct
ax.autoscale()
# Add a legend
plt.legend()
# Add a label for the Y axis
plt.ylabel('Density')
# save plot
if hist:
savename = 'Oi_prj_Data_descriptor_PDF'
else:
savename = 'Oi_prj_Data_descriptor_PDF_just_Kernal'
    plt.savefig(savename+'.png', dpi=320)  # 320 dpi, as used elsewhere in this file
def mk_pf_files4Iodide_cruise(dfs=None, test_input_files=False,
mk_column_output_files=False, num_tracers=103):
"""
Make planeflight input files for iodide cruises
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# Test the input files?
if test_input_files:
test_input_files4Iodide_cruise_with_plots(dfs=dfs)
# Make planeflight files for DataFrames of cruises data (outputting columns values)
if mk_column_output_files:
# slist = ['O3', 'IO', 'BrO', 'CH2O']
slist = ['TRA_002', 'TRA_046', 'TRA_092', 'TRA_020', 'GLYX']
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
slist = slist + met_vars
for key_ in dfs.keys():
print(key_, dfs[key_].shape)
df = dfs[key_].dropna()
print(df.shape)
# add TYPE flag
df['TYPE'] = 'IDC'
# Grid box level centers [hPa]
alts_HPa = AC.gchemgrid('c_hPa_geos5_r')
# Loop and add in column values
dfs_all = []
for n_alt, hPa_ in enumerate(alts_HPa):
print(hPa_, n_alt)
df_ = df.copy()
df_['PRESS'] = hPa_
dfs_all += [df_]
df = pd.concat(dfs_all)
# make sure rows are in date order
df.sort_values(['datetime', 'PRESS'], ascending=True, inplace=True)
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
# Make planeflight files for DataFrames of cruises data
# (outputting surface values)
else:
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
species = ['OH', 'HO2', 'GLYX']
slist = slist + species + met_vars
for key_ in dfs.keys():
print(key_)
df = dfs[key_].dropna()
# add TYPE flag
df['TYPE'] = 'IDS'
            # Use a nominal surface pressure [hPa] for the surface output
            df['PRESS'] = 1013.0
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
def test_input_files4Iodide_cruise_with_plots(dfs=None, show=False):
""""
Plot up maps of iodide cruise routes
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# - Test input files
# file to save?
savetitle = 'GC_pf_input_iodide_cruises'
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
vars2test = ['LON', 'LAT']
for key_ in dfs.keys():
df = dfs[key_]
for var_ in vars2test:
# -- Plot X vs Y plot
df_tmp = df[['datetime', var_]]
# calc NaNs
VAR_dropped_N = int(df_tmp.shape[0])
df_tmp = df_tmp.dropna()
VAR_N_data = int(df_tmp.shape[0])
VAR_dropped_N = VAR_dropped_N-VAR_N_data
# plot
df_tmp.plot(x='datetime', y=var_)
#
title = "Timeseries of '{}' for '{}'".format(var_, key_)
title += ' (ALL N={}, exc. {} NaNs)'.format(VAR_N_data,
VAR_dropped_N)
plt.title(title)
# Save / show
file2save_str = 'Iodide_input_file_{}_check_{}.png'.format(
key_, var_)
plt.savefig(file2save_str)
if show:
plt.show()
print(df_tmp[var_].describe())
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# -- Plot up cruise track as map
del df_tmp
df_tmp = df.dropna()
lons = df_tmp['LON'].values
lats = df_tmp['LAT'].values
title = "Cruise track for '{}'".format(key_)
print('!'*100, 'plotting map for: ', key_)
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats, title=title)
plt.ylim(AC.myround(lats.min()-20, 10, ),
AC.myround(lats.max()+20, 10, round_up=True))
plt.xlim(AC.myround(lons.min()-20, 10, ),
AC.myround(lons.max()+20, 10, round_up=True))
if show:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_iodide_cruise_data_from_Anoop_txt_files(verbose=False):
"""
Get observational data and locations from Anoop's txt files
"""
# - Local variables
folder = utils.get_file_locations('data_root')
folder += 'LOCS_Inamdar_Mahajan_cruise_x3/'
cruise_files = {
# 1 8th Southern Ocean Expedition (SOE-8), possibly on the RV Sagar Nidhi
# 'Iodide1': 'cruise1_2014.xlsx',
'SOE-8': 'cruise1_2014.xlsx',
        # 2 2nd International Indian Ocean Expedition (IIOE-2),
# possibly one of several cruises in this program
# (IIOE-1 was decades ago). On board RV Sagar Nidhi.
# 'Iodide2': 'cruise2_2015.xlsx',
'IIOE-1': 'cruise2_2015.xlsx',
        # 3 9th Southern Ocean Expedition (SOE-9); the cruise on which
        # Liselotte Tinel took samples, on board RV Agulhas.
# 'Iodide3': 'cruise3_2016.xlsx',
'SOE-9': 'cruise3_2016.xlsx',
}
# - Extract data
dfs = {}
for cruise_name in cruise_files.keys():
print('Extracting: ', cruise_name, cruise_files[cruise_name])
# cruise_name = cruise_files.keys()[0]
df = pd.read_excel(folder+cruise_files[cruise_name])
names_dict = {
'Date': 'date', 'UTC': 'date', 'time (UTC)': 'time', 'lat': 'LAT',
'lon': 'LON'
}
if verbose:
print(df.head())
df.rename(columns=names_dict, inplace=True)
if verbose:
print(df.head())
# convert dates to datetime
# def _convert_datetime(x):
# return (270-atan2(x['date'],x['GMAO_UWND'])*180/pi)%360
# df['datetime'] = df.apply( f, axis=1)
df['datetime'] = df['date'].astype(str)+' '+df['time'].astype(str)
df['datetime'] = pd.to_datetime(df['datetime'])
df.index = df['datetime'].values
if verbose:
print(df.head())
dfs[cruise_name] = df[['datetime', 'LON', 'LAT']]
return dfs
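# --- Illustrative sketch (not part of the original code) ---------------------
# Minimal example of the date + time combination used above, assuming separate
# string-like 'date' and 'time' columns; the column values here are made up.
def _combine_date_time_sketch():
    df = pd.DataFrame({'date': ['2014-12-01'], 'time': ['06:30:00']})
    df['datetime'] = pd.to_datetime(df['date'].astype(str)+' '+df['time'].astype(str))
    return df
# _combine_date_time_sketch()['datetime'][0] -> Timestamp('2014-12-01 06:30:00')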
def TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False):
"""
Process, plot (test values), then save planeflight values to csv
"""
# Local variables
wd = '/scratch/ts551/GC/v10-01_HAL/'
files_dict = {
'SOE-8': wd+'run.ClBr.Iodide2015.SOE-8',
'IIOE-1': wd+'run.ClBr.Iodide2016.IIOE-1',
'SOE-9': wd+'run.ClBr.Iodide2017.SOE-9',
}
# Test surface output
if just_process_surface_data:
extra_str = 'surface'
dfs = {}
for key_ in files_dict.keys():
wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=wd)
dfs[key_] = df
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(key_, extra_str))
# Save the output as .csv
for key_ in dfs.keys():
savetitle = 'GC_planeflight_compiled_output_for_{}_{}.csv'
savetitle = savetitle.format(key_, extra_str)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
dfs[key_].to_csv(savetitle)
# - Process the output files for column values
else:
specs = ['O3', 'BrO', 'IO', 'CH2O']
extra_str = 'column'
dfs = {}
file_str = 'GC_planeflight_compiled_output_for_{}_{}_II.csv'
for key_ in files_dict.keys():
# for key_ in ['IIOE-1']:
print(key_)
pf_wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=pf_wd)
# now process to column values
df = process_planeflight_column_files(wd=files_dict[key_], df=df)
dfs[key_] = df
# Save the output as .csv
savetitle = file_str.format(key_, extra_str)
df['datetime'] = df.index
df.to_csv(AC.rm_spaces_and_chars_from_str(savetitle))
# Test plots?
for key_ in files_dict.keys():
savetitle = file_str.format(key_, extra_str)
df = pd.read_csv(AC.rm_spaces_and_chars_from_str(savetitle))
df.index = pd.to_datetime(df['datetime'])
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(
key_, extra_str),
specs=specs, units='molec cm$^{-2}$',
scale=1)
def process_planeflight_column_files(wd=None, df=None, res='4x5', debug=False):
"""
Process column of v/v values into single values for total column
"""
# wd=files_dict[key_]; df = dfs[ key_ ]; res='4x5'
specs = ['O3', u'BrO', u'IO', u'CH2O', u'GLYX']
timestamps = list(sorted(set(df.index)))
timestamps_with_duplicates = []
RMM_air = AC.constants('RMM_air')
AVG = AC.constants('AVG')
specs = ['O3', 'BrO', 'IO', 'CH2O']
# get lon lat array of time in troposphere
TPS = AC.get_GC_output(wd=wd+'/', vars=['TIME_TPS__TIMETROP'],
trop_limit=True)
    # Convert to a mask: boxes fully in the troposphere (==1) -> False, else True
TPS[TPS != 1] = 9999.9
TPS[TPS == 1] = False
TPS[TPS == 9999.9] = True
# And dates
CTM_DATES = AC.get_gc_datetime(wd=wd+'/')
CTM_months = np.array([i.month for i in CTM_DATES])
    # EPOCH = datetime.datetime(1970,1,1)
# CTM_EPOCH = np.array([ (i.month-EPOCH).total_seconds() for i in CTM_DATES ])
# Also get grid of surface area ( m^2 ) and convert to cm2
S_AREA = AC.get_surface_area(res=res) * 10000
A_M = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], trop_limit=True,
dtype=np.float64)
# VOL = AC.get_volume_np( wd=wd, res=res, s_area=S_AREA[...,None])
big_data_l = []
dates = []
# for ts in timestamps[::1000]: # Test processing on first 1000 points
n_timestamps = len(timestamps)
for n_ts, ts in enumerate(timestamps):
print('progress= {:.3f} %'.format((float(n_ts) / n_timestamps)*100.))
tmp_df = df.loc[df.index == ts]
if debug:
print(ts, tmp_df.shape)
# List of pressures (one set = 47 )
PRESS_ = tmp_df['PRESS'].values
        # Special case for when there is more than one column set
        # for a timestamp
# assert( len(PRESS) == 47 )
if len(PRESS_) != 47:
timestamps_with_duplicates += [ts]
prt_str = 'WARNING: DOUBLE UP IN TIMESTEP:{} ({}, shape={})'
print(prt_str.format(ts, len(PRESS_), tmp_df.shape))
print('Just using 1st 47 values')
tmp_df = tmp_df[0:47]
dates += [ts]
else:
dates += [ts]
# Now reverse data (as outputted from highest to lowest)
tmp_df = tmp_df.loc[::-1]
        # select every other value?
        # Get the lon and lat of the sampled location
LAT_ = tmp_df['LAT'].values
LON_ = tmp_df['LON'].values
# check there is only one lat and lon
assert len(set(LAT_)) == 1
assert len(set(LON_)) == 1
# - Select 3D vars from ctm.nc file
# get LON, LAT index of box
LON_ind = AC.get_gc_lon(LON_[0], res=res)
LAT_ind = AC.get_gc_lat(LAT_[0], res=res)
# time_ind = AC.find_nearest( CTM_EPOCH, (ts-EPOCH).total_seconds() )
time_ind = AC.find_nearest(CTM_months, ts.month)
        # Time in troposphere diagnostic ('TIME_TPS__TIMETROP')
TPS_ = TPS[LON_ind, LAT_ind, :, time_ind]
# Select surface area of grid box
S_AREA_ = S_AREA[LON_ind, LAT_ind, 0]
        # Compute the column for each species
A_M_ = A_M[LON_ind, LAT_ind, :, time_ind]
# Number of molecules per grid box
MOLECS_ = (((A_M_*1E3) / RMM_air) * AVG)
# Extract for species
data_l = []
for spec in specs:
# Get species in v/v
data_ = tmp_df[spec].values
# Mask for troposphere
data_ = np.ma.array(data_[:38], mask=TPS_)
# Get number of molecules
data_ = (data_ * MOLECS_).sum()
# Convert to molecs/cm2
data_ = data_ / S_AREA_
# Store data
data_l += [data_]
# Save location
data_l += [LON_[0], LAT_[0]]
# Save data for all specs
big_data_l += [data_l]
# Convert to DataFrame.
df_col = pd.DataFrame(big_data_l)
df_col.index = dates # timestamps[::1000]
df_col.columns = specs + ['LON', 'LAT']
print(df_col.shape)
return df_col
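# --- Illustrative sketch (not part of the original code) ---------------------
# The v/v -> molecules cm-2 column conversion done per timestamp above, as a
# standalone helper for a single profile.  RMM_air (g/mol) and AVG (Avogadro's
# number) mirror the AC.constants() values used above; the defaults here are
# standard values and the helper name is illustrative only.
def _column_from_vmr_sketch(vmr, air_mass_kg, surface_area_cm2,
                            RMM_air=28.97, AVG=6.022E23):
    """Integrate a mixing ratio profile (v/v) to a column (molecules cm-2)."""
    vmr = np.asarray(vmr, dtype=float)
    air_mass_g = np.asarray(air_mass_kg, dtype=float) * 1E3
    molecs_per_box = (air_mass_g / RMM_air) * AVG  # molecules of air per box
    return float((vmr * molecs_per_box).sum() / surface_area_cm2)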
def process_planeflight_files(wd=None):
"""
Process planeflight files to pd.DataFrame
"""
import glob
import seaborn as sns
sns.set_context("paper", font_scale=0.75)
# Get planeflight data
files = glob.glob(wd+'plane.log.*')
print(wd, len(files), files[0])
names, POINTS = AC.get_pf_headers(files[0])
dfs = [AC.pf_csv2pandas(file=i, vars=names) for i in files]
df = pd.concat(dfs)
# Rename axis
TRA_XXs = [i for i in df.columns if ('TRA_' in i)]
TRA_dict = dict(
zip(TRA_XXs, [v10_ClBrI_TRA_XX_2_name(i) for i in TRA_XXs]))
df.rename(columns=TRA_dict, inplace=True)
return df
def get_test_plots_surface_pf_output(wd=None, name='Planeflight',
df=None, specs=None, units=None, scale=1,
show_plot=False):
"""
    Test model output at the surface for the Indian Ocean ship cruises
"""
import seaborn as sns
sns.set(color_codes=True)
# Get data
if isinstance(df, type(None)):
        df = process_planeflight_files(wd=wd)
# Now add summary plots
dpi = 320
savetitle = 'GC_planeflight_summary_plots_for_{}_V'.format(name)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=True)
# Locations outputted for?
title = 'Locations of {} output'.format(name)
fig, ax = plt.subplots()
AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['LON'].values, lats=df['LAT'].values,
fig=fig, ax=ax)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
# Timeseries of key species
if isinstance(specs, type(None)):
key_spec = ['O3', 'NO', 'NO2', 'OH', 'HO2', 'IO', 'BrO']
extras = ['SO4', 'DMS', 'CH2O', ]
species = ['OH', 'HO2', 'GLYX']
specs = key_spec + extras + species
specs += ['LON', 'LAT']
met = ['GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP',
'GMAO_UWND', 'GMAO_VWND']
specs += met
print(specs)
for spec in specs:
fig, ax = plt.subplots()
if isinstance(units, type(None)):
units, scale = AC.tra_unit(spec, scale=True)
try:
spec_LaTeX = AC.latex_spec_name(spec)
except:
spec_LaTeX = spec
print(spec, units, spec_LaTeX, scale)
dates = pd.to_datetime(df.index).values
plt.plot(dates, df[spec].values*scale)
plt.ylabel('{} ({})'.format(spec, units))
title_str = "Timeseries of modelled '{}' during {}"
plt.title(title_str.format(spec_LaTeX, name))
plt.xticks(rotation=45)
plt.subplots_adjust(bottom=0.15)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=True)
def mk_data_files4Indian_seasurface_paper(res='0.125x0.125'):
"""
    Make data files for the Indian Ocean sea-surface iodide paper
"""
AreasOfInterest = {
'SubT_NA': ('NASW', 'NATR', 'NASE', ),
'SubT_SA': ('SATL',),
'SubT_NP': (u'NPSW', 'NPTG'),
'SubT_SP': ('SPSG',),
'SubT_SI': ('ISSG',),
}
AreasOfInterest_Names = AreasOfInterest.copy()
# Get dictionaries of province numbers and names
num2prov = LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
MRnum2prov = MarineRegionsOrg_LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
Rnum2prov = RosieLonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
# Convert regions to the LP numbers
PrtStr = "{} = Requested province: {} - R's #={}, MIT(GitHub) #={}, LH(2010) #={}"
for key_ in AreasOfInterest.keys():
for a_ in AreasOfInterest[key_]:
print(PrtStr.format(
key_, a_, Rnum2prov[a_], num2prov[a_], MRnum2prov[a_]))
nums = [MRnum2prov[i] for i in AreasOfInterest[key_]]
AreasOfInterest[key_] = nums
# - Get data all together
Filename = 'Oi_prj_predicted_iodide_0.125x0.125_No_Skagerrak_WITH_Provinces.nc'
# folder = '/work/home/ts551/data/iodide/'
folder = './'
ds = xr.open_dataset(folder + Filename)
params = ['Chance2014_STTxx2_I',
'MacDonald2014_iodide', 'Ensemble_Monthly_mean']
vars2use = params + ['LonghurstProvince']
ds = ds[vars2use]
# Also add the features of interest
Filename = 'Oi_prj_feature_variables_0.125x0.125_WITH_Provinces.nc'
ds2 = xr.open_dataset(folder + Filename)
vars2add = ['WOA_MLDpt', 'WOA_Nitrate', 'WOA_TEMP', 'WOA_Salinity']
for var in vars2add:
ds[var] = ds2[var]
# Add axis X/Y assignment
attrs = ds['lat'].attrs
attrs["axis"] = 'Y'
ds['lat'].attrs = attrs
attrs = ds['lon'].attrs
attrs["axis"] = 'X'
ds['lon'].attrs = attrs
# - Now extract the data and check the locations being extracted
# Make files with the data of interest.
file_str = 'Oi_OS_Longhurst_provinces_{}_{}_{}.{}'
for key_ in AreasOfInterest.keys():
nums = AreasOfInterest[key_]
ds_tmp = ds.where(np.isin(ds.LonghurstProvince.values, nums))
# - Plot a diagnostic figure
fig, ax = plt.subplots()
ds_tmp['LonghurstProvince'].mean(dim='time').plot(ax=ax)
# get names and numbers of assigned areas
Names = AreasOfInterest_Names[key_]
nums = [str(i) for i in AreasOfInterest[key_]]
# Add a title
nums = [str(i) for i in nums]
title = "For '{}' ({}), \n plotting #(s)={}"
title = title.format(key_, ', '.join(Names), ', '.join(nums))
plt.title(title)
# Save to png
png_filename = file_str.format(key_, '', res, 'png')
        plt.savefig(png_filename, dpi=320)  # 320 dpi, as used elsewhere in this file
plt.close()
# - What is the area extent of the data
var2use = 'WOA_Nitrate'
ds_lat = ds_tmp[var2use].dropna(dim='lat', how='all')
min_lat = ds_lat['lat'].min() - 2
max_lat = ds_lat['lat'].max() + 2
ds_lon = ds_tmp[var2use].dropna(dim='lon', how='all')
min_lon = ds_lon['lon'].min() - 2
max_lon = ds_lon['lon'].max() + 2
# - Now save by species
vars2save = [i for i in ds_tmp.data_vars if i != 'LonghurstProvince']
for var_ in vars2save:
print(var_)
da = ds_tmp[var_]
# select the minimum area for the areas
da = da.sel(lat=(da.lat >= min_lat))
da = da.sel(lat=(da.lat < max_lat))
            if key_ in ('SubT_NP', 'SubT_SP'):
print('just limiting lat for: {}'.format(key_))
else:
da = da.sel(lon=(da.lon >= min_lon))
da = da.sel(lon=(da.lon < max_lon))
# Save the data to NetCDF.
filename = file_str.format(key_, var_, res, '')
filename = AC.rm_spaces_and_chars_from_str(filename)
da.to_netcdf(filename+'.nc')
# ---------------------------------------------------------------------------
# --------------- Functions for Atmospheric impacts work -------------------
# ---------------------------------------------------------------------------
def Do_analysis_and_mk_plots_for_EGU19_poster():
"""
Driver function for analysis and plotting for EGU poster
"""
# - Get data
# data locations and names as a dictionary
wds = get_run_dict4EGU_runs()
runs = list(sorted(wds.keys()))
# Get emissions
dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets(wds=wds)
# Process the datasets?
# a = [ AC.get_O3_burden( wd=wds[i] ) for i in runs ]
# Get datasets objects from directories and in a dictionary
dsD = {}
for run in runs:
ds = xr.open_dataset(wds[run]+'ctm.nc')
dsD[run] = ds
# - Do analysis
# Get summary emission stats
Check_global_statistics_on_emissions(dsDH=dsDH)
# Look at differences in surface concentration.
extra_str = 'EGU_runs_surface_Iy_stats_'
df = evalulate_burdens_and_surface_conc(run_dict=wds, extra_str=extra_str)
    # Get general statistics about the runs vs. the MacDonald et al 2014 run
REF1 = 'Macdonald2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the runs vs. the Chance et al 2014 run
REF1 = 'Chance2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the runs vs. the ML_Iodide run
REF1 = 'ML_Iodide'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the runs vs. the No_HOI_I2 run
REF1 = 'No_HOI_I2'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
# - Get spatial plots
# plot up emissions
plot_up_surface_emissions(dsDH=dsDH)
    # - Do difference plots
# - look at the HOI/I2 surface values and IO.
# species to look at?
specs = ['O3', 'NO2', 'IO', 'HOI', 'I2']
# Chance vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Chance2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. Chance
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='Chance2014', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='No_HOI_I2', specs=specs,
update_PyGChem_format2COARDS=True)
# ML_iodide vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='No_HOI_I2',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# ds_dict=dsD.copy(); BASE='Macdonald2014'; NEW='ML_Iodide'
# - Get production figures.
# surface ozone figure - made in powerpoint for now...
# Plot up emissions for EGU presentation
BASE = 'ML_Iodide'
DIFF1 = 'Chance2014'
DIFF2 = 'Macdonald2014'
plot_up_EGU_fig05_emiss_change(ds_dict=dsD, BASE=BASE, DIFF1=DIFF1, DIFF2=DIFF2,
update_PyGChem_format2COARDS=True)
def plot_up_EGU_fig05_emiss_change(ds_dict=None, levs=[1], specs=[],
BASE='', DIFF1='', DIFF2='', prefix='IJ_AVG_S__',
update_PyGChem_format2COARDS=False):
"""
Plot up the change in emissions for EGU poster
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Species to plot
vars2use = [prefix+i for i in specs]
unit = None
PDFfilenameStr = 'Oi_surface_change_{}_vs_{}_lev_{:0>2}'
    # Set datasets to use and just include the variables to plot in the dataset
    title1 = BASE
    title2 = DIFF1
    title3 = DIFF2
    ds1 = ds_dict[BASE][vars2use].copy()
    ds2 = ds_dict[DIFF1][vars2use].copy()
    ds3 = ds_dict[DIFF2][vars2use].copy()
    # Average over time
    print(ds1, ds2, ds3)
    ds1 = ds1.mean(dim='time')
    ds2 = ds2.mean(dim='time')
    ds3 = ds3.mean(dim='time')
# Remove vestigial coordinates.
# (e.g. the time_0 coord... what is this?)
vars2drop = ['time_0']
dsL = [ds1, ds2, ds3]
for var2drop in vars2drop:
for n, ds in enumerate(dsL):
CoordVars = [i for i in ds.coords]
if var2drop in CoordVars:
ds = ds.drop(var2drop)
dsL[n] = ds
ds1, ds2, ds3 = dsL
# Update dimension names
if update_PyGChem_format2COARDS:
ds1 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds1)
ds2 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds2)
ds3 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds3)
# Setup plot
# plot up map with mask present
fig = plt.figure(figsize=(10, 6))
vmin = -100
vmax = 100
    # Add initial plot
    axn = [1, 1, 1]
    ax = fig.add_subplot(*axn, projection=ccrs.Robinson(), aspect='auto')
    # Plot the change in the first requested species (NOTE: the original call
    # to ax.plot.imshow is not valid; plotting the percentage change of BASE
    # relative to DIFF2 is assumed here)
    var2plot = vars2use[0]
    pcent = (ds1[var2plot] - ds3[var2plot]) / ds3[var2plot] * 100
    pcent.plot.imshow(x='lon', y='lat', ax=ax, vmin=vmin, vmax=vmax,
                      transform=ccrs.PlateCarree())
    # The save name was not defined in the original; the PDF filename template
    # defined above is assumed
    savename = PDFfilenameStr.format(title1, title3, levs[0])
    plt.title(savename)
    plt.savefig(savename+'.png')
plt.close()
def evalulate_burdens_and_surface_conc(run_dict=None, extra_str='', REF1=None,
REF2=None, REF_wd=None, res='4x5', trop_limit=True,
save2csv=True, prefix='GC_', run_names=None,
debug=False):
"""
    Evaluate tropospheric burdens and surface concentrations for the CTM model runs
"""
# Extract names and locations of data
if isinstance(run_dict, type(None)):
run_dict = get_run_dict4EGU_runs()
if isinstance(run_names, type(None)):
run_names = sorted(run_dict.keys())
wds = [run_dict[i] for i in run_names]
# Mass unit scaling
mass_scale = 1E3
mass_unit = 'Tg'
# v/v scaling?
ppbv_unit = 'ppbv'
ppbv_scale = 1E9
pptv_unit = 'pptv'
pptv_scale = 1E12
# Get shared variables from a single model run
if isinstance(REF_wd, type(None)):
REF_wd = wds[0]
# get time in the troposphere diagnostic
t_p = AC.get_GC_output(wd=REF_wd, vars=[u'TIME_TPS__TIMETROP'],
trop_limit=True)
# Temperature
K = AC.get_GC_output(wd=REF_wd, vars=[u'DAO_3D_S__TMPU'], trop_limit=True)
# airmass
a_m = AC.get_air_mass_np(wd=REF_wd, trop_limit=True)
# Surface area?
s_area = AC.get_surface_area(res)[..., 0] # m2 land map
# ----
# - Now build analysis in pd.DataFrame
#
# - Tropospheric burdens?
# Get tropospheric burden for run
varname = 'O3 burden ({})'.format(mass_unit)
ars = [AC.get_O3_burden(i, t_p=t_p).sum() for i in wds]
df = pd.DataFrame(ars, columns=[varname], index=run_names)
# Get NO2 burden
NO2_varname = 'NO2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO2')*AC.species_mass('N') for i in ars]
df[NO2_varname] = ars
# Get NO burden
NO_varname = 'NO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO')*AC.species_mass('N') for i in ars]
df[NO_varname] = ars
# Combine NO and NO2 to get NOx burden
NOx_varname = 'NOx burden ({})'.format(mass_unit)
df[NOx_varname] = df[NO2_varname] + df[NO_varname]
# Get HOI burden
varname = 'HOI burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='HOI', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('HOI')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Get I2 burden
varname = 'I2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='I2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('I2')*AC.species_mass('I') for i in ars]
df[varname] = ars
    # Get IO burden
varname = 'IO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='IO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('IO')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Scale units
for col_ in df.columns:
if 'Tg' in col_:
df.loc[:, col_] = df.loc[:, col_].values/mass_scale
# - Surface concentrations?
# Surface ozone
O3_sur_varname = 'O3 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='O3', wd=i, s_area=s_area)
for i in wds]
df[O3_sur_varname] = ars
# Surface NOx
NO_sur_varname = 'NO surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO', wd=i, s_area=s_area)
for i in wds]
df[NO_sur_varname] = ars
NO2_sur_varname = 'NO2 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO2', wd=i, s_area=s_area)
for i in wds]
df[NO2_sur_varname] = ars
NOx_sur_varname = 'NOx surface ({})'.format(ppbv_unit)
df[NOx_sur_varname] = df[NO2_sur_varname] + df[NO_sur_varname]
# Surface HOI
HOI_sur_varname = 'HOI surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='HOI', wd=i, s_area=s_area)
for i in wds]
df[HOI_sur_varname] = ars
# Surface I2
I2_sur_varname = 'I2 surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='I2', wd=i, s_area=s_area)
for i in wds]
df[I2_sur_varname] = ars
    # Surface IO
    IO_sur_varname = 'IO surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='IO', wd=i, s_area=s_area)
           for i in wds]
    df[IO_sur_varname] = ars
# - Scale units
for col_ in df.columns:
if 'ppbv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*ppbv_scale
if 'pptv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*pptv_scale
# - Processing and save?
# Calculate % change from base case for each variable
if not isinstance(REF1, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF1)
df[pcent_var] = (df[col_]-df[col_][REF1]) / df[col_][REF1] * 100
if not isinstance(REF2, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF2)
df[pcent_var] = (df[col_]-df[col_][REF2]) / df[col_][REF2] * 100
    # Re-order columns
    df = df.reindex(sorted(df.columns), axis=1)
    # Re-order index
    df = df.T.reindex(sorted(df.T.columns), axis=1).T
# Now round the numbers
df = df.round(3)
# Save csv to disk
csv_filename = '{}_summary_statistics{}.csv'.format(prefix, extra_str)
df.to_csv(csv_filename)
# return the DataFrame too
return df
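# --- Illustrative sketch (not part of the original code) ---------------------
# The N- and I-equivalent conversions above all follow the same pattern: scale
# a species burden by the ratio of atom to molecule molar mass.  Molar masses
# (g/mol) are supplied by the caller (AC.species_mass() provides them above);
# the helper name is illustrative only.
def _atom_equivalent_burden_sketch(burden, species_molar_mass, atom_molar_mass):
    """Convert a species burden to its atom-equivalent burden (same units)."""
    return burden / species_molar_mass * atom_molar_mass
# e.g. _atom_equivalent_burden_sketch(1.0, 46.0, 14.0) -> ~0.30 (Tg N per Tg NO2)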
def Check_sensitivity_of_HOI_I2_param2WS():
"""
Check the sensitivity of the Carpenter et al 2013 parameterisation to wind speed
"""
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper", font_scale=1.75)
import matplotlib.pyplot as plt
# Core calculation for HOI emission
def calc_HOI_flux_eqn_20(I=None, O3=None, WS=None, ):
""" Eqn 20 from Carpenter et al 2013 """
return O3 * ((4.15E5 * (np.sqrt(I) / WS)) -
(20.6 / WS) - (2.36E4 * np.sqrt(I)))
# Slightly simpler calculation for HOI emission
def calc_HOI_flux_eqn_21(I=None, O3=None, WS=None, ):
""" Eqn 21 from Carpenter et al 2013 """
return O3 * np.sqrt(I) * ((3.56E5/WS) - 2.16E4)
# Plot up values for windspeed
WS_l = np.arange(5, 40, 0.1)
# - plot up
# Eqn 20
Y = [calc_HOI_flux_eqn_20(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 20')
# Eqn 21
Y = [calc_HOI_flux_eqn_21(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 21')
# Update aesthetics of plot and save
    plt.title('HOI flux vs. wind speed')
    plt.ylabel('HOI flux, nmol m$^{-2}$ d$^{-1}$')
    plt.xlabel('Wind speed (m s$^{-1}$)')
plt.legend()
plt.show()
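# --- Illustrative check (not part of the original code) ----------------------
# Standalone versions of the two Carpenter et al. (2013) HOI flux expressions
# plotted in Check_sensitivity_of_HOI_I2_param2WS() above, so the two can be
# compared for a single case; the default inputs (iodide = 100 nM, O3 = 20 ppb,
# wind speed = 10 m/s) are illustrative only.
def _carpenter2013_HOI_flux_sketch(I=100E-9, O3=20., WS=10.):
    """Return the (eqn 20, eqn 21) HOI fluxes for one set of inputs."""
    eqn20 = O3 * ((4.15E5 * (np.sqrt(I) / WS)) - (20.6 / WS) - (2.36E4 * np.sqrt(I)))
    eqn21 = O3 * np.sqrt(I) * ((3.56E5 / WS) - 2.16E4)
    return eqn20, eqn21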
if __name__ == "__main__":
main()
# === Next file: server_code.py | johnr0/TaleBrush-backend | BSD-3-Clause ===
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from flask_cors import CORS, cross_origin
import torch
import json
import numpy as np
from modeling_gptneo import GPTNeoForCausalLM
from modeling_gpt2 import GPT2LMHeadModel
from transformers import (
GPTNeoConfig,
GPT2Config,
GPT2Tokenizer
)
import transformers
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
### Loading the model
code_desired = "true"
code_undesired = "false"
model_type = 'gpt2'
gen_type = "gedi"
gen_model_name_or_path = "EleutherAI/gpt-neo-2.7B"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MODEL_CLASSES = {"gptneo": (GPTNeoConfig, GPTNeoForCausalLM, GPT2Tokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),}
config_class_n, model_class_n, tokenizer_class_n = MODEL_CLASSES["gptneo"]
config_class_2, model_class_2, tokenizer_class_2 = MODEL_CLASSES["gpt2"]
tokenizer = tokenizer_class_n.from_pretrained('EleutherAI/gpt-neo-2.7B', do_lower_case=False, additional_special_tokens=['[Prompt]'])
model = model_class_n.from_pretrained(gen_model_name_or_path, load_in_half_prec=True)
model = model.to(device)
model = model.float()
model.config.use_cache=True
model.resize_token_embeddings(len(tokenizer))
gedi_model_name_or_path = 'fortune_gedi'
gedi_model = model_class_2.from_pretrained(gedi_model_name_or_path)
gedi_model.to(device)
gedi_model.resize_token_embeddings(len(tokenizer))
gedi_model.resize_token_embeddings(50258)
wte = gedi_model.get_input_embeddings()
wte.weight.requires_grad=False
wte.weight[len(tokenizer)-1, :]= wte.weight[len(tokenizer)-2, :]
gedi_model.set_input_embeddings(wte)
embed_cont = torch.load('./result_embedding_cont')
embed_infill_front = torch.load('./result_embedding_infill_front')
embed_infill_back = torch.load('./result_embedding_infill_back')
embed_recognition = torch.load('./result_embedding_recognition')
recognition_score = torch.load('./recog_score')
model.set_input_embeddings(embed_cont.wte)
# setting arguments for generation
#max generation length
gen_length = 40
#omega from paper, higher disc_weight means more aggressive topic steering
disc_weight = 30
# 1 - rho from the paper; should be between 0 and 1, and higher filter_p means more aggressive topic steering
filter_p = 0.8
#tau from paper, preserves tokens that are classified as correct topic
target_p = 0.8
#hyperparameter that determines class prior, set to uniform by default
class_bias = 0
if gen_length>1024:
length = 1024
else:
length = gen_length
def cut_into_sentences(text, do_cleanup=True):
"""
Cut text into sentences. \n are also regarded as a sentence.
:param do_cleanup: if True, do cleanups.
:param text: input text.
:return: sentences.
"""
all_sentences = []
# print(text)
# sentences_raw = text.split("\n")
text = text.replace("[Prompt] [Prompt] [Prompt] [Prompt] ", "[Prompt] [Prompt] [Prompt] ")
sentences_raw = text.split('[Prompt] [Prompt] [Prompt]')
text = sentences_raw[len(sentences_raw)-1]
text = text.replace("Start:", " ")
text = text.replace("Characters:", " ")
text = text.replace("Story after start:", " ")
sentences_raw = [text.replace("\n", " ")]
result = []
for item in sentences_raw:
sentence_in_item = sent_tokenize(item)
for item2 in sentence_in_item:
all_sentences.append(item2.strip())
if do_cleanup:
for item in all_sentences:
item = item.replace('<|endoftext|>', '')
if len(item) > 2:
result.append(item)
else:
result = all_sentences
return result
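# --- Illustrative usage (not part of the original code) ----------------------
# A minimal sketch of what cut_into_sentences() returns for a prompt carrying
# the '[Prompt]' markers, assuming nltk's punkt tokenizer (downloaded above)
# is available; not executed on import, and the example text is made up.
def _cut_into_sentences_demo():
    example = "[Prompt] [Prompt] [Prompt] Start: A tale. It began at dawn."
    return cut_into_sentences(example)
    # expected: roughly ["A tale.", "It began at dawn."]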
def generate_one_sentence(sentence, control, length=50, disc_weight=30, temperature=0.8, gpt3_id=None):
"""
Generate one sentence based on input data.
:param sentence: (string) context (prompt) used.
:param topic: (dict) {topic: weight, topic:weight,...} topic that the sentence need to steer towards.
:param extra_args: (dict) a dictionary that certain key will trigger additional functionality.
disc_weight: Set this value to use a different control strength than default.
get_gen_token_count: Return only how many tokens the generator has generated (for debug only).
:return: sentence generated, or others if extra_args are specified.
"""
secondary_code = control
if sentence == "":
print("Prompt is empty! Using a dummy sentence.")
sentence = "."
# Specify prompt below
prompt = sentence
    # Calculate the original prompt length.
length_of_prompt = len(sentence)
start_len = 0
text_ids = tokenizer.encode(prompt)
length_of_prompt_in_tokens = len(text_ids)
# print('text ids', text_ids)
encoded_prompts = torch.LongTensor(text_ids).unsqueeze(0).to(device)
if type(control) is str:
multi_code = tokenizer.encode(secondary_code)
elif type(control) is dict:
multi_code = {}
for item in secondary_code:
encoded = tokenizer.encode(item)[0] # only take the first one
multi_code[encoded] = secondary_code[item]
else:
raise NotImplementedError("topic data type of %s not supported... Supported: (str,dict)" % type(control))
# If 1, generate sentences towards a specific topic.
attr_class = 1
print(multi_code)
if int(control)!=-1:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
)
else:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
gpt3_api_key=gpt3_id,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=None,
tokenizer=tokenizer,
disc_weight=disc_weight,
class_bias=class_bias,
attr_class=attr_class,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
import openai
openai.api_key = gpt3_id
completion = openai.Completion()
response = completion.create(prompt=prompt,
engine="curie",
max_tokens=length,
temperature=temperature,)
text = response["choices"][0]["text"]
text = cut_into_sentences(text)
if len(text) == 0:
print("Warning! No text generated.")
return ""
all_gen_text = text[0]
return all_gen_text
import numpy as np
def continuing_generation(prompts, generation_controls, characters, temperatures, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
prompts: The prompt to be input. This is a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
model.set_input_embeddings(embed_cont)
prompts = list(prompts)
generated = []
character_prepend = '[Prompt][Prompt][Prompt]'
for idx, character in enumerate(characters):
if idx==0:
character_prepend = character_prepend+character
else:
character_prepend = character_prepend+' '+character
if idx != len(characters)-1:
character_prepend = character_prepend + ','
prompt_start_idx = 0
for c_idx, generation_control in enumerate(generation_controls):
temperature = temperatures[c_idx]
while True:
prompt_postpend = '[Prompt][Prompt][Prompt]'
# prompt_postpend = 'Story: '
for i in range(prompt_start_idx, len(prompts)):
prompt_postpend = prompt_postpend + prompts[i]
if i != len(prompts)-1:
prompt_postpend = prompt_postpend + ' '
# continue
else:
prompt_postpend = prompt_postpend
prompt_input = prompt_postpend+character_prepend+ '[Prompt][Prompt][Prompt]'
prompt_encoded = tokenizer.encode(prompt_input)
length_of_prompt_in_tokens = len(prompt_encoded)
if length_of_prompt_in_tokens>2048:
prompt_start_idx = prompt_start_idx + 1
else:
break
print(prompt_input, generation_control)
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
prompts.append(gen_sent)
generated.append(gen_sent)
for gen in generated:
print('gen:', gen)
print()
return generated
def infilling_generation(pre_prompts, post_prompts, generation_controls, characters, temperatures, is_front, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
prompts: The prompt to be input. This is a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
pre_prompts = list(pre_prompts)
post_prompts = list(post_prompts)
right = ''
for idx, pp in enumerate(post_prompts):
right = right + pp
if idx!=len(post_prompts)-1:
right = right + ' '
left = ''
for idx, pp in enumerate(pre_prompts):
left = left + pp
if idx!=len(post_prompts)-1:
left = left + ' '
generated = ['']*len(generation_controls)
# gen_counter = 0
for gen_counter in range(len(generation_controls)):
if is_front:
generation_control = generation_controls[int(gen_counter/2)]
temperature = temperatures[int(gen_counter/2)]
model.set_input_embeddings(embed_infill_front)
prompt_input = '[Prompt][Prompt][Prompt]'+right+'[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt][Prompt]'
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
generated[int(gen_counter/2)] =gen_sent
print(gen_sent)
left = left + ' ' + gen_sent
else:
generation_control = generation_controls[len(generated)-1-int(gen_counter/2)]
temperature = temperatures[len(generated)-1-int(gen_counter/2)]
model.set_input_embeddings(embed_infill_back)
prompt_input = '[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt]'+right + '[Prompt][Prompt][Prompt][Prompt]'
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
generated[len(generated)-1-int(gen_counter/2)] =gen_sent
print(gen_sent)
right = gen_sent+' '+right
for gen in generated:
print('gen', gen)
print()
return generated
def recognize_sentence_fortune(pre_context, character, target_sentence):
rec_input = "[Prompt][Prompt][Prompt]"+pre_context+"[Prompt][Prompt][Prompt]"+character+"[Prompt][Prompt][Prompt]"+target_sentence
with torch.no_grad():
model.set_input_embeddings(embed_recognition)
tokenized_input = tokenizer.encode(rec_input)
tokenized_input = torch.LongTensor(tokenized_input).unsqueeze(0).to(device)
output = model.transformer(tokenized_input)
op= output[0].type(torch.half)
# op=output[0].type(torch.FloatTensor).to(device)
logits = recognition_score(op)
to_return = float(logits[0][len(tokenized_input[0])-1][0])
if to_return > 1:
to_return = 1
elif to_return <0:
to_return = 0
return to_return
app = FlaskAPI(__name__)
# run_with_ngrok(app)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Below is a temporary sentence-labelling endpoint.
# Hence, it needs to be updated later.
@app.route('/labelSentence', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def sentence_analysis():
if request.method == 'POST':
print(request.data)
sentence = request.data['sentence']
pre_context = request.data['pre_context']
character = request.data['character']
# print(images, group_model, l2t, dec)
value = recognize_sentence_fortune(pre_context, character, sentence)
value = value * 100
return {'value': value}
@app.route('/continuingGeneration', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def continuingGeneration():
if request.method == 'POST':
pre_context = json.loads(request.data['pre_context'])
controls = json.loads(request.data['controls'])
characters = json.loads(request.data['characters'])
temperature = json.loads(request.data['temperature'])
print(pre_context)
print(controls)
print(characters)
print(temperature)
# TODO update below
generated = continuing_generation(pre_context, controls, characters, temperature, gpt3_id=None, disc_weight=30)
# generated = ['This is a generated sentence'] * len(controls)
values = []
for gen in generated:
pre_context_concat = ''
# start_id = 0
# start_id = len(pre_context)-2
# if start_id<0:
# start_id=0
# for idx in range(start_id, len(pre_context)):
# pre_context_concat = pre_context_concat + pre_context[idx]
value = recognize_sentence_fortune(pre_context_concat, characters[0], gen)
pre_context.append(gen)
values.append(value*100)
return {'generated': json.dumps(generated), 'values': json.dumps(values)}
@app.route('/infillingGeneration', methods=['GET', 'POST'])
@cross_origin(origin='http://10.168.233.218:7082',headers=['Content-Type'])
def infillingGeneration():
if request.method == 'POST':
pre_context = json.loads(request.data['pre_context'])
post_context = json.loads(request.data['post_context'])
controls = json.loads(request.data['controls'])
characters = json.loads(request.data['characters'])
temperature = json.loads(request.data['temperature'])
is_front = request.data['is_front']
print(pre_context)
print(post_context)
print(controls)
print(characters)
print(temperature)
# TODO update below
generated = infilling_generation(pre_context, post_context, controls, characters, temperature, is_front, gpt3_id=None, disc_weight=30)
        # generated = ['This is a generated sentence'] * len(controls)
        # NOTE: the original called an undefined sentences_analysis() helper;
        # score each generated sentence with the recognition model instead,
        # mirroring continuingGeneration() above (assumed intent)
        values = [recognize_sentence_fortune('', characters[0], gen) * 100
                  for gen in generated]
return {'generated': json.dumps(generated), 'values': json.dumps(values)}
if __name__=="__main__":
app.run(host='0.0.0.0', port=11080)
# === Next file: tests/app/test_app_service.py | 0604hx/buter | MIT ===
import json
import unittest
from buter.app.services import load_from_file, detect_app_name
from buter.server import docker
from buter.util.Utils import unzip
from config import getConfig
class AppServiceTest(unittest.TestCase):
def setUp(self):
"""
        Only the server.docker object needs to be initialised here
:return:
"""
config = getConfig('dev')
docker.setup(config)
def test_load_from_file(self):
load_from_file("G:/tidb.zip")
def test_load_image(self):
docker.loadImage("G:/tidb.tar")
def test_json_read(self):
with open("G:/app.json") as content:
app = json.load(content) # '{"name":"abc"}'
print(app)
docker.createContainer("pingcap/tidb", app['cmd'], app['args'])
def test_detect_app_name(self):
app = json.loads('{"image":"pingcap/tidb", "args":{"name":"tidb01"}}')
self.assertEqual("tidb", detect_app_name(None, app['image']))
self.assertEqual("tidb01", detect_app_name(app['args']))
self.assertEqual("tidb", detect_app_name("tidb"))
def test_unzip(self):
file_path = "G:/test/test.zip"
unzip(file_path, "G:/test")
def test_list_container(self):
containers = docker.listContainer()
print(containers)
for c in containers:
print("container: name={}, id={} ({}), labels={}, stat={}"
.format(c.name, c.id, c.short_id, c.labels, c.status))
print([{"name": c.name, "id": c.short_id, "labels": c.labels, "stat": c.status} for c in containers])
cs = dict((c.name, {"id": c.short_id, "labels": c.labels, "stat": c.status}) for c in containers)
print(cs)
if __name__ == '__main__':
unittest.main()
| 30.982143
| 109
| 0.609222
| 227
| 1,735
| 4.488987
| 0.299559
| 0.041217
| 0.063788
| 0.047105
| 0.196271
| 0.170756
| 0.107949
| 0.107949
| 0.107949
| 0.107949
| 0
| 0.002996
| 0.230548
| 1,735
| 55
| 110
| 31.545455
| 0.7603
| 0.029971
| 0
| 0
| 0
| 0
| 0.145972
| 0.029679
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.184211
| false
| 0
| 0.157895
| 0
| 0.368421
| 0.131579
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f871c0ad8b9204fef05550a10cc4ceb534586079
| 654
|
py
|
Python
|
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
# https://atcoder.jp/contests/joi2008yo/tasks/joi2008yo_e
R, C = list(map(int, input().split()))
senbei_pos = []
ans = 0
for _ in range(R):
pos = list(map(int, input().split()))
senbei_pos.append(pos)
for bit in range(2**R):
total = 0
copied_pos = senbei_pos[:]
# R is at most 10, so zero-pad the bit pattern to a 10-digit binary string
flip_row_pos = list(format(bit, '010b'))
for j in range(C):
column = [p[j] for p in copied_pos]
one_count = sum([column[k] ^ int(flip_row_pos[10 - R + k])
for k in range(R)])
zero_count = R - one_count
total += max(zero_count, one_count)
ans = max(ans, total)
print(ans)
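# Worked illustration of the flip mask above (added for clarity; not part of the
# submitted solution): with R = 3 and bit = 5, format(5, '010b') == '0000000101',
# so the indices 10 - R + k for k = 0, 1, 2 pick out '1', '0', '1' -- rows 0 and 2
# are XORed with 1 (flipped) while row 1 is left unchanged. Padding to 10 digits
# works because R never exceeds 10 in this problem.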
| 29.727273
| 66
| 0.59633
| 100
| 654
| 3.74
| 0.42
| 0.074866
| 0.053476
| 0.080214
| 0.15508
| 0.15508
| 0.15508
| 0
| 0
| 0
| 0
| 0.045082
| 0.253823
| 654
| 21
| 67
| 31.142857
| 0.721311
| 0.12844
| 0
| 0
| 0
| 0
| 0.007055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8724ce5a5705922dd55fcf91b7512b691dc8ab7
| 2,850
|
py
|
Python
|
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | 1
|
2018-03-27T00:08:26.000Z
|
2018-03-27T00:08:26.000Z
|
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | null | null | null |
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | 1
|
2020-06-04T02:49:20.000Z
|
2020-06-04T02:49:20.000Z
|
#!/usr/bin/env python3
import requests
import os
import glob
import telegram
from time import sleep
token = "token"
bot = telegram.Bot(token=token)
# The bot receives a YouTube link, runs it through the shell command youtube-dl -x --audio-format mp3 <link>, and sends the downloaded mp3 back to the client
class BotHandler:
def __init__(self, token):
self.token = token
self.api_url = "https://api.telegram.org/bot{}/".format(token)
def get_updates(self, offset=None, timeout=30):
method = 'getUpdates'
params = {'timeout': timeout, 'offset': offset}
resp = requests.get(self.api_url + method, params)
result_json = resp.json()['result']
return result_json
def send_audio(self, chat_id, audio):
params = {'chat_id': chat_id, 'audio': audio}
method = 'sendAudio'
resp = requests.post(self.api_url + method, params)
return resp
def get_last_update(self):
get_result = self.get_updates()
if len(get_result) > 0:
last_update = get_result[-1]
else:
try:
last_update = get_result[len(get_result)]
except IndexError:
last_update = 'null'
return last_update
def mp3_download(url):
cwd = os.getcwd() + "/"
os.system('youtube-dl -x --audio-format mp3 ' + url)
try:
sleep(15)
mp3_name = glob.glob(cwd + "*.mp3")[0]
return mp3_name
except:
print("Aw, man")
def song_rm():
cwd = os.getcwd() + "/"
try:
os.system('rm ' + cwd + '*.mp3')
except:
print("Aw, man")
mp3_bot = BotHandler(token)
def main():
new_offset = None
while True:
mp3_bot.get_updates(new_offset)
last_update = mp3_bot.get_last_update()
try:
last_update_id = last_update['update_id']
last_chat_text = last_update['message']['text']
last_chat_id = last_update['message']['chat']['id']
except:
last_update_id = 0
last_chat_text = 'null'
last_chat_id = 0
print(last_chat_text)
if 'https://www.youtube.com/' in last_chat_text.lower() or 'https://youtu.be/' in last_chat_text.lower():
bot.send_message(chat_id=last_chat_id, text="Downloading, please wait....")
song_name = mp3_download(last_chat_text)
bot.send_message(chat_id=last_chat_id, text="Uploading, please wait....")
bot.send_audio(chat_id=last_chat_id, audio=open(song_name, 'rb'))
song_rm()
elif '/start' in last_chat_text.lower():
bot.send_message(chat_id=last_chat_id, text="Please send me youtube link.")
new_offset = last_update_id + 1
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
| 30.978261
| 138
| 0.597895
| 371
| 2,850
| 4.334232
| 0.291105
| 0.052239
| 0.052239
| 0.034826
| 0.166045
| 0.116915
| 0.087065
| 0.087065
| 0.087065
| 0.06592
| 0
| 0.01122
| 0.280702
| 2,850
| 91
| 139
| 31.318681
| 0.773171
| 0.054035
| 0
| 0.157895
| 0
| 0
| 0.121752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.065789
| 0
| 0.223684
| 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f87cfb9c6282ebda75b44ea58b3afec144dcbcf4
| 448
|
py
|
Python
|
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2020-04-10T07:29:56.000Z
|
2020-05-27T03:45:21.000Z
|
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | null | null | null |
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
# Douglas Crockford's idea for making generators
# basically "why do you need a `yield` keyword when you can just maintain some state"
# in my view, a class would be a better way to do this, and indeed, in python,
# that's how Iterators are defined.
def iter(list):
i = 0
def gen():
nonlocal i
value = list[i]
i += 1
return value
return gen
gen = iter([1,2,3])
for _ in range(4):
print(gen())
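# For comparison (a hedged sketch, not part of the original snippet): the idiomatic
# Python forms the comment above alludes to. Note that the closure-based gen()
# raises IndexError once the list is exhausted (the loop above calls it four times
# on a three-element list), whereas both versions below signal exhaustion with
# StopIteration.
def gen_func(items):
    # Generator function: `yield` keeps the iteration state for us.
    for value in items:
        yield value

class ListIterator:
    # Iterator class: the state lives on the instance, as in the closure version.
    def __init__(self, items):
        self._items = items
        self._i = 0
    def __iter__(self):
        return self
    def __next__(self):
        if self._i >= len(self._items):
            raise StopIteration
        value = self._items[self._i]
        self._i += 1
        return value

# list(gen_func([1, 2, 3])) == [1, 2, 3]
# list(ListIterator([1, 2, 3])) == [1, 2, 3]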
| 22.4
| 85
| 0.683036
| 81
| 448
| 3.765432
| 0.753086
| 0.032787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022535
| 0.207589
| 448
| 19
| 86
| 23.578947
| 0.83662
| 0.622768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f881c0e0b875dfcd895b81b936783f36c735935f
| 564
|
py
|
Python
|
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
import requests
from api import config
def get_docgen_token():
params = {
"grant_type": "client_credentials",
"client_id": config.COMMON_DOCGEN_CLIENT_ID,
"client_secret": config.COMMON_DOCGEN_CLIENT_SECRET,
"scope": ""
}
req = requests.post(
config.COMMON_DOCGEN_SSO_ENDPOINT,
data=params,
headers={
"Content-Type": "application/x-www-form-urlencoded",
}
)
req.raise_for_status()
resp = req.json()
token = req.json().get('access_token')
return token
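# Usage sketch (illustrative only; the downstream docgen endpoint is not part of this
# module, and docgen_auth_headers is a hypothetical helper, not an existing API):
def docgen_auth_headers():
    # Attach the client-credentials token as a bearer token for subsequent
    # requests to the docgen service.
    return {"Authorization": "Bearer " + get_docgen_token()}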
| 21.692308
| 64
| 0.615248
| 63
| 564
| 5.222222
| 0.587302
| 0.109422
| 0.164134
| 0.145897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269504
| 564
| 25
| 65
| 22.56
| 0.798544
| 0
| 0
| 0
| 0
| 0
| 0.198582
| 0.058511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8825ad47b75cf630d4ad3f98bb97cd2847d852d
| 619
|
py
|
Python
|
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
'''
Description: Problem 3 (rearrange the code)
Version: 1.0.1.20210116
Author: Arvin Zhao
Date: 2021-01-14 22:51:16
Last Editors: Arvin Zhao
LastEditTime: 2021-01-16 04:11:18
'''
def get_data():
username = input('Enter your username: ')
age = int(input('Enter your age: '))
data_tuple = (username, age)
return data_tuple
def message(username, age):
if age <= 10:
print('Hi', username)
else:
print('Hello', username)
def main():
username, age = get_data()
message(username, age)
if __name__ == '__main__': # It is strongly recommended to add this line.
main()
| 20.633333
| 74
| 0.646204
| 88
| 619
| 4.409091
| 0.613636
| 0.141753
| 0.072165
| 0.103093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.219709
| 619
| 30
| 75
| 20.633333
| 0.716356
| 0.350565
| 0
| 0
| 0
| 0
| 0.131646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.266667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8825cac93ae51da9c9e342930c13e66cd5b1a63
| 1,046
|
py
|
Python
|
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow import keras
# Make sure the tf_trees directory is in the search path.
from tf_trees import TEL
# The documentation of TEL can be accessed as follows
print(TEL.__doc__)
# We will fit TEL on the Boston Housing regression dataset.
# First, load the dataset.
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
# Define the tree layer; here we choose 10 trees, each of depth 3.
# Note output_logits_dim is the dimension of the tree output.
# output_logits_dim = 1 in this case, but should be equal to the
# number of classes if used as an output layer in a classification task.
tree_layer = TEL(output_logits_dim=1, trees_num=10, depth=3)
# Construct a sequential model with batch normalization and TEL.
model = keras.Sequential()
model.add(keras.layers.BatchNormalization())
model.add(tree_layer)
# Fit a model with mse loss.
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
result = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))
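# Sketch (not part of the original demo): using TEL as a classification head, with
# output_logits_dim equal to the number of classes as the comments above advise.
# The softmax layer and the loss choice are assumptions for a multi-class setup.
def build_tel_classifier(num_classes, trees_num=10, depth=3):
    clf = keras.Sequential([
        keras.layers.BatchNormalization(),
        TEL(output_logits_dim=num_classes, trees_num=trees_num, depth=depth),
        keras.layers.Softmax(),
    ])
    clf.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return clf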
| 38.740741
| 82
| 0.772467
| 176
| 1,046
| 4.4375
| 0.511364
| 0.049936
| 0.057618
| 0.03073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012277
| 0.143403
| 1,046
| 26
| 83
| 40.230769
| 0.859375
| 0.515296
| 0
| 0
| 0
| 0
| 0.020161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f88367f68dcb96f708907ba780b8dfe0c11ecea5
| 725
|
py
|
Python
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 4
|
2015-01-26T17:56:05.000Z
|
2020-04-01T05:52:00.000Z
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 11
|
2015-01-06T10:34:36.000Z
|
2021-03-22T18:29:45.000Z
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 6
|
2015-01-02T15:02:27.000Z
|
2021-05-12T18:09:35.000Z
|
#!/usr/bin/env python
# Core Library modules
import argparse
import os
# Third party modules
import pytest
# First party modules
import nntoolkit.utils as utils
def test_is_valid_file():
parser = argparse.ArgumentParser()
# Does exist
path = os.path.realpath(__file__)
assert utils.is_valid_file(parser, path) == path
# Does not exist
with pytest.raises(SystemExit):
utils.is_valid_file(parser, "/etc/nonexistingfile")
def test_is_valid_folder():
parser = argparse.ArgumentParser()
# Does exist
assert utils.is_valid_folder(parser, "/etc") == "/etc"
# Does not exist
with pytest.raises(SystemExit):
utils.is_valid_folder(parser, "/etc/nonexistingfoler")
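# Minimal sketch of the contract these tests rely on (the real implementation lives
# in nntoolkit.utils and may differ in details):
def _is_valid_file_sketch(parser, arg):
    if not os.path.isfile(arg):
        parser.error(f"{arg} does not exist")  # parser.error() raises SystemExit
    return arg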
| 20.714286
| 62
| 0.704828
| 93
| 725
| 5.301075
| 0.397849
| 0.085193
| 0.097363
| 0.103448
| 0.503043
| 0.288032
| 0.20284
| 0.20284
| 0.20284
| 0.20284
| 0
| 0
| 0.193103
| 725
| 34
| 63
| 21.323529
| 0.842735
| 0.183448
| 0
| 0.266667
| 0
| 0
| 0.083904
| 0.035959
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.133333
| false
| 0
| 0.266667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f88aa3fcd8cfa698889ea39a72ffe01decd8c2ea
| 6,279
|
py
|
Python
|
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | 1
|
2021-07-22T14:06:08.000Z
|
2021-07-22T14:06:08.000Z
|
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | null | null | null |
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# MIT License
#
# Copyright (c) 2021 gh0$t
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
############################################################################################################################
# imports
############################################################################################################################
import sys
import urllib.request
from bs4 import BeautifulSoup
from urllib.request import Request
from selenium import webdriver
import os
import time
from stem import Signal
from stem.control import Controller
############################################################################################################################
# Pass URL, extract text, translate
############################################################################################################################
URL = str(sys.argv[1])
GTURL = 'https://translate.google.com/'
# this is important, drives the whole translation process.
# if google updates the translate.google.com page selectors, this HORRIBLE selector needs to be updated
GTXpathSel = '//*[@id="yDmH0d"]/c-wiz/div/div[@class="WFnNle"]/c-wiz/div[@class="OlSOob"]/c-wiz/div[@class="hRFt4b"]/c-wiz/div[@class="ykTHSe"]/div/div[@class="dykxn MeCBDd j33Gae"]/div/div[2]/div/div[@class="Llmcnf"]'
print('\nConnecting to ' + URL + ' ...' + '\nExtracting text...')
req = Request(URL)
html = BeautifulSoup(urllib.request.urlopen(req).read(), 'html.parser')
text = html.find('div', {'id': 'bodyContent'}).get_text()
with open('out/English.txt', 'w', encoding='utf-8') as f:
f.write(text)
print('\nExtracted -> out/English.txt')
print('\nStarting translation job...')
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
options.add_argument('--headless')
driver = webdriver.Chrome(executable_path='driver/chromedriver', options=options)
print('\nConnecting to ' + GTURL + ' ...')
driver.get(GTURL)
time.sleep(1)
try:
# accept Google's cookies
driver.find_elements_by_xpath ('//span[contains(text(), "I agree")]')[0].click()
except:
pass
time.sleep(2)
driver.find_element_by_xpath('//*[@aria-label="Document translation"]').click()
driver.find_element_by_name('file').send_keys(os.path.abspath('out/English.txt'))
langEle = driver.find_elements_by_xpath(GTXpathSel)
i = 0
def init(driver):
try:
# elements are stale, need to refresh the list
langEle = driver.find_elements_by_xpath(GTXpathSel)
lang = langEle[i]
langTxt = lang.get_attribute('innerHTML')
if langTxt != 'English':
# printing this to make you feel less giddy if you end up staring at your terminal at a stretch
print('\nTrying English to ' + langTxt + '...')
driver.find_elements_by_xpath('//button[@aria-label="More target languages"]')[1].click()
time.sleep(2)
# translate.google.com DOM structure SUCKS.
# sorry Google, but that's the truth.
# #$!@ -> i am swearing, that's Google's representation of their 'swearing emote'
try:
driver.find_elements_by_xpath('//div[@data-language-code="' + lang.find_element_by_xpath('..').get_attribute('data-language-code') + '"]')[3].click()
except:
driver.find_elements_by_xpath('//div[@data-language-code="' + lang.find_element_by_xpath('..').get_attribute('data-language-code') + '"]')[1].click()
driver.find_elements_by_xpath ('//span[contains(text(), "Translate")]')[3].click()
time.sleep(1)
translatedBlog = driver.find_element_by_xpath('//pre').text
with open('out/' + langTxt + '.txt', 'w', encoding='utf-8') as f:
f.write(translatedBlog)
print('\n' + str(i + 1) + '/' + str(totLang) + ' -> ' + langTxt + ' -> Done -> out/' + langTxt + '.txt')
driver.back()
else:
print('\nSkipping ' + str(i + 1) + '/' + str(totLang) + ' -> ' + langTxt + '...')
except Exception as e:
# for debugging. use it @ your own risk. i am tired of the terminal screaming @ my face.
# print('\n---------->', e)
# Strategy to bypass Google's spam filter: quit chrome, switch TOR ID, re-try translation job
driver.quit()
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
controller.signal(Signal.NEWNYM)
# it's overkill to print this. just let it do its job silently.
# print('\n----------> Switching TOR ID & re-trying ' + str(i + 1) + '/' + str(totLang) + '...')
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
options.add_argument('--headless')
driver = webdriver.Chrome(executable_path='driver/chromedriver', options=options)
driver.get(GTURL)
time.sleep(1)
try:
# accept Google's cookies
driver.find_elements_by_xpath ('//span[contains(text(), "I agree")]')[0].click()
except:
pass
time.sleep(2)
driver.find_element_by_xpath('//*[@aria-label="Document translation"]').click()
driver.find_element_by_name('file').send_keys(os.path.abspath('out/English.txt'))
init(driver)
totLang = len(langEle)
print('\nTotal languages = ' + str(totLang) + ' [press CTRL + C once or twice or thrice or any number of times you like to press to quit anytime]')
print('\nTranslating text...')
while i < totLang:
init(driver)
i += 1
print('\nTranslations completed. Check "/out" for the files.')
driver.quit()
exit()
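# Hedged reference sketch: the TOR identity switch used inside init() above, isolated
# into a helper. It assumes a local Tor ControlPort on 9051, the same assumption the
# script already makes.
#
#   from stem import Signal
#   from stem.control import Controller
#
#   def renew_tor_identity(port=9051):
#       with Controller.from_port(port=port) as controller:
#           controller.authenticate()
#           controller.signal(Signal.NEWNYM)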
| 34.5
| 218
| 0.645963
| 818
| 6,279
| 4.887531
| 0.380196
| 0.032516
| 0.036018
| 0.04002
| 0.311406
| 0.295398
| 0.284392
| 0.263382
| 0.253127
| 0.24062
| 0
| 0.006637
| 0.136168
| 6,279
| 181
| 219
| 34.690608
| 0.730457
| 0.316929
| 0
| 0.430233
| 0
| 0.023256
| 0.303942
| 0.104955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011628
| false
| 0.023256
| 0.104651
| 0
| 0.116279
| 0.116279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f88e5bdd49e9b79ee78760de491336a0c465e929
| 935
|
py
|
Python
|
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import keras
import numpy as np
def get_bias_major_weights(model):
weights = model.get_weights()
biasMajor = []
for arrI in range(0, len(weights), 2):
inWeights = weights[arrI]
biasWeights = weights[arrI+1].reshape((1, -1))  # bias vector as a single row
l = np.concatenate((biasWeights, inWeights), axis=0).T
biasMajor.append(l)
return np.asarray(biasMajor)
def get_max_arg_vals(arr3D):
amaxes = tf.argmax(arr3D, axis=-1)
windowIdx = np.arange(0, amaxes.shape[0])
rowIdx = np.arange(0, amaxes.shape[1])
return arr3D[windowIdx[:, np.newaxis], rowIdx[np.newaxis, :], amaxes]
def get_steps_per_epoch(nSamplesOg, fracOfOg):
return int(max(nSamplesOg * fracOfOg, 1))
def get_steps_and_epochs(nSamplesOg, fracOfOg, epochsIfFull):
stepsPerEpoch = get_steps_per_epoch(nSamplesOg, fracOfOg)
epochs = int(max(epochsIfFull / fracOfOg, 1))
return stepsPerEpoch, epochs
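# Shape sketch (illustrative): a Dense layer mapping 4 inputs to 3 units stores a
# (4, 3) kernel and a (3,) bias, so get_bias_major_weights() returns shape
# (1, 3, 5) -- one row [bias, w_1, ..., w_4] per output unit.
if __name__ == "__main__":
    demo_model = keras.Sequential([keras.layers.Dense(3, input_shape=(4,))])
    print(get_bias_major_weights(demo_model).shape)  # (1, 3, 5)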
| 32.241379
| 73
| 0.698396
| 126
| 935
| 5.055556
| 0.420635
| 0.037677
| 0.028257
| 0.047096
| 0.169545
| 0.10675
| 0
| 0
| 0
| 0
| 0
| 0.020861
| 0.179679
| 935
| 28
| 74
| 33.392857
| 0.809648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.130435
| 0.043478
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8900e5fac4e08162311478b3ed9cf017f5cb02c
| 10,047
|
py
|
Python
|
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
r''' perl_io - Opens a file or pipe in the Perl style
Copyright (c) 2016 Yoichi Hariguchi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Usage:
from perl_io import PerlIO
Example 1:
pio = PerlIO('/proc/meminfo') # open `/proc/meminfo' for input
Example 2:
pio = PerlIO('> /tmp/foo.txt') # open '/tmp/foo.txt' for output
Example 3:
pio = PerlIO('>> /tmp/foo.txt') # open '/tmp/foo.txt' for appending
Example 4:
pio = PerlIO('| cmd arg1 ...') # we pipe output to the command `cmd'
Example 5:
pio = PerlIO('cmd arg1 ... |') # execute `cmd' that pipes output to us
You can access the Python file object as `pio.fo' after
PerlIO object `pio' was successfully created. `pio.fo' is
set to `None' if PelIO failed to open a file or pipe.
Example 6: Read the output of `strings /usr/bin/python' from a pipe
with PerlIO('strings /usr/bin/python |') as pio:
for line in pio.fo.xreadlines():
#
# do something...
#
Example 7: Write to a file
with PerlIO('>/tmp/.tmpfile-%d' % (os.getpid())) as pio:
print >> pio.fo, 'This is an example'
pio.fo.write('This is another example')
pio.fo.write('\n')
Note: PerlIO parses the parameter as follows in the case it
indicates to input from or output to a pipe.
1. Strips the first or last `|' (which indicates to open a pipe)
2. If the remaining string includes shell special characters
like `|', `>', `;', etc., PerlIO calls Popen() with
"sh -c 'remaining_string'", which means it can be a security
hazard when the remaining string includes the unsanitized input
from an untrusted source.
3. If the remaining string includes no shell special characters,
PerlIO does not invoke shell when it calls Popen().
How to test:
python -m unittest -v perl_io
'''
import os
import platform
import re
import sys
import syslog
import time
import subprocess
import shlex
import unittest
class PerlIO:
def __init__(self, open_str):
self._fo = None
self._proc = None
open_str = open_str.strip()
if open_str[-1] == '|':
self._rd_open_pipe(open_str[:-1])
elif open_str[0] == '|':
self._wr_open_pipe(open_str[1:])
elif open_str[0] == '>':
if open_str[1] == '>':
self._open_file(open_str[2:], 'a')
else:
self._open_file(open_str[1:], 'w')
elif open_str[0] == '<':
self._open_file(open_str[1:], 'r')
elif open_str[0:2] == '+>' or open_str[0:2] == '+<':
self._open_file(open_str[2:], 'r+')
elif open_str == '-':
self._fo = sys.stdin
elif open_str == '>-':
self._fo = sys.stdout
else:
self._open_file(open_str, 'r')
def __enter__(self):
return self
def __exit__(self, type, val, traceback):
self.close()
def _parse_command(self, cmd):
m = re.search(r'(\||<|>|`|;)', cmd)
if m:
return "sh -c '" + cmd + "'"
return cmd
def _rd_open_pipe(self, cmd):
try:
cmd = self._parse_command(cmd)
self._proc = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self._fo = self._proc.stdout
except IOError:
print >> sys.stderr, 'failed to open pipe from %s' % (cmd)
def _wr_open_pipe(self, cmd):
try:
cmd = self._parse_command(cmd)
self._proc = subprocess.Popen(shlex.split(cmd),
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
self._fo = self._proc.stdin
except IOError:
print >> sys.stderr, 'failed to open pipe to %s' % (cmd)
def _open_file(self, file, mode):
file = file.strip()
try:
self._fo = open(file, mode)
except IOError:
print >> sys.stderr, 'failed to open %s' % (file)
@property
def fo(self):
return self._fo
@property
def err_fo(self):
return self._proc.stderr
def close(self):
if self._proc == None:
self._fo.close()
else:
self._proc.communicate()
class TestPerlIO(unittest.TestCase):
def runTest(self):
file = self.file_test(False)
self.rd_pipe_test(file)
self.rd_pipe_shell_test()
self.wr_pipe_test()
os.remove(file)
#
# 1. Open a file to write using PerlIO
# 2. Open a pipe outputting to us with a complex command line
# PerlIO('strings `which ls` | sort | uniq | ')
# so that shell is invoked with Popen().
# 3. Write all the input to the file created in No. 1
# 4. Check the contents
#
def rd_pipe_shell_test(self):
file = '/tmp/.pio_pipe_rd_test-%d' % (os.getpid())
pio_wr = PerlIO('> %s' % (file))
self.assertNotEqual(pio_wr.fo, None)
ll = []
cmd = 'strings `which ls` | sort | uniq | '
print >> sys.stderr, \
'Read from pipe (multiple commands): %s' % (cmd)
with PerlIO(cmd) as pio:
for line in pio.fo.xreadlines():
line = line.strip()
ll.append(line)
print >> pio_wr.fo, line
pio_wr.close()
pio_rd = PerlIO(file)
self.assertNotEqual(pio_rd.fo, None)
for line in pio_rd.fo.xreadlines():
line = line.strip()
expected = ll.pop(0)
self.assertEqual(line, expected)
os.remove(file)
#
# 1. Open a pipe to write with a complex command line
# PerlIO('| cat > /tmp/.pio_pipe_rt_test-XXXX')
# so that shell is invoked with Popen().
# The output to the pipe is redirected to a file
# 2. Open the file to read using PerlIO
# 3. Check the contents
#
def wr_pipe_test(self):
m = re.search(r'CYGWIN', platform.system())
if m:
#
# test fails on cygwin
#
return
file = '/tmp/.pio_pipe_wr_test-%d' % (os.getpid())
cmd = '| cat > %s' % (file)
print >> sys.stderr, 'Write to pipe: %s' % (cmd)
pio = PerlIO(cmd)
self.assertNotEqual(pio.fo, None)
ll = []
for i in range (0, 100):
line = "%4d %4d %4d %4d %4d" % (i, i, i, i, i)
ll.append(line)
print >> pio.fo, line
pio.close()
pio_rd = PerlIO(file)
self.assertNotEqual(pio_rd.fo, None)
for line in pio_rd.fo.xreadlines():
line = line.rstrip()
expected = ll.pop(0)
self.assertEqual(line, expected)
os.remove(file)
def file_test(self, remove):
#
# pio = PerlIO('>/tmp/.fileTest-pid')
#
file = '/tmp/.fileTest-%d' % os.getpid()
ofile = '> ' + file
print >> sys.stderr, '\n\nWrite to file: %s' % (ofile)
pio = PerlIO(ofile)
if pio.fo == None:
print >> sys.stderr, ' Error: failed to open %s' % file
sys.exit(1)
else:
for i in range (0, 500):
print >> pio.fo, '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
pio.close()
#
# Append test ('>>/tmp/.fileTest-pid')
#
ofile = ' >> ' + file
print >> sys.stderr, 'Append to file: %s' % (ofile)
pio = PerlIO(ofile)
if pio.fo == None:
print >> sys.stderr, ' Error: failed to open %s' % file
sys.exit(1)
else:
for i in range (500, 1000):
print >> pio.fo, '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
pio.close()
#
# Read the file just created and check the contents
#
print >> sys.stderr, 'Read from file: %s' % (file)
pio = PerlIO(file)
i = 0
for line in pio.fo.xreadlines():
line = line.rstrip()
expected = '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
i += 1
self.assertEqual(line, expected)
pio.close()
if remove == True:
os.remove(file)
return file
#
# Read from a pipe with a simple command line
# so that shell is not invoked with Popen().
# Confirm the contents of the file is correct.
# Must be called after file_test().
#
def rd_pipe_test(self, file):
cmd = ' cat %s | ' % (file)
print >> sys.stderr, 'Read from pipe: %s' % (cmd)
i = 0
with PerlIO(cmd) as pio:
for line in pio.fo.xreadlines():
line = line.rstrip()
expected = '%4d %4d %4d %4d %4d' % (i, i, i, i, i)
i += 1
self.assertEqual(line, expected)
| 33.602007
| 78
| 0.54902
| 1,335
| 10,047
| 4.043446
| 0.214232
| 0.008151
| 0.009448
| 0.008892
| 0.373101
| 0.314931
| 0.264357
| 0.24435
| 0.231753
| 0.187662
| 0
| 0.013059
| 0.336916
| 10,047
| 298
| 79
| 33.714765
| 0.797208
| 0.364686
| 0
| 0.412791
| 0
| 0
| 0.082019
| 0.007886
| 0
| 0
| 0
| 0
| 0.046512
| 1
| 0.087209
| false
| 0
| 0.052326
| 0.017442
| 0.19186
| 0.087209
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f89039eac3e7b46b0d707c6f7b3927ce103b2914
| 919
|
py
|
Python
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 152
|
2020-12-07T13:26:53.000Z
|
2022-03-23T02:00:04.000Z
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 16
|
2020-12-07T17:04:36.000Z
|
2022-03-10T11:12:52.000Z
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 36
|
2020-12-09T13:04:40.000Z
|
2022-03-12T18:14:36.000Z
|
from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
@bp.route('/logs/errors', methods=['GET'])
@login_required
@admin_required
def logs_errors():
provider = Provider()
logging = provider.logging()
default_per_page = 20
page = request.args.get('page', 1)
per_page = request.args.get('per_page', default_per_page)
if isinstance(page, str):
page = int(page) if page.isdigit() else 1
if isinstance(per_page, str):
per_page = int(per_page) if per_page.isdigit() else 1
if page <= 0:
page = 1
if per_page <= 0:
per_page = default_per_page
return render_template(
'config/system/logs/errors.html',
results=logging.view_errors(page, per_page)
)
| 26.257143
| 68
| 0.688792
| 131
| 919
| 4.633588
| 0.358779
| 0.138386
| 0.069193
| 0.046129
| 0.128501
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010944
| 0.20457
| 919
| 34
| 69
| 27.029412
| 0.819425
| 0
| 0
| 0
| 0
| 0
| 0.062024
| 0.032644
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f890b528c3dd1757b9098304393522baa32267a2
| 2,241
|
py
|
Python
|
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-08-23T19:49:03.000Z
|
2021-08-23T19:49:03.000Z
|
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Random agent that always returns a random action.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from random import gauss, random, randrange
from tensorforce.agents import Agent
class RandomAgent(Agent):
name = 'RandomAgent'
model = (lambda config: None)
def __init__(self, config):
super(RandomAgent, self).__init__(config)
def reset(self):
self.episode += 1
def act(self, state):
"""
Get random action from action space
:param state: current state (disregarded)
:return: random action
"""
self.timestep += 1
if self.unique_state:
self.current_state = dict(state=state)
else:
self.current_state = state
self.current_action = dict()
for name, action in self.actions_config.items():
if action.continuous:
action = random()
if 'min_value' in action:
action = action.min_value + random() * (action.max_value - action.min_value)
else:
action = gauss(mu=0.0, sigma=1.0)
else:
action = randrange(action.num_actions)
self.current_action[name] = action
if self.unique_action:
return self.current_action['action']
else:
return self.current_action
def observe(self, reward, terminal):
self.current_reward = reward
self.current_terminal = terminal
| 30.283784
| 96
| 0.622936
| 266
| 2,241
| 5.105263
| 0.458647
| 0.064801
| 0.050074
| 0.023564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008511
| 0.265953
| 2,241
| 73
| 97
| 30.69863
| 0.817021
| 0.358322
| 0
| 0.111111
| 0
| 0
| 0.018868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.138889
| 0
| 0.388889
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f893a81b68249d96ab59017996d9f35493423f0f
| 8,644
|
py
|
Python
|
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | null | null | null |
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | 32
|
2021-03-17T13:17:22.000Z
|
2021-05-04T14:25:31.000Z
|
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | 1
|
2021-03-24T13:47:44.000Z
|
2021-03-24T13:47:44.000Z
|
import os
import tempfile
import shutil
import requests
import sys
import logging
import json
from src.server.dependency import ModelData
import tensorflow as tf
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
Source: https://stackoverflow.com/a/39215961
"""
def __init__(self, logger, level):
self.logger = logger
self.level = level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.level, line.rstrip())
def flush(self):
pass
def train_model(training_id, model_data: ModelData):
"""
Train model(s) based on a given model and hyperparameters
Two hyperparameters are currently supported:
- Optimizer and learning_rate
"""
# SET LOGGER TO PRINT TO STDOUT AND WRITE TO FILE
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("/log/{}.log".format(training_id)),
logging.StreamHandler(sys.stdout)
]
)
log = logging.getLogger('db_microservice_logger')
sys.stdout = StreamToLogger(log,logging.INFO)
sys.stderr = StreamToLogger(log,logging.ERROR)
# get API KEY from the environment file
API_KEY = os.getenv('API_KEY')
best_acc = -1
best_val_acc = -1
best_loss = -1
best_val_loss = -1
best_model = None
best_config = None
best_optimizer = None
best_loss_fn = None
# print("Save:" + str(model_data.save))
logging.info("Save:" + str(model_data.save))
try:
# print('[Training] Starting to train model ID: ' + training_id)
logging.info('[Training] Starting to train model ID: ' + training_id)
dataset_root = '/app/src/public_dataset'
img_height = 28
img_width = 28
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
dataset_root,
validation_split=model_data.split,
subset="training",
seed=model_data.seed,
image_size=(img_height, img_width),
batch_size=model_data.batch_size
)
validation_ds = tf.keras.preprocessing.image_dataset_from_directory(
dataset_root,
validation_split=model_data.split,
subset="validation",
seed=model_data.seed,
image_size=(img_height, img_width),
batch_size=model_data.batch_size
)
autotune_buf_size = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=autotune_buf_size)
validation_ds = validation_ds.cache().prefetch(buffer_size=autotune_buf_size)
optimizer_dict = model_data.optimizer.dict()
config = {}
if "config" in optimizer_dict and optimizer_dict["config"]:
# convert all float config from string to float
convert_data_type(optimizer_dict["config"])
config = optimizer_dict["config"]
# if learning_rate is not defined, the optimizer's default value is used
learning_rate_list = [None]
if model_data.optimizer.learning_rate:
learning_rate_list = model_data.optimizer.learning_rate
# get loss function object
loss_dict = model_data.loss_function.dict()
if loss_dict["config"] is None:
loss_dict["config"] = {}
else:
convert_data_type(loss_dict["config"])
loss_fn = tf.keras.losses.get(loss_dict)
logging.info(loss_fn)
# create all hyperparameters combination
optimizer_class = model_data.optimizer.dict()
hyperparameters = [[o,lr] for o in optimizer_dict["class_name"]
for lr in learning_rate_list]
# loop through all hyperparameters
for hp in hyperparameters:
# load model from json file
model = tf.keras.models.model_from_json(model_data.model_structure)
optimizer_obj = {
"class_name": hp[0],
"config": config
}
# set learning rate if not None
if hp[1]:
optimizer_obj["config"]["learning_rate"] = hp[1]
optimizer = tf.keras.optimizers.get(optimizer_obj)
n_epochs = model_data.n_epochs
# train the model
(acc, val_acc, loss, val_loss, model) = fit(model, loss_fn, optimizer, train_ds, validation_ds, n_epochs)
# CHECK FOR THE BEST MODEL (from validation accuracy)
if val_acc > best_val_acc:
best_acc = acc
best_val_acc = val_acc
best_loss = loss
best_val_loss = val_loss
best_model = model
best_optimizer = optimizer.get_config()
best_loss_fn = loss_fn.get_config()
# END LOOP
logging.info('[Training] Completed training on model ID: ' + training_id)
# If we are saving the model, we must save it to folder, zip that folder,
# and then send the zip file to the server via HTTP requests
if model_data.save:
# print('[Training] Preparing to save Model data on model ID: ' + training_id)
logging.info('[Training] Preparing to save Model data on model ID: ' + training_id)
# Create temp dir and save model to it
tmpdir = tempfile.mkdtemp()
model_save_path = os.path.join(tmpdir, training_id)
# Save model nested 1 more layer down to facilitate unzipping
tf.saved_model.save(best_model, os.path.join(model_save_path, training_id))
shutil.make_archive(model_save_path, 'zip', model_save_path)
print(tmpdir)
files = {'model': open(model_save_path+'.zip', 'rb')}
requests.post(
'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/model',
headers={'api_key': API_KEY},
params={'training_id': training_id},
files=files
)
# print('[Training] Sent SavedModel file data on model ID: ' + training_id)
logging.info('[Training] Sent SavedModel file data on model ID: ' + training_id)
except:
# print('[Training] Critical error on training: ' + training_id)
logging.exception('[Training] Critical error on training: ' + training_id)
result = {
'training_accuracy': best_acc,
'validation_accuracy': best_val_acc,
'training_loss': best_loss,
'validation_loss': best_val_loss,
'optimizer_config': str(best_optimizer),
'loss_config': str(best_loss_fn)
}
logging.info('[Training] results: ' + str(result))
# Send HTTP request to server with the statistics on this training
r = requests.post(
'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/result',
headers={'api_key': API_KEY},
json={
'dataset_name': os.getenv('DATASET_NAME'),
'training_id': training_id,
'results': result
})
r.raise_for_status()
# print("[Training Results] Sent training results to server.")
logging.info("[Training Results] Sent training results to server.")
def fit(model, loss_fn, optimizer, train_ds, validation_ds, n_epochs):
acc = [-1]
val_acc = [-1]
loss = [-1]
val_loss = [-1]
logging.info(loss_fn)
logging.info(optimizer)
model.compile(optimizer=optimizer,
loss=loss_fn,
metrics=['accuracy'])
logging.info('[Training] with optimizer config: ' + str(model.optimizer.get_config()))
logging.info('[Training] with loss function config: ' + str(model.loss.get_config()))
history = model.fit(train_ds, validation_data=validation_ds, epochs=n_epochs)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
return (acc[-1], val_acc[-1], loss[-1], val_loss[-1], model)
def convert_data_type(input_dict):
for k, v in input_dict.items():
if v == "True":
input_dict[k] = True
elif v == "False":
input_dict[k] = False
elif isfloat(v):
input_dict[k] = float(v)
def isfloat(value):
if type(value) == bool:
return False
try:
float(value)
return True
except ValueError:
return False
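# Behaviour sketch for the helpers above (illustrative values, not part of the service):
if __name__ == "__main__":
    cfg = {"learning_rate": "0.001", "amsgrad": "False", "name": "adam"}
    convert_data_type(cfg)
    print(cfg)  # {'learning_rate': 0.001, 'amsgrad': False, 'name': 'adam'}
    print(isfloat("3.14"), isfloat(True), isfloat("adam"))  # True False False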
| 32.618868
| 117
| 0.614762
| 1,056
| 8,644
| 4.822917
| 0.221591
| 0.033576
| 0.021206
| 0.023365
| 0.255252
| 0.224426
| 0.211467
| 0.176713
| 0.161005
| 0.156686
| 0
| 0.005174
| 0.284475
| 8,644
| 264
| 118
| 32.742424
| 0.81827
| 0.157219
| 0
| 0.104046
| 0
| 0
| 0.125935
| 0.006234
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040462
| false
| 0.00578
| 0.052023
| 0
| 0.121387
| 0.00578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f894286d87c8139bf9e7bda1448f050c5b02eb70
| 3,287
|
py
|
Python
|
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | 1
|
2017-05-03T12:18:31.000Z
|
2017-05-03T12:18:31.000Z
|
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | 1
|
2020-10-29T04:00:04.000Z
|
2020-10-29T04:00:04.000Z
|
from flask import Flask
from flask import render_template
from flask import request
from model import MovieWebDAO
import json
from ml import Forcast
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.route('/recently/')
def recently():
return render_template('list.html', functionPath="recently")
@app.route('/download/')
def download():
return render_template('list.html', functionPath="download")
@app.route('/recommander/')
def recommander():
return render_template('list.html', functionPath="recommander")
@app.route('/search/<keyword>')
def search(keyword=None):
return render_template('list.html', functionPath="search", keyword=keyword)
@app.route('/favor/')
def favor():
return render_template('list.html', functionPath="favor")
@app.route('/api/recently/')
def getRecentlyMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
#print(str(start) + ", " + str(limit))
movies = MovieWebDAO.getRecentlyMovies(start, limit)
total = MovieWebDAO.countRecentlyMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/recommander/')
def getRecommanderMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getForcastMovies(start, limit)
total = MovieWebDAO.countForcastMovies()
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/download/')
def getDownloadMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getDownloadMovies(start, limit)
total = MovieWebDAO.countDownloadMovies();
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/search/<keyword>')
def getSearchMovies(keyword=None):
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getSearchMovies(start, limit, keyword)
total = MovieWebDAO.countSearchMovies(keyword)
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/favor/')
def getFavorMovies():
start = request.args.get("start", type=int, default=0)
limit = request.args.get("limit", type=int, default=10)
movies = MovieWebDAO.getFavorMovies(start, limit)
total = MovieWebDAO.countFavorMovies();
return json.dumps({"movies": movies, "total": total}, ensure_ascii=False)
@app.route('/api/pick/<actor>/<avNumber>')
def pick(actor=None, avNumber=None):
if not actor or not avNumber:
return "must be <actor>/<avNumber>"
MovieWebDAO.downloadMovie(avNumber)
#DiskIndex.copyOneImageToTemp(actor, avNumber)
return "OK"
@app.route('/api/skip/<avNumber>')
def skip(avNumber=None):
MovieWebDAO.skipMovie(avNumber)
return "OK"
if __name__ == '__main__':
print("http://localhost:15001")
app.run(host='0.0.0.0', debug=True, port=15001)
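# Usage sketch (illustrative): the /api/... list endpoints page with start/limit
# query parameters and return JSON of the form {"movies": [...], "total": <int>}, e.g.
#   GET http://localhost:15001/api/recently/?start=0&limit=10
#   GET http://localhost:15001/api/search/<keyword>?start=0&limit=10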
| 31.009434
| 79
| 0.703377
| 403
| 3,287
| 5.672457
| 0.200993
| 0.052493
| 0.061242
| 0.052493
| 0.430009
| 0.430009
| 0.34252
| 0.34252
| 0.34252
| 0.34252
| 0
| 0.010172
| 0.132644
| 3,287
| 106
| 80
| 31.009434
| 0.791652
| 0.024947
| 0
| 0.220779
| 0
| 0
| 0.149189
| 0.015293
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.077922
| 0.090909
| 0.454545
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f89c748dd51197d30a5af7af230eb9f70959fb01
| 894
|
py
|
Python
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88
|
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13
|
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1
|
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import gast as ast
from beniget import Ancestors, DefUseChains as DUC, UseDefChains
from beniget.beniget import Def
__all__ = ["Ancestors", "DefUseChains", "UseDefChains"]
class DefUseChains(DUC):
def visit_List(self, node):
if isinstance(node.ctx, ast.Load):
dnode = self.chains.setdefault(node, Def(node))
for elt in node.elts:
if isinstance(elt, CommentLine):
continue
self.visit(elt).add_user(dnode)
return dnode
# unfortunately, destructured node are marked as Load,
# only the parent List/Tuple is marked as Store
elif isinstance(node.ctx, ast.Store):
return self.visit_Destructured(node)
visit_Tuple = visit_List
# this import has to be after the definition of DefUseChains
from transonic.analyses.extast import CommentLine # noqa: E402
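# Usage sketch (assumes the standard beniget workflow of visiting a gast tree;
# inspection details may differ between beniget versions):
if __name__ == "__main__":
    demo_tree = ast.parse("x = [1, 2]\ny = x")
    chains = DefUseChains()
    chains.visit(demo_tree)
    # chains.chains maps AST nodes to Def objects linking definitions to their uses.
    print(len(chains.chains), "nodes tracked")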
| 29.8
| 64
| 0.659955
| 109
| 894
| 5.330275
| 0.513761
| 0.037866
| 0.05852
| 0.068847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004566
| 0.265101
| 894
| 29
| 65
| 30.827586
| 0.879756
| 0.187919
| 0
| 0
| 0
| 0
| 0.04577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a219513d5df677c7712f374a4d0f79bdc2f13b
| 2,401
|
py
|
Python
|
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | 1
|
2021-12-29T09:32:08.000Z
|
2021-12-29T09:32:08.000Z
|
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | null | null | null |
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | null | null | null |
from collections import Counter
def read_file(filepath):
with open(filepath,'r') as f:
a = [x for x in f.read().split('\n\n')]
b = []; d = []
for x in [[x[0],x[1].split(' or ')] for x in [x.split(': ') for x in a[0].split('\n')]]:
for y in x[1]:
z = y.split('-')
b += [[x[0],range(int(z[0]),int(z[1])+1)]]
c = [int(x) for x in [x for x in a[1].split('\n')][1].split(',')]
for x in a[2].split('\n')[1:]:
d += [[int(x) for x in x.split(',')]]
return b,c,d
def validate_tix(tix,rules):
valid_tix = []; error_rate = 0
for t in tix:
curr_rate = error_rate
for n in t:
valid = False
for r in rules:
if n in r[1]:
valid = True
break
if not valid: error_rate += n
if curr_rate == error_rate: valid_tix += [t]
return valid_tix, error_rate
def determine_fields(tix,rules):
fields = list(map(list,zip(*tix)))
length = len(rules)
results = {}; p = []
for e,f in enumerate(fields):
i = 0
while i < length:
valid = []
for r in rules[i:i+2]:
for n in f:
if n in r[1]: valid += [n]
if sorted(f) == sorted(valid): p += [(r[0],str(e))]
i += 2
while len(p) > 0:
count = Counter([x[0] for x in p])
matches = [x for x in p if x[0] in [k for k,v in count.items() if v == 1]]
for a,b in matches:
results[a] = int(b)
p = [x for x in p if x[1] != b]
return results
def check_ticket(my_ticket,fields):
total = 0
for k,v in fields.items():
if k[0:9] == 'departure':
total = max(total,1) * my_ticket[v]
return total
def main(filepath):
rules, my_ticket, tickets = read_file(filepath)
valid_tickets, pt1 = validate_tix(tickets,rules)
fields = determine_fields(valid_tickets,rules)
pt2 = check_ticket(my_ticket,fields)
return pt1, pt2
print(main('day16.txt'))
| 24.752577
| 96
| 0.43107
| 332
| 2,401
| 3.045181
| 0.23494
| 0.043521
| 0.065282
| 0.041543
| 0.152324
| 0.06726
| 0.021761
| 0
| 0
| 0
| 0
| 0.024927
| 0.431903
| 2,401
| 96
| 97
| 25.010417
| 0.716276
| 0
| 0
| 0
| 0
| 0
| 0.015827
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.016949
| 0
| 0.186441
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a565676ba40410367b887bd52120b87f5a4d60
| 9,512
|
py
|
Python
|
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | 2
|
2020-09-15T18:10:12.000Z
|
2021-01-25T21:54:04.000Z
|
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | null | null | null |
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | null | null | null |
# Author: Ibrahim Alhas - ID: 1533204.
# MODEL 3: CNN with built-in tensorflow tokenizer.
# This is the final version of the model (not the base).
# Packages and libraries used for this model.
# ** Install these if not installed already **.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from time import time
import re
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score, roc_curve, \
classification_report
from tensorflow import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras import layers
from keras.models import Sequential
from sklearn.model_selection import train_test_split, cross_validate
import tensorflow as tf
import seaborn as sns
import warnings
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization
from keras.layers.noise import GaussianNoise
from keras.layers import Conv2D, MaxPooling2D
warnings.filterwarnings('ignore')
# plt.style.use('ggplot')
# Basic data visualisation and analysis ------------------------------------------------------------------------------
# We see that the title column is from news articles, and the text column forms the twitter tweet extracts.
true = pd.read_csv('True.csv')
false = pd.read_csv('Fake.csv')
# We drop the columns we do not need. See chapter 3, model CNN for more details.
true = true.drop('title', axis=1)
true = true.drop('subject', axis=1)
true = true.drop('date', axis=1)
false = false.drop('title', axis=1)
false = false.drop('subject', axis=1)
false = false.drop('date', axis=1)
# We set the labels for each data instance, where factual = 1, otherwise 0.
false['label'] = 0
true['label'] = 1
# We merge the two divided datasets (true and fake) into a singular dataset.
data = pd.concat([true, false], ignore_index=True)
texts = data['text']
labels = data['label']
x = texts
y = labels
# We incorporate the publishers feature from title and text instances, and place it into the dataset manually.
# First, create a list of indices that do not have a publication part. We can use this as a new feature.
unknown_publishers = []
for index, row in enumerate(true.text.values):
try:
record = row.split(" -", maxsplit=1)
# if no text part is present, following will give error
print(record[1])
# if the length of the publication part is greater than 260
# following will give error, ensuring no text having "-" in between is counted
assert (len(record[0]) < 260)
except:
unknown_publishers.append(index)
# We print the instances where publication information is absent or different.
print(true.iloc[unknown_publishers].text)
# We want to use the publication information as a new feature.
publisher = []
tmp_text = []
for index, row in enumerate(true.text.values):
if index in unknown_publishers:
# Append unknown publisher:
tmp_text.append(row)
publisher.append("Unknown")
continue
record = row.split(" -", maxsplit=1)
publisher.append(record[0])
tmp_text.append(record[1])
# Replace text column with new text + add a new feature column called publisher/source.
true["publisher"] = publisher
true["text"] = tmp_text
del publisher, tmp_text, record, unknown_publishers
# Validate that the publisher/source column has been added to the dataset.
print(true.head())
# Check for missing values, then drop them for both datasets.
print([index for index, text in enumerate(true.text.values) if str(text).strip() == ''])
true = true.drop(8970, axis=0)
fakeEmptyIndex = [index for index, text in enumerate(false.text.values) if str(text).strip() == '']
print(f"No of empty rows: {len(fakeEmptyIndex)}")
false.iloc[fakeEmptyIndex].tail()
# -
# For CNNs, we have to vectorize the text into 2d integers (tensors).
MAX_SEQUENCE_LENGTH = 5000
MAX_NUM_WORDS = 25000
EMBEDDING_DIM = 300
TEST_SPLIT = 0.2
epochs = 1
# We tokenize the text, just like all other models--------------------------------------------------------------------
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre')
# Print the total number of tokens:
print('Found %s tokens.' % len(word_index))
# We partition our dataset into train/test.
x_train, x_val, y_train, y_val = train_test_split(data, labels.apply(lambda x: 0 if x == 0 else 1),
test_size=TEST_SPLIT)
log_dir = "logs\\model\\"
# A custom callbacks function, which initially included tensorboard.
mycallbacks = [
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', patience=2, verbose=1, factor=0.5, min_lr=0.00001),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True), # Restoring the best
# ...weights will help keep the optimal weights.
# tf.keras.callbacks.TensorBoard(log_dir="./logs"), # NEWLY ADDED - CHECK.
# tf.keras.callbacks.TensorBoard(log_dir=log_dir.format(time())), # NEWLY ADDED - CHECK.
# tensorboard --logdir logs --> to check tensorboard feedback.
]
# Parameters for our model. We experimented with some combinations and settled on this configuration------------------
model = Sequential(
[
# Word/sequence processing:
layers.Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True),
# The layers:
layers.Conv1D(128, 5, activation='relu'),
layers.GlobalMaxPooling1D(),
# We classify our model here:
layers.Dense(128, activation='relu'),
layers.Dense(1, activation='sigmoid')
])
# We compile our model and run, with the loss function crossentropy, and optimizer rmsprop (we experimented with adam,
# ...but rmsprop produced better results).
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
print("Model weights:")
print(model.weights)
# tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./logs")
history = model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_val, y_val),
callbacks=mycallbacks)
# Produce a figure showing accuracy and loss for every epoch.
epochs = [i for i in range(len(history.history['accuracy']))]
fig, ax = plt.subplots(1, 2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(20, 10)
ax[0].plot(epochs, train_acc, 'go-', label='Training Accuracy')
ax[0].plot(epochs, val_acc, 'ro-', label='Testing Accuracy')
ax[0].set_title('Training & Testing Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs, train_loss, 'go-', label='Training Loss')
ax[1].plot(epochs, val_loss, 'ro-', label='Testing Loss')
ax[1].set_title('Training & Testing Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
plt.show()
'''
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = history.epoch
plt.figure(figsize=(12, 9))
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Loss', size=20)
plt.legend(prop={'size': 20})
plt.show()
plt.figure(figsize=(12, 9))
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Accuracy', size=20)
plt.legend(prop={'size': 20})
plt.ylim((0.5, 1))
plt.show()
'''
# We evaluate our model by predicting a few instances from our test data (the first 5)--------------------------------
print("Evaluation:")
print(model.evaluate(x_val, y_val))
# We predict a few instances (up to 5).
pred = model.predict(x_val)
print(pred[:5])
binary_predictions = []
for i in pred:
if i >= 0.5:
binary_predictions.append(1)
else:
binary_predictions.append(0)
# We print performance metrics (sklearn metrics expect the (y_true, y_pred) argument order):
print('Accuracy on test set:', accuracy_score(y_val, binary_predictions))
print('Precision on test set:', precision_score(y_val, binary_predictions))
print('Recall on test set:', recall_score(y_val, binary_predictions))
print('F1 on test set:', f1_score(y_val, binary_predictions))
# We print the classification report (as an extra); class 0 is fiction/fake, class 1 is factual:
print(classification_report(y_val, pred.round(), target_names=['Fiction', 'Fact']))
# We print the confusion matrix.
cmm = confusion_matrix(y_val, pred.round())
print(cmm)
print("Ibrahim Alhas")
cmm = pd.DataFrame(cmm, index=['Fake', 'Original'], columns=['Fake', 'Original'])
plt.figure(figsize=(10, 10))
sns.heatmap(cmm, cmap="Blues", linecolor='black', linewidth=1, annot=True, fmt='', xticklabels=['Fake', 'Original'],
yticklabels=['Fake', 'Original'])
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
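# --- Added sketch: single-text inference (illustrative; not part of the original pipeline) ---------------------------
# Assumes the fitted `tokenizer` and trained `model` from above are still in memory.
# The sample text below is a made-up placeholder, not an instance from the dataset.
sample_text = ["The government announced a new policy on renewable energy today."]
sample_seq = tokenizer.texts_to_sequences(sample_text)
sample_pad = pad_sequences(sample_seq, maxlen=MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre')
sample_prob = float(model.predict(sample_pad)[0][0])
print("Predicted probability of being factual:", sample_prob)
print("Predicted label (1 = factual, 0 = fake):", 1 if sample_prob >= 0.5 else 0)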
# End----------------------------------------------------
| 37.746032
| 120
| 0.700694
| 1,349
| 9,512
| 4.842847
| 0.28762
| 0.012399
| 0.011021
| 0.014082
| 0.130874
| 0.087555
| 0.04041
| 0.04041
| 0.011021
| 0
| 0
| 0.017487
| 0.152334
| 9,512
| 251
| 121
| 37.896414
| 0.792757
| 0.312027
| 0
| 0.041379
| 0
| 0
| 0.107895
| 0.003684
| 0
| 0
| 0
| 0
| 0.006897
| 1
| 0
| false
| 0
| 0.151724
| 0
| 0.151724
| 0.124138
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a57061a44b4ce6c14481e8a79c00cddf4bc7c8
| 40,857
|
py
|
Python
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 6
|
2015-07-27T21:50:39.000Z
|
2020-06-25T14:32:35.000Z
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 89
|
2015-06-24T09:35:40.000Z
|
2022-02-13T14:40:31.000Z
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 12
|
2015-07-13T17:31:04.000Z
|
2021-08-06T06:50:21.000Z
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2017 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <rich.mahn@unfoldingword.org>
"""
This script generates the HTML tN documents for each book of the Bible
"""
from __future__ import unicode_literals, print_function
import os
import sys
import re
import pprint
import logging
import argparse
import tempfile
import markdown
import shutil
import subprocess
import csv
import codecs
import markdown2
import json
from glob import glob
from bs4 import BeautifulSoup
from usfm_tools.transform import UsfmTransform
from ...general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
from ...general_tools.url_utils import download_file
from ...general_tools.bible_books import BOOK_NUMBERS, BOOK_CHAPTER_VERSES
from ...general_tools.usfm_utils import usfm3_to_usfm2
_print = print
def print(obj):
_print(json.dumps(obj, ensure_ascii=False, indent=2).encode('utf-8'))
class TnConverter(object):
def __init__(self, ta_tag=None, tn_tag=None, tw_tag=None, ust_tag=None, ult_tag=None, ugnt_tag=None, working_dir=None,
output_dir=None, lang_code='en', books=None):
"""
:param ta_tag:
:param tn_tag:
:param tw_tag:
:param ust_tag:
:param ult_tag:
:param ugnt_tag:
:param working_dir:
:param output_dir:
:param lang_code:
:param books:
"""
self.ta_tag = ta_tag
self.tn_tag = tn_tag
self.tw_tag = tw_tag
self.ust_tag = ust_tag
self.ult_tag = ult_tag
self.ugnt_tag = ugnt_tag
self.working_dir = working_dir
self.output_dir = output_dir
self.lang_code = lang_code
self.books = books
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.pp = pprint.PrettyPrinter(indent=4)
if not self.working_dir:
self.working_dir = tempfile.mkdtemp(prefix='tn-')
if not self.output_dir:
self.output_dir = self.working_dir
self.logger.debug('TEMP DIR IS {0}'.format(self.working_dir))
self.tn_dir = os.path.join(self.working_dir, '{0}_tn'.format(lang_code))
self.tw_dir = os.path.join(self.working_dir, '{0}_tw'.format(lang_code))
self.ta_dir = os.path.join(self.working_dir, '{0}_ta'.format(lang_code))
self.ust_dir = os.path.join(self.working_dir, '{0}_ust'.format(lang_code))
self.ult_dir = os.path.join(self.working_dir, '{0}_ult'.format(lang_code))
self.ugnt_dir = os.path.join(self.working_dir, 'UGNT')
self.versification_dir = os.path.join(self.working_dir, 'versification', 'bible', 'ufw', 'chunks')
self.manifest = None
self.book_id = None
self.book_title = None
self.book_number = None
self.project = None
self.tn_text = ''
self.tw_text = ''
self.ta_text = ''
self.rc_references = {}
self.chapters_and_verses = {}
self.resource_data = {}
self.tn_book_data = {}
self.tw_words_data = {}
self.bad_links = {}
self.usfm_chunks = {}
self.version = None
self.contributors = ''
self.publisher = None
self.issued = None
self.filename_base = None
def run(self):
self.setup_resource_files()
self.manifest = load_yaml_object(os.path.join(self.tn_dir, 'manifest.yaml'))
self.version = self.manifest['dublin_core']['version']
#############self.contributors = '; '.join(self.manifest['dublin_core']['contributor'])
self.publisher = self.manifest['dublin_core']['publisher']
self.issued = self.manifest['dublin_core']['issued']
projects = self.get_book_projects()
for p in projects:
self.project = p
self.book_id = p['identifier'].upper()
self.book_title = p['title'].replace(' translationNotes', '')
self.book_number = BOOK_NUMBERS[self.book_id.lower()]
if int(self.book_number) != 65:
continue
self.populate_tn_book_data()
self.populate_tw_words_data()
self.populate_chapters_and_verses()
self.populate_usfm_chunks()
self.filename_base = '{0}_tn_{1}-{2}_v{3}'.format(self.lang_code, self.book_number.zfill(2), self.book_id, self.version)
self.rc_references = {}
self.logger.info('Creating tN for {0} ({1}-{2})...'.format(self.book_title, self.book_number, self.book_id))
if not os.path.isfile(os.path.join(self.output_dir, '{0}.html'.format(self.filename_base))):
print("Processing HTML...")
self.generate_html()
if not os.path.isfile(os.path.join(self.output_dir, '{0}.pdf'.format(self.filename_base))):
print("Generating PDF...")
self.convert_html2pdf()
if len(self.bad_links.keys()):
_print("BAD LINKS:")
for bad in sorted(self.bad_links.keys()):
for ref in self.bad_links[bad]:
parts = ref[5:].split('/')
_print("Bad reference: `{0}` in {1}'s {2}".format(bad, parts[1], '/'.join(parts[3:])))
def get_book_projects(self):
projects = []
if not self.manifest or 'projects' not in self.manifest or not self.manifest['projects']:
return
for p in self.manifest['projects']:
if not self.books or p['identifier'] in self.books:
if not p['sort']:
p['sort'] = BOOK_NUMBERS[p['identifier']]
projects.append(p)
return sorted(projects, key=lambda k: k['sort'])
def get_resource_url(self, resource, tag):
return 'https://git.door43.org/unfoldingWord/{0}_{1}/archive/{2}.zip'.format(self.lang_code, resource, tag)
def setup_resource_files(self):
if not os.path.isdir(os.path.join(self.working_dir, 'en_tn')):
tn_url = self.get_resource_url('tn', self.tn_tag)
self.extract_files_from_url(tn_url)
if not os.path.isdir(os.path.join(self.working_dir, 'en_tw')):
tw_url = self.get_resource_url('tw', self.tw_tag)
self.extract_files_from_url(tw_url)
if not os.path.isdir(os.path.join(self.working_dir, 'en_ta')):
ta_url = self.get_resource_url('ta', self.ta_tag)
self.extract_files_from_url(ta_url)
if not os.path.isdir(os.path.join(self.working_dir, 'en_ust')):
ust_url = self.get_resource_url('ust', self.ust_tag)
self.extract_files_from_url(ust_url)
if not os.path.isdir(os.path.join(self.working_dir, 'en_ult')):
ult_url = self.get_resource_url('ult', self.ult_tag)
self.extract_files_from_url(ult_url)
if not os.path.isdir(os.path.join(self.working_dir, 'ugnt')):
ugnt_url = 'https://git.door43.org/unfoldingWord/UGNT/archive/{0}.zip'.format(self.ugnt_tag)
self.extract_files_from_url(ugnt_url)
if not os.path.isfile(os.path.join(self.working_dir, 'icon-tn.png')):
command = 'curl -o {0}/icon-tn.png https://unfoldingword.bible/assets/img/icon-tn.png'.format(self.working_dir)
subprocess.call(command, shell=True)
if not os.path.isdir(os.path.join(self.working_dir, 'versification')):
versification_url = 'https://git.door43.org/Door43-Catalog/versification/archive/master.zip'
self.extract_files_from_url(versification_url)
def extract_files_from_url(self, url):
zip_file = os.path.join(self.working_dir, url.rpartition('/')[2])
try:
self.logger.debug('Downloading {0}...'.format(url))
download_file(url, zip_file)
finally:
self.logger.debug('finished.')
try:
self.logger.debug('Unzipping {0}...'.format(zip_file))
unzip(zip_file, self.working_dir)
finally:
self.logger.debug('finished.')
def populate_usfm_chunks(self):
book_chunks = {}
for resource in ['ult', 'ust']:
save_dir = os.path.join(self.working_dir, 'chunk_data', resource)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}.json'.format(self.book_id.lower()))
if os.path.isfile(save_file):
book_chunks[resource] = load_json_object(save_file)
continue
book_chunks[resource] = {}
bible_dir = getattr(self, '{0}_dir'.format(resource))
usfm = read_file(os.path.join(bible_dir, '{0}-{1}.usfm'.format(BOOK_NUMBERS[self.book_id.lower()], self.book_id)), encoding='utf-8')
usfm = usfm3_to_usfm2(usfm)
usfm = re.sub(r'\n*\s*\\s5\s*\n*', r'\n', usfm, flags=re.MULTILINE | re.IGNORECASE)
chapters_usfm = re.compile(r'\n*\s*\\c[\u00A0\s]+').split(usfm)
book_chunks[resource]['header'] = chapters_usfm[0]
for chapter_data in self.chapters_and_verses:
chapter = str(chapter_data['chapter'])
book_chunks[resource][chapter] = {}
book_chunks[resource][chapter]['chunks'] = []
chapter_usfm = r'\\c ' + chapters_usfm[int(chapter)].strip()
verses_usfm = re.compile(r'\n*\s*\\v[\u00A0\s]+').split(chapter_usfm)
for idx, first_verse in enumerate(chapter_data['first_verses']):
if len(chapter_data['first_verses']) > idx+1:
last_verse = chapter_data['first_verses'][idx+1] - 1
else:
last_verse = int(BOOK_CHAPTER_VERSES[self.book_id.lower()][chapter])
chunk_usfm = ''
for verse in range(first_verse, last_verse+1):
chunk_usfm += r'\v '+verses_usfm[verse]+'\n'
data = {
'usfm': chunk_usfm,
'first_verse': first_verse,
'last_verse': last_verse,
}
# print('chunk: {0}-{1}-{2}-{3}-{4}'.format(resource, self.book_id, chapter, first_verse, last_verse))
book_chunks[resource][chapter][str(first_verse)] = data
book_chunks[resource][chapter]['chunks'].append(data)
write_file(save_file, book_chunks[resource])
self.usfm_chunks = book_chunks
def generate_html(self):
tn_html = self.get_tn_html()
ta_html = self.get_ta_html()
tw_html = self.get_tw_html()
html = '\n<br>\n'.join([tn_html, tw_html, ta_html])
html = self.replace_rc_links(html)
html = self.fix_links(html)
html_file = os.path.join(self.output_dir, '{0}.html'.format(self.filename_base))
write_file(html_file, html)
print('Wrote HTML to {0}'.format(html_file))
def pad(self, num):
if self.book_id == 'PSA':
return str(num).zfill(3)
else:
return str(num).zfill(2)
@staticmethod
def isInt(str):
try:
int(str)
return True
except ValueError:
return False
def populate_chapters_and_verses(self):
versification_file = os.path.join(self.versification_dir, '{0}.json'.format(self.book_id.lower()))
self.chapters_and_verses = {}
if os.path.isfile(versification_file):
self.chapters_and_verses = load_json_object(versification_file)
def populate_tn_book_data(self):
book_file = os.path.join(self.tn_dir, 'en_tn_{0}-{1}.tsv'.format(self.book_number, self.book_id))
self.tn_book_data = {}
if not os.path.isfile(book_file):
return
book_data = {}
with open(book_file) as fd:
rd = csv.reader(fd, delimiter=str("\t"), quotechar=str('"'))
header = next(rd)
for row in rd:
data = {}
for idx, field in enumerate(header):
data[field] = row[idx]
chapter = data['Chapter']
verse = data['Verse']
if not chapter in book_data:
book_data[chapter] = {}
if not verse in book_data[chapter]:
book_data[chapter][verse] = []
book_data[chapter][verse].append(data)
self.tn_book_data = book_data
def get_tn_html(self):
tn_html = '<h1><a id="tn-{0}"></a>translationNotes</h1>\n\n'.format(self.book_id)
if 'front' in self.tn_book_data and 'intro' in self.tn_book_data['front']:
intro = markdown.markdown(self.tn_book_data['front']['intro'][0]['OccurrenceNote'].decode('utf8').replace('<br>', '\n'))
title = self.get_first_header(intro)
intro = self.fix_tn_links(intro, 'intro')
intro = self.increase_headers(intro)
intro = self.decrease_headers(intro, 4) # bring headers of 3 or more down 1
id = 'tn-{0}-front-intro'.format(self.book_id)
intro = re.sub(r'<h(\d)>', r'<h\1><a id="{0}"></a>'.format(id), intro, 1, flags=re.IGNORECASE | re.MULTILINE)
intro += '<br><br>\n\n'
tn_html += '\n<br>\n'+intro
# HANDLE RC LINKS AND BACK REFERENCE
rc = 'rc://*/tn/help/{0}/front/intro'.format(self.book_id.lower())
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
self.get_resource_data_from_rc_links(intro, rc)
for chapter_verses in self.chapters_and_verses:
chapter = str(chapter_verses['chapter'])
if 'intro' in self.tn_book_data[chapter]:
intro = markdown.markdown(self.tn_book_data[chapter]['intro'][0]['OccurrenceNote'].replace('<br>',"\n"))
intro = re.sub(r'<h(\d)>([^>]+) 0+([1-9])', r'<h\1>\2 \3', intro, 1, flags=re.MULTILINE | re.IGNORECASE)
title = self.get_first_header(intro)
intro = self.fix_tn_links(intro, chapter)
intro = self.increase_headers(intro)
intro = self.decrease_headers(intro, 5, 2) # bring headers of 5 or more down 2
id = 'tn-{0}-{1}'.format(self.book_id, self.pad(chapter))
intro = re.sub(r'<h(\d+)>', r'<h\1><a id="{0}"></a>'.format(id), intro, 1, flags=re.IGNORECASE | re.MULTILINE)
intro += '<br><br>\n\n'
tn_html += '\n<br>\n'+intro
# HANDLE RC LINKS
rc = 'rc://*/tn/help/{0}/{1}/intro'.format(self.book_id.lower(), self.pad(chapter))
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
self.get_resource_data_from_rc_links(intro, rc)
for idx, first_verse in enumerate(chapter_verses['first_verses']):
col1 = ''
if idx < len(chapter_verses['first_verses'])-1:
last_verse = chapter_verses['first_verses'][idx+1] - 1
else:
last_verse = int(BOOK_CHAPTER_VERSES[self.book_id.lower()][chapter])
if first_verse != last_verse:
title = '{0} {1}:{2}-{3}'.format(self.book_title, chapter, first_verse, last_verse)
else:
title = '{0} {1}:{2}'.format(self.book_title, chapter, first_verse)
anchors = ''
for verse in range(first_verse, last_verse+1):
id = 'tn-{0}-{1}-{2}'.format(self.book_id, self.pad(chapter), self.pad(verse))
anchors += '<a id="{0}"></a>'.format(id)
rc = 'rc://*/tn/help/{0}/{1}/{2}'.format(self.book_id.lower(), self.pad(chapter), self.pad(verse))
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#'+id,
'title': title
}
header = '\n<br>\n<h2>{0}{1}</h2>\n\n'.format(anchors, title)
col1 += '<sup style="color:light-gray">ULT</sup>' + self.get_bible_html('ult', int(chapter), first_verse, last_verse)
col1 += '\n<br><br>\n'
col1 += '<sup style="color:light-gray">UST</sup>' + self.get_bible_html('ust', int(chapter), first_verse, last_verse)
col2 = ''
for verse in range(first_verse, last_verse+1):
if str(verse) in self.tn_book_data[chapter]:
for data in self.tn_book_data[chapter][str(verse)]:
title = data['GLQuote'].decode('utf8')
col2 += '<b>' + title + (' -' if not title.endswith(':') else '') + ' </b>'
col2 += markdown.markdown(data['OccurrenceNote'].decode('utf8').replace('<br>',"\n")).replace('<p>', '').replace('</p>', '')
col2 += '\n<br><br>\n'
if col2 != '':
col2 = self.decrease_headers(col2, 5) # bring headers of 5 or more #'s down 1
col2 = self.fix_tn_links(col2, chapter)
chunk_page = '{0}\n<table style="width:100%">\n<tr>\n<td style="vertical-align:top;width:35%;padding-right:5px">\n\n<p>{1}</p>\n</td>\n<td style="vertical-align:top">\n\n<p>{2}</p>\n</td>\n</tr>\n</table>\n'.format(header, col1, col2)
# chunk_page = '{0}\n<table style="width:100%;border:none"><tr><td style="width:50%">{1}</td><td>{2}</td></tr></table>'.format(header, col1, col2) # REMOVE
tn_html += chunk_page
self.get_resource_data_from_rc_links(chunk_page, rc)
return tn_html
def populate_tw_words_data(self):
groups = ['kt', 'names', 'other']
grc_path = 'tools/tn/generate_tn_pdf/grc/translationHelps/translationWords/v0.4'
if not os.path.isdir(grc_path):
_print('{0} not found! Please make sure you ran `node getResources ./` in the generate_tn_pdf dir and that the version in the script is correct'.format(grc_path))
exit(1)
words = {}
for group in groups:
files_path = '{0}/{1}/groups/{2}/*.json'.format(grc_path, group, self.book_id.lower())
files = glob(files_path)
for file in files:
base = os.path.splitext(os.path.basename(file))[0]
rc = 'rc://*/tw/dict/bible/{0}/{1}'.format(group, base)
occurrences = load_json_object(file)
for occurrence in occurrences:
contextId = occurrence['contextId']
chapter = contextId['reference']['chapter']
verse = contextId['reference']['verse']
contextId['rc'] = rc
if chapter not in words:
words[chapter] = {}
if verse not in words[chapter]:
words[chapter][verse] = []
words[chapter][verse].append(contextId)
self.tw_words_data = words
def get_bible_html(self, resource, chapter, first_verse, last_verse):
html = self.get_chunk_html(resource, chapter, first_verse)
html = html.replace('\n', '').replace('<p>', '').replace('</p>', '').strip()
html = re.sub(r'<span class="v-num"', '<br><span class="v-num"', html, flags=re.IGNORECASE | re.MULTILINE)
if resource != 'ult':
return html
words = self.get_all_words_to_match(resource, chapter, first_verse, last_verse)
verses = html.split('<sup>')
for word in words:
parts = word['text'].split(' ... ')
highlights = {}
idx = word['contextId']['reference']['verse']-first_verse+1
for part in parts:
highlights[part] = r'<a href="{0}">{1}</a>'.format(word['contextId']['rc'], part)
regex = re.compile(r'(?<![></\\_-])\b(%s)\b(?![></\\_-])' % "|".join(highlights.keys()))
verses[idx] = regex.sub(lambda m: highlights[m.group(0)], verses[idx])
html = '<sup>'.join(verses)
return html
def get_all_words_to_match(self, resource, chapter, first_verse, last_verse):
path = 'tools/tn/generate_tn_pdf/en/bibles/{0}/v1/{1}/{2}.json'.format(resource, self.book_id.lower(), chapter)
words = []
data = load_json_object(path)
chapter = int(chapter)
for verse in range(first_verse, last_verse + 1):
if chapter in self.tw_words_data and verse in self.tw_words_data[chapter]:
contextIds = self.tw_words_data[int(chapter)][int(verse)]
verseObjects = data[str(verse)]['verseObjects']
for contextId in contextIds:
aligned_text = self.get_aligned_text(verseObjects, contextId, False)
if aligned_text:
words.append({'text': aligned_text, 'contextId': contextId})
return words
def find_english_from_combination(self, verseObjects, quote, occurrence):
greekWords = []
wordList = []
for verseObject in verseObjects:
greek = None
if 'content' in verseObject and verseObject['type'] == 'milestone':
greekWords.append(verseObject['content'])
englishWords = []
for child in verseObject['children']:
if child['type'] == 'word':
englishWords.append(child['text'])
english = ' '.join(englishWords)
found = False
for idx, word in enumerate(wordList):
if word['greek'] == verseObject['content'] and word['occurrence'] == verseObject['occurrence']:
wordList[idx]['english'] += ' ... ' + english
found = True
if not found:
wordList.append({'greek': verseObject['content'], 'english': english, 'occurrence': verseObject['occurrence']})
combinations = []
occurrences = {}
for i in range(0, len(wordList)):
greek = wordList[i]['greek']
english = wordList[i]['english']
for j in range(i, len(wordList)):
if i != j:
greek += ' '+wordList[j]['greek']
english += ' '+wordList[j]['english']
if greek not in occurrences:
occurrences[greek] = 0
occurrences[greek] += 1
combinations.append({'greek': greek, 'english': english, 'occurrence': occurrences[greek]})
for combination in combinations:
if combination['greek'] == quote and combination['occurrence'] == occurrence:
return combination['english']
return None
def find_english_from_split(self, verseObjects, quote, occurrence, isMatch=False):
wordsToMatch = quote.split(' ')
separator = ' '
needsEllipsis = False
text = ''
for index, verseObject in enumerate(verseObjects):
lastMatch = False
if verseObject['type'] == 'milestone' or verseObject['type'] == 'word':
if ((('content' in verseObject and verseObject['content'] in wordsToMatch) or ('lemma' in verseObject and verseObject['lemma'] in wordsToMatch)) and verseObject['occurrence'] == occurrence) or isMatch:
lastMatch = True
if needsEllipsis:
separator += '... '
needsEllipsis = False
if text:
text += separator
separator = ' '
if 'text' in verseObject and verseObject['text']:
text += verseObject['text']
if 'children' in verseObject and verseObject['children']:
text += self.find_english_from_split(verseObject['children'], quote, occurrence, True)
elif 'children' in verseObject and verseObject['children']:
childText = self.find_english_from_split(verseObject['children'], quote, occurrence, isMatch)
if childText:
lastMatch = True
if needsEllipsis:
separator += '... '
needsEllipsis = False
text += (separator if text else '') + childText
separator = ' '
elif text:
needsEllipsis = True
if lastMatch and index + 1 < len(verseObjects) and verseObjects[index + 1]['type'] == "text" and text:
if separator == ' ':
separator = ''
separator += verseObjects[index + 1]['text']
return text
def get_aligned_text(self, verseObjects, contextId, isMatch=False):
if not verseObjects or not contextId or not 'quote' in contextId or not contextId['quote']:
return ''
text = self.find_english_from_combination(verseObjects, contextId['quote'], contextId['occurrence'])
if text:
return text
text = self.find_english_from_split(verseObjects, contextId['quote'], contextId['occurrence'])
if text:
return text
_print('English not found!')
print(contextId)
def get_tw_html(self):
tw_html = '<h1><a id="tw-{0}"></a>translationWords</h1>\n\n'.format(self.book_id)
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/tw/' not in rc:
continue
html = markdown.markdown(self.resource_data[rc]['text'])
html = self.increase_headers(html)
id_tag = '<a id="{0}"></a>'.format(self.resource_data[rc]['id'])
html = re.sub(r'<h(\d)>(.*?)</h(\d)>', r'<h\1>{0}\2</h\3>\n{1}'.format(id_tag, self.get_reference_text(rc)), html, 1, flags=re.IGNORECASE | re.MULTILINE)
html += '\n\n'
tw_html += html
return tw_html
def get_ta_html(self):
ta_html = '<h1><a id="{0}-ta-{1}"></a>translationAcademy</h1>\n\n'.format(self.lang_code, self.book_id)
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/ta/' not in rc:
continue
if self.resource_data[rc]['text']:
html = markdown.markdown(self.resource_data[rc]['text'])
html = self.increase_headers(html)
id_tag = '<a id="{0}"></a>'.format(self.resource_data[rc]['id'])
html = re.sub(r'<h(\d)>(.*?)</h(\d)>', r'<h\1>{0}\2</h\3>{1}\n'.format(id_tag, self.get_reference_text(rc)), html, 1, flags=re.IGNORECASE | re.MULTILINE)
html += "\n\n"
ta_html += html
return ta_html
def get_reference_text(self, rc):
uses = ''
if len(self.rc_references[rc]):
references = []
for reference in self.rc_references[rc]:
if '/tn/' in reference:
parts = reference[5:].split('/')
id = 'tn-{0}-{1}-{2}'.format(self.book_id, parts[4], parts[5])
if parts[4] == 'front':
text = 'Intro'
elif parts[5] == 'intro':
text = 'Ch. {0} Notes'.format(parts[4].lstrip('0'))
else:
text = '{1}:{2}'.format(id, parts[4].lstrip('0'), parts[5].lstrip('0'))
references.append('<a href="#{0}">{1}</a>'.format(id, text))
if len(references):
uses = '(Linked from: ' + ', '.join(references) + ')'
return uses
def get_resource_data_from_rc_links(self, text, source_rc):
for rc in re.findall(r'rc://[A-Z0-9/_\*-]+', text, flags=re.IGNORECASE | re.MULTILINE):
parts = rc[5:].split('/')
resource = parts[1]
path = '/'.join(parts[3:])
if resource not in ['ta', 'tw']:
continue
if rc not in self.rc_references:
self.rc_references[rc] = []
self.rc_references[rc].append(source_rc)
if rc not in self.resource_data:
title = ''
t = ''
anchor_id = '{0}-{1}'.format(resource, path.replace('/', '-'))
link = '#{0}'.format(anchor_id)
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}.md'.format(path))
if not os.path.isfile(file_path):
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}/01.md'.format(path))
# if not os.path.isfile(file_path):
# if resource == 'tw':
# if path.startswith('bible/other/'):
# path2 = re.sub(r'^bible/other/', r'bible/kt/', path)
# else:
# path2 = re.sub(r'^bible/kt/', r'bible/other/', path)
# anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
# link = '#{0}'.format(anchor_id)
# file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
# '{0}.md'.format(path2))
if os.path.isfile(file_path):
t = read_file(file_path)
if resource == 'ta':
title_file = os.path.join(os.path.dirname(file_path), 'title.md')
question_file = os.path.join(os.path.dirname(file_path), 'sub-title.md')
if os.path.isfile(title_file):
title = read_file(title_file)
else:
title = self.get_first_header(t)
if os.path.isfile(question_file):
question = read_file(question_file)
question = 'This page answers the question: *{0}*\n\n'.format(question)
else:
question = ''
t = '# {0}\n\n{1}{2}'.format(title, question, t)
t = self.fix_ta_links(t, path.split('/')[0])
elif resource == 'tw':
title = self.get_first_header(t)
t = re.sub(r'\n*\s*\(See [^\n]*\)\s*\n*', '\n\n', t, flags=re.IGNORECASE | re.MULTILINE)
t = self.fix_tw_links(t, path.split('/')[1])
else:
if rc not in self.bad_links:
self.bad_links[rc] = []
self.bad_links[rc].append(source_rc)
self.resource_data[rc] = {
'rc': rc,
'link': link,
'id': anchor_id,
'title': title,
'text': t,
}
if t:
self.get_resource_data_from_rc_links(t, rc)
@staticmethod
def increase_headers(text, increase_depth=1):
if text:
for num in range(5,0,-1):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num+increase_depth), text, flags=re.MULTILINE)
return text
@staticmethod
def decrease_headers(text, minimum_header=1, decrease=1):
if text:
for num in range(minimum_header, minimum_header+10):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num-decrease if (num-decrease) <= 5 else 5), text, flags=re.MULTILINE)
return text
@staticmethod
def get_first_header(text):
lines = text.split('\n')
if len(lines):
for line in lines:
if re.match(r'<h1>', line):
return re.sub(r'<h1>(.*?)</h1>', r'\1', line)
return lines[0]
return "NO TITLE"
def fix_tn_links(self, text, chapter):
text = re.sub(r'<a href="\.\./\.\./([^"]+)">([^<]+)</a>', r'\2'.format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^"]+?)/([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-\1-\2"'.format(self.lang_code, self.book_id), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-\1"'.format(self.lang_code, self.book_id), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\./([^"]+?)(\.md)*"', r'href="#{0}-tn-{1}-{2}-\1"'.format(self.lang_code, self.book_id, self.pad(chapter)), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\n__.*\|.*', r'', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_tw_links(self, text, dictionary):
text = re.sub(r'\]\(\.\./([^/)]+?)(\.md)*\)', r'](rc://{0}/tw/dict/bible/{1}/\1)'.format(self.lang_code, dictionary), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(\.\./([^)]+?)(\.md)*\)', r'](rc://{0}/tw/dict/bible/\1)'.format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_ta_links(self, text, manual):
text = re.sub(r'\]\(\.\./([^/)]+)/01\.md\)', r'](rc://{0}/ta/man/{1}/\1)'.format(self.lang_code, manual), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(\.\./\.\./([^/)]+)/([^/)]+)/01\.md\)', r'](rc://{0}/ta/man/\1/\2)'.format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'\]\(([^# :/)]+)\)', r'](rc://{0}/ta/man/{1}/\1)'.format(self.lang_code, manual), text, flags=re.IGNORECASE | re.MULTILINE)
return text
def replace_rc_links(self, text):
# Change rc://... rc links,
# 1st: [[rc://en/tw/help/bible/kt/word]] => <a href="#tw-kt-word">God's Word</a>
# 2nd: rc://en/tw/help/bible/kt/word => #tw-kt-word (used in links that are already formed)
for rc, info in self.resource_data.iteritems():
parts = rc[5:].split('/')
tail = '/'.join(parts[1:])
pattern = r'\[\[rc://[^/]+/{0}\]\]'.format(re.escape(tail))
replace = r'<a href="{0}">{1}</a>'.format(info['link'], info['title'])
text = re.sub(pattern, replace, text, flags=re.IGNORECASE | re.MULTILINE)
pattern = r'rc://[^/]+/{0}'.format(re.escape(tail))
replace = info['link']
text = re.sub(pattern, replace, text, flags=re.IGNORECASE | re.MULTILINE)
# Remove other scripture reference not in this tN
text = re.sub(r'<a[^>]+rc://[^>]+>([^>]+)</a>', r'\1', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_links(self, text):
# Change [[http.*]] to <a href="http\1">http\1</a>
text = re.sub(r'\[\[http([^\]]+)\]\]', r'<a href="http\1">http\1</a>', text, flags=re.IGNORECASE)
# convert URLs to links if not already
text = re.sub(r'([^">])((http|https|ftp)://[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="\2">\2</a>', text, flags=re.IGNORECASE)
# URLS wth just www at the start, no http
text = re.sub(r'([^\/])(www\.[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="http://\2">\2</a>', text, flags=re.IGNORECASE)
# Removes leading 0s from verse references
text = re.sub(r' 0*(\d+):0*(\d+)(-*)0*(\d*)', r' \1:\2\3\4', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def get_chunk_html(self, resource, chapter, verse):
# print("html: {0}-{3}-{1}-{2}".format(resource, chapter, verse, self.book_id))
path = os.path.join(self.working_dir, 'usfm_chunks', 'usfm-{0}-{1}-{2}-{3}-{4}'.
format(self.lang_code, resource, self.book_id, chapter, verse))
filename_base = '{0}-{1}-{2}-{3}'.format(resource, self.book_id, chapter, verse)
html_file = os.path.join(path, '{0}.html'.format(filename_base))
usfm_file = os.path.join(path, '{0}.usfm'.format(filename_base))
if os.path.isfile(html_file):
return read_file(html_file)
if not os.path.exists(path):
os.makedirs(path)
chunk = self.usfm_chunks[resource][str(chapter)][str(verse)]['usfm']
usfm = self.usfm_chunks[resource]['header']
if '\\c' not in chunk:
usfm += '\n\n\\c {0}\n'.format(chapter)
usfm += chunk
write_file(usfm_file, usfm)
UsfmTransform.buildSingleHtml(path, path, filename_base)
html = read_file(os.path.join(path, filename_base+'.html'))
soup = BeautifulSoup(html, 'html.parser')
header = soup.find('h1')
if header:
header.decompose()
chapter = soup.find('h2')
if chapter:
chapter.decompose()
html = ''.join(['%s' % x for x in soup.body.contents])
write_file(html_file, html)
return html
def convert_html2pdf(self):
command = """pandoc \
--pdf-engine="wkhtmltopdf" \
--template="tools/tn/generate_tn_pdf/tex/template.tex" \
--toc \
--toc-depth=2 \
-V documentclass="scrartcl" \
-V classoption="oneside" \
-V geometry='hmargin=2cm' \
-V geometry='vmargin=3cm' \
-V title="{2}" \
-V subtitle="translationNotes" \
-V logo="{6}/icon-tn.png" \
-V date="{3}" \
-V version="{4}" \
-V publisher="{8}" \
-V contributors="{9}" \
-V mainfont="Noto Serif" \
-V sansfont="Noto Sans" \
-V fontsize="13pt" \
-V urlcolor="Bittersweet" \
-V linkcolor="Bittersweet" \
-H "tools/tn/generate_tn_pdf/tex/format.tex" \
-o "{5}/{7}.pdf" \
"{5}/{7}.html"
""".format(BOOK_NUMBERS[self.book_id.lower()], self.book_id, self.book_title, self.issued, self.version, self.output_dir,
self.working_dir, self.filename_base, self.publisher, self.contributors)
_print(command)
subprocess.call(command, shell=True)
def main(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, lang_code, books, working_dir, output_dir):
"""
:param ta_tag:
:param tn_tag:
:param tw_tag:
:param ust_tag:
:param ult_tag:
:param ugnt_tag:
:param lang_code:
:param books:
:param working_dir:
:param output_dir:
:return:
"""
tn_converter = TnConverter(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, working_dir, output_dir,
lang_code, books)
tn_converter.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--lang', dest='lang_code', default='en', required=False, help="Language Code")
parser.add_argument('-b', '--book_id', dest='books', nargs='+', default=None, required=False, help="Bible Book(s)")
parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help="Working Directory")
parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help="Output Directory")
parser.add_argument('--ta-tag', dest='ta', default='v10', required=False, help="tA Tag")
parser.add_argument('--tn-tag', dest='tn', default='v13', required=False, help="tN Tag")
parser.add_argument('--tw-tag', dest='tw', default='v9', required=False, help="tW Tag")
parser.add_argument('--ust-tag', dest='ust', default='master', required=False, help="UST Tag")
parser.add_argument('--ult-tag', dest='ult', default='master', required=False, help="ULT Tag")
parser.add_argument('--ugnt-tag', dest='ugnt', default='v0.4', required=False, help="UGNT Tag")
args = parser.parse_args(sys.argv[1:])
main(args.ta, args.tn, args.tw, args.ust, args.ult, args.ugnt, args.lang_code, args.books, args.working_dir, args.output_dir)
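# --- Added usage note (illustrative; not from the original repository docs) ---
# Because of the relative imports of general_tools above, this module is meant to run as part of the
# tools package. A programmatic call mirroring the argparse defaults would look like the commented
# sketch below; the book id, tag values and directories are placeholders:
#
#   main(ta_tag='v10', tn_tag='v13', tw_tag='v9', ust_tag='master', ult_tag='master',
#        ugnt_tag='v0.4', lang_code='en', books=['jud'], working_dir='/tmp/tn-work',
#        output_dir='/tmp/tn-out')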
| 49.46368
| 254
| 0.548303
| 5,124
| 40,857
| 4.215457
| 0.097775
| 0.016667
| 0.015741
| 0.0175
| 0.412222
| 0.338333
| 0.260046
| 0.212546
| 0.18963
| 0.15912
| 0
| 0.014075
| 0.290501
| 40,857
| 825
| 255
| 49.523636
| 0.731061
| 0.051913
| 0
| 0.165951
| 0
| 0.010014
| 0.140736
| 0.04654
| 0.002861
| 0
| 0
| 0
| 0
| 1
| 0.050072
| false
| 0
| 0.031474
| 0.001431
| 0.130186
| 0.021459
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a59fce72ffcde75ac9e9b378c6906ab092d7dd
| 2,565
|
py
|
Python
|
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | 1
|
2021-11-04T00:08:00.000Z
|
2021-11-04T00:08:00.000Z
|
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from tqdm import tqdm
import argparse
from pyscenic.aucell import aucell
from .aucell import create_gene_signatures
from .aucell import assign_bootstrap
def main():
parser = argparse.ArgumentParser(description='AUcell Bootstrapping.')
parser.add_argument(
'-i', '--in_file',
required=True,
help='<Required> Path to input expression matrix.',
type=str
)
parser.add_argument(
'-d', '--de_genes',
required=True,
help='<Required> Differential expression results.',
type=str
)
parser.add_argument('-o', '--out_file',
help='<Required> Output .h5 file to save results.',
required=True,
type=str
)
parser.add_argument('-n', '--niter',
help='Number of iterations.',
required=False,
default=100,
type=int
)
parser.add_argument('-s', '--subset_n',
help='Number of genes to subset.',
required=False,
default=150,
type=int
)
parser.add_argument('-w', '--n_workers',
help='Number of workers.',
required=False,
default=8,
type=int
)
parser.add_argument('-k', '--weight',
help='Enrichment weight. Default is "t" statistic from differential expression.',
required=False,
default="t",
type=str
)
parser.add_argument('-r', '--random_seed',
help='Random seed for bootstrapping.',
required=False,
default=None
)
args = parser.parse_args()
# Set random seed
if args.random_seed is None:
np.random.seed()
else:
np.random.seed(int(args.random_seed))
# Load
exp_mtx = pd.read_parquet(args.in_file)
print(" * {} cells loaded".format(exp_mtx.shape[0]))
print(" * {} genes detected".format(exp_mtx.shape[1]))
# Load DE Genes
de_df = pd.read_csv(args.de_genes, sep='\t').set_index("gene_name")
store = pd.HDFStore(args.out_file,'a')
for n in tqdm(range(args.niter)):
gene_sigs = create_gene_signatures(de_df, n=args.subset_n, weight_idx=args.weight)
enrich_df = aucell(exp_mtx, gene_sigs, normalize=False, num_workers=args.n_workers)
store["perm{}".format(n)] = enrich_df
store.close()
# Assign bootstrapped
print(" * assigning bootstrap results")
bootstrap_df = assign_bootstrap(args.out_file, n=args.niter, norm=True)
bootstrap_df.to_csv(args.out_file.split(".h5")[0]+".tsv", sep="\t")
if __name__ == "__main__":
main()
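# --- Added sketch: reading the bootstrap output back (illustrative; not part of the original module) ---
def _example_read_bootstrap(out_file="bootstrap_results.h5"):
    """Return the per-iteration AUCell tables written by main() above.

    The key names follow the "perm{n}" convention used in the bootstrap loop;
    the default file name is only a placeholder.
    """
    with pd.HDFStore(out_file, "r") as store:
        return {key.lstrip("/"): store[key] for key in store.keys()}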
| 28.5
| 91
| 0.617934
| 325
| 2,565
| 4.701538
| 0.347692
| 0.04712
| 0.089005
| 0.041885
| 0.109948
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006205
| 0.246004
| 2,565
| 89
| 92
| 28.820225
| 0.783868
| 0.021053
| 0
| 0.226667
| 0
| 0
| 0.2083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013333
| false
| 0
| 0.093333
| 0
| 0.106667
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a77e8060730c4c9bc76d9c5c083f084aed00b7
| 2,383
|
py
|
Python
|
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
from rule_condition import Condition
from rule_action import Action
from rule_template import RuleTemplate
from rule_engine import RuleEngine
from rule import Rule
from rule_data import Data
from rule_scope import Scope
from action_handler_send_email import SendEmailHandler
from action_handler_report_alarm import ReportAlarmHandler
def initialize(rule_engine):
condition = Condition("{{telemetry.messageId}}" , "EQ", "{{rule.messageId}}")
action = Action("REPORT_ALARM", {})
scope = Scope()
scope.add("device_type","PITLID")
rule_template = RuleTemplate(scope=scope, condition=condition, action=action)
data = Data()
data.add("messageId",301)
rule = Rule("301-message-rule",rule_template, data)
rule_engine.add_rule(rule)
action = Action("SEND_EMAIL", {})
scope = Scope()
scope.add("device_type","CAPTIS")
rule_template = RuleTemplate(scope=scope, condition=condition, action=action)
data = Data()
data.add("messageId",201)
rule = Rule("201-message-rule",rule_template, data)
rule_engine.add_rule(rule)
rule_engine.add_handler(ReportAlarmHandler())
rule_engine.add_handler(SendEmailHandler())
def test1(rule_engine):
print("===== Start Test case 1======")
telemetry = Data()
telemetry.add("device_type", "PITLID")
telemetry.add("messageId", 201)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test2(rule_engine):
print("===== Start Test case 2======")
telemetry = Data()
telemetry.add("device_type", "PITLID")
telemetry.add("messageId", 301)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test3(rule_engine):
print("===== Start test case 3 ======")
telemetry = Data()
telemetry.add("device_type", "CAPTIS")
telemetry.add("messageId", 301)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def test4(rule_engine):
print("===== Start test case 4 ======")
telemetry = Data()
telemetry.add("device_type", "CAPTIS")
telemetry.add("messageId", 201)
rule_engine.process(telemetry)
print("===== End ======\n\n")
def main():
rule_engine = RuleEngine()
initialize(rule_engine)
test1(rule_engine)
test2(rule_engine)
test3(rule_engine)
test4(rule_engine)
if __name__=="__main__":
main()
| 29.419753
| 81
| 0.660512
| 283
| 2,383
| 5.367491
| 0.173145
| 0.131666
| 0.05135
| 0.052666
| 0.563529
| 0.563529
| 0.45293
| 0.45293
| 0.45293
| 0.45293
| 0
| 0.01849
| 0.182963
| 2,383
| 81
| 82
| 29.419753
| 0.761685
| 0
| 0
| 0.424242
| 0
| 0
| 0.192534
| 0.009648
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.227273
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8a96eee4517afeca4532922b8ea2f6d38dc101a
| 4,898
|
py
|
Python
|
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import monai
import math
import torch
import glob
from skimage.morphology import remove_small_holes, remove_small_objects
from monai.transforms import (
LoadImaged,
AddChanneld,
Orientationd,
Spacingd,
ScaleIntensityRanged,
SpatialPadd,
RandAffined,
RandCropByPosNegLabeld,
RandGaussianNoised,
RandFlipd,
CastToTyped,
)
def get_xforms_scans_or_synthetic_lesions(mode="scans", keys=("image", "label")):
"""returns a composed transform for scans or synthetic lesions."""
xforms = [
LoadImaged(keys),
AddChanneld(keys),
Orientationd(keys, axcodes="LPS"),
Spacingd(keys, pixdim=(1.25, 1.25, 5.0), mode=("bilinear", "nearest")[: len(keys)]),
]
dtype = (np.int16, np.uint8)
if mode == "synthetic":
xforms.extend([
ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
])
dtype = (np.float32, np.uint8)
xforms.extend([CastToTyped(keys, dtype=dtype)])
return monai.transforms.Compose(xforms)
def get_xforms_load(mode="load", keys=("image", "label")):
"""returns a composed transform."""
xforms = [
LoadImaged(keys),
ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
]
if mode == "load":
dtype = (np.float32, np.uint8)
xforms.extend([CastToTyped(keys, dtype=dtype)])
return monai.transforms.Compose(xforms)
def load_COVID19_v2(data_folder, SCAN_NAME):
images= [f'{data_folder}/{SCAN_NAME}_ct.nii.gz']
labels= [f'{data_folder}/{SCAN_NAME}_seg.nii.gz']
keys = ("image", "label")
files_scans = [{keys[0]: img, keys[1]: seg} for img, seg in zip(images, labels)]
return images, labels, keys, files_scans
def load_synthetic_lesions(files_scans, keys, batch_size):
transforms_load = get_xforms_scans_or_synthetic_lesions("synthetic", keys)
ds_synthetic = monai.data.CacheDataset(data=files_scans, transform=transforms_load)
loader_synthetic = monai.data.DataLoader(
ds_synthetic,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
for idx_mini_batch, mini_batch in enumerate(loader_synthetic):
# if idx_mini_batch==6:break #OMM
BATCH_IDX=0
scan_synthetic = mini_batch['image'][BATCH_IDX][0,...].numpy()
scan_mask = mini_batch['label'][BATCH_IDX][0,...].numpy()
name_prefix = mini_batch['image_meta_dict']['filename_or_obj'][0].split('Train/')[-1].split('.nii')[0]
return name_prefix
def load_scans(files_scans, keys, batch_size, SCAN_NAME, mode="scans"):
transforms_load = get_xforms_scans_or_synthetic_lesions(mode, keys)
ds_scans = monai.data.CacheDataset(data=files_scans, transform=transforms_load)
loader_scans = monai.data.DataLoader(
ds_scans,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
for idx_mini_batch, mini_batch in enumerate(loader_scans):
# if idx_mini_batch==1:break #OMM
BATCH_IDX=0
scan = mini_batch['image'][BATCH_IDX][0,...]
scan_mask = mini_batch['label'][BATCH_IDX][0,...]
scan_name = mini_batch['image_meta_dict']['filename_or_obj'][0].split('/')[-1].split('.nii')[0][:-3]
print(f'working on scan= {scan_name}')
assert scan_name == SCAN_NAME, 'cannot load that scan'
scan = scan.numpy() #ONLY READ ONE SCAN (WITH PREVIOUS BREAK)
scan_mask = scan_mask.numpy()
return scan, scan_mask
def load_individual_lesions(folder_source, batch_size):
# folder_source = f'/content/drive/MyDrive/Datasets/covid19/COVID-19-20/individual_lesions/{SCAN_NAME}_ct/'
files_scan = sorted(glob.glob(os.path.join(folder_source,"*.npy")))
files_mask = sorted(glob.glob(os.path.join(folder_source,"*.npz")))
keys = ("image", "label")
files = [{keys[0]: img, keys[1]: seg} for img, seg in zip(files_scan, files_mask)]
print(len(files_scan), len(files_mask), len(files))
transforms_load = get_xforms_load("load", keys)
ds_lesions = monai.data.CacheDataset(data=files, transform=transforms_load)
loader_lesions = monai.data.DataLoader(
ds_lesions,
batch_size=batch_size,
shuffle=False, #should be true for training
num_workers=2,
pin_memory=torch.cuda.is_available(),
)
return loader_lesions
def load_synthetic_texture(path_synthesis_old):
texture_orig = np.load(f'{path_synthesis_old}texture.npy.npz')
texture_orig = texture_orig.f.arr_0
texture = texture_orig + np.abs(np.min(texture_orig))# + .07
return texture
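# --- Added usage sketch (illustrative; not part of the original module) ---
def _example_load_scan_pair():
    """Load one scan and its lesion mask using the helpers above.

    The data folder and scan name are placeholders, not paths from the original project.
    """
    data_folder = "/data/COVID-19-20/Train"
    scan_name = "volume-covid19-A-0014"
    images, labels, keys, files_scans = load_COVID19_v2(data_folder, scan_name)
    scan, scan_mask = load_scans(files_scans, keys, batch_size=1, SCAN_NAME=scan_name)
    return scan, scan_mask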
| 39.5
| 111
| 0.669661
| 658
| 4,898
| 4.74772
| 0.232523
| 0.034571
| 0.017286
| 0.029449
| 0.477273
| 0.440781
| 0.412612
| 0.37484
| 0.302497
| 0.302497
| 0
| 0.020102
| 0.197632
| 4,898
| 123
| 112
| 39.821138
| 0.774809
| 0.078808
| 0
| 0.284404
| 0
| 0
| 0.079697
| 0.023598
| 0
| 0
| 0
| 0
| 0.009174
| 1
| 0.06422
| false
| 0
| 0.073395
| 0
| 0.201835
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ab0286f449987129eeade795e566330ff36d18
| 867
|
py
|
Python
|
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | 2
|
2019-12-09T10:19:36.000Z
|
2020-01-11T11:48:41.000Z
|
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
import json
import pytest
from unittest import TestCase
from rest_framework.test import APIClient
from ..models import Group, Prediction
@pytest.mark.django_db
class PredictionViewSetTest(TestCase):
def setUp(self):
self.client = APIClient()
def test_prediction_list(self):
response = self.client.get("/api/predictions/")
assert response.status_code == 200
response_json = json.loads(response.content)
assert len(response_json) == Prediction.objects.count()
@pytest.mark.django_db
class GroupViewSetTest(TestCase):
def setUp(self):
self.client = APIClient()
def test_group_list(self):
response = self.client.get("/api/groups/")
assert response.status_code == 200
response_json = json.loads(response.content)
assert len(response_json) == Group.objects.count()
| 27.967742
| 63
| 0.704729
| 103
| 867
| 5.805825
| 0.368932
| 0.06689
| 0.053512
| 0.060201
| 0.652174
| 0.575251
| 0.575251
| 0.575251
| 0.575251
| 0.575251
| 0
| 0.008584
| 0.193772
| 867
| 30
| 64
| 28.9
| 0.846924
| 0
| 0
| 0.521739
| 0
| 0
| 0.033449
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.173913
| false
| 0
| 0.217391
| 0
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8acaa7460d221225a0bd79d4a5ca48dc091b0af
| 2,873
|
py
|
Python
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
"""Specification for the RoboMaker delete. simulation application component."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
from common.sagemaker_component_spec import SageMakerComponentSpec
from common.common_inputs import (
COMMON_INPUTS,
SageMakerComponentCommonInputs,
SageMakerComponentInput as Input,
SageMakerComponentOutput as Output,
SageMakerComponentBaseOutputs,
SageMakerComponentInputValidator as InputValidator,
SageMakerComponentOutputValidator as OutputValidator,
)
@dataclass(frozen=True)
class RoboMakerDeleteSimulationAppInputs(SageMakerComponentCommonInputs):
"""Defines the set of inputs for the delete simulation application component."""
arn: Input
version: Input
@dataclass
class RoboMakerDeleteSimulationAppOutputs(SageMakerComponentBaseOutputs):
"""Defines the set of outputs for the create simulation application component."""
arn: Output
class RoboMakerDeleteSimulationAppSpec(
SageMakerComponentSpec[
RoboMakerDeleteSimulationAppInputs, RoboMakerDeleteSimulationAppOutputs
]
):
INPUTS: RoboMakerDeleteSimulationAppInputs = RoboMakerDeleteSimulationAppInputs(
arn=InputValidator(
input_type=str,
required=True,
description="The Amazon Resource Name (ARN) of the simulation application.",
default="",
),
version=InputValidator(
input_type=str,
required=False,
description="The version of the simulation application.",
default=None,
),
**vars(COMMON_INPUTS),
)
OUTPUTS = RoboMakerDeleteSimulationAppOutputs(
arn=OutputValidator(
description="The Amazon Resource Name (ARN) of the simulation application."
),
)
def __init__(self, arguments: List[str]):
super().__init__(
arguments,
RoboMakerDeleteSimulationAppInputs,
RoboMakerDeleteSimulationAppOutputs,
)
@property
def inputs(self) -> RoboMakerDeleteSimulationAppInputs:
return self._inputs
@property
def outputs(self) -> RoboMakerDeleteSimulationAppOutputs:
return self._outputs
@property
def output_paths(self) -> RoboMakerDeleteSimulationAppOutputs:
return self._output_paths
| 32.280899
| 88
| 0.725374
| 264
| 2,873
| 7.818182
| 0.435606
| 0.061047
| 0.043605
| 0.037791
| 0.111434
| 0.059109
| 0.059109
| 0.059109
| 0.059109
| 0.059109
| 0
| 0.001767
| 0.211974
| 2,873
| 88
| 89
| 32.647727
| 0.909894
| 0.258615
| 0
| 0.135593
| 0
| 0
| 0.077947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.067797
| 0.050847
| 0.322034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8b003880b2b0c817a1e02d7db8475b7ea56eada
| 2,624
|
py
|
Python
|
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import fnmatch
import logging
class sflow_sub_record:
def __init__(self,scheme,app_id,app_ip,app_port,subscription_info,sub_info_filter):
logging.debug("* Updating subscription_info ")
self.scheme = scheme
self.app_id = app_id
self.ipaddress = app_ip
self.portno = app_port
self.subscription_info = subscription_info
self.sub_info_filter = sub_info_filter
sflow_sub_database=[]
def add_sflow_sub_record(record):
logging.info("* inside %s",add_sflow_sub_record.__name__)
if not sflow_sub_database:
logging.debug("* -----------List is EMpty -------------")
sflow_sub_database.append(record)
logging.debug("* Subscription is sucessful")
return "Subscription is sucessful \n"
for x in sflow_sub_database:
if (record.ipaddress == x.ipaddress) and (record.portno == x.portno) :
logging.warning("* entry already exists\n")
return "entry already exists \n"
sflow_sub_database.append(record)
return "Subscription is sucessful \n"
def delete_sflow_sub_record(ip,port):
logging.info("* inside %s",delete_sflow_sub_record.__name__)
Flag = False
for x in sflow_sub_database:
if (ip == x.ipaddress) and (port == x.portno) :
sflow_sub_database.remove(x)
Flag = True
logging.debug("* Un-Subscription is sucessful")
return "Un-Subscription is sucessful \n"
if not Flag :
err_str = "No subscription exists with target: udp://" + ip + ":" + str(port) + "\n"
logging.error(err_str)
raise Exception(err_str)
def print_sflow_sub_records():
logging.info("* inside %s",print_sflow_sub_records.__name__)
for obj in sflow_sub_database:
logging.debug("* ------------------------------------------------")
logging.debug("* scheme:%s",obj.scheme)
logging.debug("* app_id:%s",obj.app_id)
logging.debug("* portno:%s",obj.portno )
logging.debug("* ipaddress:%s",obj.ipaddress)
logging.debug("* portno:%s",obj.portno)
logging.debug("* subscription_info:%s",obj.subscription_info)
logging.debug("* sub_info_filter:%s",obj.sub_info_filter)
logging.debug("* ------------------------------------------------")
def get_sflow_sub_records(notif_subscription_info):
logging.info("* inside %s",get_sflow_sub_records.__name__)
sub_list=[]
for obj in sflow_sub_database:
if obj.subscription_info == notif_subscription_info:
sub_list.append(obj)
return sub_list
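An assumed usage sketch of the registry above; the scheme, address, and port values are illustrative only.

# Assumed usage of the subscription registry defined above.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    rec = sflow_sub_record("udp", "app-1", "10.0.0.5", 6343, "sflow", None)
    print(add_sflow_sub_record(rec))        # -> "Subscription is successful"
    print_sflow_sub_records()
    print(get_sflow_sub_records("sflow"))   # -> [rec]
    print(delete_sflow_sub_record("10.0.0.5", 6343))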
| 41
| 91
| 0.62843
| 326
| 2,624
| 4.751534
| 0.205521
| 0.092963
| 0.092963
| 0.046482
| 0.247902
| 0.113622
| 0.082634
| 0.051646
| 0
| 0
| 0
| 0
| 0.21875
| 2,624
| 63
| 92
| 41.650794
| 0.75561
| 0.006098
| 0
| 0.210526
| 0
| 0
| 0.210587
| 0.036824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.035088
| 0
| 0.22807
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8b2fa45ad6aa0b508fe2d6b2b81fce66e566e4c
| 3,148
|
py
|
Python
|
scripts/gcorr/run_xfaster.py
|
SPIDER-CMB/xfaster
|
1b8e56d775f2c3a8693d1372ae461392c21da7ca
|
[
"MIT"
] | 1
|
2021-03-25T14:15:44.000Z
|
2021-03-25T14:15:44.000Z
|
scripts/gcorr/run_xfaster.py
|
annegambrel/xfaster
|
03d5a2971d3cc19ae360d78995e3575f3f678d6e
|
[
"MIT"
] | 7
|
2021-04-20T23:34:38.000Z
|
2021-08-24T00:00:53.000Z
|
scripts/gcorr/run_xfaster.py
|
SPIDER-CMB/xfaster
|
1b8e56d775f2c3a8693d1372ae461392c21da7ca
|
[
"MIT"
] | 1
|
2021-05-18T16:43:54.000Z
|
2021-05-18T16:43:54.000Z
|
"""
A script to run XFaster for gcorr calculation. Called by iterate.py.
"""
import os
import xfaster as xf
import argparse as ap
from configparser import ConfigParser
# Change XFaster options here to suit your purposes
opts = dict(
likelihood=False,
residual_fit=False,
foreground_fit=False,
# change options below for your purposes
tbeb=True,
bin_width=25,
lmin=2,
lmax=500,
)
# Change submit options here to fit your system
submit_opts = dict(nodes=1, ppn=1, mem=6, omp_threads=10, wallt=4)
P = ap.ArgumentParser()
P.add_argument("--gcorr-config", help="The config file for gcorr computation")
P.add_argument("-f", "--first", default=0, type=int, help="First sim index to run")
P.add_argument("-n", "--num", default=1, type=int, help="Number of sims to run")
P.add_argument(
"-o", "--output", default="xfaster_gcal", help="Name of output subdirectory"
)
P.add_argument(
"--no-gcorr",
dest="gcorr",
default=True,
action="store_false",
help="Don't apply a g-gcorrection",
)
P.add_argument(
"--reload-gcorr", default=False, action="store_true", help="Reload the gcorr factor"
)
P.add_argument("--check-point", default="bandpowers", help="XFaster checkpoint")
P.add_argument(
"--no-submit", dest="submit", action="store_false", help="Don't submit, run locally"
)
P.add_argument(
"--omp",
default=None,
type=int,
help="Number of omp threads, if submit. Overwrites value in config file",
)
args = P.parse_args()
# start by loading up gcorr config file and parsing it
assert os.path.exists(args.gcorr_config), "Missing config file {}".format(
args.gcorr_config
)
g_cfg = ConfigParser()
g_cfg.read(args.gcorr_config)
# set all user-specific xfaster opts
for k, v in g_cfg["xfaster_opts"].items():
opts[k] = v
null = g_cfg.getboolean("gcorr_opts", "null")
tags = g_cfg["gcorr_opts"]["map_tags"].split(",")
# null tests should use noise sims. signal shouldn't.
if null:
opts["noise_type"] = g_cfg["xfaster_opts"]["noise_type"]
opts["sim_data_components"] = ["signal", "noise"]
else:
opts["noise_type"] = None
opts["sim_data_components"] = ["signal"]
opts["output_root"] = os.path.join(g_cfg["gcorr_opts"]["output_root"], args.output)
# update opts with command line args
opts["apply_gcorr"] = args.gcorr
opts["reload_gcorr"] = args.reload_gcorr
opts["checkpoint"] = args.check_point
seeds = list(range(args.first, args.first + args.num))
for tag in tags:
opts["sim_data"] = True
opts["output_tag"] = tag
opts["gcorr_file"] = os.path.abspath(
os.path.join(
g_cfg["gcorr_opts"]["output_root"],
"xfaster_gcal",
tag,
"gcorr_{}_total.npz".format(tag),
)
)
opts["data_subset"] = os.path.join(
g_cfg["gcorr_opts"]["data_subset"], "*{}".format(tag)
)
if args.omp is not None:
submit_opts["omp_threads"] = args.omp
if args.submit:
opts.update(**submit_opts)
for s in seeds:
opts["sim_index_default"] = s
if args.submit:
xf.xfaster_submit(**opts)
else:
xf.xfaster_run(**opts)
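For context, a hedged sketch of a config file this script could consume, built only from the keys it reads above (gcorr_opts.null, map_tags, output_root, data_subset, and an xfaster_opts section); every path and tag below is a placeholder.

# Assumed sketch: generate a minimal gcorr config with the keys parsed above.
from configparser import ConfigParser

cfg = ConfigParser()
cfg["gcorr_opts"] = {
    "null": "False",                       # signal-only sims
    "map_tags": "90,150",                  # placeholder map tags
    "output_root": "/path/to/gcorr_runs",  # placeholder output directory
    "data_subset": "full/*_",              # placeholder data subset pattern
}
cfg["xfaster_opts"] = {"noise_type": "stationary"}  # copied verbatim into opts
with open("example_gcorr.cfg", "w") as fp:
    cfg.write(fp)
# Then, e.g.: python run_xfaster.py --gcorr-config example_gcorr.cfg -n 10 --no-submit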
| 28.618182
| 88
| 0.661055
| 459
| 3,148
| 4.385621
| 0.339869
| 0.017884
| 0.053651
| 0.025832
| 0.130651
| 0.068058
| 0.044213
| 0.032787
| 0.032787
| 0
| 0
| 0.005475
| 0.187738
| 3,148
| 109
| 89
| 28.880734
| 0.781776
| 0.120394
| 0
| 0.104651
| 0
| 0
| 0.288203
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 1
| 0
| false
| 0
| 0.046512
| 0
| 0.046512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8b5ae0ccaf93b252b0712f888f73a49ece568a6
| 23,824
|
py
|
Python
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 1
|
2021-03-29T22:09:47.000Z
|
2021-03-29T22:09:47.000Z
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 49
|
2021-03-29T20:13:28.000Z
|
2021-05-01T10:38:19.000Z
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for server files.
"""
from __future__ import absolute_import, print_function
import os
import yaml
import jsonschema
from ._server import Server
from ._vault_file import VaultFile
__all__ = ['ServerFile', 'ServerFileException',
'ServerFileOpenError', 'ServerFileFormatError',
'ServerFileUserDefinedFormatError',
'ServerFileUserDefinedSchemaError',
'ServerFileGroupUserDefinedFormatError',
'ServerFileGroupUserDefinedSchemaError']
# JSON schema describing the structure of the server files
SERVER_FILE_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "JSON schema for easy-server server files",
"definitions": {},
"type": "object",
"required": [
"servers",
],
"additionalProperties": False,
"properties": {
"vault_file": {
"type": "string",
"description":
"Path name of vault file. Relative path names are relative to "
"the directory of the server file",
},
"servers": {
"type": "object",
"description": "The servers in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server",
"required": [
"description",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description": "Short description of the server",
},
"contact_name": {
"type": "string",
"description":
"Name of technical contact for the server",
},
"access_via": {
"type": "string",
"description":
"Short reminder on the "
"network/firewall/proxy/vpn used to access the "
"server",
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"server_groups": {
"type": "object",
"description": "The server groups in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server group",
"required": [
"description",
"members",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description":
"Short description of the server group",
},
"members": {
"type": "array",
"description":
"List of members of the server group. "
"Those can be servers or other server groups.",
"items": {
"type": "string",
"description":
"Nickname of server or server group in "
"this file",
},
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server group. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"default": {
"type": "string",
"description": "Nickname of default server or server group",
},
},
}
class ServerFileException(Exception):
"""
Abstract base exception for errors related to server files.
Derived from :exc:`py:Exception`.
"""
pass
class ServerFileOpenError(ServerFileException):
"""
Exception indicating that a server file was not found or cannot
be accessed due to a permission error.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileFormatError(ServerFileException):
"""
Exception indicating that an existing server file has some
issue with the format of its file content.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileUserDefinedFormatError(ServerFileException):
"""
Exception indicating that the values of the user-defined portion of server
items in a server file do not match the JSON schema defined for them.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileUserDefinedSchemaError(ServerFileException):
"""
Exception indicating that the JSON schema for validating the values of the
user-defined portion of server items in a server file is not a valid JSON
schema.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileGroupUserDefinedFormatError(ServerFileException):
"""
Exception indicating that the values of the user-defined portion of group
items in a server file do not match the JSON schema defined for them.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFileGroupUserDefinedSchemaError(ServerFileException):
"""
Exception indicating that the JSON schema for validating the values of the
user-defined portion of group items in a server file is not a valid JSON
schema.
Derived from :exc:`ServerFileException`.
"""
pass
class ServerFile(object):
"""
A server file that specifies the openly accessible portion of the servers
and optionally references a vault file that specifies the secret portion
of the servers.
An object of this class is tied to a single server file.
The server file is loaded when this object is initialized. If
the server file specifies a vault file, the vault file is also
loaded at that point.
Optionally, the user-defined portions of the server and group items in
the server file, and the server items in the vault file can be validated
against user-provided JSON schema.
For a description of the file formats, see sections
:ref:`Server files` and :ref:`Vault files`.
"""
def __init__(
self, filepath, password=None, use_keyring=True, use_prompting=True,
verbose=False, user_defined_schema=None,
group_user_defined_schema=None, vault_server_schema=None):
"""
Parameters:
filepath (:term:`unicode string`):
Path name of the server file. Relative path names are
relative to the current directory.
password (:term:`unicode string`):
Password for the vault file. `None` indicates that no password has
been provided.
use_keyring (bool):
Enable the use of the keyring service for retrieving and storing the
password of the vault file.
use_prompting (bool):
Enable the use of password prompting for getting the password of
the vault file.
verbose (bool):
Print additional messages. Note that the password prompt (if needed)
is displayed regardless of verbose mode.
user_defined_schema (:term:`JSON schema`):
JSON schema for validating the values of the user-defined portion
of server items when loading the server file.
`None` means no schema validation takes place for these items.
group_user_defined_schema (:term:`JSON schema`):
JSON schema for validating the values of the user-defined portion
of group items when loading the server file.
`None` means no schema validation takes place for these items.
vault_server_schema (:term:`JSON schema`):
JSON schema for validating the values of the server items when
loading the vault file.
`None` means no schema validation takes place for these items.
Raises:
ServerFileOpenError: Error opening server file
ServerFileFormatError: Invalid server file format
ServerFileUserDefinedFormatError: Invalid format of user-defined
portion of server items in the server file
ServerFileUserDefinedSchemaError: Invalid JSON schema for validating
user-defined portion of server items in the server file
ServerFileGroupUserDefinedFormatError: Invalid format of user-defined
portion of group items in the server file
ServerFileGroupUserDefinedSchemaError: Invalid JSON schema for
validating user-defined portion of group items in the server file
VaultFileOpenError: Error with opening the vault file
VaultFileDecryptError: Error with decrypting the vault file
VaultFileFormatError: Invalid vault file format
VaultFileServerFormatError: Invalid format of server items in the
vault file
VaultFileServerSchemaError: Invalid JSON schema for validating server
items in the vault file
"""
self._filepath = os.path.abspath(filepath)
self._user_defined_schema = user_defined_schema
self._group_user_defined_schema = group_user_defined_schema
self._vault_server_schema = vault_server_schema
self._data = _load_server_file(
filepath, user_defined_schema, group_user_defined_schema)
self._vault_file = self._data['vault_file']
if self._vault_file:
if not os.path.isabs(self._vault_file):
self._vault_file = os.path.join(
os.path.dirname(self._filepath), self._vault_file)
self._vault = VaultFile(
self._vault_file, password=password, use_keyring=use_keyring,
use_prompting=use_prompting, verbose=verbose,
server_schema=vault_server_schema)
else:
self._vault = None
# The following attributes are for faster access
self._servers = self._data['servers']
self._server_groups = self._data['server_groups']
self._default = self._data['default']
@property
def filepath(self):
"""
:term:`unicode string`: Absolute path name of the server file.
"""
return self._filepath
@property
def vault_file(self):
"""
:term:`unicode string`: Absolute path name of the vault file specified
in the server file, or `None` if no vault file was specified.
Vault files specified with a relative path name are relative to the
directory of the server file.
"""
return self._vault_file
@property
def user_defined_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
user-defined portion of server items in the server file, or `None`.
"""
return self._user_defined_schema
@property
def group_user_defined_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
user-defined portion of group items in the server file, or `None`.
"""
return self._group_user_defined_schema
@property
def vault_server_schema(self):
"""
:term:`JSON schema`: JSON schema for validating the values of the
server items in the vault file, or `None`.
"""
return self._vault_server_schema
def is_vault_file_encrypted(self):
"""
Test whether the vault file is in the encrypted state.
If the server file does not specify a vault file, `None` is returned.
Returns:
bool: Boolean indicating whether the vault file is in the encrypted
state, or `None` if no vault file was specified.
"""
if self._vault is None:
return None
return self._vault.is_encrypted()
def get_server(self, nickname):
"""
Get server for a given server nickname.
Parameters:
nickname (:term:`unicode string`): Server nickname.
Returns:
:class:`~easy_server.Server`:
Server with the specified nickname.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
try:
server_dict = self._servers[nickname]
except KeyError:
new_exc = KeyError(
"Server with nickname {!r} not found in server "
"file {!r}".
format(nickname, self._filepath))
new_exc.__cause__ = None
raise new_exc # KeyError
if self._vault:
try:
secrets_dict = self._vault.get_secrets(nickname)
except KeyError:
secrets_dict = None
else:
secrets_dict = None
return Server(nickname, server_dict, secrets_dict)
def list_servers(self, nickname):
"""
List the servers for a given server or server group nickname.
Parameters:
nickname (:term:`unicode string`): Server or server group nickname.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
if nickname in self._servers:
return [self.get_server(nickname)]
if nickname in self._server_groups:
sd_list = list() # of Server objects
sd_nick_list = list() # of server nicknames
sg_item = self._server_groups[nickname]
for member_nick in sg_item['members']:
member_sds = self.list_servers(member_nick)
for sd in member_sds:
if sd.nickname not in sd_nick_list:
sd_nick_list.append(sd.nickname)
sd_list.append(sd)
return sd_list
raise KeyError(
"Server or server group with nickname {!r} not found in server "
"definition file {!r}".
format(nickname, self._filepath))
def list_default_servers(self):
"""
List the servers for the default server or group.
An omitted 'default' element in the server file results in
an empty list.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
if self._default is None:
return []
return self.list_servers(self._default)
def list_all_servers(self):
"""
List all servers.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
return [self.get_server(nickname) for nickname in self._servers]
def _load_server_file(
filepath, user_defined_schema=None, group_user_defined_schema=None):
"""
Load the server file, validate its format and default some
optional elements.
Returns:
dict: Python dict representing the file content.
Raises:
ServerFileOpenError: Error opening server file
ServerFileFormatError: Invalid server file content
ServerFileUserDefinedFormatError: Invalid format of user-defined
portion of server items in the server file
ServerFileUserDefinedSchemaError: Invalid JSON schema for validating
user-defined portion of server items in the server file
ServerFileGroupUserDefinedFormatError: Invalid format of user-defined
portion of group items in the server file
ServerFileGroupUserDefinedSchemaError: Invalid JSON schema for
validating user-defined portion of group items in the server file
"""
# Load the server file (YAML)
try:
with open(filepath, 'r') as fp:
data = yaml.safe_load(fp)
except (OSError, IOError) as exc:
new_exc = ServerFileOpenError(
"Cannot open server file: {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileOpenError
except yaml.YAMLError as exc:
new_exc = ServerFileFormatError(
"Invalid YAML syntax in server file {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Schema validation of server file content
try:
jsonschema.validate(data, SERVER_FILE_SCHEMA)
# Raises jsonschema.exceptions.SchemaError if JSON schema is invalid
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = 'top-level element'
new_exc = ServerFileFormatError(
"Invalid format in server file {fn}: Validation "
"failed on {elem}: {exc}".
format(fn=filepath, elem=elem_str, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Establish defaults for optional top-level elements
if 'server_groups' not in data:
data['server_groups'] = {}
if 'default' not in data:
data['default'] = None
if 'vault_file' not in data:
data['vault_file'] = None
# Schema validation of user-defined portion of server items
if user_defined_schema:
for server_nick, server_item in data['servers'].items():
user_defined = server_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileUserDefinedFormatError(
"Missing user_defined element for server {srv} "
"in server file {fn}".
format(srv=server_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
try:
jsonschema.validate(user_defined, user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of server items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"server {srv} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(srv=server_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
# Schema validation of user-defined portion of group items
if group_user_defined_schema:
for group_nick, group_item in data['server_groups'].items():
user_defined = group_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileGroupUserDefinedFormatError(
"Missing user_defined element for group {grp} "
"in server file {fn}".
format(grp=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
try:
jsonschema.validate(user_defined, group_user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileGroupUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of group items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileGroupUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"group {grp} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(grp=group_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
# Check dependencies in the file
server_nicks = list(data['servers'].keys())
group_nicks = list(data['server_groups'].keys())
all_nicks = server_nicks + group_nicks
default_nick = data['default']
if default_nick and default_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Default nickname '{n}' not found in servers or groups in "
"server file {fn}".
format(n=default_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
for group_nick in group_nicks:
sg_item = data['server_groups'][group_nick]
for member_nick in sg_item['members']:
if member_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Nickname '{n}' in server group '{g}' not found in "
"servers or groups in server file {fn}".
format(n=member_nick, g=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
return data
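A hedged usage sketch of the loader above; the YAML content and nicknames are invented but stick to the elements allowed by SERVER_FILE_SCHEMA.

# Assumed example: write a minimal server file and read it back with ServerFile.
import textwrap

with open("servers.yml", "w") as fp:
    fp.write(textwrap.dedent("""\
        servers:
          web1:
            description: Primary web server
        server_groups:
          all:
            description: Every server
            members: [web1]
        default: all
    """))

sf = ServerFile("servers.yml")
print(sf.vault_file)                                    # None: no vault file referenced
print([s.nickname for s in sf.list_default_servers()])  # ['web1']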
| 37.815873
| 80
| 0.580423
| 2,485
| 23,824
| 5.42495
| 0.126761
| 0.049774
| 0.025072
| 0.032639
| 0.546918
| 0.510496
| 0.478674
| 0.439137
| 0.410949
| 0.363029
| 0
| 0.000644
| 0.347759
| 23,824
| 629
| 81
| 37.875994
| 0.866916
| 0.331808
| 0
| 0.42522
| 0
| 0
| 0.207422
| 0.012573
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035191
| false
| 0.026393
| 0.017595
| 0
| 0.117302
| 0.002933
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8b88aa220e765ebad5849f646d7fa3f22e031df
| 1,316
|
py
|
Python
|
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | 6
|
2021-05-21T01:10:42.000Z
|
2021-12-16T16:12:30.000Z
|
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
from typing import Callable
class Solution:
def sortArrayByParityII(self, nums: list[int]) -> list[int]:
# Crucial lesson: 2 pointer approach doesn't necessarily mean
# the pointers should start at opposite ends of the array.
evens, odds = 0, 1
end = len(nums)
while evens < end and odds < end:
if nums[evens] % 2 == 0:
evens += 2
elif nums[odds] % 2 != 0:
odds += 2
else:
nums[evens], nums[odds] = nums[odds], nums[evens]
evens += 2
odds += 2
return nums
tests = [
(
([4, 2, 5, 7],),
[4, 5, 2, 7],
),
(
([2, 3],),
[2, 3],
),
(
([2, 3, 1, 1, 4, 0, 0, 4, 3, 3],),
[2, 3, 4, 1, 4, 3, 0, 1, 0, 3],
),
]
def validator(
sortArrayByParityII: Callable[[list[int]], list[int]],
inputs: tuple[list[int]],
expected: list[int],
) -> None:
nums, = inputs
output = sortArrayByParityII(nums)
sorted_output = sorted(output)
sorted_expected = sorted(expected)
assert sorted_output == sorted_expected, (sorted_output, sorted_expected)
for index, value in enumerate(output):
assert index % 2 == value % 2, (index % 2, value % 2)
| 24.830189
| 77
| 0.50152
| 161
| 1,316
| 4.062112
| 0.372671
| 0.06422
| 0.110092
| 0.119266
| 0.097859
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055952
| 0.361702
| 1,316
| 52
| 78
| 25.307692
| 0.722619
| 0.088146
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 1
| 0.04878
| false
| 0
| 0.02439
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ba6e975ac143461562e6b418e4b0a0aee2b105
| 4,285
|
py
|
Python
|
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | 2
|
2019-09-17T10:20:20.000Z
|
2020-02-10T11:46:33.000Z
|
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | null | null | null |
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | null | null | null |
import re
UNITS_XML_FILE = 'poscUnits22.xml'
UNITS_PICKLE_FILE = 'units.pickle'
OUTPUT_DECIMALS = 6
SOURCE_PATTERN = r'^(?P<quantity>.*[\d.]+)\s*(?P<from>[^\d\s]([^\s]*|.+?))'
SOURCE_RE = re.compile(SOURCE_PATTERN + '$', re.IGNORECASE | re.VERBOSE)
FULL_PATTERN = r'(\s+as|\s+to|\s+in|\s*>|\s*=)\s(?P<to>[^\d\s][^\s]*)$'
FULL_RE = re.compile(SOURCE_PATTERN + FULL_PATTERN + '$',
re.IGNORECASE | re.VERBOSE)
ICONS = {
'length': 'scale6.png',
'height': 'scale6.png',
'distance': 'scale6.png',
'area': 'scaling1.png',
'time': 'round27.png',
'thermodynamic temperature': 'thermometer19.png',
'volume': 'measuring3.png',
'mass': 'weight4.png',
'velocity': 'timer18.png',
'level of power intensity': 'treble2.png',
'digital storage': 'binary9.png',
}
DEFAULT_ICON = 'ruler9.png'
ANNOTATION_REPLACEMENTS = {
'litre': ('liter', 'liters', 'l'),
'metre': ('meter', 'm'),
'm2': ('meter^2',),
'dm': ('decimeter',),
'dm2': ('dm^2', 'decimeter^2',),
'dm3': ('dm^3', 'decimeter^3',),
'cm': ('centimeter',),
'cm2': ('cm^2', 'centimeter^2',),
'cm3': ('cm^3', 'centimeter^3',),
'mm': ('milimeter',),
'mm2': ('mm^2', 'milimeter^2'),
'mm3': ('mm^3', 'milimeter^3'),
'degF': ('f', 'fahrenheit', 'farhenheit', 'farenheit'),
'degC': ('c', 'celsius', 'celcius'),
'byte': ('B', 'bytes',),
'bit': ('b', 'bits',),
'kbyte': ('KB', 'kB', 'kb', 'kilobyte',),
'Mbyte': ('MB', 'megabyte',),
'ozm': ('oz', 'ounce', 'ounces'),
'lbm': ('lb', 'lbs', 'pound', 'pounds'),
'miPh': ('mph',),
'ftPh': ('fps',),
'foot': ("'",),
'square': ('sq',),
'ft2': ('ft^2', 'foot^2'),
'ft3': ('ft^3', 'foot^3'),
'inch': ('inches', '"'),
'inch2': ('inch^2', 'square inch'),
'inch3': ('inch^3', 'cube inch'),
'flozUS': ('flus', 'floz', 'fl', 'fl oz', 'fl oz uk'),
'flozUK': ('fluk', 'fl oz uk', 'fl uk'),
}
EXPANSIONS = {
'foot': ('feet', 'ft'),
'mili': ('milli',),
'meter': ('metres', 'meter', 'meters'),
'^2': ('sq', 'square'),
'^3': ('cube', 'cubed'),
}
for annotation, items in ANNOTATION_REPLACEMENTS.items():
items = set(items)
items.add(annotation)
for key, expansions in EXPANSIONS.items():
for expansion in expansions:
for item in set(items):
items.add(item.replace(key, expansion))
ANNOTATION_REPLACEMENTS[annotation] = sorted(items)
# Mostly for language specific stuff, defaulting to US for now since I'm not
# easily able to detect the language in a fast way from within alfred
LOCALIZED_UNITS = (
('metre', 'meter'),
('litre', 'liter'),
)
def localize(input_):
for k, v in LOCALIZED_UNITS:
if k in input_:
return input_.replace(k, v)
return input_
RIGHT_TRIMABLE_OPERATORS = '/+*- (.^'
FUNCTION_ALIASES = {
'deg': 'degrees',
'rad': 'radians',
'ln': 'log',
'arccos': 'acos',
'arcsin': 'asin',
'arctan': 'atan',
}
FUNCTION_ALIASES_RE = re.compile(r'\b(%s)\(' % '|'.join(FUNCTION_ALIASES))
def FUNCTION_ALIASES_REPLACEMENT(match):
return FUNCTION_ALIASES[match.group(1)] + '('
FOOT_INCH_RE = re.compile(r'''(\d+)'(\d+)"?''')
FOOT_INCH_REPLACE = r'(\1*12)+\2 inch'
POWER_UNIT_RE = re.compile(r'([a-z])\^([23])\b')
POWER_UNIT_REPLACEMENT = r'\g<1>\g<2>'
PRE_EVAL_REPLACEMENTS = {
'^': '**',
}
# Known safe math functions
MATH_FUNCTIONS = [
# Number theoretic and representation functions
'ceil',
'copysign',
'fabs',
'factorial',
'floor',
'fmod',
'frexp',
'isinf',
'isnan',
'ldexp',
'modf',
'trunc',
# Power and logarithmic functions
'exp',
'expm1',
'log',
'log1p',
'log10',
'pow',
'sqrt',
# Trigonometric functions
'acos',
'asin',
'atan',
'atan2',
'cos',
'hypot',
'sin',
'tan',
# Angular conversion functions
'degrees',
'radians',
# Hyperbolic functions
'acosh',
'asinh',
'atanh',
'cosh',
'sinh',
'tanh',
# Special functions
'erf',
'erfc',
'gamma',
'lgamma',
# Missing functions won't break anything but won't do anything either
'this_function_definitely_does_not_exist',
]
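A small, assumed demonstration of the helpers above; the input strings are illustrative.

# Assumed demo of the conversion helpers defined above.
print(localize('1 litre to ml'))   # -> '1 liter to ml'
print(FUNCTION_ALIASES_RE.sub(FUNCTION_ALIASES_REPLACEMENT, 'ln(10) + arccos(0)'))
# -> 'log(10) + acos(0)'
print(FOOT_INCH_RE.sub(FOOT_INCH_REPLACE, '5\'11"'))   # -> '(5*12)+11 inch'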
| 22.792553
| 76
| 0.54189
| 499
| 4,285
| 4.559118
| 0.52505
| 0.008791
| 0.024176
| 0.015824
| 0.042637
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019271
| 0.224971
| 4,285
| 187
| 77
| 22.914439
| 0.665763
| 0.094749
| 0
| 0
| 0
| 0.007092
| 0.335402
| 0.038014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014184
| false
| 0
| 0.007092
| 0.007092
| 0.042553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8bc9f66b7afd106a2727f0668012f3210c6ab27
| 1,548
|
py
|
Python
|
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | null | null | null |
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | 2
|
2020-07-19T21:03:34.000Z
|
2020-09-11T14:56:34.000Z
|
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | null | null | null |
from enum import Enum
from pathlib import Path
import click
from mbox.click import EnumChoice, ParsedDate, PathParam
class AF(Enum):
IPv4 = 4
IPv6 = 6
def test_enum_choice(runner):
@click.command()
@click.option("--af", type=EnumChoice(AF, int))
def cmd(af):
click.echo(af)
result = runner.invoke(cmd, ["--af", "6"])
assert result.exit_code == 0
assert result.output == "AF.IPv6\n"
result = runner.invoke(cmd, ["--help"])
assert result.exit_code == 0
assert "--af [4|6]" in result.output
def test_path_param(runner):
@click.command()
@click.option("--path", type=PathParam())
def cmd(path):
click.echo(path)
click.echo(isinstance(path, Path))
result = runner.invoke(cmd, ["--path", "directory"])
assert result.exit_code == 0
assert result.output == "directory\nTrue\n"
def test_parsed_date(runner):
@click.command()
@click.option("--date", type=ParsedDate())
def cmd(date):
click.echo(date)
result = runner.invoke(cmd, ["--date", "21 february 2019 at noon"])
assert result.exit_code == 0
assert result.output == "2019-02-21 12:00:00\n"
settings = {"RETURN_AS_TIMEZONE_AWARE": True, "TIMEZONE": "UTC"}
@click.command()
@click.option(
"--date",
type=ParsedDate(settings=settings),
)
def cmd2(date):
click.echo(date.tzinfo)
result = runner.invoke(cmd2, ["--date", "21 february 2019 at noon"])
assert result.exit_code == 0
assert result.output == "UTC\n"
| 24.1875
| 72
| 0.623385
| 205
| 1,548
| 4.639024
| 0.287805
| 0.113565
| 0.094637
| 0.105152
| 0.396425
| 0.329127
| 0.300736
| 0.214511
| 0.132492
| 0.132492
| 0
| 0.033828
| 0.217054
| 1,548
| 63
| 73
| 24.571429
| 0.750825
| 0
| 0
| 0.195652
| 0
| 0
| 0.132429
| 0.015504
| 0
| 0
| 0
| 0
| 0.217391
| 1
| 0.152174
| false
| 0
| 0.086957
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8bdfba3ce0bde25189979ebc289968a2512c766
| 1,400
|
py
|
Python
|
util/plot_pbt.py
|
Linus4world/3D-MRI-style-transfer
|
6747f0b235b8a6e773a941c222d594d9eedc6a35
|
[
"BSD-3-Clause"
] | 1
|
2022-01-03T16:08:35.000Z
|
2022-01-03T16:08:35.000Z
|
util/plot_PBT.py
|
Linus4world/mrs-gan
|
64669251584a7421cce3a5173983a2275dcb438a
|
[
"BSD-2-Clause"
] | null | null | null |
util/plot_PBT.py
|
Linus4world/mrs-gan
|
64669251584a7421cce3a5173983a2275dcb438a
|
[
"BSD-2-Clause"
] | 1
|
2022-02-11T13:26:38.000Z
|
2022-02-11T13:26:38.000Z
|
import math
import matplotlib.pyplot as plt
import json
import os
import warnings
warnings.filterwarnings("ignore")
def make_dataset(dir, file_ext=[]):
paths = []
assert os.path.exists(dir) and os.path.isdir(dir), '{} is not a valid directory'.format(dir)
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
for ext in file_ext:
if fname.endswith(ext):
path = os.path.join(root, fname)
paths.append(path)
return paths
def plotPBT(path):
name = path.split('/')[-2]
paths = sorted(make_dataset(path, ['result.json']))
scores = []
for i, path in enumerate(paths):
scores.append([])
with open(path, 'r') as f:
for line in f:
step_line = json.loads(line.rstrip())
scores[-1].append(step_line['score'])
max_iter = max(list(map(len, scores)))
plt.figure()
for i in range(len(scores)):
plt.plot(scores[i])
x = int(math.ceil(max_iter*1.1/10.0))*10
plt.plot(list(range(x)), [0.15]*x, 'r--')
plt.legend([*['_nolegend_']*len(scores), '15% error mark'])
plt.xlabel("Steps")
plt.ylabel("Mean Relative Error")
plt.ylim(bottom=0)
plt.savefig('%s.png'%name, format='png', bbox_inches='tight')
if __name__ == "__main__":
plotPBT('/home/kreitnerl/mrs-gan/ray_results/test_feat/')
| 31.111111
| 96
| 0.594286
| 198
| 1,400
| 4.090909
| 0.5
| 0.022222
| 0.02963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014164
| 0.243571
| 1,400
| 44
| 97
| 31.818182
| 0.750708
| 0
| 0
| 0
| 0
| 0
| 0.121429
| 0.032857
| 0
| 0
| 0
| 0
| 0.025641
| 1
| 0.051282
| false
| 0
| 0.128205
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8c6f95465da9e6fd5b7017053c85eda97db68b6
| 802
|
py
|
Python
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 822
|
2017-09-05T08:38:42.000Z
|
2022-03-31T16:08:48.000Z
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 81
|
2017-09-12T12:49:00.000Z
|
2022-03-25T18:21:12.000Z
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 90
|
2017-09-05T08:38:49.000Z
|
2022-03-29T12:09:22.000Z
|
from .record import Record
class Span(Record):
__attributes__ = ['start', 'stop', 'type']
def adapt_spans(spans):
for span in spans:
yield Span(span.start, span.stop, span.type)
def offset_spans(spans, offset):
for span in spans:
yield Span(
offset + span.start,
offset + span.stop,
span.type
)
def envelop_spans(spans, envelopes):
index = 0
for envelope in envelopes:
chunk = []
while index < len(spans):
span = spans[index]
index += 1
if span.start < envelope.start:
continue
elif span.stop <= envelope.stop:
chunk.append(span)
else:
index -= 1
break
yield chunk
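An assumed usage sketch of the span helpers above; the spans and envelopes are invented.

# Assumed example: group entity spans by sentence envelopes.
spans = [Span(0, 4, 'PER'), Span(10, 14, 'ORG'), Span(20, 24, 'LOC')]
sentences = [Span(0, 15, 'S'), Span(16, 30, 'S')]
chunks = list(envelop_spans(spans, sentences))
print([len(chunk) for chunk in chunks])   # [2, 1]: PER and ORG fall in the first sentence, LOC in the second
shifted = list(offset_spans(spans, 100))
print(shifted[0].start, shifted[0].stop)  # 100 104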
| 21.105263
| 52
| 0.516209
| 87
| 802
| 4.678161
| 0.356322
| 0.051597
| 0.044226
| 0.068796
| 0.206388
| 0.113022
| 0
| 0
| 0
| 0
| 0
| 0.00616
| 0.392768
| 802
| 37
| 53
| 21.675676
| 0.829569
| 0
| 0
| 0.071429
| 0
| 0
| 0.01623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.035714
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8c7ce0b20cdca0b81d121ae696bffeb609cd523
| 7,297
|
py
|
Python
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 86
|
2016-02-29T03:24:28.000Z
|
2022-03-29T09:30:21.000Z
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 135
|
2016-04-12T13:31:28.000Z
|
2022-03-29T02:18:51.000Z
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 154
|
2016-04-08T04:11:27.000Z
|
2022-03-29T21:21:07.000Z
|
from __future__ import print_function
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.extensions import *
class BulkOfflineConversion(_SingleRecordBulkEntity):
""" Represents an offline conversion that can be read or written in a bulk file.
This class exposes the :attr:`offline_conversion` property that can be read and written as fields of the Offline Conversion record in a bulk file.
Properties of this class, and of the classes it is derived from, correspond to fields of the Offline Conversion record in a bulk file.
For more information, see Keyword at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, offline_conversion=None):
super(BulkOfflineConversion, self).__init__()
self._offline_conversion = offline_conversion
self._adjustment_value = None
self._adjustment_time = None
self._adjustment_type = None
self._adjustment_currency_code = None
self._external_attribution_model = None
self._external_attribution_credit = None
@property
def adjustment_value(self):
return self._adjustment_value
@adjustment_value.setter
def adjustment_value(self, value):
self._adjustment_value = value
@property
def adjustment_time(self):
return self._adjustment_time
@adjustment_time.setter
def adjustment_time(self, value):
self._adjustment_time = value
@property
def adjustment_type(self):
return self._adjustment_type
@adjustment_type.setter
def adjustment_type(self, value):
self._adjustment_type = value
@property
def adjustment_currency_code(self):
return self._adjustment_currency_code
@adjustment_currency_code.setter
def adjustment_currency_code(self, value):
self._adjustment_currency_code = value
@property
def external_attribution_model(self):
return self._external_attribution_model
@external_attribution_model.setter
def external_attribution_model(self, value):
self._external_attribution_model = value
@property
def external_attribution_credit(self):
return self._external_attribution_credit
@external_attribution_credit.setter
def external_attribution_credit(self, value):
self._external_attribution_credit = value
@property
def offline_conversion(self):
""" The offline conversion Data Object of the Campaign Management Service.
"""
return self._offline_conversion
@offline_conversion.setter
def offline_conversion(self, value):
self._offline_conversion = value
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.ConversionCurrencyCode,
field_to_csv=lambda c: c.offline_conversion.ConversionCurrencyCode,
csv_to_field=lambda c, v: setattr(
c.offline_conversion,
'ConversionCurrencyCode',
v
)
),
_SimpleBulkMapping(
header=_StringTable.ConversionName,
field_to_csv=lambda c: c.offline_conversion.ConversionName,
csv_to_field=lambda c, v: setattr(
c.offline_conversion,
'ConversionName',
v
)
),
_SimpleBulkMapping(
header=_StringTable.MicrosoftClickId,
field_to_csv=lambda c: c.offline_conversion.MicrosoftClickId,
csv_to_field=lambda c, v: setattr(
c.offline_conversion,
'MicrosoftClickId',
v
)
),
_SimpleBulkMapping(
header=_StringTable.ConversionValue,
field_to_csv=lambda c: c.offline_conversion.ConversionValue,
csv_to_field=lambda c, v: setattr(
c.offline_conversion,
'ConversionValue',
float(v) if v else None
)
),
_SimpleBulkMapping(
header=_StringTable.ConversionTime,
field_to_csv=lambda c: bulk_datetime_str(c.offline_conversion.ConversionTime),
csv_to_field=lambda c, v: setattr(
c.offline_conversion,
'ConversionTime',
parse_datetime(v) if v else None
)
),
_SimpleBulkMapping(
header=_StringTable.AdjustmentValue,
field_to_csv=lambda c: c.adjustment_value,
csv_to_field=lambda c, v: setattr(
c,
'adjustment_value',
float(v) if v else None
)
),
_SimpleBulkMapping(
header=_StringTable.AdjustmentType,
field_to_csv=lambda c: c.adjustment_type,
csv_to_field=lambda c, v: setattr(
c,
'adjustment_type',
v
)
),
_SimpleBulkMapping(
header=_StringTable.AdjustmentCurrencyCode,
field_to_csv=lambda c: c.adjustment_currency_code,
csv_to_field=lambda c, v: setattr(
c,
'adjustment_currency_code',
v
)
),
_SimpleBulkMapping(
header=_StringTable.ExternalAttributionModel,
field_to_csv=lambda c: c.external_attribution_model,
csv_to_field=lambda c, v: setattr(
c,
'external_attribution_model',
v
)
),
_SimpleBulkMapping(
header=_StringTable.ExternalAttributionCredit,
field_to_csv=lambda c: c.external_attribution_credit,
csv_to_field=lambda c, v: setattr(
c,
'external_attribution_credit',
float(v) if v else None
)
),
_SimpleBulkMapping(
header=_StringTable.AdjustmentTime,
field_to_csv=lambda c: bulk_datetime_str(c.adjustment_time),
csv_to_field=lambda c, v: setattr(
c,
'adjustment_time',
parse_datetime(v) if v else None
)
),
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self._offline_conversion, 'offline_conversion')
self.convert_to_values(row_values, BulkOfflineConversion._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self._offline_conversion = _CAMPAIGN_OBJECT_FACTORY_V13.create('OfflineConversion')
row_values.convert_to_entity(self, BulkOfflineConversion._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkOfflineConversion, self).read_additional_data(stream_reader)
| 34.582938
| 139
| 0.628752
| 724
| 7,297
| 5.991713
| 0.180939
| 0.094053
| 0.086215
| 0.040572
| 0.389119
| 0.277547
| 0.258183
| 0.23006
| 0.170124
| 0.070309
| 0
| 0.003551
| 0.305331
| 7,297
| 210
| 140
| 34.747619
| 0.852239
| 0.084692
| 0
| 0.335329
| 0
| 0
| 0.036108
| 0.014957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107784
| false
| 0
| 0.035928
| 0.035928
| 0.197605
| 0.005988
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8c98cbdffeb6bc1eca9320791dd78a1cefdb9cd
| 4,320
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
from unittest.mock import MagicMock, patch
import ddt
from django.test import TestCase
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
import lms.djangoapps.lti_provider.tasks as tasks
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super().setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lms.djangoapps.lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id,
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
def setUp(self):
super().setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.course_grade = MagicMock()
self.course_grade_mock = self.setup_patch(
'lms.djangoapps.lti_provider.tasks.CourseGradeFactory.read', self.course_grade
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lms.djangoapps.lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, str(self.course_key), self.assignment.id, 1
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, str(self.course_key), self.assignment.id, 1
)
assert self.course_grade_mock.call_count == 0
| 31.532847
| 92
| 0.634491
| 506
| 4,320
| 5.195652
| 0.245059
| 0.041841
| 0.029669
| 0.045645
| 0.349563
| 0.261696
| 0.241917
| 0.241917
| 0.225941
| 0.190186
| 0
| 0.011727
| 0.269676
| 4,320
| 136
| 93
| 31.764706
| 0.821553
| 0.074537
| 0
| 0.27619
| 0
| 0
| 0.079806
| 0.039776
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.057143
| false
| 0
| 0.066667
| 0
| 0.161905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8c9d560d993e370d3b1363238c43807ccc5dfd5
| 1,954
|
py
|
Python
|
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | 2
|
2018-04-17T00:37:40.000Z
|
2018-04-30T03:04:20.000Z
|
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | null | null | null |
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | null | null | null |
from loser_agent import *
class DumbAgent(LoserAgent):
def __init__(self, is_logging = False, is_printing_to_console = False, isMainAgent = False, fileName = ""):
super().__init__(is_logging, is_printing_to_console, isMainAgent)
# For debugging
self.is_logging = is_logging # Setting this to true to write information to log files in the agents/logs directory
self.is_printing_to_console = is_printing_to_console # Setting this to true causes all logs to be printed to the console
#ZerglingBanelingRushAgent.mainAgent = self
async def on_step(self, iteration, strategy_num = -1):
# self.log("Step: %s Overlord: %s" % (str(iteration), str(self.units(OVERLORD).amount)))
# self.log("Step: " + str(iteration))
# TEMP: Until strategy is given by Q table
#strategy_num = (int)(iteration / 75) % 8
# Build lings, queen, overlords, drones, and meleeattack1
await self.basic_build(iteration)
# Perform actions based on given strategy
if strategy_num == -1:
# self.mainAgent.log("No given strategy")
pass
else:
await self.perform_strategy(iteration, strategy_num)
async def basic_build(self, iteration):
larvae = self.mainAgent.units(LARVA)
if larvae.exists and self.mainAgent.can_afford(DRONE) and self.mainAgent.supply_left > 0:
await self.mainAgent.do(larvae.random.train(DRONE))
if larvae.exists and self.mainAgent.can_afford(OVERLORD) and self.mainAgent.supply_left == 0:
await self.mainAgent.do(larvae.random.train(OVERLORD))
def main():
# Start game with LoserAgent as the Bot, and begin logging
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Bot(Race.Zerg, DumbAgent(True, False, True)),
Computer(Race.Protoss, Difficulty.Medium)
], realtime=False)
if __name__ == '__main__':
main()
| 41.574468
| 129
| 0.666837
| 251
| 1,954
| 5.007968
| 0.438247
| 0.082737
| 0.038186
| 0.060461
| 0.163882
| 0.163882
| 0.163882
| 0.163882
| 0.10183
| 0.10183
| 0
| 0.006684
| 0.234391
| 1,954
| 46
| 130
| 42.478261
| 0.833556
| 0.308086
| 0
| 0
| 0
| 0
| 0.017177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.04
| 0.04
| 0
| 0.16
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8cc12080c230a16858bbc18a05bcd5b93430fe7
| 317
|
py
|
Python
|
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
import random
def find(array):
summation = sum(array)
n = len(array)
total = n*(n+1)//2
miss = total - summation
return miss
def main():
arr = [i for i in range(99)]
print(arr)
result = find(arr)
print("The missing number is-", result)
if __name__ == '__main__':
main()
| 14.409091
| 43
| 0.577287
| 45
| 317
| 3.888889
| 0.644444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017621
| 0.283912
| 317
| 21
| 44
| 15.095238
| 0.753304
| 0
| 0
| 0
| 0
| 0
| 0.094637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.285714
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8cde62d3add298d347b197159cd3ef0fad71443
| 2,850
|
py
|
Python
|
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | 8
|
2017-05-04T22:04:48.000Z
|
2020-03-27T13:06:39.000Z
|
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | null | null | null |
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | 2
|
2019-07-22T02:19:57.000Z
|
2020-09-29T21:00:00.000Z
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
m = 4
b = -.2
bl = -.1
br = -.1
sh = .13
def show_ped(image, bb):
im = np.zeros(image.shape[:2])
[ymin, xmin, ymax, xmax] = bb
im[ymin:ymax,xmin:xmax]=1
plt.imshow(im)
plt.show()
def in_region(x,y, m=0, b=0, above=True, from_left=False):
x = 1 - x if from_left else x
return ((m*x+b) <= y) == above
def brakezone(x,y,m=m,b=b,sh=.3):
left = in_region(x,y,m,b, above=False)
right = in_region(x,y,m,b, above=False, from_left=True)
top = in_region(x,y,b=sh, above=False)
return left and right and top
def brake_policy(m=m, b=b,sh=sh):
def policy(x,y):
return brakezone(x,y,m=m,b=b, sh=sh)
return policy
def to_bb(res, img):
h, w = img.shape[:2]
xmin = res['topleft']['x']/w
xmax = res['bottomright']['x']/w
ymin = res['topleft']['y']/h
ymax = res['bottomright']['y']/h
return [ymin, xmin, ymax, xmax]
def res_policy(brake_policy):
def should_brake(res, in_trajectory=brake_policy):
brake = []
for r in res:
if r['label'] == 'person':
print("Person found")
x,y = feet(r)
brake.append(in_trajectory(x,y))
return any(brake)
return should_brake
def feet(res):
bb = res['box']
x = (bb.xmax+bb.xmin)/2
y = bb.ymax
return x,y
def show_brakezone(img, brake_fn=brakezone, saveas=None, show=False):
if img is None:
out = np.zeros(size)
else:
out = img.copy()
size = img.shape[:2]
img_h, img_w = size
zone = np.zeros(size)
for y_ in range(img_h):
for x_ in range(img_w):
y = 1-y_/img_h
x = x_/img_w
brake = brake_fn(x,y) #and not safe_fn(bb)
zone[y_,x_]=brake
if img is not None and brake:
out[y_,x_,0]+=35
# out[y_,x_,0]=min(200,out[y_,x_][0])
if show:
plt.imshow(out)
plt.show()
if saveas:
plt.savefig(saveas)
return out
from visualizations import show_bboxes
def find_horizon(img, save="horizon", detect=None, res=None,sh=sh,b=b,m=m):
if detect:
res = detect(img)
sh_in = (raw_input("Enter horizon: "))
b_in = ( raw_input("Enter Intc: "))
m_in = ( raw_input("Enter Slope: "))
    update = lambda x, default: float(x) if x != '' else float(default)
b = update(b_in, b)
m = update(m_in, m)
sh = update(sh_in, sh)
print('(b,m,sh)',b,m,sh)
brake_fn=brake_policy(sh=sh, m=m, b=b)
masked=show_brakezone(img, show=False, brake_fn=brake_fn)
if detect:
plt.close()
res = detect(img)
if res:
print(res)
show_bboxes(masked, res)
print(res_policy(brake_fn)(res))
plt.show()
| 26.635514
| 75
| 0.561404
| 472
| 2,850
| 3.258475
| 0.216102
| 0.014304
| 0.009753
| 0.026008
| 0.064369
| 0.057217
| 0.050715
| 0.050715
| 0
| 0
| 0
| 0.012291
| 0.286316
| 2,850
| 106
| 76
| 26.886792
| 0.743854
| 0.019298
| 0
| 0.076087
| 0
| 0
| 0.043338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119565
| false
| 0
| 0.043478
| 0.01087
| 0.26087
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d0d6ecca8d12cee0a53f9628644c363e8839b3
| 1,055
|
py
|
Python
|
python/smqtk/utils/simple_timer.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 82
|
2015-01-07T15:33:29.000Z
|
2021-08-11T18:34:05.000Z
|
python/smqtk/utils/simple_timer.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 230
|
2015-04-08T14:36:51.000Z
|
2022-03-14T17:55:30.000Z
|
python/smqtk/utils/simple_timer.py
|
DigitalCompanion/SMQTK
|
fc9404b69150ef44f24423844bc80735c0c2b669
|
[
"BSD-3-Clause"
] | 65
|
2015-01-04T15:00:16.000Z
|
2021-11-19T18:09:11.000Z
|
import time
from smqtk.utils import SmqtkObject
class SimpleTimer (SmqtkObject):
"""
    Little class to wrap the timing of things. To be used with the ``with``
    statement.
"""
def __init__(self, msg, log_func=None, *args):
"""
Additional arguments are passed to the logging method
:param msg:
:param log_func:
:param args:
:return:
"""
self._log_func = log_func
self._msg = msg
self._msg_args = args
self._s = 0.0
def __enter__(self):
if self._log_func:
self._log_func(self._msg, *self._msg_args)
else:
self._log.info(self._msg % self._msg_args)
self._s = time.time()
def __exit__(self, *_):
if self._log_func:
self._log_func("%s -> %f s", self._msg % self._msg_args,
time.time() - self._s)
else:
self._log.info("%s -> %f s" % (self._msg % self._msg_args,
time.time() - self._s))
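# The docstring above says the class is meant to be used with the ``with``
# statement; the block below is only an illustrative sketch (not part of the
# original module), assuming SmqtkObject needs no constructor arguments and
# using a plain ``logging`` logger as the optional log function.
if __name__ == '__main__':
    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("simple_timer_demo")

    # Times the block and reports "loading 1000 records -> <elapsed> s".
    with SimpleTimer("loading %d records", log.info, 1000):
        records = list(range(1000))  # stand-in for the real work being timed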
| 27.051282
| 74
| 0.525118
| 131
| 1,055
| 3.870229
| 0.328244
| 0.151874
| 0.108481
| 0.138067
| 0.315582
| 0.244576
| 0.244576
| 0.244576
| 0.134122
| 0.134122
| 0
| 0.002963
| 0.36019
| 1,055
| 38
| 75
| 27.763158
| 0.748148
| 0.176303
| 0
| 0.190476
| 0
| 0
| 0.025189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d1e3f53857745560685cc9254effe945b354f9
| 3,314
|
py
|
Python
|
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | null | null | null |
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | 1
|
2020-10-31T15:32:31.000Z
|
2020-10-31T15:33:11.000Z
|
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | null | null | null |
import socket, time, sys
import argparse
__version__="0.1"
min_port=0
#max_port=65535
max_port=10000
parser = argparse.ArgumentParser(description="a simple python port scanner",epilog="author: blackc8")
parser.add_argument("hostname",metavar="<hostname>",help="host to scan")
parser.add_argument("-dp","--ddport",help="do not display port",action="store_true")
parser.add_argument("-sF","--show_filtered",help="show filtered ports",action="store_true")
parser.add_argument("-b","--banner",help="grab the banners of ports",action="store_true")
parser.add_argument("-v","--version",help="dispaly version",action="version",version="%(prog)s ("+__version__+")")
args=parser.parse_args()
def w_log(msg):
print(msg)
def _exit(error):
w_log("[-] {}".format(error))
w_log("exited")
sys.exit()
def resolve_hostname(hostname):
try:
IPaddr=socket.gethostbyname(hostname)
return IPaddr
except socket.error:
return 0
def validIP(address):
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
if not 0 <= int(item) <= 255:
return False
return True
def is_open(host,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
con = sock.connect_ex((host,port))
sock.close()
return con
def grab_banner(host,port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
con = sock.connect((host,port))
sock.settimeout(3)
banner = sock.recv(1024)
banner = banner.decode().strip('\n')
return banner
except:
return "<no banner>"
def scan(hostname,ddport=False,gbanner=False,show_filtered=False):
open_ports=[]
filtered_ports=[]
banners=[]
if not validIP(hostname):
hostIP=resolve_hostname(hostname)
if hostIP == 0:
_exit("Unable to resolve hostname ({})")
else: host_info="{} ({})".format(hostname,hostIP)
else:
hostIP=hostname
host_info=hostname
if not validIP(hostIP):
_exit("Invalid IP adddress {}".format(hostIP))
w_log("[i] Scan started at: {}".format(time.asctime()))
start_time=time.time()
w_log("[+] Scaning host {}".format(host_info))
for port in range(min_port,max_port):
port_stat=is_open(hostIP,port)
if port_stat == 0: # open port
open_ports.append(port)
if not ddport:
w_log("port: {}".format(port))
w_log(" state: open")
if gbanner:
banner=grab_banner(hostname,port)
banners.append([port, banner])
w_log(" banner: {}".format(banner))
elif port_stat == 11: # filtered port
filtered_ports.append(port)
if show_filtered:
w_log("port: {}".format(port))
w_log(" state: filtered")
stop_time=time.time()
time_taken=stop_time-start_time
w_log("[i] {} open, {} filtered ports are discovered.".format(len(open_ports),len(filtered_ports)))
w_log("[i] Scan completed in {:.2f} seconds.".format(time_taken))
return True,open_ports,banners,time_taken
if __name__ == "__main__":
scan(args.hostname,args.ddport,args.banner,args.show_filtered)
| 30.971963
| 114
| 0.624925
| 427
| 3,314
| 4.662763
| 0.306792
| 0.024108
| 0.042692
| 0.031642
| 0.124561
| 0.124561
| 0.108488
| 0.071321
| 0.044199
| 0
| 0
| 0.012529
| 0.22933
| 3,314
| 106
| 115
| 31.264151
| 0.767032
| 0.011467
| 0
| 0.090909
| 0
| 0
| 0.161625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079545
| false
| 0
| 0.022727
| 0
| 0.204545
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d25c456ce1d78680f761522a288c787f746b68
| 4,730
|
py
|
Python
|
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | null | null | null |
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | null | null | null |
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | 1
|
2021-07-07T12:01:42.000Z
|
2021-07-07T12:01:42.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 20:15:55 2019
@author: Shinelon
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
path='ex2data1.txt'
data=pd.read_csv(path,header=None,names=['Exam1','Exam2','Admitted'])
data.head()
# Scatter plot of the two exam scores, color-coded by class
positive=data[data['Admitted'].isin([1])]
negative=data[data['Admitted'].isin([0])]
fig,ax=plt.subplots(figsize=(12,8))
# c=color; s sets the marker size (default 20)
ax.scatter(positive['Exam1'],positive['Exam2'],c='b',marker='o',label='Admitted')
ax.scatter(negative['Exam1'],negative['Exam2'],c='r',marker='o',label='Unadmitted')
ax.legend(loc=4)
ax.set_xlabel('Exam1 Score');ax.set_ylabel('Exam2 Score')
plt.show()  # a clear boundary is visible between the two classes
# sigmoid function
def sigmoid(z):
    return 1/(1+np.exp(-z))  # np.exp(-z) is e to the power -z
# check the sigmoid function visually
nums=np.arange(-10,10,1)
fig,ax=plt.subplots(figsize=(12,8))
ax.plot(nums,sigmoid(nums),'r')
plt.show()
def cost(theta,X,y):
theta=np.matrix(theta)
X=np.matrix(X)
y=np.matrix(y)
first=np.multiply(-y,np.log(sigmoid(X*theta.T)))
second=np.multiply((1-y),np.log(1-sigmoid(X*theta.T)))
return np.sum(first-second)/len(X)
data.insert(0,'ones',1)  # insert a column of ones at position 0
cols=data.shape[1]
X=data.iloc[:,0:cols-1]
y=data.iloc[:,cols-1:cols]
X=np.array(X.values)
y=np.array(y.values)
theta=np.zeros(3)
cost(theta,X,y)
def gradientDescent(theta,X,y):
theta=np.matrix(theta)
X=np.matrix(X)
y=np.matrix(y)
parameters=int(theta.ravel().shape[1])
grad=np.zeros(parameters)
error=sigmoid(X*theta.T)-y
for i in range(parameters):
term=np.multiply(error,X[:,i])
grad[i]=np.sum(term)/len(X)
return grad
gradientDescent(theta,X,y)  # only computes theta for a single update (one gradient step)
# use SciPy's truncated Newton (TNC) solver to find the optimal parameters
import scipy.optimize as opt
result=opt.fmin_tnc(func=cost,x0=theta,fprime=gradientDescent,args=(X,y))
result  # shows theta and the cost
cost(result[0],X,y)
# build the prediction function
def predict(theta,X):
probability=sigmoid(X*theta.T)
return [1 if x>=0.5 else 0 for x in probability]
theta_min=np.matrix(result[0])  # theta_min is a 1x3 array
predictions=predict(theta_min,X)
correct=[1 if((a==1 and b==1) or (a==0 and b==0))\
else 0 for (a,b) in zip(predictions,y)]
accuracy=(sum(map(int,correct))/len(correct))
print('accuracy={}'.format(accuracy))  # note that this is the accuracy on the training set
path2='ex2data2.txt'
data2=pd.read_csv(path2,header=None,names=['Test1','Test2','Accepted'])
data2.head()
positive=data2[data2['Accepted'].isin([1])]
negative=data2[data2['Accepted'].isin([0])]
fig,ax=plt.subplots(figsize=(12,8))
ax.scatter(positive['Test1'],positive['Test2'],s=50,c='b',\
marker='o',label='Accepted')
ax.scatter(negative['Test1'],negative['Test2'],s=50,c='r',\
marker='x',label='Rejected')
ax.legend()
ax.set_xlabel('Test1 Score')
ax.set_ylabel('Test2 Score')
plt.show()
# far more complex, no linear boundary; build polynomial features from the original features
degree=5
x1=data2['Test1']
x2=data2['Test2']
data2.insert(3,'ones',1)
for i in range(1,degree):
for j in range(0,i):
data2['F'+str(i)+str(j)]=np.power(x1,i-j)*np.power(x2,j)
data2.drop('Test1',axis=1,inplace=True)  # axis=0 means rows, 1 means columns; inplace=True modifies the original data
data2.drop('Test2',axis=1,inplace=True)
data2.head()
# regularized cost function
def costReg(theta,X,y,learningRate):
theta=np.matrix(theta)
X=np.matrix(X)
y=np.matrix(y)
first=np.multiply(-y,np.log(sigmoid(X*theta.T)))
second=np.multiply((1-y),np.log(1-sigmoid(X*theta.T)))
reg=(learningRate/(2*len(X)))*np.sum(np.power(theta[:,1:theta.shape[1]],2))
return np.sum(first-second)/len(X)+reg
# the regularization parameter increases the penalty on theta
def gradientReg(theta,X,y,learningRate):
theta=np.matrix(theta)
X=np.matrix(X)
y=np.matrix(y)
parameters=int(theta.ravel().shape[1])
grad=np.zeros(parameters)
error=sigmoid(X*theta.T)-y
for i in range(parameters):
term=np.multiply(error,X[:,i])
if(i==0):
grad[i]=np.sum(term)/len(X)
else:
grad[i]=(np.sum(term)/len(X))+(learningRate/len(X))*theta[:,i]
    return grad  # grad, i.e. the gradient used to update theta
cols=data2.shape[1]
X2=data2.iloc[:,1:cols]
y2=data2.iloc[:,0:1]
X2=np.array(X2.values)
y2=np.array(y2.values)
theta2=np.zeros(11)
learningRate=1
costReg(theta2,X2,y2,learningRate)
gradientReg(theta2,X2,y2,learningRate)
result2=opt.fmin_tnc(func=costReg,x0=theta2,fprime=gradientReg,\
args=(X2,y2,learningRate))
result2
# check the accuracy on the training data
theta_min=np.matrix(result2[0])
predictions=predict(theta_min,X2)
correct=[1 if ((a==1 and b==1) or (a==0 and b==0))\
else 0 for (a,b) in zip(predictions,y2)]
accuracy=(sum(map(int,correct))/len(correct))
print('accuracy2={}%'.format(accuracy*100))
# implement it directly with sklearn
from sklearn import linear_model
model=linear_model.LogisticRegression(penalty='l2',\
C=1.0,solver='liblinear')
model.fit(X2,y2.ravel())
model.score(X2,y2)
| 31.533333
| 84
| 0.679281
| 790
| 4,730
| 4.048101
| 0.251899
| 0.035022
| 0.028455
| 0.030644
| 0.313321
| 0.298311
| 0.298311
| 0.265166
| 0.231395
| 0.212008
| 0
| 0.042553
| 0.115645
| 4,730
| 150
| 85
| 31.533333
| 0.72197
| 0.07907
| 0
| 0.325581
| 0
| 0
| 0.065604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.03876
| 0.007752
| 0.131783
| 0.015504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ef9b4082cb1779ade1e3f88552ad789562c6383
| 2,776
|
py
|
Python
|
tests/selenium/auth/test_user.py
|
bodik/sner4-web
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 9
|
2019-05-15T11:33:43.000Z
|
2022-02-17T04:05:28.000Z
|
tests/selenium/auth/test_user.py
|
bodik/sner4
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 1
|
2019-03-01T11:48:13.000Z
|
2019-03-01T11:48:13.000Z
|
tests/selenium/auth/test_user.py
|
bodik/sner4-web
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 3
|
2020-03-03T21:06:37.000Z
|
2021-01-11T14:40:56.000Z
|
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
auth.views.user selenium tests
"""
from flask import url_for
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from sner.server.auth.models import User
from sner.server.extensions import db
from tests.selenium import dt_inrow_delete, dt_rendered, webdriver_waituntil
def test_user_list_route(live_server, sl_admin, user): # pylint: disable=unused-argument
"""simple test ajaxed datatable rendering"""
sl_admin.get(url_for('auth.user_list_route', _external=True))
dt_rendered(sl_admin, 'user_list_table', user.username)
def test_user_list_route_inrow_delete(live_server, sl_admin, user): # pylint: disable=unused-argument
"""delete user inrow button"""
user_id = user.id
db.session.expunge(user)
sl_admin.get(url_for('auth.user_list_route', _external=True))
# in this test-case there are multiple items in the table (current_user, test_user), hence index which to delete has to be used
dt_inrow_delete(sl_admin, 'user_list_table', 1)
assert not User.query.get(user_id)
def test_user_apikey_route(live_server, sl_admin, user): # pylint: disable=unused-argument
"""apikey generation/revoking feature tests"""
sl_admin.get(url_for('auth.user_list_route', _external=True))
dt_rendered(sl_admin, 'user_list_table', user.username)
# disable fade, the timing interferes with the test
sl_admin.execute_script('$("div#modal-global").toggleClass("fade")')
sl_admin.find_element_by_xpath('//a[@data-url="%s"]' % url_for('auth.user_apikey_route', user_id=user.id, action='generate')).click()
webdriver_waituntil(sl_admin, EC.visibility_of_element_located((By.XPATH, '//h4[@class="modal-title" and text()="Apikey operation"]')))
sl_admin.find_element_by_xpath('//div[@id="modal-global"]//button[@class="close"]').click()
    webdriver_waituntil(sl_admin, EC.invisibility_of_element_located((By.XPATH, '//div[@class="modal-global"]')))
dt_rendered(sl_admin, 'user_list_table', user.username)
db.session.refresh(user)
assert user.apikey
sl_admin.find_element_by_xpath('//a[@data-url="%s"]' % url_for('auth.user_apikey_route', user_id=user.id, action='revoke')).click()
webdriver_waituntil(sl_admin, EC.visibility_of_element_located((By.XPATH, '//h4[@class="modal-title" and text()="Apikey operation"]')))
sl_admin.find_element_by_xpath('//div[@id="modal-global"]//button[@class="close"]').click()
    webdriver_waituntil(sl_admin, EC.invisibility_of_element_located((By.XPATH, '//div[@class="modal-global"]')))
dt_rendered(sl_admin, 'user_list_table', user.username)
db.session.refresh(user)
assert not user.apikey
| 46.266667
| 139
| 0.747839
| 415
| 2,776
| 4.739759
| 0.284337
| 0.071174
| 0.044738
| 0.035587
| 0.616167
| 0.588205
| 0.588205
| 0.588205
| 0.588205
| 0.563803
| 0
| 0.001626
| 0.113833
| 2,776
| 59
| 140
| 47.050847
| 0.797967
| 0.177954
| 0
| 0.454545
| 0
| 0
| 0.237799
| 0.127329
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6efaa56371bdc91af714b2ef343d987547b208e3
| 936
|
py
|
Python
|
isobmff/media_file.py
|
kentoku24/isobmff
|
6877505a75915caf440bbb80b6024ba6bf9f3baa
|
[
"MIT"
] | 6
|
2017-08-31T01:55:37.000Z
|
2018-12-26T03:03:24.000Z
|
isobmff/media_file.py
|
kentoku24/isobmff
|
6877505a75915caf440bbb80b6024ba6bf9f3baa
|
[
"MIT"
] | 4
|
2017-08-29T03:47:16.000Z
|
2017-09-05T09:00:17.000Z
|
isobmff/media_file.py
|
m-hiki/isbmff
|
0724b9892884ae35bdd0796a97a9506098c4cd25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .box import indent
from .box import read_box
class MediaFile(object):
def __init__(self):
self.ftyp = None
self.mdats = []
self.meta = None
self.moov = None
def __repr__(self):
rep = self.ftyp.__repr__() + '\n'
rep += self.meta.__repr__() + '\n'
rep += self.moov.__repr__() + '\n'
for mdat in self.mdats:
rep += mdat.__repr__() + '\n'
return 'ISOBaseMediaFile\n' + indent(rep)
    def read(self, file_name):
        with open(file_name, 'rb') as file:
            while True:
                box = read_box(file)
                if not box:
                    break
                if box.box_type == 'mdat':
                    self.mdats.append(box)
                else:
                    setattr(self, box.box_type, box)
| 26.742857
| 53
| 0.459402
| 101
| 936
| 3.960396
| 0.425743
| 0.05
| 0.065
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001845
| 0.42094
| 936
| 34
| 54
| 27.529412
| 0.736162
| 0.022436
| 0
| 0
| 0
| 0
| 0.036405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6efc25feb8365613f08bcea149b9338afcb635e2
| 3,690
|
py
|
Python
|
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | 1
|
2021-02-24T12:05:06.000Z
|
2021-02-24T12:05:06.000Z
|
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | null | null | null |
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from glob import glob
import logging
import os
from os.path import abspath, dirname, normpath
import re
from shutil import rmtree
import sqlite3
import sys
import folium
from folium.plugins import FastMarkerCluster
from zipfile import ZipFile
import pandas as pd
import requests
from config import db
from models import MarinaLitterWatch
CLEAN_FILES = ('./CSV_1', './CSV_2')
ZIP_FILE = 'fme.zip'
DB_FILE = 'mlw.db'
MAP_FILE = 'locations.html'
# Set Logging
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%d-%b-%y %H:%M:%S", stream=sys.stdout, level=logging.INFO)
# Set local path
here = normpath(abspath(dirname(__file__)))
# Download data
logging.info("Downloading data...")
response = requests.get(
'http://fme.discomap.eea.europa.eu/fmedatadownload/MarineLitter/MLWPivotExport.fmw'
'?CommunityCode=&FromDate=2010-01-01&ToDate=2022-12-31'
'&opt_showresult=false&opt_servicemode=sync')
downloadlink = re.search(
r"<a\s+(?:[^>]*?\s+)?href=([\"'])(.*?)\1>", response.content.decode()).group(2)
logging.info("Saving data...")
zipfile = requests.get(downloadlink)
open(f'{here}/{ZIP_FILE}', 'wb').write(zipfile.content)
logging.info("Uzipping data...")
zipObject = ZipFile(f'{here}/{ZIP_FILE}', 'r')
zipObject.extractall(path=here)
logging.info("Loading data...")
# Data to initialize database with
data = pd.read_csv(
f'{here}/CSV_1/MLW_PivotExport/MLW_Data.csv', encoding="ISO-8859-1")
# Delete database file if it exists currently
if os.path.exists(f'{here}/{DB_FILE}'):
os.remove(f'{here}/{DB_FILE}')
# Create the database
db.create_all()
# populate the database
conn = sqlite3.connect(f'{here}/{DB_FILE}')
data.to_sql('mlw', conn, if_exists='append')
db.session.commit()
# Create Map
folium_map = folium.Map(location=[40.416729, -3.703339],
zoom_start=3, min_zoom=3,
tiles='Stamen Terrain')
callback = ('function (row) {'
'var marker = L.marker(new L.LatLng(row[0], row[1]), {color: "red"});'
'var icon = L.AwesomeMarkers.icon({'
"icon: 'info-sign',"
"iconColor: 'white',"
"markerColor: 'red',"
"prefix: 'glyphicon',"
"extraClasses: 'fa-rotate-0'"
'});'
'marker.setIcon(icon);'
"var popup = L.popup({maxWidth: '300'});"
"const display_text = {text: row[2]};"
"var mytext = $(`<div id='mytext' class='display_text' style='width: 100.0%; height: 100.0%;'> ${display_text.text}</div>`)[0];"
"popup.setContent(mytext);"
"marker.bindPopup(popup);"
'return marker};')
FastMarkerCluster(data=list(
zip(data['lat_y1'].values, data['lon_x1'].values, data['BeachName'].values)), callback=callback).add_to(folium_map)
folium.LayerControl().add_to(folium_map)
folium_map.save(f'{here}/templates/{MAP_FILE}')
# Clean files
logging.info("Cleaning files...")
for path_spec in CLEAN_FILES:
# Make paths absolute and relative to this path
abs_paths = glob(os.path.normpath(
os.path.join(here, path_spec)))
for path in [str(p) for p in abs_paths]:
if not path.startswith(here):
# Die if path in CLEAN_FILES is absolute + outside this directory
raise ValueError(
"%s is not a path inside %s" % (path, here))
logging.info(f'removing {os.path.relpath(path)}')
rmtree(path)
logging.info(f'removing {ZIP_FILE}')
os.remove(f'{here}/{ZIP_FILE}')
| 31.810345
| 140
| 0.644986
| 494
| 3,690
| 4.710526
| 0.455466
| 0.037817
| 0.010314
| 0.015471
| 0.031801
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021426
| 0.190515
| 3,690
| 115
| 141
| 32.086957
| 0.757616
| 0.090786
| 0
| 0
| 0
| 0
| 0.356373
| 0.101137
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.197531
| 0
| 0.197531
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6efcad9f388b05b3d7f79c0c4ad5c784bb1826e5
| 3,486
|
py
|
Python
|
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
TIPO = 'selectable'  # 'basic' or 'selectable'. 'basic': required for the program to run
# 'selectable': Not required. Adds new functionality to the program
# For example, autenticar is 'basic', whereas actas is optional
# The code_menu must be unique and will be configured as a system permission
MENU_DEFAULT = [
{'code_menu': 'acceso_domotica',
'texto_menu': 'Domótica',
'href': '',
'nivel': 1,
'tipo': 'Accesible',
'pos': 1,
},
{'code_menu': 'acceso_grupos_domotica',
'texto_menu': 'Agrupaciones de dispositivos',
'href': 'grupos_domotica',
'nivel': 2,
'tipo': 'Accesible',
'pos': 1,
'parent': 'acceso_domotica'
},
{'code_menu': 'acceso_configura_domotica',
'texto_menu': 'Configurar domótica',
'href': 'configura_domotica',
'nivel': 2,
'tipo': 'Accesible',
'pos': 2,
'parent': 'acceso_domotica'
}
]
# Additional permissions are added for the user
PERMISOS = [{'code_nombre': 'crea_grupos_domotica',
'nombre': 'Permiso para crear un grupo de dispositivos domóticos',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'borra_grupos_domotica',
'nombre': 'Permiso para borrar cualquier grupo que contiene domótica',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'edita_grupos_domotica',
'nombre': 'Permiso para modificar cualquier grupo que contiene domótica',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'crea_dispositivos_domotica',
'nombre': 'Permiso para crear un dispositivo domótico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_dispositivos_domotica',
'nombre': 'Permiso para borrar cualquier dispositivo domótico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_dispositivos_domotica',
'nombre': 'Permiso para editar cualquier dispositivo domótico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'crea_secuencias_domotica',
'nombre': 'Permiso para crear una secuencia domótica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_secuencias_domotica',
'nombre': 'Permiso para borrar cualquier secuencia domótica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_secuencias_domotica',
'nombre': 'Permiso para modificar cualquier secuencia domótica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'crea_conjuntos_domotica',
'nombre': 'Permiso para crear un conjunto de dispositivos domóticos',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_conjuntos_domotica',
'nombre': 'Permiso para borrar cualquier conjunto de dispositivos domóticos',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_conjuntos_domotica',
'nombre': 'Permiso para modificar cualquier conjunto de dispositivos domóticos',
'menu': 'acceso_configura_domotica'
}
]
| 41.011765
| 103
| 0.592943
| 322
| 3,486
| 6.164596
| 0.26087
| 0.075567
| 0.126952
| 0.151134
| 0.68262
| 0.587406
| 0.377834
| 0.35869
| 0.303275
| 0.13602
| 0
| 0.002848
| 0.294894
| 3,486
| 84
| 104
| 41.5
| 0.804719
| 0.106999
| 0
| 0.276316
| 0
| 0
| 0.580489
| 0.194784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013158
| 0
| 0.013158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6efceaaf9fe7bf6e6a3d8409b3f03d38e6342a11
| 5,944
|
py
|
Python
|
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
import os
import cv2
import fire
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from configs.common import config as cfg
from hawkdet.models.build import build_detor
from hawkdet.lib.numpy_nms import np_nms
from hawkdet.lib.box_utils import decode, decode_landm
from hawkdet.lib.prior_box import PriorBox
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def clear(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def run(
model_path,
top_k=5000,
keep_top_k=750,
nms_threshold=0.4,
origin_size=True,
confidence_threshold=0.02,
save_folder='./eval_results',
testset_dir='/root/paddlejob/workspace/hawk-facedet/data/widerface/val',
):
torch.set_grad_enabled(False)
cudnn.benchmark = True
device = torch.cuda.current_device()
net = build_detor(cfg.Detector)
state_dict = torch.load(model_path)['model']
net.load_state_dict(state_dict)
net.eval()
net = net.to(device)
testset_folder = os.path.join(testset_dir, 'images')
testset_list = os.path.join(testset_dir, 'wider_val.txt')
with open(testset_list, 'r') as fr:
test_dataset = fr.read().split()
num_images = len(test_dataset)
_t = {'forward_pass': Timer(), 'misc': Timer()}
# testing begin
for i, img_name in enumerate(test_dataset):
image_path = testset_folder + img_name
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
# testing scale
target_size = 1600
max_size = 2150
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(resize * im_size_max) > max_size:
resize = float(max_size) / float(im_size_max)
if origin_size:
resize = 1
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
_t['forward_pass'].tic()
loc, conf, landms = net(img) # forward pass
conf = F.softmax(conf, dim=-1)
_t['forward_pass'].toc()
_t['misc'].tic()
priors = PriorBox(cfg.min_sizes, cfg.steps, cfg.clip, image_size=(im_height, im_width)).forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg.variance)
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg.variance)
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1]
# order = scores.argsort()[::-1][:top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = np_nms(dets, nms_threshold)
dets = dets[keep, :]
landms = landms[keep]
# keep top-K faster NMS
# dets = dets[:keep_top_k, :]
# landms = landms[:keep_top_k, :]
dets = np.concatenate((dets, landms), axis=1)
_t['misc'].toc()
save_name = save_folder + img_name[:-4] + ".txt"
dirname = os.path.dirname(save_name)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(save_name, "w") as fd:
bboxs = dets
file_name = os.path.basename(save_name)[:-4] + "\n"
bboxs_num = str(len(bboxs)) + "\n"
fd.write(file_name)
fd.write(bboxs_num)
for box in bboxs:
x = int(box[0])
y = int(box[1])
w = int(box[2]) - int(box[0])
h = int(box[3]) - int(box[1])
confidence = str(box[4])
line = str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " + confidence + " \n"
fd.write(line)
print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(
i + 1, num_images, _t['forward_pass'].average_time, _t['misc'].average_time))
if __name__ == '__main__':
fire.Fire({"run": run})
exit()
| 33.206704
| 105
| 0.57924
| 808
| 5,944
| 4.094059
| 0.27599
| 0.038694
| 0.012092
| 0.018138
| 0.125453
| 0.100665
| 0.100665
| 0.069528
| 0.069528
| 0.069528
| 0
| 0.023146
| 0.287685
| 5,944
| 178
| 106
| 33.393258
| 0.758148
| 0.063257
| 0
| 0.071429
| 0
| 0
| 0.044685
| 0.01027
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0.035714
| 0.092857
| 0
| 0.15
| 0.007143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e00ea020dca2ee0cd420f43a2015391aba2eabc
| 2,491
|
py
|
Python
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 12
|
2021-05-04T10:47:02.000Z
|
2022-03-10T13:25:04.000Z
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 17
|
2021-05-04T00:53:49.000Z
|
2022-01-18T10:01:49.000Z
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 9
|
2021-05-04T00:46:38.000Z
|
2022-02-16T02:55:50.000Z
|
from keydra.clients.contentful import ContentfulClient
from keydra.providers.base import BaseProvider
from keydra.providers.base import exponential_backoff_retry
from keydra.exceptions import DistributionException
from keydra.exceptions import RotationException
from keydra.logging import get_logger
LOGGER = get_logger()
PW_FIELD = 'secret'
class Client(BaseProvider):
def __init__(self, session=None, credentials=None, region_name=None):
self._secret_key = credentials['key']
self._cfclient = ContentfulClient(token=credentials[PW_FIELD])
def _rotate_secret(self, secret):
try:
curr_tokens = self._cfclient.get_tokens()
new_token = self._cfclient.create_token(
name=self._secret_key,
readonly=False
)
except Exception as error:
LOGGER.error(
"Failed to rotate Contentful token '{}' - {}".format(
self._secret_key,
error
)
)
raise RotationException(
'Error rotating token {} on Contentful - '
'error : {}'.format(
self._secret_key,
error
)
)
try:
# Revoke all existing tokens, just leaving our new one
for token in curr_tokens:
self._cfclient.revoke_token(token_id=token.id)
except Exception as error:
LOGGER.error(
'Failed to revoke Contentful token'
)
raise RotationException(
'Error revoking token on Contentful - '
'error : {}'.format(
error
)
)
LOGGER.info(
"Contentful token '{}' successfully rotated.".format(
self._secret_key
)
)
return {
'provider': 'contentful',
'key': self._secret_key,
f'{PW_FIELD}': new_token.token,
}
@exponential_backoff_retry(3)
def rotate(self, secret):
return self._rotate_secret(secret)
def distribute(self, secret, destination):
raise DistributionException('Contentful does not support distribution')
@classmethod
def redact_result(cls, result, spec=None):
if 'value' in result and PW_FIELD in result['value']:
result['value'][PW_FIELD] = '***'
return result
| 28.965116
| 79
| 0.566439
| 236
| 2,491
| 5.788136
| 0.355932
| 0.065886
| 0.057101
| 0.041728
| 0.174231
| 0.060029
| 0.060029
| 0.060029
| 0
| 0
| 0
| 0.00062
| 0.352469
| 2,491
| 85
| 80
| 29.305882
| 0.846249
| 0.020875
| 0
| 0.227273
| 0
| 0
| 0.128847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.090909
| 0.015152
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e035da887a72ca05d47f4e04f4fd021e19671d0
| 1,356
|
py
|
Python
|
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | 1
|
2022-02-21T18:55:34.000Z
|
2022-02-21T18:55:34.000Z
|
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from sahyun_bot.utils_logging import HttpDump
DEFAULT_RETRY_COUNT = 3
RETRY_ON_METHOD = frozenset([
'HEAD', 'GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'
])
RETRY_ON_STATUS = frozenset([
403, 429, 500, 502, 503, 504
])
class SessionFactory:
"""
Creates Session objects for use with the application. These objects will log HTTP information and retry requests.
Retry count is configurable.
All other kwargs will be passed into HttpDump.
"""
def __init__(self, retry_count: int = DEFAULT_RETRY_COUNT, **dump_kwargs):
self.__dump = HttpDump(**dump_kwargs)
self.__retry_count = max(0, retry_count) or DEFAULT_RETRY_COUNT
def with_retry(self, session: Session = None) -> Session:
session = session or Session()
session.hooks['response'] = [self.__dump.all]
retry = Retry(
total=self.__retry_count,
connect=self.__retry_count,
read=self.__retry_count,
method_whitelist=RETRY_ON_METHOD,
status_forcelist=RETRY_ON_STATUS,
backoff_factor=1
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
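# Illustrative usage sketch (not part of the original module), assuming
# HttpDump can be constructed without arguments and using a placeholder URL:
if __name__ == '__main__':
    factory = SessionFactory(retry_count=5)
    session = factory.with_retry()
    # GET requests through this session are logged by HttpDump and retried up
    # to five times on the configured methods/status codes.
    response = session.get('https://example.com/')
    print(response.status_code)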
| 30.133333
| 117
| 0.668142
| 163
| 1,356
| 5.282209
| 0.490798
| 0.116144
| 0.081301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021256
| 0.236726
| 1,356
| 44
| 118
| 30.818182
| 0.810628
| 0.140118
| 0
| 0.066667
| 0
| 0
| 0.048161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e03fc65e12b6935503f8e6630624fed1809bd0e
| 5,763
|
py
|
Python
|
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | null | null | null |
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | null | null | null |
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | 2
|
2018-10-01T14:08:25.000Z
|
2020-09-30T03:02:15.000Z
|
import json
import requests
from datetime import datetime, timedelta
from BookCirculation import BookCirculation
from DAO.AbstractDAO import AbstractDAO
from DAO.BookDAO import BookDAO
from DAO.UserDAO import UserDAO
from constant import *
from datetime import datetime
class BookCirculationDAO(AbstractDAO):
def __init__(self, parent = None):
AbstractDAO.__init__(self)
self.parent = parent
def borrow(self, user, books):
borrow_list = []
for book in books:
borrow_list.append({"user": {"user_id": user.user_id}, "book": {"book_id": book.book_id}})
# try:
path = '/borrow'
response = requests.post(self.server_ip + path, json=borrow_list, timeout = self.timeout, headers=self.get_authentication_header(path))
if response.status_code == 200: #Success
book_circulations = []
for raw_book_circulation in response.json():
                book_circulations.append(self.construct_book_circulation(raw_book_circulation))
if self.parent is not None:
due_time = book_circulations[0].due_time
print(str(due_time))
self.parent.borrowBookCallback(due_time)
def getAllCirculations(self):
try:
path = '/history'
response = requests.get(self.server_ip + path , timeout = self.timeout, headers=self.get_authentication_header(path))
circulations = None
if response.status_code == 200:
to_return = []
for raw_data in response.json():
to_return.append(self.construct_book_circulation(raw_data ))
return to_return
else:
print("Request failed")
except requests.exceptions.ConnectTimeout: # Connection timeout, use offline mockup data
print("Timeout")
return None
def getAllOnBorrowCirculation(self):
try:
path = '/borrow'
response = requests.get(self.server_ip + path , timeout = self.timeout, headers=self.get_authentication_header(path))
circulations = None
if response.status_code == 200:
to_return = []
for raw_data in response.json():
to_return.append(self.construct_book_circulation(raw_data ))
return to_return
else:
print("Request failed")
except requests.exceptions.ConnectTimeout: # Connection timeout, use offline mockup data
print("Timeout")
@staticmethod
def construct_book_circulation(arguments):
time_args = ["borrow_time", "due_time", "return_time"]
for time_arg in time_args:
if time_arg in arguments.keys() and arguments[time_arg] is not None:
arguments[time_arg] = datetime.strptime(arguments[time_arg], rfc_822_format)
arguments["book"] = BookDAO.constructBook(arguments["book"])
arguments["user"] = UserDAO.constructUser(arguments["user"])
return BookCirculation(**arguments)
def getBorrowIDFromBookID(self,bookID):
for circulation in self.getAllOnBorrowCirculation():
if(str(circulation.book.book_id) == str(bookID)):
return circulation.borrow_id
return None
def returnBook(self,borrowID):
path = '/return/' + str(borrowID)
response = requests.delete(self.server_ip + path, timeout=self.timeout, headers=self.get_authentication_header(path))
if response.status_code == 200: # Success
print(response.text)
pass
else:
print("Failed")
def searchHistory(self, keyword):
if keyword == "" or keyword.startswith(' '):
return self.getAllCirculations()
try:
path = '/history/search/' + keyword
response = requests.get(self.server_ip + path, timeout = self.timeout, headers=self.get_authentication_header(path))
circulations = None
if response.status_code == 200:
to_return = []
for raw_data in response.json():
to_return.append(self.construct_book_circulation(raw_data ))
return to_return
else:
print("Request failed")
except requests.exceptions.ConnectTimeout: # Connection timeout, use offline mockup data
print("Timeout")
return None
def searchOnBorrow(self, keyword):
if keyword == "" or keyword.startswith(' '):
return self.getAllOnBorrowCirculation()
try:
path = '/borrow/search/' + keyword
response = requests.get(self.server_ip + path , timeout = self.timeout, headers=self.get_authentication_header(path))
circulations = None
if response.status_code == 200:
to_return = []
for raw_data in response.json():
to_return.append(self.construct_book_circulation(raw_data ))
return to_return
else:
print("Request failed")
except requests.exceptions.ConnectTimeout: # Connection timeout, use offline mockup data
print("Timeout")
return None
def getOverdueCirculation(self):
overdueCirculations = []
for circulation in self.getAllOnBorrowCirculation():
if (circulation.due_time.replace(tzinfo=None) < datetime.now()):
overdueCirculations.append(circulation)
return overdueCirculations
if __name__ == "__main__":
bookCirculationDAO = BookCirculationDAO()
for circulation in bookCirculationDAO.getAllCirculations():
print(str(circulation))
| 39.472603
| 143
| 0.622245
| 587
| 5,763
| 5.930153
| 0.178876
| 0.027578
| 0.020684
| 0.027578
| 0.5237
| 0.511635
| 0.484631
| 0.484631
| 0.484631
| 0.456478
| 0
| 0.005366
| 0.288565
| 5,763
| 146
| 144
| 39.472603
| 0.843659
| 0.033837
| 0
| 0.5
| 0
| 0
| 0.04118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0.008197
| 0.07377
| 0
| 0.270492
| 0.098361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e0aba9a6fd99c2588436a872d706b50b1c4f2cd
| 1,612
|
py
|
Python
|
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from http.server import SimpleHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs
import time
class CCVRRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
# Add 'files' prefix
self.path = '/files' + self.path
super().do_GET()
	def do_HEAD(self):
		# Add 'files' prefix
		self.path = '/files' + self.path
		super().do_HEAD()
def do_POST(self):
content_length = int(self.headers['Content-Length'])
data = parse_qs(self.rfile.read(content_length).decode('utf-8'))
if not data.get('type') or not data.get('content'):
self.send_response(400, 'Bad request')
return
filename = 'files/'
if data.get('type')[0] == 'map':
filename += 'map.txt'
elif data.get('type')[0] == 'sensors':
filename += 'sensors.txt'
		else:
			self.send_response(400, 'Bad type')
			return
try:
dataFile = open(filename, 'w')
dataFile.write(data.get('content')[0])
dataFile.close()
self.send_response(200, 'OK')
except Exception as e:
print('Error writing file:', e)
self.send_response(500, 'Error writing file')
def run(port = 8080, hostName = ''):
server_address = (hostName, port)
server = HTTPServer(server_address, CCVRRequestHandler)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, port))
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, port))
if __name__ == "__main__":
from sys import argv
if len(argv) == 3:
run(port = int(argv[1]), hostName = str(argv[2]))
elif len(argv) == 2:
run(port = int(argv[1]))
else:
run()
| 25.587302
| 66
| 0.673077
| 224
| 1,612
| 4.732143
| 0.415179
| 0.033019
| 0.060377
| 0.033962
| 0.171698
| 0.101887
| 0.101887
| 0.101887
| 0.101887
| 0.101887
| 0
| 0.019231
| 0.16129
| 1,612
| 62
| 67
| 26
| 0.764793
| 0.036601
| 0
| 0.163265
| 0
| 0
| 0.128387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0.020408
| 0.081633
| 0
| 0.204082
| 0.061224
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e0adca23e72763263f72a46a3ff5aad270ff8c2
| 4,907
|
py
|
Python
|
dags/dag_update.py
|
alyildiz/btc_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | 5
|
2021-09-06T08:42:02.000Z
|
2021-11-15T15:04:57.000Z
|
dags/dag_update.py
|
alyildiz/sncf_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | null | null | null |
dags/dag_update.py
|
alyildiz/sncf_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.docker_operator import DockerOperator
from docker.types import Mount
default_args = {
"owner": "airflow",
"description": "Use of the DockerOperator",
"depend_on_past": False,
"start_date": datetime(2021, 5, 1),
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
BASE_DIR = "/home/baris/PROJECTS/sncf_forecast/"
dic_env = {
"API_KEY": os.environ["API_KEY"],
"API_KEY_SECRET": os.environ["API_KEY_SECRET"],
"ACCESS_TOKEN": os.environ["ACCESS_TOKEN"],
"ACCESS_TOKEN_SECRET": os.environ["ACCESS_TOKEN_SECRET"],
"MONGODB_HOST": os.environ["MONGODB_HOST"],
"MONGODB_PORT": os.environ["MONGODB_PORT"],
"MONGO_INITDB_ROOT_USERNAME": os.environ["MONGO_INITDB_ROOT_USERNAME"],
"MONGO_INITDB_ROOT_PASSWORD": os.environ["MONGO_INITDB_ROOT_PASSWORD"],
}
with DAG("daily_update_new", default_args=default_args, schedule_interval="0 2 * * *", catchup=False) as dag:
update_db = DockerOperator(
task_id="task_____daily_update_dbmongo",
image="sncf_forecast_update",
environment=dic_env,
container_name="task_____daily_update_dbmongo",
api_version="auto",
auto_remove=True,
command="python3 /workdir/update.py",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "backend/update", target="/workdir", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_lstm = DockerOperator(
task_id="task_____daily_update_lstm",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_lstm",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m lstm",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_baseline = DockerOperator(
task_id="task_____daily_update_baseline",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_baseline",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m baseline",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_autoencoder = DockerOperator(
task_id="task_____daily_update_autoencoder",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_autoencoder",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m autoencoder",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_db >> update_lstm
update_db >> update_baseline
update_db >> update_autoencoder
| 41.584746
| 109
| 0.651314
| 579
| 4,907
| 5.193437
| 0.193437
| 0.04423
| 0.08979
| 0.107749
| 0.699036
| 0.683073
| 0.636515
| 0.634187
| 0.604589
| 0.588294
| 0
| 0.003587
| 0.204606
| 4,907
| 117
| 110
| 41.940171
| 0.766846
| 0
| 0
| 0.518519
| 0
| 0
| 0.352558
| 0.144691
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.009259
| 0.046296
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e105c7bee23ddd23731ff6b0bc65a97faa40678
| 2,536
|
py
|
Python
|
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | 7
|
2018-04-28T17:25:25.000Z
|
2021-08-15T17:52:11.000Z
|
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | null | null | null |
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | 2
|
2019-10-30T03:40:50.000Z
|
2022-01-02T05:44:33.000Z
|
# -*- coding: utf-8 -*-
"""
This script shows how to embed the animation into a
background image (it's also possible to embed the animation
into another animation, but that's too complicated to implement
in a simple program ...)
"""
from colorsys import hls_to_rgb
import gifmaze as gm
from gifmaze.algorithms import wilson, bfs
from gifmaze.utils import generate_text_mask
# firstly define the size and color_depth of the image.
width, height = 600, 400
color_depth = 8
# define a surface to draw on.
surface = gm.GIFSurface.from_image('teacher.png', color_depth)
# set the 0-th color to be the same with the blackboard's.
palette = [52, 51, 50, 200, 200, 200, 255, 0, 255]
for i in range(256):
rgb = hls_to_rgb((i / 360.0) % 1, 0.5, 1.0)
palette += [int(round(255 * x)) for x in rgb]
surface.set_palette(palette)
# next define an animation environment to run the algorithm.
anim = gm.Animation(surface)
# set the speed, delay, and transparent color we want.
anim.set_control(speed=50, delay=2, trans_index=3)
# add a maze instance.
mask = generate_text_mask(surface.size, 'UST', 'ubuntu.ttf', 350)
# specify the region that where the animation is embedded.
left, top, right, bottom = 66, 47, 540, 343
maze = anim.create_maze_in_region(cell_size=4,
region=(left, top, right, bottom),
mask=mask)
anim.pad_delay_frame(100)
# paint the blackboard
surface.rectangle(left, top, right - left + 1, bottom - top + 1, 0)
# in the first algorithm only 4 colors occur in the image, so we can use
# a smaller minimum code length, this can help reduce the file size significantly.
surface.set_lzw_compress(2)
# pad one second delay, get ready!
anim.pad_delay_frame(100)
# the animation runs here.
wilson(maze, root=(0, 0))
# pad three seconds delay to see the result clearly.
anim.pad_delay_frame(300)
# now we run the maze solving algorithm.
# this time we use full 256 colors, hence the minimum code length is 8.
surface.set_lzw_compress(8)
# the tree and wall are unchanged throughout the maze solving algorithm hence
# it's safe to use 0 as the transparent color and color the wall and tree transparent.
anim.set_colormap({0: 0, 1: 0, 2: 2, 3: 3})
anim.set_control(speed=30, delay=5, trans_index=0)
# run the maze solving algorithm.
bfs(maze,
start=(0, 0),
end=(maze.size[0] - 1, maze.size[1] - 1))
# pad five seconds delay to see the path clearly.
anim.pad_delay_frame(500)
# save the result.
surface.save('wilson_bfs.gif')
surface.close()
| 31.308642
| 86
| 0.714117
| 429
| 2,536
| 4.13986
| 0.414918
| 0.027027
| 0.027027
| 0.038288
| 0.127252
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051841
| 0.18612
| 2,536
| 80
| 87
| 31.7
| 0.808624
| 0.473975
| 0
| 0.058824
| 0
| 0
| 0.029096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e105caf515da97595cf131c9228511ab5a47c2b
| 313
|
py
|
Python
|
2-mouth02/socket/communnication.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | 4
|
2021-02-01T10:28:11.000Z
|
2021-02-01T10:34:40.000Z
|
2-mouth02/socket/communnication.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
2-mouth02/socket/communnication.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
from socket import *
a=input("请输入IP地址:")
b=input("请输入端口:")
ADDR = ("176.17.12.178", 31414)
giao = socket(AF_INET, SOCK_DGRAM)
while 1:
m = input(":")
if not m:
break
else:
giao.sendto(m.encode(), ADDR)
d, a = giao.recvfrom(1024)
print("意思是", d.decode())
giao.close()
| 18.411765
| 37
| 0.5623
| 46
| 313
| 3.782609
| 0.76087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08547
| 0.252396
| 313
| 16
| 38
| 19.5625
| 0.65812
| 0
| 0
| 0
| 0
| 0
| 0.099042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e11beb96e30d1e453934e9af1acf5d6478cd742
| 244
|
py
|
Python
|
nice_paintig.py
|
rushdi21-meet/meet2019y1lab6
|
e87c2f04593c8f7e3a5c1c66260c49a3690db90c
|
[
"MIT"
] | null | null | null |
nice_paintig.py
|
rushdi21-meet/meet2019y1lab6
|
e87c2f04593c8f7e3a5c1c66260c49a3690db90c
|
[
"MIT"
] | null | null | null |
nice_paintig.py
|
rushdi21-meet/meet2019y1lab6
|
e87c2f04593c8f7e3a5c1c66260c49a3690db90c
|
[
"MIT"
] | null | null | null |
import turtle
color=["green", "yellow",'orange','blue','pruple','red','pink']
x=10
y= 270
i=0
turtle.bgcolor("black")
while True:
    turtle.color(color[i % len(color)])  # cycle through the colour list as i grows
turtle.forward(x)
turtle.left(y)
x+=10
y-=1
i+=1
turtle.mainloop()
| 16.266667
| 63
| 0.614754
| 39
| 244
| 3.846154
| 0.615385
| 0.146667
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054455
| 0.172131
| 244
| 14
| 64
| 17.428571
| 0.688119
| 0
| 0
| 0
| 0
| 0
| 0.159836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e1247da76756de4876b84765ac8609022ba7513
| 2,446
|
py
|
Python
|
enzynet/models.py
|
gdarkwah/enzynet
|
7367635ae73595822133577054743a4c4c327cf3
|
[
"MIT"
] | 189
|
2017-07-20T22:16:22.000Z
|
2022-02-21T17:57:41.000Z
|
enzynet/models.py
|
gdarkwah/enzynet
|
7367635ae73595822133577054743a4c4c327cf3
|
[
"MIT"
] | 16
|
2019-05-09T14:47:44.000Z
|
2021-09-19T00:25:59.000Z
|
enzynet/models.py
|
gdarkwah/enzynet
|
7367635ae73595822133577054743a4c4c327cf3
|
[
"MIT"
] | 93
|
2017-07-20T22:55:41.000Z
|
2022-03-12T19:42:14.000Z
|
"""Model definitions."""
# Authors: Afshine Amidi <lastname@mit.edu>
# Shervine Amidi <firstname@stanford.edu>
# MIT License
import numpy as np
from enzynet import constants
from keras import initializers
from keras import layers
from keras.layers import advanced_activations
from keras import models
from keras import regularizers
def enzynet(input_v_size: int, n_channels: int) -> models.Sequential:
"""Returns EnzyNet as a Keras model."""
# Parameters.
stddev_conv3d = np.sqrt(2.0/n_channels)
# Initialization.
model = models.Sequential()
# Add layers.
model.add(
layers.Conv3D(
filters=32,
kernel_size=9,
strides=2,
padding='valid',
kernel_initializer=initializers.RandomNormal(
mean=0.0,
stddev=stddev_conv3d * 9 ** (-3 / 2)),
bias_initializer='zeros',
kernel_regularizer=regularizers.l2(0.001),
bias_regularizer=None,
input_shape=(input_v_size,)*constants.N_DIMENSIONS + (n_channels,)))
model.add(advanced_activations.LeakyReLU(alpha=0.1))
model.add(layers.Dropout(rate=0.2))
model.add(
layers.Conv3D(
filters=64,
kernel_size=5,
strides=1,
padding='valid',
kernel_initializer=initializers.RandomNormal(
mean=0.0,
stddev=stddev_conv3d * 5 ** (-3 / 2)),
bias_initializer='zeros',
kernel_regularizer=regularizers.l2(0.001),
bias_regularizer=None))
model.add(advanced_activations.LeakyReLU(alpha=0.1))
model.add(layers.MaxPooling3D(pool_size=(2, 2, 2)))
model.add(layers.Dropout(rate=0.3))
model.add(layers.Flatten())
model.add(
layers.Dense(
units=128,
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01),
bias_initializer='zeros',
kernel_regularizer=regularizers.l2(0.001),
bias_regularizer=None))
model.add(layers.Dropout(rate=0.4))
model.add(
layers.Dense(
units=constants.N_CLASSES,
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01),
bias_initializer='zeros',
kernel_regularizer=regularizers.l2(0.001),
bias_regularizer=None))
model.add(layers.Activation('softmax'))
return model
| 28.114943
| 80
| 0.618561
| 278
| 2,446
| 5.316547
| 0.294964
| 0.064953
| 0.094723
| 0.110961
| 0.581867
| 0.512855
| 0.479026
| 0.479026
| 0.479026
| 0.479026
| 0
| 0.040314
| 0.269828
| 2,446
| 86
| 81
| 28.44186
| 0.787234
| 0.080131
| 0
| 0.5
| 0
| 0
| 0.016562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.12069
| 0
| 0.155172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e140c63bd33992dd5d90e07a79edb1db5f260ce
| 10,357
|
py
|
Python
|
FeatureCloud/api/cli/test/commands.py
|
FeatureCloud/FeatureCloud
|
3421bc9621201ae4a888192f09886122b0cb571a
|
[
"Apache-2.0"
] | null | null | null |
FeatureCloud/api/cli/test/commands.py
|
FeatureCloud/FeatureCloud
|
3421bc9621201ae4a888192f09886122b0cb571a
|
[
"Apache-2.0"
] | null | null | null |
FeatureCloud/api/cli/test/commands.py
|
FeatureCloud/FeatureCloud
|
3421bc9621201ae4a888192f09886122b0cb571a
|
[
"Apache-2.0"
] | null | null | null |
import os
import click
import requests
from FeatureCloud.api.imp.exceptions import FCException
from FeatureCloud.api.imp.test import commands
from FeatureCloud.api.cli.test.workflow.commands import workflow
@click.group("test")
def test() -> None:
"""Testbed related commands"""
test.add_command(workflow)
@test.command('help')
def help():
_, msg = commands.help()
click.echo(msg)
@test.command('start')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance (e.g. featurecloud test start --controller-host=http://localhost:8000).',
required=True)
@click.option('--client-dirs', default='.,.',
help='Client directories separated by comma. The number of clients is based on the number of directories supplied here (e.g. `featurecloud test start --client-dirs=.,.,.,.` command will start 4 clients).',
required=True)
@click.option('--generic-dir', default='.',
help='Generic directory available for all clients. Content will be copied to the input folder of all '
'instances (e.g. featurecloud test start --generic-dir=.).',
required=True)
@click.option('--app-image', default='test_app',
help='The repository url of the app image (e.g. featurecloud test start --app-image=featurecloud.ai/test_app).',
required=True)
@click.option('--channel', default='local',
help='The communication channel to be used. Possible values: "local" or "internet" (e.g. featurecloud test start --channel=local).',
required=True)
@click.option('--query-interval', default=2,
help='The interval after how many seconds the status call will be performed (e.g. featurecloud test start --query-interval=2).',
required=True)
@click.option('--download-results',
help='A directory name where to download results. This will be created into /data/tests directory (e.g. featurecloud test start --download-results=./results).',
default='')
def start(controller_host: str, client_dirs: str, generic_dir: str, app_image: str, channel: str, query_interval,
download_results: str):
'''Starts testbed run with the specified parameters'''
try:
result = commands.start(controller_host, client_dirs, generic_dir, app_image, channel, query_interval,
download_results)
click.echo(f"Test id={result} started")
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('stop')
@click.option('--controller-host', default='http://localhost:8000',
help='Http address of your running controller instance (e.g. featurecloud test stop --controller-host=http://localhost:8000).',
required=True)
@click.option('--test-id', help='The test id of the test to be stopped. The test id is returned by the start command (e.g. featurecloud test stop --test-id=1).')
def stop(controller_host: str, test_id: str or int):
'''Stops test with specified test id'''
try:
result = commands.stop(controller_host, test_id)
click.echo(f"Test id={result} stopped")
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('delete')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance. (e.g. featurecloud test delete all --controller-host=http://localhost:8000)',)
@click.option('--test-id', help='The test id of the test to be deleted. The test id is returned by the start command. '
                                'To delete all tests, omit this option and use "delete all".')
@click.argument('all', type=str, nargs=1, required=False)
def delete(controller_host: str, test_id: str or int, all: str):
'''
Deletes test with specified id or alternatively, deletes all tests
ALL - delete all tests
Examples:
featurecloud test delete --test-id=1
featurecloud test delete all
'''
try:
result = commands.delete(controller_host, test_id, all)
if all is not None:
if all.lower() == 'all':
click.echo(f"All tests deleted")
else:
click.echo(f'Wrong parameter {all}')
else:
click.echo(f"Test id={result} deleted")
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('list')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance (e.g. featurecloud test list --controller-host=http://localhost:8000).',
required=True)
@click.option('--format', help='Format of the test list. Possible options: json or dataframe (e.g. featurecloud test list --format=dataframe).', required=True, default='dataframe')
def list(controller_host: str, format: str):
'''List all tests'''
try:
result = commands.list(controller_host, format)
if len(result) == 0:
click.echo('No tests available')
else:
click.echo(result)
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('info')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance (e.g. featurecloud test info --controller-host=http://localhost:8000).',
required=True)
@click.option('--test-id', help='Test id to get info about (e.g. featurecloud test info --test-id=1).', required=True)
@click.option('--format', help='Format of the test info. Possible values: json or dataframe (e.g. featurecloud test info --format=dataframe).', required=True, default='dataframe')
def info(controller_host: str, test_id: str or int, format: str):
'''Get information about a running test'''
try:
result = commands.info(controller_host, test_id, format)
click.echo(result)
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('traffic')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance (e.g. featurecloud test traffic --controller-host=http://localhost:8000).',
required=True)
@click.option('--test-id', help='The test id to get traffic info about (e.g. featurecloud test traffic --test-id=1).')
@click.option('--format', help='Format of the test traffic. Possible values: json or dataframe (e.g. featurecloud test traffic --format=dataframe).', required=True, default='dataframe')
def traffic(controller_host: str, test_id: str or int, format: str):
'''Displays traffic information inside tests'''
try:
result = commands.traffic(controller_host, test_id, format)
click.echo(result)
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
@test.command('logs')
@click.option('--controller-host', default='http://localhost:8000',
help='Address of your running controller instance (e.g. featurecloud test logs --controller-host=http://localhost:8000).',
required=True)
@click.option('--test-id', help='The test id to get logs about (e.g. featurecloud test logs --test-id=1).', required=True)
@click.option('--instance-id', help='The instance id of the test client. Instance ids can be obtained by running the info command (e.g. featurecloud test logs --test-id=1 --instance-id=0).', required=True)
@click.option('--from-row', help='Get logs from a certain row number (e.g. featurecloud test logs --test-id=1 --instance-id=0 --from-row=0).', default='', required=True)
def logs(controller_host: str, test_id: str or int, instance_id: str or int, from_row: str):
'''Get logs from test client'''
try:
result = commands.logs(controller_host, test_id, instance_id, from_row)
log_lines = ""
for line in result:
log_lines += str(line) + os.linesep
click.echo(log_lines)
except requests.exceptions.InvalidSchema:
click.echo(f'No connection adapters were found for {controller_host}')
except requests.exceptions.MissingSchema:
click.echo(f' Invalid URL {controller_host}: No scheme supplied. Perhaps you meant http://{controller_host}?')
except FCException as e:
click.echo(f'Error: {e}')
if __name__ == "__main__":
test()
| 51.527363
| 220
| 0.663609
| 1,334
| 10,357
| 5.096702
| 0.134183
| 0.100897
| 0.038241
| 0.058244
| 0.624504
| 0.582144
| 0.564495
| 0.541697
| 0.518459
| 0.473746
| 0
| 0.00867
| 0.209327
| 10,357
| 200
| 221
| 51.785
| 0.82159
| 0.03949
| 0
| 0.464968
| 0
| 0.133758
| 0.46803
| 0.011466
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057325
| false
| 0
| 0.038217
| 0
| 0.095541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e14c4fe464f76c3e655c88c87bd66bc84933f25
| 4,188
|
py
|
Python
|
axi_plot/utils.py
|
zoso95/axi_plot
|
1a8c1f601c75e149d60377ccc4a437c33b3620bb
|
[
"MIT"
] | null | null | null |
axi_plot/utils.py
|
zoso95/axi_plot
|
1a8c1f601c75e149d60377ccc4a437c33b3620bb
|
[
"MIT"
] | null | null | null |
axi_plot/utils.py
|
zoso95/axi_plot
|
1a8c1f601c75e149d60377ccc4a437c33b3620bb
|
[
"MIT"
] | null | null | null |
import subprocess
import logging
import os, time
from pathlib import Path
from shutil import copyfile
import pandas as pd
from datetime import datetime
def estimate_time(filename, config, layer=None):
base_commands = ['axicli', filename, '--config', config]
end_command = ['-vTC']
if layer is None:
process = subprocess.run(base_commands+end_command, stdout=subprocess.PIPE, universal_newlines=True)
else:
commands = base_commands + ['--mode', 'layers', '--layer', str(layer)] + end_command
process = subprocess.run(commands, stdout=subprocess.PIPE, universal_newlines=True)
return process.stdout
def plot(filename, config, checkpoint_file, layer=None):
base_commands = ['axicli', filename, '--config', config]
end_commands = ['-o', checkpoint_file]
if layer is None:
commands = base_commands + end_commands
else:
commands = base_commands + ['--mode', 'layers', '--layer', str(layer)] + end_commands
process = subprocess.run(commands, stdout=subprocess.PIPE, universal_newlines=True)
return process.stdout
def res_plot(filename, config, checkpoint_file):
"""
base_commands = ['axicli', filename, '--config', config, '--mode', 'res_plot']
end_commands = ['-o', checkpoint_file]
commands = base_commands + end_commands
process = subprocess.run(commands, stdout=subprocess.PIPE, universal_newlines=True)
return process.stdout
"""
    raise NotImplementedError("res_plot is currently disabled; see the commented-out implementation in the docstring above.")
def toggle_pen(config):
process = subprocess.run(['axicli', '-mtoggle', '--config', config], stdout=subprocess.PIPE, universal_newlines=True)
return process.stdout
def return_home(filename):
process = subprocess.run(['axicli', filename, '--mode', 'res_home'], stdout=subprocess.PIPE, universal_newlines=True)
return process.stdout
def backup_drawing(file):
"""
    Check whether $PLOTTER_BACKUP exists. If it does, copy the file over
    (unless it is already there) and record in the print logs that we are printing it.
"""
if 'PLOTTER_BACKUP' in os.environ:
logging.info("backing up {}".format(file))
filename = os.path.basename(file)
backup_dir = os.path.join(os.environ.get('PLOTTER_BACKUP'))
backup_path = os.path.join(backup_dir, filename)
if not os.path.exists(backup_path):
copyfile(file, backup_path)
print_logs = os.path.join(backup_dir, "print_logs.csv")
if os.path.exists(print_logs):
logs = pd.read_csv(print_logs)
else:
logs = pd.DataFrame({})
df = pd.DataFrame([{'name':filename, 'time_printed':datetime.now().strftime('%Y-%m-%d %H:%M')}], columns=['name', 'time_printed'])
logs = logs.append(df, sort=False)
logs.to_csv(print_logs, index=False)
else:
logging.info("Skipping backup for {}, no $PLOTTER_BACKUP path given".format(file))
def get_checkpoint_file(file, tmp_folder="tmp"):
filename = os.path.basename(file)
tmp_dir = os.path.join(os.getcwd(), tmp_folder)
Path(tmp_dir).mkdir(parents=True, exist_ok=True)
temp_file = os.path.join(tmp_dir, filename)
logging.info("making tempfile {}".format(temp_file))
now = time.time()
# delete files older than a week
for f in os.listdir(tmp_dir):
if os.stat(os.path.join(tmp_dir, f)).st_mtime < now - 7 * 86400:
os.remove(os.path.join(tmp_dir, f))
return temp_file
def get_checkpoint_and_new_checkpoint(file, tmp_folder="tmp"):
checkpoint = get_checkpoint_file(file, tmp_folder)
active_checkpoint = "{}-active".format(checkpoint)
os.rename(checkpoint, active_checkpoint)
return active_checkpoint, checkpoint
def clean_tmp_file(file):
try:
os.remove(file)
    except OSError:
logging.warning("Could not delete temp file {}".format(file))
def get_config_names(config_folder = 'configs'):
dir = os.path.join(os.getcwd(), config_folder)
configs = []
for file in os.listdir(dir):
configs.append(os.path.basename(file))
return configs
def get_full_config_path(config, config_folder = 'configs'):
dir = os.path.join(os.getcwd(), config_folder)
return os.path.join(dir, config)
| 36.417391
| 138
| 0.680755
| 555
| 4,188
| 4.976577
| 0.252252
| 0.032585
| 0.036206
| 0.062998
| 0.433744
| 0.330558
| 0.260319
| 0.260319
| 0.260319
| 0.224113
| 0
| 0.001766
| 0.188634
| 4,188
| 114
| 139
| 36.736842
| 0.811065
| 0.10554
| 0
| 0.222222
| 0
| 0
| 0.093099
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135802
| false
| 0
| 0.08642
| 0
| 0.320988
| 0.061728
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e14f76f2adf0f315a94c191c5946f1de65d9fa9
| 5,258
|
py
|
Python
|
scripts/regions_optimize.py
|
jason-neal/Starfish
|
4ffa45e0190fb6f3262511d57d1a563e5ee711de
|
[
"BSD-3-Clause"
] | 1
|
2017-07-10T00:06:36.000Z
|
2017-07-10T00:06:36.000Z
|
scripts/regions_optimize.py
|
jason-neal/Starfish
|
4ffa45e0190fb6f3262511d57d1a563e5ee711de
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/regions_optimize.py
|
jason-neal/Starfish
|
4ffa45e0190fb6f3262511d57d1a563e5ee711de
|
[
"BSD-3-Clause"
] | 5
|
2016-06-11T09:48:16.000Z
|
2019-08-07T19:52:41.000Z
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(prog="region_optimize.py", description="Find the kernel parameters for Gaussian region zones.")
parser.add_argument("spectrum", help="JSON file containing the data, model, and residual.")
parser.add_argument("--sigma0", type=float, default=2, help="(AA) to use in fitting")
args = parser.parse_args()
import json
import numpy as np
from scipy.optimize import fmin
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
import Starfish
from Starfish.model import PhiParam
from Starfish.covariance import get_dense_C, make_k_func
from Starfish import constants as C
# Load the spectrum and then take the data products.
f = open(args.spectrum, "r")
read = json.load(f) # read is a dictionary
f.close()
wl = np.array(read["wl"])
# data_full = np.array(read["data"])
# model = np.array(read["model"])
resid = np.array(read["resid"])
sigma = np.array(read["sigma"])
spectrum_id = read["spectrum_id"]
order = read["order"]
fname = Starfish.specfmt.format(spectrum_id, order) + "regions.json"
f = open(fname, "r")
read = json.load(f) # read is a dictionary
f.close()
mus = np.array(read["mus"])
assert spectrum_id == read["spectrum_id"], "Spectrum/Order mismatch"
assert order == read["order"], "Spectrum/Order mismatch"
# Load the guesses for the global parameters from the .json
# If the file exists, optionally initialize to the Chebyshev values
fname = Starfish.specfmt.format(spectrum_id, order) + "phi.json"
try:
phi = PhiParam.load(fname)
except FileNotFoundError:
print("No order parameter file found (e.g. sX_oXXphi.json), please run `star.py --initPhi` first.")
raise
# Purposely set phi.regions to None for this exercise, since we don't care about existing regions and likely want to overwrite them.
phi.regions = None
def optimize_region_residual(wl, residuals, sigma, mu):
'''
Determine the optimal parameters for the line kernels by fitting a Gaussian directly to the residuals.
'''
    # Using sigma0, truncate the wavelength vector and residuals to include
    # only those portions that fall in the range [mu - sigma0, mu + sigma0]
ind = (wl > mu - args.sigma0) & (wl < mu + args.sigma0)
wl = wl[ind]
R = residuals[ind]
sigma = sigma[ind]
sigma_mat = phi.sigAmp * sigma**2 * np.eye(len(wl))
max_r = 6.0 * phi.l # [km/s]
k_func = make_k_func(phi)
# Use the full covariance matrix when doing the likelihood eval
CC = get_dense_C(wl, k_func=k_func, max_r=max_r) + sigma_mat
factor, flag = cho_factor(CC)
logdet = np.sum(2 * np.log((np.diag(factor))))
rr = C.c_kms/mu * np.abs(mu - wl) # Km/s
def fprob(p):
# The likelihood function
# Requires sign about amplitude, so we can't use log.
amp, sig = p
gauss = amp * np.exp(-0.5 * rr**2/sig**2)
r = R - gauss
# Create a Gaussian using these parameters, and re-evaluate the residual
lnprob = -0.5 * (np.dot(r, cho_solve((factor, flag), r)) + logdet)
return lnprob
par = Starfish.config["region_params"]
p0 = np.array([10**par["logAmp"], par["sigma"]])
f = lambda x: -fprob(x)
try:
p = fmin(f, p0, maxiter=10000, maxfun=10000, disp=False)
# print(p)
return p
except np.linalg.linalg.LinAlgError:
return p0
def optimize_region_covariance(wl, residuals, sigma, mu):
'''
Determine the optimal parameters for the line kernels by actually using a chunk of the covariance matrix.
Note this actually uses the assumed global parameters.
'''
    # Using sigma0, truncate the wavelength vector and residuals to include
    # only those portions that fall in the range [mu - sigma0, mu + sigma0]
ind = (wl > mu - args.sigma0) & (wl < mu + args.sigma0)
wl = wl[ind]
R = residuals[ind]
sigma = sigma[ind]
sigma_mat = phi.sigAmp * sigma**2 * np.eye(len(wl))
max_rl = 6.0 * phi.l # [km/s]
# Define a probability function for the residuals
def fprob(p):
logAmp, sigma = p
# set phi.regions = p
phi.regions = np.array([logAmp, mu, sigma])[np.newaxis, :]
max_rr = 4.0 * sigma
max_r = max(max_rl, max_rr)
k_func = make_k_func(phi)
CC = get_dense_C(wl, k_func=k_func, max_r=max_r) + sigma_mat
factor, flag = cho_factor(CC)
logdet = np.sum(2 * np.log((np.diag(factor))))
lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)
# print(p, lnprob)
return lnprob
par = Starfish.config["region_params"]
p0 = np.array([par["logAmp"], par["sigma"]])
f = lambda x: -fprob(x)
try:
p = fmin(f, p0, maxiter=10000, maxfun=10000)
print(p)
return p
except np.linalg.linalg.LinAlgError:
return p0
# Regions will be a 2D array with shape (nregions, 3)
regions = []
for mu in mus:
# amp, sig = optimize_region_residual(wl, resid, sigma, mu)
# regions.append([np.log10(np.abs(amp)), mu, sig])
logAmp, sig = optimize_region_covariance(wl, resid, sigma, mu)
regions.append([logAmp, mu, sig])
# Add these values back to the phi parameter file and save
phi.regions = np.array(regions)
phi.save()
| 30.929412
| 135
| 0.659947
| 801
| 5,258
| 4.25593
| 0.292135
| 0.020534
| 0.019361
| 0.016427
| 0.424758
| 0.410678
| 0.379583
| 0.355529
| 0.355529
| 0.355529
| 0
| 0.014081
| 0.216622
| 5,258
| 169
| 136
| 31.112426
| 0.813547
| 0.288513
| 0
| 0.410526
| 0
| 0.010526
| 0.112259
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 1
| 0.042105
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e15b565f2c5c8e4188c7106981c4468935c3719
| 2,261
|
py
|
Python
|
Bases/download_bases.py
|
lucas26xd/Estudo-Dados-COVID19-BR
|
cba0278e1cbd2464b4b4c7faa866d05d9968247d
|
[
"MIT"
] | null | null | null |
Bases/download_bases.py
|
lucas26xd/Estudo-Dados-COVID19-BR
|
cba0278e1cbd2464b4b4c7faa866d05d9968247d
|
[
"MIT"
] | null | null | null |
Bases/download_bases.py
|
lucas26xd/Estudo-Dados-COVID19-BR
|
cba0278e1cbd2464b4b4c7faa866d05d9968247d
|
[
"MIT"
] | null | null | null |
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
def get_urls_and_last_updates():  # Gets the URL and last update date of each dataset available on OpenDataSUS
urls = list()
last_ups = list()
try:
html = BeautifulSoup(urlopen('https://opendatasus.saude.gov.br/dataset/casos-nacionais', timeout=1).read(), 'html.parser')
p = 0
anchor_data = html.select('a.resource-url-analytics')
anchor_last = html.select('a.heading')
for url_data, last_up in zip(anchor_data, anchor_last):
if 'pretty' not in url_data['href']:
urls.append(url_data['href'])
html = BeautifulSoup(urlopen(f'https://opendatasus.saude.gov.br{last_up["href"]}', timeout=1).read(), 'html.parser')
last_ups.append(html.select('td')[0].text)
p += 1
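            # Render a simple text progress bar from full-block characters.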
print('\r[', u'\u2588' * p, ' ' * (len(anchor_data) - p), f'] - {p*100/len(anchor_data):.2f}%', end='')
print()
except Exception as e:
print(e)
finally:
return urls, last_ups
def download(url_base):  # Downloads the dataset passed as a parameter and saves it in the Bases folder
r = requests.get(url_base, stream=True)
if r.status_code == requests.codes.OK:
arq = url_base[url_base.rfind("/") + 1:]
with open(f'./Bases/{arq}', 'wb') as file:
file_len = int(r.headers.get('content-length'))
p = 0
for data in r.iter_content(chunk_size=1024):
p += len(data)
print('\r[', u'\u2588' * int(30 * p / file_len), ' ' * (30 - int(30 * p / file_len)), end='] - ')
print(f'{p * 100 / file_len:.2f}%', end='')
file.write(data)
print()
else:
r.raise_for_status()
print('Fetching download information for the datasets...')
urls_bases, last_updates = get_urls_and_last_updates()
if len(urls_bases) > 0:
    print('Starting downloads...')
progress = 0
for url in urls_bases:
        print(f'Downloading {url[url.rfind("/") + 1:]} - {last_updates[progress]} - ({progress + 1:0>2}/{len(urls_bases)})')
download(url)
progress += 1
else:
    print('Failed to retrieve the dataset URLs!')
| 39.666667
| 132
| 0.587793
| 310
| 2,261
| 4.154839
| 0.387097
| 0.034161
| 0.015528
| 0.021739
| 0.127329
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024522
| 0.260504
| 2,261
| 56
| 133
| 40.375
| 0.745813
| 0.066342
| 0
| 0.122449
| 0
| 0.020408
| 0.236243
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.061224
| 0
| 0.122449
| 0.204082
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e16ddbf593ddf87a424ef3546058ed337f938d3
| 10,699
|
py
|
Python
|
rax/_src/utils_test.py
|
google/rax
|
d6370d574246db9fb0566317f7cac8cd331526d7
|
[
"Apache-2.0"
] | 19
|
2022-01-25T12:37:51.000Z
|
2022-03-30T17:12:45.000Z
|
rax/_src/utils_test.py
|
google/rax
|
d6370d574246db9fb0566317f7cac8cd331526d7
|
[
"Apache-2.0"
] | 1
|
2022-02-08T23:02:42.000Z
|
2022-02-08T23:02:42.000Z
|
rax/_src/utils_test.py
|
google/rax
|
d6370d574246db9fb0566317f7cac8cd331526d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
"""Tests for rax._src.utils."""
import doctest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
import rax
from rax._src import utils
class NormalizeProbabilitiesTest(absltest.TestCase):
def test_sums_to_one_for_given_axis(self):
arr = jnp.asarray([[0., 1., 2.], [3., 4., 5.]])
result1 = utils.normalize_probabilities(arr, axis=0)
result2 = utils.normalize_probabilities(arr, axis=1)
np.testing.assert_array_equal(
result1, jnp.asarray([[0., 1. / 5., 2. / 7.], [1., 4. / 5., 5. / 7.]]))
np.testing.assert_array_equal(
result2,
jnp.asarray([[0., 1. / 3., 2. / 3.], [3. / 12., 4. / 12., 5. / 12.]]))
def test_sums_to_one_for_default_axis(self):
arr = jnp.asarray([[0., 1., 2.], [3., 4., 5.]])
result = utils.normalize_probabilities(arr)
np.testing.assert_array_equal(
result,
jnp.asarray([[0., 1. / 3., 2. / 3.], [3. / 12., 4. / 12., 5. / 12.]]))
def test_handles_where(self):
arr = jnp.asarray([[0., 1., 2.], [3., 4., 5.]])
where = jnp.asarray([[True, False, True], [True, True, True]])
result = utils.normalize_probabilities(arr, where, axis=1)
np.testing.assert_array_equal(
jnp.sum(result, axis=1, where=where), jnp.asarray([1., 1.]))
def test_correctly_sets_all_zeros(self):
arr = jnp.asarray([[0., 0., 0.], [0., 0., 0.]])
result1 = utils.normalize_probabilities(arr, axis=0)
result2 = utils.normalize_probabilities(arr, axis=1)
np.testing.assert_array_equal(
jnp.sum(result1, axis=0), jnp.asarray([1., 1., 1.]))
np.testing.assert_array_equal(
jnp.sum(result2, axis=1), jnp.asarray([1., 1.]))
def test_correctly_handles_all_masked(self):
arr = jnp.asarray([[2., 1., 3.], [1., 1., 1.]])
where = jnp.asarray([[False, False, False], [False, False, False]])
result1 = utils.normalize_probabilities(arr, where, axis=0)
result2 = utils.normalize_probabilities(arr, where, axis=1)
np.testing.assert_array_equal(
jnp.sum(result1, axis=0), jnp.asarray([1., 1., 1.]))
np.testing.assert_array_equal(
jnp.sum(result2, axis=1), jnp.asarray([1., 1.]))
class LogCumsumExp(absltest.TestCase):
def test_computes_logcumsumexp(self):
x = jnp.asarray([-4., 5., 2.3, 0.])
result = utils.logcumsumexp(x)
np.testing.assert_array_equal(
result,
jnp.asarray([
jnp.log(jnp.exp(-4.)),
jnp.log(jnp.exp(-4.) + jnp.exp(5.)),
jnp.log(jnp.exp(-4.) + jnp.exp(5.) + jnp.exp(2.3)),
jnp.log(jnp.exp(-4.) + jnp.exp(5.) + jnp.exp(2.3) + jnp.exp(0.))
]))
def test_computes_over_specified_axis(self):
x = jnp.asarray([[-4., 2.3, 0.], [2.2, -1.2, 1.1]])
result = utils.logcumsumexp(x, axis=-1)
np.testing.assert_array_equal(result[0, :], utils.logcumsumexp(x[0, :]))
np.testing.assert_array_equal(result[1, :], utils.logcumsumexp(x[1, :]))
result = utils.logcumsumexp(x, axis=0)
np.testing.assert_array_equal(result[:, 0], utils.logcumsumexp(x[:, 0]))
np.testing.assert_array_equal(result[:, 1], utils.logcumsumexp(x[:, 1]))
np.testing.assert_array_equal(result[:, 2], utils.logcumsumexp(x[:, 2]))
def test_computes_reversed(self):
x = jnp.asarray([-4., 5., 2.3, 0.])
x_flipped = jnp.asarray([0., 2.3, 5., -4.])
result_reverse = utils.logcumsumexp(x, reverse=True)
result_flipped = jnp.flip(utils.logcumsumexp(x_flipped))
np.testing.assert_array_equal(result_reverse, result_flipped)
def test_computes_with_where_mask(self):
x = jnp.asarray([-4., 5., 2.3, 0.])
where = jnp.asarray([True, False, True, True])
x_masked = jnp.asarray([-4., 2.3, 0.])
result_where = utils.logcumsumexp(x, where=where)
result_masked = utils.logcumsumexp(x_masked)
np.testing.assert_array_equal(result_where[0], result_masked[0])
np.testing.assert_array_equal(result_where[2], result_masked[1])
np.testing.assert_array_equal(result_where[3], result_masked[2])
def test_handles_extreme_values(self):
x = jnp.asarray([-4., -2.1e26, 5., 3.4e38, 10., -2.99e26])
result = utils.logcumsumexp(x)
np.testing.assert_array_equal(
result, jnp.asarray([-4., -4., 5.0001235, 3.4e38, 3.4e38, 3.4e38]))
class SortByTest(absltest.TestCase):
def test_sorts_by_scores(self):
scores = jnp.asarray([0., 3., 1., 2.])
tensors_to_sort = [jnp.asarray([10., 13., 11., 12.])]
result = utils.sort_by(scores, tensors_to_sort)[0]
np.testing.assert_array_equal(result, jnp.asarray([13., 12., 11., 10.]))
def test_sorts_by_given_axis(self):
scores = jnp.asarray([[3., 1., 2.], [1., 5., 3.]])
tensors_to_sort = [jnp.asarray([[0., 1., 2.], [3., 4., 5.]])]
result_0 = utils.sort_by(scores, tensors_to_sort, axis=0)[0]
result_1 = utils.sort_by(scores, tensors_to_sort, axis=1)[0]
np.testing.assert_array_equal(result_0,
jnp.asarray([[0., 4., 5.], [3., 1., 2.]]))
np.testing.assert_array_equal(result_1,
jnp.asarray([[0., 2., 1.], [4., 5., 3.]]))
def test_sorts_multiple_tensors(self):
scores = jnp.asarray([0., 3., 1., 2.])
tensors_to_sort = [
jnp.asarray([10., 13., 11., 12.]),
jnp.asarray([50., 56., 52., 54.]),
jnp.asarray([75., 78., 76., 77.])
]
result = utils.sort_by(scores, tensors_to_sort)
np.testing.assert_array_equal(result[0], jnp.asarray([13., 12., 11., 10.]))
np.testing.assert_array_equal(result[1], jnp.asarray([56., 54., 52., 50.]))
np.testing.assert_array_equal(result[2], jnp.asarray([78., 77., 76., 75.]))
def test_places_masked_values_last(self):
scores = jnp.asarray([0., 3., 1., 2.])
tensors_to_sort = [jnp.asarray([10., 13., 11., 12.])]
where = jnp.asarray([True, True, False, False])
result = utils.sort_by(scores, tensors_to_sort, where=where)[0]
np.testing.assert_array_equal(result, jnp.asarray([13., 10., 12., 11.]))
def test_breaks_ties_randomly_when_key_is_provided(self):
scores = jnp.asarray([0., 1., 1., 2.])
tensors_to_sort = [jnp.asarray([10., 11.1, 11.2, 12.])]
key = jax.random.PRNGKey(4242)
key1, key2 = jax.random.split(key)
result1 = utils.sort_by(scores, tensors_to_sort, key=key1)[0]
result2 = utils.sort_by(scores, tensors_to_sort, key=key2)[0]
np.testing.assert_array_equal(result1, jnp.asarray([12., 11.2, 11.1, 10.]))
np.testing.assert_array_equal(result2, jnp.asarray([12., 11.1, 11.2, 10.]))
class RanksTest(absltest.TestCase):
def test_ranks_by_sorting_scores(self):
scores = jnp.asarray([[0., 1., 2.], [2., 1., 3.]])
ranks = utils.ranks(scores)
np.testing.assert_array_equal(ranks, jnp.asarray([[3, 2, 1], [2, 3, 1]]))
def test_ranks_along_given_axis(self):
scores = jnp.asarray([[0., 1., 2.], [1., 2., 0.]])
ranks = utils.ranks(scores, axis=0)
np.testing.assert_array_equal(ranks, jnp.asarray([[2, 2, 1], [1, 1, 2]]))
def test_ranks_with_ties_broken_randomly(self):
scores = jnp.asarray([2., 1., 1.])
key = jax.random.PRNGKey(1)
key1, key2 = jax.random.split(key)
ranks1 = utils.ranks(scores, key=key1)
ranks2 = utils.ranks(scores, key=key2)
np.testing.assert_array_equal(ranks1, jnp.asarray([1, 2, 3]))
np.testing.assert_array_equal(ranks2, jnp.asarray([1, 3, 2]))
class ApproxRanksTest(absltest.TestCase):
def test_computes_approx_ranks(self):
scores = jnp.asarray([-3., 1., 2.])
ranks = utils.approx_ranks(scores)
sigmoid = jax.nn.sigmoid
np.testing.assert_array_equal(
ranks,
jnp.asarray([
sigmoid(3. + 1.) + sigmoid(3. + 2.) + 1.0,
sigmoid(-1. - 3.) + sigmoid(-1. + 2.) + 1.0,
sigmoid(-2. - 3.) + sigmoid(-2. + 1.) + 1.0
]))
def test_maintains_order(self):
scores = jnp.asarray([-4., 1., -3., 2.])
ranks = utils.approx_ranks(scores)
true_ranks = utils.ranks(scores)
np.testing.assert_array_equal(jnp.argsort(ranks), jnp.argsort(true_ranks))
def test_computes_approx_ranks_with_where(self):
scores_without_where = jnp.asarray([3.33, 1.125])
scores = jnp.asarray([3.33, 2.5, 1.125])
where = jnp.asarray([True, False, True])
ranks = utils.approx_ranks(scores_without_where)
ranks_with_where = utils.approx_ranks(scores, where=where)
np.testing.assert_array_equal(
ranks, jnp.asarray([ranks_with_where[0], ranks_with_where[2]]))
class SafeReduceTest(absltest.TestCase):
def test_reduces_values_according_to_fn(self):
a = jnp.array([[3., 2.], [4.5, 1.2]])
res_mean = utils.safe_reduce(a, reduce_fn=jnp.mean)
res_sum = utils.safe_reduce(a, reduce_fn=jnp.sum)
res_none = utils.safe_reduce(a, reduce_fn=None)
np.testing.assert_allclose(res_mean, jnp.mean(a))
np.testing.assert_allclose(res_sum, jnp.sum(a))
np.testing.assert_allclose(res_none, a)
def test_reduces_values_with_mask(self):
a = jnp.array([[3., 2., 0.01], [4.5, 1.2, 0.9]])
where = jnp.array([[True, False, True], [True, True, False]])
res_mean = utils.safe_reduce(a, where=where, reduce_fn=jnp.mean)
res_sum = utils.safe_reduce(a, where=where, reduce_fn=jnp.sum)
res_none = utils.safe_reduce(a, where=where, reduce_fn=None)
np.testing.assert_allclose(res_mean, jnp.mean(a, where=where))
np.testing.assert_allclose(res_sum, jnp.sum(a, where=where))
np.testing.assert_allclose(res_none, jnp.where(where, a, 0.))
def test_reduces_mean_with_all_masked(self):
a = jnp.array([[3., 2., 0.01], [4.5, 1.2, 0.9]])
where = jnp.array([[False, False, False], [False, False, False]])
res_mean = utils.safe_reduce(a, where=where, reduce_fn=jnp.mean)
np.testing.assert_allclose(res_mean, jnp.array(0.))
def load_tests(loader, tests, ignore):
del loader, ignore # Unused.
tests.addTests(
doctest.DocTestSuite(
utils, extraglobs={
"jax": jax,
"jnp": jnp,
"rax": rax
}))
return tests
if __name__ == "__main__":
absltest.main()
| 34.291667
| 79
| 0.639873
| 1,623
| 10,699
| 4.041898
| 0.126309
| 0.092988
| 0.096037
| 0.106707
| 0.619512
| 0.575305
| 0.495274
| 0.426829
| 0.331402
| 0.262805
| 0
| 0.058144
| 0.183382
| 10,699
| 311
| 80
| 34.401929
| 0.692686
| 0.056173
| 0
| 0.239024
| 0
| 0
| 0.001687
| 0
| 0
| 0
| 0
| 0
| 0.204878
| 1
| 0.121951
| false
| 0
| 0.034146
| 0
| 0.190244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e182689577a11bad1e8f7437a3d622ced715f94
| 427
|
py
|
Python
|
examples/decorators.py
|
FusionSid/FusionSidAPI.py
|
e1b50622bf4fcec8265f8fd4e9b3ac79b580d286
|
[
"MIT"
] | 5
|
2022-03-05T23:29:33.000Z
|
2022-03-20T07:44:20.000Z
|
examples/decorators.py
|
FusionSid/FusionSidAPI.py
|
e1b50622bf4fcec8265f8fd4e9b3ac79b580d286
|
[
"MIT"
] | null | null | null |
examples/decorators.py
|
FusionSid/FusionSidAPI.py
|
e1b50622bf4fcec8265f8fd4e9b3ac79b580d286
|
[
"MIT"
] | null | null | null |
import asyncio
from fusionsid import Decorators
deco = Decorators
do_roast = deco.roast
@deco.compliment() # will give you a compliment before the function is run
@Decorators.fact() # you can just put the class name and use that instead of setting it to a var
@do_roast() # you can set it to a variable and use that
async def main():
print("Wassup")
loop = asyncio.new_event_loop()
loop.run_until_complete(main())
| 23.722222
| 97
| 0.744731
| 70
| 427
| 4.457143
| 0.657143
| 0.044872
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177986
| 427
| 17
| 98
| 25.117647
| 0.888889
| 0.400468
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e188c93ed7a3552c4548ac6fc5970107dcdbcdb
| 2,303
|
py
|
Python
|
configs/raubtierv2b/centripetalnet_hourglass104_mstest_16x6_210e_coco_raubtierv2b_2gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
configs/raubtierv2b/centripetalnet_hourglass104_mstest_16x6_210e_coco_raubtierv2b_2gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
configs/raubtierv2b/centripetalnet_hourglass104_mstest_16x6_210e_coco_raubtierv2b_2gpu.py
|
esf-bt2020/mmdetection
|
abc5fe060e0fcb716f845c85441be3741b22d3cf
|
[
"Apache-2.0"
] | null | null | null |
# Base configuration file
_base_ = '../centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py'
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=3,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1))
)
dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
samples_per_gpu=3, #default 6
workers_per_gpu=1, #default 3
train=dict(
img_prefix='customData/train/',
classes=classes,
ann_file='customData/train/_annotations.coco.json'),
val=dict(
img_prefix='customData/valid/',
classes=classes,
ann_file='customData/valid/_annotations.coco.json'),
test=dict(
img_prefix='customData/test/',
classes=classes,
ann_file='customData/test/_annotations.coco.json'))
#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #8 GPUs => 8*6=48
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #2 GPUs => 2*3=6 => 6/48= 1/8 cheetah
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x6=6)
evaluation = dict(classwise=True, interval=4, metric='bbox')
load_from = 'checkpoints/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth'
work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/centripetalnet_hourglass104_mstest_16x6_210e_coco_raubtierv2b_2gpu'
#http://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth
| 37.145161
| 191
| 0.691706
| 305
| 2,303
| 4.97377
| 0.409836
| 0.058009
| 0.105471
| 0.118655
| 0.452208
| 0.377719
| 0.333553
| 0.239947
| 0.239947
| 0.160844
| 0
| 0.101632
| 0.175423
| 2,303
| 61
| 192
| 37.754098
| 0.697209
| 0.190187
| 0
| 0.065217
| 0
| 0
| 0.307983
| 0.213053
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e1c92be5d3fa432577c6a625de6487e656413d6
| 3,175
|
py
|
Python
|
firecares/firestation/tests/test_feedback.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 12
|
2016-01-30T02:28:35.000Z
|
2019-05-29T15:49:56.000Z
|
firecares/firestation/tests/test_feedback.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 455
|
2015-07-27T20:21:56.000Z
|
2022-03-11T23:26:20.000Z
|
firecares/firestation/tests/test_feedback.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 14
|
2015-07-29T09:45:53.000Z
|
2020-10-21T20:03:17.000Z
|
import json
import mock
import os
from django.contrib.auth import get_user_model
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import Client
from firecares.firestation.models import FireDepartment, FireStation, DataFeedback
from firecares.firecares_core.tests.base import BaseFirecaresTestcase
User = get_user_model()
class FeedbackTests(BaseFirecaresTestcase):
@mock.patch('geopy.geocoders.base.urllib_urlopen')
def test_feedback_form(self, urllib_urlopen):
"""
Test the feedback form submission
"""
c = urllib_urlopen.return_value
c.read.return_value = open(os.path.join(os.path.dirname(__file__), 'mock/geocode.json')).read()
c.headers.getparam.return_value = 'utf-8'
c = Client()
with self.settings(DATA_FEEDBACK_EMAILS=(('Test Admin', 'admin@example.com'),)):
# Create fire department and fire station
fd = FireDepartment.objects.create(name='Fire Department 1')
fs = FireStation.create_station(department=fd, address_string='1', name='Fire Station 1')
feedback_url = reverse('firedepartment_data_feedback_slug', kwargs={'pk': fd.id, 'slug': fd.slug})
response = c.get(feedback_url)
self.assert_redirect_to_login(response)
# Test only post allowed
c.login(**self.non_admin_creds)
get_response = c.get(feedback_url)
self.assertEqual(get_response.status_code, 405)
# Test email sent
response = c.post(feedback_url, {
'department': fd.id,
'firestation': fs.id,
'user': self.non_admin_user.id,
'message': 'This is a test'
})
self.assertEqual(response.status_code, 201)
self.assertEqual(DataFeedback.objects.filter(department=fd, firestation=fs).count(), 1)
self.assertEqual(len(mail.outbox), 1)
self.assert_email_appears_valid(mail.outbox[0])
self.assertListEqual(mail.outbox[0].reply_to, ['non_admin@example.com'])
mail_body = mail.outbox[0].body
self.assertTrue(fd.name in mail_body)
self.assertTrue(fs.name in mail_body)
self.assertTrue(self.non_admin_user.username in mail_body)
self.assertTrue(self.non_admin_user.email in mail_body)
self.assertTrue('This is a test' in mail_body)
# Test without fire station
response = c.post(feedback_url, {
'department': fd.id,
'user': self.non_admin_user.id,
'message': 'This is a test'
})
self.assertEqual(len(mail.outbox), 2)
self.assert_email_appears_valid(mail.outbox[1])
self.assertTrue('Fire Station:' not in mail.outbox[1].body)
# Test invalid data
response = c.post(feedback_url, {
'department': fd.id,
'message': 'This is a test'
})
self.assertEqual(response.status_code, 400)
self.assertTrue('user' in json.loads(response.content))
| 42.333333
| 110
| 0.627402
| 381
| 3,175
| 5.065617
| 0.307087
| 0.036269
| 0.031088
| 0.033161
| 0.310881
| 0.277202
| 0.232642
| 0.194301
| 0.137306
| 0.095855
| 0
| 0.009032
| 0.267717
| 3,175
| 74
| 111
| 42.905405
| 0.821075
| 0.049449
| 0
| 0.245614
| 0
| 0
| 0.106689
| 0.029766
| 0
| 0
| 0
| 0
| 0.298246
| 1
| 0.017544
| false
| 0
| 0.157895
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e24e04ad5a6a1e6faafb25c71a578a2c2c42a6c
| 4,772
|
py
|
Python
|
api/api/endpoints/sensor_info.py
|
andschneider/ss_api
|
4ddf5cd60d5e0e87e7641e97c9fbe78965c4b522
|
[
"MIT"
] | null | null | null |
api/api/endpoints/sensor_info.py
|
andschneider/ss_api
|
4ddf5cd60d5e0e87e7641e97c9fbe78965c4b522
|
[
"MIT"
] | 2
|
2019-12-26T17:31:56.000Z
|
2020-01-06T19:45:05.000Z
|
api/api/endpoints/sensor_info.py
|
andschneider/soil_sense
|
4ddf5cd60d5e0e87e7641e97c9fbe78965c4b522
|
[
"MIT"
] | null | null | null |
import datetime
import json
from flask import Response, request, Blueprint
from flask_jwt_extended import jwt_required
from flask_restplus import Api, Namespace, Resource, reqparse
from sqlalchemy.exc import IntegrityError
from api.core.db_execptions import bad_db_response
from api.core.models import SensorInfoModel, SensorDataModel
from api import db
api = Namespace(
"sensor_info",
description="Sensor information: sensor id, plant name, and moisture alert level.",
)
post_args = reqparse.RequestParser()
post_args.add_argument("plant", type=str, required=True, help="Plant name.")
post_args.add_argument(
"alert_level", type=int, required=True, help="Alert level for moisture."
)
@api.route("/sensor_info/<int:sensor_id>")
class SensorInfo(Resource):
@jwt_required
def get(self, sensor_id):
"""Get sensor info for a given sensor_id."""
try:
sensor_info = SensorInfoModel.query.filter_by(sensor_id=sensor_id).first()
response = {
"message": "success",
"data": {
"sensor_id": sensor_info.sensor_id,
"plant_name": sensor_info.plant,
"alert_level": sensor_info.alert_level,
},
}
return Response(
response=json.dumps(response), status=200, mimetype="application/json"
)
except Exception as e:
return bad_db_response(e.args)
@jwt_required
@api.expect(post_args)
def post(self, sensor_id):
"""Creates a new sensor info entry."""
args = post_args.parse_args()
try:
sensor_info = SensorInfoModel(
sensor_id=sensor_id,
plant=args["plant"],
alert_level=args["alert_level"],
)
db.session.add(sensor_info)
db.session.commit()
response = {"message": "success"}
except IntegrityError:
response = {
"message": f"Sensor id {sensor_id} already exists in database. Try updating or deleting first."
}
return Response(
response=json.dumps(response), status=409, mimetype="application/json"
)
except Exception as e:
return bad_db_response(e.args)
return Response(
response=json.dumps(response), status=201, mimetype="application/json"
)
@jwt_required
@api.doc(
params={"plant": "Plant name.", "alert_level": "Alert level for moisture."}
)
def put(self, sensor_id):
"""Updates a sensor info entry.
One or both of 'plant' and 'alert_level' must be supplied.
"""
parser = reqparse.RequestParser()
parser.add_argument("plant", type=str)
parser.add_argument("alert_level", type=int)
args = parser.parse_args()
if not any(list(args.values())):
return Response(
response=json.dumps(
{
"message": "Both arguments are empty. Try checking your parameter names."
}
),
status=400,
mimetype="application/json",
)
now = datetime.datetime.utcnow()
sensor_info = SensorInfoModel.query.filter_by(sensor_id=sensor_id).first()
if sensor_info:
try:
if args["plant"]:
sensor_info.plant = args["plant"]
if args["alert_level"]:
sensor_info.alert_level = args["alert_level"]
sensor_info.updated = now
db.session.commit()
response = {"message": f"Sensor id {sensor_id} successfully updated"}
return Response(
response=json.dumps(response),
status=200,
mimetype="application/json",
)
except Exception as e:
return bad_db_response(e.args)
# TODO handle updating entry that doesn't exist
@jwt_required
def delete(self, sensor_id):
"""Deletes a sensor info entry."""
# TODO need to handle deleting an entry that doesn't exist
try:
sensor_info = (
db.session.query(SensorInfoModel).filter_by(sensor_id=sensor_id).first()
)
db.session.delete(sensor_info)
db.session.commit()
response = {"message": f"Sensor id {sensor_id} successfully deleted"}
return Response(
response=json.dumps(response), status=200, mimetype="application/json"
)
except Exception as e:
return bad_db_response(e.args)
| 33.843972
| 111
| 0.573135
| 513
| 4,772
| 5.177388
| 0.255361
| 0.069277
| 0.042169
| 0.042169
| 0.423946
| 0.360316
| 0.316642
| 0.240964
| 0.240964
| 0.240964
| 0
| 0.005625
| 0.329422
| 4,772
| 140
| 112
| 34.085714
| 0.824375
| 0.0614
| 0
| 0.292035
| 0
| 0
| 0.154505
| 0.006306
| 0
| 0
| 0
| 0.007143
| 0
| 1
| 0.035398
| false
| 0
| 0.079646
| 0
| 0.212389
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e263e2d36efcfc4b3135f0a65636317114a2c8d
| 995
|
py
|
Python
|
hash calculator.py
|
Andrea1141/hash-calculator
|
182d2f9bcfa0227ad70f7fdb03dde4599717cafa
|
[
"MIT"
] | 1
|
2021-10-02T12:48:25.000Z
|
2021-10-02T12:48:25.000Z
|
hash calculator.py
|
Andrea1141/hash-calculator
|
182d2f9bcfa0227ad70f7fdb03dde4599717cafa
|
[
"MIT"
] | null | null | null |
hash calculator.py
|
Andrea1141/hash-calculator
|
182d2f9bcfa0227ad70f7fdb03dde4599717cafa
|
[
"MIT"
] | 1
|
2021-10-18T12:34:26.000Z
|
2021-10-18T12:34:26.000Z
|
import tkinter, hashlib
root = tkinter.Tk()
root.title("Hash Calculator")
label = tkinter.Label(text="Write the string to hash")
label.pack()
option = tkinter.StringVar()
option.set("sha224")
string = tkinter.StringVar()
entry = tkinter.Entry(root, textvariable=string, width=150, justify="center")
entry.pack()
hexdigest = tkinter.StringVar()
label = tkinter.Entry(text="", textvariable=hexdigest, width=150, justify="center", state="readonly")
label.pack()
def callback(*args):
encoded_string = string.get().encode()
command = "hashlib." + option.get() + "(encoded_string)"
result = eval(command)
hexdigest.set(result.hexdigest())
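# Recompute the digest whenever the input text or the selected algorithm changes.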
string.trace_add("write", callback)
option.trace_add("write", callback)
algorithms = ["sha224", "sha1", "blake2s", "sha3_384", "sha256", "blake2b", "sha384", "sha3_256", "sha3_512", "md5", "sha512", "sha3_224"]
menu = tkinter.OptionMenu(root, option, *algorithms)
menu.pack()
callback()
root.mainloop()
| 28.428571
| 139
| 0.684422
| 118
| 995
| 5.70339
| 0.466102
| 0.071322
| 0.044577
| 0.062407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048066
| 0.142714
| 995
| 34
| 140
| 29.264706
| 0.740914
| 0
| 0
| 0.08
| 0
| 0
| 0.183143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e28e0f9797870a68b28678349b8f468bf2771ae
| 387
|
py
|
Python
|
src/tandlr/notifications/routing.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
src/tandlr/notifications/routing.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
src/tandlr/notifications/routing.py
|
shrmoud/schoolapp
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from channels.staticfiles import StaticFilesConsumer
from tandlr.notifications import consumers
channel_routing = {
'http.request': StaticFilesConsumer(),
# Wire up websocket channels to our consumers:
'websocket.connect': consumers.ws_connect,
'websocket.receive': consumers.ws_receive,
'websocket.disconnect': consumers.ws_disconnect,
}
| 25.8
| 52
| 0.74677
| 40
| 387
| 7.125
| 0.6
| 0.115789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00303
| 0.147287
| 387
| 14
| 53
| 27.642857
| 0.860606
| 0.170543
| 0
| 0
| 0
| 0
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e297317547f88cd2d57145599c9dcd9b0299b5a
| 646
|
py
|
Python
|
2018/d03.py
|
m1el/advent-of-code
|
0944579fd58c586ce5a72b4152c5105ec07846a1
|
[
"MIT"
] | null | null | null |
2018/d03.py
|
m1el/advent-of-code
|
0944579fd58c586ce5a72b4152c5105ec07846a1
|
[
"MIT"
] | null | null | null |
2018/d03.py
|
m1el/advent-of-code
|
0944579fd58c586ce5a72b4152c5105ec07846a1
|
[
"MIT"
] | null | null | null |
from collections import defaultdict, Counter
from itertools import product
import re
with open('03.txt') as fd:
inp = []
for l in fd.readlines():
groups = re.findall(r'\d+', l)
inp.append(list(map(int, groups)))
claims = defaultdict(int)
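# Part 1: mark every claimed square inch and count the cells claimed more than once.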
for (id, l,t, w,h) in inp:
for y in range(t,t+h):
for x in range(l,l+w):
claims[(x,y)] += 1
c=0
for n in claims.values():
if n > 1: c+= 1
print(c)
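# Part 2: find the claim whose rectangle does not overlap any other claim.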
for (id, l,t, w,h) in inp:
bad = False
for y in range(t,t+h):
for x in range(l,l+w):
if claims[(x,y)] > 1:
bad = True
break
if bad: break
if not bad:
print(id)
| 20.1875
| 45
| 0.547988
| 118
| 646
| 3
| 0.398305
| 0.079096
| 0.033898
| 0.039548
| 0.237288
| 0.237288
| 0.237288
| 0.237288
| 0.158192
| 0.158192
| 0
| 0.015453
| 0.298762
| 646
| 31
| 46
| 20.83871
| 0.766004
| 0
| 0
| 0.222222
| 0
| 0
| 0.014634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e2a44b8d417cc833a2bb62cb532d7fa7ff0e6b8
| 2,591
|
py
|
Python
|
files/lambda/tagger.py
|
mbasri/generic-spot-cluster
|
cccfbee4660ae26742e1442f495dc9f523d0a2fd
|
[
"MIT"
] | 1
|
2019-12-24T18:53:34.000Z
|
2019-12-24T18:53:34.000Z
|
files/lambda/tagger.py
|
mbasri/generic-spot-cluster
|
cccfbee4660ae26742e1442f495dc9f523d0a2fd
|
[
"MIT"
] | null | null | null |
files/lambda/tagger.py
|
mbasri/generic-spot-cluster
|
cccfbee4660ae26742e1442f495dc9f523d0a2fd
|
[
"MIT"
] | null | null | null |
import os
import sys
import logging
import boto3
def handler(event, context):
logger = setup_logging(context.aws_request_id)
logger.setLevel(logging.INFO)
logger.info('## ENVIRONMENT VARIABLES')
logger.info(os.environ)
logger.info('## EVENT')
logger.info(event)
count = '1'
CLUSTER_NAME = os.environ['cluster_name']
asg = boto3.client('autoscaling')
ec2 = boto3.client('ec2')
asg_response = asg.describe_auto_scaling_groups(
AutoScalingGroupNames=[
CLUSTER_NAME
]
)
instances = []
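    # Collect the ids of ASG instances that are in service or still launching.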
try:
for i in asg_response['AutoScalingGroups'][0]['Instances']:
if i['LifecycleState'] == 'InService' or i['LifecycleState'] == 'Pending':
instances.append(i['InstanceId'])
except IndexError :
logger.error('IndexError on autoscaling')
count = '1'
logger.info('## INSTANCE(S) FOUND ON THE ASG')
logger.info('instances=['+','.join(instances)+']')
ec2_response = ec2.describe_instances(
Filters=[
{
'Name': 'instance-state-name',
'Values': [
'pending',
'running',
'stopping',
'stopped',
]
},
{
'Name': 'tag-key',
'Values': [
'Count',
]
}
],
InstanceIds = instances
)
logger.info('## ACTIVE INSTANCE(S) FOUND ON THE ASG')
logger.info('ec2_response='+str(ec2_response))
counts = []
try :
for i in ec2_response['Reservations']:
for j in i['Instances']:
for z in j['Tags']:
if z['Key'] == 'Count':
counts.append(z['Value'])
except IndexError :
logger.error('IndexError on ec2')
count = '1'
#counts.sort()
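    # Pick the smallest Count value that is not already used by an existing instance tag.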
for i in counts :
if count in counts:
count = str(int(count)+1)
else:
break
ec2.create_tags(
Resources = [
event['instance_id']
],
Tags=[
{
'Key': 'Count',
'Value': count
}
]
)
response = {
'cluster_name': CLUSTER_NAME,
'count': count,
'instance_id': event['instance_id']
}
logger.info('## RESPONSE')
logger.info('response' + str(response))
return response
def setup_logging(uuid):
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
handler = logging.StreamHandler(sys.stdout)
formatter = f"[%(asctime)s] [Bastion] [{uuid}] [%(levelname)s] %(message)s"
handler.setFormatter(logging.Formatter(formatter))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
| 22.530435
| 81
| 0.582015
| 275
| 2,591
| 5.4
| 0.345455
| 0.06734
| 0.012121
| 0.012121
| 0.095623
| 0.095623
| 0.043098
| 0.043098
| 0
| 0
| 0
| 0.009028
| 0.273254
| 2,591
| 115
| 82
| 22.530435
| 0.779607
| 0.005017
| 0
| 0.114583
| 0
| 0
| 0.208689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.041667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e2c4ce8c6ded9f25bc03ff3e20ecd6211356ad1
| 7,950
|
py
|
Python
|
addressbook/views.py
|
webskate101/django-polymer-addressbook
|
bf41b6a83e7b9228b383129958488f1c8075c728
|
[
"Apache-2.0"
] | null | null | null |
addressbook/views.py
|
webskate101/django-polymer-addressbook
|
bf41b6a83e7b9228b383129958488f1c8075c728
|
[
"Apache-2.0"
] | null | null | null |
addressbook/views.py
|
webskate101/django-polymer-addressbook
|
bf41b6a83e7b9228b383129958488f1c8075c728
|
[
"Apache-2.0"
] | null | null | null |
"""Holds the HTTP handlers for the addressbook app."""
from django import db
from django import http
from django.views import generic
import json
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from addressbook import models
JSON_XSSI_PREFIX = ")]}'\n"
def json_response(data, status_code=200):
    response = http.HttpResponse()
    response.status_code = status_code
    response['Content-Type'] = 'application/javascript'
    # These three lines needed to defeat XSSI attacks
    response['X-Content-Type-Options'] = 'nosniff'
    response['Content-Disposition'] = 'attachment'
    response.content = JSON_XSSI_PREFIX + json.dumps(data)
    return response
def _update_contact_details(has_contact_details, update_dict):
    has_contact_details.email = update_dict['email']
    has_contact_details.phone = update_dict['phone']
    has_contact_details.street_address = update_dict['streetAddress']
    has_contact_details.city = update_dict['city']
    has_contact_details.postal_code = update_dict['postalCode']
@method_decorator(login_required, name='get')
class IndexView(generic.base.TemplateView):
    """Renders the base index file."""
    template_name = 'index.html'
class LoginRequiredRESTHandler(generic.View):
    def dispatch(self, *args, **kwargs):
        """Require authenticated user for all REST requests."""
        if not self.request.user.is_authenticated():
            return json_response({'status': 'Unauthorized'}, status_code=401)
        self.user = self.request.user
        return super(LoginRequiredRESTHandler, self).dispatch(*args, **kwargs)
class OrganizationListRESTHandler(LoginRequiredRESTHandler):
    """REST handler for multiple organization requests."""
    def get(self, request):
        data = [
            {
                'id': organization.id,
                'name': organization.name,
                'email': organization.email,
                'phone': organization.phone,
                'streetAddress': organization.street_address,
                'city': organization.city,
                'postalCode': organization.postal_code,
                'members': [
                    {
                        'id': person.id,
                        'firstName': person.first_name,
                        'lastName': person.last_name,
                    }
                    for person in organization.members.filter(
                        owner=self.user).order_by('last_name', 'first_name')
                ],
            }
            for organization in models.Organization.objects.filter(
                owner=self.user).order_by('name')
        ]
        return json_response(data)
class OrganizationMembershipRESTHandler(LoginRequiredRESTHandler):
    """REST handler to manage membership of an organization."""
    @db.transaction.atomic
    def put(self, request, organization_id, person_id):
        """Add a member to an organization."""
        # TODO(john): Better error handling - get() raises if not found, but there's
        # no messaging back to the client yet
        organization = models.Organization.objects.get(
            owner=self.user, id=organization_id)
        person = models.Person.objects.get(owner=self.user, id=person_id)
        organization.members.add(person)
        organization.save()
        return json_response({
            'type': 'membership',
            'organization_id': organization_id,
            'person_id': person_id,
            'action': 'added'})
    @db.transaction.atomic
    def delete(self, request, organization_id, person_id):
        """Remove a member from an organization."""
        # TODO(john): Better error handling - get() raises if not found, but there's
        # no messaging back to the client yet
        organization = models.Organization.objects.get(
            owner=self.user, id=organization_id)
        person = models.Person.objects.get(owner=self.user, id=person_id)
        organization.members.remove(person)
        organization.save()
        return json_response({
            'type': 'membership',
            'organization_id': organization_id,
            'person_id': person_id,
            'action': 'deleted'})
class OrganizationRESTHandler(LoginRequiredRESTHandler):
    """REST handler for single organization requests."""
    def get(self, request, organization_id):
        raise NotImplementedError()
    @db.transaction.atomic
    def post(self, request):
        """Adds a new organization."""
        organization = models.Organization(owner=self.user)
        # TODO(john): Server-side data validation before blindly copying the data
        # into the target object
        self._update_organization(organization, json.loads(request.body))
        return json_response(
            {'type': 'organization', 'id': organization.id, 'action': 'added'})
    @db.transaction.atomic
    def put(self, request, organization_id):
        """Receives updates to an existing organization."""
        # TODO(john): Better error handling - get() raises if not found, but there's
        # no messaging back to the client yet
        organization = models.Organization.objects.get(
            owner=self.user, id=organization_id)
        # TODO(john): Server-side data validation before blindly copying the data
        # into the target object
        self._update_organization(organization, json.loads(request.body))
        return json_response(
            {'type': 'organization', 'id': organization_id, 'action': 'updated'})
    @db.transaction.atomic
    def delete(self, request, organization_id):
        """Delete an organization."""
        organization = models.Organization.objects.get(
            owner=self.user, id=organization_id)
        organization.delete()
        return json_response(
            {'type': 'organization', 'id': organization_id, 'action': 'deleted'})
    def _update_organization(self, organization, update_dict):
        organization.name = update_dict['name']
        _update_contact_details(organization, update_dict)
        organization.save()
class PersonListRESTHandler(LoginRequiredRESTHandler):
    """REST handler for multiple person requests."""
    def get(self, request):
        data = [
            {
                'id': person.id,
                'firstName': person.first_name,
                'lastName': person.last_name,
                'email': person.email,
                'phone': person.phone,
                'streetAddress': person.street_address,
                'city': person.city,
                'postalCode': person.postal_code,
            }
            for person in models.Person.objects.filter(owner=self.user)
        ]
        return json_response(data)
class PersonRESTHandler(LoginRequiredRESTHandler):
    """REST handler for single person requests."""
    def get(self, request, person_id):
        raise NotImplementedError()
    @db.transaction.atomic
    def post(self, request):
        """Adds a new person."""
        person = models.Person(owner=self.user)
        # TODO(john): Server-side data validation before blindly copying the data
        # into the target object
        self._update_person(person, json.loads(request.body))
        return json_response(
            {'type': 'person', 'id': person.id, 'action': 'added'})
    @db.transaction.atomic
    def put(self, request, person_id):
        """Receives updates to an existing person."""
        # TODO(john): Better error handling - get() raises if not found, but there's
        # no messaging back to the client yet
        person = models.Person.objects.get(owner=self.user, id=person_id)
        # TODO(john): Server-side data validation before blindly copying the data
        # into the target object
        self._update_person(person, json.loads(request.body))
        return json_response(
            {'type': 'person', 'id': person_id, 'action': 'updated'})
    @db.transaction.atomic
    def delete(self, request, person_id):
        """Delete a person."""
        person = models.Person.objects.get(owner=self.user, id=person_id)
        person.delete()
        return json_response(
            {'type': 'person', 'id': person_id, 'action': 'deleted'})
    def _update_person(self, person, update_dict):
        person.first_name = update_dict['firstName']
        person.last_name = update_dict['lastName']
        _update_contact_details(person, update_dict)
        person.save()
| 33.544304
| 80
| 0.684528
| 920
| 7,950
| 5.786957
| 0.181522
| 0.031555
| 0.028174
| 0.033058
| 0.579264
| 0.522164
| 0.48009
| 0.46882
| 0.46882
| 0.429564
| 0
| 0.000943
| 0.199748
| 7,950
| 236
| 81
| 33.686441
| 0.835901
| 0.18566
| 0
| 0.38
| 0
| 0
| 0.093931
| 0.0069
| 0
| 0
| 0
| 0.004237
| 0
| 1
| 0.113333
| false
| 0
| 0.046667
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e2de9f463b88672a9f0881711bb0f7f45018e12
| 1,124
|
py
|
Python
|
Housing Price/HouseRegression.py
|
anupriyamranjit/machinelearning
|
5e1deef38d356fddcedfe0a23094571500c1c82d
|
[
"MIT"
] | null | null | null |
Housing Price/HouseRegression.py
|
anupriyamranjit/machinelearning
|
5e1deef38d356fddcedfe0a23094571500c1c82d
|
[
"MIT"
] | null | null | null |
Housing Price/HouseRegression.py
|
anupriyamranjit/machinelearning
|
5e1deef38d356fddcedfe0a23094571500c1c82d
|
[
"MIT"
] | null | null | null |
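# Kaggle-style script: trains a small Keras feed-forward regression network on the
# 'kc_house_data.csv' house-sales dataset to predict the 'price' column.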
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import keras
import os
print(os.listdir("../input"))
print("Success")
# Any results you write to the current directory are saved as output.
# importing models/layers
from keras.models import Sequential
from keras.layers import Dense
print("Success")
my_data = pd.read_csv('../input/kc_house_data.csv')
my_data.head()
#Splitting Data Up
predictors = my_data.drop(columns=["price","date"])
output = my_data['price']
print("Success")
model = Sequential()
n_cols = predictors.shape[1]
print("Success")
#Dense Layers
model.add(Dense(5,activation ="relu", input_shape=(n_cols,)))
model.add(Dense(5,activation ="relu"))
model.add(Dense(1))
print("Success")
#Optimizer
model.compile(optimizer="adam", loss ="mean_squared_error")
print("Success")
#fitting
from keras.callbacks import EarlyStopping
early_stopping_monitor = EarlyStopping(patience=3)
model.fit(predictors,output,validation_split=0.2, epochs=30, callbacks=[early_stopping_monitor])
#prediction
prediction = model.predict(predictors)  # predict on the training features (no separate test set in this script)
| 22.039216
| 96
| 0.758897
| 165
| 1,124
| 5.060606
| 0.521212
| 0.086228
| 0.046707
| 0.033533
| 0.067066
| 0.067066
| 0
| 0
| 0
| 0
| 0
| 0.008955
| 0.105872
| 1,124
| 50
| 97
| 22.48
| 0.821891
| 0.186833
| 0
| 0.214286
| 0
| 0
| 0.133038
| 0.028825
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e2e001920079b806a3731784374226e2f26379a
| 1,194
|
py
|
Python
|
migrations/versions/29e48091912e_remove_unique_constraint_from_user_table.py
|
GitauHarrison/somasoma_V1
|
2d74ad3b58f7e4ea5334e240d5bd30938f615e24
|
[
"MIT"
] | null | null | null |
migrations/versions/29e48091912e_remove_unique_constraint_from_user_table.py
|
GitauHarrison/somasoma_V1
|
2d74ad3b58f7e4ea5334e240d5bd30938f615e24
|
[
"MIT"
] | 2
|
2021-11-11T19:04:10.000Z
|
2021-11-11T19:08:42.000Z
|
migrations/versions/29e48091912e_remove_unique_constraint_from_user_table.py
|
GitauHarrison/somasoma_V1
|
2d74ad3b58f7e4ea5334e240d5bd30938f615e24
|
[
"MIT"
] | 1
|
2021-09-09T13:44:26.000Z
|
2021-09-09T13:44:26.000Z
|
"""remove unique constraint from user table
Revision ID: 29e48091912e
Revises: f73df8de1f1f
Create Date: 2021-12-22 22:26:20.918461
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '29e48091912e'
down_revision = 'f73df8de1f1f'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_index('ix_user_email')
        batch_op.create_index(batch_op.f('ix_user_email'), ['email'], unique=False)
        batch_op.drop_index('ix_user_name')
        batch_op.create_index(batch_op.f('ix_user_name'), ['name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_user_name'))
        batch_op.create_index('ix_user_name', ['name'], unique=1)
        batch_op.drop_index(batch_op.f('ix_user_email'))
        batch_op.create_index('ix_user_email', ['email'], unique=1)
    # ### end Alembic commands ###
| 30.615385
| 83
| 0.69598
| 170
| 1,194
| 4.623529
| 0.323529
| 0.124682
| 0.05598
| 0.081425
| 0.592875
| 0.52799
| 0.492366
| 0.396947
| 0.396947
| 0.259542
| 0
| 0.052525
| 0.170854
| 1,194
| 38
| 84
| 31.421053
| 0.741414
| 0.269682
| 0
| 0.111111
| 0
| 0
| 0.179641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e2e6a8e43d315af581125fc3cb4dc17b915f7a7
| 6,065
|
py
|
Python
|
VBx/models/resnet.py
|
Jamiroquai88/VBx
|
35e7954ac0042ea445dcec657130e2c3c0b94ee0
|
[
"Apache-2.0"
] | 145
|
2020-02-13T09:08:59.000Z
|
2022-03-28T02:05:38.000Z
|
VBx/models/resnet.py
|
Jamiroquai88/VBx
|
35e7954ac0042ea445dcec657130e2c3c0b94ee0
|
[
"Apache-2.0"
] | 39
|
2021-01-12T02:49:37.000Z
|
2022-02-17T18:49:54.000Z
|
VBx/models/resnet.py
|
Jamiroquai88/VBx
|
35e7954ac0042ea445dcec657130e2c3c0b94ee0
|
[
"Apache-2.0"
] | 44
|
2020-02-13T03:57:35.000Z
|
2022-03-31T07:05:09.000Z
|
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class BasicBlock(nn.Module):
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, reduction=16):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        #self.se = SELayer(planes, reduction)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        #out = self.se(out)
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, reduction=16):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        #self.se = SELayer(planes * 4, reduction)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        #out = self.se(out)
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class SELayer(nn.Module):
    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )
    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)
class ResNet(nn.Module):
    def __init__(self, block, num_blocks, m_channels=32, feat_dim=40, embed_dim=128, squeeze_excitation=False):
        super(ResNet, self).__init__()
        self.in_planes = m_channels
        self.feat_dim = feat_dim
        self.embed_dim = embed_dim
        self.squeeze_excitation = squeeze_excitation
        if block is BasicBlock:
            self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(m_channels)
            self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
            self.layer2 = self._make_layer(block, m_channels*2, num_blocks[1], stride=2)
            current_freq_dim = int((feat_dim - 1) / 2) + 1
            self.layer3 = self._make_layer(block, m_channels*4, num_blocks[2], stride=2)
            current_freq_dim = int((current_freq_dim - 1) / 2) + 1
            self.layer4 = self._make_layer(block, m_channels*8, num_blocks[3], stride=2)
            current_freq_dim = int((current_freq_dim - 1) / 2) + 1
            self.embedding = nn.Linear(m_channels * 8 * 2 * current_freq_dim, embed_dim)
        elif block is Bottleneck:
            self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(m_channels)
            self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
            self.layer2 = self._make_layer(block, m_channels*2, num_blocks[1], stride=2)
            self.layer3 = self._make_layer(block, m_channels*4, num_blocks[2], stride=2)
            self.layer4 = self._make_layer(block, m_channels*8, num_blocks[3], stride=2)
            self.embedding = nn.Linear(int(feat_dim/8) * m_channels * 16 * block.expansion, embed_dim)
        else:
            raise ValueError(f'Unexpected class {type(block)}.')
    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        x = x.unsqueeze_(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        pooling_mean = torch.mean(out, dim=-1)
        meansq = torch.mean(out * out, dim=-1)
        pooling_std = torch.sqrt(meansq - pooling_mean ** 2 + 1e-10)
        out = torch.cat((torch.flatten(pooling_mean, start_dim=1),
                         torch.flatten(pooling_std, start_dim=1)), 1)
        embedding = self.embedding(out)
        return embedding
def ResNet101(feat_dim, embed_dim, squeeze_excitation=False):
    return ResNet(Bottleneck, [3, 4, 23, 3], feat_dim=feat_dim, embed_dim=embed_dim, squeeze_excitation=squeeze_excitation)
| 40.433333
| 123
| 0.622754
| 832
| 6,065
| 4.365385
| 0.163462
| 0.039648
| 0.04185
| 0.039648
| 0.575991
| 0.522302
| 0.517621
| 0.493667
| 0.493667
| 0.493667
| 0
| 0.036352
| 0.251608
| 6,065
| 149
| 124
| 40.704698
| 0.763825
| 0.052102
| 0
| 0.380531
| 0
| 0
| 0.005402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088496
| false
| 0
| 0.035398
| 0.00885
| 0.230089
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e345a0575b803502ed9bfed61051d0d9fb3fa57
| 5,159
|
py
|
Python
|
bc/recruitment/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | 1
|
2021-02-27T07:27:17.000Z
|
2021-02-27T07:27:17.000Z
|
bc/recruitment/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | null | null | null |
bc/recruitment/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T15:56:54.000Z
|
2021-06-09T15:56:54.000Z
|
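# Job-search helpers for the recruitment app: full-text search over TalentLinkJob
# records, query-string filter handling, and postcode-based distance sorting backed
# by the api.postcodes.io lookup service.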
import json
from django import forms
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector
from django.core.exceptions import ValidationError
from django.db.models import F
from django.db.models.functions import ACos, Cos, Radians, Sin
import requests
from bc.recruitment.constants import JOB_FILTERS
from bc.recruitment.models import JobCategory, RecruitmentHomePage, TalentLinkJob
def is_recruitment_site(site):
    return site and isinstance(site.root_page.specific, RecruitmentHomePage)
def get_current_search(querydict):
    """
    Returns search query and filters in request.GET as json string
    """
    search = {}
    if querydict.get("query", None):
        search["query"] = querydict["query"]
    if querydict.get("postcode", None):
        search["postcode"] = querydict["postcode"]
    # Loop through our filters so we don't just store any query params
    for filter in JOB_FILTERS:
        selected = querydict.getlist(filter["name"])
        if selected:
            selected = list(dict.fromkeys(selected)) # Remove duplicate options
            search[filter["name"]] = sorted(selected) # Sort options alphabetically
    return json.dumps(search)
def get_job_search_results(querydict, homepage, queryset=None):
    if queryset is None:
        queryset = TalentLinkJob.objects.all()
    queryset = queryset.filter(homepage=homepage)
    search_query = querydict.get("query", None)
    if search_query:
        vector = (
            SearchVector("title", weight="A")
            + SearchVector("job_number", weight="A")
            # + SearchVector("short_description", weight="A")
            + SearchVector("location_name", weight="B")
            + SearchVector("location_city", weight="B")
            + SearchVector("description", weight="C")
        )
        query = SearchQuery(search_query, search_type="phrase")
        search_results = (
            queryset.annotate(rank=SearchRank(vector, query))
            .filter(rank__gte=0.1)
            .order_by("-rank")
        )
    else:
        # Order by newest job at top
        search_results = queryset.order_by("posting_start_date")
    # Process 'hide schools and early years job'
    if querydict.get("hide_schools_and_early_years", False):
        schools_and_early_years_categories = (
            JobCategory.get_school_and_early_years_categories()
        )
        search_results = search_results.exclude(
            subcategory__categories__slug__in=schools_and_early_years_categories
        )
    # Process filters
    for filter in JOB_FILTERS:
        # QueryDict.update() used in send_job_alerts.py adds the values as list instead of multivalue dict.
        if isinstance(querydict.get(filter["name"]), list):
            selected = querydict.get(filter["name"])
        else:
            selected = querydict.getlist(
                filter["name"]
            ) # will return empty list if not found
        try:
            selected = [forms.CharField().clean(value) for value in selected]
        except ValidationError:
            # Abort any invalid string literals, e.g. SQL injection attempts
            continue
        if selected:
            search_results = search_results.filter(
                **{
                    filter["filter_key"] + "__in": selected
                } # TODO: make case insensitive
            )
    # Process postcode search
    search_postcode = querydict.get("postcode", None)
    if search_postcode:
        postcode_response = requests.get(
            "https://api.postcodes.io/postcodes/" + search_postcode
        )
        if postcode_response.status_code == 200:
            postcode_response_json = postcode_response.json()
            search_lon = postcode_response_json["result"]["longitude"]
            search_lat = postcode_response_json["result"]["latitude"]
            search_results = search_results.annotate(
                distance=GetDistance(search_lat, search_lon)
            ).order_by("distance")
            if search_query:
                # Rank is only used when there is a search query
                search_results = search_results.order_by("distance", "-rank")
    return search_results
def GetDistance(point_latitude, point_longitude):
    # Calculate distance. See https://www.thutat.com/web/en/programming-and-tech-stuff/
    # web-programming/postgres-query-with-gps-distance-calculations-without-postgis/
    distance = (
        ACos(
            Sin(Radians(F("location_lat"))) * Sin(Radians(point_latitude))
            + Cos(Radians(F("location_lat")))
            * Cos(Radians(point_latitude))
            * Cos(Radians(F("location_lon") - point_longitude))
        )
        * 6371
        * 1000
    )
    return distance
def get_school_and_early_years_count(search_results):
    schools_and_early_years_categories = (
        JobCategory.get_school_and_early_years_categories()
    )
    if len(schools_and_early_years_categories):
        search_results = search_results.filter(
            subcategory__categories__slug__in=schools_and_early_years_categories
        )
    return len(search_results)
| 34.393333
| 107
| 0.652064
| 569
| 5,159
| 5.6942
| 0.339192
| 0.064198
| 0.040123
| 0.04321
| 0.196914
| 0.127469
| 0.127469
| 0.103395
| 0.080247
| 0.045062
| 0
| 0.00338
| 0.254507
| 5,159
| 149
| 108
| 34.624161
| 0.839054
| 0.149641
| 0
| 0.153846
| 0
| 0
| 0.073675
| 0.006426
| 0
| 0
| 0
| 0.006711
| 0
| 1
| 0.048077
| false
| 0
| 0.086538
| 0.009615
| 0.182692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e3c50b123745c81d1f91068db3b602d8d3f128d
| 5,966
|
py
|
Python
|
dynamo/preprocessing/dynast.py
|
xing-lab-pitt/dynamo-release
|
76c1f2a270dd6722b88f4700aac1a1a725a0c261
|
[
"BSD-3-Clause"
] | 236
|
2019-07-09T22:06:21.000Z
|
2022-03-31T17:56:07.000Z
|
dynamo/preprocessing/dynast.py
|
xing-lab-pitt/dynamo-release
|
76c1f2a270dd6722b88f4700aac1a1a725a0c261
|
[
"BSD-3-Clause"
] | 115
|
2019-07-12T19:06:21.000Z
|
2022-03-31T17:34:18.000Z
|
dynamo/preprocessing/dynast.py
|
xing-lab-pitt/dynamo-release
|
76c1f2a270dd6722b88f4700aac1a1a725a0c261
|
[
"BSD-3-Clause"
] | 34
|
2019-07-10T03:34:04.000Z
|
2022-03-22T12:44:22.000Z
|
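# Detection-rate ("lambda") correction helpers for dynast-quantified metabolic
# labeling data; lambda_correction rescales labeled layers by the per-cell rate
# stored in adata.obs.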
import numpy as np
from scipy.sparse import issparse
from sklearn.utils import sparsefuncs
import anndata
from typing import Union
from ..dynamo_logger import LoggerManager, main_tqdm
from ..utils import copy_adata
def lambda_correction(
    adata: anndata.AnnData,
    lambda_key: str = "lambda",
    inplace: bool = True,
    copy: bool = False,
) -> Union[anndata.AnnData, None]:
    """Use lambda (cell-wise detection rate) to estimate the labelled RNA.
    Parameters
    ----------
    adata:
        adata object generated from dynast.
    lambda_key:
        The key to the cell-wise detection rate.
    inplace:
        Whether to inplace update the layers. If False, new layers that append "_corrected" to the existing will be
        used to store the updated data.
    copy:
        Whether to copy the adata object or update adata object inplace.
    Returns
    -------
    adata: :class:`~anndata.AnnData`
        A new or updated anndata object, based on the copy parameter, that is updated with Size_Factor, normalized
        expression values, X and reduced dimensions, etc.
    """
    logger = LoggerManager.gen_logger("dynamo-lambda_correction")
    logger.log_time()
    adata = copy_adata(adata) if copy else adata
    logger.info("apply detection rate correction to adata...", indent_level=1)
    if lambda_key not in adata.obs.keys():
        raise ValueError(
            f"the lambda_key {lambda_key} is not included in adata.obs! Please ensure you have calculated "
            "per-cell detection rate!"
        )
    logger.info("retrieving the cell-wise detection rate..", indent_level=1)
    detection_rate = adata.obs[lambda_key].values[:, None]
    logger.info("identify the data type..", indent_level=1)
    all_layers = adata.layers.keys()
    has_ul = np.any(["ul_" in i for i in all_layers])
    has_un = np.any(["un_" in i for i in all_layers])
    has_sl = np.any(["sl_" in i for i in all_layers])
    has_sn = np.any(["sn_" in i for i in all_layers])
    has_l = np.any(["_l_" in i for i in all_layers])
    has_n = np.any(["_n_" in i for i in all_layers])
    if sum([has_ul, has_un, has_sl, has_sn]) == 4:
        datatype = "splicing_labeling"
    elif sum([has_l, has_n]):
        datatype = "labeling"
    logger.info(f"the data type identified is {datatype}", indent_level=2)
    logger.info("retrieve relevant layers for detection rate correction", indent_level=1)
    if datatype == "splicing_labeling":
        layers, match_tot_layer = [], []
        for layer in all_layers:
            if "ul_" in layer:
                layers += [layer]
                match_tot_layer += ["unspliced"]
            elif "un_" in layer:
                layers += [layer]
                match_tot_layer += ["unspliced"]
            elif "sl_" in layer:
                layers += [layer]
                match_tot_layer += ["spliced"]
            elif "sn_" in layer:
                layers += [layer]
                match_tot_layer += ["spliced"]
            elif "spliced" in layer:
                layers += [layer]
            elif "unspliced" in layer:
                layers += [layer]
        if len(layers) != 6:
            raise ValueError(
                "the adata object has to include ul, un, sl, sn, unspliced, spliced, "
                "six relevant layers for splicing and labeling quantified datasets."
            )
    elif datatype == "labeling":
        layers, match_tot_layer = [], []
        for layer in all_layers:
            if "_l_" in layer:
                layers += [layer]
                match_tot_layer += ["total"]
            elif "_n_" in layer:
                layers += [layer]
                match_tot_layer += ["total"]
            elif "total" in layer:
                layers += [layer]
        if len(layers) != 3:
            raise ValueError(
                "the adata object has to include labeled, unlabeled, three relevant layers for labeling quantified "
                "datasets."
            )
    logger.info("detection rate correction starts", indent_level=1)
    for i, layer in enumerate(main_tqdm(layers, desc="iterating all relevant layers")):
        if i < len(match_tot_layer):
            cur_layer = adata.layers[layer] if inplace else adata.layers[layer].copy()
            cur_total = adata.layers[match_tot_layer[i]]
            # even layers is labeled RNA and odd unlabeled RNA
            if i % 2 == 0:
                # formula: min(L / lambda, (L + U)) from scNT-seq
                if issparse(cur_layer):
                    sparsefuncs.inplace_row_scale(cur_layer, 1 / detection_rate)
                else:
                    cur_layer /= detection_rate
                if inplace:
                    adata.layers[layer] = sparse_mimmax(cur_layer, cur_total)
                else:
                    adata.layers[layer + "_corrected"] = sparse_mimmax(cur_layer, cur_total)
            else:
                if inplace:
                    adata.layers[layer] = cur_total - adata.layers[layers[i - 1]]
                else:
                    adata.layers[layer + "_corrected"] = cur_total - adata.layers[layers[i - 1]]
    logger.finish_progress(progress_name="lambda_correction")
    if copy:
        return adata
    return None
def sparse_mimmax(A, B, type="mim"):
    """Return the element-wise minimum/maximum of sparse matrices `A` and `B`.
    Parameters
    ----------
    A:
        The first sparse matrix
    B:
        The second sparse matrix
    type:
        The type of calculation, either minimum or maximum.
    Returns
    -------
    M:
        A sparse matrix that contains the element-wise maximum or minimum of two sparse matrices.
    """
    AgtB = (A < B).astype(int) if type == "min" else (A > B).astype(int)
    M = AgtB.multiply(A - B) + B
    return M
| 35.301775
| 120
| 0.578947
| 733
| 5,966
| 4.567531
| 0.253752
| 0.055854
| 0.038829
| 0.048387
| 0.257168
| 0.207288
| 0.180406
| 0.128435
| 0.103943
| 0.028674
| 0
| 0.003477
| 0.325008
| 5,966
| 168
| 121
| 35.511905
| 0.827912
| 0.192927
| 0
| 0.266667
| 0
| 0
| 0.179449
| 0.005127
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019048
| false
| 0
| 0.066667
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e3cee6ba011350960f8e52993ae0b2666144798
| 4,095
|
py
|
Python
|
tests/fullscale/poroelasticity/cryer/TestCryer.py
|
cehanagan/pylith
|
cf5c1c34040460a82f79b6eb54df894ed1b1ee93
|
[
"MIT"
] | 93
|
2015-01-08T16:41:22.000Z
|
2022-02-25T13:40:02.000Z
|
tests/fullscale/poroelasticity/cryer/TestCryer.py
|
sloppyjuicy/pylith
|
ac2c1587f87e45c948638b19560813d4d5b6a9e3
|
[
"MIT"
] | 277
|
2015-02-20T16:27:35.000Z
|
2022-03-30T21:13:09.000Z
|
tests/fullscale/poroelasticity/cryer/TestCryer.py
|
sloppyjuicy/pylith
|
ac2c1587f87e45c948638b19560813d4d5b6a9e3
|
[
"MIT"
] | 71
|
2015-03-24T12:11:08.000Z
|
2022-03-03T04:26:02.000Z
|
#!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/poroelasticity/cryer/TestCryer.py
#
# @brief Test suite for testing pylith with Cryer's problem.
import unittest
from pylith.testing.FullTestApp import (FullTestCase, Check, check_data)
import meshes
import cryer_soln
# We do not include trace_strain in the test of the solution fields, because of the
# poor convergence of the series solution.
SOLUTION_FIELDS = ["displacement", "pressure"]
SOLUTION_TOLERANCE = 0.5
# -------------------------------------------------------------------------------------------------
class TestCase(FullTestCase):
    def setUp(self):
        defaults = {
            "filename": "output/{name}-{mesh_entity}.h5",
            "exact_soln": cryer_soln.AnalyticalSoln(),
            "mesh": self.mesh,
        }
        self.checks = [
            Check(
                mesh_entities=["domain"],
                vertex_fields=SOLUTION_FIELDS,
                defaults=defaults,
                tolerance=SOLUTION_TOLERANCE,
            ),
            Check(
                mesh_entities=["poroelastic"],
                filename="output/{name}-{mesh_entity}_info.h5",
                cell_fields = [
                    "biot_coefficient",
                    "biot_modulus",
                    "drained_bulk_modulus",
                    "fluid_density",
                    "fluid_viscosity",
                    "isotropic_permeability",
                    "porosity",
                    "shear_modulus",
                    "solid_density",
                ],
                defaults=defaults,
            ),
            Check(
                mesh_entities=["poroelastic"],
                vertex_fields = SOLUTION_FIELDS,
                defaults=defaults,
                tolerance=SOLUTION_TOLERANCE,
            ),
            Check(
                mesh_entities=["x_neg", "y_neg", "z_neg", "surface_pressure"],
                filename="output/{name}-{mesh_entity}_info.h5",
                vertex_fields=["initial_amplitude"],
                defaults=defaults,
            ),
            Check(
                mesh_entities=["x_neg", "y_neg", "z_neg", "surface_pressure"],
                vertex_fields=SOLUTION_FIELDS,
                defaults=defaults,
                tolerance=SOLUTION_TOLERANCE,
            ),
        ]
    def run_pylith(self, testName, args):
        FullTestCase.run_pylith(self, testName, args)
# -------------------------------------------------------------------------------------------------
class TestHex(TestCase):
    def setUp(self):
        self.name = "cryer_hex"
        self.mesh = meshes.Hex()
        super().setUp()
        TestCase.run_pylith(self, self.name, ["cryer.cfg", "cryer_hex.cfg"])
        return
# -------------------------------------------------------------------------------------------------
class TestTet(TestCase):
    def setUp(self):
        self.name = "cryer_tet"
        self.mesh = meshes.Tet()
        super().setUp()
        TestCase.run_pylith(self, self.name, ["cryer.cfg", "cryer_tet.cfg"])
        return
# -------------------------------------------------------------------------------------------------
def test_cases():
    return [
        TestHex,
        TestTet,
    ]
# -------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    FullTestCase.parse_args()
    suite = unittest.TestSuite()
    for test in test_cases():
        suite.addTest(unittest.makeSuite(test))
    unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| 31.022727
| 99
| 0.477656
| 342
| 4,095
| 5.52924
| 0.435673
| 0.037017
| 0.04495
| 0.03596
| 0.356425
| 0.289265
| 0.289265
| 0.218403
| 0.218403
| 0.182443
| 0
| 0.004733
| 0.277656
| 4,095
| 131
| 100
| 31.259542
| 0.63455
| 0.298901
| 0
| 0.414634
| 0
| 0
| 0.158635
| 0.042912
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060976
| false
| 0
| 0.04878
| 0.012195
| 0.182927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e3e8c87814094936e4351a80831e5bb8fce82f9
| 3,551
|
py
|
Python
|
util/data.py
|
pinaryazgan/GDN
|
469e63fa8c2dce596c6f7e99f2620ac6eec7dadf
|
[
"MIT"
] | 156
|
2021-03-01T12:49:25.000Z
|
2022-03-28T08:27:33.000Z
|
util/data.py
|
pinaryazgan/GDN
|
469e63fa8c2dce596c6f7e99f2620ac6eec7dadf
|
[
"MIT"
] | 24
|
2021-04-19T10:08:35.000Z
|
2022-03-28T11:42:54.000Z
|
util/data.py
|
pinaryazgan/GDN
|
469e63fa8c2dce596c6f7e99f2620ac6eec7dadf
|
[
"MIT"
] | 54
|
2021-04-16T17:26:30.000Z
|
2022-03-28T06:08:43.000Z
|
# util functions about data
from scipy.stats import rankdata, iqr, trim_mean
from sklearn.metrics import f1_score, mean_squared_error
import numpy as np
from numpy import percentile
def get_attack_interval(attack):
    heads = []
    tails = []
    for i in range(len(attack)):
        if attack[i] == 1:
            if attack[i-1] == 0:
                heads.append(i)
            if i < len(attack)-1 and attack[i+1] == 0:
                tails.append(i)
            elif i == len(attack)-1:
                tails.append(i)
    res = []
    for i in range(len(heads)):
        res.append((heads[i], tails[i]))
    # print(heads, tails)
    return res
# calculate F1 scores
def eval_scores(scores, true_scores, th_steps, return_thresold=False):
    padding_list = [0]*(len(true_scores) - len(scores))
    # print(padding_list)
    if len(padding_list) > 0:
        scores = padding_list + scores
    scores_sorted = rankdata(scores, method='ordinal')
    th_steps = th_steps
    # th_steps = 500
    th_vals = np.array(range(th_steps)) * 1.0 / th_steps
    fmeas = [None] * th_steps
    thresholds = [None] * th_steps
    for i in range(th_steps):
        cur_pred = scores_sorted > th_vals[i] * len(scores)
        fmeas[i] = f1_score(true_scores, cur_pred)
        score_index = scores_sorted.tolist().index(int(th_vals[i] * len(scores)+1))
        thresholds[i] = scores[score_index]
    if return_thresold:
        return fmeas, thresholds
    return fmeas
def eval_mseloss(predicted, ground_truth):
    ground_truth_list = np.array(ground_truth)
    predicted_list = np.array(predicted)
    # mask = (ground_truth_list == 0) | (predicted_list == 0)
    # ground_truth_list = ground_truth_list[~mask]
    # predicted_list = predicted_list[~mask]
    # neg_mask = predicted_list < 0
    # predicted_list[neg_mask] = 0
    # err = np.abs(predicted_list / ground_truth_list - 1)
    # acc = (1 - np.mean(err))
    # return loss
    loss = mean_squared_error(predicted_list, ground_truth_list)
    return loss
def get_err_median_and_iqr(predicted, groundtruth):
    np_arr = np.abs(np.subtract(np.array(predicted), np.array(groundtruth)))
    err_median = np.median(np_arr)
    err_iqr = iqr(np_arr)
    return err_median, err_iqr
def get_err_median_and_quantile(predicted, groundtruth, percentage):
    np_arr = np.abs(np.subtract(np.array(predicted), np.array(groundtruth)))
    err_median = np.median(np_arr)
    # err_iqr = iqr(np_arr)
    err_delta = percentile(np_arr, int(percentage*100)) - percentile(np_arr, int((1-percentage)*100))
    return err_median, err_delta
def get_err_mean_and_quantile(predicted, groundtruth, percentage):
    np_arr = np.abs(np.subtract(np.array(predicted), np.array(groundtruth)))
    err_median = trim_mean(np_arr, percentage)
    # err_iqr = iqr(np_arr)
    err_delta = percentile(np_arr, int(percentage*100)) - percentile(np_arr, int((1-percentage)*100))
    return err_median, err_delta
def get_err_mean_and_std(predicted, groundtruth):
    np_arr = np.abs(np.subtract(np.array(predicted), np.array(groundtruth)))
    err_mean = np.mean(np_arr)
    err_std = np.std(np_arr)
    return err_mean, err_std
def get_f1_score(scores, gt, contamination):
    padding_list = [0]*(len(gt) - len(scores))
    # print(padding_list)
    threshold = percentile(scores, 100 * (1 - contamination))
    if len(padding_list) > 0:
        scores = padding_list + scores
    pred_labels = (scores > threshold).astype('int').ravel()
    return f1_score(gt, pred_labels)
| 28.18254
| 101
| 0.664602
| 506
| 3,551
| 4.420949
| 0.179842
| 0.035762
| 0.040232
| 0.017881
| 0.416629
| 0.327671
| 0.327671
| 0.327671
| 0.327671
| 0.291909
| 0
| 0.016535
| 0.216559
| 3,551
| 126
| 102
| 28.18254
| 0.787563
| 0.12757
| 0
| 0.235294
| 0
| 0
| 0.003245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.308824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e41a3d23f1cd5e224926d0f23ef2a864d4c94cb
| 5,654
|
py
|
Python
|
rrl-sysadmin/sysadmin.py
|
HyeokjuJang/sr-drl
|
01fa8264c7b36f34f721303f455f37545dbce1fe
|
[
"MIT"
] | 14
|
2020-10-02T17:14:04.000Z
|
2022-02-26T19:26:58.000Z
|
rrl-sysadmin/sysadmin.py
|
HyeokjuJang/sr-drl
|
01fa8264c7b36f34f721303f455f37545dbce1fe
|
[
"MIT"
] | 1
|
2022-02-26T08:23:13.000Z
|
2022-02-26T08:23:13.000Z
|
rrl-sysadmin/sysadmin.py
|
jaromiru/sr-drl
|
01fa8264c7b36f34f721303f455f37545dbce1fe
|
[
"MIT"
] | 6
|
2021-05-04T13:24:12.000Z
|
2021-12-06T12:51:30.000Z
|
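# Gym environment for the SysAdmin planning problem: machines in a random network fail
# stochastically and selected machines are rebooted for a penalty. The module can also
# emit the generated instance as an RDDL file and visualize the network with networkx.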
import gym, random, copy, string, uuid
import numpy as np
rddl_template = string.Template('''
non-fluents nf_sysadmin_inst_$uid {
domain = sysadmin_mdp;
objects {
computer : {$objects};
};
non-fluents {
REBOOT-PROB = $reboot_prob;
$connections
};
}
instance sysadmin_inst_$uid {
domain = sysadmin_mdp;
non-fluents = nf_sysadmin_inst_$uid;
init-state {
$running
};
max-nondef-actions = $maxactions;
horizon = $horizon;
discount = $discount;
}
''')
# ----------------------------------------------------------
class SysAdminEnv(gym.Env):
    REBOOT_PROB = 0.04
    REBOOT_PENALTY = 0.75 # IDEA: change?
    MAX_CONNECTIONS = 3
    def __init__(self, offset=0, save_domain=False, **kwargs):
        random.seed()
        np.random.seed()
        self.num_obj = kwargs["env_num_obj"]
        self.max_steps = kwargs["env_max_steps"]
        self.offset = offset # first-time initialize with random actions
        self.save_domain = save_domain
        self.multi = kwargs["multi"]
    def step(self, actions):
        running_ = self.running.copy()
        # update the running nodes
        for c in range(self.num_obj):
            if self.running[c]:
                conns = self.connections[0, (self.connections[1] == c)] # connections to this node
                n_conns = len(conns)
                n_conns_running = np.sum(self.running[conns])
                # up_prob = 0.45 + 0.5 * (1 + n_conns_running) / (1 + n_conns)
                up_prob = 0.9 * (1 + n_conns_running) / (1 + n_conns) # IDEA: change?
                running_[c] = np.random.binomial(1, up_prob)
            else:
                running_[c] = np.random.binomial(1, self.REBOOT_PROB)
        # restart the selected nodes
        if len(actions) != 0:
            running_[actions] = 1
        reward = np.sum(self.running) - self.REBOOT_PENALTY * len(actions)
        self.reward_total += reward
        self.running = running_
        # compute stats
        self.steps += 1
        done = self.steps >= self.max_steps
        s_true = self._get_state()
        info = {
            'd_true': False,
            'done': done,
            'steps': self.steps,
            's_true': s_true,
            'num_obj': self.num_obj,
            'reward_total': self.reward_total
        }
        if done:
            s_ = self.reset()
        else:
            s_ = s_true
        return s_, reward, done, info
    def reset(self):
        self.steps = 0
        self.reward_total = 0.
        self.running = np.ones(self.num_obj)
        # generate random connections
        self.connections = []
        # IDEA: better graphs?
        for node_a in range(self.num_obj):
            possible_connections = np.delete( np.arange(self.num_obj), node_a )
            conns_ids = np.random.choice(possible_connections, np.random.randint(1, self.MAX_CONNECTIONS), replace=False)
            conns = np.stack([ np.full(len(conns_ids), node_a), conns_ids ])
            self.connections.append(conns)
            # self.connections.append(np.flip(conns, axis=0))
        self.connections = np.concatenate(self.connections, axis=1)
        self.connections = np.unique(self.connections, axis=1)
        # first-time init
        if self.offset > 0:
            offset = self.offset % self.max_steps
            self.offset = 0
            for i in range(offset):
                self.step([]) # noop
        if self.save_domain:
            uid = uuid.uuid4().hex
            fn = f"_plan/sysadmin_inst_{uid}.rddl"
            rddl = self._get_rddl(uid)
            with open(fn, 'wt') as f:
                f.write(rddl)
        return self._get_state()
    def _get_state(self):
        node_feats = self.running.reshape(-1, 1)
        edge_feats = None
        return node_feats, edge_feats, self.connections
    def _get_rddl(self, uid):
        objects = ",".join([f"c{i}" for i in range(self.num_obj)])
        connections = " ".join([f"CONNECTED(c{x[0]},c{x[1]});" for x in self.connections.T])
        running = " ".join([f"running(c{i});" for i, x in enumerate(self.running)])
        max_actions = self.num_obj if self.multi else 1
        rddl = rddl_template.substitute(uid=uid, objects=objects, maxactions=max_actions, reboot_prob=self.REBOOT_PROB, connections=connections, running=running, horizon=self.max_steps, discount=1.0)
        return rddl
# ----------------------------------------------------------
import networkx as nx
import matplotlib.pyplot as plt
COLOR_RUNNING = "#cad5fa"
COLOR_DOWN = "#e33c30"
COLOR_SELECTED_R = "#1b3eb5"
COLOR_SELECTED_D = "#701812"
class GraphVisualization:
    def __init__(self, env):
        self.connections = env.connections.T
        self.G = nx.DiGraph()
        self.G.add_edges_from(self.connections)
        self.pos = nx.kamada_kawai_layout(self.G)
        # self.pos = nx.spring_layout(self.G)
        self.colors = [COLOR_DOWN, COLOR_RUNNING, COLOR_SELECTED_D, COLOR_SELECTED_R]
        self.update_state(env)
    def update_state(self, env, a=None, probs=None):
        states = env.running.copy()
        if (a is not None):
            states[a] += 2
        self.edge_colors = np.array([self.colors[int(x)] for x in states])
        self.edge_colors = self.edge_colors[self.G.nodes] # re-order
        if probs is not None:
            self.node_labels = {i: f"{probs[i]:.1f}".lstrip("0") for i in self.G.nodes}
            self.node_colors = np.array([(1-x, 1-x, 1-x) for x in probs])
            self.node_colors = self.node_colors[self.G.nodes]
        else:
            self.node_labels = None
            self.node_colors = ['w'] * len(states)
    def plot(self):
        plt.clf()
        nx.draw_networkx(self.G, pos=self.pos, labels=self.node_labels, node_color=self.node_colors, edgecolors=self.edge_colors, linewidths=3.0, arrows=True)
        return plt
# ----------------------------------------------------------
if __name__ == '__main__':
    NODES = 5
    env = SysAdminEnv(env_num_obj=NODES, env_max_steps=10)
    s = env.reset()
    gvis = GraphVisualization(env)
    a = -1
    while(True):
        # a = np.random.randint(env.num_obj)
        a = np.random.choice(NODES, np.random.randint(0, NODES), replace=False)
        probs = np.random.rand(NODES)
        print(a)
        print(probs)
        gvis.update_state(env, a, probs)
        gvis.plot().show()
        s, r, d, i = env.step(a)
        print(a, r)
        if d:
            gvis = GraphVisualization(env)
| 25.241071
| 194
| 0.662363
| 842
| 5,654
| 4.266033
| 0.220903
| 0.054287
| 0.022272
| 0.011693
| 0.07461
| 0.054287
| 0.011693
| 0
| 0
| 0
| 0
| 0.014678
| 0.168553
| 5,654
| 223
| 195
| 25.35426
| 0.749415
| 0.105412
| 0
| 0.066225
| 0
| 0
| 0.113736
| 0.019849
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05298
| false
| 0
| 0.02649
| 0
| 0.145695
| 0.019868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e43d8b9a039af747051e4f38665ccd61353394f
| 3,974
|
py
|
Python
|
core/language_modelling.py
|
lkwate/e-greedy-lm
|
02e81fee93ee93faca0c1eb339b3c5ad55b4a639
|
[
"MIT"
] | 1
|
2021-11-09T19:18:00.000Z
|
2021-11-09T19:18:00.000Z
|
core/language_modelling.py
|
lkwate/e-greedy-lm
|
02e81fee93ee93faca0c1eb339b3c5ad55b4a639
|
[
"MIT"
] | null | null | null |
core/language_modelling.py
|
lkwate/e-greedy-lm
|
02e81fee93ee93faca0c1eb339b3c5ad55b4a639
|
[
"MIT"
] | null | null | null |
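# PyTorch Lightning wrapper for a Transformers-style language model trained with an
# epsilon-greedy label transformation and an optional UID-variance penalty (weighted
# by beta) added to the likelihood loss.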
import torch
import torch.optim as optim
from transformers import AutoTokenizer
from .utils import epsilon_greedy_transform_label, uid_variance_fn, OPTIMIZER_DIC
import pytorch_lightning as pl
class RLLMLightningModule(pl.LightningModule):
    def __init__(
        self,
        model,
        action_table: torch.LongTensor,
        tokenizer: AutoTokenizer,
        learning_rate: float,
        k: int,
        epsilon: int,
        beta: int,
        variance_type: str,
        lr_factor: float,
        lr_patience: int,
        optimizer_name: str,
        add_variance: bool,
    ):
        super(RLLMLightningModule, self).__init__()
        self.model = model
        self.epsilon = epsilon
        self.beta = beta
        self.action_table = action_table.to(self.device)
        self.tokenizer = tokenizer
        self.k = k
        self.variance_type = variance_type
        self.learning_rate = learning_rate
        self.lr_factor = lr_factor
        self.lr_patience = lr_patience
        self.optimizer_name = optimizer_name
        self.add_variance = add_variance
        self.output_transform = (
            self._add_uid_variance_fn
            if self.add_variance
            else self._skip_uid_variance_fn
        )
    def configure_optimizers(self):
        optimizer = OPTIMIZER_DIC[self.optimizer_name](
            self.model.parameters(), lr=self.learning_rate
        )
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, "min", factor=self.lr_factor, patience=self.lr_patience
        )
        output = {
            "optimizer": optimizer,
            "lr_scheduler": lr_scheduler,
            "monitor": "val_loss",
        }
        return output
    def _add_uid_variance_fn(self, loss, logits, labels, variance_type):
        uid_variance = uid_variance_fn(logits, labels, variance_type=variance_type)
        output = {"likelihood": loss.detach(), "uid_variance": uid_variance.detach()}
        loss = loss + self.beta * uid_variance
        return loss, output
    def _skip_uid_variance_fn(self, loss, logits, labels, variance_type):
        return loss, {}
    def _compute_loss(self, input_ids, attention_mask, decoder_attention_mask, labels):
        labels = epsilon_greedy_transform_label(
            labels, self.action_table, self.tokenizer, epsilon=self.epsilon
        )
        output = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
        )
        loss, logits = output.loss, output.logits
        return self.output_transform(
            loss, logits, labels, variance_type=self.variance_type
        )
    def _unpack_batch(self, batch):
        input_ids, attention_mask, decoder_attention_mask, labels = (
            batch["encoder_input_ids"],
            batch["encoder_attention_mask"],
            batch["decoder_attention_mask"],
            batch["decoder_input_ids"],
        )
        return input_ids, attention_mask, decoder_attention_mask, labels
    def training_step(self, batch, batch_idx):
        input_ids, attention_mask, decoder_attention_mask, labels = self._unpack_batch(
            batch
        )
        loss, output = self._compute_loss(
            input_ids, attention_mask, decoder_attention_mask, labels
        )
        output["loss"] = loss
        self.log_dict(output)
        return output
    def validation_step(self, batch, batch_idx):
        input_ids, attention_mask, decoder_attention_mask, labels = self._unpack_batch(
            batch
        )
        loss, output = self._compute_loss(
            input_ids, attention_mask, decoder_attention_mask, labels
        )
        output["val_loss"] = loss
        self.log_dict(output, prog_bar=True)
        return output
    def test_step(self, batch, batch_idx):
        return self.validation_step(batch, batch_idx)
    def generate(self, input_ids):
        return self.model.generate(input_ids)
| 32.842975
| 87
| 0.638903
| 445
| 3,974
| 5.373034
| 0.193258
| 0.097867
| 0.05688
| 0.070263
| 0.276872
| 0.256378
| 0.235466
| 0.235466
| 0.176495
| 0.138854
| 0
| 0
| 0.28158
| 3,974
| 120
| 88
| 33.116667
| 0.837478
| 0
| 0
| 0.105769
| 0
| 0
| 0.037997
| 0.011072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.048077
| 0.028846
| 0.240385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e49ee4375c4fdbca12777a89f48b0e9f1e01d7a
| 3,590
|
py
|
Python
|
tests/imperative_vs_reactive/test_get_daily_average.py
|
BastiTee/bastis-python-toolbox
|
c313cf12607a973a1a8b8a9fbd73b2c8a47a82d8
|
[
"Apache-2.0"
] | 1
|
2016-04-06T14:09:43.000Z
|
2016-04-06T14:09:43.000Z
|
tests/imperative_vs_reactive/test_get_daily_average.py
|
BastiTee/bastis-python-toolbox
|
c313cf12607a973a1a8b8a9fbd73b2c8a47a82d8
|
[
"Apache-2.0"
] | null | null | null |
tests/imperative_vs_reactive/test_get_daily_average.py
|
BastiTee/bastis-python-toolbox
|
c313cf12607a973a1a8b8a9fbd73b2c8a47a82d8
|
[
"Apache-2.0"
] | 1
|
2022-03-19T04:21:40.000Z
|
2022-03-19T04:21:40.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test suite for the daily average Toggl API process."""
from random import random
from tempfile import NamedTemporaryFile
from time import sleep, time
from unittest import TestCase
from recipes.imperative_vs_reactive.get_daily_average_imp import \
    get_avg_daily_working_hours as imp
from recipes.imperative_vs_reactive.get_daily_average_rx import \
    get_avg_daily_working_hours as rx
class TestSuite(TestCase): # noqa: D101
    def test_integration(self):
        """Integration test for get_daily_average."""
        # Between 23rd of April and 4th of May we spend an average
        # of 3.981 simulated hours at work for the given 4-hour contract.
        from_day = '2018-04-23'
        to_day = '2018-05-04'
        expected_worktime_average = 3.98056125
        expected_workdays = 10
        # Run test for imperative implementation using a mocked client
        now = time()
        tmp_file_imp = NamedTemporaryFile()
        result_imp = imp(from_day, to_day, tmp_file_imp.name,
                         MockedTogglApiClient)
        time_imp = time() - now
        print('----')
        # Run test for reactive implementation using a mocked client
        now = time()
        tmp_file_rx = NamedTemporaryFile()
        result_rx = rx(from_day, to_day, tmp_file_rx.name,
                       MockedTogglApiClient)
        time_rx = time() - now
        print('----')
        # Check results
        self.assertEquals(result_imp,
                          (expected_worktime_average, expected_workdays))
        self.assertEquals(result_rx,
                          (expected_worktime_average, expected_workdays))
        # Print results
        print(f'imp-result = {round(result_imp[0], 2)} h '
              + f'@{result_rx[1]} days (took: {round(time_imp, 4)} sec)')
        print(
            f'rx-result = {round(result_rx[0], 2)} h '
            + f'@{result_rx[1]} days (took: {round(time_rx, 4)} sec)')
        print(f'rx speed-up = {time_imp / time_rx}')
class MockedTogglApiClient():
    """A mocked Toggl API client.
    Assuming that we have a 4-hour work contract, the Toggl API might
    return values between 3.8 and 4.2 hours of total working hours per day.
    Toggl API responses take between 0.0 and 0.5 seconds in our mocked version.
    """
    def __init__(self, credentials=None): # noqa: D107
        self.fake_values = {
            '2018-04-23T00:00:00>>2018-04-23T23:59:59': 14853641, # 4.1260 h
            '2018-04-24T00:00:00>>2018-04-24T23:59:59': 13725371,
            '2018-04-25T00:00:00>>2018-04-25T23:59:59': 14209405,
            '2018-04-26T00:00:00>>2018-04-26T23:59:59': 13969792,
            '2018-04-27T00:00:00>>2018-04-27T23:59:59': 14591221,
            '2018-04-28T00:00:00>>2018-04-28T23:59:59': 0,
            '2018-04-29T00:00:00>>2018-04-29T23:59:59': 0,
            '2018-04-30T00:00:00>>2018-04-30T23:59:59': 14012216,
            '2018-05-01T00:00:00>>2018-05-01T23:59:59': 14802751,
            '2018-05-02T00:00:00>>2018-05-02T23:59:59': 14752767,
            '2018-05-03T00:00:00>>2018-05-03T23:59:59': 14601954,
            '2018-05-04T00:00:00>>2018-05-04T23:59:59': 13781087
        }
    def get_working_hours_for_range(self, range_from, range_to): # noqa: D102
        # A simulated API request takes between 0.0 and 0.5 seconds ...
        sleep(random() / 2)
        # ... and returns a fake value.
        return self.fake_values.get('>>'.join([range_from, range_to]), 0)
if __name__ == '__main__':
    TestSuite().test_integration()
| 39.450549
| 79
| 0.620334
| 507
| 3,590
| 4.232742
| 0.327416
| 0.04753
| 0.044734
| 0.037279
| 0.237651
| 0.17987
| 0.162162
| 0.1137
| 0.070829
| 0.027959
| 0
| 0.184339
| 0.256546
| 3,590
| 90
| 80
| 39.888889
| 0.619708
| 0.217827
| 0
| 0.140351
| 0
| 0
| 0.266836
| 0.18139
| 0
| 0
| 0
| 0
| 0.035088
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.087719
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e4a37d31db8b27c20ff44c3b6b28b18b2dd20b1
| 4,077
|
py
|
Python
|
pox/stats_monitor.py
|
nachtkatze/sdn-diagnosis
|
22b187d276bf302ef5811abc946b1af125dd17bc
|
[
"Apache-2.0"
] | null | null | null |
pox/stats_monitor.py
|
nachtkatze/sdn-diagnosis
|
22b187d276bf302ef5811abc946b1af125dd17bc
|
[
"Apache-2.0"
] | null | null | null |
pox/stats_monitor.py
|
nachtkatze/sdn-diagnosis
|
22b187d276bf302ef5811abc946b1af125dd17bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Oscar Araque
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A skeleton POX component
You can customize this to do whatever you like. Don't forget to
adjust the Copyright above, and to delete the Apache license if you
don't want to release under Apache (but consider doing so!).
Rename this file to whatever you like, .e.g., mycomponent.py. You can
then invoke it with "./pox.py mycomponent" if you leave it in the
ext/ directory.
Implement a launch() function (as shown below) which accepts commandline
arguments and starts off your component (e.g., by listening to events).
Edit this docstring and your launch function's docstring. These will
show up when used with the help component ("./pox.py help --mycomponent").
"""
# Import some POX stuff
from pox.core import core # Main POX object
import pox.openflow.libopenflow_01 as of # OpenFlow 1.0 library
import pox.lib.packet as pkt # Packet parsing/construction
from pox.lib.addresses import EthAddr, IPAddr # Address types
import pox.lib.util as poxutil # Various util functions
import pox.lib.revent as revent # Event library
import pox.lib.recoco as recoco # Multitasking library
from pox.openflow.of_json import *
import multiprocessing
import json
# Create a logger for this component
log = core.getLogger("Monitor")
def _send_to_pipe(data):
    with open('/dev/shm/poxpipe','w') as pipe:
        pipe.write(data)
def _to_pipe(data):
    p = multiprocessing.Process(target=_send_to_pipe, args=(data,))
    p.start()
def _go_up (event):
    # Event handler called when POX goes into up state
    # (we actually listen to the event in launch() below)
    log.info("Monitor application ready.")
def _request_stats():
    log.debug('Number of connections: {}'.format(len(core.openflow.connections)))
    log.info('Sending stats requests')
    for connection in core.openflow.connections:
        log.debug("Sending stats request")
        connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
        connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def _handle_flowstats(event):
    stats = flow_stats_to_list(event.stats)
    dpid = poxutil.dpidToStr(event.connection.dpid)
    log.debug('Received flow stats from {}'.format(dpid))
    data = {'type': 'switch_flowstats', 'data': {'switch': dpid, 'stats': stats}}
    log.debug(data)
    data = json.dumps(data)
    data += '#'
    _to_pipe(data)
def _handle_portstats(event):
    stats = flow_stats_to_list(event.stats)
    dpid = poxutil.dpidToStr(event.connection.dpid)
    log.debug('Received port stats from {}'.format(dpid))
    data = {'type':"switch_portstats", "data":{'switch':dpid, 'stats':stats}}
    data = json.dumps(data)
    data += '#'
    _to_pipe(data)
def _handle_LinkEvent(event):
    is_up = event.added is True and event.removed is False
    link = event.link.end
    data = {'type': 'linkstats', 'data': {'link':link, 'up': is_up}}
    data = json.dumps(data)
    data += '#'
    _to_pipe(data)
@poxutil.eval_args
def launch (bar = False):
    """
    The default launcher just logs its arguments
    """
    log.warn("Bar: %s (%s)", bar, type(bar))
    core.addListenerByName("UpEvent", _go_up)
    core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)
    core.openflow.addListenerByName("FlowStatsReceived", _handle_flowstats)
    core.openflow.addListenerByName("PortStatsReceived", _handle_portstats)
    recoco.Timer(7, _request_stats, recurring=True)
| 36.72973
| 81
| 0.709345
| 576
| 4,077
| 4.920139
| 0.401042
| 0.021171
| 0.017643
| 0.017996
| 0.179605
| 0.162668
| 0.162668
| 0.139379
| 0.12844
| 0.12844
| 0
| 0.003909
| 0.184204
| 4,077
| 110
| 82
| 37.063636
| 0.848166
| 0.382144
| 0
| 0.224138
| 0
| 0
| 0.133441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.172414
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e4a39484ed02c469223ab4065ec6d989a83a302
| 7,623
|
py
|
Python
|
tests/app_example.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | 3
|
2018-09-13T19:55:47.000Z
|
2018-09-15T18:31:22.000Z
|
tests/app_example.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | null | null | null |
tests/app_example.py
|
omarryhan/flask-stateless-auth
|
c6acefc55050d1a53235ead20cb7d5e9eb4bbf9a
|
[
"MIT"
] | null | null | null |
import os
import datetime
import secrets
import json
from flask import Flask, abort, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from werkzeug.security import safe_str_cmp
from flask_stateless_auth import (
StatelessAuthError,
StatelessAuthManager,
current_stateless_user,
UserMixin,
TokenMixin,
token_required,
)
db = SQLAlchemy()
stateless_auth_manager = StatelessAuthManager()
app = Flask(__name__.split(".")[0])
class Config:
# Stateless auth configs
# DEFAULT_AUTH_TYPE = 'Bearer' # Default
# TOKEN_HEADER = 'Authorization'# Default
# ADD_CONTEXT_PROCESSOR = True # Default
# Other configs
TESTING = False
TOKENS_BYTES_LENGTH = 32
ACCESS_TOKEN_DEFAULT_EXPIRY = 3600 # seconds
REFRESH_TOKEN_DEFAULT_EXPIRY = 365 # days
DB_NAME = "flask_stateless_auth_db"
SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_NAME
SQLALCHEMY_TRACK_MODIFICATIONS = False
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String, unique=True)
api_token = db.relationship("ApiToken", backref="user", uselist=False)
class ApiToken(db.Model, TokenMixin):
__tablename__ = "api_token"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
refresh_token = db.Column(db.String, nullable=False, unique=True, index=True)
access_token = db.Column(db.String, nullable=False, unique=True, index=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
    created_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)  # pass the callable so each row gets its own timestamp
refresh_token_expiry = db.Column(
db.Integer, nullable=False, default=Config.REFRESH_TOKEN_DEFAULT_EXPIRY
)
access_token_expiry = db.Column(
db.Integer, nullable=False, default=Config.ACCESS_TOKEN_DEFAULT_EXPIRY
)
def __init__(
self,
user_id,
refresh_token_expiry=None,
access_token_expiry=None,
tokens_bytes_length=Config.TOKENS_BYTES_LENGTH,
):
self.user_id = user_id
if refresh_token_expiry and type(refresh_token_expiry) == int:
self.refresh_token_expiry = refresh_token_expiry
if access_token_expiry and type(access_token_expiry) == int:
self.access_token_expiry = access_token_expiry
# create tokens
self.refresh_tokens(tokens_bytes_length)
def refresh_tokens(self, tokens_bytes_length=Config.TOKENS_BYTES_LENGTH):
self.access_token = secrets.base64.standard_b64encode(
secrets.token_bytes(tokens_bytes_length)
).decode("utf-8")
self.refresh_token = secrets.base64.standard_b64encode(
secrets.token_bytes(tokens_bytes_length)
).decode("utf-8")
self.created_on = datetime.datetime.now()
@property
def access_is_expired(self):
expiry_time = self.created_on + datetime.timedelta(
seconds=self.access_token_expiry
)
if datetime.datetime.now() <= expiry_time:
return False
else:
return True
@property
def refresh_is_expired(self):
expiry_time = self.created_on + datetime.timedelta(
days=self.refresh_token_expiry
)
if datetime.datetime.now() <= expiry_time:
return False
else:
return True
def token_expired(self, token_type, auth_type):
if token_type == "access":
return self.access_is_expired
elif token_type == "refresh":
return self.refresh_is_expired
else:
raise NameError("Invalid token name")
@property
def as_dict(self):
return {
"access_token": self.access_token,
"expiry": self.access_token_expiry,
"refresh_token": self.refresh_token,
}
@stateless_auth_manager.user_loader
def user_by_token(token):
try:
user = User.query.filter_by(id=token.user_id).one()
except NoResultFound:
raise StatelessAuthError(
msg="Server error", code=500, type_="Server"
        ) # Tokens should always have a user, hence the 500 not the 401
except Exception as e:
raise StatelessAuthError(msg="Server error", code=500, type_="Server")
# log.critical(e)
else:
return user
@stateless_auth_manager.token_loader
def token_model_by(token, auth_type, token_type="access"):
try:
if token_type == "access":
token_model = ApiToken.query.filter_by(access_token=token).one()
elif token_type == "refresh":
token_model = ApiToken.query.filter_by(refresh_token=token).one()
except NoResultFound:
raise StatelessAuthError(
msg="{} token doesn't belong to a user".format(token_type),
code=401,
type_="Token",
)
except Exception as e:
raise StatelessAuthError(msg="Server error", code=500, type_="Server")
# log.critical(e)
else:
return token_model
@app.route("/")
def index():
return "hello", 200
@app.route("/user", methods=["GET", "POST", "PUT", "DELETE"])
def user_endpoint():
data = json.loads(request.data)
if request.method == "POST":
user = User(username=data["username"])
db.session.add(user)
elif request.method == "DELETE":
user = User.query.filter_by(username=data["username"]).first()
db.session.delete(user)
db.session.commit()
data = {"msg": "Success!"}
return jsonify(data), 201
@app.route("/create_token", methods=["POST"])
def create_token():
data = json.loads(request.data)
user = User.query.filter_by(username=data["username"]).first()
if user.api_token:
token = user.api_token
token.refresh_tokens()
else:
token = ApiToken(user_id=user.id)
db.session.add(token)
db.session.commit()
return jsonify(token.as_dict), 201
@app.route("/delete_token", methods=["DELETE"])
def delete_token():
data = json.loads(request.data)
token = User.query.filter_by(username=data["username"]).one().api_token
db.session.delete(token)
db.session.commit()
return jsonify({"msg": "Success!"}), 201
@app.route("/refresh_token", methods=["PUT"])
@token_required(token_type="refresh")
def refresh_token():
current_stateless_user.api_token.refresh_tokens()
db.session.add(current_stateless_user.api_token)
db.session.commit()
return jsonify(current_stateless_user.api_token.as_dict), 201
@app.route("/secret", methods=["GET"])
@token_required(token_type="access") # access by default
def secret():
data = {"secret": "Stateless auth is awesome :O"}
return jsonify(data), 200
@app.route("/whoami", methods=["GET"])
@token_required
def whoami():
data = {"my_username": current_stateless_user.username}
return jsonify(data), 200
@app.route("/no_current_stateless_user")
def no_current_stateless_user():
if not current_stateless_user:
username = "None"
else:
username = current_stateless_user.username
data = {"current_stateless_username": username}
return jsonify(data), 200
@app.errorhandler(StatelessAuthError)
def handle_stateless_auth_error(error):
return jsonify({"error": error.full_msg}), error.code
if __name__ == "__main__":
app.config.from_object(Config())
db.init_app(app)
with app.app_context():
db.create_all()
stateless_auth_manager.init_app(app)
app.run()
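# Illustrative client (not part of the example app): a sketch of the token flow
# the endpoints above implement, assuming the app is running locally on Flask's
# default port and the default 'Authorization: Bearer <token>' header scheme.
# The username and base URL below are placeholders.
import json
import requests

BASE_URL = "http://127.0.0.1:5000"

requests.post(BASE_URL + "/user", data=json.dumps({"username": "alice"}))
token = requests.post(
    BASE_URL + "/create_token", data=json.dumps({"username": "alice"})
).json()
auth_headers = {"Authorization": "Bearer " + token["access_token"]}
print(requests.get(BASE_URL + "/secret", headers=auth_headers).json())
print(requests.get(BASE_URL + "/whoami", headers=auth_headers).json())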
| 30.987805
| 88
| 0.674406
| 934
| 7,623
| 5.251606
| 0.188437
| 0.04159
| 0.036697
| 0.017329
| 0.370438
| 0.333129
| 0.240367
| 0.232824
| 0.214883
| 0.185117
| 0
| 0.009845
| 0.213827
| 7,623
| 245
| 89
| 31.114286
| 0.80861
| 0.038436
| 0
| 0.255102
| 0
| 0
| 0.073831
| 0.010254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086735
| false
| 0
| 0.045918
| 0.015306
| 0.336735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e4e3e3f65d730e416b620ade003178d96c61532
| 920
|
py
|
Python
|
stereo/stereo.py
|
whaleygeek/microbit_python
|
1fa8e0f34cfa2a92d7c5c32fc5ee5287c5d5b105
|
[
"MIT"
] | 8
|
2016-11-15T23:04:25.000Z
|
2021-05-17T17:42:47.000Z
|
stereo/stereo.py
|
whaleygeek/microbit_python
|
1fa8e0f34cfa2a92d7c5c32fc5ee5287c5d5b105
|
[
"MIT"
] | null | null | null |
stereo/stereo.py
|
whaleygeek/microbit_python
|
1fa8e0f34cfa2a92d7c5c32fc5ee5287c5d5b105
|
[
"MIT"
] | null | null | null |
from microbit import *
import music
A = False
B = False
PITCH = 440
# PIN2 read_analog()
ACTION_VALUE = 50
VOLUMEUP_VALUE = 150
VOLUMEDOWN_VALUE = 350
#nothing: 944
prev_l = False
prev_r = False
l = False
r = False
while True:
v = pin2.read_analog()
if v < ACTION_VALUE:
l,r = True, True
elif v < VOLUMEUP_VALUE:
l,r = False, True
elif v < VOLUMEDOWN_VALUE:
l,r = True, False
else:
l,r = False, False
if l != prev_l:
prev_l = l
if l:
music.pitch(PITCH, pin=pin0)
display.set_pixel(0,2,9)
else:
display.set_pixel(0,2,0)
music.stop(pin0)
if r != prev_r:
prev_r = r
if r:
display.set_pixel(4,2,9)
music.pitch(PITCH, pin=pin1)
else:
display.set_pixel(4,2,0)
music.stop(pin1)
| 18.77551
| 40
| 0.519565
| 130
| 920
| 3.538462
| 0.315385
| 0.052174
| 0.130435
| 0.047826
| 0.147826
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056537
| 0.384783
| 920
| 48
| 41
| 19.166667
| 0.756184
| 0.032609
| 0
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e50073943f2d59f2a64f9e25a36110605822852
| 1,062
|
py
|
Python
|
comments/migrations/0004_auto_20170531_1011.py
|
salazarpardo/redinnovacion
|
3f7c13af0af1887112a0492aea7782871fba0129
|
[
"CC-BY-3.0"
] | null | null | null |
comments/migrations/0004_auto_20170531_1011.py
|
salazarpardo/redinnovacion
|
3f7c13af0af1887112a0492aea7782871fba0129
|
[
"CC-BY-3.0"
] | null | null | null |
comments/migrations/0004_auto_20170531_1011.py
|
salazarpardo/redinnovacion
|
3f7c13af0af1887112a0492aea7782871fba0129
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('comments', '0003_comment_public'),
]
operations = [
migrations.CreateModel(
name='CommentLike',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(help_text='creation date', auto_now_add=True)),
('updated_at', models.DateTimeField(help_text='edition date', auto_now=True, null=True)),
('comment', models.ForeignKey(to='comments.Comment')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='commentlike',
unique_together=set([('comment', 'user')]),
),
]
| 34.258065
| 114
| 0.615819
| 103
| 1,062
| 6.126214
| 0.563107
| 0.031696
| 0.050713
| 0.066561
| 0.091918
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006297
| 0.252354
| 1,062
| 30
| 115
| 35.4
| 0.788413
| 0.019774
| 0
| 0.083333
| 0
| 0
| 0.130895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e522957a432795bf32198db1cc68b1e2615e3f9
| 1,924
|
py
|
Python
|
Script/calculate_RMSD.py
|
dhruvsangamwar/Protein-structure-prediction
|
99364bfd62f8293ddbe8e2c9a86ca7850b270d44
|
[
"MIT"
] | 1
|
2022-01-30T08:20:08.000Z
|
2022-01-30T08:20:08.000Z
|
Script/calculate_RMSD.py
|
dhruvsangamwar/ECS_129_Protein-structure-prediction
|
99364bfd62f8293ddbe8e2c9a86ca7850b270d44
|
[
"MIT"
] | null | null | null |
Script/calculate_RMSD.py
|
dhruvsangamwar/ECS_129_Protein-structure-prediction
|
99364bfd62f8293ddbe8e2c9a86ca7850b270d44
|
[
"MIT"
] | null | null | null |
import pdbCleanup as pc
import fxndefinitions as f
import numpy as np
from numpy.linalg import eig
pc.takeInput1()
DataFrame1 = []
pc.CsvToDataframe(DataFrame1)
pc.takeInput2()
DataFrame2 = []
pc.CsvToDataframe(DataFrame2)
xtil = [0, 0, 0]
ytil = [0, 0, 0]
x = np.array(DataFrame1)
y = np.array(DataFrame2)
# This finds the number of CA atoms in both of the proteins
N1 = np.size(x, 0)
N2 = np.size(y, 0)
# finding the average of the x coords in protein 1 and 2 (arr1 & 2)
# these two functions calculate the barycenter
# Here we will be finding Xtil && Ytil = X && Y - G
Gx = f.findG(x, N1)
Gy = f.findG(y, N2)
xtil = np.subtract(x, Gx)
ytil = np.subtract(y, Gy)
# we now have the ~x_k Coords and the ~y_k Coords respectively
# this function will calculate all the 9 R values
R11 = R12 = R13 = R21 = R22 = R23 = R31 = R32 = R33 = 0
for i in range(0, N1):
R11 += xtil[i][0] * ytil[i][0]
R12 += xtil[i][0] * ytil[i][1]
R13 += xtil[i][0] * ytil[i][2]
R21 += xtil[i][1] * ytil[i][0]
R22 += xtil[i][1] * ytil[i][1]
R23 += xtil[i][1] * ytil[i][2]
R31 += xtil[i][2] * ytil[i][0]
R32 += xtil[i][2] * ytil[i][1]
R33 += xtil[i][2] * ytil[i][2]
# matrix given by equation 10 from the paper
Matrix = np.array([[R11+R22+R33, R23-R32, R31-R13, R12-R21],
[R23-R32, R11-R22-R33, R12+R21, R13+R31],
[R31-R13, R12+R21, -R11+R22-R33, R23+R32],
[R12-R21, R13+R31, R23+R32, -R11-R22+R33]])
# Here we calculate the maxEigenvalue for the final calculation
w, v = eig(Matrix)
maxEig = np.amax(w)
# Now we will find the best fit RMSD using the steps below
temp = [0, 0, 0]
for i in range(0, N1):
temp += np.add((np.square(xtil[i])), np.square(ytil[i]))
n = temp[0] + temp[1] + temp[2]
var = np.subtract(n, 2*maxEig)
temp2 = np.true_divide(var, np.size(xtil, 0))
RMSD = np.sqrt(abs(temp2))
RMSD = round(RMSD, 2)
print(RMSD)
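# Compact restatement of the closed form computed above (hypothetical helper,
# not in the original script): RMSD = sqrt((sum_k(|x~_k|^2 + |y~_k|^2) - 2*lambda_max) / N),
# with lambda_max the largest eigenvalue of the 4x4 matrix and N the number of CA atoms.
import numpy as np  # already imported above; repeated so this sketch stands alone

def best_fit_rmsd(xtil, ytil, max_eig):
    # Sum of squared barycenter-centred coordinates over all atoms and axes.
    total = np.sum(np.square(xtil)) + np.sum(np.square(ytil))
    return np.sqrt(abs(total - 2 * max_eig) / np.size(xtil, 0))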
| 24.666667
| 67
| 0.613825
| 348
| 1,924
| 3.385057
| 0.316092
| 0.042445
| 0.03056
| 0.025467
| 0.160441
| 0.025467
| 0.025467
| 0
| 0
| 0
| 0
| 0.112517
| 0.219335
| 1,924
| 77
| 68
| 24.987013
| 0.671771
| 0.254158
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e572d40ef88a1ec3058d9cc94eb6dce557f2d6d
| 4,728
|
py
|
Python
|
src/voicemaker/voicemaker.py
|
IAL32/voicemaker
|
66c9dd25749743d94bb9c3aac8ba2c858f327723
|
[
"MIT"
] | null | null | null |
src/voicemaker/voicemaker.py
|
IAL32/voicemaker
|
66c9dd25749743d94bb9c3aac8ba2c858f327723
|
[
"MIT"
] | 1
|
2022-03-04T14:52:16.000Z
|
2022-03-08T08:00:59.000Z
|
src/voicemaker/voicemaker.py
|
IAL32/voicemaker
|
66c9dd25749743d94bb9c3aac8ba2c858f327723
|
[
"MIT"
] | null | null | null |
import requests
LANGUAGES_LIST = [
'en-US', 'en-GB', 'en-AU', 'en-HK', 'en-NZ', 'en-SG', 'en-ZA', 'de-DE',
'ar-XA', 'ar-SA', 'bn-IN', 'bg-BG', 'ca-ES', 'cmn-CN', 'zh-HK', 'cmn-TW',
'cy-GB', 'cs-CZ', 'da-DK', 'de-CH', 'es-AR', 'es-CO', 'es-US', 'ga-IE',
'gu-IN', 'hr-HR', 'mr-IN', 'ms-MY', 'mt-MT', 'nl-NL', 'nl-BE', 'en-CA',
'en-IN', 'en-IE', 'et-EE', 'en-PH', 'fil-PH', 'fi-FI', 'fr-BE', 'fr-FR',
'fr-CA', 'fr-CH', 'el-GR', 'he-IL', 'hi-IN', 'hu-HU', 'id-ID', 'it-IT',
'ja-JP', 'lv-LV', 'lt-LT', 'ko-KR', 'nb-NO', 'pl-PL', 'pt-PT', 'pt-BR',
'ro-RO', 'ru-RU', 'sk-SK', 'sw-KE', 'es-ES', 'es-MX', 'es-LA', 'es-US',
'sl-SI', 'sv-SE', 'tr-TR', 'ta-IN', 'te-IN', 'th-TH', 'uk-UA', 'ur-PK',
'vi-VN'
]
class Voicemaker():
token: str = None
base_url: str = None
def __init__(self, token=None) -> None:
self.base_url = "https://developer.voicemaker.in/voice"
self.token = None
if token is not None:
self.set_token(token)
def set_token(self, token: str) -> None:
"""Sets the API token. You can get yours from https://developer.voicemaker.in/apidocs
Args:
token (str): API Token.
"""
self.token = token
def __headers__(self) -> dict:
headers = {'Content-Type': 'application/json'}
if self.token is not None:
headers['Authorization'] = 'Bearer ' + self.token
return headers
def __get__(self, api: str, params={}):
result = requests.get(self.base_url + api, params=params,
headers=self.__headers__())
result.raise_for_status()
return result.json()
def __post__(self, api: str, data={}):
result = requests.post(self.base_url + api, json=data,
headers=self.__headers__())
result.raise_for_status()
return result.json()
def generate_audio_url(self,
text: str,
engine='neural', voice_id='ai3-Jony', language_code='en-US',
output_format='mp3', sample_rate=48000, effect='default',
master_speed=0, master_volume=0,
master_pitch=0) -> str:
"""Generates an audio URL from the given text and using the selected options
Args:
text (str): Text to generate an audio from.
            engine (str, optional): Choose between 'standard' and 'neural'. Defaults to 'neural'.
            voice_id (str, optional): Uses the selected voice id from the ones available for the selected language. Defaults to 'ai3-Jony'.
language_code (str, optional): Language of the target voice. Defaults to 'en-US'.
output_format (str, optional): Choose from 'mp3' and 'wav'. Defaults to 'mp3'.
sample_rate (int, optional): Choose from 48000, 44100, 24000, 22050, 16000, 8000. Defaults to 48000.
effect (str, optional): Effect to give to the voice. Defaults to 'default'.
master_speed (int, optional): Speed from -100 to 100. Defaults to 0.
master_volume (int, optional): Volume of the voice from -100 to 100. Defaults to 0.
master_pitch (int, optional): Pitch of the voice, from -100 to 100. Defaults to 0.
Returns:
str: URL of the MP3 to download, hosted on Voicemaker.in
"""
return self.__post__('/api', {
'Text': text,
'Engine': engine,
'VoiceId': voice_id,
'LanguageCode': language_code,
'OutputFormat': output_format,
'SampleRate': str(sample_rate),
'Effect': effect,
'MasterSpeed': str(master_speed),
'MasterVolume': str(master_volume),
'MasterPitch': str(master_pitch),
})['path']
def generate_audio_to_file(self, out_path: str, text: str, **kwargs) -> None:
"""Generates audio from text and saves it to a file
Args:
out_path (str): Path where the generated audio should be written
text (str): Text to generate an audio from
"""
url = self.generate_audio_url(text, **kwargs)
file = requests.get(url)
with open(out_path, 'wb') as file_handle:
file_handle.write(file.content)
def list_voices(self, language='en-US') -> list:
"""Lists all available voices for the selected language
Args:
language (str, optional): Language of choice. Defaults to 'en-US'.
Raises:
ValueError: When the selected language is not supported
Returns:
            list: List of voices, each of the form { "Engine": "xxx", "VoiceId": "xxx", "VoiceGender": "xxx", "VoiceWebname": "xxx", "Country": "XX", "Language": "xx-XX" }
"""
if language not in LANGUAGES_LIST:
raise ValueError('Selected language is not supported')
return self.__get__('/list', {'language': language})['data']['voices_list']
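# Illustrative usage (not part of the wrapper): only methods defined above are
# used; the API token and output filename are placeholders.
if __name__ == "__main__":
    vm = Voicemaker(token="YOUR_API_TOKEN")
    print(vm.list_voices(language="en-US"))
    vm.generate_audio_to_file("hello.mp3", "Hello from Voicemaker",
                              voice_id="ai3-Jony", language_code="en-US")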
| 39.4
| 164
| 0.597716
| 663
| 4,728
| 4.143288
| 0.334842
| 0.036403
| 0.012013
| 0.013105
| 0.123407
| 0.101565
| 0.101565
| 0.101565
| 0.065526
| 0.065526
| 0
| 0.019199
| 0.239848
| 4,728
| 119
| 165
| 39.731092
| 0.745131
| 0.356176
| 0
| 0.092308
| 0
| 0
| 0.223598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123077
| false
| 0
| 0.015385
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e5810f45ee6abfb855c478735026a678b651dd9
| 1,365
|
py
|
Python
|
Lecture/Kapitel 9 - Seite 235 - Implementierung des Gradientenverfahrens.py
|
PhilippMatthes/tensorflow-playground
|
b5fee6e5f5044dc5cbcd54529d559388a3df7813
|
[
"MIT"
] | null | null | null |
Lecture/Kapitel 9 - Seite 235 - Implementierung des Gradientenverfahrens.py
|
PhilippMatthes/tensorflow-playground
|
b5fee6e5f5044dc5cbcd54529d559388a3df7813
|
[
"MIT"
] | null | null | null |
Lecture/Kapitel 9 - Seite 235 - Implementierung des Gradientenverfahrens.py
|
PhilippMatthes/tensorflow-playground
|
b5fee6e5f5044dc5cbcd54529d559388a3df7813
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
n_epochs = 1000
learning_rate = 0.01
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
gradients = 2 / m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta - learning_rate * gradients)
def run():
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
if epoch % 100 == 0:
print("Epoch", epoch, "MSE =", mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
run()
gradients = tf.gradients(mse, [theta])[0]
run()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
run()
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
run()
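# For reference (not part of the lecture script): every variant above performs
# the same batch update, theta <- theta - learning_rate * (2/m) * X^T (X theta - y).
# A plain-NumPy restatement using the arrays already defined in this file:
theta_np = np.random.uniform(-1.0, 1.0, size=(n + 1, 1))
targets = housing.target.reshape(-1, 1)
for _ in range(n_epochs):
    grad = 2 / m * housing_data_plus_bias.T.dot(housing_data_plus_bias.dot(theta_np) - targets)
    theta_np = theta_np - learning_rate * grad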
| 26.764706
| 81
| 0.69304
| 203
| 1,365
| 4.507389
| 0.384236
| 0.078689
| 0.048087
| 0.04153
| 0.072131
| 0.072131
| 0
| 0
| 0
| 0
| 0
| 0.024583
| 0.165568
| 1,365
| 51
| 82
| 26.764706
| 0.778753
| 0
| 0
| 0.171429
| 0
| 0
| 0.022694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.114286
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e582f1280b1545b27d8bb65ef57684f484bd7bc
| 1,634
|
py
|
Python
|
python/Fluoroseq/obsolete/scripts/intrinsic_pr_bounds.py
|
erisyon/whatprot
|
176cd7e6ee99ea3f91794dcf1ec14f3578b7ee3c
|
[
"MIT"
] | null | null | null |
python/Fluoroseq/obsolete/scripts/intrinsic_pr_bounds.py
|
erisyon/whatprot
|
176cd7e6ee99ea3f91794dcf1ec14f3578b7ee3c
|
[
"MIT"
] | 1
|
2021-06-12T00:50:08.000Z
|
2021-06-15T17:59:12.000Z
|
python/Fluoroseq/obsolete/scripts/intrinsic_pr_bounds.py
|
erisyon/whatprot
|
176cd7e6ee99ea3f91794dcf1ec14f3578b7ee3c
|
[
"MIT"
] | 1
|
2021-06-11T19:34:43.000Z
|
2021-06-11T19:34:43.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Matthew Beauregard Smith (UT Austin)
"""
from common.peptide import Peptide
from plotting.plot_pr_curve import plot_pr_curve
from numpy import load
from simulate.label_peptides import label_peptides
TRUE_Y_FILE = 'C:/Users/Matthew/ICES/MarcotteLab/data/classification/control_15_proteins/true_pep_i.npy'
NUM_PEPTIDES = 705
NUM_CHANNELS = 3
LABEL_SET = ['DE','Y','C']
PEPTIDE_FILE = 'C:/Users/Matthew/ICES/MarcotteLab/data/classification/control_15_proteins/peps.csv'
true_y = load(TRUE_Y_FILE)
class GroundTruth:
def __init__(self, value):
self.value = value
def class_index(self):
return self.value
ground_truth = [0] * len(true_y)
for i in range(0, len(true_y)):
ground_truth[i] = GroundTruth(true_y[i])
f = open(PEPTIDE_FILE, 'r')
f.readline() # header
f.readline() # Zack's null line
line = f.readline()
peptides = [0] * NUM_PEPTIDES
i = 0
while line != '\n' and line != '':
items = line.split(",")
pep_id = items[0]
pep_str = items[-1]
peptides[i] = Peptide(pep_str, pep_id=pep_id)
line = f.readline()
i += 1
f.close()
dye_seqs = label_peptides(peptides, LABEL_SET)
id_to_prediction = {}
for dye_seq in dye_seqs:
for peptide in dye_seq.src_peptides:
id_to_prediction[int(peptide.pep_id)] = (
int(dye_seq.src_peptides[0].pep_id),
1 / len(dye_seq.src_peptides))
predictions = [0] * len(ground_truth)
for i in range(len(ground_truth)):
predictions[i] = id_to_prediction[ground_truth[i].value]
plot_pr_curve(predictions, ground_truth)
| 30.259259
| 105
| 0.676255
| 245
| 1,634
| 4.253061
| 0.35102
| 0.028791
| 0.03167
| 0.048944
| 0.128599
| 0.128599
| 0.128599
| 0.128599
| 0.128599
| 0.128599
| 0
| 0.01446
| 0.195838
| 1,634
| 53
| 106
| 30.830189
| 0.778539
| 0.056304
| 0
| 0.093023
| 0
| 0
| 0.120352
| 0.114943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.093023
| 0.023256
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e5b857f8383e340919c32b08170a5b4cd5f70b7
| 820
|
py
|
Python
|
python-basic-project/unit08/myfinance.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 9
|
2020-10-25T15:13:32.000Z
|
2022-03-26T11:27:21.000Z
|
python-basic-project/unit08/myfinance.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | null | null | null |
python-basic-project/unit08/myfinance.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 7
|
2021-03-01T11:06:45.000Z
|
2022-03-14T07:06:04.000Z
|
import requests
from bs4 import BeautifulSoup
def get_tickers(market=2):
url = f"http://comp.fnguide.com/SVO2/common/lookup_data.asp?mkt_gb={market}&comp_gb=1"
resp = requests.get(url)
data = resp.json()
codes = []
for comp in data:
code = comp['cd'][-6:]
codes.append(code)
return codes
def get_dvr(code):
try:
url = f"https://finance.naver.com/item/main.nhn?code={code}"
resp = requests.get(url)
html = resp.text
soup = BeautifulSoup(html, "html5lib")
tags = soup.select("#_dvr")
dvr = float(tags[0].text)
except:
dvr = 0
return dvr
if __name__ == "__main__":
kospi = get_tickers(market=2)
kosdaq = get_tickers(market=3)
print(len(kospi))
print(len(kosdaq))
print(get_dvr("005930"))
| 24.117647
| 90
| 0.603659
| 113
| 820
| 4.230089
| 0.522124
| 0.062762
| 0.100418
| 0.07113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026059
| 0.25122
| 820
| 34
| 91
| 24.117647
| 0.752443
| 0
| 0
| 0.071429
| 0
| 0.035714
| 0.19123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.214286
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e5c8076b3c080597643c7f2efec1d74b5c8f190
| 1,882
|
py
|
Python
|
elsie/draw.py
|
Kobzol/elsie
|
b7b784d8d04c9e0d545e18504cf4ad23b9e7e8c4
|
[
"MIT"
] | null | null | null |
elsie/draw.py
|
Kobzol/elsie
|
b7b784d8d04c9e0d545e18504cf4ad23b9e7e8c4
|
[
"MIT"
] | null | null | null |
elsie/draw.py
|
Kobzol/elsie
|
b7b784d8d04c9e0d545e18504cf4ad23b9e7e8c4
|
[
"MIT"
] | null | null | null |
def set_font_from_style(xml, style):
if "font" in style:
xml.set("font-family", style["font"])
if "size" in style:
xml.set("font-size", style["size"])
s = ""
if "color" in style:
s += "fill:{};".format(style["color"])
if style.get("bold", False):
s += "font-weight: bold;"
if style.get("italic", False):
s += "font-style: italic;"
if s:
xml.set("style", s)
def draw_text(xml, x, y, parsed_text, style, styles, id=None):
xml.element("text")
if id is not None:
xml.set("id", id)
xml.set("x", x)
xml.set("y", y)
anchor = {
"left": "start",
"middle": "middle",
"right": "end"
}
xml.set("text-anchor", anchor[style["align"]])
set_font_from_style(xml, style)
line_size = style["size"] * style["line_spacing"]
active_styles = [style]
xml.element("tspan")
for token_type, value in parsed_text:
if token_type == "text":
xml.text(value)
elif token_type == "newline":
for s in active_styles:
xml.close("tspan") # tspan
for i, s in enumerate(active_styles):
xml.element("tspan")
xml.set("xml:space", "preserve")
if i == 0:
xml.set("x", x)
xml.set("dy", line_size * value)
set_font_from_style(xml, s)
elif token_type == "begin":
s = styles[value]
active_styles.append(s)
xml.element("tspan")
xml.set("xml:space", "preserve")
set_font_from_style(xml, s)
elif token_type == "end":
xml.close("tspan")
active_styles.pop()
else:
raise Exception("Invalid token")
for s in active_styles:
xml.close("tspan")
xml.close("text")
| 26.885714
| 62
| 0.508502
| 238
| 1,882
| 3.89916
| 0.256303
| 0.071121
| 0.047414
| 0.068966
| 0.336207
| 0.299569
| 0.217672
| 0.217672
| 0.071121
| 0
| 0
| 0.000801
| 0.336344
| 1,882
| 69
| 63
| 27.275362
| 0.742194
| 0.002657
| 0
| 0.245614
| 0
| 0
| 0.148267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0
| 0
| 0.035088
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e5e4207adc8922463d0a98148721a7ee4e6e6eb
| 1,428
|
py
|
Python
|
demos/cookie-clicker/cookie-clicker.py
|
Coding-Kakis/Automating-Shenanigans-in-Python
|
c8e00231468668fbe231e0b35e32b9e99d5bd458
|
[
"MIT"
] | 1
|
2021-09-11T13:05:17.000Z
|
2021-09-11T13:05:17.000Z
|
demos/cookie-clicker/cookie-clicker.py
|
Coding-Kakis/Automating-Shenanigans-in-Python
|
c8e00231468668fbe231e0b35e32b9e99d5bd458
|
[
"MIT"
] | null | null | null |
demos/cookie-clicker/cookie-clicker.py
|
Coding-Kakis/Automating-Shenanigans-in-Python
|
c8e00231468668fbe231e0b35e32b9e99d5bd458
|
[
"MIT"
] | null | null | null |
# Cookie clicker auto-clicker
# Works for the classic version here: https://orteil.dashnet.org/experiments/cookie/
import pyautogui
def locate_cookie():
"""
Returns the locations of the Big Cookie
Does not return until the cookie is found
"""
loc = None
    while loc is None:
loc = pyautogui.locateCenterOnScreen('rsrc/bigcookie.png')
return loc
def click_cookie(loc, ntimes):
"""
Moves mouse to `loc` and clicks `ntimes`
"""
x,y = loc
pyautogui.moveTo(x,y)
for _ in range(ntimes):
pyautogui.click()
def round():
"""
Does 1 round.
Returns `Yes` if user wants to continue
Returns `No` otherwise.
"""
loc = locate_cookie()
pyautogui.alert(
title = "Found cookie!",
text = str(loc))
while True:
number_of_times = pyautogui.prompt(
title = "Continue?",
text = "Click how many times?")
if not number_of_times.isdigit():
pyautogui.alert(
title = "Error!",
text = "Input isn't an integer!")
continue
break
number_of_times = int(number_of_times)
click_cookie(loc, number_of_times)
reply = pyautogui.confirm(
title = "Done!",
text = "Another round?",
buttons = ["Yes", "No"])
return reply
while True:
reply = round()
if reply == "No":
break
| 19.833333
| 84
| 0.573529
| 166
| 1,428
| 4.843373
| 0.481928
| 0.049751
| 0.080846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001026
| 0.317227
| 1,428
| 71
| 85
| 20.112676
| 0.82359
| 0.219188
| 0
| 0.162162
| 0
| 0
| 0.109953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.027027
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e5e941943139ba0623e31d497e78bf7beb9106d
| 1,485
|
py
|
Python
|
esupa/templatetags/esupa.py
|
Abando/esupa
|
84888ff7d7879437659fd06a8707ac033f25b8ab
|
[
"Apache-2.0"
] | null | null | null |
esupa/templatetags/esupa.py
|
Abando/esupa
|
84888ff7d7879437659fd06a8707ac033f25b8ab
|
[
"Apache-2.0"
] | 4
|
2015-11-09T02:01:15.000Z
|
2016-01-20T14:51:13.000Z
|
esupa/templatetags/esupa.py
|
ekevoo/esupa
|
84888ff7d7879437659fd06a8707ac033f25b8ab
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2015, Ekevoo.com.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
from datetime import datetime
from django.template import Library
from django.template.defaultfilters import date
from django.utils.safestring import mark_safe
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext
register = Library()
@register.filter(expects_localtime=True)
def relative(when, include_span_tag=True):
if not when:
return ''
delta = (when - datetime.now(tz=when.tzinfo)).total_seconds()
if abs(delta) < 10: # 10 seconds threshold
text = ugettext(u"just now")
elif delta < 0:
text = ugettext(u"%s ago") % timesince(when)
else:
text = ugettext(u"in %s") % timeuntil(when)
if include_span_tag:
text = mark_safe(u"<span title='%(absolute)s'>%(relative)s</span>"
% {'relative': text, 'absolute': date(when, 'r')})
return text
| 37.125
| 107
| 0.703704
| 208
| 1,485
| 4.985577
| 0.548077
| 0.057859
| 0.043394
| 0.030858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011696
| 0.193939
| 1,485
| 39
| 108
| 38.076923
| 0.854637
| 0.400673
| 0
| 0
| 0
| 0
| 0.093714
| 0.045714
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e62b645957319fa784b6eef70fbe8c8812a5575
| 3,305
|
py
|
Python
|
ivy/pages.py
|
swsch/ivy
|
4932cf7541acff13815be613b0f3335b21c86670
|
[
"Unlicense"
] | null | null | null |
ivy/pages.py
|
swsch/ivy
|
4932cf7541acff13815be613b0f3335b21c86670
|
[
"Unlicense"
] | null | null | null |
ivy/pages.py
|
swsch/ivy
|
4932cf7541acff13815be613b0f3335b21c86670
|
[
"Unlicense"
] | null | null | null |
# ------------------------------------------------------------------------------
# This module renders and writes HTML pages to disk.
# ------------------------------------------------------------------------------
import re
import os
from . import site
from . import events
from . import filters
from . import utils
from . import templates
from . import hashes
from typing import List
from .nodes import Node
# A Page instance represents a single HTML page in the rendered site.
class Page(dict):
# Each Page is initialized with an associated Node instance. This node's
# location in the parse tree determines the output filepath for the page.
def __init__(self, node: Node):
self['node'] = node
self['site'] = site.config
self['inc'] = site.includes()
self['is_homepage'] = node.parent is None
# Render the page into HTML and write the HTML to disk.
def write(self):
self['filepath'] = self.get_filepath()
self['classes'] = self.get_class_list()
self['templates'] = self.get_template_list()
# Render the page into HTML.
events.fire('render_page', self)
html = templates.render(self)
site.rendered(1)
# Filter the HTML before writing it to disk.
html = filters.apply('page_html', html, self)
# Rewrite all @root/ urls.
html = utils.rewrite_urls(html, self['filepath'])
# Write the page to disk. Avoid overwriting identical files.
if not hashes.match(self['filepath'], html):
utils.writefile(self['filepath'], html)
site.written(1)
# Determine the output filepath for the page.
def get_filepath(self) -> str:
slugs = self['node'].path or ['index']
suffix = site.config['extension']
if suffix == '/':
if slugs[-1] == 'index':
slugs[-1] += '.html'
else:
slugs.append('index.html')
else:
slugs[-1] += suffix
filepath = site.out(*slugs)
return filters.apply('page_path', filepath, self)
# Assemble an ordered list of hyphenated slugs for generating CSS classes
# and running template lookups.
# E.g. <Node @root/foo/bar//> -> ['node-foo-bar', 'node-foo', 'node'].
def get_slug_list(self) -> List[str]:
slugs = []
stack = ['node'] + self['node'].path
while stack:
slugs.append('-'.join(stack))
stack.pop()
return filters.apply('page_slugs', slugs, self)
# Assemble a list of potential template names for the page.
def get_template_list(self) -> List[str]:
template_list = self.get_slug_list()
if 'template' in self['node']:
template_list.insert(0, self['node']['template'])
return filters.apply('page_templates', template_list, self)
# Assemble a list of CSS classes for the page's <body> element.
def get_class_list(self) -> List[str]:
class_list = self.get_slug_list()
if self['is_homepage']:
class_list.append('homepage')
if 'classes' in self['node']:
for item in str(self['node']['classes']).split(','):
class_list.append(item.strip())
return filters.apply('page_classes', class_list, self)
| 35.537634
| 80
| 0.579728
| 407
| 3,305
| 4.619165
| 0.29484
| 0.034043
| 0.042553
| 0.046809
| 0.106915
| 0.054255
| 0.031915
| 0
| 0
| 0
| 0
| 0.002444
| 0.257186
| 3,305
| 92
| 81
| 35.923913
| 0.76334
| 0.291074
| 0
| 0.033333
| 0
| 0
| 0.107097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e64ce743607e76cfc572cc4ea2cfe77fba2b173
| 5,646
|
py
|
Python
|
mvyaml/mvyaml.py
|
gchiesa/mvyaml
|
6d4c580bc596d220b45e6a6ccf9b2c3ef582f554
|
[
"MIT"
] | null | null | null |
mvyaml/mvyaml.py
|
gchiesa/mvyaml
|
6d4c580bc596d220b45e6a6ccf9b2c3ef582f554
|
[
"MIT"
] | null | null | null |
mvyaml/mvyaml.py
|
gchiesa/mvyaml
|
6d4c580bc596d220b45e6a6ccf9b2c3ef582f554
|
[
"MIT"
] | null | null | null |
"""Main module."""
from copy import deepcopy
from datetime import datetime
from difflib import Differ
from io import StringIO
from typing import IO, Iterable, AnyStr
from datadiff.tools import assert_equal
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
class MVYamlVersionNotFoundException(Exception):
pass
class MVYamlFileException(Exception):
pass
def as_yaml(data: Iterable) -> AnyStr:
yaml = YAML()
output = StringIO()
yaml.dump(data, output)
return output.getvalue()
class MVYaml(object):
protected_keys = ('__current', '__type', )
def __init__(self, base64=False):
self._b64 = base64
self._raw = CommentedMap()
self._yaml = YAML()
self._curr_version = None
self._curr_data = None
self._create()
def _create(self):
tag = self._make_tag()
self._raw[tag] = CommentedMap()
self._raw.insert(0, '__current', tag, 'current version')
self._raw.insert(1, '__type', None, 'base64 if value are base64')
self._commit(tag=tag, comment='Initial version')
def import_yaml(self, file: AnyStr = None, stream: AnyStr = None):
data = None
if file:
with open(file, 'r') as fp:
data = fp.read()
imported_data = self._yaml.load(data or stream)
self.override(imported_data)
return self
def load(self, file_handler: AnyStr = None, stream_data: AnyStr = None):
data = None
if file_handler:
with open(file_handler, 'r') as fp:
data = fp.read()
self._raw = self._yaml.load(data or stream_data)
        if not all(key in self._raw for key in self.protected_keys):
            raise MVYamlFileException(f'Not a valid mvyaml file. Perhaps it is a plain yaml you want to '
                                      f'import with import_yaml()?')
return self
def write(self, file_handler: IO = None, comment: AnyStr = None) -> [AnyStr, None]:
if not self._raw:
return
if self._has_changes():
self._commit(comment=comment)
output = file_handler or StringIO()
self._yaml.dump(self._raw, output)
return output.getvalue() if not file_handler else None
@property
def versions(self):
if not self._raw:
return []
return [k for k in self._raw.keys() if k not in self.protected_keys]
@property
def current(self):
return self._raw['__current']
@property
def data(self):
if not self._curr_data:
self._curr_data = deepcopy(self._raw[self._curr_version or self.current])
return self._curr_data
def with_version(self, version: str = '__current'):
if version not in self.versions:
raise MVYamlVersionNotFoundException(f'version {version} not found')
self._curr_version = version
self._curr_data = None
return self
@staticmethod
def _make_tag() -> str:
d = datetime.utcnow().isoformat()
return d
def override(self, data: [Iterable]):
self._curr_data = CommentedMap()
self._curr_data.update(data)
self._commit(comment='Overridden')
return self
def _commit(self, *args, **kwargs):
return self._commit_head(*args, **kwargs)
def _commit_head(self, tag: AnyStr = None, comment: AnyStr = None):
"""
apply the modifications on curr_data to the underling opened version
and create a new tag
"""
commented_map = CommentedMap()
commented_map.update(self._curr_data or self.data)
if tag:
self._raw[tag] = commented_map
self._raw['__current'] = tag
else:
new_tag = self._make_tag()
self._raw.insert(2, new_tag, commented_map, comment=comment)
self._raw['__current'] = new_tag
self._curr_version = None
self._curr_data = None
return self
def _commit_tail(self, tag: AnyStr = None, comment: AnyStr = None):
"""
apply the modifications on curr_data to the underling opened version
and create a new tag
"""
commented_map = CommentedMap()
commented_map.update(self._curr_data or self.data)
if tag:
self._raw[tag] = commented_map
self._raw['__current'] = tag
else:
new_tag = self._make_tag()
self._raw.insert(len(self._raw.keys()), new_tag, commented_map, comment=comment)
self._raw['__current'] = new_tag
self._curr_version = None
self._curr_data = None
return self
def _has_changes(self):
orig = self._raw[self._curr_version or self.current]
current = self._curr_data or self.data
try:
assert_equal(orig, current)
except AssertionError:
return True
return False
@property
def changes(self) -> AnyStr:
if not self._has_changes():
return ''
yaml_orig = as_yaml(self._raw[self._curr_version or self.current])
yaml_curr = as_yaml(self._curr_data)
differ = Differ()
result = list(differ.compare(
yaml_orig.splitlines(),
yaml_curr.splitlines()
))
return '\n'.join(result)
def set_current(self, version_label: AnyStr):
if version_label not in self.versions:
raise MVYamlVersionNotFoundException(f'request version [{version_label}] not found')
self._raw['__current'] = version_label
self.with_version(version_label)
return self
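# Illustrative usage (not part of the module): a sketch exercising only the
# methods defined above; the sample YAML content and comment text are placeholders.
if __name__ == "__main__":
    mv = MVYaml()
    mv.import_yaml(stream="app:\n  debug: false\n")
    mv.data["app"]["debug"] = True           # edit the working copy
    print(mv.changes)                        # diff against the current version
    print(mv.write(comment="enable debug"))  # commits a new version and dumps the file
    print(mv.versions)                       # timestamped version tags
    mv.with_version(mv.versions[-1])         # reopen an older version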
| 32.079545
| 108
| 0.613355
| 686
| 5,646
| 4.801749
| 0.189504
| 0.051002
| 0.047359
| 0.019429
| 0.357316
| 0.346387
| 0.287189
| 0.255009
| 0.212508
| 0.212508
| 0
| 0.003248
| 0.29118
| 5,646
| 175
| 109
| 32.262857
| 0.81984
| 0.034006
| 0
| 0.3
| 0
| 0
| 0.058299
| 0
| 0
| 0
| 0
| 0
| 0.021429
| 1
| 0.128571
| false
| 0.014286
| 0.092857
| 0.014286
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e6846fed01d2e5081085a1f9b9ca2203cbb1dad
| 1,137
|
py
|
Python
|
b2share/modules/deposit/search.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | null | null | null |
b2share/modules/deposit/search.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | null | null | null |
b2share/modules/deposit/search.py
|
hjhsalo/b2share-new
|
2a2a961f7cc3a5353850e9a409fd7e879c715b0b
|
[
"MIT"
] | 1
|
2020-09-29T10:56:03.000Z
|
2020-09-29T10:56:03.000Z
|
from elasticsearch_dsl import Q, TermsFacet
from flask import has_request_context
from flask_login import current_user
from invenio_search import RecordsSearch
from invenio_search.api import DefaultFilter
from .permissions import admin_permission_factory
def deposits_filter():
"""Filter list of deposits.
Permit to the user to see all if:
* The user is an admin (see
func:`invenio_deposit.permissions:admin_permission_factory`).
* It's called outside of a request.
Otherwise, it filters out any deposit where user is not the owner.
"""
if not has_request_context() or admin_permission_factory().can():
return Q()
else:
return Q(
'match', **{'_deposit.owners': getattr(current_user, 'id', 0)}
)
class DepositSearch(RecordsSearch):
"""Default search class."""
class Meta:
"""Configuration for deposit search."""
index = 'deposits'
doc_types = None
fields = ('*', )
facets = {
'status': TermsFacet(field='_deposit.status'),
}
default_filter = DefaultFilter(deposits_filter)
| 25.266667
| 74
| 0.664908
| 134
| 1,137
| 5.470149
| 0.537313
| 0.061392
| 0.090041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001164
| 0.244503
| 1,137
| 45
| 75
| 25.266667
| 0.852154
| 0.277045
| 0
| 0
| 0
| 0
| 0.066581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3e69d58aa5e27029fd5fb9a2126945c9c542b4c9
| 1,586
|
py
|
Python
|
code/find_nconfsources.py
|
fornax-navo/fornax-demo-notebooks
|
49525d5bed3440d0d1903c29b9a1af8e0ff7e975
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T18:12:59.000Z
|
2022-02-03T18:12:59.000Z
|
code/find_nconfsources.py
|
fornax-navo/fornax-demo-notebooks
|
49525d5bed3440d0d1903c29b9a1af8e0ff7e975
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T21:17:35.000Z
|
2022-03-11T22:28:46.000Z
|
code/find_nconfsources.py
|
fornax-navo/fornax-demo-notebooks
|
49525d5bed3440d0d1903c29b9a1af8e0ff7e975
|
[
"BSD-3-Clause"
] | 2
|
2022-02-01T00:57:35.000Z
|
2022-02-13T22:20:55.000Z
|
import numpy as np
from determine_source_type import determine_source_type
#function to figure out how many sources are in cutout
#and set up necessary tractor input for those sources
def find_nconfsources(raval, decval, gal_type, fluxval, x1, y1, cutout_width, subimage_wcs, df):
#setup to collect sources
objsrc = []
#keep the main source
objsrc.append(determine_source_type(raval, decval, gal_type, fluxval, x1, y1))
#find confusing sources with real fluxes
radiff = (df.ra-raval)*np.cos(decval)
decdiff= df.dec-decval
posdiff= np.sqrt(radiff**2+decdiff**2)*3600.
det = df.ks_flux_aper2 > 0 #make sure they have fluxes
#make an index into the dataframe for those objects within the same cutout
good = (abs(radiff*3600.) < cutout_width/2) & (abs(decdiff*3600.) < cutout_width/2) & (posdiff > 0.2) & det
nconfsrcs = np.size(posdiff[good])
#add confusing sources
#if there are any confusing sources
if nconfsrcs > 0:
ra_conf = df.ra[good].values
dec_conf = df.dec[good].values
flux_conf = df.ks_flux_aper2[good].values #should all be real fluxes
type_conf = df.type[good].values
for n in range(nconfsrcs):
#now need to set the values of x1, y1 at the location of the target *in the cutout*
xn, yn = subimage_wcs.all_world2pix(ra_conf[n], dec_conf[n],1)
objsrc.append(determine_source_type(ra_conf[n], dec_conf[n], type_conf[n], flux_conf[n], xn, yn))
return objsrc, nconfsrcs
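# For reference (restating the cuts above, nothing new): neighbours are selected
# with a flat-sky separation
#   d_ra = (ra - ra0) * cos(dec0),  d_dec = dec - dec0,
#   sep_arcsec = sqrt(d_ra**2 + d_dec**2) * 3600,
# and are kept as confusing sources when they fall inside the cutout half-width
# on both axes, lie more than 0.2 arcsec from the target, and have a positive
# ks_flux_aper2 flux.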
| 38.682927
| 111
| 0.663934
| 239
| 1,586
| 4.276151
| 0.422594
| 0.029354
| 0.074364
| 0.035225
| 0.146771
| 0.086106
| 0.056751
| 0
| 0
| 0
| 0
| 0.024958
| 0.242119
| 1,586
| 40
| 112
| 39.65
| 0.825291
| 0.283102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|