hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7faaf6151524c8e82026ebaf789a577660ab08ca
| 76
|
py
|
Python
|
sep/__init__.py
|
Fafa87/SEP
|
cdc8fdad83478d35aeb2992b8382aa4bc1763131
|
[
"MIT"
] | null | null | null |
sep/__init__.py
|
Fafa87/SEP
|
cdc8fdad83478d35aeb2992b8382aa4bc1763131
|
[
"MIT"
] | null | null | null |
sep/__init__.py
|
Fafa87/SEP
|
cdc8fdad83478d35aeb2992b8382aa4bc1763131
|
[
"MIT"
] | null | null | null |
import sep.evaluate
import sep.extract
import sep.process
import sep.splits
| 15.2
| 19
| 0.842105
| 12
| 76
| 5.333333
| 0.5
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 4
| 20
| 19
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f6a2e177795e18d3ef5a7e2990fcc1d5569472e1
| 86
|
py
|
Python
|
teste.py
|
Lausegouras/AulaDevopsTestes
|
b3221cba77de313168d06fc454340b3ffc7e2898
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
Lausegouras/AulaDevopsTestes
|
b3221cba77de313168d06fc454340b3ffc7e2898
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
Lausegouras/AulaDevopsTestes
|
b3221cba77de313168d06fc454340b3ffc7e2898
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from utilitario import mult
def teste_mult():
assert mult(3,5)==15
| 14.333333
| 27
| 0.732558
| 14
| 86
| 4.428571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056338
| 0.174419
| 86
| 5
| 28
| 17.2
| 0.816901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f6a5f30d990505baa3e82b51b75a4942c4d46d03
| 4,324
|
py
|
Python
|
services/core-api/tests/parties/party_appt/resources/test_mpa_permittee_resource.py
|
bcgov/mds
|
6c427a66a5edb4196222607291adef8fd6677038
|
[
"Apache-2.0"
] | 25
|
2018-07-09T19:04:37.000Z
|
2022-03-15T17:27:10.000Z
|
services/core-api/tests/parties/party_appt/resources/test_mpa_permittee_resource.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 983
|
2018-04-25T20:08:07.000Z
|
2022-03-31T21:45:20.000Z
|
services/core-api/tests/parties/party_appt/resources/test_mpa_permittee_resource.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 58
|
2018-05-15T22:35:50.000Z
|
2021-11-29T19:40:52.000Z
|
import pytest
import json
import uuid
from datetime import datetime
from tests.factories import PermitFactory, PartyFactory, MinePartyAppointmentFactory, create_mine_and_permit
# GET
def test_get_permittee_not_found(test_client, db_session, auth_headers):
get_resp = test_client.get(
f'/parties/mines/{uuid.uuid4()}', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 404, str(get_resp.response)
assert get_data['message']
def test_get_permittee(test_client, db_session, auth_headers):
appt_guid = MinePartyAppointmentFactory(permittee=True).mine_party_appt_guid
get_resp = test_client.get(
f'/parties/mines/{appt_guid}', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200, str(get_resp.response)
assert get_data['mine_party_appt_guid'] == str(appt_guid)
assert get_data['mine_party_appt_type_code'] == 'PMT'
#POST
def test_post_permittee_no_party(test_client, db_session, auth_headers):
mine, permit = create_mine_and_permit()
data = {
'mine_guid': str(mine.mine_guid),
'related_guid': str(permit.permit_guid),
'mine_party_appt_type_code': 'PMT',
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 404, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['message']
def test_post_permittee_no_permit(test_client, db_session, auth_headers):
mine, permit = create_mine_and_permit()
party_guid = PartyFactory(company=True).party_guid
data = {
'mine_guid': str(mine.mine_guid),
'party_guid': str(party_guid),
'mine_party_appt_type_code': 'PMT',
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
post_data = json.loads(post_resp.data.decode())
assert post_resp.status_code == 404, str(post_resp.response)
assert post_data['message']
def test_post_permittee(test_client, db_session, auth_headers):
mine, permit = create_mine_and_permit()
party_guid = PartyFactory(person=True).party_guid
data = {
'mine_guid': str(mine.mine_guid),
'party_guid': str(party_guid),
'mine_party_appt_type_code': 'PMT',
'related_guid': str(permit.permit_guid),
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
post_data = json.loads(post_resp.data.decode())
assert post_resp.status_code == 200, str(post_resp.response)
assert post_data['party_guid'] == str(party_guid)
def test_post_permittee_permit_guid_not_found(test_client, db_session, auth_headers):
mine, permit = create_mine_and_permit()
party_guid = PartyFactory(person=True).party_guid
data = {
'mine_guid': str(mine.mine_guid),
'party_guid': str(party_guid),
'mine_party_appt_type_code': 'PMT',
'related_guid': str(uuid.uuid4()),
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
post_data = json.loads(post_resp.data.decode())
assert post_resp.status_code == 404, str(post_resp.response)
assert post_data['message']
def test_post_permittee_party_guid_not_found(test_client, db_session, auth_headers):
mine, permit = create_mine_and_permit()
data = {
'mine_guid': str(mine.mine_guid),
'party_guid': str(uuid.uuid4()),
'mine_party_appt_type_code': 'PMT',
'related_guid': str(permit.permit_guid),
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 404, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['message']
| 37.6
| 108
| 0.703515
| 595
| 4,324
| 4.751261
| 0.104202
| 0.056597
| 0.036788
| 0.047046
| 0.872657
| 0.853555
| 0.826671
| 0.779271
| 0.735409
| 0.735409
| 0
| 0.006656
| 0.16605
| 4,324
| 114
| 109
| 37.929825
| 0.777316
| 0.001619
| 0
| 0.666667
| 0
| 0
| 0.165276
| 0.04752
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.077778
| false
| 0
| 0.055556
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f6d2c91c7e27f397a4a85e11f2d128afcfa820e0
| 69
|
py
|
Python
|
iptk/__init__.py
|
iptk/iptk-py
|
356e3a4b1acee05b03d25c14f2545a1d12f83787
|
[
"MIT"
] | null | null | null |
iptk/__init__.py
|
iptk/iptk-py
|
356e3a4b1acee05b03d25c14f2545a1d12f83787
|
[
"MIT"
] | null | null | null |
iptk/__init__.py
|
iptk/iptk-py
|
356e3a4b1acee05b03d25c14f2545a1d12f83787
|
[
"MIT"
] | 1
|
2020-05-17T21:45:01.000Z
|
2020-05-17T21:45:01.000Z
|
from .dataset import Dataset
from .dataset_store import DatasetStore
| 23
| 39
| 0.855072
| 9
| 69
| 6.444444
| 0.555556
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 40
| 34.5
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f6d9982770d9aa959572fadfe93280c1228a34b5
| 1,818
|
py
|
Python
|
Tests/ElementAttribute_test.py
|
adscheevel/tm1py
|
8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43
|
[
"MIT"
] | 113
|
2019-03-12T19:42:39.000Z
|
2022-03-31T22:40:05.000Z
|
Tests/ElementAttribute_test.py
|
adscheevel/tm1py
|
8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43
|
[
"MIT"
] | 459
|
2019-01-25T09:32:18.000Z
|
2022-03-24T21:57:16.000Z
|
Tests/ElementAttribute_test.py
|
adscheevel/tm1py
|
8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43
|
[
"MIT"
] | 107
|
2019-01-31T15:08:34.000Z
|
2022-03-16T14:58:38.000Z
|
import unittest
from TM1py import ElementAttribute
class TestElementAttribute(unittest.TestCase):
def test_eq_happy_case(self):
element_attribute1 = ElementAttribute(name="Attribute 1", attribute_type="String")
element_attribute2 = ElementAttribute(name="Attribute 1", attribute_type="String")
self.assertEqual(element_attribute1, element_attribute2)
def test_ne_name(self):
element_attribute1 = ElementAttribute(name="Attribute 1", attribute_type="String")
element_attribute2 = ElementAttribute(name="Attribute 2", attribute_type="String")
self.assertNotEqual(element_attribute1, element_attribute2)
def test_ne_type(self):
element_attribute1 = ElementAttribute(name="Attribute 1", attribute_type="String")
element_attribute2 = ElementAttribute(name="Attribute 1", attribute_type="Numeric")
self.assertNotEqual(element_attribute1, element_attribute2)
def test_eq_case_space_difference(self):
element_attribute1 = ElementAttribute(name="Attribute 1", attribute_type="String")
element_attribute2 = ElementAttribute(name="ATTRIBUTE1", attribute_type="String")
self.assertEqual(element_attribute1, element_attribute2)
def test_hash_happy_case(self):
element_attribute1 = ElementAttribute(name="Attribute 1", attribute_type="String")
element_attribute2 = ElementAttribute(name="Attribute 1", attribute_type="String")
self.assertEqual(hash(element_attribute1), hash(element_attribute2))
def test_construct_body(self):
element = ElementAttribute(name="Attribute 1", attribute_type="Numeric")
self.assertEqual(
element.body_as_dict,
{'Name': 'Attribute 1', 'Type': 'Numeric'})
if __name__ == '__main__':
unittest.main()
| 37.875
| 91
| 0.734323
| 189
| 1,818
| 6.761905
| 0.190476
| 0.172144
| 0.226917
| 0.211268
| 0.760563
| 0.760563
| 0.760563
| 0.757433
| 0.617371
| 0.617371
| 0
| 0.021753
| 0.165567
| 1,818
| 47
| 92
| 38.680851
| 0.820699
| 0
| 0
| 0.366667
| 0
| 0
| 0.122112
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.066667
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1272878cddc65b740dcfdbd94cc165038864bd29
| 8,279
|
py
|
Python
|
survae/utils/loss.py
|
robert-giaquinto/survae_flows
|
4d7dc638f77c48ad3c8393b967c33ac9dbad60fe
|
[
"MIT"
] | 2
|
2021-03-06T19:37:39.000Z
|
2022-01-09T11:19:45.000Z
|
survae/utils/loss.py
|
robert-giaquinto/survae_flows
|
4d7dc638f77c48ad3c8393b967c33ac9dbad60fe
|
[
"MIT"
] | null | null | null |
survae/utils/loss.py
|
robert-giaquinto/survae_flows
|
4d7dc638f77c48ad3c8393b967c33ac9dbad60fe
|
[
"MIT"
] | null | null | null |
import math
import torch
def loglik_nats(model, x):
"""Compute the log-likelihood in nats."""
return - model.log_prob(x).mean()
def loglik_bpd(model, x):
"""Compute the log-likelihood in bits per dim."""
return - model.log_prob(x).sum() / (math.log(2) * x.shape.numel())
def cond_loglik_nats(model, x, context):
"""Compute the log-likelihood in nats."""
return - model.log_prob(x, context).mean()
def cond_loglik_bpd(model, x, context):
"""Compute the log-likelihood in bits per dim."""
return - model.log_prob(x, context).sum() / (math.log(2) * x.shape.numel())
def elbo_nats(model, x):
"""
Compute the ELBO in nats.
Same as .loglik_nats(), but may improve readability.
"""
return loglik_nats(model, x)
def elbo_bpd(model, x):
"""
Compute the ELBO in bits per dim.
Same as .loglik_bpd(), but may improve readability.
"""
return loglik_bpd(model, x)
def cond_elbo_nats(model, x, context):
"""
Compute the ELBO in nats for conditional models.
Same as .loglik_nats(), but may improve readability.
"""
return cond_loglik_nats(model, x, context)
def cond_elbo_bpd(model, x, context):
"""
Compute the ELBO in bits per dim for conditional models.
Same as .loglik_bpd(), but may improve readability.
"""
return cond_loglik_bpd(model, x, context)
def iwbo(model, x, k):
x_stack = torch.cat([x for _ in range(k)], dim=0)
ll_stack = model.log_prob(x_stack)
ll = torch.stack(torch.chunk(ll_stack, k, dim=0))
return torch.logsumexp(ll, dim=0) - math.log(k)
def cond_iwbo(model, x, context, k):
x_stack = torch.cat([x for _ in range(k)], dim=0)
context_stack = torch.cat([context for _ in range(k)], dim=0)
ll_stack = model.log_prob(x_stack, context_stack)
ll = torch.stack(torch.chunk(ll_stack, k, dim=0))
return torch.logsumexp(ll, dim=0) - math.log(k)
def iwbo_batched(model, x, k, kbs):
assert k % kbs == 0
num_passes = k // kbs
ll_batched = []
for i in range(num_passes):
x_stack = torch.cat([x for _ in range(kbs)], dim=0)
ll_stack = model.log_prob(x_stack)
ll_batched.append(torch.stack(torch.chunk(ll_stack, kbs, dim=0)))
ll = torch.cat(ll_batched, dim=0)
return torch.logsumexp(ll, dim=0) - math.log(k)
def cond_iwbo_batched(model, x, context, k, kbs):
assert k % kbs == 0
num_passes = k // kbs
ll_batched = []
for i in range(num_passes):
x_stack = torch.cat([x for _ in range(kbs)], dim=0)
context_stack = torch.cat([context for _ in range(kbs)], dim=0)
ll_stack = model.log_prob(x_stack, context_stack)
ll_batched.append(torch.stack(torch.chunk(ll_stack, kbs, dim=0)))
ll = torch.cat(ll_batched, dim=0)
return torch.logsumexp(ll, dim=0) - math.log(k)
def iwbo_nats(model, x, k, kbs=None):
"""Compute the IWBO in nats."""
if kbs: return - iwbo_batched(model, x, k, kbs).mean()
else: return - iwbo(model, x, k).mean()
def cond_iwbo_nats(model, x, context, k, kbs=None):
"""Compute the IWBO in nats."""
if kbs: return - cond_iwbo_batched(model, x, context, k, kbs).mean()
else: return - cond_iwbo(model, x, context, k).mean()
def iwbo_bpd(model, x, k, kbs=None):
"""Compute the IWBO in bits per dim."""
if kbs: return - iwbo_batched(model, x, k, kbs).sum() / (x.numel() * math.log(2))
else: return - iwbo(model, x, k).sum() / (x.numel() * math.log(2))
def cond_iwbo_bpd(model, x, context, k, kbs=None):
"""Compute the IWBO in bits per dim."""
if kbs: return - cond_iwbo_batched(model, x, context, k, kbs).sum() / (x.numel() * math.log(2))
else: return - cond_iwbo(model, x, context, k).sum() / (x.numel() * math.log(2))
def dataset_elbo_nats(model, data_loader, device, double=False, verbose=False):
with torch.no_grad():
nats = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = x.double()
x = x.to(device)
nats += elbo_nats(model, x).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), nats/count, end='\r')
print(f"Dataset ELBO Nats: {nats/count}")
return nats / count
def dataset_cond_elbo_nats(model, data_loader, device, double=False, verbose=False):
with torch.no_grad():
nats = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = [x[0].double(), x[1].double()]
context = x[1].to(device)
x = x[0].to(device)
nats += cond_elbo_nats(model, x, context=context).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), nats/count, end='\r')
print(f"Dataset ELBO Nats: {nats/count}")
return nats / count
def dataset_elbo_bpd(model, data_loader, device, double=False, verbose=False):
with torch.no_grad():
bpd = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = x.double()
x = x.to(device)
bpd += elbo_bpd(model, x).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\r')
print(f"Dataset ELBO BPD: {bpd/count}")
return bpd / count
def dataset_cond_elbo_bpd(model, data_loader, device, double=False, verbose=False):
with torch.no_grad():
bpd = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = [x[0].double(), x[1].double()]
context = x[1].to(device)
x = x[0].to(device)
bpd += cond_elbo_bpd(model, x, context=context).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\r')
print(f"Dataset ELBO BPD: {bpd/count}")
return bpd / count
def dataset_iwbo_nats(model, data_loader, k, device, double=False, kbs=None, verbose=False):
with torch.no_grad():
nats = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = x.double()
x = x.to(device)
nats += iwbo_nats(model, x, k=k, kbs=kbs).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), nats/count, end='\r')
print(f"Dataset IWBO Nats: {nats/count}")
return nats / count
def dataset_cond_iwbo_nats(model, data_loader, k, device, double=False, kbs=None, verbose=False):
with torch.no_grad():
nats = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = [x[0].double(), x[1].double()]
context = x[1].to(device)
x = x[0].to(device)
nats += cond_iwbo_nats(model, x, context=context, k=k, kbs=kbs).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), nats/count, end='\r')
print(f"Dataset IWBO Nats: {nats/count}")
return nats / count
def dataset_iwbo_bpd(model, data_loader, k, device, double=False, kbs=None, verbose=False):
with torch.no_grad():
bpd = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = x.double()
x = x.to(device)
bpd += iwbo_bpd(model, x, k=k, kbs=kbs).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\r')
print(f"Dataset IWBO BPD: {bpd/count}")
return bpd / count
def dataset_cond_iwbo_bpd(model, data_loader, k, device, double=False, kbs=None, verbose=False):
with torch.no_grad():
bpd = 0.0
count = 0
for i, x in enumerate(data_loader):
if double: x = [x[0].double(), x[1].double()]
context = x[1].to(device)
x = x[0].to(device)
bpd += cond_iwbo_bpd(model, x, context=context, k=k, kbs=kbs).cpu().item() * len(x)
count += len(x)
if verbose: print('{}/{}'.format(i+1, len(data_loader)), bpd/count, end='\r')
print(f"Dataset IWBO BPD: {bpd/count}")
return bpd / count
| 34.069959
| 99
| 0.58739
| 1,272
| 8,279
| 3.706761
| 0.060535
| 0.045811
| 0.049629
| 0.022057
| 0.971368
| 0.959491
| 0.898621
| 0.871898
| 0.831389
| 0.77667
| 0
| 0.011634
| 0.252446
| 8,279
| 242
| 100
| 34.210744
| 0.750202
| 0.079116
| 0
| 0.679012
| 0
| 0
| 0.03943
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 1
| 0.148148
| false
| 0.024691
| 0.012346
| 0
| 0.283951
| 0.098765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
89ec131e991e98d7b8d725487cf6dbe45aee94e6
| 39
|
py
|
Python
|
reststub/__init__.py
|
nharada1/python-rest-stub
|
a5fb4fc8a6c1e750cac7457716b1ffed796c3a94
|
[
"MIT"
] | 13
|
2015-11-04T03:34:15.000Z
|
2017-08-06T15:11:16.000Z
|
reststub/__init__.py
|
nharada1/python-rest-stub
|
a5fb4fc8a6c1e750cac7457716b1ffed796c3a94
|
[
"MIT"
] | null | null | null |
reststub/__init__.py
|
nharada1/python-rest-stub
|
a5fb4fc8a6c1e750cac7457716b1ffed796c3a94
|
[
"MIT"
] | null | null | null |
from .rest_server import rest_stub_app
| 19.5
| 38
| 0.871795
| 7
| 39
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d624e5bc18f4463db3aaa493b44c606f239eaec7
| 62
|
py
|
Python
|
tools/Sikuli/ClickContextMenuVisualStudioCode.sikuli/ClickContextMenuVisualStudioCode.py
|
marmyshev/vanessa-automation
|
9f87bd6df58b4c205104d3ae8e3643752d67eef7
|
[
"BSD-3-Clause"
] | 296
|
2018-05-27T08:03:14.000Z
|
2022-03-19T08:36:11.000Z
|
tools/Sikuli/ClickContextMenuVisualStudioCode.sikuli/ClickContextMenuVisualStudioCode.py
|
marmyshev/vanessa-automation
|
9f87bd6df58b4c205104d3ae8e3643752d67eef7
|
[
"BSD-3-Clause"
] | 1,562
|
2018-05-27T18:36:25.000Z
|
2022-03-31T07:35:11.000Z
|
tools/Sikuli/ClickContextMenuVisualStudioCode.sikuli/ClickContextMenuVisualStudioCode.py
|
marmyshev/vanessa-automation
|
9f87bd6df58b4c205104d3ae8e3643752d67eef7
|
[
"BSD-3-Clause"
] | 299
|
2018-06-18T20:00:56.000Z
|
2022-03-29T12:29:55.000Z
|
hover("1532879285882.png")
click("1532879285882.png")
exit(0)
| 15.5
| 26
| 0.758065
| 8
| 62
| 5.875
| 0.75
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.457627
| 0.048387
| 62
| 3
| 27
| 20.666667
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d626d7885687f5fd26a705dfb530f8ac8308dbc0
| 3,436
|
py
|
Python
|
riddler/nancy/yahtzee.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
riddler/nancy/yahtzee.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
riddler/nancy/yahtzee.py
|
rgc-retired/math_puzzles
|
0f96fc0f4d53f9ece53fb7af02c037067f710fac
|
[
"MIT"
] | null | null | null |
import numpy as np
# Attempt to get a large straight after
# first roll result = 1, 2, 4, 5, X where
# X is not a 3.
#
# What is the optimal strategy to get a large straight?
# This is either 1-2-3-4-5 or 2-3-4-5-6. No other result
# matters.
#
# Possibilities:
# --------------
# 1. Reroll the X to try for inside straight (e.g. 3)?
# 2. Reroll the 1 and X?
# 3. Other?
# Strategy 1: Reroll the X only
def strategy1():
r=np.array([1,2,4,5])
s=np.random.choice(6,1)+1
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(True)
s=np.random.choice(6,1)+1
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(True)
return(False)
# Strategy 3: Reroll the 1,X
# Reroll 1 die if you have a 3, 1, or 6
# The priority is to keep the 3 (double ended straight)
def strategy3():
r=np.array([2,4,5])
s=np.random.choice(6,2)+1
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(1)
elif np.all(a==np.array([2,3,4,5,6])):
return(1)
# First roll failed -- need to figure out
# what to try on the second roll.
# If none of the new dice are 1, 3, or 6 then
# roll them both again.
# elif either of the new dice is 3 then keep it
# roll other die - 2 chances to win
# elif either of the new dice is 1 then keep it
# roll other die - 1 chance to win
# elif either of the new dice is 6 then keep it
# roll other die - 1 chance to win
if 3 in s:
# Keep the 3 and roll the other die
r=np.array([2,3,4,5])
s=np.random.choice(6,1)+1
rollpath=2
elif 1 in s:
# Keep the 1 and roll the other die
r=np.array([1,2,4,5])
s=np.random.choice(6,1)+1
rollpath=3
elif 6 in s:
# Keep the 6 and roll the other die
r=np.array([2,4,5,6])
s=np.random.choice(6,1)+1
rollpath=4
else:
# Roll both dice
r=np.array([2,4,5])
s=np.random.choice(6,2)+1
rollpath=5
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(rollpath)
elif np.all(a==np.array([2,3,4,5,6])):
return(rollpath)
return(0)
# Strategy 2: Reroll the 1,X
# Roll 1 die if you have a 3 otherwise roll 2
def strategy2():
r=np.array([2,4,5])
s=np.random.choice(6,2)+1
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(1)
elif np.all(a==np.array([2,3,4,5,6])):
return(1)
# First roll failed -- need to figure out
# what to try on the second roll.
# If none of the new dice are 1, 3, or 6 then
# roll them both again.
# elif either of the new dice is 3 then keep it
# roll other die - 2 chances to win
# elif either of the new dice is 1 then keep it
# roll other die - 1 chance to win
# elif either of the new dice is 6 then keep it
# roll other die - 1 chance to win
if 3 in s:
# Keep the 3 and roll the other die
r=np.array([2,3,4,5])
s=np.random.choice(6,1)+1
rollpath=2
else:
# Roll both dice
r=np.array([2,4,5])
s=np.random.choice(6,2)+1
rollpath=5
a=np.r_[r,s]
a=np.sort(a)
if np.all(a==np.array([1,2,3,4,5])):
return(rollpath)
elif np.all(a==np.array([2,3,4,5,6])):
return(rollpath)
return(0)
| 28.396694
| 57
| 0.559371
| 685
| 3,436
| 2.79708
| 0.131387
| 0.022965
| 0.021921
| 0.029228
| 0.789666
| 0.764614
| 0.76357
| 0.747912
| 0.724948
| 0.710856
| 0
| 0.076417
| 0.291618
| 3,436
| 120
| 58
| 28.633333
| 0.710764
| 0.433062
| 0
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.014286
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d62bcdf8af7be4e285d3a2566f6156bd728f72f3
| 35
|
py
|
Python
|
nyaggle/testing/__init__.py
|
harupy/nyaggle
|
132a93079e364d60b5598de77ab636a603ec06a4
|
[
"MIT"
] | null | null | null |
nyaggle/testing/__init__.py
|
harupy/nyaggle
|
132a93079e364d60b5598de77ab636a603ec06a4
|
[
"MIT"
] | null | null | null |
nyaggle/testing/__init__.py
|
harupy/nyaggle
|
132a93079e364d60b5598de77ab636a603ec06a4
|
[
"MIT"
] | 2
|
2021-03-19T05:57:39.000Z
|
2021-03-30T04:54:36.000Z
|
from nyaggle.testing.util import *
| 17.5
| 34
| 0.8
| 5
| 35
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c39554dcec1697b15f55de90a5418a5cbaffe35f
| 663
|
py
|
Python
|
eloquent/orm/scopes/scope.py
|
KarthickNamakkalKrishnan/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 47
|
2015-03-19T02:11:36.000Z
|
2022-03-29T07:16:42.000Z
|
eloquent/orm/scopes/scope.py
|
KarthickNamakkalKrishnan/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 20
|
2015-03-16T02:56:51.000Z
|
2015-05-24T17:51:29.000Z
|
eloquent/orm/scopes/scope.py
|
sdispater/eloquent
|
0638b688d5fd0c1a46b7471dd465eeb4c2f84666
|
[
"MIT"
] | 4
|
2018-08-29T13:42:50.000Z
|
2021-03-14T11:28:31.000Z
|
# -*- coding: utf-8 -*-
class Scope(object):
def apply(self, builder, model):
"""
Apply the scope to a given query builder.
:param builder: The query builder
:type builder: eloquent.orm.Builder
:param model: The model
:type model: eloquent.orm.Model
"""
raise NotImplementedError
def remove(self, builder, model):
"""
Remove the scope from a given query builder.
:param builder: The query builder
:type builder: eloquent.orm.Builder
:param model: The model
:type model: eloquent.orm.Model
"""
raise NotImplementedError
| 22.862069
| 52
| 0.588235
| 74
| 663
| 5.27027
| 0.324324
| 0.123077
| 0.082051
| 0.092308
| 0.723077
| 0.723077
| 0.723077
| 0.723077
| 0.723077
| 0.723077
| 0
| 0.002222
| 0.321267
| 663
| 28
| 53
| 23.678571
| 0.864444
| 0.550528
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c3a34b7edc4dd4f40de782bcb7f8ee256bd72a77
| 25
|
py
|
Python
|
neuropowertools/apps/__init__.py
|
jokedurnez/neuropowertools
|
4e17247867b108f7e928dfb205a62400afba1e34
|
[
"MIT"
] | null | null | null |
neuropowertools/apps/__init__.py
|
jokedurnez/neuropowertools
|
4e17247867b108f7e928dfb205a62400afba1e34
|
[
"MIT"
] | null | null | null |
neuropowertools/apps/__init__.py
|
jokedurnez/neuropowertools
|
4e17247867b108f7e928dfb205a62400afba1e34
|
[
"MIT"
] | null | null | null |
from . import main, power
| 25
| 25
| 0.76
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c3d5fd2d7682d149e476444e4461938d33fe7e58
| 44
|
py
|
Python
|
eisen/utils/logging/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
eisen/utils/logging/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
eisen/utils/logging/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
from .logs import *
from .summaries import *
| 22
| 24
| 0.75
| 6
| 44
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 24
| 22
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7f02664bfcfb1860c752619019d268883ec38f04
| 129
|
py
|
Python
|
tyxe/__init__.py
|
TyXe-BDL/TyXe
|
9b0a0aebb84ddd7eed2f26da967e61ad0cb79039
|
[
"MIT"
] | 34
|
2021-09-29T16:16:34.000Z
|
2022-03-13T00:34:29.000Z
|
tyxe/__init__.py
|
TyXe-BDL/TyXe
|
9b0a0aebb84ddd7eed2f26da967e61ad0cb79039
|
[
"MIT"
] | 10
|
2021-09-20T21:49:55.000Z
|
2022-03-01T06:25:50.000Z
|
tyxe/__init__.py
|
TyXe-BDL/TyXe
|
9b0a0aebb84ddd7eed2f26da967e61ad0cb79039
|
[
"MIT"
] | 11
|
2021-06-17T13:31:55.000Z
|
2022-02-17T05:22:55.000Z
|
from . import guides
from . import likelihoods
from . import poutine
from . import priors
from . import util
from .bnn import *
| 16.125
| 25
| 0.751938
| 18
| 129
| 5.388889
| 0.444444
| 0.515464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193798
| 129
| 7
| 26
| 18.428571
| 0.932692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7f10ed5eb47807ce4b493191fdddd8db468a30dd
| 37
|
py
|
Python
|
Chapter 07/ch7_1g.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 07/ch7_1g.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 07/ch7_1g.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
def Pyn():
print("Be Positive! ")
| 12.333333
| 24
| 0.567568
| 5
| 37
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 37
| 2
| 25
| 18.5
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
615a3cabf456058913fd20a7c63a191fd32ac773
| 60
|
py
|
Python
|
packages/any-api/wasmer/__init__.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 900
|
2019-04-11T01:52:10.000Z
|
2020-09-02T11:09:14.000Z
|
packages/any-api/wasmer/__init__.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 172
|
2019-04-15T18:04:55.000Z
|
2020-09-01T15:20:06.000Z
|
packages/any-api/wasmer/__init__.py
|
TheRakeshPurohit/wasmer-python
|
2375974d9dc50a2caf29fdd9e07d49fd94537e03
|
[
"MIT"
] | 28
|
2019-04-11T02:49:04.000Z
|
2020-08-27T09:47:49.000Z
|
raise ImportError("Wasmer is not available on this system")
| 30
| 59
| 0.8
| 9
| 60
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 60
| 1
| 60
| 60
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
619d1a3c5a829f97da67411ed5d4a228735078c7
| 2,325
|
gyp
|
Python
|
deps/subversion/ra_serf.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | null | null | null |
deps/subversion/ra_serf.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | 5
|
2018-03-16T06:48:29.000Z
|
2018-04-17T09:47:15.000Z
|
deps/subversion/ra_serf.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | 4
|
2018-04-11T00:06:05.000Z
|
2019-10-25T01:34:40.000Z
|
{
"includes": [
"./common.gypi"
],
"targets": [
{
"target_name": "libsvn_ra_serf",
"dependencies": [
"../serf/serf.gyp:serf"
],
"sources": [
"subversion/subversion/libsvn_ra_serf/blame.c",
"subversion/subversion/libsvn_ra_serf/blncache.c",
"subversion/subversion/libsvn_ra_serf/commit.c",
"subversion/subversion/libsvn_ra_serf/eagain_bucket.c",
"subversion/subversion/libsvn_ra_serf/get_deleted_rev.c",
"subversion/subversion/libsvn_ra_serf/get_file.c",
"subversion/subversion/libsvn_ra_serf/get_lock.c",
"subversion/subversion/libsvn_ra_serf/getdate.c",
"subversion/subversion/libsvn_ra_serf/getlocations.c",
"subversion/subversion/libsvn_ra_serf/getlocationsegments.c",
"subversion/subversion/libsvn_ra_serf/getlocks.c",
"subversion/subversion/libsvn_ra_serf/inherited_props.c",
"subversion/subversion/libsvn_ra_serf/list.c",
"subversion/subversion/libsvn_ra_serf/lock.c",
"subversion/subversion/libsvn_ra_serf/log.c",
"subversion/subversion/libsvn_ra_serf/merge.c",
"subversion/subversion/libsvn_ra_serf/mergeinfo.c",
"subversion/subversion/libsvn_ra_serf/multistatus.c",
"subversion/subversion/libsvn_ra_serf/options.c",
"subversion/subversion/libsvn_ra_serf/property.c",
"subversion/subversion/libsvn_ra_serf/replay.c",
"subversion/subversion/libsvn_ra_serf/request_body.c",
"subversion/subversion/libsvn_ra_serf/sb_bucket.c",
"subversion/subversion/libsvn_ra_serf/serf.c",
"subversion/subversion/libsvn_ra_serf/stat.c",
"subversion/subversion/libsvn_ra_serf/stream_bucket.c",
"subversion/subversion/libsvn_ra_serf/update.c",
"subversion/subversion/libsvn_ra_serf/util.c",
"subversion/subversion/libsvn_ra_serf/util_error.c",
"subversion/subversion/libsvn_ra_serf/xml.c"
]
}
]
}
| 50.543478
| 78
| 0.593548
| 235
| 2,325
| 5.561702
| 0.2
| 0.189748
| 0.284621
| 0.642693
| 0.789594
| 0.765111
| 0.260138
| 0
| 0
| 0
| 0
| 0
| 0.297204
| 2,325
| 45
| 79
| 51.666667
| 0.799878
| 0
| 0
| 0.044444
| 0
| 0
| 0.661842
| 0.630263
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61ad78327315c12c190baa4682e7af00a3a3850a
| 160
|
py
|
Python
|
src/svgen/__init__.py
|
pedromxavier/svg-motion
|
016d95d3302b5519954b89e489f7394bb64eeea9
|
[
"MIT"
] | null | null | null |
src/svgen/__init__.py
|
pedromxavier/svg-motion
|
016d95d3302b5519954b89e489f7394bb64eeea9
|
[
"MIT"
] | null | null | null |
src/svgen/__init__.py
|
pedromxavier/svg-motion
|
016d95d3302b5519954b89e489f7394bb64eeea9
|
[
"MIT"
] | null | null | null |
from .svgen import SVG, Figure
from .svgen import Animation, Frame, Camera
from .svglib import Point, Vector
from .svglib import Domain, Map, Surface, Transform
| 40
| 51
| 0.79375
| 23
| 160
| 5.521739
| 0.652174
| 0.141732
| 0.23622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1375
| 160
| 4
| 51
| 40
| 0.92029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
61bfc5849f3285dcce2ee3b38eb927a600c62c69
| 12,488
|
py
|
Python
|
ztx/views.py
|
lqs429521992/ztx-srv
|
1fee866606ce5b00ae29edd526eaad6dc7f9ceff
|
[
"MIT"
] | null | null | null |
ztx/views.py
|
lqs429521992/ztx-srv
|
1fee866606ce5b00ae29edd526eaad6dc7f9ceff
|
[
"MIT"
] | null | null | null |
ztx/views.py
|
lqs429521992/ztx-srv
|
1fee866606ce5b00ae29edd526eaad6dc7f9ceff
|
[
"MIT"
] | null | null | null |
from rest_framework.viewsets import ModelViewSet
from .models import Costomer, Product, Income, Trade
from .serializers import CostomerSerializer, ProductSerializer, IncomeSerializer, TradeSerializer
from libs import iView
from rest_framework.views import APIView
from rest_framework.response import Response
from decimal import Decimal
from django.db.models import Sum
from datetime import datetime
from datetime import timedelta
# Create your views here.
class CostomerViewSet(ModelViewSet):
"""
角色:增删改查
"""
# perms_map = {'get': '*', 'post': 'role_create',
# 'put': 'role_update', 'delete': 'role_delete'}
permission_classes = []
authentication_classes = []
queryset = Costomer.objects
serializer_class = CostomerSerializer
# pagination_class = None
filter_fields = ('name','introducer')
search_fields = ('name','introducer')
ordering_fields = ['pk','create_time']
ordering = ['-create_time']
class CostomerRecharge(iView):
# 默认认证校验类
authentication_classes = []
# 默认权限校验类
# permission_classes = []
# 权限校验名称
perms_map = {
'get': '*',
'post': '*',
'put': '*',
'delete': '*'
}
def get(self,request):
# 今日 昨日 本周 上周 本月 上月 总计
# 获取今日收入
now = datetime.strptime(datetime.now().strftime('%Y-%m-%d'),'%Y-%m-%d')
# for item in range(11):
# now = datetime.strptime('2020-' + str(item+1).zfill(2) + '-01','%Y-%m-%d')
# 今天
today = now
print("今天:"+str(today) + ' ' + str(today.weekday()+1))
# 昨天
yesterday = now - timedelta(days = 1 )
print("昨天:" + str(yesterday) + ' ' + str(yesterday.weekday()+1))
# 本周第一天和最后一天
this_week_start = now - timedelta(days = now.weekday())
this_week_end = now + timedelta(days = 6 - now.weekday())
print("本周:" + str(this_week_start) + '——' + str(this_week_end))
# 上周第一天和最后一天
last_week_start = now - timedelta(days = now.weekday() + 7 )
last_week_end = now - timedelta(days = now.weekday() + 2 )
print("上周:" + str(last_week_start) + '——' + str(last_week_end))
# 本月第一天和最后一天
this_month_start = datetime(now.year, now.month, 1 )
this_month_end = datetime(now.year+now.month//12, (now.month)%12+1, 1 )
# print("本月:" + str(this_month_start) + '——' + str(this_month_end))
# 上月第一天和最后一天
# last_month_end = this_month_start - timedelta(days = 1 )
last_month_start = datetime((this_month_start - timedelta(days = 1 )).year, (this_month_start - timedelta(days = 1 )).month, 1 )
last_month_end = datetime(now.year, now.month, 1 )
# print("上月:" + str(last_month_start) + '——' + str(last_month_end))
# 本季第一天和最后一天
this_quarter_start = datetime(now.year+(now.month+9)//12-1, ((now.month%12//3-1)%4+1)*3, 1 )
this_quarter_end = datetime(now.year+now.month//12, (now.month%12//3+1)*3, 1 )
# print("本季:" + str(this_quarter_start) + '——' + str(this_quarter_end))
# 上季第一天和最后一天
last_quarter_start = datetime(now.year+(now.month+6)//12-1, (((now.month+9)%12//3-1)%4+1)*3 , 1 )
last_quarter_end = this_quarter_start
# print("上季:" + str(last_quarter_start) + '——' + str(last_quarter_end))
# 本年第一天和最后一天
this_year_start = datetime(now.year, 1 , 1 )
this_year_end = datetime(now.year + 1 , 1 , 1 )
# print("今年:" + str(this_year_start) + '——' + str(this_year_end))
# 去年第一天和最后一天
last_year_start = datetime((this_year_start - timedelta(days = 1 )).year, 1 , 1 )
last_year_end = this_year_start
# print("去年:" + str(last_year_start) + '——' + str(last_year_end))
result = {}
result['recharge_today']= Income.objects.filter(create_time__date= today).aggregate(sum=Sum("money"))['sum']
result['recharge_yesterday'] = Income.objects.filter(create_time__date= yesterday).aggregate(sum=Sum("money"))['sum']
result['recharge_this_week'] = Income.objects.filter(create_time__range= (this_week_start,this_week_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_last_week'] = Income.objects.filter(create_time__range= (last_week_start,last_week_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_this_month'] = Income.objects.filter(create_time__range= (this_month_start,this_month_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_last_month'] = Income.objects.filter(create_time__range= (last_month_start,last_month_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_this_quarter'] = Income.objects.filter(create_time__range= (this_quarter_start,this_quarter_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_last_quarter'] = Income.objects.filter(create_time__range= (last_quarter_start,last_quarter_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_this_year'] = Income.objects.filter(create_time__range= (this_year_start,this_year_end)).aggregate(sum=Sum("money"))['sum']
result['recharge_last_year'] = Income.objects.filter(create_time__range= (last_year_start,last_year_end)).aggregate(sum=Sum("money"))['sum']
result['total'] = Income.objects.all().aggregate(sum=Sum("money"))['sum']
# 校验空数据,并将其设置为0
for item in result.keys():
if not result[item]:
result[item] = 0
return Response(result)
def post(self,request):
costomer = Costomer.objects.get(pk=request.data['id'])
if request.data['type'] == '理疗卡':
costomer.money = costomer.money + Decimal(request.data['money'])
costomer.save()
Income(type=request.data['type'],money=request.data['money'],costomer= costomer).save()
return Response()
class CostomerConsume(iView):
# 默认认证校验类
authentication_classes = []
# 默认权限校验类
# permission_classes = []
# 权限校验名称
perms_map = {
'get': '*',
'post': '*',
'put': '*',
'delete': '*'
}
# 获取消费统计信息
def get(self,request):
trade = Trade.objects.all().aggregate(sum= Sum("product__price"))
# 今日 昨日 本周 上周 本月 上月 总计
# 获取今日消费
now = datetime.strptime(datetime.now().strftime('%Y-%m-%d'),'%Y-%m-%d')
# for item in range(11):
# now = datetime.strptime('2020-' + str(item+1).zfill(2) + '-01','%Y-%m-%d')
# 今天
today = now
print("今天:"+str(today) + ' ' + str(today.weekday()+1))
# 昨天
yesterday = now - timedelta(days = 1 )
print("昨天:" + str(yesterday) + ' ' + str(yesterday.weekday()+1))
# 本周第一天和最后一天
this_week_start = now - timedelta(days = now.weekday())
this_week_end = now + timedelta(days = 6 - now.weekday())
print("本周:" + str(this_week_start) + '——' + str(this_week_end))
# 上周第一天和最后一天
last_week_start = now - timedelta(days = now.weekday() + 7 )
last_week_end = now - timedelta(days = now.weekday() + 2 )
print("上周:" + str(last_week_start) + '——' + str(last_week_end))
# 本月第一天和最后一天
this_month_start = datetime(now.year, now.month, 1 )
this_month_end = datetime(now.year+now.month//12, (now.month)%12+1, 1 )
# print("本月:" + str(this_month_start) + '——' + str(this_month_end))
# 上月第一天和最后一天
# last_month_end = this_month_start - timedelta(days = 1 )
last_month_start = datetime((this_month_start - timedelta(days = 1 )).year, (this_month_start - timedelta(days = 1 )).month, 1 )
last_month_end = datetime(now.year, now.month, 1 )
# print("上月:" + str(last_month_start) + '——' + str(last_month_end))
# 本季第一天和最后一天
this_quarter_start = datetime(now.year+(now.month+9)//12-1, ((now.month%12//3-1)%4+1)*3, 1 )
this_quarter_end = datetime(now.year+now.month//12, (now.month%12//3+1)*3, 1 )
# print("本季:" + str(this_quarter_start) + '——' + str(this_quarter_end))
# 上季第一天和最后一天
last_quarter_start = datetime(now.year+(now.month+6)//12-1, (((now.month+9)%12//3-1)%4+1)*3 , 1 )
last_quarter_end = this_quarter_start
# print("上季:" + str(last_quarter_start) + '——' + str(last_quarter_end))
# 本年第一天和最后一天
this_year_start = datetime(now.year, 1 , 1 )
this_year_end = datetime(now.year + 1 , 1 , 1 )
# print("今年:" + str(this_year_start) + '——' + str(this_year_end))
# 去年第一天和最后一天
last_year_start = datetime((this_year_start - timedelta(days = 1 )).year, 1 , 1 )
last_year_end = this_year_start
# print("去年:" + str(last_year_start) + '——' + str(last_year_end))
# Trade.objects.all().aggregate(sum= Sum("product__price"))
result = {}
result['consume_today']= Trade.objects.filter(create_time__date= today).aggregate(sum=Sum("product__price"))['sum']
result['consume_yesterday'] = Trade.objects.filter(create_time__date= yesterday).aggregate(sum=Sum("product__price"))['sum']
result['consume_this_week'] = Trade.objects.filter(create_time__range= (this_week_start,this_week_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_last_week'] = Trade.objects.filter(create_time__range= (last_week_start,last_week_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_this_month'] = Trade.objects.filter(create_time__range= (this_month_start,this_month_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_last_month'] = Trade.objects.filter(create_time__range= (last_month_start,last_month_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_this_quarter'] = Trade.objects.filter(create_time__range= (this_quarter_start,this_quarter_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_last_quarter'] = Trade.objects.filter(create_time__range= (last_quarter_start,last_quarter_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_this_year'] = Trade.objects.filter(create_time__range= (this_year_start,this_year_end)).aggregate(sum=Sum("product__price"))['sum']
result['consume_last_year'] = Trade.objects.filter(create_time__range= (last_year_start,last_year_end)).aggregate(sum=Sum("product__price"))['sum']
result['total'] = Trade.objects.filter().aggregate(sum=Sum("product__price"))['sum']
# 校验空数据,并将其设置为0
for item in result.keys():
if not result[item]:
result[item] = 0
return Response(result)
def post(self,request):
# 1. 添加消费记录
# 2. 修改用户余额
costomer = Costomer.objects.get(pk=request.data['id'])
product = Product.objects.get(pk=request.data['product_id'])
costomer.money = costomer.money - Decimal(request.data['price'])
costomer.save()
Trade(costomer= costomer, product= product,price= request.data['price'],amount=request.data['amount']).save()
return Response()
class ProductViewSet(ModelViewSet):
"""
角色:增删改查
"""
perms_map = {'get': '*', 'post': '*',
'put': '*', 'delete': '*'}
permission_classes = []
authentication_classes = []
queryset = Product.objects
serializer_class = ProductSerializer
# pagination_class = None
search_fields = ['name']
ordering_fields = ['pk']
ordering = ['pk']
class IncomeViewSet(ModelViewSet):
"""
角色:增删改查
"""
# perms_map = {'get': '*', 'post': 'role_create',
# 'put': 'role_update', 'delete': 'role_delete'}
permission_classes = []
authentication_classes = []
queryset = Income.objects
serializer_class = IncomeSerializer
# pagination_class = None
# search_fields = ['costomer']
ordering_fields = ['pk','create_time']
ordering = ['-create_time']
filterset_fields = ['costomer_id']
class TradeViewSet(ModelViewSet):
"""
角色:增删改查
"""
# perms_map = {'get': '*', 'post': 'role_create',
# 'put': 'role_update', 'delete': 'role_delete'}
permission_classes = []
authentication_classes = []
queryset = Trade.objects
serializer_class = TradeSerializer
# pagination_class = None
# search_fields = ['costomer']
ordering_fields = ['pk','create_time']
ordering = ['-create_time']
filterset_fields = ['costomer_id']
| 48.403101
| 164
| 0.6254
| 1,545
| 12,488
| 4.816828
| 0.100324
| 0.034937
| 0.048374
| 0.061811
| 0.837141
| 0.825853
| 0.811879
| 0.766326
| 0.728971
| 0.671056
| 0
| 0.014692
| 0.215167
| 12,488
| 258
| 165
| 48.403101
| 0.741353
| 0.168882
| 0
| 0.602564
| 0
| 0
| 0.094527
| 0.004097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.064103
| 0
| 0.371795
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ee35c211d2a49f4d8ee55fafe01c5f97af44419
| 25
|
py
|
Python
|
samplings/pk.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
samplings/pk.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
samplings/pk.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
def sampling():
pass
| 8.333333
| 15
| 0.6
| 3
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28
| 25
| 2
| 16
| 12.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
4eead47658f3f5e52aa30ea97006b8896542a97f
| 4,059
|
py
|
Python
|
tests/functions/spin_fields/test_exchange_interaction_field.py
|
jdalzatec/llg
|
c0acd728d29a9a821ebadc4f1e17e0327d7e238c
|
[
"MIT"
] | 4
|
2019-09-02T19:18:55.000Z
|
2021-05-05T15:04:54.000Z
|
tests/functions/spin_fields/test_exchange_interaction_field.py
|
lufvelasquezgo/llg
|
c0acd728d29a9a821ebadc4f1e17e0327d7e238c
|
[
"MIT"
] | 116
|
2020-02-09T05:19:52.000Z
|
2022-03-27T18:47:17.000Z
|
tests/functions/spin_fields/test_exchange_interaction_field.py
|
lufvelasquezgo/llg
|
c0acd728d29a9a821ebadc4f1e17e0327d7e238c
|
[
"MIT"
] | null | null | null |
from llg.functions import spin_fields
import numpy
import pytest
def compute_exchange_field(
num_sites, state, j_exchange, spin_moments, num_neighbors, neighbors
):
total = numpy.zeros(shape=(num_sites, 3))
for i in range(num_sites):
sum_nhbs = sum(num_neighbors[:i])
for j in range(num_neighbors[i]):
index = j + sum_nhbs
j_int = j_exchange[index]
nhb = neighbors[index]
total[i] += j_int * state[nhb]
total /= spin_moments[:, numpy.newaxis]
return total
@pytest.mark.repeat(10)
def test_exchange_interaction_field_null_J_exchange(random_state_spins, build_sample):
num_sites, num_interactions, neighbors, num_neighbors = build_sample
spin_moments = numpy.ones(shape=num_sites)
j_exchange = numpy.zeros(shape=num_interactions)
exchanges = j_exchange.reshape(num_sites, 6)
neighbors_ = numpy.array(neighbors).reshape(num_sites, 6)
expected = numpy.zeros(shape=(num_sites, 3))
total = spin_fields.exchange_interaction_field(
random_state_spins, spin_moments, exchanges, neighbors_
)
assert numpy.allclose(expected, total)
@pytest.mark.repeat(10)
def test_exchange_interaction_field_constant_J_exchange(
random_state_spins, build_sample
):
num_sites, num_interactions, neighbors, num_neighbors = build_sample
spin_moments = numpy.ones(shape=num_sites)
j_exchange = numpy.full(num_interactions, numpy.random.uniform(-10, 10))
exchanges = j_exchange.reshape(num_sites, 6)
neighbors_ = numpy.array(neighbors).reshape(num_sites, 6)
expected = compute_exchange_field(
num_sites,
random_state_spins,
j_exchange,
spin_moments,
num_neighbors,
neighbors,
)
assert numpy.allclose(
spin_fields.exchange_interaction_field(
random_state_spins, spin_moments, exchanges, neighbors_
),
expected,
)
@pytest.mark.repeat(10)
def test_exchange_interaction_field_random_J_exchange(
random_state_spins, build_sample, random_j_exchange
):
num_sites, _, neighbors, num_neighbors = build_sample
exchanges = random_j_exchange.reshape(num_sites, 6)
neighbors_ = numpy.array(neighbors).reshape(num_sites, 6)
spin_moments = numpy.ones(shape=num_sites)
expected = compute_exchange_field(
num_sites,
random_state_spins,
random_j_exchange,
spin_moments,
num_neighbors,
neighbors,
)
assert numpy.allclose(
spin_fields.exchange_interaction_field(
random_state_spins,
spin_moments,
exchanges,
neighbors_,
),
expected,
)
@pytest.mark.repeat(10)
def test_exchange_interaction_field_random_spin_moments(
random_state_spins, build_sample, random_spin_moments, random_j_exchange
):
num_sites, _, neighbors, num_neighbors = build_sample
exchanges = random_j_exchange.reshape(num_sites, 6)
neighbors_ = numpy.array(neighbors).reshape(num_sites, 6)
expected = compute_exchange_field(
num_sites,
random_state_spins,
random_j_exchange,
random_spin_moments,
num_neighbors,
neighbors,
)
assert numpy.allclose(
spin_fields.exchange_interaction_field(
random_state_spins,
random_spin_moments,
exchanges,
neighbors_,
),
expected,
)
@pytest.mark.repeat(10)
def test_exchange_interaction_field_null_magnetic_moments(
random_state_spins, build_sample, random_j_exchange
):
num_sites, _, neighbors, _ = build_sample
exchanges = random_j_exchange.reshape(num_sites, 6)
neighbors_ = numpy.array(neighbors).reshape(num_sites, 6)
null_moments = numpy.array([0.0] * num_sites)
expected = numpy.full((num_sites, 3), numpy.inf)
total = numpy.abs(
spin_fields.exchange_interaction_field(
random_state_spins, null_moments, exchanges, neighbors_
)
)
assert numpy.allclose(total, expected)
| 31.710938
| 86
| 0.695245
| 487
| 4,059
| 5.404517
| 0.12115
| 0.082067
| 0.079027
| 0.06079
| 0.828647
| 0.818009
| 0.775836
| 0.726444
| 0.707447
| 0.707447
| 0
| 0.009201
| 0.223454
| 4,059
| 127
| 87
| 31.96063
| 0.825825
| 0
| 0
| 0.626087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.052174
| false
| 0
| 0.026087
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ef490bf509cfc0552e244879896a37730e094ec
| 46
|
py
|
Python
|
battle_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 1
|
2021-12-12T02:50:20.000Z
|
2021-12-12T02:50:20.000Z
|
battle_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 17
|
2020-02-07T23:40:36.000Z
|
2020-12-22T16:38:44.000Z
|
battle_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | null | null | null |
from battle_module.battle import BattleModule
| 23
| 45
| 0.891304
| 6
| 46
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f644efd8d8a51dbfbc05e92dad3ad61e15278318
| 1,271
|
py
|
Python
|
apps/gallery/migrations/0006_auto_20190801_1338.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | null | null | null |
apps/gallery/migrations/0006_auto_20190801_1338.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | 8
|
2020-02-12T01:02:15.000Z
|
2022-03-11T23:53:39.000Z
|
apps/gallery/migrations/0006_auto_20190801_1338.py
|
mrtaalebi/sitigo
|
cce8b4f5299b58d7365789ead416d4568b443743
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-08-01 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gallery', '0005_image_city'),
]
operations = [
migrations.AlterField(
model_name='city',
name='english_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='city',
name='persian_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='country',
name='english_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='country',
name='persian_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='image',
name='english_caption',
field=models.CharField(blank=True, default='default', max_length=200, null=True),
),
migrations.AlterField(
model_name='image',
name='persian_caption',
field=models.CharField(blank=True, default='default', max_length=200, null=True),
),
]
| 28.886364
| 93
| 0.565696
| 124
| 1,271
| 5.637097
| 0.314516
| 0.171674
| 0.214592
| 0.248927
| 0.775393
| 0.775393
| 0.655222
| 0.655222
| 0.655222
| 0.655222
| 0
| 0.042578
| 0.316286
| 1,271
| 43
| 94
| 29.55814
| 0.761795
| 0.035405
| 0
| 0.756757
| 1
| 0
| 0.119281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c90710cfd644160403952cb91188d7c52cf2cac
| 38
|
py
|
Python
|
__init__.py
|
zahirsalim/kpyprotocol
|
9435f2e5ead0bb8abc7a89bad261e67e5f1a40a7
|
[
"MIT"
] | null | null | null |
__init__.py
|
zahirsalim/kpyprotocol
|
9435f2e5ead0bb8abc7a89bad261e67e5f1a40a7
|
[
"MIT"
] | null | null | null |
__init__.py
|
zahirsalim/kpyprotocol
|
9435f2e5ead0bb8abc7a89bad261e67e5f1a40a7
|
[
"MIT"
] | null | null | null |
from KProtocol import KurentoProtocol
| 19
| 37
| 0.894737
| 4
| 38
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c9a2bf1cd96338828e08958877ef1d7c4e62683
| 133
|
py
|
Python
|
labAPI/optimization/samplers/__init__.py
|
robertfasano/labAPI
|
e671c6af2bb702cde018b6d30582c269965da63c
|
[
"MIT"
] | null | null | null |
labAPI/optimization/samplers/__init__.py
|
robertfasano/labAPI
|
e671c6af2bb702cde018b6d30582c269965da63c
|
[
"MIT"
] | null | null | null |
labAPI/optimization/samplers/__init__.py
|
robertfasano/labAPI
|
e671c6af2bb702cde018b6d30582c269965da63c
|
[
"MIT"
] | null | null | null |
from .grid_search import GridSearch
from .differential_evolution import DifferentialEvolution
from .random_search import RandomSearch
| 44.333333
| 57
| 0.894737
| 15
| 133
| 7.733333
| 0.666667
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082707
| 133
| 3
| 58
| 44.333333
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
141e57536cbe00a3ef68b0f8b3e7370ba6196281
| 11,758
|
py
|
Python
|
tests/test_enc.py
|
Roman513/python-xmlsec
|
4a91bbc352a6eb38f3f2c4dcf35691b985ef9da7
|
[
"MIT"
] | null | null | null |
tests/test_enc.py
|
Roman513/python-xmlsec
|
4a91bbc352a6eb38f3f2c4dcf35691b985ef9da7
|
[
"MIT"
] | null | null | null |
tests/test_enc.py
|
Roman513/python-xmlsec
|
4a91bbc352a6eb38f3f2c4dcf35691b985ef9da7
|
[
"MIT"
] | null | null | null |
import os
import tempfile
from lxml import etree
import xmlsec
from tests import base
consts = xmlsec.constants
class TestEncryptionContext(base.TestMemoryLeaks):
    """Tests for ``xmlsec.EncryptionContext``: construction, key handling,
    XML/binary/URI encryption, and decryption round-trips."""

    def test_init(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        del ctx

    def test_init_no_keys_manager(self):
        ctx = xmlsec.EncryptionContext()
        del ctx

    def test_init_bad_args(self):
        with self.assertRaisesRegex(TypeError, 'KeysManager required'):
            xmlsec.EncryptionContext(manager='foo')

    def test_no_key(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        self.assertIsNone(ctx.key)

    def test_get_key(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        self.assertIsNone(ctx.key)
        ctx.key = xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem)
        self.assertIsNotNone(ctx.key)

    def test_del_key(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        ctx.key = xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem)
        del ctx.key
        self.assertIsNone(ctx.key)

    def test_set_key(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        ctx.key = xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem)
        self.assertIsNotNone(ctx.key)

    def test_set_key_bad_type(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        with self.assertRaisesRegex(TypeError, r'instance of \*xmlsec.Key\* expected.'):
            ctx.key = ''

    def test_set_invalid_key(self):
        ctx = xmlsec.EncryptionContext(manager=xmlsec.KeysManager())
        with self.assertRaisesRegex(TypeError, 'empty key.'):
            ctx.key = xmlsec.Key()

    def test_encrypt_xml(self):
        root = self.load_xml('enc1-in.xml')
        enc_data = xmlsec.template.encrypted_data_create(root, consts.TransformAes128Cbc, type=consts.TypeEncElement, ns="xenc")
        xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
        ki = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns="dsig")
        ek = xmlsec.template.add_encrypted_key(ki, consts.TransformRsaOaep)
        xmlsec.template.encrypted_data_ensure_cipher_value(ek)
        data = root.find('./Data')
        self.assertIsNotNone(data)
        manager = xmlsec.KeysManager()
        manager.add_key(xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem))
        ctx = xmlsec.EncryptionContext(manager)
        ctx.key = xmlsec.Key.generate(consts.KeyDataAes, 128, consts.KeyDataTypeSession)
        encrypted = ctx.encrypt_xml(enc_data, data)
        self.assertIsNotNone(encrypted)
        # Verify the advertised data- and key-encryption algorithms of the result.
        enc_method = xmlsec.tree.find_child(enc_data, consts.NodeEncryptionMethod, consts.EncNs)
        self.assertIsNotNone(enc_method)
        self.assertEqual("http://www.w3.org/2001/04/xmlenc#aes128-cbc", enc_method.get("Algorithm"))
        ki = xmlsec.tree.find_child(enc_data, consts.NodeKeyInfo, consts.DSigNs)
        self.assertIsNotNone(ki)
        enc_method2 = xmlsec.tree.find_node(ki, consts.NodeEncryptionMethod, consts.EncNs)
        self.assertIsNotNone(enc_method2)
        self.assertEqual("http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p", enc_method2.get("Algorithm"))
        cipher_value = xmlsec.tree.find_node(ki, consts.NodeCipherValue, consts.EncNs)
        self.assertIsNotNone(cipher_value)

    def test_encrypt_xml_bad_args(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaises(TypeError):
            ctx.encrypt_xml('', 0)

    def test_encrypt_xml_bad_template(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaisesRegex(xmlsec.Error, 'unsupported `Type`, it should be `element` or `content`'):
            ctx.encrypt_xml(etree.Element('root'), etree.Element('node'))

    def test_encrypt_xml_bad_template_bad_type_attribute(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaisesRegex(xmlsec.Error, 'unsupported `Type`, it should be `element` or `content`'):
            root = etree.Element('root')
            root.attrib['Type'] = 'foo'
            ctx.encrypt_xml(root, etree.Element('node'))

    def test_encrypt_xml_fail(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaisesRegex(xmlsec.Error, 'failed to encrypt xml'):
            root = etree.Element('root')
            root.attrib['Type'] = consts.TypeEncElement
            ctx.encrypt_xml(root, etree.Element('node'))

    def test_encrypt_binary(self):
        root = self.load_xml('enc2-in.xml')
        enc_data = xmlsec.template.encrypted_data_create(
            root, consts.TransformAes128Cbc, type=consts.TypeEncContent, ns="xenc", mime_type="binary/octet-stream"
        )
        xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
        ki = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns="dsig")
        ek = xmlsec.template.add_encrypted_key(ki, consts.TransformRsaOaep)
        xmlsec.template.encrypted_data_ensure_cipher_value(ek)
        manager = xmlsec.KeysManager()
        manager.add_key(xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem))
        ctx = xmlsec.EncryptionContext(manager)
        ctx.key = xmlsec.Key.generate(consts.KeyDataAes, 128, consts.KeyDataTypeSession)
        encrypted = ctx.encrypt_binary(enc_data, b'test')
        self.assertIsNotNone(encrypted)
        self.assertEqual("{%s}%s" % (consts.EncNs, consts.NodeEncryptedData), encrypted.tag)
        enc_method = xmlsec.tree.find_child(enc_data, consts.NodeEncryptionMethod, consts.EncNs)
        self.assertIsNotNone(enc_method)
        self.assertEqual("http://www.w3.org/2001/04/xmlenc#aes128-cbc", enc_method.get("Algorithm"))
        ki = xmlsec.tree.find_child(enc_data, consts.NodeKeyInfo, consts.DSigNs)
        self.assertIsNotNone(ki)
        enc_method2 = xmlsec.tree.find_node(ki, consts.NodeEncryptionMethod, consts.EncNs)
        self.assertIsNotNone(enc_method2)
        self.assertEqual("http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p", enc_method2.get("Algorithm"))
        cipher_value = xmlsec.tree.find_node(ki, consts.NodeCipherValue, consts.EncNs)
        self.assertIsNotNone(cipher_value)

    def test_encrypt_binary_bad_args(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaises(TypeError):
            ctx.encrypt_binary('', 0)

    def test_encrypt_binary_bad_template(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaisesRegex(xmlsec.Error, 'failed to encrypt binary'):
            ctx.encrypt_binary(etree.Element('root'), b'data')

    def test_encrypt_uri(self):
        root = self.load_xml('enc2-in.xml')
        enc_data = xmlsec.template.encrypted_data_create(
            root, consts.TransformAes128Cbc, type=consts.TypeEncContent, ns="xenc", mime_type="binary/octet-stream"
        )
        xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
        ki = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns="dsig")
        ek = xmlsec.template.add_encrypted_key(ki, consts.TransformRsaOaep)
        xmlsec.template.encrypted_data_ensure_cipher_value(ek)
        manager = xmlsec.KeysManager()
        manager.add_key(xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem))
        ctx = xmlsec.EncryptionContext(manager)
        ctx.key = xmlsec.Key.generate(consts.KeyDataAes, 128, consts.KeyDataTypeSession)
        # delete=False: the file must be closed (flushed) before xmlsec re-opens
        # it by URI, and Windows cannot re-open a file that is still open.
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            tmpfile.write(b'test')
        try:
            encrypted = ctx.encrypt_binary(enc_data, 'file://' + tmpfile.name)
            self.assertIsNotNone(encrypted)
            self.assertEqual("{%s}%s" % (consts.EncNs, consts.NodeEncryptedData), encrypted.tag)
            enc_method = xmlsec.tree.find_child(enc_data, consts.NodeEncryptionMethod, consts.EncNs)
            self.assertIsNotNone(enc_method)
            self.assertEqual("http://www.w3.org/2001/04/xmlenc#aes128-cbc", enc_method.get("Algorithm"))
            ki = xmlsec.tree.find_child(enc_data, consts.NodeKeyInfo, consts.DSigNs)
            self.assertIsNotNone(ki)
            enc_method2 = xmlsec.tree.find_node(ki, consts.NodeEncryptionMethod, consts.EncNs)
            self.assertIsNotNone(enc_method2)
            self.assertEqual("http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p", enc_method2.get("Algorithm"))
            cipher_value = xmlsec.tree.find_node(ki, consts.NodeCipherValue, consts.EncNs)
            self.assertIsNotNone(cipher_value)
        finally:
            # BUG FIX: the temporary file (delete=False) was never removed.
            os.unlink(tmpfile.name)

    def test_encrypt_uri_bad_args(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaises(TypeError):
            ctx.encrypt_uri('', 0)

    def test_encrypt_uri_fail(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaisesRegex(xmlsec.InternalError, 'failed to encrypt URI'):
            ctx.encrypt_uri(etree.Element('root'), '')

    def test_decrypt1(self):
        self.check_decrypt(1)

    def test_decrypt2(self):
        self.check_decrypt(2)

    def test_decrypt_key(self):
        # Decrypt the session key first, then use it to decrypt the payload.
        root = self.load_xml('enc3-out.xml')
        enc_key = xmlsec.tree.find_child(root, consts.NodeEncryptedKey, consts.EncNs)
        self.assertIsNotNone(enc_key)
        manager = xmlsec.KeysManager()
        manager.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        ctx = xmlsec.EncryptionContext(manager)
        keydata = ctx.decrypt(enc_key)
        ctx.reset()
        root.remove(enc_key)
        ctx.key = xmlsec.Key.from_binary_data(consts.KeyDataAes, keydata)
        enc_data = xmlsec.tree.find_child(root, consts.NodeEncryptedData, consts.EncNs)
        self.assertIsNotNone(enc_data)
        decrypted = ctx.decrypt(enc_data)
        self.assertIsNotNone(decrypted)
        self.assertEqual(self.load_xml("enc3-in.xml"), decrypted)

    def check_decrypt(self, i):
        """Decrypt fixture ``enc<i>-out.xml`` and compare with ``enc<i>-in.xml``."""
        root = self.load_xml('enc%d-out.xml' % i)
        enc_data = xmlsec.tree.find_child(root, consts.NodeEncryptedData, consts.EncNs)
        self.assertIsNotNone(enc_data)
        manager = xmlsec.KeysManager()
        manager.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
        ctx = xmlsec.EncryptionContext(manager)
        decrypted = ctx.decrypt(enc_data)
        self.assertIsNotNone(decrypted)
        self.assertEqual(self.load_xml("enc%d-in.xml" % i), root)

    def test_decrypt_bad_args(self):
        ctx = xmlsec.EncryptionContext()
        with self.assertRaises(TypeError):
            ctx.decrypt('')

    def check_no_segfault(self):
        # Not auto-collected (no 'test_' prefix); invoked manually as a
        # segfault-regression check.
        namespaces = {'soap': 'http://schemas.xmlsoap.org/soap/envelope/'}
        manager = xmlsec.KeysManager()
        key = xmlsec.Key.from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatCertPem)
        manager.add_key(key)
        template = self.load_xml('enc-bad-in.xml')
        enc_data = xmlsec.template.encrypted_data_create(
            template, xmlsec.Transform.AES128, type=xmlsec.EncryptionType.CONTENT, ns='xenc'
        )
        xmlsec.template.encrypted_data_ensure_cipher_value(enc_data)
        key_info = xmlsec.template.encrypted_data_ensure_key_info(enc_data, ns='dsig')
        enc_key = xmlsec.template.add_encrypted_key(key_info, xmlsec.Transform.RSA_PKCS1)
        xmlsec.template.encrypted_data_ensure_cipher_value(enc_key)
        data = template.find('soap:Body', namespaces=namespaces)
        enc_ctx = xmlsec.EncryptionContext(manager)
        enc_ctx.key = xmlsec.Key.generate(xmlsec.KeyData.AES, 192, xmlsec.KeyDataType.SESSION)
        # BUG FIX: the original called
        #   self.assertRaises(Exception, enc_ctx.encrypt_xml(enc_data, data))
        # which invokes encrypt_xml *before* assertRaises runs, so any
        # exception escaped the assertion. Use the context-manager form.
        with self.assertRaises(Exception):
            enc_ctx.encrypt_xml(enc_data, data)
| 45.929688
| 128
| 0.696887
| 1,405
| 11,758
| 5.649822
| 0.120285
| 0.024691
| 0.075334
| 0.064248
| 0.817964
| 0.787478
| 0.761401
| 0.746032
| 0.734694
| 0.720333
| 0
| 0.010466
| 0.187362
| 11,758
| 255
| 129
| 46.109804
| 0.820304
| 0
| 0
| 0.574879
| 0
| 0
| 0.082072
| 0
| 0
| 0
| 0
| 0
| 0.236715
| 1
| 0.125604
| false
| 0
| 0.024155
| 0
| 0.154589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1adee8c371f0ae47ceaf49652ee8998ada6ad545
| 10
|
py
|
Python
|
tests/trigger/samples/sample.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | 7
|
2021-03-23T17:40:45.000Z
|
2022-03-14T16:07:27.000Z
|
tests/trigger/samples/sample.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | 27
|
2020-06-29T13:35:45.000Z
|
2022-01-21T07:10:55.000Z
|
tests/trigger/samples/sample.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | 14
|
2020-07-26T18:23:16.000Z
|
2022-03-09T13:44:53.000Z
|
# Benign sample statement used by the trigger tests; binds ``a`` to 2.
a = 2
| 5
| 9
| 0.3
| 3
| 10
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.5
| 10
| 1
| 10
| 10
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
211605b05bcf3dfcce43336376cc719034263dff
| 24
|
py
|
Python
|
src/Mosaic/main.py
|
MarcMDE/Mosaic
|
ecf7628f23dbc0af4ba774d04fbc321b92065af2
|
[
"CC0-1.0"
] | null | null | null |
src/Mosaic/main.py
|
MarcMDE/Mosaic
|
ecf7628f23dbc0af4ba774d04fbc321b92065af2
|
[
"CC0-1.0"
] | null | null | null |
src/Mosaic/main.py
|
MarcMDE/Mosaic
|
ecf7628f23dbc0af4ba774d04fbc321b92065af2
|
[
"CC0-1.0"
] | null | null | null |
# TODO: Test img resize
| 12
| 23
| 0.708333
| 4
| 24
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 24
| 1
| 24
| 24
| 0.894737
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
211757f4325fb8154f1bbb43c1c17f0a565c6dfc
| 54,173
|
py
|
Python
|
code/recnn/model/data_loader.py
|
SebastianMacaluso/RecNN_PyTorch
|
bb67898268aa5d8c1cc432bb747602fb0d93d631
|
[
"MIT"
] | 6
|
2019-04-01T17:53:04.000Z
|
2020-05-13T17:00:21.000Z
|
code/recnn/model/data_loader.py
|
SebastianMacaluso/RecNN_PyTorch_batch
|
bb67898268aa5d8c1cc432bb747602fb0d93d631
|
[
"MIT"
] | 1
|
2020-01-09T17:03:16.000Z
|
2020-01-09T17:03:16.000Z
|
code/recnn/model/data_loader.py
|
SebastianMacaluso/RecNN_PyTorch_batch
|
bb67898268aa5d8c1cc432bb747602fb0d93d631
|
[
"MIT"
] | 6
|
2019-03-27T18:57:37.000Z
|
2021-06-10T15:42:15.000Z
|
""" Classes and functions to load the raw data and create the batches """
import random
import numpy as np
import os
import sys
import pickle
import gzip
import subprocess
#import matplotlib as mpl
import json
import itertools
import re
import random
from sklearn.utils import check_random_state
import torch
from torch.autograd import Variable
from sklearn.preprocessing import RobustScaler
import logging
import sys
from model import preprocess #The local dir is the train.py dir
##############################################################################################################
#///////////////////// CLASSES ////////////////////////////////////////////////////////////////////////
##############################################################################################################
# use GPU if available
class torch_params(object):
    """Holder for torch runtime flags shared across the module."""
    # Evaluated once at import time; True when torch reports a usable CUDA device.
    cuda = torch.cuda.is_available()
##############################################################################################################
# methods to load the raw data and create the batches
class DataLoader(object):
    """
    Handles all aspects of the data. Has the methods to load the raw data and create the batches
    """
    def __init__(self):
        ''' Empty '''
        # NOTE(review): __init__ intentionally holds no state; presumably the
        # loading/batching helpers in this module are used without instance
        # data — confirm against the callers in train.py.
#-----------------------------------------------------------------------------------------------------------
# Make the input tree dictionaries. String should be either qcd or tt
def makeTrees(dir_subjets, string, N_jets, label):
    '''
    Load jet events from disk and build the input tree dictionaries.

    Args:
        dir_subjets: directory with the event files. Every file in it whose
            name contains 'tree' and `string` and ends in '.dat' is loaded.
            Each line of a file is a JSON list
            [tree, content, mass, pt, charge, abs_charge, muon], one entry
            per jet in the event.
        string: substring identifying which files to load (signal or background).
        N_jets: maximum number of jet trees to generate; pass inf to load all
            jets in the files.
        label: 1 for signal, 0 for background.

    Returns:
        List of jet dictionaries. Currently only the first jet of each event
        is kept; modify for full-event studies.
    '''
    subjetlist = [filename for filename in np.sort(os.listdir(dir_subjets))
                  if ('tree' in filename and string in filename and filename.endswith('.dat'))]
    N_analysis = len(subjetlist)
    logging.info('Number of jet files for ' + str(string) + '=' + str(N_analysis))
    logging.info('Loading ' + str(string) + ' jet files... ')
    logging.info(str(string) + ' files list = ' + str(subjetlist))
    Ntotjets = 0
    jets = []
    # Loop over the files and the events (lines) in each file.
    for ifile in range(N_analysis):
        # BUG FIX: the original iterated a bare open() and leaked the file
        # handle; `with` guarantees it is closed, including on early return.
        with open(os.path.join(dir_subjets, subjetlist[ifile])) as handle:
            for s in handle:
                if Ntotjets >= N_jets:
                    return jets
                event = json.loads(s)
                Ntotjets += 1
                if Ntotjets % 10000 == 0:
                    logging.info('Ntotjets=' + str(Ntotjets))
                tree = np.asarray(event[0])
                content = np.asarray(event[1])
                mass = np.asarray(event[2])
                pt = np.asarray(event[3])
                charge = np.asarray(event[4])
                abs_charge = np.asarray(event[5])
                muon = np.asarray(event[6])
                # Children indices arrive flat; reshape to (n_nodes, 2) per jet.
                tree = np.array([np.asarray(e).reshape(-1, 2) for e in tree])
                # 4-momenta arrive flat; reshape to (n_nodes, 4) per jet.
                content = np.array([np.asarray(e).reshape(-1, 4) for e in content])
                # Loop over the jets in each event. Currently loads only the
                # 1st jet; modify for full-event studies.
                for i in range(1):
                    jet = {}
                    jet["root_id"] = 0
                    jet["tree"] = tree[i]  # children indices of each tree node
                    # content[i][0] is the jet 4-momentum; the remaining rows
                    # are the constituents' 4-momenta (PyTorch layout; use
                    # shape (-1, 4, 1) instead for TensorFlow).
                    jet["content"] = np.reshape(content[i], (-1, 4))
                    jet["mass"] = mass[i]
                    jet["pt"] = pt[i]
                    jet["energy"] = content[i][0, 3]
                    jet["charge"] = charge[i]
                    jet["abs_charge"] = abs_charge[i]
                    jet["muon"] = muon[i]
                    # The jet 4-momentum is ordered (px, py, pz, E).
                    px = content[i][0, 0]
                    py = content[i][0, 1]
                    pz = content[i][0, 2]
                    p = (content[i][0, 0:3] ** 2).sum() ** 0.5
                    eta = 0.5 * (np.log(p + pz) - np.log(p - pz))  # pseudorapidity
                    phi = np.arctan2(py, px)
                    jet["eta"] = eta
                    jet["phi"] = phi
                    if label == 1:
                        jet["label"] = 1
                    else:
                        jet["label"] = 0
                    jets.append(jet)
    logging.info('Number of jets =' + str(len(jets)))
    logging.info('---' * 20)
    return jets
#-----------------------------------------------------------------------------------------------------------
# Split the sample into train, cross-validation and test
def merge_shuffle_sample(sig, bkg):
    '''
    Merge equal numbers of signal and background jets and shuffle them.

    Truncates both samples to the size of the smaller one so the merged set
    is balanced, concatenates them with 1/0 labels, and applies a fixed
    permutation (seed 1) so runs are reproducible.

    Args:
        sig: signal sample (array-like of jet dictionaries).
        bkg: background sample (array-like of jet dictionaries).

    Returns:
        Tuple (X, Y): shuffled jets and their integer labels as numpy arrays.
    '''
    # BUG FIX (docstring): the original documented train/val/test fraction
    # arguments that this function does not take; also dropped the unused
    # `rndstate = random.getstate()` that was saved but never restored.
    logging.info('---' * 20)
    logging.info('Loading and shuffling the trees ...')
    # NOTE(review): seeds the global RNG but nothing below uses `random`;
    # kept to preserve the module-level side effect — confirm it can go.
    random.seed(0)
    size = np.minimum(len(sig), len(bkg))
    sig_label = np.ones((size), dtype=int)
    bkg_label = np.zeros((size), dtype=int)
    # Concatenate signal and background data (balanced).
    X = np.concatenate((sig[0:int(size)], bkg[0:int(size)]))
    Y = np.concatenate((sig_label[0:int(size)], bkg_label[0:int(size)]))
    # Shuffle both arrays with the same fixed permutation.
    indices = check_random_state(1).permutation(len(X))
    X = X[indices]
    Y = Y[indices]
    X = np.asarray(X)
    Y = np.asarray(Y)
    print('test_y=', Y)
    print('---' * 20)
    logging.info('X shape=' + str(X.shape))
    return X, Y
#-----------------------------------------------------------------------------------------------------------
#
def scale_features(jets):
    """Fit a RobustScaler on all constituents and rescale every jet in place.

    RobustScaler centers each feature (column) on its median and divides by
    half the distance between the 1st and 3rd quartiles, which makes the
    scaling robust to outliers. Returns the mutated `jets` list.
    """
    stacked = np.vstack([jet["content"] for jet in jets])
    transformer = RobustScaler().fit(stacked)  # remove outliers
    for single_jet in jets:
        # Center and scale the constituent features.
        single_jet["content"] = transformer.transform(single_jet["content"])
    return jets
#-----------------------------------------------------------------------------------------------------------
#
def get_transformer(jets):
    """Return a RobustScaler fitted on the stacked constituents of all jets."""
    all_content = np.vstack([jet["content"] for jet in jets])
    return RobustScaler().fit(all_content)  # remove outliers
#-----------------------------------------------------------------------------------------------------------
#
def transform_features(transformer, jets):
    """Apply an already-fitted scaler to every jet's constituents in place.

    Centers and scales each jet's "content" array and returns the mutated
    `jets` list.
    """
    for single_jet in jets:
        single_jet["content"] = transformer.transform(single_jet["content"])
    return jets
#-----------------------------------------------------------------------------------------------------------
# Split the sample into train, cross-validation and test
def shuffle_autoencoder(sig, bkg):
    '''
    Build a balanced, shuffled test set for the autoencoder studies.

    Truncates both samples to the size of the smaller one, concatenates them
    with 1/0 labels, and shuffles with a fixed permutation (seed 3) so runs
    are reproducible. Unlike split_shuffle_sample, everything goes into a
    single test set.

    Args:
        sig: signal sample (array-like of jet dictionaries).
        bkg: background sample (array-like of jet dictionaries).

    Returns:
        Tuple (test_x, test_y): shuffled jets and their integer labels.
    '''
    # BUG FIX (docstring): the original documented train/val/test fraction
    # arguments this function does not take; also dropped the unused
    # `rndstate = random.getstate()` and the large blocks of commented-out
    # train/val splitting code.
    print('---' * 20)
    print('Loading and shuffling the trees ...')
    # NOTE(review): seeds the global RNG but nothing below uses `random`;
    # kept to preserve the module-level side effect — confirm it can go.
    random.seed(0)
    size = np.minimum(len(sig), len(bkg))
    sig_label = np.ones((size), dtype=int)
    bkg_label = np.zeros((size), dtype=int)
    print('Creating train, val and test datasets ...')
    Ntest = int(size)
    # Concatenate signal and background data (balanced).
    test_x = np.concatenate((sig[0:Ntest], bkg[0:Ntest]))
    test_y = np.concatenate((sig_label[0:Ntest], bkg_label[0:Ntest]))
    # Shuffle with a fixed permutation.
    indices_test = check_random_state(3).permutation(len(test_x))
    test_x = test_x[indices_test]
    test_y = test_y[indices_test]
    test_x = np.asarray(test_x)
    test_y = np.asarray(test_y)
    print('Test shape=', test_x.shape)
    print('test_y=', test_y)
    print('---' * 20)
    return test_x, test_y
#-----------------------------------------------------------------------------------------------------------
# Split the sample into train, cross-validation and test
def split_shuffle_sample(sig, bkg, train_frac_rel, val_frac_rel, test_frac_rel):
    '''
    Split the sample into balanced train/validation/test sets and shuffle each.

    Each split contains an equal number of signal and background jets; the
    fractions are relative to the size of the smaller input sample.

    Args:
        sig: signal sample (numpy array of jet dictionaries).
        bkg: background sample (numpy array of jet dictionaries).
        train_frac_rel: fraction of data for the train set.
        val_frac_rel: fraction of data for the validation set.
        test_frac_rel: fraction of data for the test set.

    Returns:
        Tuple (train_x, train_y, dev_x, dev_y, test_x, test_y).
    '''
    print('---' * 20)
    print('Loading and shuffling the trees ...')
    random.seed()
    size = np.minimum(len(sig), len(bkg))
    sig_label = np.ones((size), dtype=int)
    bkg_label = np.zeros((size), dtype=int)
    print('Creating train, val and test datasets ...')
    # Cumulative fractions -> absolute index boundaries of each split.
    train_frac = train_frac_rel
    val_frac = train_frac + val_frac_rel
    test_frac = val_frac + test_frac_rel
    N_train = int(train_frac * size)
    Nval = int(val_frac * size)
    Ntest = int(test_frac * size)
    # Shuffle sig and bkg sets independently.
    # BUG FIX: sklearn's check_random_state(seed) takes a required positional
    # seed, so the original bare check_random_state() raised TypeError.
    # Passing None yields a freshly seeded RandomState, matching the intent
    # of the random.seed() call above (non-reproducible shuffles).
    indices_sig = check_random_state(None).permutation(len(sig))
    sig = sig[indices_sig]
    indices_bkg = check_random_state(None).permutation(len(bkg))
    bkg = bkg[indices_bkg]
    # Concatenate signal and background data for each split (balanced).
    train_x = np.concatenate((sig[0:N_train], bkg[0:N_train]))
    train_y = np.concatenate((sig_label[0:N_train], bkg_label[0:N_train]))
    dev_x = np.concatenate((sig[N_train:Nval], bkg[N_train:Nval]))
    dev_y = np.concatenate((sig_label[N_train:Nval], bkg_label[N_train:Nval]))
    test_x = np.concatenate((sig[Nval:Ntest], bkg[Nval:Ntest]))
    test_y = np.concatenate((sig_label[Nval:Ntest], bkg_label[Nval:Ntest]))
    # Shuffle each split.
    indices_train = check_random_state(None).permutation(len(train_x))
    print('train_y=', train_y)
    print('train x shape=', train_x.shape)
    print('///' * 20)
    train_x = train_x[indices_train]
    train_y = train_y[indices_train]
    print('train_y=', train_y)
    indices_dev = check_random_state(None).permutation(len(dev_x))
    dev_x = dev_x[indices_dev]
    dev_y = dev_y[indices_dev]
    indices_test = check_random_state(None).permutation(len(test_x))
    test_x = test_x[indices_test]
    test_y = test_y[indices_test]
    train_x = np.asarray(train_x)
    dev_x = np.asarray(dev_x)
    test_x = np.asarray(test_x)
    train_y = np.asarray(train_y)
    dev_y = np.asarray(dev_y)
    test_y = np.asarray(test_y)
    print('Train shape=', train_x.shape)
    print('Val shape=', dev_x.shape)
    print('Test shape=', test_x.shape)
    return train_x, train_y, dev_x, dev_y, test_x, test_y
#-----------------------------------------------------------------------------------------------------------
# CURRENTLY NOT USED. SKIP TO THE NEXT METHOD.
# Batchization of the recursion.
#This creates batches without zero padding.
def batch_level_no_pad(jets):
# Batch the recursive activations across all nodes of a same level
# !!! Assume that jets have at least one inner node.
# Leads to off-by-one errors otherwise :(
# Reindex node IDs over all jets
#
# jet_children: array of shape [n_nodes, 2]
# jet_children[node_id, 0] is the node_id of the left child of node_id
# jet_children[node_id, 1] is the node_id of the right child of node_id
#
# jet_contents: array of shape [n_nodes, n_features]
# jet_contents[node_id] is the feature vector of node_id (4-vector in our case)
jet_children =np.vstack([jet['tree'] for jet in jets])
# print('jet_children=',jet_children)
# jet_children = np.vstack(jet_children) #We concatenate all the jets tree into 1 tree
# print('jet_children=',jet_children)
jet_contents = np.vstack([jet["content"] for jet in jets]) #We concatenate all the jet['contents'] into 1 array
# print('jet_contents=',jet_contents)
n_nodes=len(jet_children)
#---------------------
# Level-wise traversal
level_children = np.zeros((n_nodes, 4), dtype=np.int32) #Array with 4 features per node
level_children[:, [0, 2]] -= 1 #We set features 0 and 2 to -1. Features 0 and 2 will be the position of the left and right children of node_i, where node_i is given by "contents[node_i]" and left child is "content[level_children[node,0]]"
#
# # SANITY CHECK 1: Below we check that the jet_children contains the right location of each children subjet
# ii = -28
# print('Content ',ii,' = ',jet_contents[ii])
# print('Children location =',jet_children[ii])
# if jet_children[ii][0]==-1: print('The node is a leaf')
# else: print('Content ',ii,' by adding the 2 children 4-vectors= ',jet_contents[jet_children[ii,0]]
# + jet_contents[jet_children[ii,1]])
inners = [] # Inner nodes at level i ---- The nodes that are not leaves are in this category (SM)
outers = [] # Outer nodes at level i ---- The leaves are in this category (SM)
offset = 0
for jet in jets: # We fill the inners and outers array where each row corresponds to 1 level. We have each jet next to each other, so each jet root is a new column at depth 0, the first children add 2 columns at depth 1, .... Then we save in "level_children" the position of the left(right) child in the inners (or outers) array at depth i. So the 4-vector of node_i would be e.g. content[outers[level_children[i,0]]
queue = [(jet["root_id"], -1, True, 0)] #(node, parent position, is_left, depth)
while len(queue) > 0:
node, parent, is_left, depth = queue.pop(0) #We pop the first element (This is expensive because we have to change the position of all the other tuples in the queue)
if len(inners) < depth + 1:
inners.append([]) #We append an empty list (1 per level) when the first node of a level shows up.
if len(outers) < depth + 1:
outers.append([])
# Inner node
if jet_children[node, 0] != -1:#If node is not a leaf (it has a left child)
inners[depth].append(node+offset) #We append the node to the inner list at row=depth because it has children
position = len(inners[depth]) - 1 #position on the inners list of the last node we added
is_leaf = False
queue.append((jet_children[node+offset, 0], node+offset, True, depth + 1)) #Format: (node at the next level, parent node,"left", depth)
queue.append((jet_children[node+offset, 1], node+offset, False, depth + 1))
# Outer node
else: #If the node is a leaf
outers[depth].append(node+offset)
# print('outers=',outers)
position = len(outers[depth]) - 1 #position on the outers list of the last node we added
is_leaf = True
# Register node at its parent. We save the position of the left and right children in the inners (or outers) array (at depth=depth_parent+1)
if parent >= 0:
if is_left:
level_children[parent, 0] = position #position of the left child in the inners (or outers) array (at depth=depth_parent+1)
level_children[parent, 1] = is_leaf #if True then the left child is a leaf => look in the outers array, else in the inners one
else:
level_children[parent, 2] = position
level_children[parent, 3] = is_leaf
offset += len(jet["tree"]) # We need this offset to get the right location in the jet_children array of each jet root node
#
# # SANITY CHECK 2: Below we check that the level_children contains the right location of each children subjet
# ii = 1 #location of the parent in the inner list at level_parent
# level_parent=0
# print('Root of jet #',ii+1,' location =',inners[level_parent][ii]) #The root is at level 0
# print('Content jet #',ii+1,'=',jet_contents[inners[level_parent][ii]])
# print('Children location:\n left=',inners[level_parent+1][level_children[inners[level_parent][ii],0]],' right=',inners[level_parent+1][level_children[inners[level_parent][ii],2]])
# if level_children[inners[level_parent][ii],1]==True: print('The node is a leaf')
# else: print('Content ',inners[0][ii],' by adding the 2 children 4-vectors= ',jet_contents[inners[level_parent+1][level_children[inners[level_parent][ii],0]]]
# + jet_contents[inners[level_parent+1][level_children[inners[level_parent][ii],2]]])
#
# print('Is leaf at level ', level_parent,' = ', level_children[inners[level_parent][::],1])
# Reorganize levels[i] so that inner nodes appear first, then outer nodes
levels = []
n_inners = []
contents = []
prev_inner = np.array([], dtype=int)
print('----'*20)
for inner, outer in zip(inners, outers):
print('inner=',inner)
print('outer=',outer)
n_inners.append(len(inner)) # We append the number of inner nodes in each level
inner = np.array(inner, dtype=int)
outer = np.array(outer, dtype=int)
levels.append(np.concatenate((inner, outer))) #Append the inners and outers of each level
left = prev_inner[level_children[prev_inner, 1] == 1] # level_children[prev_inner, 1] returns a list with 1 for left children at level prev_inner+1 that are leaves and 0 otherwise. Then prev_inner[level_children[prev_inner, 1] == 1] picks the nodes at level prev_inner whose left children are leaves. So left are all nodes level prev_inner whose left child (at level prev_inner+1) is a leaf.
level_children[left, 0] += len(inner) #We apply an offset to "left" because we concatenated inner and outer, with inners coming first. So now we get the right position of the children that are leaves in the levels array.
right = prev_inner[level_children[prev_inner, 3] == 1]
level_children[right, 2] += len(inner)
contents.append(jet_contents[levels[-1]]) # We append the 4-vector given by the nodes in the last row that we added to levels. This way we create a list of contents where each row corresponds to 1 level.
# Then, the position of the left and right children in the levels list, will also be the position of them in the contents list, which is given by level_children Note that level_children keeps the old indices arrangement.
prev_inner = inner #This will be the inner of the previous level in the next loop
# print('level_children[prev_inner, 1] =',level_children[prev_inner, 1] )
# print('left=',left)
# print('right=',right)
# print('prev_inner=',prev_inner)
# print('contents=',contents)
# print('length contents=',len(contents))
# print('length levels =',len(levels))
#
# # # SANITY CHECK 3:
# ii = 1 #location of the parent in the inner list at level_parent
# level_parent=3
# print('Final rearrangement of jets in batches')
# print('Root of jet #',ii+1,' location =','level',level_parent,' pos:',ii) #The root is at level 0
# print('Content jet #',ii+1,'=',contents[level_parent][ii])
# print('Children location in the contents list','level',level_parent+1,'\n left=',level_children[levels[level_parent][ii],0],' right=',level_children[levels[level_parent][ii],2])
# if level_children[[ii],1]==True: print('The node is a leaf')
# else: print('Content ','level',level_parent,' pos:',ii,' by adding the 2 children 4-vectors= ',contents[level_parent+1][level_children[levels[level_parent][ii],0]]
# + contents[level_parent+1][level_children[levels[level_parent][ii],2]])
# levels: list of arrays
# levels[i][j] is a node id at a level i in one of the trees
# inner nodes are positioned within levels[i][:n_inners[i]], while
# leaves are positioned within levels[i][n_inners[i]:]
#
# level_children: array of shape [n_nodes, 4]
# level_children[node_id, 0] is the position j in the next level of
# the left child of node_id
# level_children[node_id, 2] is the position j in the next level of
# the right child of node_id
#
# n_inners: list of shape len(levels)
# n_inners[i] is the number of inner nodes at level i, accross all
# trees
#
# contents: array of shape [n_levels, n_nodes, n_features]
# contents[sum(len(l) for l in layers[:i]) + j] is the feature vector
# or node layers[i][j]
print('n_inners[0]=',n_inners[0])
return (levels, level_children[:, [0, 2]], n_inners, contents)
#-----------------------------------------------------------------------------------------------------------
# Batchization of the recursion (USING G LOUPPE'S CODE). String should be either qcd or tt. Adding zero padding
def batch_nyu_pad(jets,features):
    """
    Batch the recursive activations across all nodes of the same level, over
    a list of jets, and zero-pad every level to the size of the largest one.

    !!! Assumes every jet has at least one inner node; leads to off-by-one
    errors otherwise.

    Args:
        jets: list of jet dictionaries; each jet has
              "tree"    - array [n_nodes, 2] with the (left, right) child ids
                          of each node, -1 marking a leaf,
              "content" - array [n_nodes, n_features] with the feature vector
                          of each node,
              "root_id" - index of the jet root within its own tree.
        features: number of features per node (used for the zero padding).

    Returns:
        levels: list of arrays; levels[i][j] is a node id at level i in one
                of the trees. Inner nodes sit in levels[i][:n_inners[i]],
                leaves in levels[i][n_inners[i]:]. Each level is zero-padded
                up to the size of the largest level.
        level_children[:, [0, 2]]: array [n_nodes, 2]; the position in the
                next level of the left/right child of each node.
        n_inners: array; n_inners[i] is the number of inner nodes at level i
                across all jets.
        contents: list of arrays [max_level_size, features]; contents[i][j]
                is the feature vector of node levels[i][j] (zero rows for the
                padding).
        n_level: list with the unpadded number of nodes in each level.
    """
    # Reindex node ids over all jets: each jet's tree is shifted by the
    # number of nodes that precede it, so all trees can be stacked into one
    # jet_children array of shape [n_nodes, 2].
    jet_children = []
    offset = 0
    for jet in jets:
        tree = np.copy(jet["tree"])
        tree[tree != -1] += offset  # shift every id except the -1 leaf markers
        jet_children.append(tree)
        offset += len(tree)
    jet_children = np.vstack(jet_children)
    jet_contents = np.vstack([jet["content"] for jet in jets])
    n_nodes = offset
    # Level-wise traversal. level_children[node, 0]/[node, 2] will hold the
    # position of the left/right child of `node` inside the next level;
    # columns 1/3 flag whether that child is a leaf (True => look among the
    # outer nodes). -1 means "no child registered yet".
    level_children = np.zeros((n_nodes, 4), dtype=np.int32)
    level_children[:, [0, 2]] -= 1
    inners = []  # inners[i]: non-leaf node ids at level i
    outers = []  # outers[i]: leaf node ids at level i
    offset = 0
    for jet in jets:
        # BFS from the root so nodes are grouped by depth.
        queue = [(jet["root_id"] + offset, -1, True, 0)]  # (node, parent, is_left, depth)
        while len(queue) > 0:
            node, parent, is_left, depth = queue.pop(0)
            if len(inners) < depth + 1:
                inners.append([])
            if len(outers) < depth + 1:
                outers.append([])
            # Inner node: it has children (ids are already global here).
            if jet_children[node, 0] != -1:
                inners[depth].append(node)
                position = len(inners[depth]) - 1  # position within inners[depth]
                is_leaf = False
                queue.append((jet_children[node, 0], node, True, depth + 1))
                queue.append((jet_children[node, 1], node, False, depth + 1))
            # Outer node: a leaf.
            else:
                outers[depth].append(node)
                position = len(outers[depth]) - 1  # position within outers[depth]
                is_leaf = True
            # Register this node's position at its parent.
            if parent >= 0:
                if is_left:
                    level_children[parent, 0] = position
                    level_children[parent, 1] = is_leaf
                else:
                    level_children[parent, 2] = position
                    level_children[parent, 3] = is_leaf
        offset += len(jet["tree"])
    # Reorganize each level so inner nodes appear first, then outer nodes,
    # and shift the stored child positions accordingly.
    levels = []
    n_inners = []
    contents = []
    n_level = []
    prev_inner = np.array([], dtype=int)
    for inner, outer in zip(inners, outers):
        n_inners.append(len(inner))
        inner = np.array(inner, dtype=int)
        outer = np.array(outer, dtype=int)
        levels.append(np.concatenate((inner, outer)))
        n_level.append(len(levels[-1]))
        # Nodes of the previous level whose left (right) child is a leaf:
        # those children now live after the inner block of this level, so
        # offset their stored position by the number of inner nodes here.
        left = prev_inner[level_children[prev_inner, 1] == 1]
        level_children[left, 0] += len(inner)
        right = prev_inner[level_children[prev_inner, 3] == 1]
        level_children[right, 2] += len(inner)
        contents.append(jet_contents[levels[-1]])
        prev_inner = inner
    ##-----------------------------------------------
    # Zero padding: make every level (and its contents) as long as the
    # largest level so the batch can be stacked into rectangular tensors.
    n_inners = np.asarray(n_inners)
    max_n_level = np.max(n_level)
    for i in range(len(levels)):
        pad_dim = int(max_n_level - len(levels[i]))
        # Pad with zeros of the arrays' own (integer) dtype: the default
        # float np.zeros would silently upcast the node-id arrays to float.
        levels[i] = np.concatenate((levels[i], np.zeros(pad_dim, dtype=levels[i].dtype)))
        contents[i] = np.concatenate((contents[i], np.zeros((pad_dim, int(features)))))
    return (levels, level_children[:, [0, 2]], n_inners, contents, n_level)
#-----------------------------------------------------------------------------------------------------------
# CURRENTLY NOT USED. SKIP TO THE NEXT METHOD.
# Batchization of the recursion with zero padding.
def batch_level(jets,features):
    '''
    CURRENTLY NOT USED. Batch the recursive activations across all nodes of
    the same level (like batch_nyu_pad, but without zero padding).

    Loads the jet trees, reorganizes each tree by levels and creates a batch
    of N jets by appending the nodes of every jet to the corresponding level.

    !!! Assumes every jet has at least one inner node; leads to off-by-one
    errors otherwise.

    Args:
        jets: list of jet dictionaries; each jet has
              "tree"    - array [n_nodes, 2] with the (left, right) child ids
                          of each node (ids LOCAL to the jet), -1 marking a leaf,
              "content" - array [n_nodes, n_features] feature vector per node,
              "root_id" - index of the jet root within its own tree.
        features: number of features per node (only used by the zero-padded
                  variant; kept for interface compatibility).

    Returns:
        levels: list of arrays; levels[i][j] is a node id at level i in one
                of the trees. Inner nodes sit in levels[i][:n_inners[i]],
                leaves in levels[i][n_inners[i]:].
        level_children[:, [0, 2]]: array [n_nodes, 2]; the position in the
                next level of the left/right child of each node.
        n_inners: list; n_inners[i] is the number of inner nodes at level i
                across all jets.
        contents: list of arrays; contents[i][j] is the feature vector of
                node levels[i][j].
    '''
    # Unlike batch_nyu_pad, the stacked trees keep their jet-local child ids,
    # so the running `offset` must be added whenever jet_children is indexed.
    jet_children = np.vstack([jet['tree'] for jet in jets])
    jet_contents = np.vstack([jet["content"] for jet in jets])
    n_nodes = len(jet_children)
    ##-----------------------------------------------
    # Level-wise traversal. level_children[node, 0]/[node, 2] will hold the
    # position of the left/right child of `node` inside the next level;
    # columns 1/3 flag whether that child is a leaf (True => look among the
    # outer nodes). -1 means "no child registered yet".
    level_children = np.zeros((n_nodes, 4), dtype=np.int32)
    level_children[:, [0, 2]] -= 1
    ##-----------------------------------------------
    inners = []  # inners[i]: non-leaf node ids at level i
    outers = []  # outers[i]: leaf node ids at level i
    offset = 0
    for jet in jets:
        # BFS from the root. `node` is a jet-local id; `node + offset` is its
        # row in the stacked jet_children/jet_contents arrays.
        queue = [(jet["root_id"], -1, True, 0)]  # (node, parent position, is_left, depth)
        while len(queue) > 0:
            node, parent, is_left, depth = queue.pop(0)
            if len(inners) < depth + 1:
                inners.append([])
            if len(outers) < depth + 1:
                outers.append([])
            #-----------
            # Inner node. BUGFIX: index with node + offset — `node` is
            # jet-local, so without the offset every jet after the first was
            # classified using the first jet's tree rows.
            if jet_children[node + offset, 0] != -1:
                inners[depth].append(node + offset)
                position = len(inners[depth]) - 1  # position within inners[depth]
                is_leaf = False
                queue.append((jet_children[node + offset, 0], node + offset, True, depth + 1))
                queue.append((jet_children[node + offset, 1], node + offset, False, depth + 1))
            #-----------
            # Outer node: a leaf.
            else:
                outers[depth].append(node + offset)
                position = len(outers[depth]) - 1  # position within outers[depth]
                is_leaf = True
            #-----------
            # Register this node's position at its parent (parent is global).
            if parent >= 0:
                if is_left:
                    level_children[parent, 0] = position
                    level_children[parent, 1] = is_leaf
                else:
                    level_children[parent, 2] = position
                    level_children[parent, 3] = is_leaf
        offset += len(jet["tree"])
    ##-----------------------------------------------
    # Reorganize each level so inner nodes appear first, then outer nodes,
    # and shift the stored child positions accordingly.
    levels = []
    n_inners = []
    contents = []
    n_level = []
    prev_inner = np.array([], dtype=int)
    for inner, outer in zip(inners, outers):
        n_inners.append(len(inner))
        inner = np.array(inner, dtype=int)
        outer = np.array(outer, dtype=int)
        levels.append(np.concatenate((inner, outer)))
        n_level.append(len(levels[-1]))  # kept for the zero-padded variant
        # Children that are leaves live after the inner block of the next
        # level, so offset their stored positions by the inner count.
        left = prev_inner[level_children[prev_inner, 1] == 1]
        level_children[left, 0] += len(inner)
        right = prev_inner[level_children[prev_inner, 3] == 1]
        level_children[right, 2] += len(inner)
        contents.append(jet_contents[levels[-1]])
        prev_inner = inner
    return (levels, level_children[:, [0, 2]], n_inners, contents)
#-----------------------------------------------------------------------------------------------------------
# Generator function
def make_pad_batch_iterator_level_old( batches, batch_size):
    '''
    Generator that takes prebuilt batches, turns their numpy arrays into
    torch tensors (moved to the GPU when available) wrapped in Variables,
    and yields them one batch at a time to the training/validation pipeline.
    Args:
        batches: batches of data; each batch holds (levels, children,
                 n_inners, contents, n_level, labels)
        batch_size: number of jets per batch
    '''
    def as_variable(array, tensor_type):
        # numpy -> torch tensor -> (GPU if available) -> autograd Variable
        tensor = tensor_type(array)
        if torch_params.cuda:
            tensor = tensor.cuda()
        return Variable(tensor)

    for batch in batches:
        # children has shape (total n_nodes in the batch, 2): the left and
        # right child locations of each node within the next level.
        levels, children, n_inners, contents, n_level, labels = (
            np.asarray(batch[k]) for k in range(6)
        )
        yield (
            as_variable(levels, torch.LongTensor),
            as_variable(children, torch.LongTensor),
            as_variable(n_inners, torch.LongTensor),
            as_variable(contents, torch.FloatTensor),
            as_variable(n_level, torch.LongTensor),
            as_variable(labels, torch.LongTensor),
        )
#-----------------------------------------------------------------------------------------------------------
#
# def batch_array(sample_x,sample_y,batch_size, features):
# '''
# Loads the DataLoader class to create the train, val, test datasets
# Args:
# sample_x: jet trees
# sample_y: truth value for the jet labels
# batch_size: number of jets in each batch
# '''
# # tot_levels=[]
# #
# # loader=DataLoader
# # num_steps=len(sample_x)//batch_size
# # batches=[]
# # for i in range(num_steps):
# # batches.append([])
# levels, children, n_inners, contents, n_level= self.batch_nyu_pad(sample_x,features)
# batches[-1].append(levels)
# batches[-1].append(children)
# batches[-1].append(n_inners)
# batches[-1].append(contents)
# batches[-1].append(n_level)
# batches[-1].append(sample_y[i*batch_size:(i+1)*batch_size])
# if (i+1)%100==0: logging.info('Number of batches created='+str(i+1))
#
# #
# # #Get average number of levels
# # tot_levels.append(n_level)
# #
# # print('Total jets=',len(tot_levels))
# # print('----'*20)
# # print('Average levels per jet=',np.sum([len(level) for level in tot_levels])/len(tot_levels))
#
# batches=np.asarray(batches)
#
# return batches
#-----------------------------------------------------------------------------------------------------------
# Generator function
def make_pad_batch_iterator_level(sample_x,sample_y,batch_size, features,num_steps):
    '''
    Generator that shuffles the sample, builds one zero-padded batch of jets
    at a time with DataLoader.batch_nyu_pad, converts the numpy arrays to
    torch tensors (moved to the GPU when available) wrapped in Variables,
    and feeds them to the training/validation pipeline.
    Args:
        sample_x: jet trees
        sample_y: truth values for the jet labels
        batch_size: number of jets per batch
        features: number of features per node
        num_steps: number of batches to yield
    '''
    def as_variable(array, tensor_type):
        # numpy -> torch tensor -> (GPU if available) -> autograd Variable
        tensor = tensor_type(np.asarray(array))
        if torch_params.cuda:
            tensor = tensor.cuda()
        return Variable(tensor)

    # Shuffle jets and labels with the same permutation. With seed=None,
    # check_random_state falls back to the global numpy RandomState.
    shuffle = check_random_state(seed=None).permutation(len(sample_x))
    sample_x = sample_x[shuffle]
    sample_y = sample_y[shuffle]
    for step in range(num_steps):
        start, stop = step * batch_size, (step + 1) * batch_size
        levels, children, n_inners, contents, n_level = DataLoader.batch_nyu_pad(
            sample_x[start:stop], features
        )
        # children has shape (total n_nodes in the batch, 2): the left and
        # right child locations of each node within the next level.
        yield (
            as_variable(levels, torch.LongTensor),
            as_variable(children, torch.LongTensor),
            as_variable(n_inners, torch.LongTensor),
            as_variable(contents, torch.FloatTensor),
            as_variable(n_level, torch.LongTensor),
            as_variable(sample_y[start:stop], torch.LongTensor),
        )
##############################################################################################################
#///////////////////// OTHER FUNCTIONS //////////////////////////////////////////////////////////////
##############################################################################################################
# Loads the DataLoader class to create the train, val, test datasets with zero paddings
def batch_array(sample_x,sample_y,batch_size, features):
    '''
    Loads the DataLoader class to create the train, val, test datasets with
    zero-padded batches.
    Args:
        sample_x: jet trees
        sample_y: truth values for the jet labels
        batch_size: number of jets in each batch
        features: number of features per node (forwarded to batch_nyu_pad)
    Returns:
        object array of batches; each batch is [levels, children, n_inners,
        contents, n_level, labels] for batch_size jets.
    '''
    loader = DataLoader
    num_steps = len(sample_x) // batch_size  # an incomplete trailing batch is dropped
    batches = []
    for i in range(num_steps):
        levels, children, n_inners, contents, n_level = loader.batch_nyu_pad(
            sample_x[i * batch_size:(i + 1) * batch_size], features
        )
        batches.append([levels, children, n_inners, contents, n_level,
                        sample_y[i * batch_size:(i + 1) * batch_size]])
        if (i + 1) % 100 == 0:
            logging.info('Number of batches created=' + str(i + 1))
    # The batch entries are ragged, so force an object array: plain
    # np.asarray on ragged nested sequences is an error in recent numpy
    # (NEP 34); older versions produced an object array implicitly.
    batches = np.asarray(batches, dtype=object)
    return batches
#-------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------
###///////////////////////////////////////////////////////////////////////////////////////////////////////////
#-------------------------------------------------------------------------------------------------------------
# if __name__=='__main__':
#
# myN_jets=10
# batch_size=1
#
# load=DataLoader
#
# sig_tree, sig_list=load.makeTrees(dir_jets_subjets,sg,myN_jets,0)
# bkg_tree, bkg_list=load.makeTrees(dir_jets_subjets,bg,myN_jets,0)
#
# train_data, dev_data, test_data = load.shuffle_split(sig_list, bkg_list, 0.6, 0.2, 0.2)
#
# data_iterator=load.make_pad_batch_iterator(train_data, batch_size)
| 44.440525
| 422
| 0.579846
| 7,365
| 54,173
| 4.141752
| 0.062186
| 0.037077
| 0.013375
| 0.009179
| 0.824646
| 0.801928
| 0.791929
| 0.782455
| 0.772423
| 0.761474
| 0
| 0.012966
| 0.236889
| 54,173
| 1,219
| 423
| 44.440525
| 0.724915
| 0.59681
| 0
| 0.570423
| 0
| 0
| 0.032391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032864
| false
| 0
| 0.042254
| 0
| 0.107981
| 0.051643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2141f13b1774a59196a790c150d796b6815c3cf2
| 162
|
py
|
Python
|
phyluce/tests/test_imports.py
|
faircloth-lab/phyluce
|
ae6801a7e749be2fa38513db9846046241d0fd7a
|
[
"BSD-3-Clause"
] | 63
|
2015-03-16T15:10:17.000Z
|
2022-02-16T12:36:23.000Z
|
phyluce/tests/test_imports.py
|
faircloth-lab/phyluce
|
ae6801a7e749be2fa38513db9846046241d0fd7a
|
[
"BSD-3-Clause"
] | 253
|
2015-01-26T13:03:23.000Z
|
2022-03-15T19:03:05.000Z
|
phyluce/tests/test_imports.py
|
faircloth-lab/phyluce
|
ae6801a7e749be2fa38513db9846046241d0fd7a
|
[
"BSD-3-Clause"
] | 45
|
2015-01-26T13:09:50.000Z
|
2021-05-24T04:20:30.000Z
|
"""Test various imports from packages"""
import phyluce
def test_phyluce_version():
"""Ensure we can successfully import"""
assert phyluce.__version__
| 18
| 43
| 0.734568
| 19
| 162
| 5.947368
| 0.736842
| 0.247788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 162
| 8
| 44
| 20.25
| 0.837037
| 0.419753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
dcbc51fd463d9f5abcd834b580a65017d733399c
| 14,197
|
py
|
Python
|
src/alert_module.py
|
fiAnaliz/fiAnaliz
|
6c617c9ec875b182fa3f26e58701e7ca5aafed2e
|
[
"MIT"
] | 6
|
2021-05-22T15:12:38.000Z
|
2021-07-01T13:22:19.000Z
|
src/alert_module.py
|
fiAnaliz/fiAnaliz
|
6c617c9ec875b182fa3f26e58701e7ca5aafed2e
|
[
"MIT"
] | null | null | null |
src/alert_module.py
|
fiAnaliz/fiAnaliz
|
6c617c9ec875b182fa3f26e58701e7ca5aafed2e
|
[
"MIT"
] | 3
|
2021-07-01T12:21:43.000Z
|
2022-01-19T18:59:11.000Z
|
# -*- coding: utf-8 -*-
import pymysql.cursors
import random
import requests
import time
import json
import datetime
"""
Database Connection
"""
class Database:
host = ""
user = ""
password = ""
db = ""
charset = "utf8mb4"
"""
Functions
"""
def divide_chunks(l, n):
# looping till length l
for i in range(0, len(l), n):
yield l[i:i + n]
baglanti = ""  # module-level cursor; rebound to a real DictCursor by connect()

def connect():
    """(Re)open the MySQL connection and refresh the module-level cursor.

    Rebinds the globals ``db`` (connection) and ``baglanti`` (cursor).
    DictCursor makes fetched rows dicts keyed by column name, which the
    alert loop below relies on (e.g. row['code'], row['whatsapp']).
    """
    global db
    global baglanti
    db = pymysql.connect(host= Database.host,
                         user= Database.user,
                         password= Database.password,
                         db= Database.db,
                         charset= Database.charset,
                         cursorclass=pymysql.cursors.DictCursor)
    baglanti = db.cursor()
"""
Alerts Loop
"""
# Load config once at startup.
# NOTE(review): dataX is assumed to hold parallel lists 'COIN_id' and
# 'COIN_symbols' (used to map a CoinGecko id to a display symbol) — confirm
# against Config.json.
with open('Config.json', encoding='utf-8') as json_file:
    dataX = json.load(json_file)
connect()
# Poll forever; any exception triggers a reconnect and a 60 s back-off below.
while True:
    try:
        db.commit()  # refresh the transaction snapshot before re-reading alerts
        # --- crypto alerts (type = 0), priced via CoinGecko ---
        baglanti.execute("SELECT code FROM alerts WHERE completed = 0 AND type = 0 GROUP BY code")
        alerts = baglanti.fetchall()
        if (len(alerts) != 0):
            # CoinGecko accepts comma-separated ids; batch 10 codes per request.
            for coinID in list(divide_chunks(alerts, 10)):
                coins = ""
                for i in coinID:
                    coins = coins + i['code'] + ","
                data = requests.get('https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=usd'.format(coins[:-1])).json()
                # NOTE(review): the loop variable shadows the chunk name coinID.
                for coinID in coinID:
                    baglanti.execute("SELECT * FROM alerts WHERE code = %s AND completed = 0 AND type = 0", (coinID['code']))
                    for alert in baglanti.fetchall():
                        # compare == 1: trigger when price rose to/above target
                        if alert['compare'] == 1 and alert['price'] <= data[coinID['code']]['usd']:
                            baglanti.execute("SELECT * FROM users WHERE uuid = %s", (alert['uuid']))
                            # platform: 0 = WhatsApp (:9000), 1 = Telegram (:9001), 2 = Discord (:9002)
                            if alert['platform'] == 0:
                                fromNumber = baglanti.fetchall()[0]['whatsapp']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* $ hedef fiyatlı *büyük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} $".format(dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9000', json=payload)
                            elif alert['platform'] == 1:
                                fromNumber = baglanti.fetchall()[0]['telegram']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* $ hedef fiyatlı *büyük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} $".format(dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9001', json=payload)
                            elif alert['platform'] == 2:
                                fromNumber = baglanti.fetchall()[0]['discord']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "**{}**, **{}** $ hedef fiyatlı **büyük veya eşit olma koşullu** alarmınız gerçekleşmiştir!\n\n**Güncel fiyat:** {} $ | %s".format(dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9002', json=payload)
                            # mark the alert as fired so it is not resent
                            baglanti.execute('UPDATE alerts SET completed = 1 WHERE id = %s', (alert['id']))
                            db.commit()
                        # compare == 0: trigger when price fell to/below target
                        elif alert['compare'] == 0 and alert['price'] >= data[coinID['code']]['usd']:
                            baglanti.execute("SELECT * FROM users WHERE uuid = %s", (alert['uuid']))
                            if alert['platform'] == 0:
                                fromNumber = baglanti.fetchall()[0]['whatsapp']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* $ hedef fiyatlı *küçük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} $".format( dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9000', json=payload)
                            elif alert['platform'] == 1:
                                fromNumber = baglanti.fetchall()[0]['telegram']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* $ hedef fiyatlı *küçük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} $".format( dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9001', json=payload)
                            elif alert['platform'] == 2:
                                fromNumber = baglanti.fetchall()[0]['discord']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "**{}**, **{}** $ hedef fiyatlı **küçük veya eşit olma koşullu** alarmınız gerçekleşmiştir!\n\n**Güncel fiyat:** {} $ | %s".format( dataX['COIN_symbols'][dataX['COIN_id'].index(alert['code'])], alert['price'],float(data[coinID['code']]['usd']))
                                }
                                response = requests.post('http://localhost:9002', json=payload)
                            baglanti.execute('UPDATE alerts SET completed = 1 WHERE id = %s', (alert['id']))
                            db.commit()
        # --- stock alerts (type = 1), priced via Foreks/BIST feed ---
        now = datetime.datetime.now()
        # 10 < hour < 19 on Mon-Sat (weekday() < 6) — presumably BIST trading
        # hours; verify the intended window.
        if(10 < now.hour < 19 and now.weekday() < 6):
            baglanti.execute("SELECT code FROM alerts WHERE completed = 0 AND type = 1 GROUP BY code")
            alerts = baglanti.fetchall()
            if (len(alerts) != 0):
                # Build a zero-padded YYYYMMDD query window: [now-7d, now+3d].
                now = datetime.datetime.now() + datetime.timedelta(days=3)
                yy, mm, dd = str(now.year), str(now.month), str(now.day)
                if(len(dd)==1):
                    dd = "0" + dd
                if(len(mm)==1):
                    mm = "0" + mm
                today = yy+mm+dd
                now = now - datetime.timedelta(days=10)
                yy, mm, dd = str(now.year), str(now.month), str(now.day)
                if(len(dd)==1):
                    dd = "0" + dd
                if(len(mm)==1):
                    mm = "0" + mm
                lastday = yy+mm+dd
                for code in alerts:
                    code = code['code']
                    # latest daily bar for this symbol within the window
                    data = requests.get("https://web-paragaranti-pubsub.foreks.com/web-services/historical-data?userName=undefined&name={}&exchange=BIST&market=E&group=F&last=300&period=1440&intraPeriod=null&isLast=false&from={}000000&to={}235900".format(code, lastday, today)).json()['dataSet'][-1]
                    baglanti.execute("SELECT * FROM alerts WHERE code = %s AND completed = 0 AND type = 1", (code))
                    for alert in baglanti.fetchall():
                        if alert['compare'] == 1 and alert['price'] <= data['close']:
                            baglanti.execute("SELECT * FROM users WHERE uuid = %s", (alert['uuid']))
                            if alert['platform'] == 0:
                                fromNumber = baglanti.fetchall()[0]['whatsapp']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* ₺ hedef fiyatlı *büyük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺".format(alert['code'], alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9000', json=payload)
                            elif alert['platform'] == 1:
                                fromNumber = baglanti.fetchall()[0]['telegram']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* ₺ hedef fiyatlı *büyük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺".format(alert['code'], alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9001', json=payload)
                            elif alert['platform'] == 2:
                                fromNumber = baglanti.fetchall()[0]['discord']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "**{}**, **{}** ₺ hedef fiyatlı *büyük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺ | %s".format(alert['code'], alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9002', json=payload)
                            baglanti.execute('UPDATE alerts SET completed = 1 WHERE id = %s', (alert['id']))
                            db.commit()
                        elif alert['compare'] == 0 and alert['price'] >= data['close']:
                            # NOTE(review): this branch selects only the whatsapp
                            # column yet the telegram/discord branches below read
                            # other columns — confirm this query is intended.
                            baglanti.execute("SELECT whatsapp FROM users WHERE uuid = %s", (alert['uuid']))
                            if alert['platform'] == 0:
                                fromNumber = baglanti.fetchall()[0]['whatsapp']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* ₺ hedef fiyatlı *küçük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺".format(alert['code'] , alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9000', json=payload)
                            elif alert['platform'] == 1:
                                fromNumber = baglanti.fetchall()[0]['telegram']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "*ALARMLARIM* | @%s 🔔🔔🔔\n\n*{}*, *{}* ₺ hedef fiyatlı *küçük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺".format(alert['code'] , alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9001', json=payload)
                            elif alert['platform'] == 2:
                                fromNumber = baglanti.fetchall()[0]['discord']
                                payload = { "id": alert['id'],
                                            "toNumber": alert['toChat'],
                                            "fromNumber": fromNumber,
                                            "crypto": 1,
                                            "message": "**{}**, **{}** ₺ hedef fiyatlı *küçük veya eşit olma koşullu* alarmınız gerçekleşmiştir!\n\n*Güncel fiyat:* {} ₺ | %s".format(alert['code'] , alert['price'], float(data['close']))
                                }
                                response = requests.post('http://localhost:9002', json=payload)
                            baglanti.execute('UPDATE alerts SET completed = 1 WHERE id = %s', (alert['id']))
                            db.commit()
        time.sleep(1)  # throttle the polling loop
    except Exception as E:
        # On any failure (DB gone away, HTTP error, bad payload): reconnect,
        # log, and back off before retrying the whole loop.
        connect()
        print(E)
        print('BEKLEMEDE')
        time.sleep(60)
| 66.032558
| 305
| 0.424174
| 1,226
| 14,197
| 4.927406
| 0.149266
| 0.006621
| 0.051647
| 0.053634
| 0.816752
| 0.808144
| 0.808144
| 0.808144
| 0.803013
| 0.803013
| 0
| 0.018516
| 0.429386
| 14,197
| 214
| 306
| 66.341122
| 0.72275
| 0.003029
| 0
| 0.619289
| 0
| 0.071066
| 0.270462
| 0.023142
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010152
| false
| 0.010152
| 0.030457
| 0
| 0.071066
| 0.010152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dce3819b0f14760a423f8359416c9e8a2160dd5b
| 26
|
py
|
Python
|
grama/eval/__init__.py
|
natalia-rubio/py_grama
|
968c1c0238d7165de3b1b96534791feacc4aa960
|
[
"MIT"
] | 13
|
2020-02-24T16:51:51.000Z
|
2022-03-30T18:56:55.000Z
|
grama/eval/__init__.py
|
natalia-rubio/py_grama
|
968c1c0238d7165de3b1b96534791feacc4aa960
|
[
"MIT"
] | 78
|
2019-12-30T19:13:21.000Z
|
2022-02-23T18:17:54.000Z
|
grama/eval/__init__.py
|
natalia-rubio/py_grama
|
968c1c0238d7165de3b1b96534791feacc4aa960
|
[
"MIT"
] | 7
|
2020-10-19T17:49:25.000Z
|
2021-08-15T20:46:52.000Z
|
from .eval_pyDOE import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dcff8e09c7bb46b16444d3bc28acdcc601d0ae41
| 8,266
|
py
|
Python
|
tests/unit_tests/test_tethys_apps/test_management/test_commands/test_syncstores.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 79
|
2015-10-05T13:13:28.000Z
|
2022-02-01T12:30:33.000Z
|
tests/unit_tests/test_tethys_apps/test_management/test_commands/test_syncstores.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 542
|
2015-08-12T22:11:32.000Z
|
2022-03-29T22:18:08.000Z
|
tests/unit_tests/test_tethys_apps/test_management/test_commands/test_syncstores.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 71
|
2016-01-16T01:03:41.000Z
|
2022-03-31T17:55:54.000Z
|
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
from unittest import mock
from argparse import ArgumentParser
from tethys_apps.management.commands import syncstores
class ManagementCommandsSyncstoresTests(unittest.TestCase):
    """Unit tests for the ``syncstores`` Django management command."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_syncstores_add_arguments(self):
        """The command registers the app_name positional and -r/-f/-d options."""
        parser = ArgumentParser()
        cmd = syncstores.Command()
        cmd.add_arguments(parser)
        self.assertIn('app_name', parser.format_usage())
        self.assertIn('[-r]', parser.format_usage())
        self.assertIn('[-f]', parser.format_usage())
        self.assertIn('[-d DATABASE]', parser.format_usage())
        self.assertIn('--refresh', parser.format_help())
        self.assertIn('--firsttime', parser.format_help())
        self.assertIn('--database DATABASE', parser.format_help())

    @mock.patch('tethys_apps.management.commands.syncstores.Command.provision_persistent_stores')
    def test_handle(self, mock_provision_persistent_stores):
        """handle() delegates to provision_persistent_stores without error."""
        # Mock the function, it will be tested elsewhere
        mock_provision_persistent_stores.return_value = True
        cmd = syncstores.Command()
        cmd.handle(app_name='foo')

    # NOTE: mock.patch decorators apply bottom-up, so the first injected
    # argument after self (mock_app) comes from the bottom @mock.patch.
    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp')
    def test_provision_persistent_stores_all_apps_no_database(self, mock_app, mock_setting1, mock_setting2,
                                                              mock_setting3, mock_stdout):
        """ALL_APPS with no database filter provisions every ps db setting."""
        # Mock arguments
        mock_app_names = syncstores.ALL_APPS
        mock_options = {'database': '', 'refresh': True, 'first_time': True}
        # Mock for ps db settings
        mock_setting1.name = 'setting1_name'
        mock_setting1.create_persistent_store_database.return_value = True
        mock_setting2.name = 'setting2_name'
        mock_setting2.create_persistent_store_database.return_value = True
        mock_setting3.name = 'setting3_name'
        mock_setting3.create_persistent_store_database.return_value = True
        # Mock for TethysApp (2 apps, 2 settings for first app, 1 setting for second app)
        mock_app1 = mock.MagicMock()
        mock_app1.persistent_store_database_settings = [mock_setting1, mock_setting2]
        mock_app2 = mock.MagicMock()
        mock_app2.persistent_store_database_settings = [mock_setting3]
        mock_app.objects.all.return_value = [mock_app1, mock_app2]
        cmd = syncstores.Command()
        cmd.provision_persistent_stores(app_names=mock_app_names, options=mock_options)
        mock_app.objects.all.assert_called_once()
        mock_setting1.create_persistent_store_database.assert_called_once_with(refresh=True, force_first_time=True)
        mock_setting2.create_persistent_store_database.assert_called_once_with(refresh=True, force_first_time=True)
        mock_setting3.create_persistent_store_database.assert_called_once_with(refresh=True, force_first_time=True)
        self.assertIn('Provisioning Persistent Stores...', mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp')
    def test_provision_persistent_stores_all_apps_database_no_match(self, mock_app, mock_setting1, mock_setting2,
                                                                    mock_setting3, mock_stdout):
        """A database filter that matches no setting provisions nothing."""
        # Mock arguments
        mock_app_names = syncstores.ALL_APPS
        mock_options = {'database': '/foo/no_match', 'refresh': True, 'first_time': True}
        # Mock for ps db settings
        mock_setting1.name = 'setting1_name'
        mock_setting1.create_persistent_store_database.return_value = True
        mock_setting2.name = 'setting2_name'
        mock_setting2.create_persistent_store_database.return_value = True
        mock_setting3.name = 'setting3_name'
        mock_setting3.create_persistent_store_database.return_value = True
        # Mock for TethysApp (2 apps, 2 settings for first app, 1 setting for second app)
        mock_app1 = mock.MagicMock()
        mock_app1.persistent_store_database_settings = [mock_setting1, mock_setting2]
        mock_app2 = mock.MagicMock()
        mock_app2.persistent_store_database_settings = [mock_setting3]
        mock_app.objects.all.return_value = [mock_app1, mock_app2]
        cmd = syncstores.Command()
        cmd.provision_persistent_stores(app_names=mock_app_names, options=mock_options)
        mock_app.objects.all.assert_called_once()
        mock_setting1.create_persistent_store_database.assert_not_called()
        mock_setting2.create_persistent_store_database.assert_not_called()
        mock_setting3.create_persistent_store_database.assert_not_called()
        self.assertIn('Provisioning Persistent Stores...', mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp.persistent_store_database_settings')
    @mock.patch('tethys_apps.models.TethysApp')
    def test_provision_persistent_stores_all_apps_database_single_match(self, mock_app, mock_setting1, mock_setting2,
                                                                        mock_setting3, mock_stdout):
        """A database filter provisions only the setting whose name matches."""
        # Mock arguments
        mock_app_names = syncstores.ALL_APPS
        mock_options = {'database': '/foo/match', 'refresh': False, 'first_time': False}
        # Mock for ps db settings
        mock_setting1.name = 'setting1_name'
        mock_setting1.create_persistent_store_database.return_value = True
        mock_setting2.name = '/foo/match'
        mock_setting2.create_persistent_store_database.return_value = True
        mock_setting3.name = 'setting3_name'
        mock_setting3.create_persistent_store_database.return_value = True
        # Mock for TethysApp (2 apps, 2 settings for first app, 1 setting for second app)
        mock_app1 = mock.MagicMock()
        mock_app1.persistent_store_database_settings = [mock_setting1, mock_setting2]
        mock_app2 = mock.MagicMock()
        mock_app2.persistent_store_database_settings = [mock_setting3]
        mock_app.objects.all.return_value = [mock_app1, mock_app2]
        cmd = syncstores.Command()
        cmd.provision_persistent_stores(app_names=mock_app_names, options=mock_options)
        mock_app.objects.all.assert_called_once()
        mock_setting1.create_persistent_store_database.assert_not_called()
        mock_setting2.create_persistent_store_database.assert_called_once_with(refresh=False, force_first_time=False)
        mock_setting3.create_persistent_store_database.assert_not_called()
        self.assertIn('Provisioning Persistent Stores...', mock_stdout.getvalue())

    @mock.patch('sys.stdout', new_callable=StringIO)
    @mock.patch('tethys_apps.models.TethysApp')
    def test_provision_persistent_stores_given_apps_not_found(self, mock_app, mock_stdout):
        """Naming an app that is not installed prints a helpful message."""
        # Mock arguments
        mock_app_names = ['foo_missing']
        mock_options = {'database': '', 'refresh': True, 'first_time': True}
        # Mock for TethysApp (return no apps found)
        mock_app.objects.filter.return_value = []
        cmd = syncstores.Command()
        cmd.provision_persistent_stores(app_names=mock_app_names, options=mock_options)
        mock_app.objects.filter.assert_called_once()
        self.assertIn('The app named "foo_missing" cannot be found.', mock_stdout.getvalue())
        self.assertIn('Please make sure it is installed and try again.', mock_stdout.getvalue())
        self.assertIn('Provisioning Persistent Stores...', mock_stdout.getvalue())
| 51.024691
| 117
| 0.724292
| 991
| 8,266
| 5.677094
| 0.112008
| 0.087984
| 0.134909
| 0.092784
| 0.826164
| 0.78706
| 0.78706
| 0.779417
| 0.767508
| 0.767508
| 0
| 0.011898
| 0.186547
| 8,266
| 161
| 118
| 51.341615
| 0.824807
| 0.05565
| 0
| 0.655738
| 0
| 0
| 0.171821
| 0.097138
| 0
| 0
| 0
| 0
| 0.213115
| 1
| 0.065574
| false
| 0.016393
| 0.057377
| 0
| 0.131148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0d0ea5cc0134919d03aa7c93e90b818c60d7fed2
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/charset_normalizer/constant.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 1
|
2022-02-22T04:49:18.000Z
|
2022-02-22T04:49:18.000Z
|
venv/lib/python3.8/site-packages/charset_normalizer/constant.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/charset_normalizer/constant.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/5b/8b/b6/29e0be44124fe23c5f4cafeb38750444c9de8e8636d558487853a040ac
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0d406fec36b34d8589879d9befbdef5a7dd7579f
| 41
|
py
|
Python
|
src/qaseio/xcode/__init__.py
|
qase-tms/qase-xctest
|
d880cbafa3b69f8535d6ac826aa326c156f4c987
|
[
"Apache-2.0"
] | null | null | null |
src/qaseio/xcode/__init__.py
|
qase-tms/qase-xctest
|
d880cbafa3b69f8535d6ac826aa326c156f4c987
|
[
"Apache-2.0"
] | null | null | null |
src/qaseio/xcode/__init__.py
|
qase-tms/qase-xctest
|
d880cbafa3b69f8535d6ac826aa326c156f4c987
|
[
"Apache-2.0"
] | null | null | null |
from .qase_exporter import QaseExtractor
| 20.5
| 40
| 0.878049
| 5
| 41
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4ccb6a6c5f85a1150db9f75b21e97c48356cad9
| 79
|
py
|
Python
|
wiremock/base/__init__.py
|
sp1rs/python-wiremock
|
b570b0ebc60ac0d873812f21f78f2a8a4353792f
|
[
"Apache-2.0"
] | 22
|
2017-07-01T14:44:04.000Z
|
2021-09-08T08:45:21.000Z
|
wiremock/base/__init__.py
|
sp1rs/python-wiremock
|
b570b0ebc60ac0d873812f21f78f2a8a4353792f
|
[
"Apache-2.0"
] | 37
|
2017-04-24T15:28:27.000Z
|
2021-09-20T08:58:26.000Z
|
wiremock/base/__init__.py
|
sp1rs/python-wiremock
|
b570b0ebc60ac0d873812f21f78f2a8a4353792f
|
[
"Apache-2.0"
] | 22
|
2017-04-24T14:58:06.000Z
|
2021-09-09T09:22:31.000Z
|
from .base_entity import *
from .base_resource import BaseResource, RestClient
| 26.333333
| 51
| 0.835443
| 10
| 79
| 6.4
| 0.7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113924
| 79
| 2
| 52
| 39.5
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4ed2c3780d9b244b6ed69369f2ac5b827021da3
| 33
|
py
|
Python
|
alignments/components/__init__.py
|
Roxot/m-to-m-alignments
|
f45aaa2132ceb709d948e9db8dc2669678ba5527
|
[
"MIT"
] | null | null | null |
alignments/components/__init__.py
|
Roxot/m-to-m-alignments
|
f45aaa2132ceb709d948e9db8dc2669678ba5527
|
[
"MIT"
] | null | null | null |
alignments/components/__init__.py
|
Roxot/m-to-m-alignments
|
f45aaa2132ceb709d948e9db8dc2669678ba5527
|
[
"MIT"
] | null | null | null |
from .encoders import RNNEncoder
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3717cec29a070420979983c0cc33f985c3457474
| 7,842
|
py
|
Python
|
tests/block_patterns_test.py
|
vcamp314/schemed-parsing
|
13cfb4a720af533be640afcda2b9731dca2c843a
|
[
"MIT"
] | null | null | null |
tests/block_patterns_test.py
|
vcamp314/schemed-parsing
|
13cfb4a720af533be640afcda2b9731dca2c843a
|
[
"MIT"
] | null | null | null |
tests/block_patterns_test.py
|
vcamp314/schemed-parsing
|
13cfb4a720af533be640afcda2b9731dca2c843a
|
[
"MIT"
] | null | null | null |
import os
import pytest
from .context import schemedparsing
# Pin the working directory to this tests directory so that tests which open
# fixture files by relative path work both locally and when the package is
# pip-installed with the test option.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
os.chdir(CURRENT_DIR)
def test_block_extraction_empty_patterns_returns_empty_list():
    """An empty scheme list must yield no names and no blocks."""
    source_lines = (chunk for chunk in ["import { sampleImportName1, sampleImportName2 } from './sample/path'"])
    names, blocks = schemedparsing.parse(source_lines, [])
    assert names == []
    assert blocks == []
@pytest.fixture
def single_extraction_pattern():
    """A minimal scheme list containing one regex extraction pattern."""
    return [{'query': r'import (\w+)', }, ]
def test_extraction_empty_text_returns_empty_list(single_extraction_pattern):
    """Parsing an empty text stream yields empty name and block lists."""
    empty_stream = (chunk for chunk in [])
    names, blocks = schemedparsing.parse(empty_stream, single_extraction_pattern)
    assert names == []
    assert blocks == []
def test_find_flat_blocks():
    """Two sibling {...} blocks on one line are reported as two top-level blocks."""
    txt = 'if(isTest == true){ doSomething() }; if(isSpecialTest == true) { doSomethingElse() };'
    block_schemes = [
        {
            'block_start_pattern': {'query': '{'},
            'block_end_pattern': {'query': '}'},
            'block_category': 'test_cat',
        }
    ]
    # neither block carries a parent_id: they are siblings, not nested
    expected = [
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
        },
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
        }
    ]
    result = []
    names = []
    line_no = 1
    schemedparsing.parse_line(txt, block_schemes, result, names, line_no)
    assert result == expected
    assert names == []
def test_find_nested_blocks():
    """A block inside another block gets a parent_id pointing at its container."""
    txt = 'if(isTest == true){ if(isSpecialTest == true;) { doSomethingElse(); } }'
    block_schemes = [
        {
            'block_start_pattern': {'query': '{'},
            'block_end_pattern': {'query': '}'},
            'block_category': 'test_cat',
        }
    ]
    expected = [
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
        },
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
            'parent_id': 0,  # index of the enclosing block above
        }
    ]
    result = []
    names = []
    line_no = 1
    schemedparsing.parse_line(txt, block_schemes, result, names, line_no)
    assert result == expected
    assert names == []
def test_find_flat_blocks_and_their_params():
    """Names extracted inside each block are tagged with that block's index."""
    txt = "import { sampleImportName1, sampleImportName2 } from './sample/path'; import { sampleImportName3, " \
          "sampleImportName4 } from './sample/path'; "
    block_schemes = [
        {
            'block_start_pattern': {'query': '{'},
            'block_end_pattern': {'query': '}'},
            'block_category': 'test_cat',
            'extraction_patterns': [
                {
                    'query': r'(\w+)'
                }
            ]
        }
    ]
    expected_blocks = [
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
        },
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
        }
    ]
    # block_id links each captured name back to the block it appeared in
    expected_names = [
        {
            'name': 'sampleImportName1',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName2',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName3',
            'block_id': 1,
        },
        {
            'name': 'sampleImportName4',
            'block_id': 1,
        },
    ]
    result_blocks = []
    result_names = []
    line_no = 1
    schemedparsing.parse_line(txt, block_schemes, result_blocks, result_names, line_no)
    assert result_blocks == expected_blocks
    assert result_names == expected_names
def test_find_flat_blocks_with_ending_props_and_params():
    """Properties declared on the end pattern are attached to each block dict."""
    txt = "import { sampleImportName1, sampleImportName2 } from './sample/path1'; import { sampleImportName3, " \
          "sampleImportName4 } from './sample/path2'; "
    block_schemes = [
        {
            'block_start_pattern': {'query': '{'},
            'block_end_pattern': {
                'query': '}',
                'properties': [
                    {
                        'property_name': 'from_path',
                        # capture the quoted path following "from"
                        'extraction_patterns': [
                            {
                                'query': r'from\s*?(?:"|\')(.*)(?:"|\')'
                            }
                        ]
                    }
                ]
            },
            'block_category': 'test_cat',
            'extraction_patterns': [
                {
                    'query': r'(\w+)'
                }
            ]
        }
    ]
    expected_blocks = [
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
            'from_path': './sample/path1',
        },
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
            'from_path': './sample/path2',
        }
    ]
    expected_names = [
        {
            'name': 'sampleImportName1',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName2',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName3',
            'block_id': 1,
        },
        {
            'name': 'sampleImportName4',
            'block_id': 1,
        },
    ]
    result_blocks = []
    result_names = []
    line_no = 1
    schemedparsing.parse_line(txt, block_schemes, result_blocks, result_names, line_no)
    assert result_blocks == expected_blocks
    assert result_names == expected_names
def test_find_flat_blocks_with_starting_props_and_params():
    """Properties declared on the start pattern are attached to each block dict."""
    txt = "; import { sampleImportName1, sampleImportName2 } from './sample/path1'; import { sampleImportName3, " \
          "sampleImportName4 } from './sample/path2'; "
    block_schemes = [
        {
            'block_start_pattern': {
                'query': '{',
                'properties': [
                    {
                        'property_name': 'block_type',
                        # capture the keyword following a "; " separator
                        'extraction_patterns': [
                            {
                                'query': r'; (\w+)'
                            }
                        ]
                    }
                ]
            },
            'block_end_pattern': {'query': '}'},
            'block_category': 'test_cat',
            'extraction_patterns': [
                {
                    'query': r'(\w+)'
                }
            ]
        }
    ]
    expected_blocks = [
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
            'block_type': 'import',
        },
        {
            'block_category': 'test_cat',
            'starting_line_no': 1,
            'ending_line_no': 1,
            'block_type': 'import',
        }
    ]
    expected_names = [
        {
            'name': 'sampleImportName1',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName2',
            'block_id': 0,
        },
        {
            'name': 'sampleImportName3',
            'block_id': 1,
        },
        {
            'name': 'sampleImportName4',
            'block_id': 1,
        },
    ]
    result_blocks = []
    result_names = []
    line_no = 1
    schemedparsing.parse_line(txt, block_schemes, result_blocks, result_names, line_no)
    assert result_blocks == expected_blocks
    assert result_names == expected_names
| 27.515789
| 115
| 0.485973
| 665
| 7,842
| 5.37594
| 0.138346
| 0.05035
| 0.048951
| 0.083916
| 0.858741
| 0.80951
| 0.780979
| 0.764476
| 0.734266
| 0.701259
| 0
| 0.014565
| 0.387146
| 7,842
| 285
| 116
| 27.515789
| 0.729297
| 0.012114
| 0
| 0.505837
| 0
| 0
| 0.28328
| 0
| 0
| 0
| 0
| 0
| 0.054475
| 1
| 0.031128
| false
| 0
| 0.097276
| 0.003891
| 0.132296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2ea3142306eb2f8afe86bfbbc9b131c81502e829
| 3,985
|
py
|
Python
|
基础教程/A2-神经网络基本原理/第8步 - 卷积神经网络/src/ch17-CNNBasic/Level4_Col2Img_Test.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 11,094
|
2019-05-07T02:48:50.000Z
|
2022-03-31T08:49:42.000Z
|
基础教程/A2-神经网络基本原理/第8步 - 卷积神经网络/src/ch17-CNNBasic/Level4_Col2Img_Test.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 157
|
2019-05-13T15:07:19.000Z
|
2022-03-23T08:52:32.000Z
|
基础教程/A2-神经网络基本原理/第8步 - 卷积神经网络/src/ch17-CNNBasic/Level4_Col2Img_Test.py
|
microsoft/ai-edu
|
2f59fa4d3cf19f14e0b291e907d89664bcdc8df3
|
[
"Apache-2.0"
] | 2,412
|
2019-05-07T02:55:15.000Z
|
2022-03-30T06:56:52.000Z
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy
import numba
import time
from MiniFramework.ConvWeightsBias import *
from MiniFramework.ConvLayer import *
from MiniFramework.HyperParameters_4_2 import *
def calculate_output_size(input_h, input_w, filter_h, filter_w, padding, stride=1):
    """Return (out_h, out_w) of a convolution: (in - filter + 2*pad) // stride + 1."""
    def _along(size, filt):
        # standard conv output-size formula for one spatial axis
        return (size - filt + 2 * padding) // stride + 1

    return (_along(input_h, filter_h), _along(input_w, filter_w))
def _col2img_demo(batch_size, input_channel, output_channel):
    """Run the img2col/col2img forward + backward walkthrough for one config.

    Prints every intermediate (x, col_x, col_w, delta_in, dB, dW, dcol,
    delta_out) so the 2D-matrix view of a 4D convolution can be inspected.
    Fixed geometry: 3x3 input, 2x2 filter, stride 1, no padding.

    Parameters:
        batch_size: number of samples in the synthetic input batch
        input_channel: channels of the synthetic input
        output_channel: filters of the synthetic conv layer
    """
    stride = 1
    padding = 0
    fh = 2
    fw = 2
    iw = 3
    ih = 3
    (output_height, output_width) = calculate_output_size(ih, iw, fh, fw, padding, stride)
    wb = ConvWeightsBias(output_channel, input_channel, fh, fw, InitialMethod.MSRA, OptimizerName.SGD, 0.1)
    wb.Initialize("test", "test", True)
    # deterministic ascending weights/inputs make the printed matrices easy to follow
    wb.W = np.array(range(output_channel * input_channel * fh * fw)).reshape(output_channel, input_channel, fh, fw)
    wb.B = np.array([0])
    x = np.array(range(input_channel * iw * ih * batch_size)).reshape(batch_size, input_channel, ih, iw)
    print("x=\n", x)
    col_x = img2col(x, fh, fw, stride, padding)
    print("col_x=\n", col_x)
    print("w=\n", wb.W)
    col_w = wb.W.reshape(output_channel, -1).T
    print("col_w=\n", col_w)
    # backward: fabricate an incoming gradient and map it back to image space
    delta_in = np.array(range(batch_size*output_channel*output_height*output_width)).reshape(batch_size, output_channel, output_height, output_width)
    print("delta_in=\n", delta_in)
    delta_in_2d = np.transpose(delta_in, axes=(0,2,3,1)).reshape(-1, output_channel)
    print("delta_in_2d=\n", delta_in_2d)
    dB = np.sum(delta_in_2d, axis=0, keepdims=True).T / batch_size
    print("dB=\n", dB)
    dW = np.dot(col_x.T, delta_in_2d) / batch_size
    print("dW=\n", dW)
    dW = np.transpose(dW, axes=(1, 0)).reshape(output_channel, input_channel, fh, fw)
    print("dW=\n", dW)
    dcol = np.dot(delta_in_2d, col_w.T)
    print("dcol=\n", dcol)
    delta_out = col2img(dcol, x.shape, fh, fw, stride, padding, output_height, output_width)
    print("delta_out=\n", delta_out)


def understand_4d_col2img_simple():
    """Smallest case: one sample, one input channel, one output channel."""
    _col2img_demo(batch_size=1, input_channel=1, output_channel=1)


def understand_4d_col2img_complex():
    """General case: two samples, 3 input channels, 2 output channels."""
    _col2img_demo(batch_size=2, input_channel=3, output_channel=2)
if __name__ == '__main__':
    # Run both im2col/col2img walkthroughs back to back; all output goes to stdout.
    understand_4d_col2img_simple()
    understand_4d_col2img_complex()
| 38.68932
| 149
| 0.676286
| 637
| 3,985
| 3.985871
| 0.145997
| 0.05514
| 0.042536
| 0.072469
| 0.771957
| 0.771957
| 0.771957
| 0.771957
| 0.771957
| 0.771957
| 0
| 0.022393
| 0.181932
| 3,985
| 102
| 150
| 39.068627
| 0.756442
| 0.040652
| 0
| 0.752941
| 0
| 0
| 0.049764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.070588
| 0
| 0.117647
| 0.258824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2eaede119bad76a2653fb34175a24c2928e809e2
| 203
|
py
|
Python
|
python/testData/inspections/PyArgumentListInspection/xRange.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyArgumentListInspection/xRange.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 11
|
2017-02-27T22:35:32.000Z
|
2021-12-24T08:07:40.000Z
|
python/testData/inspections/PyArgumentListInspection/xRange.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
print(xrange(<warning descr="Parameter 'start' unfilled">)</warning>)
print(xrange(1))
print(xrange(1, 2))
print(xrange(1, 2, 3))
print(xrange(1, 2, 3, <warning descr="Unexpected argument">4</warning>))
| 33.833333
| 72
| 0.704433
| 31
| 203
| 4.612903
| 0.419355
| 0.384615
| 0.335664
| 0.272727
| 0.195804
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053476
| 0.078818
| 203
| 5
| 73
| 40.6
| 0.71123
| 0
| 0
| 0
| 0
| 0
| 0.221675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
2ecf9cb187b70dc9ada1b47f66b830606b603579
| 22
|
py
|
Python
|
pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | null | null | null |
pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | null | null | null |
pizza.py
|
gray-adeyi/pizza
|
659db6e85492903374416295cc3ca3a78584eccb
|
[
"MIT"
] | 1
|
2022-03-17T00:54:27.000Z
|
2022-03-17T00:54:27.000Z
|
from pizza import cli
| 11
| 21
| 0.818182
| 4
| 22
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2edf50feb3c2ff65d1609302023b0421c113f1cf
| 21
|
py
|
Python
|
models/__init__.py
|
jack-willturner/fbnet_without_training
|
2dba276121b34cb4e252492f116a21637e75e442
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
jack-willturner/fbnet_without_training
|
2dba276121b34cb4e252492f116a21637e75e442
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
jack-willturner/fbnet_without_training
|
2dba276121b34cb4e252492f116a21637e75e442
|
[
"MIT"
] | null | null | null |
from .fbnet import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2c14dc0154155e2ba30c4a79d35447372e773e13
| 144
|
py
|
Python
|
controller/__init__.py
|
ivohutasoit/onanplus-service
|
28ec5efce228b3379d5cada04bf1626b16fc55e0
|
[
"MIT"
] | null | null | null |
controller/__init__.py
|
ivohutasoit/onanplus-service
|
28ec5efce228b3379d5cada04bf1626b16fc55e0
|
[
"MIT"
] | null | null | null |
controller/__init__.py
|
ivohutasoit/onanplus-service
|
28ec5efce228b3379d5cada04bf1626b16fc55e0
|
[
"MIT"
] | null | null | null |
from .price_controller import price_controller
from .product_controller import product_controller
from .store_controller import store_controller
| 48
| 50
| 0.902778
| 18
| 144
| 6.888889
| 0.333333
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076389
| 144
| 3
| 51
| 48
| 0.932331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2c2ebe8bee0cbf3a09201aee150aaad32813763f
| 28,428
|
py
|
Python
|
Boundaries.py
|
Basistransformoptimusprime/Particle_in_a_Box
|
61c8587cc449cb0d0d0b6aaa499a524a9133fbca
|
[
"MIT"
] | 1
|
2021-05-30T19:39:44.000Z
|
2021-05-30T19:39:44.000Z
|
Boundaries.py
|
Basistransformoptimusprime/Particle_in_a_Box
|
61c8587cc449cb0d0d0b6aaa499a524a9133fbca
|
[
"MIT"
] | null | null | null |
Boundaries.py
|
Basistransformoptimusprime/Particle_in_a_Box
|
61c8587cc449cb0d0d0b6aaa499a524a9133fbca
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from Backend import *
from scipy.optimize import fsolve
from scipy.optimize import brentq
from scipy.integrate import quad
from scipy.misc import derivative
import warnings
class Symmetric_Boundary(New_Style_Boundary):
    """Box boundary with the same Robin parameter gamma at both walls.

    Energy eigenstates split by parity (even l <-> even states, odd l <-> odd
    states); their wavenumbers kl are roots of transcendental equations solved
    numerically with brentq/fsolve.  A purely imaginary kl encodes a
    negative-energy bound state.
    NOTE(review): New_Style_Boundary, Function_of_n and l_to_kl_mapper come
    from the Backend star import -- their exact contracts are assumed here.
    """
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)
        # Quantization conditions f(gamma*L, k*L) == 0 for the four
        # parity / energy-sign combinations.
        self._pos_energy_even_state_eq = lambda gammaL, kL: gammaL - kL*np.tan(kL/2)
        self._pos_energy_odd_state_eq = lambda gammaL, kL: gammaL + kL/np.tan(kL/2)
        self._neg_energy_even_state_eq = lambda gammaL, kappaL: gammaL + kappaL*np.tanh(kappaL/2)
        self._neg_energy_odd_state_eq = lambda gammaL, kappaL: gammaL + kappaL/np.tanh(kappaL/2)
        # Margin that keeps the brentq brackets away from the tan/cot poles.
        self._eps = np.finfo(np.float32).eps

    def set_eps(self, new_eps: float) -> None:
        # Adjust the root-bracketing margin used by get_kl.
        self._eps = new_eps

    def get_kn(self, n: int | np.ndarray) -> float | np.ndarray:
        # Discrete momentum-grid wavenumbers, offset by the twist angle theta.
        return n*np.pi/self._L + self._theta/(2*self._L)

    def get_kl(self, l: int) -> complex:
        """Wavenumber of the l-th energy state.

        Real for positive-energy states; purely imaginary (1j*kappa) for
        bound states, which can occur only for l in {0, 1} when gamma is
        sufficiently negative.
        """
        gammaL = self._gamma*self._L
        eps = self._eps
        if l == 0:
            if self._gamma > 0:
                transc_eq = self._pos_energy_even_state_eq
                kL_upper_bound = (l+1)*np.pi-eps
                kL_lower_bound = eps
                kL_solution = brentq(lambda Kl: transc_eq(gammaL, Kl), kL_lower_bound, kL_upper_bound)
                return kL_solution/self._L
            else:
                transc_eq = self._neg_energy_even_state_eq
                # -gammaL seeds fsolve (large-kappa behaviour of the equation).
                kL_approx = -gammaL
                kL_solution = fsolve(lambda Kl: transc_eq(gammaL, Kl), kL_approx)[0]
                return 1j*kL_solution/self._L
        elif l == 1:
            # Threshold gamma > -2/L separates scattering from bound odd state
            # -- TODO confirm against the derivation in the accompanying notes.
            if self._gamma > -2/self._L:
                transc_eq = self._pos_energy_odd_state_eq
                kL_upper_bound = (l+1)*np.pi-eps
                kL_lower_bound = eps
                kL_solution = brentq(lambda Kl: transc_eq(gammaL, Kl), kL_lower_bound, kL_upper_bound)
                return kL_solution/self._L
            else:
                transc_eq = self._neg_energy_odd_state_eq
                kL_approx = -gammaL
                kL_solution = fsolve(lambda Kl: transc_eq(gammaL, Kl), kL_approx)[0]
                return 1j*kL_solution/self._L
        else:
            # l >= 2 is always positive-energy; root is bracketed in ((l-1)pi, (l+1)pi).
            if l%2 == 0:
                transc_eq = self._pos_energy_even_state_eq
            else:
                transc_eq = self._pos_energy_odd_state_eq
            kL_upper_bound = (l+1)*np.pi-eps
            kL_lower_bound = (l-1)*np.pi+eps
            kL_solution = brentq(lambda Kl: transc_eq(gammaL, Kl), kL_lower_bound, kL_upper_bound)
            return kL_solution/self._L
        pass  # unreachable: every branch above returns

    def get_x_space_projection(self, l: int) -> Function_of_n:
        """Normalized position-space wavefunction <x|l> as a Function_of_n.

        Hyperbolic forms cover the bound (imaginary-kl) states.
        """
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        if l%2 == 1:
            if np.imag(kl) == 0:
                return Function_of_n(lambda x: np.sqrt(2/L)*np.power(1-np.sin(kl*L)/(kl*L), -1/2)*np.sin(kl*x))
            else:
                kappal = np.imag(kl)
                return Function_of_n(lambda x: np.sqrt(2/L)*np.power(-1+np.sinh(kappal*L)/(kappal*L), -1/2)*np.sinh(kappal*x))
        else:
            if np.imag(kl) == 0:
                return Function_of_n(lambda x: np.sqrt(2/L)*np.power(1+np.sin(kl*L)/(kl*L), -1/2)*np.cos(kl*x))
            else:
                kappal = np.imag(kl)
                return Function_of_n(lambda x: np.sqrt(2/L)*np.power(1+np.sinh(kappal*L)/(kappal*L), -1/2)*np.cosh(kappal*x))

    def get_k_space_projection(self, l: int) -> Function_of_n:
        """Analytic momentum-space projection <k|l>, split by parity and energy sign."""
        #print("computing the k_space_projection using analytic results...")
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        if l%2 == 1:
            if np.imag(kl) == 0:
                return Function_of_n(lambda k: 1j*np.sqrt(L/np.pi)/np.sqrt(1 - np.sin(kl*L)/(kl*L))*(np.sin((kl+k)*L/2)/(kl*L+k*L) - np.sin((kl-k)*L/2)/(kl*L-k*L)))
            else:
                kappal = np.imag(kl)
                return Function_of_n(lambda k: (2j)*np.sqrt(L/np.pi)/np.sqrt(-1+np.sinh(kappal*L)/(kappal*L))*(k*L*np.cos(k*L/2)*np.sinh(kappal*L/2) - kappal*L*np.sin(k*L/2)*np.cosh(kappal*L/2))/((kappal*L)**2+(k*L)**2))
        else:
            if np.imag(kl) == 0:
                return Function_of_n(lambda k: np.sqrt(L/np.pi)/np.sqrt(1 + np.sin(kl*L)/(kl*L))*(np.sin((kl+k)*L/2)/(kl*L+k*L) + np.sin((kl-k)*L/2)/(kl*L-k*L)))
            else:
                kappal = np.imag(kl)
                return Function_of_n(lambda k: (2)*np.sqrt(L/np.pi)/np.sqrt(1+np.sinh(kappal*L)/(kappal*L))*(kappal*L*np.cos(k*L/2)*np.sinh(kappal*L/2) + k*L*np.sin(k*L/2)*np.cosh(kappal*L/2))/((kappal*L)**2+(k*L)**2))

    def get_x_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form position matrix element <lhs|x|rhs>.

        Vanishes between states of equal parity.  The formulas below assume
        the even state on the left, so mixed-parity arguments are swapped
        first -- presumably relying on the element being symmetric; confirm
        against the derivation.
        """
        if lhs_state%2 == rhs_state%2:
            return 0
        if lhs_state%2 == 1:
            temp = lhs_state
            lhs_state = rhs_state
            rhs_state = temp
        lhs_k = self._l_kl_map.get_kl(lhs_state)
        rhs_k = self._l_kl_map.get_kl(rhs_state)
        L = self._L
        if np.imag(lhs_k) == 0:
            if np.imag(rhs_k) == 0:
                # Both scattering states.
                cos_expr = np.cos((lhs_k-rhs_k)*L/2)/((lhs_k-rhs_k)*L) - np.cos((lhs_k+rhs_k)*L/2)/((lhs_k+rhs_k)*L)
                sin_expr = np.sin((lhs_k+rhs_k)*L/2)/(((lhs_k+rhs_k)*L)**2) - np.sin((lhs_k-rhs_k)*L/2)/(((lhs_k-rhs_k)*L)**2)
                norm_expr = np.sqrt((1+np.sin(lhs_k*L)/(lhs_k*L))*(1-np.sin(rhs_k*L)/(rhs_k*L)))
                return (cos_expr + 2*sin_expr)/norm_expr*L
            else:
                # Scattering (even) x bound (odd) state.
                rhs_kappa = np.imag(rhs_k)
                norm_kappa = rhs_kappa*L/2
                norm_k = lhs_k*L/2
                cos_cosh_expr = rhs_kappa*np.cos(norm_k)*np.cosh(norm_kappa)
                sin_sinh_expr = lhs_k*np.sin(norm_k)*np.sinh(norm_kappa)
                symm_expr = L/(rhs_kappa**2+lhs_k**2)*(sin_sinh_expr + cos_cosh_expr)
                cos_sinh_expr = (lhs_k**2-rhs_kappa**2)*np.cos(norm_k)*np.sinh(norm_kappa)
                sin_cosh_expr = 2*rhs_kappa*lhs_k*np.sin(norm_k)*np.cosh(norm_kappa)
                anti_symm_expr = 2/((rhs_kappa**2+lhs_k**2)**2)*(cos_sinh_expr - sin_cosh_expr)
                norm_expr = np.sqrt((1+np.sin(lhs_k*L)/(lhs_k*L))*(-1+np.sinh(rhs_kappa*L)/(rhs_kappa*L)))
                return (2/L)*(symm_expr + anti_symm_expr)/norm_expr
        else:
            lhs_kappa = np.imag(lhs_k)
            if np.imag(rhs_k) == 0:
                # Bound (even) x scattering (odd) state.
                norm_kappa = lhs_kappa*L/2
                norm_k = rhs_k*L/2
                norm_expr = np.sqrt((1-np.sin(rhs_k*L)/(rhs_k*L))*(1+np.sinh(lhs_kappa*L)/(lhs_kappa*L)))
                sin_sinh_expr = lhs_kappa*np.sin(norm_k)*np.sinh(norm_kappa)
                cos_cosh_expr = rhs_k*np.cos(norm_k)*np.cosh(norm_kappa)
                symm_expr = L/(lhs_kappa**2+rhs_k**2)*(sin_sinh_expr - cos_cosh_expr)
                cos_sinh_expr = 2*lhs_kappa*rhs_k*np.cos(norm_k)*np.sinh(norm_kappa)
                sin_cosh_expr = (rhs_k**2-lhs_kappa**2)*np.sin(norm_k)*np.cosh(norm_kappa)
                anti_symm_expr = 2/((rhs_k**2+lhs_kappa**2)**2)*(sin_cosh_expr + cos_sinh_expr)
                return (2/L)*(symm_expr + anti_symm_expr)/norm_expr
            else:
                # Both bound states.
                rhs_kappa = np.imag(rhs_k)
                cosh_expr = np.cosh((lhs_kappa+rhs_kappa)*L/2)/((lhs_kappa+rhs_kappa)*L) - np.cosh((lhs_kappa-rhs_kappa)*L/2)/((lhs_kappa-rhs_kappa)*L)
                sinh_expr = np.sinh((lhs_kappa-rhs_kappa)*L/2)/((lhs_kappa*L-rhs_kappa*L)**2) - np.sinh((lhs_kappa+rhs_kappa)*L/2)/((lhs_kappa*L+rhs_kappa*L)**2)
                norm_expr = np.sqrt((1+np.sinh(lhs_kappa*L)/(lhs_kappa*L))*(-1+np.sinh(rhs_kappa*L)/(rhs_kappa*L)))
                return (cosh_expr + 2*sinh_expr)/norm_expr*L

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form matrix element of the momentum-like operator pR.

        Vanishes between states of equal parity; sign factors track the parity
        of each state.
        """
        #print("computing the pR elements using analytic results...")
        if lhs_state%2 == rhs_state%2:
            return 0
        lhs_k = self._l_kl_map.get_kl(lhs_state)
        rhs_k = self._l_kl_map.get_kl(rhs_state)
        L = self._L
        lhs_sign = (-1)**lhs_state
        rhs_sign = -lhs_sign
        if np.imag(lhs_k) == 0:
            if np.imag(rhs_k) == 0:
                norm_expr = np.sqrt((1 + lhs_sign*np.sin(lhs_k*L)/(lhs_k*L))*(1 + rhs_sign*np.sin(rhs_k*L)/(rhs_k*L)))
                sin_expr = np.sin((lhs_k+rhs_k)*L/2)/((lhs_k+rhs_k)*L) + lhs_sign*np.sin((lhs_k-rhs_k)*L/2)/((lhs_k-rhs_k)*L)
                return (-2j)*rhs_k*sin_expr/norm_expr
            else:
                rhs_kappa = np.imag(rhs_k)
                norm_kappa = rhs_kappa*L/2
                norm_k = lhs_k*L/2
                norm_expr = np.sqrt((1 + lhs_sign*np.sin(lhs_k*L)/(lhs_k*L))*(rhs_sign + np.sinh(rhs_kappa*L)/(rhs_kappa*L)))
                if rhs_state%2 == 0:
                    sin_cosh_expr = rhs_kappa*np.sin(norm_k)*np.cosh(norm_kappa)
                    cos_sinh_expr = -lhs_k*np.cos(norm_k)*np.sinh(norm_kappa)
                else:
                    sin_cosh_expr = lhs_k*np.sin(norm_k)*np.cosh(norm_kappa)
                    cos_sinh_expr = rhs_kappa*np.cos(norm_k)*np.sinh(norm_kappa)
                anti_symm_expr = 2/(lhs_k**2+rhs_kappa**2)*(sin_cosh_expr + cos_sinh_expr)
                return (-1j*rhs_kappa)*(2/L)*anti_symm_expr/norm_expr
        else:
            lhs_kappa = np.imag(lhs_k)
            if np.imag(rhs_k) == 0:
                norm_kappa = lhs_kappa*L/2
                norm_k = rhs_k*L/2
                norm_expr = np.sqrt((1 + rhs_sign*np.sin(rhs_k*L)/(rhs_k*L))*(lhs_sign + np.sinh(lhs_kappa*L)/(lhs_kappa*L)))
                if lhs_state%2 == 0:
                    sin_cosh_expr = rhs_k*np.sin(norm_k)*np.cosh(norm_kappa)
                    cos_sinh_expr = lhs_kappa*np.cos(norm_k)*np.sinh(norm_kappa)
                else:
                    sin_cosh_expr = lhs_kappa*np.sin(norm_k)*np.cosh(norm_kappa)
                    cos_sinh_expr = -rhs_k*np.cos(norm_k)*np.sinh(norm_kappa)
                anti_symm_expr = 2/(lhs_kappa**2+rhs_k**2)*(sin_cosh_expr + cos_sinh_expr)
                return (rhs_sign*1j*rhs_k)*(2/L)*anti_symm_expr/norm_expr
            else:
                rhs_kappa = np.imag(rhs_k)
                norm_expr = np.sqrt((lhs_sign + np.sinh(lhs_kappa*L)/(lhs_kappa*L))*(rhs_sign + np.sinh(rhs_kappa*L)/(rhs_kappa*L)))
                sinh_expr = np.sinh((lhs_kappa+rhs_kappa)*L/2)/((lhs_kappa+rhs_kappa)*L) + lhs_sign*np.sinh((lhs_kappa-rhs_kappa)*L/2)/((lhs_kappa-rhs_kappa)*L)
                return (-2j)*rhs_kappa*sinh_expr/norm_expr

    def discrete_momentum_projection_helper(self, l: int, n_array: np.ndarray) -> np.ndarray:
        # Sample the continuous k-space projection on the discrete momentum grid.
        kn_array = self.get_kn(n_array)
        temp_k_space_proj = np.sqrt(np.pi/self._L)*self.get_k_space_projection(l)
        return temp_k_space_proj(kn_array)

    def get_new_k_space_projection(self, l: int) -> Function_of_n:
        # Projection as a function of the integer momentum quantum number n.
        return Function_of_n(lambda n: self.discrete_momentum_projection_helper(l, n))
class Neumann_Boudnary(New_Style_Boundary):
    """Pure Neumann (zero-derivative) boundary on both walls.

    The spectrum is closed-form (kl = l*pi/L), so no numeric root finding is
    needed.  NOTE(review): the class name misspells "Boundary"; renaming
    would break existing importers, so it is kept.
    """
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)

    def get_kn(self, n: int | list) -> float | list:
        # Integer momentum grid (no theta offset here).
        return n*np.pi/self._L

    def get_kl(self, l: int) -> complex:
        # Closed-form spectrum of the Neumann box.
        return l*np.pi/self._L

    def get_x_space_projection(self, l: int) -> Function_of_n:
        """Position-space wavefunction: constant ground state, then cos/sin by parity."""
        L = self._L
        if l == 0:
            return Function_of_n(lambda x: 1/np.sqrt(L)*np.ones(np.shape(x)))
        else:
            if l%2 == 0:
                return Function_of_n(lambda x: np.sqrt(2/L)*np.cos(l*np.pi/L*x))
            else:
                return Function_of_n(lambda x: np.sqrt(2/L)*np.sin(l*np.pi/L*x))

    def get_x_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form <lhs|x|rhs>; zero between equal parities.

        Arguments are swapped so the even state sits on the left, matching
        the formulas below.
        """
        if lhs_state%2 == rhs_state%2:
            return 0
        if lhs_state%2 == 1:
            temp_state = lhs_state
            lhs_state = rhs_state
            rhs_state = temp_state
        L = self._L
        if lhs_state == 0:
            # Element against the constant ground state.
            return (2*np.sqrt(2)*L/(np.pi*rhs_state)**2)*(-1)**((rhs_state-1)/2)
        else:
            return (2*L/np.pi**2)*(-1)**((lhs_state+rhs_state-1)/2)*2*(lhs_state**2 + rhs_state**2)/(lhs_state**2 - rhs_state**2)**2

    def get_k_space_projection(self, l: int) -> Function_of_n:
        """Analytic momentum-space projection <k|l>."""
        L = self._L
        if l == 0:
            return Function_of_n(lambda k: np.sqrt(2*L/np.pi)*np.sin(k*L/2)/(k*L))
        if l%2 == 0:
            return Function_of_n(lambda k: np.sqrt(L/np.pi)*(np.sin(l*np.pi/2 + k*L/2)/(l*np.pi + k*L) + np.sin(l*np.pi/2 - k*L/2)/(l*np.pi - k*L)))
        else:
            return Function_of_n(lambda k: 1j*np.sqrt(L/np.pi)*(np.sin(l*np.pi/2 + k*L/2)/(l*np.pi + k*L) - np.sin(l*np.pi/2 - k*L/2)/(l*np.pi - k*L)))

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form pR matrix element; zero between equal parities."""
        L = self._L
        if lhs_state%2 == rhs_state%2:
            return 0
        if rhs_state == 0:
            # lhs_state != 0 is already implicitly given
            # as otherwise lhs_state%2 == rhs_state%2
            return 1j*np.sqrt(2)/L*(-1)**((lhs_state-1)/2)
        elif lhs_state == 0:
            # rhs_state != 0 is already implicitly given
            # as otherwise lhs_state%2 == rhs_state%2
            return -1j*np.sqrt(2)/L*(-1)**((rhs_state-1)/2)
        else:
            # The only case that remains is when neither rhs_state = 0 nor
            # lhs_state = 0 and lhs_state%2 != rhs_state%2
            return 2j/L*(-1)**((lhs_state+rhs_state-1)/2)*(lhs_state**2 + rhs_state**2)/(lhs_state**2 - rhs_state**2)

    def discrete_momentum_projection_helper(self, l: int, n_array: np.ndarray) -> np.ndarray:
        """Closed-form projection coefficients of state l on the momentum grid n_array."""
        if isinstance(n_array, int):
            n_array = [n_array]
        projection_coefficients = []
        if l == 0:
            for n in n_array:
                if n%2 == 0:
                    # Only n == 0 overlaps the constant state among even n.
                    coeff_append = 1/np.sqrt(2) if n==0 else 0
                else:
                    coeff_append = np.sqrt(2)/(np.pi*n)*(-1)**((n-1)/2)
                projection_coefficients.append(coeff_append)
            return np.array(projection_coefficients)
        if l%2 == 1:
            for n in n_array:
                if n%2 == 0:
                    coeff_append = 2j/np.pi*(-1)**((n+l-1)/2)*n/(n**2-l**2)
                elif n == l:
                    coeff_append = -1j/2
                elif n == -l:
                    coeff_append = 1j/2
                else:
                    coeff_append = 0
                projection_coefficients.append(coeff_append)
            return np.array(projection_coefficients)
        elif l%2 == 0:
            for n in n_array:
                if n%2 == 1:
                    coeff_append = 2/np.pi*(-1)**((n+l-1)/2)*n/(n**2-l**2)
                elif abs(n) == abs(l):
                    coeff_append = 1/2
                else:
                    coeff_append = 0
                projection_coefficients.append(coeff_append)
            return np.array(projection_coefficients)

    def get_new_k_space_projection(self, l: int) -> Function_of_n:
        # Projection as a function of the integer momentum quantum number n.
        return Function_of_n(lambda n: self.discrete_momentum_projection_helper(l, n))

    def set_theta(self, new_theta: float) -> None:
        # Stored via the base class but currently ignored by this boundary.
        super().set_theta(new_theta)
        warnings.warn("setting theta has not been implemented for pure Neumann boundaries yet and will thus have no effect")
class Dirichlet_Boundary(New_Style_Boundary):
    """Pure Dirichlet (hard-wall) boundary on both walls.

    Spectrum is closed-form: kl = (l+1)*pi/L, so no numeric root finding.
    """
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)

    def get_kn(self, n: int | list) -> float | list:
        # Integer momentum grid (no theta offset).
        return n*np.pi/self._L

    def get_kl(self, l: int) -> complex:
        # Closed-form hard-wall spectrum.
        return (l+1)*np.pi/self._L

    def get_x_space_projection(self, l: int) -> Function_of_n:
        # cos for even l, sin for odd l (box centred at x = 0).
        L = self._L
        if l%2 == 0:
            return Function_of_n(lambda x: np.sqrt(2/L)*np.cos((l+1)*np.pi/L*x))
        else:
            return Function_of_n(lambda x: np.sqrt(2/L)*np.sin((l+1)*np.pi/L*x))

    def get_x_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form <lhs|x|rhs>; zero between equal parities."""
        if lhs_state%2 == rhs_state%2:
            return 0
        else:
            L = self._L
            sign_expr = (-1)**((lhs_state+rhs_state-1)/2)
            return (2*L/np.pi**2)*sign_expr*(4*(lhs_state+1)*(rhs_state+1))/((lhs_state+1)**2 - (rhs_state+1)**2)**2

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form pR matrix element; zero between equal parities."""
        if lhs_state%2 == rhs_state%2:
            return 0
        else:
            L = self._L
            sign_expr = (-1)**((lhs_state+rhs_state-1)/2)
            return 1j/L*sign_expr*(4*(lhs_state+1)*(rhs_state+1))/((lhs_state+1)**2 - (rhs_state+1)**2)

    def get_k_space_projection(self, l: int) -> Function_of_n:
        """Analytic momentum-space projection <k|l>; odd states carry an i factor."""
        L = self._L
        i_factor = lambda l: 1j if l%2 == 1 else 1
        sign_factor = (-1)**l
        return Function_of_n(lambda k: i_factor(l)*np.sqrt(L/np.pi)*(np.sin((l+1)*np.pi/2 + k*L/2)/((l+1)*np.pi + k*L) + sign_factor*np.sin((l+1)*np.pi/2 - k*L/2)/((l+1)*np.pi - k*L)))

    def discrete_momentum_projection_helper(self, l: int, n_array: np.ndarray) -> np.ndarray:
        """Closed-form projection coefficients of state l on the momentum grid n_array."""
        if isinstance(n_array, int):
            n_array = [n_array]
        projection_coefficients = []
        if l%2 == 0:
            for n in n_array:
                if n%2 == 0:
                    coeff_append = 2/np.pi*(-1)**((l+n)/2)*(l+1)/((l+1)**2 - n**2)
                elif abs(n) == abs(l+1):
                    coeff_append = 1/2
                else:
                    coeff_append = 0
                projection_coefficients.append(coeff_append)
            return np.array(projection_coefficients)
        elif l%2 == 1:
            for n in n_array:
                if n%2 == 1:
                    coeff_append = 2j/np.pi*(-1)**((l+n)/2)*(l+1)/((l+1)**2 - n**2)
                elif n == l+1:
                    coeff_append = -1j/2
                elif n == -(l+1):
                    coeff_append = 1j/2
                else:
                    coeff_append = 0
                projection_coefficients.append(coeff_append)
            return np.array(projection_coefficients)

    def get_new_k_space_projection(self, l: int) -> Function_of_n:
        # Projection as a function of the integer momentum quantum number n.
        return Function_of_n(lambda n: self.discrete_momentum_projection_helper(l, n))

    def set_theta(self, new_theta: float) -> None:
        # Stored via the base class but currently ignored by this boundary.
        super().set_theta(new_theta)
        warnings.warn("setting theta has not been implemented for pure Dirichlet boundaries yet and will thus have no effect")
class Dirichlet_Neumann_Boundary(New_Style_Boundary):
    """Mixed boundary: Dirichlet on one wall, Neumann on the other.

    Spectrum is closed-form: kl = (2l+1)*pi/(2L); the momentum grid sits on
    half-integers.
    """
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)

    def get_kn(self, n: int | list) -> float | list:
        # Half-integer momentum grid.
        return (n+1/2)*np.pi/self._L

    def get_kl(self, l: int) -> complex:
        # Closed-form mixed-boundary spectrum.
        return (2*l+1)/2*np.pi/self._L

    def get_x_space_projection(self, l: int) -> Function_of_n:
        # Sine shifted so the node sits on the Dirichlet wall at x = -L/2.
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        return Function_of_n(lambda x: np.sqrt(2/L)*np.sin(kl*(x+L/2)))

    def get_k_space_projection(self, l: int) -> Function_of_n:
        """Analytic momentum-space projection <k|l> as a difference of two sinc-like terms."""
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        lhs_term = Function_of_n(lambda k: np.sin((kl+k)*L/2)/((kl+k)*L)*np.exp(-1j*(2*l+1)*np.pi/4))
        rhs_term = Function_of_n(lambda k: np.sin((kl-k)*L/2)/((kl-k)*L)*np.exp(1j*(2*l+1)*np.pi/4))
        return 1j*np.sqrt(L/np.pi)*(lhs_term - rhs_term)

    def discrete_momentum_projection_helper(self, l: int, n_array: np.ndarray) -> np.ndarray:
        """Closed-form projection coefficients of state l on the momentum grid n_array.

        Raises ValueError for non-integer n.  (Previously that case printed
        "eh?" and left the coefficient unbound, which surfaced later as a
        confusing NameError or silently reused a stale value.)
        """
        if isinstance(n_array, int):
            n_array = [n_array]
        projection_coefficients = []
        for n in n_array:
            if n == l:
                coeff_append = 1j/np.pi*(-1)**(l)*np.exp(-1j*(2*l+1)*np.pi/4)/(2*l+1) - 1j/2*np.exp(1j*(2*l+1)*np.pi/4)
            elif l+n == -1:
                coeff_append = 1j/2*np.exp(-1j*(2*l+1)*np.pi/4) - 1j/np.pi*(-1)**(l)*np.exp(1j*(2*l+1)*np.pi/4)/(2*l+1)
            elif (n+l)%2 == 0:
                coeff_append = 1j/np.pi*(-1)**((l+n)/2)*np.exp(-1j*(2*l+1)*np.pi/4)/(l+n+1)
            elif (n+l)%2 == 1:
                coeff_append = -1j/np.pi*(-1)**((l-n-1)/2)*np.exp(1j*(2*l+1)*np.pi/4)/(l-n)
            else:
                # Unreachable for integer n; only a non-integer quantum number
                # can fall through, so fail loudly instead of printing "eh?".
                raise ValueError("momentum quantum number n must be an integer, got %r" % (n,))
            projection_coefficients.append(coeff_append)
        return np.array(projection_coefficients)

    def get_new_k_space_projection(self, l: int) -> Function_of_n:
        # Projection as a function of the integer momentum quantum number n.
        return Function_of_n(lambda n: self.discrete_momentum_projection_helper(l, n))

    def get_x_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form <lhs|x|rhs>; sign flips between equal and opposite parities."""
        L = self._L
        if lhs_state%2 == rhs_state%2:
            return (2*L/np.pi**2)/(lhs_state+rhs_state+1)**2
        else:
            return -(2*L/np.pi**2)/(lhs_state-rhs_state)**2

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """Closed-form pR matrix element."""
        L = self._L
        if lhs_state%2 == rhs_state%2:
            return 1j/L*(lhs_state-rhs_state)/(lhs_state+rhs_state+1)
        else:
            return 1j/L*(lhs_state+rhs_state+1)/(rhs_state-lhs_state)

    def set_theta(self, new_theta: float) -> None:
        # Stored via the base class but currently ignored by this boundary.
        super().set_theta(new_theta)
        warnings.warn("setting theta has not been implemented for Dirichlet Neumann boundaries yet and will thus have no effect")
class Anti_Symmetric_Boundary(New_Style_Boundary):
    """Boundary with Robin parameters of opposite sign on the two walls.

    Only get_kl/get_kn are closed-form; matrix elements and momentum
    projections are computed by numeric quadrature (scipy.integrate.quad)
    over the position-space wavefunctions.
    NOTE(review): "nummerics" in the helper name is a typo kept for
    compatibility with external callers.
    """
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)

    @staticmethod
    def x_space_projection_for_nummerics(L, gamma, l, kl) -> Function_of_n:
        """Position-space wavefunction used as the quadrature integrand.

        Real kl: phase-adjusted superposition of plane waves; imaginary kl:
        single decaying exponential (bound state).
        """
        phase_factor = lambda l: np.exp(1j*np.arctan((gamma*L)/(np.pi*l))) if l%2 == 0 else np.exp(-1j*np.arctan((np.pi*l)/(gamma*L)))
        if np.imag(kl) == 0:
            boundray_expr = ((-1)**l)*(gamma + 1j*kl)/(gamma - 1j*kl)
            return Function_of_n(lambda x: phase_factor(l)/(np.sqrt(2*L))*(np.exp(1j*kl*x) - boundray_expr*np.exp(-1j*kl*x)))
        else:
            return Function_of_n(lambda x: np.sqrt(gamma/np.sinh(gamma*L))*np.exp(-gamma*x))

    def get_kn(self, n: int | list) -> float | list:
        # Momentum grid with theta twist, matching Symmetric_Boundary.
        return n*np.pi/self._L + self._theta/(2*self._L)

    def get_kl(self, l: int) -> complex:
        # l == 0 is the bound state with |k| = gamma; higher states are free.
        if l == 0:
            return 1j*self._gamma if self._gamma > 0 else -1j*self._gamma
        else:
            return l*np.pi/self._L

    def get_x_space_projection(self, l: int) -> Function_of_n:
        gamma = self._gamma
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        return self.x_space_projection_for_nummerics(L, gamma, l, kl)

    def get_x_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """<lhs|x|rhs> by numeric quadrature, real and imaginary parts separately."""
        gamma = self._gamma
        L = self._L
        lhs_k = self._l_kl_map.get_kl(lhs_state)
        rhs_k = self._l_kl_map.get_kl(rhs_state)
        lhs_integrand = self.x_space_projection_for_nummerics(L, gamma, lhs_state, lhs_k)
        rhs_integrand = self.x_space_projection_for_nummerics(L, gamma, rhs_state, rhs_k)
        integrand = lambda x: np.conj(lhs_integrand(x))*x*rhs_integrand(x)
        real = quad(lambda x: np.real(integrand(x)), -L/2, L/2)[0]
        imag = quad(lambda x: np.imag(integrand(x)), -L/2, L/2)[0]
        return real + 1j*imag

    def get_k_space_projection(self, l: int) -> Function_of_n:
        """Momentum-space projection via numeric Fourier transform of the x-space state."""
        gamma = self._gamma
        L = self._L
        kl = self._l_kl_map.get_kl(l)
        x_space_proj = self.x_space_projection_for_nummerics(L, gamma, l, kl)
        def converter(k_range: np.ndarray) -> np.ndarray:
            # Integrate psi(x)*exp(-ikx) over the box for each requested k.
            if isinstance(k_range, (int, float)):
                k_range = [k_range]
            out = []
            for k in k_range:
                integrand = lambda x: x_space_proj(x)*np.exp(-1j*k*x)
                real = quad(lambda x: np.real(integrand(x)), -L/2, L/2)[0]
                imag = quad(lambda x: np.imag(integrand(x)), -L/2, L/2)[0]
                out.append((real + 1j*imag)*1/np.sqrt(2*L))
            return np.array(out)
        return Function_of_n(converter)

    def discrete_momentum_projection_helper(self, l: int, n_array: np.ndarray) -> np.ndarray:
        # Sample the continuous k-space projection on the discrete momentum grid.
        kn_array = self.get_kn(n_array)
        temp_k_space_proj = np.sqrt(np.pi/self._L)*self.get_k_space_projection(l)
        return temp_k_space_proj(kn_array)

    def get_new_k_space_projection(self, l: int) -> Function_of_n:
        return Function_of_n(lambda n: self.discrete_momentum_projection_helper(l, n))

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        """pR matrix element by quadrature; derivative via finite differences (step 1e-4)."""
        gamma = self._gamma
        L = self._L
        lhs_k = self._l_kl_map.get_kl(lhs_state)
        rhs_k = self._l_kl_map.get_kl(rhs_state)
        lhs_integrand = self.x_space_projection_for_nummerics(L, gamma, lhs_state, lhs_k)
        rhs_integrand = self.x_space_projection_for_nummerics(L, gamma, rhs_state, rhs_k)
        integrand = lambda x: (-1j)*np.conj(lhs_integrand(x))*derivative(rhs_integrand, x, 0.0001)
        real = quad(lambda x: np.real(integrand(x)), -L/2, L/2)[0]
        imag = quad(lambda x: np.imag(integrand(x)), -L/2, L/2)[0]
        return real + 1j*imag
class Symmetric_Nummeric(Symmetric_Boundary):
    """Symmetric boundary whose pR elements are summed numerically in the momentum basis."""
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)
        # Number of momentum states sampled on each side of the sum's centre.
        self._n_range = 100

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        # This implementation of the <get_pR_matrix_element> method determines
        # the matrix elements <l|pR|l'> by expanding the energy states in the
        # momentum eigenbasis such that the matrix elements are obtained by
        # summing over all momentum states.
        lhs_proj_coeffs = self.get_new_k_space_projection(lhs_state)
        rhs_proj_coeffs = self.get_new_k_space_projection(rhs_state)
        # Construction of one or two intervals of momentum quantum numbers that
        # are taken as samples to approximate the infinite sum given in
        # <l|pR|l'> if expanded in the momentum basis.
        n_center = (lhs_state+rhs_state)//2
        n_range_adapted = abs(lhs_state-rhs_state)//2+self._n_range
        n_range_u = n_range_adapted
        # Do not extend the window below the centre further than n = 0.
        n_range_d = n_range_adapted if n_center > n_range_adapted else n_center
        n_p = np.arange(n_center-n_range_d+1, n_center+n_range_u+1, 1)
        n_n = np.arange(-n_center-n_range_u, -n_center+n_range_d+1, 1)
        n = np.append(n_n, n_p)
        # Truncated sum of kn * conj(c_l(n)) * c_l'(n) over the sampled window.
        return np.sum(self.get_kn(n)*np.conj(lhs_proj_coeffs(n))*rhs_proj_coeffs(n))

    def set_n_range(self, new_n_range) -> None:
        # Widen/narrow the truncation window of the momentum-basis sum.
        self._n_range = new_n_range
class Anti_Symmetric_Nummeric(Anti_Symmetric_Boundary):
    """Anti-symmetric boundary whose pR elements are summed numerically in the momentum basis."""
    def __init__(self, L: float, gamma: float, theta: float, l_to_kl_mapper_ref: l_to_kl_mapper) -> None:
        super().__init__(L, gamma, theta, l_to_kl_mapper_ref)
        # Number of momentum states sampled on each side of the sum's centre.
        self._n_range = 100

    def get_pR_matrix_element(self, lhs_state: int, rhs_state: int) -> complex:
        # Same momentum-basis truncated-sum scheme as Symmetric_Nummeric, but
        # centred on the larger of the two state indices.
        lhs_proj_coeffs = self.get_new_k_space_projection(lhs_state)
        rhs_proj_coeffs = self.get_new_k_space_projection(rhs_state)
        n_center = max(lhs_state, rhs_state)
        n_range_u = self._n_range
        # Do not extend the window below the centre further than n = 0.
        n_range_d = self._n_range if n_center > self._n_range else n_center
        n_p = np.arange(n_center-n_range_d+1, n_center+n_range_u+1, 1)
        n_n = np.arange(-n_center-n_range_u, -n_center+n_range_d+1, 1)
        n = np.append(n_n, n_p)
        # Truncated sum of kn * conj(c_l(n)) * c_l'(n) over the sampled window.
        return np.sum(self.get_kn(n)*np.conj(lhs_proj_coeffs(n))*rhs_proj_coeffs(n))

    def set_n_range(self, new_n_range) -> None:
        # Widen/narrow the truncation window of the momentum-basis sum.
        self._n_range = new_n_range
| 42.366617
| 220
| 0.574504
| 4,650
| 28,428
| 3.230538
| 0.044516
| 0.028625
| 0.032219
| 0.030555
| 0.865065
| 0.844761
| 0.824857
| 0.80888
| 0.787711
| 0.767142
| 0
| 0.024624
| 0.284297
| 28,428
| 670
| 221
| 42.429851
| 0.713703
| 0.02881
| 0
| 0.639752
| 0
| 0
| 0.011126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118012
| false
| 0.00207
| 0.014493
| 0.026915
| 0.3147
| 0.00207
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
25cd27caf895f841992a27aabb00613836fcc1a2
| 2,033
|
py
|
Python
|
rt-smart/tools/host.py
|
dengchow/rt_smart_imx6ull-
|
4d9879e3d543a4e4ddd4b73ce0d30668127f5c5a
|
[
"Apache-2.0"
] | null | null | null |
rt-smart/tools/host.py
|
dengchow/rt_smart_imx6ull-
|
4d9879e3d543a4e4ddd4b73ce0d30668127f5c5a
|
[
"Apache-2.0"
] | null | null | null |
rt-smart/tools/host.py
|
dengchow/rt_smart_imx6ull-
|
4d9879e3d543a4e4ddd4b73ce0d30668127f5c5a
|
[
"Apache-2.0"
] | 2
|
2021-11-10T12:07:35.000Z
|
2022-01-17T14:24:56.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File : building.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2019, RT-Thread Development Team
#
# Change Logs:
# Date Author Notes
# 2019-05-26 Bernard The first version
#
import os
import sys
import string
import pdb
from SCons.Script import *
from building import *
def BuildHostApplication(TARGET, SConscriptFile):
    """Build a native (host-side) application with SCons.

    Compiles the objects produced by SConscriptFile together with the host
    'rtthread' shim (located relative to this script) into one program.
    NOTE(review): mutates the module-level SCons `Env` as a side effect.
    """
    import platform
    global Env
    platform_type = platform.system()
    # On Windows/MinGW the '.mo' module suffix becomes a native '.exe'.
    if platform_type == 'Windows' or platform_type.find('MINGW') != -1:
        TARGET = TARGET.replace('.mo', '.exe')
    HostRtt = os.path.join(os.path.dirname(__file__), 'host', 'rtthread')
    Env = Environment()
    if not GetOption('verbose'):
        # override the default verbose command string
        Env.Replace(
            ARCOMSTR = 'AR $TARGET',
            ASCOMSTR = 'AS $TARGET',
            ASPPCOMSTR = 'AS $TARGET',
            CCCOMSTR = 'CC $TARGET',
            CXXCOMSTR = 'CXX $TARGET',
            LINKCOMSTR = 'LINK $TARGET'
        )
    objs = SConscript(SConscriptFile)
    objs += SConscript(HostRtt + '/SConscript')
    target = Env.Program(TARGET, objs)
    return target
def BuildHostLibrary(TARGET, SConscriptFile):
    """Build a host-side target with SCons, locating the rtthread shim under the cwd.

    NOTE(review): despite the name this calls Env.Program (not a Library
    builder) and differs from BuildHostApplication only in how HostRtt is
    resolved (os.getcwd() vs the script's directory) -- confirm intent.
    Mutates the module-level SCons `Env` as a side effect.
    """
    import platform
    global Env
    platform_type = platform.system()
    # On Windows/MinGW the '.mo' module suffix becomes a native '.exe'.
    if platform_type == 'Windows' or platform_type.find('MINGW') != -1:
        TARGET = TARGET.replace('.mo', '.exe')
    HostRtt = os.path.join(os.getcwd(), 'tools', 'host', 'rtthread')
    Env = Environment()
    if not GetOption('verbose'):
        # override the default verbose command string
        Env.Replace(
            ARCOMSTR = 'AR $TARGET',
            ASCOMSTR = 'AS $TARGET',
            ASPPCOMSTR = 'AS $TARGET',
            CCCOMSTR = 'CC $TARGET',
            CXXCOMSTR = 'CXX $TARGET',
            LINKCOMSTR = 'LINK $TARGET'
        )
    objs = SConscript(SConscriptFile)
    objs += SConscript(HostRtt + '/SConscript')
    target = Env.Program(TARGET, objs)
    return target
| 26.402597
| 73
| 0.602066
| 218
| 2,033
| 5.568807
| 0.412844
| 0.059308
| 0.042834
| 0.056013
| 0.746293
| 0.746293
| 0.746293
| 0.746293
| 0.746293
| 0.746293
| 0
| 0.012916
| 0.276439
| 2,033
| 76
| 74
| 26.75
| 0.812373
| 0.165765
| 0
| 0.75
| 0
| 0
| 0.136067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
25dbe4ce2de2aabfbf836cca40d9a58fb7d3da5d
| 7,181
|
py
|
Python
|
workspace/module/python-2.7/LxData/datObjects/_datObjData.py
|
no7hings/Lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | 2
|
2018-03-06T03:33:55.000Z
|
2019-03-26T03:25:11.000Z
|
workspace/module/python-2.7/LxData/datObjects/_datObjData.py
|
no7hings/lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | null | null | null |
workspace/module/python-2.7/LxData/datObjects/_datObjData.py
|
no7hings/lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from .. import datCfg, datObjAbs
class _Dat_Digit(datObjAbs.Abs_DatData):
    """Arithmetic mixin for numeric data objects.

    Each binary operator type-checks the other operand's raw value first,
    then wraps the combined raw result in a new instance of the same class.
    """
    def _arith(self, other, combine):
        # Shared guard + wrap used by every operator below; the assert runs
        # before any arithmetic, matching the original evaluation order.
        assert isinstance(other.raw(), self.VAR_dat__raw__rawtype_pattern), u'Argument Error, "arg" Must "VAR_dat__raw__rawtype_pattern".'
        return self.__class__(self, combine(self.raw(), other.raw()))

    def __add__(self, other):
        """Add two data objects; returns a new instance of type(self)."""
        return self._arith(other, lambda lhs, rhs: lhs + rhs)

    def __sub__(self, other):
        """Subtract *other* from self; returns a new instance of type(self)."""
        return self._arith(other, lambda lhs, rhs: lhs - rhs)

    def __mul__(self, other):
        """Multiply two data objects; returns a new instance of type(self)."""
        return self._arith(other, lambda lhs, rhs: lhs * rhs)

    def __div__(self, other):
        """Divide self by *other* (Python 2 division protocol)."""
        return self._arith(other, lambda lhs, rhs: lhs / rhs)
class Dat_Closure(datObjAbs.Abs_DatData):
    """Data object with no concrete raw type (closure/placeholder value)."""
    CLS_dat__raw = None
    VAR_dat__raw__rawtype_pattern = None

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)

    def _raw__get_str_(self):
        # A closure has no printable raw value.
        return u''
class Dat_Boolean(datObjAbs.Abs_DatData):
    """Boolean data object; raw value is a bool (ints accepted on input)."""
    CLS_dat__raw = bool
    VAR_dat__raw__rawtype_pattern = (bool, int)
    VAR_dat__raw__default = False

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)

    def _raw__get_raw_by_str(self, string):
        # Anything other than exactly 'false'/'true' maps to False.
        return {'false': False, 'true': True}.get(string, False)

    def _raw__get_str_(self):
        if not self.hasRaw():
            return u'false'
        # Index with the raw value: False/0 -> 'false', True/1 -> 'true'.
        return [u'false', u'true'][self.raw()]
class Dat_Integer(_Dat_Digit):
    """Integer data object with arithmetic support via _Dat_Digit."""
    CLS_dat__raw = int
    VAR_dat__raw__rawtype_pattern = (int, float)
    VAR_dat__raw__default = 0

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_IntegerN(datObjAbs.Abs_DatData):
    """Sequence of Dat_Integer elements, joined by the raw string separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_Integer
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__raw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_IntegerNN(datObjAbs.Abs_DatData):
    """Nested sequence of Dat_IntegerN, joined by the compound separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_IntegerN
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__compraw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_Float(_Dat_Digit):
    """Float data object with arithmetic support via _Dat_Digit."""
    CLS_dat__raw = float
    VAR_dat__raw__rawtype_pattern = (float, int)
    VAR_dat__raw__default = 0.0

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_FloatN(datObjAbs.Abs_DatData):
    """Sequence of Dat_Float elements, joined by the raw string separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_Float
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__raw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_FloatNN(datObjAbs.Abs_DatData):
    """Nested sequence of Dat_FloatN, joined by the compound separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_FloatN
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__compraw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_String(datObjAbs.Abs_DatData):
    """Text data object (Python 2: stored as unicode, str accepted)."""
    CLS_dat__raw = unicode
    VAR_dat__raw__rawtype_pattern = (unicode, str)
    VAR_dat__raw__default = u''

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_StringN(datObjAbs.Abs_DatData):
    """Sequence of Dat_String elements, joined by the raw string separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_String
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__raw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_Filepath(datObjAbs.Abs_DatData):
    """File-path data object (Python 2: stored as unicode, str accepted)."""
    CLS_dat__raw = unicode
    VAR_dat__raw__rawtype_pattern = (unicode, str)
    VAR_dat__raw__default = u''

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_FilepathN(datObjAbs.Abs_DatData):
    """Sequence of Dat_Filepath elements, joined by the raw string separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_Filepath
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__raw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_Nodename(datObjAbs.Abs_DatData):
    """Node-name data object (Python 2: stored as unicode, str accepted)."""
    CLS_dat__raw = unicode
    VAR_dat__raw__rawtype_pattern = (unicode, str)
    VAR_dat__raw__default = u''

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
class Dat_NodenameN(datObjAbs.Abs_DatData):
    """Sequence of Dat_Nodename elements, joined by the raw string separator."""
    CLS_dat__raw = list
    VAR_dat__raw__rawtype_pattern = (list, tuple)
    VAR_dat__raw__default = []  # NOTE(review): mutable class-level default
    CLS_dat__data__element = Dat_Nodename
    VAR_dat__data__datasep = datCfg.DatUtility.DEF_dat__raw_strsep

    def __init__(self, *args):
        """Accept either a raw value or another data object; see base class."""
        self._initAbsDatData(*args)
| 24.762069
| 138
| 0.617602
| 917
| 7,181
| 4.293348
| 0.087241
| 0.082296
| 0.08001
| 0.089408
| 0.888494
| 0.836678
| 0.822454
| 0.822454
| 0.822454
| 0.822454
| 0
| 0.01161
| 0.280323
| 7,181
| 289
| 139
| 24.847751
| 0.750194
| 0.168361
| 0
| 0.577236
| 0
| 0
| 0.048859
| 0.024146
| 0
| 0
| 0
| 0
| 0.03252
| 1
| 0.170732
| false
| 0
| 0.00813
| 0.00813
| 0.821138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d333c1deb3ffa5af5f93133be20dd1f2474017f9
| 774
|
py
|
Python
|
tests/formatters/test_core.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | 13
|
2016-02-23T08:15:22.000Z
|
2021-07-17T20:54:57.000Z
|
tests/formatters/test_core.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | 1
|
2017-03-30T08:11:40.000Z
|
2017-09-07T15:01:08.000Z
|
tests/formatters/test_core.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | 1
|
2020-02-21T09:44:40.000Z
|
2020-02-21T09:44:40.000Z
|
import npc
import pytest
def test_listing_formatter():
    """get_listing_formatter maps each format key to its module's listing."""
    expected = {
        'markdown': npc.formatters.markdown.listing,
        'html': npc.formatters.html.listing,
        'json': npc.formatters.json.listing,
    }
    for fmt, listing_func in expected.items():
        assert npc.formatters.get_listing_formatter(fmt) == listing_func
def test_report_formatter():
    """get_report_formatter maps each format key to its module's report."""
    expected = {
        'markdown': npc.formatters.markdown.report,
        'html': npc.formatters.html.report,
        'json': npc.formatters.json.report,
    }
    for fmt, report_func in expected.items():
        assert npc.formatters.get_report_formatter(fmt) == report_func
| 40.736842
| 64
| 0.770026
| 90
| 774
| 6.444444
| 0.155556
| 0.248276
| 0.455172
| 0.258621
| 0.894828
| 0.863793
| 0.755172
| 0
| 0
| 0
| 0
| 0
| 0.129199
| 774
| 18
| 65
| 43
| 0.860534
| 0
| 0
| 0
| 0
| 0
| 0.041344
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d36b1b8d7f2d60f43bdee39d6e5cbbca107f625e
| 5,210
|
py
|
Python
|
tests/test_selenium.py
|
mkhumtai/6CCS3PRJ
|
c7d5bedf9529f6e2b7a57e102761716c11f961c8
|
[
"MIT"
] | null | null | null |
tests/test_selenium.py
|
mkhumtai/6CCS3PRJ
|
c7d5bedf9529f6e2b7a57e102761716c11f961c8
|
[
"MIT"
] | null | null | null |
tests/test_selenium.py
|
mkhumtai/6CCS3PRJ
|
c7d5bedf9529f6e2b7a57e102761716c11f961c8
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import os
# Setup selenium driver
def test_setup():
    """Create the shared Chrome driver used by every subsequent test."""
    global driver
    project_root = os.path.abspath(os.path.dirname(__file__))
    driver = webdriver.Chrome(executable_path=os.path.join(project_root, "chromedriver_mac"))
    driver.implicitly_wait(10)
# Check that users who are not logged receives Unauthorized user notice
def test_login_required():
    """A logged-out visitor hitting a protected page sees 'Unauthorized'."""
    # Log out first so the session is guaranteed unauthenticated.
    driver.get('https://k1763918.herokuapp.com/logout.html')
    driver.get('https://k1763918.herokuapp.com/allTime.html')
    assert "Unauthorized" in driver.page_source
# Check that users are able to register
def test_register():
    """A fresh visitor can submit the registration form."""
    driver.get('https://k1763918.herokuapp.com/logout.html')
    driver.get('https://k1763918.herokuapp.com/register.html')
    # Fill the form fields in declaration order, then submit.
    form_values = {
        'username': 'testing_user',
        'email': 'testing@gmail.com',
        'password': 'testing_password',
    }
    for field_id, value in form_values.items():
        driver.find_element_by_id(field_id).send_keys(value)
    driver.find_element_by_xpath("//button[@class='btn btn-fill btn-primary']").click()
# Test that users cannot register with the same information
def test_user_exists():
    """Registering twice with identical details must be rejected."""
    driver.get('https://k1763918.herokuapp.com/logout.html')
    driver.get('https://k1763918.herokuapp.com/register.html')
    # Same credentials as test_register, so the server should refuse them.
    form_values = {
        'username': 'testing_user',
        'email': 'testing@gmail.com',
        'password': 'testing_password',
    }
    for field_id, value in form_values.items():
        driver.find_element_by_id(field_id).send_keys(value)
    driver.find_element_by_xpath("//button[@class='btn btn-fill btn-primary']").click()
    assert "User exists!" in driver.page_source
# Check that users with incorrect login details cannot login
def test_unauthenticated_user():
    """Login with unknown credentials shows the 'Unknown user' label."""
    driver.get('https://k1763918.herokuapp.com/login.html')
    credentials = {'username': 'random_user', 'password': 'wrong_password'}
    for field_id, value in credentials.items():
        driver.find_element_by_id(field_id).send_keys(value)
    driver.find_element_by_xpath("//button[@class='btn btn-fill btn-primary']").click()
    assert "<label>Unknown user</label>" in driver.page_source
# Check that users with correct login details can login
def test_authenticated_user():
    """Login with the registered credentials lands on the dashboard."""
    driver.get('https://k1763918.herokuapp.com/login.html')
    credentials = {'username': 'testing_user', 'password': 'testing_password'}
    for field_id, value in credentials.items():
        driver.find_element_by_id(field_id).send_keys(value)
    driver.find_element_by_xpath("//button[@class='btn btn-fill btn-primary']").click()
    assert "No. of Cases by quarter" in driver.page_source
# Check that table visualization can be viewed from /query.html
def test_query_table():
    """A fully-specified query renders the table visualisation."""
    driver.get('https://k1763918.herokuapp.com/query.html')
    # Locate all three filter checkboxes first, then tick them in order.
    serotype_box = driver.find_element_by_xpath("//input[@value='H5N1 HPAI']")
    region_box = driver.find_element_by_xpath("//input[@value='Asia']")
    host_type_box = driver.find_element_by_xpath("//input[@value='wild']")
    serotype_box.click()
    region_box.click()
    host_type_box.click()
    driver.find_element_by_name("from_date").send_keys('01/01/2004')
    driver.find_element_by_name("to_date").send_keys('01/01/2020')
    driver.find_element_by_xpath("//input[@value='Table']").click()
    assert "Data gathered from" in driver.page_source
# Check that zero rows are returned when data queried incorrectly
def test_query_table_no_rows():
    """An under-specified query (host type omitted) must return zero rows.

    BUG FIX: this function was also named ``test_query_table``, so it
    silently shadowed the previous table test and only one of the two was
    ever collected by pytest. Renamed so both tests run.
    """
    driver.get('https://k1763918.herokuapp.com/query.html')
    serotype_box = driver.find_element_by_xpath("//input[@value='H5N1 HPAI']")
    region_box = driver.find_element_by_xpath("//input[@value='Asia']")
    serotype_box.click()
    region_box.click()
    driver.find_element_by_name("from_date").send_keys('01/01/2004')
    driver.find_element_by_name("to_date").send_keys('01/01/2020')
    driver.find_element_by_xpath("//input[@value='Table']").click()
    assert "Total rows: 0" in driver.page_source
# Check that marker visualization can be viewed from /query.html
def test_query_marker():
    """A fully-specified query renders the marker (interactive map) view."""
    driver.get('https://k1763918.herokuapp.com/query.html')
    serotype_box = driver.find_element_by_xpath("//input[@value='H5N1 HPAI']")
    region_box = driver.find_element_by_xpath("//input[@value='Asia']")
    host_type_box = driver.find_element_by_xpath("//input[@value='wild']")
    serotype_box.click()
    region_box.click()
    host_type_box.click()
    driver.find_element_by_name("from_date").send_keys('01/01/2004')
    driver.find_element_by_name("to_date").send_keys('01/01/2020')
    driver.find_element_by_xpath("//input[@value='Markers']").click()
    assert "Interactive Map" in driver.page_source
# Check that heatmap visualization can be viewed from /query.html
def test_query_heatmap():
    """A fully-specified query renders the heatmap view."""
    driver.get('https://k1763918.herokuapp.com/query.html')
    serotype_box = driver.find_element_by_xpath("//input[@value='H5N1 HPAI']")
    region_box = driver.find_element_by_xpath("//input[@value='Asia']")
    host_type_box = driver.find_element_by_xpath("//input[@value='wild']")
    serotype_box.click()
    region_box.click()
    host_type_box.click()
    driver.find_element_by_name("from_date").send_keys('01/01/2004')
    driver.find_element_by_name("to_date").send_keys('01/01/2020')
    driver.find_element_by_xpath("//input[@value='Heatmap']").click()
    assert "Heatmap using Leaflet" in driver.page_source
def test_teardown():
    """Dispose of the shared driver once all tests have run."""
    # close() ends the window, quit() ends the driver process.
    driver.close()
    driver.quit()
| 42.704918
| 87
| 0.732821
| 745
| 5,210
| 4.877852
| 0.174497
| 0.101816
| 0.173088
| 0.193451
| 0.759769
| 0.759769
| 0.73005
| 0.721244
| 0.701431
| 0.690149
| 0
| 0.034535
| 0.116315
| 5,210
| 121
| 88
| 43.057851
| 0.754778
| 0.114779
| 0
| 0.647727
| 0
| 0
| 0.337174
| 0.054348
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.125
| false
| 0.045455
| 0.022727
| 0
| 0.147727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3a34210881e07e14dc11529a4498357c5533cea
| 143
|
py
|
Python
|
ex10_drills.py
|
shanukk27/learn-python
|
cb7d76db00101a3ad96858f9a2f9593b5a8c3f93
|
[
"Apache-2.0"
] | null | null | null |
ex10_drills.py
|
shanukk27/learn-python
|
cb7d76db00101a3ad96858f9a2f9593b5a8c3f93
|
[
"Apache-2.0"
] | null | null | null |
ex10_drills.py
|
shanukk27/learn-python
|
cb7d76db00101a3ad96858f9a2f9593b5a8c3f93
|
[
"Apache-2.0"
] | null | null | null |
# Family member names, one per line; the leading "\n" pushes the first
# name onto its own line after the label when printed.
family_members = "\nKoyakutty\nFathima\nShinu\nShynu\nArif\nReyah\nShanu\nNasri"
print("My family members are: {}".format(family_members))
| 35.75
| 81
| 0.769231
| 18
| 143
| 6
| 0.777778
| 0.361111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083916
| 143
| 3
| 82
| 47.666667
| 0.824427
| 0
| 0
| 0
| 0
| 0
| 0.614286
| 0.435714
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6ca948e5b1f96d3d024b87ee65402b6be10714d4
| 1,369
|
py
|
Python
|
models/cog.py
|
Minigrim0/MACS_VUBot
|
776c2a6586cf3d54272f72e187e2efc91210cf4e
|
[
"MIT"
] | null | null | null |
models/cog.py
|
Minigrim0/MACS_VUBot
|
776c2a6586cf3d54272f72e187e2efc91210cf4e
|
[
"MIT"
] | 1
|
2021-11-14T14:35:53.000Z
|
2021-11-14T15:32:29.000Z
|
models/cog.py
|
Minigrim0/MACS_VUBot
|
776c2a6586cf3d54272f72e187e2efc91210cf4e
|
[
"MIT"
] | 1
|
2021-11-14T14:37:07.000Z
|
2021-11-14T14:37:07.000Z
|
from discord.ext import commands
from discord.ext.commands import Context
from models.client import MaxVUBot
class CommandCog(commands.Cog):
    """Pin-management commands, driven by replying to the target message."""

    @commands.command()
    async def pls_pin(self, ctx: Context):
        """Pin the message that the invoking message replies to.

        The invoking (command) message is deleted 3 s after success.
        """
        # NOTE(review): the original annotated this local with
        # Optional[discord.MessageReference], but neither `Optional` nor
        # `discord` is imported at module level; the annotation was never
        # evaluated so it did not fail — dropped here to remove the latent
        # NameError risk. Confirm and add imports if annotations are needed.
        message_reference = ctx.message.reference
        if not message_reference:
            await ctx.reply("Please use this command while replying on the message you wish to pin")
            return
        # Prefer the cached message; fall back to an API fetch.
        message_to_pin = message_reference.cached_message or await ctx.channel.fetch_message(
            message_reference.message_id
        )
        await message_to_pin.pin(reason=f"Pinned by {ctx.author}")
        await ctx.message.delete(delay=3)  # Deletes the request to pin after 3 seconds on command success

    @commands.command()
    async def pls_unpin(self, ctx: Context):
        """Unpin the message that the invoking message replies to.

        The invoking (command) message is deleted 3 s after success.
        """
        message_reference = ctx.message.reference
        if not message_reference:
            # BUG FIX: the reply previously said "wish to pin" (copy-paste
            # from pls_pin); corrected to "unpin".
            await ctx.reply("Please use this command while replying on the message you wish to unpin")
            return
        message_to_unpin = message_reference.cached_message or await ctx.channel.fetch_message(
            message_reference.message_id
        )
        await message_to_unpin.unpin()
        await ctx.message.delete(delay=3)  # Deletes the request to pin after 3 seconds on command success
| 44.16129
| 106
| 0.707816
| 180
| 1,369
| 5.25
| 0.316667
| 0.169312
| 0.050794
| 0.048677
| 0.831746
| 0.77672
| 0.77672
| 0.77672
| 0.77672
| 0.77672
| 0
| 0.003777
| 0.226443
| 1,369
| 30
| 107
| 45.633333
| 0.888574
| 0.089847
| 0
| 0.615385
| 0
| 0
| 0.128721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9f4ae343e7d053c7246ff591f8a06402f34e5515
| 276
|
py
|
Python
|
flake8_pie/test_utils.py
|
sbdchd/flake8-pie
|
96ae441d92abe64b23e1c37b0eb15778434000cc
|
[
"BSD-2-Clause"
] | 23
|
2019-01-25T14:58:20.000Z
|
2022-03-27T02:20:01.000Z
|
flake8_pie/test_utils.py
|
sbdchd/flake8-assign-and-return
|
96ae441d92abe64b23e1c37b0eb15778434000cc
|
[
"BSD-2-Clause"
] | 50
|
2019-04-17T02:37:01.000Z
|
2022-03-27T02:19:53.000Z
|
flake8_pie/test_utils.py
|
sbdchd/flake8-assign-and-return
|
96ae441d92abe64b23e1c37b0eb15778434000cc
|
[
"BSD-2-Clause"
] | 5
|
2019-02-21T07:29:12.000Z
|
2021-11-06T21:01:26.000Z
|
from flake8_pie.utils import pairwise
def test_pairwise() -> None:
    """pairwise yields adjacent pairs, padding the final pair with None."""
    cases = [
        ([1], [(1, None)]),
        ([], []),
        ([1, 2], [(1, 2), (2, None)]),
        ([1, 2, 3], [(1, 2), (2, 3), (3, None)]),
    ]
    for given, expected in cases:
        assert list(pairwise(given)) == expected
| 30.666667
| 67
| 0.550725
| 40
| 276
| 3.75
| 0.35
| 0.266667
| 0.48
| 0.44
| 0.446667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 0.195652
| 276
| 8
| 68
| 34.5
| 0.603604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9f557c14f4347f6753f5f3c3292b5bfda4a23936
| 4,633
|
py
|
Python
|
batch_test_accuracies.py
|
Big-Data-Course-Team/Machine-Learning-with-Spark-Streaming
|
40cbddaa079b7de6c9501951a119ba6fee18ed50
|
[
"MIT"
] | null | null | null |
batch_test_accuracies.py
|
Big-Data-Course-Team/Machine-Learning-with-Spark-Streaming
|
40cbddaa079b7de6c9501951a119ba6fee18ed50
|
[
"MIT"
] | null | null | null |
batch_test_accuracies.py
|
Big-Data-Course-Team/Machine-Learning-with-Spark-Streaming
|
40cbddaa079b7de6c9501951a119ba6fee18ed50
|
[
"MIT"
] | 1
|
2021-12-29T08:56:31.000Z
|
2021-12-29T08:56:31.000Z
|
import pickle
import json
import importlib
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
import argparse
# NOTE(review): pickle/json/importlib/np/os/argparse are unused here but are
# kept in case other tooling imports this script; confirm before removing.

plt.rcParams.update({'figure.figsize': (14, 10), 'figure.dpi': 100})
plt.rcParams.update({'font.size': 14})

# Batch sizes for which metrics files exist (Batch_<size>/test_eval_metrics/).
BATCH_SIZES = [1000, 2000, 2500, 3000, 4000, 5000]


def _read_accuracies(path):
    """Read a one-accuracy-per-line metrics file into a list of floats.

    BUG FIX: the original opened every file without ever closing it;
    the context manager releases each handle.
    """
    with open(path, "r") as acc_file:
        return [float(line) for line in acc_file.readlines()]


def _plot_model(model_key, title):
    """Plot test accuracy vs. iteration per batch size and save as EPS.

    :param model_key: metrics filename stem ('lr', 'mnb' or 'pac')
    :param title: plot title, also used in the output filename
    """
    series = [
        _read_accuracies('Batch_%d/test_eval_metrics/%s_3.txt' % (size, model_key))
        for size in BATCH_SIZES
    ]
    # Iteration axis follows the first series' length, as in the original.
    iters = [i for i in range(1, len(series[0]) + 1)]
    for size, acc_list in zip(BATCH_SIZES, series):
        plt.plot(iters, acc_list, label='Batch size - %d' % size)
    plt.xlabel('Iteration')
    plt.ylabel('Accuracy')
    plt.title(title)
    plt.legend()
    with open('./batch_accuracy_%s.eps' % title, "wb+") as img_file:
        plt.savefig(img_file, format='eps', bbox_inches='tight')
    plt.clf()


# The original repeated the ~40-line plotting block three times; one helper
# called per model keeps the behavior (same files read, same EPS written).
_plot_model('lr', 'SGD')
_plot_model('mnb', 'MNB')
_plot_model('pac', 'PAC')
| 32.626761
| 66
| 0.742715
| 904
| 4,633
| 3.464602
| 0.084071
| 0.167625
| 0.033525
| 0.046935
| 0.925287
| 0.925287
| 0.925287
| 0.893678
| 0.893678
| 0.866858
| 0
| 0.069297
| 0.102957
| 4,633
| 141
| 67
| 32.858156
| 0.684312
| 0
| 0
| 0.672897
| 0
| 0
| 0.260475
| 0.161987
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074766
| 0
| 0.074766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9f5d9d483eb8eb7488cb5fe3654317c8666dd3cd
| 81
|
py
|
Python
|
snippets/numpy/lib/__init__.py
|
oojBuffalo/micropython-ulab
|
4407bec88c3a7585ffbdfdd98e72bed12329ff3c
|
[
"MIT"
] | 1
|
2022-03-07T08:54:35.000Z
|
2022-03-07T08:54:35.000Z
|
snippets/numpy/lib/__init__.py
|
oojBuffalo/micropython-ulab
|
4407bec88c3a7585ffbdfdd98e72bed12329ff3c
|
[
"MIT"
] | null | null | null |
snippets/numpy/lib/__init__.py
|
oojBuffalo/micropython-ulab
|
4407bec88c3a7585ffbdfdd98e72bed12329ff3c
|
[
"MIT"
] | null | null | null |
from .function_base import *
from .polynomial import *
from .type_check import *
| 20.25
| 28
| 0.777778
| 11
| 81
| 5.545455
| 0.636364
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 4
| 29
| 20.25
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9f9e60f0bd92015c665951a32bf941313ff54b3c
| 46
|
py
|
Python
|
scan_models/modbus/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | 1
|
2020-05-22T09:52:33.000Z
|
2020-05-22T09:52:33.000Z
|
scan_models/modbus/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | 8
|
2021-03-18T21:22:40.000Z
|
2022-03-11T23:32:48.000Z
|
scan_models/modbus/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | null | null | null |
from .scan import modbus_resolve, modbus_scan
| 23
| 45
| 0.847826
| 7
| 46
| 5.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 1
| 46
| 46
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c8f8ce6ca0385b16b24e39502594550208f0d52
| 84
|
py
|
Python
|
vk_dialog_backuper/__main__.py
|
r4rdsn/vk-dialog-backuper
|
046466f0eaadeeeec4f147062604571ac4666af2
|
[
"MIT"
] | 1
|
2020-09-07T00:55:13.000Z
|
2020-09-07T00:55:13.000Z
|
vk_dialog_backuper/__main__.py
|
r4rdsn/vk-dialog-backuper
|
046466f0eaadeeeec4f147062604571ac4666af2
|
[
"MIT"
] | null | null | null |
vk_dialog_backuper/__main__.py
|
r4rdsn/vk-dialog-backuper
|
046466f0eaadeeeec4f147062604571ac4666af2
|
[
"MIT"
] | null | null | null |
import vk_dialog_backuper

# Package entry point: run main() only when executed as a script
# (e.g. `python -m vk_dialog_backuper`).
if __name__ == '__main__':
    vk_dialog_backuper.main()
| 16.8
| 29
| 0.761905
| 11
| 84
| 4.727273
| 0.636364
| 0.307692
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 84
| 4
| 30
| 21
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4c9c287823d32ac61bb3a69fa99170b12f074f58
| 138
|
py
|
Python
|
dist_zero/cgen/__init__.py
|
koreiklein/dist_zero
|
68ef5a0603edc53925daeec1f4bb684025cacbd4
|
[
"Unlicense"
] | 1
|
2019-03-18T13:27:35.000Z
|
2019-03-18T13:27:35.000Z
|
dist_zero/cgen/__init__.py
|
koreiklein/dist_zero
|
68ef5a0603edc53925daeec1f4bb684025cacbd4
|
[
"Unlicense"
] | null | null | null |
dist_zero/cgen/__init__.py
|
koreiklein/dist_zero
|
68ef5a0603edc53925daeec1f4bb684025cacbd4
|
[
"Unlicense"
] | null | null | null |
from .expression import *
from .statement import *
from .lvalue import *
from .program import *
from .type import *
from .common import *
| 19.714286
| 25
| 0.73913
| 18
| 138
| 5.666667
| 0.444444
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 138
| 6
| 26
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e25ca50c1dfeb91062f7dbb04357e12e791a6ea6
| 41
|
py
|
Python
|
kuwala/pipelines/google-poi/src/__init__.py
|
bmahmoudyan/kuwala
|
7951ed49ac1c31c874a4446bb4661152c4d69c90
|
[
"Apache-2.0"
] | 381
|
2021-04-08T13:04:57.000Z
|
2022-03-29T09:49:46.000Z
|
kuwala/pipelines/google-poi/src/__init__.py
|
bmahmoudyan/kuwala
|
7951ed49ac1c31c874a4446bb4661152c4d69c90
|
[
"Apache-2.0"
] | 92
|
2021-04-20T12:28:40.000Z
|
2022-03-30T17:55:36.000Z
|
kuwala/pipelines/google-poi/src/__init__.py
|
bmahmoudyan/kuwala
|
7951ed49ac1c31c874a4446bb4661152c4d69c90
|
[
"Apache-2.0"
] | 27
|
2021-04-26T17:52:32.000Z
|
2022-03-21T19:36:34.000Z
|
import nest_asyncio

# Patch asyncio so its event loop can be re-entered (nested run_until_complete
# calls) — presumably needed because this pipeline runs inside an environment
# with an already-running loop; confirm against the callers.
nest_asyncio.apply()
| 13.666667
| 20
| 0.853659
| 6
| 41
| 5.5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 2
| 21
| 20.5
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e284e5b1f47084f129d877cd99c5f21af12375e7
| 208
|
py
|
Python
|
examples/coolColumns.py
|
jsharf/Hexagons
|
d9f295c39262e4eaf3f98db3cef872b9ecf37c49
|
[
"MIT"
] | null | null | null |
examples/coolColumns.py
|
jsharf/Hexagons
|
d9f295c39262e4eaf3f98db3cef872b9ecf37c49
|
[
"MIT"
] | null | null | null |
examples/coolColumns.py
|
jsharf/Hexagons
|
d9f295c39262e4eaf3f98db3cef872b9ecf37c49
|
[
"MIT"
] | null | null | null |
# `Column` is provided by the host environment that executes this example;
# it is not defined or imported in this file.
# Place columns symmetrically about x = 0, all at y = 50, in left-to-right
# order — identical call sequence to writing each call out by hand.
for x_position in (-300, -170, -160, -150, -20, -10, 0, 10, 20, 150, 160, 170, 300):
    Column(x_position, 50)
| 14.857143
| 16
| 0.658654
| 39
| 208
| 3.512821
| 0.230769
| 0.70073
| 0.160584
| 0.189781
| 0.890511
| 0
| 0
| 0
| 0
| 0
| 0
| 0.324176
| 0.125
| 208
| 13
| 17
| 16
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e2b63ba9c5d56976a7cbe15ed293468a106bb15b
| 75
|
py
|
Python
|
tests/run_flake8/bytes.py
|
10sr/flake8-no-implicit-concat
|
11db2327ffc122d9481c6e03a77cf62b1dc85d25
|
[
"MIT"
] | 15
|
2020-05-21T19:39:58.000Z
|
2022-03-22T11:04:12.000Z
|
tests/run_flake8/bytes.py
|
10sr/flake8-no-implicit-concat
|
11db2327ffc122d9481c6e03a77cf62b1dc85d25
|
[
"MIT"
] | 43
|
2020-05-20T05:19:20.000Z
|
2021-11-25T05:34:51.000Z
|
tests/run_flake8/bytes.py
|
10sr/flake8-no-implicit-concat
|
11db2327ffc122d9481c6e03a77cf62b1dc85d25
|
[
"MIT"
] | 1
|
2020-08-25T23:04:08.000Z
|
2020-08-25T23:04:08.000Z
|
a = b"aaa" b"bbb"  # implicit bytes concatenation on one line
b = [b"aaa",
     b"bbb"  # implicit concatenation spanning lines inside a list
     b"ccc"]
c = rb"aaa" b"bbb"  # raw-bytes + bytes implicit concatenation
# NOTE(review): this is a lint fixture for flake8-no-implicit-concat — the
# implicit concatenations above are deliberate inputs; do not "fix" them.
| 12.5
| 18
| 0.426667
| 17
| 75
| 1.882353
| 0.411765
| 0.375
| 0.65625
| 0.5
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.306667
| 75
| 5
| 19
| 15
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2c78607fc802cad1bdf3d94a15ab0a3978c6ad29
| 41
|
py
|
Python
|
mystring/__init__.py
|
lamter/mydealutil
|
26934e6e61b40327cb9cabc43e41cd01caf5bd2b
|
[
"MIT"
] | null | null | null |
mystring/__init__.py
|
lamter/mydealutil
|
26934e6e61b40327cb9cabc43e41cd01caf5bd2b
|
[
"MIT"
] | null | null | null |
mystring/__init__.py
|
lamter/mydealutil
|
26934e6e61b40327cb9cabc43e41cd01caf5bd2b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from .mystring import *
| 10.25
| 23
| 0.682927
| 6
| 41
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.195122
| 41
| 3
| 24
| 13.666667
| 0.818182
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2c820a57f530bbb56c5243e8f2c0b550a2974865
| 151
|
py
|
Python
|
src/test/anovos/drift/test_distances.py
|
dattranm/anovos
|
817378c810b2260e85794ef473c3080efabc34ca
|
[
"Apache-2.0"
] | null | null | null |
src/test/anovos/drift/test_distances.py
|
dattranm/anovos
|
817378c810b2260e85794ef473c3080efabc34ca
|
[
"Apache-2.0"
] | 3
|
2022-02-28T18:22:39.000Z
|
2022-03-28T18:17:46.000Z
|
src/test/anovos/drift/test_distances.py
|
dattranm/anovos
|
817378c810b2260e85794ef473c3080efabc34ca
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import numpy as np
from anovos.drift.distances import hellinger, psi, js_divergence, kl_divergence, ks
def test_hellinger():
pass
| 16.777778
| 83
| 0.781457
| 22
| 151
| 5.227273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15894
| 151
| 8
| 84
| 18.875
| 0.905512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3940435e33101f147fa943bc767f608b929fa1ad
| 110
|
py
|
Python
|
alpaca/utils/__init__.py
|
kra5h/alpaca
|
0e014f12bfa6601e5bb2c57c4da083c270560d6c
|
[
"Apache-2.0"
] | 14
|
2020-03-04T14:16:23.000Z
|
2021-12-26T17:47:55.000Z
|
alpaca/utils/__init__.py
|
kra5h/alpaca
|
0e014f12bfa6601e5bb2c57c4da083c270560d6c
|
[
"Apache-2.0"
] | 5
|
2020-07-07T15:27:57.000Z
|
2020-11-09T14:11:06.000Z
|
alpaca/utils/__init__.py
|
kra5h/alpaca
|
0e014f12bfa6601e5bb2c57c4da083c270560d6c
|
[
"Apache-2.0"
] | 5
|
2020-03-14T18:27:53.000Z
|
2021-12-26T17:49:18.000Z
|
from . import datasets
from . import ue_metrics
from . import dimension_reduction
from . import model_builder
| 22
| 33
| 0.818182
| 15
| 110
| 5.8
| 0.6
| 0.45977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 110
| 4
| 34
| 27.5
| 0.925532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
394f96a5ee3621dd59afdf09917e0c02cee00c42
| 25
|
py
|
Python
|
bruges/models/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
bruges/models/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
bruges/models/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
from .wedge import wedge
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
395249463c197603056a294813a673d6fb0e7f3a
| 47
|
py
|
Python
|
vigilance/default_suites/__init__.py
|
wilstoff/vigilance
|
0947ed5256ae54e941f4b57915395f6fe70ca58a
|
[
"MIT"
] | 1
|
2019-02-09T01:11:12.000Z
|
2019-02-09T01:11:12.000Z
|
vigilance/default_suites/__init__.py
|
wilstoff/vigilance
|
0947ed5256ae54e941f4b57915395f6fe70ca58a
|
[
"MIT"
] | null | null | null |
vigilance/default_suites/__init__.py
|
wilstoff/vigilance
|
0947ed5256ae54e941f4b57915395f6fe70ca58a
|
[
"MIT"
] | 2
|
2018-04-21T04:38:43.000Z
|
2022-03-02T22:34:07.000Z
|
"""@defgroup default_suites default_suites
"""
| 15.666667
| 42
| 0.765957
| 5
| 47
| 6.8
| 0.6
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 2
| 43
| 23.5
| 0.790698
| 0.829787
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
20066b121ae0abdd0c7da7fabf1b1bda94af7424
| 185
|
py
|
Python
|
avalanche/benchmarks/utils/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | 12
|
2021-04-16T15:49:59.000Z
|
2022-02-27T18:04:58.000Z
|
avalanche/benchmarks/utils/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | null | null | null |
avalanche/benchmarks/utils/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | 2
|
2021-06-22T04:11:52.000Z
|
2021-11-12T03:27:18.000Z
|
from .utils import *
from .dataset_utils import IDataset, IDatasetWithTargets
from .avalanche_dataset import *
from .datasets_from_filelists import *
from .torchvision_wrapper import *
| 30.833333
| 56
| 0.832432
| 22
| 185
| 6.772727
| 0.5
| 0.201342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113514
| 185
| 5
| 57
| 37
| 0.908537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
200676a9830be1f0220554c67dfaa65e76a4a71b
| 207
|
py
|
Python
|
flask_dance/consumer/__init__.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | 836
|
2015-01-11T23:01:58.000Z
|
2022-03-28T07:32:52.000Z
|
flask_dance/consumer/__init__.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | 353
|
2015-02-11T00:32:58.000Z
|
2022-03-28T14:45:38.000Z
|
flask_dance/consumer/__init__.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | 189
|
2015-03-10T15:04:29.000Z
|
2022-03-16T21:49:11.000Z
|
from .oauth1 import OAuth1ConsumerBlueprint
from .oauth2 import OAuth2ConsumerBlueprint
from .base import oauth_authorized, oauth_before_login, oauth_error
from .requests import OAuth1Session, OAuth2Session
| 41.4
| 67
| 0.874396
| 23
| 207
| 7.695652
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.091787
| 207
| 4
| 68
| 51.75
| 0.909574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
200f042d8fd57535c0b33ee54e02a3a240d5f8b9
| 27
|
py
|
Python
|
src/gui/telegrambot/tlgbotcore/sqliteutils/__init__.py
|
kaefik/wtf
|
74a4e12e0303fc1341838541a418fee011f2d9a7
|
[
"MIT"
] | null | null | null |
src/gui/telegrambot/tlgbotcore/sqliteutils/__init__.py
|
kaefik/wtf
|
74a4e12e0303fc1341838541a418fee011f2d9a7
|
[
"MIT"
] | null | null | null |
src/gui/telegrambot/tlgbotcore/sqliteutils/__init__.py
|
kaefik/wtf
|
74a4e12e0303fc1341838541a418fee011f2d9a7
|
[
"MIT"
] | null | null | null |
from .sqliteutils import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
645082fdf189a0fc2f989a8aa63d4f1bc824a374
| 3,876
|
py
|
Python
|
BHP-Code/Chapter9/decryptor.py
|
yangtze736/Snake
|
e47f89bec994352562e9e171b2d640d0aa8621b0
|
[
"MIT"
] | 6
|
2021-12-07T21:02:12.000Z
|
2022-03-03T12:08:14.000Z
|
BHP-Code/Chapter9/decryptor.py
|
yangtze736/Snake
|
e47f89bec994352562e9e171b2d640d0aa8621b0
|
[
"MIT"
] | 15
|
2020-01-28T22:25:10.000Z
|
2022-03-11T23:21:02.000Z
|
BHP-Code/Chapter9/decryptor.py
|
yangtze736/Snake
|
e47f89bec994352562e9e171b2d640d0aa8621b0
|
[
"MIT"
] | 1
|
2022-01-15T23:57:36.000Z
|
2022-01-15T23:57:36.000Z
|
import zlib
import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
encrypted = """XxfaX7nfQ48K+l0rXM3tQf3ShFcytAQ4sLe6vn8bWdreho4riaJ5Dy5PeijSKbsgWSMoeZLmihxb0YAFgCaIp11AUl4kmIiY+c+8LJonbTTembxv98GePM1SEme5/vMwGORJilw+rTdORSHzwbC56sw5NG8KosgLWwHEGEGbhii2qBkuyQrIc9ydoOKKCe0ofTRnaI2c/lb9Ot3vkEIgxCks94H6qVkAfhO34HS7nClUldn9UN040RYgtEqBgvAFzoEhDuRtfjJu1dzyzaFtRAVhcQ6HdgZMWRfpaxKQOmbhXwYyGRQfwNl/Rwgn1EJBFAhvIaEifHDlCw+hLViNYlae7IdfIb6hWtWPyFrkaNjmkbhhXclNgZe0+iPPDzsZbpHI1IckG0gVlTdlGKGz+nK5Cxyso41icC4gO7tmdXDGgF6bMt/GC1VjMVmL/rYsb8jzJblmuQBAeFNacyhjxrzIH5v60RQ1BxwfD+wLCKfyzn3vQucPak2cnwBs3yTIEShYj0ymP4idU/5Qt5qkqMDyvO4U8DmqB4KT58+o2B3c88+lUZjz7c9ygwKjp2hSNf+Dm9H3YJY2Pn6YlydyT1sYWCy06DZko7z3uae5GYGjez8hnCIFt+mpeLvEelSHeZfyV8wYyHg5Y9eA2NZNX6yNVD8IREhjXGWdbGTn41lVCqEiCetY9SKdWeL1Hp/vJN3SOo4qglbQF7P6oqqg0bofnAcphLVaHw/FOGWtW1CFEQUQdIg9bk+SJqM/s1ozJlisenrRzxv3L5LthEfLflCafK0u3n2gPa4F3ok4tx9i+r+MykRTw+OksMfVu71CAMuJdrFQLMSpyWkQ86Vc/QIXgdoCKkAYx5xr/U8gDXkZ4GvL9biEZv/fb5Wh7Br1Hu6idUgTYpEJVVnMuI13ePGeJLA54Il2S7aDyrgfhb61WQmoMRGvLP7uxCjgLwrxZNjAYJTmXszLvvgmI+lHe5o8rgQw6zSGpl9k27urV4bA0Zt+PsYiLNbEQqqxrJxKcbKqozl8XtfMXanct9pKu4vaq8fH/j9jvZ133UtcaR5iTQ0K7P4J5Qoaxz3uUhGrgplZ1jE9Nr0iyRj722dW82b4m1f/h80K7EuvwEeOfdYZl7iFL8yRi9dfopwATjKbKrWFroGCb/wvpc5ujpzDfwAeWsSU4Nve2qBDo5coVt1GI8rzHUh52TQ007JhcYABIxZGSFeeJ3bFgvqO2kUK/Pc36Au0VlNFds/j+fIuMlmFUuckBLCTpE2W9hYqmVOWBmyeZPJNzVI4gLexFbXbg8+0Eq6Pa4MxZsR3wypgC9LE/dvLbQ3oSn9x7nKMXpdq9r+xK1sjodpeYNz7t/5GpFu1teN0SFbmsoXjVEyOAn3L5Gd4Wxua7y9xOixc1H2/bbyNqJZAjEm34DDmNRTQtrqCwOEXwFGKgRGUzPYGC74wAPDDTaQEBv7Toc7rfkzgRX4ROW0SUaEPmi5tAlXe+CKVdJGtLKXUXYRHLMZ4jTzGsD89dmt2r2Fh6AUUN2e9jzzK2ULMnMhRUnDdcM74jbuDHGtXt56pFxFKJ21FQFS8JK0ZOqYa+0JjLuSzrLN9gSCu/JuTPC60LTxLsLcWZVR7cIHQE+sgDtt40/6O1YE7/8rs6qB9re28gDY1s9R5HFtjowO3ylRWqlaV9MC1OGzM4xHPxG2V+2zuq6ol8Cs="""
private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAyXUTgFoL/2EPKoN31l5Tlak7VxhdusNCWQKDfcN5Jj45GQ1o
ZZjsECQ8jK5AaQuCWdmEQkgCEV23L2y71G+Th/zlVPjp0hgC6nOKOuwmlQ1jGvfV
vaNZ0YXrs+sX/wg5FT/bTS4yzXeW6920tdls2N7Pu5N1FLRW5PMhk6GW5rzVhwdD
vnfaUoSVj7oKaIMLbN/TENvnwhZZKlTZeK79ix4qXwYLe66CrgCHDf4oBJ/nO1oY
welxuIXVPhIZnVpkbz3IL6BfEZ3ZDKzGeRs6YLZuR2u5KUbr9uabEzgtrLyOeoK8
UscKmzOvtwxZDcgNijqMJKuqpNZczPHmf9cS1wIDAQABAoIBAAdOiMOKAI9lrNAk
7o7G4w81kSJqjtO8S0bBMZW5Jka90QJYmyW8MyuutMeBdnKY6URrAEILLJAGryM4
NWPSHC69fG/li02Ec26ffC8A67FSR/rtbEIxj4tq6Q6gg0FLwg5EP6b/+vW61a1+
YBSMa0c+ZZhvE7sJg3FQZDJflQKPXFHYxOlS42+UyUP8K07cFznsQCvia9mCHUG6
BDFbV/yjbMyYgKTCVmMeaCS2K0TlbcyGpF0Bz95mVpkrU6pHXY0UAJIv4dyguywe
dBZcJlruSRL0OJ+3Gb3CJS7YdsPW807LSyf8gcrHMpgV5z2CdGlaoaLBJyS/nDHi
n07PIbECgYEA4Rjlet1xL/Sr9HnHVUH0m1iST0SrLlQCzrMkiw4g5rCOCnhWPNQE
dpnRpgUWMhhyZj82SwigkdXC2GpvBP6GDg9pB3Njs8qkwEsGI8GFhUQfKf8Bnnd2
w3GUHiRoJpVxrrE3byh23pUiHBdbp7h2+EaOTrRsc2w3Q4NbNF+FOOkCgYEA5R1Z
KvuKn1Sq+0EWpb8fZB+PTwK60qObRENbLdnbmGrVwjNxiBWE4BausHMr0Bz/cQzk
tDyohkHx8clp6Qt+hRFd5CXXNidaelkCDLZ7dasddXm1bmIlTIHjWWSsUEsgUTh7
crjVvghU2Sqs/vCLJCW6WYGb9JD2BI5R9pOClb8CgYEAlsOtGBDvebY/4fwaxYDq
i43UWSFeIiaExtr30+c/pCOGz35wDEfZQXKfF7p6dk0nelJGVBVQLr1kxrzq5QZw
1UP/Dc18bvSASoc1codwnaTV1rQE6pWLRzZwhYvO8mDQBriNr3cDvutWMEh4zCpi
DMJ9GDwCE4DctuxpDvgXa9kCgYEAuxNjo30Qi1iO4+kZnOyZrR833MPV1/hO50Y4
RRAGBkX1lER9ByjK/k6HBPyFYcDLsntcou6EjFt8OnjDSc5g2DZ9+7QKLeWkMxJK
Yib+V+4Id8uRIThyTC4ifPN+33D4SllcMyhJHome/lOiPegbNMC5kCwMM33J455x
vmxjy/ECgYAOrFR7A9fP4QlPqFCQKDio/FhoQy5ERpl94lGozk4Ma+QDJiRUxA3N
GomBPAvYGntvGgPWrsEHrS01ZoOKGBfk5MgubSPFVI00BD6lccmff/0tOxYtb+Pp
vOGHt9D9yo3DOhyvJbedpi3u3g13G+FZFw6d1T8Jzm5eZUvG7WeUtg==
-----END RSA PRIVATE KEY-----"""
rsakey = RSA.importKey(private_key)
rsakey = PKCS1_OAEP.new(rsakey)
offset = 0
decrypted = ""
encrypted = base64.b64decode(encrypted)
while offset < len(encrypted):
decrypted += rsakey.decrypt(encrypted[offset:offset+256])
offset += 256
# now we decompress to original
plaintext = zlib.decompress(decrypted)
print plaintext
| 74.538462
| 1,726
| 0.926987
| 183
| 3,876
| 19.612022
| 0.808743
| 0.011145
| 0.007244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140957
| 0.029928
| 3,876
| 52
| 1,727
| 74.538462
| 0.813564
| 0.007482
| 0
| 0
| 0
| 0.02381
| 0.880395
| 0.858034
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.119048
| null | null | 0.02381
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6451a0214449a4edee70920c97ceea448c2cd0bf
| 2,315
|
py
|
Python
|
mdp/policy_iteration.py
|
diegocom/ai_implementation
|
f32227589649a148b002aedf477c2e8803efb7bf
|
[
"MIT"
] | null | null | null |
mdp/policy_iteration.py
|
diegocom/ai_implementation
|
f32227589649a148b002aedf477c2e8803efb7bf
|
[
"MIT"
] | null | null | null |
mdp/policy_iteration.py
|
diegocom/ai_implementation
|
f32227589649a148b002aedf477c2e8803efb7bf
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym
import gym_ai_lab
import mdps.planning as mdp
from timeit import default_timer as timer
# Learning parameters
delta = 1e-3
gamma = 0.9
pmaxiters = 50 # Max number of policy improvements to perform
vmaxiters = 5 # Max number of iterations to perform while evaluating a policy
envname = "LavaFloor-v0"
print("\n----------------------------------------------------------------")
print("\tEnvironment: ", envname)
print("----------------------------------------------------------------\n")
env = gym.make(envname)
env.render()
t = timer()
policy = mdp.policy_iteration(env, pmaxiters, vmaxiters, gamma, delta)
print("\n\nPolicy Iteration:\n----------------------------------------------------------------"
"\nExecution time: {0}s\nPolicy:\n{1}".format(round(timer() - t, 4), np.vectorize(env.actions.get)(policy.reshape(
env.rows, env.cols))))
envname = "VeryBadLavaFloor-v0"
print("\n----------------------------------------------------------------")
print("\tEnvironment: ", envname)
print("----------------------------------------------------------------\n")
env = gym.make(envname)
env.render()
t = timer()
policy = mdp.policy_iteration(env, pmaxiters, vmaxiters, gamma, delta)
print("\n\nPolicy Iteration:\n----------------------------------------------------------------"
"\nExecution time: {0}s\nPolicy:\n{1}".format(round(timer() - t, 4), np.vectorize(env.actions.get)(policy.reshape(
env.rows, env.cols))))
envname = "NiceLavaFloor-v0"
print("\n----------------------------------------------------------------")
print("\tEnvironment: ", envname)
print("----------------------------------------------------------------\n")
env = gym.make(envname)
env.render()
t = timer()
policy = mdp.policy_iteration(env, pmaxiters, vmaxiters, gamma, delta)
print("\n\nPolicy Iteration:\n----------------------------------------------------------------"
"\nExecution time: {0}s\nPolicy:\n{1}".format(round(timer() - t, 4), np.vectorize(env.actions.get)(policy.reshape(
env.rows, env.cols))))
| 36.171875
| 120
| 0.445356
| 216
| 2,315
| 4.74537
| 0.296296
| 0.052683
| 0.023415
| 0.038049
| 0.721951
| 0.721951
| 0.721951
| 0.721951
| 0.721951
| 0.721951
| 0
| 0.010215
| 0.196544
| 2,315
| 63
| 121
| 36.746032
| 0.54086
| 0.054428
| 0
| 0.714286
| 0
| 0
| 0.39222
| 0.285584
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.119048
| 0
| 0.119048
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b38128070f11706580db205e89d4ed3942822e0e
| 158
|
py
|
Python
|
scripts/models/__init__.py
|
jmquintana79/utilsDS
|
1693810b6f10024542b30fdfedbfcd0518f32945
|
[
"MIT"
] | null | null | null |
scripts/models/__init__.py
|
jmquintana79/utilsDS
|
1693810b6f10024542b30fdfedbfcd0518f32945
|
[
"MIT"
] | null | null | null |
scripts/models/__init__.py
|
jmquintana79/utilsDS
|
1693810b6f10024542b30fdfedbfcd0518f32945
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: jmquintana79
# @Date: 2018-08-30 23:53:16
# @Last Modified by: jmquintana79
# @Last Modified time: 2018-08-30 23:54:01
| 26.333333
| 42
| 0.64557
| 25
| 158
| 4.08
| 0.72
| 0.117647
| 0.156863
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251908
| 0.170886
| 158
| 5
| 43
| 31.6
| 0.526718
| 0.93038
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b39e5b7ebc6df74d5bd401b1545cd3bd530fbb80
| 1,194
|
py
|
Python
|
tests/Composition/test_Composition__mixture_molar_volume.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
tests/Composition/test_Composition__mixture_molar_volume.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
tests/Composition/test_Composition__mixture_molar_volume.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Composition
####
################################################################################
################################################################################
class Composition(unittest.TestCase):
def test_Composition__mixture_molar_volume__allowed_calls(self):
pass
################################################################################
################################################################################
def test_Composition__mixture_molar_volume__not_allowed_calls(self):
pass
################################################################################
################################################################################
def test_Composition__mixture_molar_volume__computation(self):
pass
################################################################################
################################################################################
| 34.114286
| 80
| 0.237856
| 44
| 1,194
| 5.909091
| 0.454545
| 0.080769
| 0.207692
| 0.288462
| 0.569231
| 0.569231
| 0.430769
| 0.430769
| 0.430769
| 0.430769
| 0
| 0
| 0.070352
| 1,194
| 34
| 81
| 35.117647
| 0.234234
| 0.015075
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.3
| 0.3
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
b3c2fe038d7d941b137a6310ed7d11ca7ec8040c
| 30
|
py
|
Python
|
bankreader_demo/demoapp/__init__.py
|
misli/django-bankreader
|
c741c9af3f11899b1d9c9f2966da4810b3ade4c2
|
[
"BSD-3-Clause"
] | 1
|
2018-10-13T22:38:42.000Z
|
2018-10-13T22:38:42.000Z
|
bankreader_demo/demoapp/__init__.py
|
misli/django-bankreader
|
c741c9af3f11899b1d9c9f2966da4810b3ade4c2
|
[
"BSD-3-Clause"
] | null | null | null |
bankreader_demo/demoapp/__init__.py
|
misli/django-bankreader
|
c741c9af3f11899b1d9c9f2966da4810b3ade4c2
|
[
"BSD-3-Clause"
] | null | null | null |
from . import readers # noqa
| 15
| 29
| 0.7
| 4
| 30
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 1
| 30
| 30
| 0.913043
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b3c41c959f089fee7d76bb80d33690a1240d77e3
| 4,464
|
py
|
Python
|
a_rua_dos_cataventos/components/analysis/backup_analysis/iracema_analysis_old.py
|
DaviRaubach/arua
|
1e75d449e8f7205cd9522a7f1a1704c89b29023a
|
[
"MIT"
] | null | null | null |
a_rua_dos_cataventos/components/analysis/backup_analysis/iracema_analysis_old.py
|
DaviRaubach/arua
|
1e75d449e8f7205cd9522a7f1a1704c89b29023a
|
[
"MIT"
] | null | null | null |
a_rua_dos_cataventos/components/analysis/backup_analysis/iracema_analysis_old.py
|
DaviRaubach/arua
|
1e75d449e8f7205cd9522a7f1a1704c89b29023a
|
[
"MIT"
] | null | null | null |
import iracema
import matplotlib.pyplot as plt
import numpy as np
import abjad
import muda
def IracemaAnalysis(audioin, nharmonics, denominator):
audio = iracema.Audio(audioin)
# audio.play()
# audio.plot()
# specifying window and hop sizes
window, hop = 2048, 1024
# calculating the FFT
fft = iracema.spectral.fft(audio, window, hop)
# plotting the spectrogram
# iracema.plot.plot_spectrogram(fft)
# calculating the RMS
# rms = iracema.features.rms(audio, window, hop)
# plotting the RMS
# rms.plot()
# calculating the Peak Envelope
# peak = iracema.features.peak_envelope(audio, window, hop)
# plotting the Peak Envelope
# peak.plot()
# extract pitch
hps_pitch = iracema.pitch.hps(fft, minf0=1, maxf0=1000)
#extract harmonics
harmonics = iracema.harmonics.extract(fft, hps_pitch, nharm=nharmonics)
# plot the harmonics over the spectrogram
# iracema.plot.plot_audio_spectrogram_harmonics(
# audio=audio,
# rms=rms,
# peak_envelope=peak,
# fft=fft,
# fzero=harmonics['frequency'],
# harmonics=harmonics['frequency'],
# fftlim=(0,12000)
# )
# print(harmonics['frequency'].time)
x = harmonics['frequency'].data
y = harmonics['frequency'].time
# print(x.shape, y.shape)
# print(x[1].shape)
# for n, data in enumerate(x):
# print(n)
# plt.plot(y, x[n])
print("fs:", audio.fs)
freq_list = []
for n, data in enumerate(x):
if n != 0:
freq_sub_list = []
samples = data.shape[0]
measure = 44.1 * 4
half_second = int(44.1 / denominator)
mymod = int(samples / half_second)
for i, d in enumerate(data):
if i % mymod == 0:
freq_sub_list.append(d)
freq_list.append(freq_sub_list)
freq_list = np.array(freq_list)
print(freq_list[0, 2])
print(freq_list.shape)
all_pitches = []
container = abjad.Container()
for n in range(freq_list.shape[1]):
pitches = []
for i, list_ in enumerate(freq_list):
pitches.append(abjad.NamedPitch.from_hertz(freq_list[i, n]))
chord = abjad.Chord("<e' g' c''>4")
chord.written_duration = abjad.Duration(1, denominator)
chord.written_pitches = pitches
container.append(chord)
print(abjad.lilypond(container))
voice = abjad.Voice()
voice.append(container)
abjad.show(voice)
return container
analysis = IracemaAnalysis("janela_cut.wav", 12, 8)
# audio = iracema.Audio("janela_cut.wav")
# # audio.play()
# # audio.plot()
# # specifying window and hop sizes
# window, hop = 2048, 1024
# # calculating the FFT
# fft = iracema.spectral.fft(audio, window, hop)
# # plotting the spectrogram
# # iracema.plot.plot_spectrogram(fft)
# # calculating the RMS
# rms = iracema.features.rms(audio, window, hop)
# # plotting the RMS
# # rms.plot()
# # calculating the Peak Envelope
# peak = iracema.features.peak_envelope(audio, window, hop)
# # plotting the Peak Envelope
# # peak.plot()
# # extract pitch
# hps_pitch = iracema.pitch.hps(fft, minf0=1, maxf0=1000)
# #extract harmonics
# harmonics = iracema.harmonics.extract(fft, hps_pitch, nharm=12)
# # plot the harmonics over the spectrogram
# # iracema.plot.plot_audio_spectrogram_harmonics(
# # audio=audio,
# # rms=rms,
# # peak_envelope=peak,
# # fft=fft,
# # fzero=harmonics['frequency'],
# # harmonics=harmonics['frequency'],
# # fftlim=(0,12000)
# # )
# # print(harmonics['frequency'].time)
# x = harmonics['frequency'].data
# y = harmonics['frequency'].time
# print(x.shape, y.shape)
# print(x[1].shape)
# for n, data in enumerate(x):
# print(n)
# plt.plot(y, x[n])
# freq_list = []
# for n, data in enumerate(x):
# if n != 0:
# freq_sub_list = []
# samples = data.shape[0]
# half_second = int(0.5 * 44.1)
# mymod = int(samples/half_second)
# for i, d in enumerate(data):
# if i % mymod == 0:
# freq_sub_list.append(d)
# freq_list.append(freq_sub_list)
# freq_list = np.array(freq_list)
# print(freq_list[0, 2])
# print(freq_list.shape)
# all_pitches = []
# container = abjad.Container()
# for n in range(freq_list.shape[1]):
# pitches = []
# for i, list_ in enumerate(freq_list):
# pitches.append(abjad.NamedPitch.from_hertz(freq_list[i, n]))
# chord = abjad.Chord("<e' g' c''>4")
# chord.written_duration = abjad.Duration(1, 8)
# chord.written_pitches = pitches
# container.append(chord)
# print(abjad.lilypond(container))
# voice = abjad.Voice(container)
# abjad.show(voice)
| 23.871658
| 72
| 0.664203
| 608
| 4,464
| 4.774671
| 0.167763
| 0.049604
| 0.028936
| 0.04547
| 0.869445
| 0.869445
| 0.869445
| 0.869445
| 0.869445
| 0.869445
| 0
| 0.021589
| 0.190636
| 4,464
| 186
| 73
| 24
| 0.781899
| 0.602151
| 0
| 0
| 0
| 0
| 0.02845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.111111
| 0
| 0.155556
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b3fe24fa1c8f8d599cb66093c9c05f306ae98ca0
| 44
|
py
|
Python
|
asr_deepspeech/data/__init__.py
|
shangdibufashi/ASRDeepSpeech
|
f11134abb79e98062fbc25fab99ca4cf675e538b
|
[
"MIT"
] | 44
|
2020-03-03T13:05:57.000Z
|
2022-03-24T03:42:31.000Z
|
asr_deepspeech/data/__init__.py
|
shangdibufashi/ASRDeepSpeech
|
f11134abb79e98062fbc25fab99ca4cf675e538b
|
[
"MIT"
] | 6
|
2020-12-15T10:58:19.000Z
|
2021-10-12T01:59:17.000Z
|
asr_deepspeech/data/__init__.py
|
shangdibufashi/ASRDeepSpeech
|
f11134abb79e98062fbc25fab99ca4cf675e538b
|
[
"MIT"
] | 13
|
2020-05-20T06:42:20.000Z
|
2022-03-24T03:42:31.000Z
|
from .noise_injection import NoiseInjection
| 22
| 43
| 0.886364
| 5
| 44
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
37607a4df1fb814e9da0a9a55dfbc83ae4adcd8d
| 182
|
py
|
Python
|
neighbor/admin.py
|
BRIGHTON-ASUMANI/Jirani-hood
|
7cf8e72e650bc613aa31ef346444a9d727d340da
|
[
"MIT"
] | 1
|
2019-02-24T21:03:21.000Z
|
2019-02-24T21:03:21.000Z
|
neighbor/admin.py
|
BRIGHTON-ASUMANI/Jirani-hood
|
7cf8e72e650bc613aa31ef346444a9d727d340da
|
[
"MIT"
] | 1
|
2021-06-10T20:56:11.000Z
|
2021-06-10T20:56:11.000Z
|
neighbor/admin.py
|
BRIGHTON-ASUMANI/Jirani-hood
|
7cf8e72e650bc613aa31ef346444a9d727d340da
|
[
"MIT"
] | 1
|
2019-02-24T21:03:22.000Z
|
2019-02-24T21:03:22.000Z
|
from django.contrib import admin
from .models import Neighbourhood, Profile, Business
admin.site.register(Neighbourhood)
admin.site.register(Business)
admin.site.register(Profile)
| 26
| 53
| 0.82967
| 23
| 182
| 6.565217
| 0.478261
| 0.178808
| 0.337748
| 0.331126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082418
| 182
| 6
| 54
| 30.333333
| 0.904192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3767ce0b076c20f0c9dd26abd935a7a6e6fe8281
| 169
|
py
|
Python
|
gunicorn.py
|
skavila/AddressParser
|
105493146a39096d1258cd18a938bee25f872ac1
|
[
"MIT"
] | null | null | null |
gunicorn.py
|
skavila/AddressParser
|
105493146a39096d1258cd18a938bee25f872ac1
|
[
"MIT"
] | null | null | null |
gunicorn.py
|
skavila/AddressParser
|
105493146a39096d1258cd18a938bee25f872ac1
|
[
"MIT"
] | null | null | null |
import multiprocessing
import os
bind = os.getenv('SVC_BIND', '0.0.0.0:3000')
workers = int(os.getenv('SVC_CONCURRENCY', 1))
threads = int(os.getenv('SVC_THREADS', 10))
| 28.166667
| 46
| 0.721893
| 28
| 169
| 4.25
| 0.5
| 0.201681
| 0.277311
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071895
| 0.094675
| 169
| 6
| 47
| 28.166667
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0.270588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
806fde3433acb2e4c179edccfd84f1ebe9c6c743
| 92
|
py
|
Python
|
Python/tigre/utilities/io/__init__.py
|
tsadakane/TIGRE
|
a853cd2d4a6bc9509c01414b85ca75b4448fd700
|
[
"BSD-3-Clause"
] | 326
|
2016-07-01T10:48:09.000Z
|
2022-03-20T07:34:52.000Z
|
Python/tigre/utilities/io/__init__.py
|
tsadakane/TIGRE
|
a853cd2d4a6bc9509c01414b85ca75b4448fd700
|
[
"BSD-3-Clause"
] | 311
|
2016-07-05T16:00:06.000Z
|
2022-03-30T12:14:55.000Z
|
Python/tigre/utilities/io/__init__.py
|
tsadakane/TIGRE
|
a853cd2d4a6bc9509c01414b85ca75b4448fd700
|
[
"BSD-3-Clause"
] | 157
|
2016-08-08T12:13:09.000Z
|
2022-03-17T00:37:45.000Z
|
from .NikonDataLoader import NikonDataLoader
from .BrukerDataLoader import BrukerDataLoader
| 30.666667
| 46
| 0.891304
| 8
| 92
| 10.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 92
| 2
| 47
| 46
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80b3e07dd600fe8c4c60ec4e34549ef6791e83c8
| 31,244
|
py
|
Python
|
dnarandombot.py
|
saintdanelimbu/Random-Bot
|
330e557ac609a0dbeba3a126adc7103abef3f08a
|
[
"MIT"
] | null | null | null |
dnarandombot.py
|
saintdanelimbu/Random-Bot
|
330e557ac609a0dbeba3a126adc7103abef3f08a
|
[
"MIT"
] | null | null | null |
dnarandombot.py
|
saintdanelimbu/Random-Bot
|
330e557ac609a0dbeba3a126adc7103abef3f08a
|
[
"MIT"
] | null | null | null |
import discord
import os
import time
from discord.utils import get
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, CheckFailure, check
import random
from random import randint
from dhooks import Webhook,Embed
import names
client = discord.Client()
client = commands.Bot(command_prefix = '-',case_insensitive=True)
@client.event
async def on_ready():
channel = client.get_channel(903690747197390888)
await channel.send('Random Bot is UP!')
print("bot online")
##GLOBE##
@client.command()
async def globe(ctx,howmany:int):
def awitized():
numbers=[]
for i in range(0,howmany):
globe = ['0905','0906','0915','0916','0917','0926','0927','0935','0936','0937','0945','0953','0954','0955','0956','0965','0966','0967','0975','0977','0978','0979','0994','0995','0996','0997']
globenumber = random.choice(globe)
randomized = f"{globenumber}{random.randint(1000000,9999999)}"
numbers.append(randomized)
return '\n'.join(str(e) for e in numbers)
hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
embed = discord.Embed()
embed.title='GLOBE'
embed.colour = discord.Color.teal()
embed.description= awitized()
embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/814869462188556339/903697451754590218/globe-removebg-preview.png')
embed.set_author(name='DNA Random Bot')
await ctx.send(embed=embed)
hook.send(embed=embed)
##SMART##
@client.command()
async def smart(ctx,howmany:int):
    """Send `howmany` random Smart PH mobile numbers as an embed (mirrored to a webhook)."""
    # Constant prefix list hoisted out of the loop (it was rebuilt every iteration).
    prefixes = ['0908','0918','0919','0920','0921','0928','0929','0939','0947','0949','0951','0961','0998','0999']
    def awitized():
        # One "<prefix><7 digits>" number per line.
        return '\n'.join(
            f"{random.choice(prefixes)}{random.randint(1000000,9999999)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='SMART',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://download.logo.wine/logo/Smart_Communications/Smart_Communications-Logo.wine.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
##SUN##
@client.command()
async def sun(ctx,howmany:int):
    """Send `howmany` random Sun PH mobile numbers as an embed (mirrored to a webhook)."""
    # Constant prefix list hoisted out of the loop (it was rebuilt every iteration).
    prefixes = ['0922','0923','0924','0925','0931','0932','0933','0934','0940','0941','0942','0943','0973','0974']
    def awitized():
        # One "<prefix><7 digits>" number per line.
        return '\n'.join(
            f"{random.choice(prefixes)}{random.randint(1000000,9999999)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='SUN',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://static.wikia.nocookie.net/logopedia/images/7/7c/Sun_Cellular_logo.svg/revision/latest/scale-to-width-down/250?cb=20130911111111')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
##TNT##
@client.command()
async def tnt(ctx,howmany:int):
    """Send `howmany` random TNT PH mobile numbers as an embed (mirrored to a webhook)."""
    # Constant prefix list hoisted out of the loop (it was rebuilt every iteration).
    prefixes = ['0907','0909','0910','0912','0930','0938','0946','0948','0950']
    def awitized():
        # One "<prefix><7 digits>" number per line.
        return '\n'.join(
            f"{random.choice(prefixes)}{random.randint(1000000,9999999)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='TNT',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/commons/3/36/TNT_%28cellular_service%29_logo.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
##ADDRESS##
# Street-name word list, read once at startup. A context manager closes the
# file promptly (the original `open(...).read()` leaked the handle).
with open('words.txt') as _words_file:
    shit = _words_file.read().splitlines()
##NCR##
@client.command()
async def ncr(ctx,howmany:int):
    """Send `howmany` random NCR street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE with a context manager; the original re-opened
    # and re-read ncr.txt on every loop iteration and never closed the handle.
    with open('ncr.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='NATIONAL CAPITAL REGION (NCR)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## CORDILLERA ADMINISTRATIVE REGION ##
@client.command()
async def cordillera(ctx,howmany:int):
    """Send `howmany` random Cordillera street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('cordillera.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='CORDILLERA ADMINISTRATIVE REGION',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 1 (ILOCOS REGION)##
@client.command()
async def ilocosregion(ctx,howmany:int):
    """Send `howmany` random Region 1 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region1.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 1 (ILOCOS REGION)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 2 (CAGAYAN REGION)##
@client.command()
async def cagayanregion(ctx,howmany:int):
    """Send `howmany` random Region 2 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region2.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 2 (CAGAYAN REGION)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 3 (CENTRAL LUZON)##
@client.command()
async def centralluzon(ctx,howmany:int):
    """Send `howmany` random Region 3 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region3.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 3 (CENTRAL LUZON)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 4 (CALABARZON)##
@client.command()
async def calabarzon(ctx,howmany:int):
    """Send `howmany` random CALABARZON street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('calabarzon.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 4 (CALABARZON)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 4 (MIMAROPA)##
@client.command()
async def mimaropa(ctx,howmany:int):
    """Send `howmany` random MIMAROPA street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('mimaropa.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 4 (MIMAROPA)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 5 (BICOL REGION)##
@client.command()
async def bicolregion(ctx,howmany:int):
    """Send `howmany` random Region 5 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region5.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 5 (BICOL REGION)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 6 (WESTERN VISAYAS)##
@client.command()
async def westernvisayas(ctx,howmany:int):
    """Send `howmany` random Region 6 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region6.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 6 (WESTERN VISAYAS)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 7 (CENTRAL VISAYAS)##
@client.command()
async def centralvisayas(ctx,howmany:int):
    """Send `howmany` random Region 7 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region7.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 7 (CENTRAL VISAYAS)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 8 (EASTERN VISAYAS)##
@client.command()
async def easternvisayas(ctx,howmany:int):
    """Send `howmany` random Region 8 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region8.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 8 (EASTERN VISAYAS)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 9 (ZAMBOANGA PENINSULA)##
@client.command()
async def zamboanga(ctx,howmany:int):
    """Send `howmany` random Region 9 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region9.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 9 (ZAMBOANGA PENINSULA)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 10 (NORTHERN MINDANAO)##
@client.command()
async def northernmindanao(ctx,howmany:int):
    """Send `howmany` random Region 10 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region10.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 10 (NORTHERN MINDANAO)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 11 (DAVAO REGION)##
@client.command()
async def davaoregion(ctx,howmany:int):
    """Send `howmany` random Region 11 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region11.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 11 (DAVAO REGION)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 12 (Soccsksargen)##
@client.command()
async def region12(ctx,howmany:int):
    """Send `howmany` random Region 12 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle. Local name also no longer shadows the command.
    with open('region12.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 12 (Soccsksargen)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## REGION 13 (CARAGA)##
@client.command()
async def caraga(ctx,howmany:int):
    """Send `howmany` random Region 13 street addresses as an embed (mirrored to a webhook)."""
    # Read the locality list ONCE; the original re-read the file every loop
    # iteration and leaked the handle.
    with open('region13.txt') as f:
        localities = f.read().splitlines()
    def awitized():
        # One "<1-999> <word> Street <locality>" address per line.
        return '\n'.join(
            f"{random.randint(1,999)} {random.choice(shit)} Street {random.choice(localities)}"
            for _ in range(howmany)
        )
    # SECURITY: hard-coded webhook URL embeds a secret token; move it to config.
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='REGION 13 (CARAGA)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://wallpaperaccess.com/full/503514.jpg')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
##RANDOM NAME##
## FIRSTNAME##
@client.command()
async def firstname(ctx,howmany:int):
    """Send `howmany` random first names as an embed (mirrored to a webhook)."""
    def awitized():
        # One random first name per line.
        return '\n'.join(names.get_first_name() for _ in range(howmany))
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FIRST NAME',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://media.discordapp.net/attachments/814869462188556339/904842661318520882/MALEFEMALE-removebg-preview.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## FIRSTNAME MALE##
@client.command()
async def firstname_male(ctx,howmany:int):
    """Send `howmany` random male first names as an embed (mirrored to a webhook)."""
    def awitized():
        # One random male first name per line.
        return '\n'.join(names.get_first_name(gender='male') for _ in range(howmany))
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FIRST NAME (MALE)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/commons/thumb/4/4f/Mars-male-symbol-pseudo-3D-blue.svg/1200px-Mars-male-symbol-pseudo-3D-blue.svg.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## FIRSTNAME FEMALE##
@client.command()
async def firstname_female(ctx,howmany:int):
    """Send `howmany` random female first names as an embed (mirrored to a webhook)."""
    def awitized():
        # One random female first name per line.
        return '\n'.join(names.get_first_name(gender='female') for _ in range(howmany))
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FIRST NAME (FEMALE)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/commons/thumb/2/24/Venus-female-symbol-pseudo-3D-pink.svg/1200px-Venus-female-symbol-pseudo-3D-pink.svg.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
# Last-name list, read once at startup. A context manager closes the file
# promptly (the original `open(...).read()` leaked the handle).
with open('lastname.txt') as _lastname_file:
    lastname = _lastname_file.read().splitlines()
## FULLNAME##
@client.command()
async def fullname(ctx,howmany:int):
    """Send `howmany` random full names (first + last) as an embed (mirrored to a webhook)."""
    def awitized():
        # One "<first name> <last name>" per line.
        return '\n'.join(
            f"{names.get_first_name()} {random.choice(lastname)}"
            for _ in range(howmany)
        )
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FULL NAME',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://media.discordapp.net/attachments/814869462188556339/904842661318520882/MALEFEMALE-removebg-preview.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## FULLNAME MALE##
@client.command()
async def fullname_male(ctx,howmany:int):
    """Send `howmany` random male full names as an embed (mirrored to a webhook)."""
    def awitized():
        # One "<male first name> <last name>" per line.
        return '\n'.join(
            f"{names.get_first_name(gender='male')} {random.choice(lastname)}"
            for _ in range(howmany)
        )
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FULL NAME (MALE)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/commons/thumb/4/4f/Mars-male-symbol-pseudo-3D-blue.svg/1200px-Mars-male-symbol-pseudo-3D-blue.svg.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
## FULLNAME FEMALE##
@client.command()
async def fullname_female(ctx,howmany:int):
    """Send `howmany` random female full names as an embed (mirrored to a webhook)."""
    def awitized():
        # One "<female first name> <last name>" per line.
        return '\n'.join(
            f"{names.get_first_name(gender='female')} {random.choice(lastname)}"
            for _ in range(howmany)
        )
    hook = Webhook('https://discord.com/api/webhooks/890607746481782815/HHIIWq6PrYTmkfGX-buMS92CGXDfZoek-2JvyfU2kFywge5jW3OcblFar6qMjTNNhD6g')
    embed = discord.Embed(
        title='FULL NAME (FEMALE)',
        colour=discord.Color.teal(),
        description=awitized(),
    )
    embed.set_footer(text=f'Requested by {ctx.author}',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/commons/thumb/2/24/Venus-female-symbol-pseudo-3D-pink.svg/1200px-Venus-female-symbol-pseudo-3D-pink.svg.png')
    embed.set_author(name='DNA Random Bot')
    await ctx.send(embed=embed)
    hook.send(embed=embed)
##HELP##
# Replace the stock help command with a DM'd embed listing every command.
client.remove_command('help')
# NOTE: dropped deprecated `pass_context=True` (a no-op leftover from the
# discord.py 0.x API; context is always passed in 1.x+).
@client.command()
async def help(ctx):
    """DM the invoking user an embed listing all bot commands, then confirm in-channel."""
    author = ctx.message.author
    # Fixed missing space in the original help text: "-northernmindanao(value)".
    embed = discord.Embed(
        description="""**RANDOM PHONE NUMBERS:**
-globe (value)
-smart (value)
-sun (value)
-tnt (value)
**RANDOM ADDRESS:**
-ncr (value)
-cordillera (value)
-ilocosregion (value)
-cagayanregion (value)
-centralluzon (value)
-calabarzon (value)
-bicolregion (value)
-westernvisayas (value)
-centralvisayas (value)
-easternvisayas (value)
-zamboanga (value)
-northernmindanao (value)
-davaoregion (value)
-region12 (value)
-caraga (value)
**RANDOM NAME:**
-firstname (value)
-firstname_male (value)
-firstname_female (value)
-fullname (value)
-fullname_male (value)
-fullname_female (value) """
    )
    embed.colour = discord.Color.teal()
    embed.title='DNA HELP'
    embed.set_thumbnail(url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    embed.set_footer(text='Powered by DNA Solutions',icon_url='https://media.discordapp.net/attachments/814869462188556339/862375736416403496/DNA_Logo.png')
    await author.send(embed=embed)
    await ctx.send("Look at your DM's!")
# "TOKEN" is a placeholder; supply the real bot token here (ideally read
# from an environment variable rather than hard-coded) before deploying.
client.run("TOKEN")
| 47.628049
| 201
| 0.687364
| 3,653
| 31,244
| 5.83274
| 0.088694
| 0.030037
| 0.034824
| 0.032384
| 0.82433
| 0.804806
| 0.802365
| 0.768574
| 0.768574
| 0.768574
| 0
| 0.097144
| 0.173025
| 31,244
| 655
| 202
| 47.700763
| 0.727494
| 0.016291
| 0
| 0.691652
| 0
| 0.015332
| 0.385555
| 0.010146
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044293
| false
| 0.001704
| 0.017036
| 0
| 0.105622
| 0.001704
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
80ce485cd82b771ace6bf484cb0c7c81025c1390
| 42
|
py
|
Python
|
ttrw/dictionaries/__init__.py
|
ttomasz/ttrw
|
ef0418b4f9578ada38efc1d56711ba001e4466af
|
[
"MIT"
] | null | null | null |
ttrw/dictionaries/__init__.py
|
ttomasz/ttrw
|
ef0418b4f9578ada38efc1d56711ba001e4466af
|
[
"MIT"
] | null | null | null |
ttrw/dictionaries/__init__.py
|
ttomasz/ttrw
|
ef0418b4f9578ada38efc1d56711ba001e4466af
|
[
"MIT"
] | null | null | null |
from .dict_loader import languages, words
| 21
| 41
| 0.833333
| 6
| 42
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80d38f6330b28a2063db82ec476697edd0660c8f
| 157
|
py
|
Python
|
laspy/__init__.py
|
Ellon/laspy
|
ad0a1a43f4e127c2b22a8d4b1e088cad58fd21f3
|
[
"BSD-2-Clause"
] | 1
|
2020-02-26T20:55:13.000Z
|
2020-02-26T20:55:13.000Z
|
laspy/__init__.py
|
Ellon/laspy
|
ad0a1a43f4e127c2b22a8d4b1e088cad58fd21f3
|
[
"BSD-2-Clause"
] | null | null | null |
laspy/__init__.py
|
Ellon/laspy
|
ad0a1a43f4e127c2b22a8d4b1e088cad58fd21f3
|
[
"BSD-2-Clause"
] | 1
|
2020-02-26T20:55:19.000Z
|
2020-02-26T20:55:19.000Z
|
from __future__ import absolute_import
__version__ = '1.5.0'  # package version string exposed at the top level
from laspy import base
from laspy import file
from laspy import header
from laspy import util
| 17.444444
| 38
| 0.808917
| 25
| 157
| 4.72
| 0.52
| 0.305085
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.159236
| 157
| 8
| 39
| 19.625
| 0.871212
| 0
| 0
| 0
| 0
| 0
| 0.031847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
80ed44097a332bdb6c23530973c9d9c1a5844987
| 1,202
|
py
|
Python
|
conditional_statements_advanced/lab/trade_commissions.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
conditional_statements_advanced/lab/trade_commissions.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
conditional_statements_advanced/lab/trade_commissions.py
|
PetkoAndreev/Python-basics
|
a376362548380ae50c7c707551cb821547f44402
|
[
"MIT"
] | null | null | null |
city = input()
sales_quantity = float(input())
if city == 'Sofia' and 0 <= sales_quantity <= 500:
print(f'{0.05*sales_quantity:.2f}')
elif city == 'Sofia' and 500 < sales_quantity <= 1000:
print(f'{0.07*sales_quantity:.2f}')
elif city == 'Sofia' and 1000 < sales_quantity <= 10000:
print(f'{0.08*sales_quantity:.2f}')
elif city == 'Sofia' and sales_quantity > 10000:
print(f'{0.12*sales_quantity:.2f}')
elif city == 'Varna' and 0 <= sales_quantity <= 500:
print(f'{0.045*sales_quantity:.2f}')
elif city == 'Varna' and 500 < sales_quantity <= 1000:
print(f'{0.075*sales_quantity:.2f}')
elif city == 'Varna' and 1000 < sales_quantity <= 10000:
print(f'{0.1*sales_quantity:.2f}')
elif city == 'Varna' and sales_quantity > 10000:
print(f'{0.13*sales_quantity:.2f}')
elif city == 'Plovdiv' and 0 <= sales_quantity <= 500:
print(f'{0.055*sales_quantity:.2f}')
elif city == 'Plovdiv' and 500 < sales_quantity <= 1000:
print(f'{0.08*sales_quantity:.2f}')
elif city == 'Plovdiv' and 1000 < sales_quantity <= 10000:
print(f'{0.12*sales_quantity:.2f}')
elif city == 'Plovdiv' and sales_quantity > 10000:
print(f'{0.145*sales_quantity:.2f}')
else:
print('error')
| 42.928571
| 58
| 0.65807
| 186
| 1,202
| 4.11828
| 0.16129
| 0.424282
| 0.109661
| 0.272846
| 0.881201
| 0.881201
| 0.881201
| 0.511749
| 0.214099
| 0.130548
| 0
| 0.123894
| 0.15391
| 1,202
| 28
| 59
| 42.928571
| 0.629302
| 0
| 0
| 0.142857
| 0
| 0
| 0.312552
| 0.25187
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.464286
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
03c1e7127a155d14ddbc00cb0cf8d627d6c34ab2
| 106
|
py
|
Python
|
smart_recruiting_server/conf/environments/staging.py
|
mohseenrm/smart_recruiting_server
|
ac73c727b02d8f0c9d630d8bf867ed28a351e671
|
[
"MIT"
] | null | null | null |
smart_recruiting_server/conf/environments/staging.py
|
mohseenrm/smart_recruiting_server
|
ac73c727b02d8f0c9d630d8bf867ed28a351e671
|
[
"MIT"
] | 81
|
2019-06-17T20:09:28.000Z
|
2021-08-02T13:15:38.000Z
|
smart_recruiting_server/conf/environments/staging.py
|
mohseenrm/smart_recruiting_server
|
ac73c727b02d8f0c9d630d8bf867ed28a351e671
|
[
"MIT"
] | null | null | null |
from smart_recruiting_server.conf.environments.base import BaseConfig
class Config(BaseConfig):
    """Environment-specific configuration; inherits every setting from BaseConfig unchanged."""
    pass
| 21.2
| 69
| 0.830189
| 13
| 106
| 6.615385
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 106
| 4
| 70
| 26.5
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
03d244e644776ec40444dcc302aab13971a27de8
| 2,986
|
py
|
Python
|
previous/week7/discreteTimeModels.py
|
code-lab-org/sys611
|
3b8c46788dee629a9f2d6b7f84373e041b918ff0
|
[
"MIT"
] | 3
|
2021-04-07T03:52:07.000Z
|
2022-03-04T18:16:16.000Z
|
previous/week7/discreteTimeModels.py
|
code-lab-org/sys611
|
3b8c46788dee629a9f2d6b7f84373e041b918ff0
|
[
"MIT"
] | null | null | null |
previous/week7/discreteTimeModels.py
|
code-lab-org/sys611
|
3b8c46788dee629a9f2d6b7f84373e041b918ff0
|
[
"MIT"
] | 6
|
2021-02-12T01:57:23.000Z
|
2022-03-04T18:05:27.000Z
|
"""
SYS-611 Discrete Time Models.
@author: Paul T. Grogan, pgrogan@stevens.edu
"""
# import the python3 behavior for importing, division, and printing in python2
from __future__ import absolute_import, division, print_function
# import the matplotlib pyplot package and refer to it as `plt`
# see http://matplotlib.org/api/pyplot_api.html for documentation
import matplotlib.pyplot as plt
#%% delay system example
# define the input trajectory (one value per time tick, ticks 0..8)
x = [1,1,0,0,1,0,0,0,1]
# state update function: the next state is simply the current input
def _delta(q, x):
    return x
# output function: depends on the current input (Mealy-style), so the
# output here mirrors the input directly
def _lambda(q, x):
    return x
# output trajectory (9 ticks) and state trajectory
# (10 slots: one extra to hold the state after the final update)
y = [0,0,0,0,0,0,0,0,0]
q = [0,0,0,0,0,0,0,0,0,0]
# initialize the simulation clock and the initial state
t = 0
q[0] = 0
# execute the simulation for ticks 0..8
while t <= 8:
    # record output value
    y[t] = _lambda(q[t], x[t])
    # record state update
    q[t+1] = _delta(q[t], x[t])
    # advance time
    t += 1
# plot input, state (dropping the extra trailing slot), and output
plt.figure()
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
ax1.bar(range(9), x, color='k')
ax1.set_ylabel('Input ($x$)')
ax2.bar(range(9), q[:-1], color='k')
ax2.set_ylabel('State ($q$)')
ax3.bar(range(9), y, color='k')
ax3.set_ylabel('Output ($y$)')
plt.xlabel('Time (ticks)')
plt.suptitle('Delay System Model')
#%% binary counter example
# define the input trajectory (one value per time tick, ticks 0..8)
x = [1,1,0,0,1,0,0,0,1]
# state update function: next state is True when state and input differ
# (an XOR, i.e. the state toggles on each 1 input)
def _delta(q, x):
    return q != x
# output function: logical AND of current state and input
def _lambda(q, x):
    return q and x
# output trajectory (9 ticks) and state trajectory
# (10 slots: one extra to hold the state after the final update)
y = [0,0,0,0,0,0,0,0,0]
q = [0,0,0,0,0,0,0,0,0,0]
# initialize the simulation clock and the initial state
t = 0
q[0] = 0
# execute the simulation for ticks 0..8
while t <= 8:
    # record output value
    y[t] = _lambda(q[t], x[t])
    # record state update
    q[t+1] = _delta(q[t], x[t])
    # advance time
    t += 1
# plot input, state (dropping the extra trailing slot), and output
plt.figure()
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
ax1.bar(range(9), x, color='k')
ax1.set_ylabel('Input ($x$)')
ax2.bar(range(9), q[:-1], color='k')
ax2.set_ylabel('State ($q$)')
ax3.bar(range(9), y, color='k')
ax3.set_ylabel('Output ($y$)')
plt.xlabel('Time (ticks)')
plt.suptitle('Binary Counter Model')
#%% delay flip-flop example
# define the input trajectory (one value per time tick, ticks 0..8)
x = [1,1,0,0,1,0,0,0,1]
# state update function: the next state is the current input
def _delta(q, x):
    return x
# output function: depends only on the state (Moore-style), so the
# output is the input delayed by one tick
def _lambda(q):
    return q
# output trajectory (9 ticks) and state trajectory
# (10 slots: one extra to hold the state after the final update)
y = [0,0,0,0,0,0,0,0,0]
q = [0,0,0,0,0,0,0,0,0,0]
# initialize the simulation clock and the initial state
t = 0
q[0] = 0
# execute the simulation for ticks 0..8
while t <= 8:
    # record output value
    y[t] = _lambda(q[t])
    # record state update
    q[t+1] = _delta(q[t], x[t])
    # advance time
    t += 1
# plot input, state (dropping the extra trailing slot), and output
plt.figure()
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
ax1.bar(range(9), x, color='k')
ax1.set_ylabel('Input ($x$)')
ax2.bar(range(9), q[:-1], color='k')
ax2.set_ylabel('State ($q$)')
ax3.bar(range(9), y, color='k')
ax3.set_ylabel('Output ($y$)')
plt.xlabel('Time (ticks)')
plt.suptitle('Delay Flip-Flop Model')
| 22.118519
| 78
| 0.634963
| 542
| 2,986
| 3.446494
| 0.171587
| 0.067452
| 0.077088
| 0.083512
| 0.79015
| 0.79015
| 0.79015
| 0.79015
| 0.79015
| 0.79015
| 0
| 0.061066
| 0.182853
| 2,986
| 135
| 79
| 22.118519
| 0.704508
| 0.350301
| 0
| 0.859155
| 0
| 0
| 0.108364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0.028169
| 0.084507
| 0.197183
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
20a72d76886e1d147bd8695fae0ef21419d26ee9
| 3,158
|
py
|
Python
|
src/bench/bench_counters.py
|
JohanSmet/lsim
|
144d86a68e183436db7d9364d1220580404a53c6
|
[
"BSD-3-Clause"
] | 7
|
2020-09-17T11:26:47.000Z
|
2022-03-13T19:20:49.000Z
|
src/bench/bench_counters.py
|
JohanSmet/lsim
|
144d86a68e183436db7d9364d1220580404a53c6
|
[
"BSD-3-Clause"
] | null | null | null |
src/bench/bench_counters.py
|
JohanSmet/lsim
|
144d86a68e183436db7d9364d1220580404a53c6
|
[
"BSD-3-Clause"
] | 3
|
2020-09-17T11:26:52.000Z
|
2022-03-21T19:46:45.000Z
|
#!/usr/bin/env python3
import lsimpy
from bench_utils import *
def cycle_clock(sim, circuit):
    """Pulse the circuit's Clk port high then low, letting the
    simulation settle after each edge."""
    for clock_level in (lsimpy.ValueTrue, lsimpy.ValueFalse):
        circuit.write_port("Clk", clock_level)
        sim.run_until_stable(2)
def test_bin_counter_4b(lsim):
    """Exercise the 'BinCounter 4b' circuit: reset it, clock through all
    16 counts checking the RCO (ripple-carry-out) port, then parallel-load
    a value and increment it once."""
    print("*** running BinCounter 4b")
    sim = lsim.sim()
    circuit_desc = lsim.user_library().circuit_by_name('BinCounter 4b')
    # look up the 4 data-input (D) and 4 output (Y) pins by name
    pins_D = [circuit_desc.port_by_name(f"D[{i:}]") for i in range(0,4)]
    pins_Y = [circuit_desc.port_by_name(f"Y[{i:}]") for i in range(0,4)]
    circuit = circuit_desc.instantiate(sim)
    sim.init()
    # drive all control inputs to a known state with reset asserted
    circuit.write_nibble(pins_D, 0)
    circuit.write_port("Load", lsimpy.ValueFalse)
    circuit.write_port("Clk", lsimpy.ValueFalse)
    circuit.write_port("Res", lsimpy.ValueTrue)
    circuit.write_port("En", lsimpy.ValueTrue)
    sim.run_until_stable(2)
    # release reset and verify the counter starts at zero
    circuit.write_port("Res", lsimpy.ValueFalse)
    sim.run_until_stable(2)
    CHECK(circuit.read_nibble(pins_Y), 0, "reset")
    # clock through every remaining count; RCO must be true only at the
    # maximum count (15)
    for i in range(1, 2**4):
        cycle_clock(sim, circuit)
        CHECK(circuit.read_nibble(pins_Y), i, "clock cycle")
        CHECK(circuit.read_port("RCO"), i == (2**4)-1, "")
    # parallel-load the value 5 and confirm it appears on the outputs
    circuit.write_nibble(pins_D, 5)
    circuit.write_port("Load", lsimpy.ValueTrue)
    sim.run_until_stable(2)
    circuit.write_port("Load", lsimpy.ValueFalse)
    sim.run_until_stable(2)
    CHECK(circuit.read_nibble(pins_Y), 5, "after load")
    # one more clock pulse should increment the loaded value
    cycle_clock(sim, circuit)
    CHECK(circuit.read_nibble(pins_Y), 6, "increment")
def test_bin_counter_8b(lsim):
    """Exercise the 'BinCounter 8b' circuit: reset it, clock through all
    256 counts checking the RCO (ripple-carry-out) port, then
    parallel-load a value and increment it once."""
    print("*** running BinCounter 8b")
    sim = lsim.sim()
    circuit_desc = lsim.user_library().circuit_by_name('BinCounter 8b')
    # look up the 8 data-input (D) and 8 output (Y) pins by name
    pins_D = [circuit_desc.port_by_name(f"D[{i:}]") for i in range(0,8)]
    pins_Y = [circuit_desc.port_by_name(f"Y[{i:}]") for i in range(0,8)]
    circuit = circuit_desc.instantiate(sim)
    sim.init()
    # drive all control inputs to a known state with reset asserted
    circuit.write_byte(pins_D, 0)
    circuit.write_port("Load", lsimpy.ValueFalse)
    circuit.write_port("Clk", lsimpy.ValueFalse)
    circuit.write_port("Res", lsimpy.ValueTrue)
    circuit.write_port("En", lsimpy.ValueTrue)
    sim.run_until_stable(2)
    # release reset and verify the counter starts at zero
    circuit.write_port("Res", lsimpy.ValueFalse)
    sim.run_until_stable(2)
    CHECK(circuit.read_byte(pins_Y), 0, "reset")
    # clock through every remaining count; RCO must be true only at the
    # maximum count (255)
    for i in range(1, 2**8):
        cycle_clock(sim, circuit)
        CHECK(circuit.read_byte(pins_Y), i, "clock cycle")
        CHECK(circuit.read_port("RCO"), i == (2**8)-1, "")
    # parallel-load the value 5 and confirm it appears on the outputs
    circuit.write_byte(pins_D, 5)
    circuit.write_port("Load", lsimpy.ValueTrue)
    sim.run_until_stable(2)
    circuit.write_port("Load", lsimpy.ValueFalse)
    sim.run_until_stable(2)
    CHECK(circuit.read_byte(pins_Y), 5, "after load")
    # one more clock pulse should increment the loaded value
    cycle_clock(sim, circuit)
    CHECK(circuit.read_byte(pins_Y), 6, "increment")
def main():
    """Load the counter example library and run both counter benches."""
    lsim = lsimpy.LSimContext()
    lsim.add_folder("examples", "../../examples")
    if not lsim.load_user_library("examples/cpu_8bit/lib_counter.lsim"):
        print("Unable to load circuit\n")
        # Raise SystemExit directly instead of calling exit(): the exit()
        # builtin is injected by the optional `site` module and is not
        # guaranteed to exist (e.g. under `python -S`).
        raise SystemExit(-1)
    test_bin_counter_4b(lsim)
    test_bin_counter_8b(lsim)
    print_stats()

if __name__ == "__main__":
    main()
| 32.22449
| 74
| 0.674478
| 471
| 3,158
| 4.267516
| 0.165605
| 0.119403
| 0.127363
| 0.084577
| 0.859204
| 0.795025
| 0.768657
| 0.749751
| 0.749751
| 0.699005
| 0
| 0.019563
| 0.174478
| 3,158
| 98
| 75
| 32.22449
| 0.751438
| 0.00665
| 0
| 0.460526
| 0
| 0
| 0.102008
| 0.010838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.026316
| 0
| 0.078947
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
459fe0d93752805ef952c32cdfc1a044a1280650
| 40
|
py
|
Python
|
eod/historical_prices/stock_price_data_api/__init__.py
|
gereon/eod-data
|
4286a03cc08bc8b5dc42ebae0bb8eb22bdfa3230
|
[
"Apache-2.0"
] | 19
|
2021-09-18T11:31:45.000Z
|
2022-03-15T20:03:52.000Z
|
eod/historical_prices/stock_price_data_api/__init__.py
|
gereon/eod-data
|
4286a03cc08bc8b5dc42ebae0bb8eb22bdfa3230
|
[
"Apache-2.0"
] | 2
|
2022-02-18T23:37:48.000Z
|
2022-03-01T18:14:06.000Z
|
eod/historical_prices/stock_price_data_api/__init__.py
|
gereon/eod-data
|
4286a03cc08bc8b5dc42ebae0bb8eb22bdfa3230
|
[
"Apache-2.0"
] | 8
|
2021-09-13T16:49:52.000Z
|
2022-03-31T21:09:44.000Z
|
from .stock_prices import StockPriceData
| 40
| 40
| 0.9
| 5
| 40
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45b70cb476eb7ce75e5e28f019c0947ad832b1e6
| 79
|
py
|
Python
|
search/test/test.py
|
fbennets/HCLC-GDPR-Bot
|
a26306e6593d8356a7a58dce32090ca21f30ac29
|
[
"MIT"
] | 1
|
2021-06-04T15:57:11.000Z
|
2021-06-04T15:57:11.000Z
|
search/test/test.py
|
fbennets/HCLC-GDPR-Bot
|
a26306e6593d8356a7a58dce32090ca21f30ac29
|
[
"MIT"
] | 175
|
2020-06-10T23:33:08.000Z
|
2021-12-26T10:35:51.000Z
|
search/test/test.py
|
fbennets/HCLC-GDPR-Bot
|
a26306e6593d8356a7a58dce32090ca21f30ac29
|
[
"MIT"
] | 2
|
2020-06-12T15:11:20.000Z
|
2021-06-13T10:37:35.000Z
|
from datenanfragen import search_company

# Smoke test: query a nonsense company name, requesting up to 10 results,
# and dump whatever comes back.
results = search_company("n62 gbhm", 10)
print(results)
| 39.5
| 41
| 0.822785
| 11
| 79
| 5.727273
| 0.818182
| 0.412698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.088608
| 79
| 2
| 42
| 39.5
| 0.819444
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
afd783f4ba77640a6cb1abf30979d80cd56251b1
| 12,162
|
py
|
Python
|
lib/turkish_nltk/trnltk/morphology/contextless/parser/test/test_rootfinders.py
|
myasiny/wordembed
|
d4df516a4ac6eed71d1cc6e085638e895c525de6
|
[
"MIT"
] | null | null | null |
lib/turkish_nltk/trnltk/morphology/contextless/parser/test/test_rootfinders.py
|
myasiny/wordembed
|
d4df516a4ac6eed71d1cc6e085638e895c525de6
|
[
"MIT"
] | null | null | null |
lib/turkish_nltk/trnltk/morphology/contextless/parser/test/test_rootfinders.py
|
myasiny/wordembed
|
d4df516a4ac6eed71d1cc6e085638e895c525de6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from hamcrest import *
from mock import Mock
from trnltk.morphology.model.lexeme import SecondarySyntacticCategory, SyntacticCategory
from trnltk.morphology.contextless.parser.rootfinder import DigitNumeralRootFinder, ProperNounFromApostropheRootFinder, ProperNounWithoutApostropheRootFinder, WordRootFinder, TextNumeralRootFinder
class WordRootFinderTest(unittest.TestCase):
    """Tests for WordRootFinder against a stubbed lexeme map."""

    def setUp(self):
        # four mock lexemes: all NOUN except mock_lexeme2_2, which is
        # NUMERAL — WordRootFinder is expected to filter that one out
        mock_lexeme1_1 = Mock()
        mock_lexeme1_2 = Mock()
        mock_lexeme2_1 = Mock()
        mock_lexeme2_2 = Mock()
        mock_lexeme1_1.syntactic_category = SyntacticCategory.NOUN
        mock_lexeme1_2.syntactic_category = SyntacticCategory.NOUN
        mock_lexeme2_1.syntactic_category = SyntacticCategory.NOUN
        mock_lexeme2_2.syntactic_category = SyntacticCategory.NUMERAL
        # mock roots, each wired to one of the lexemes above
        self.mock_root1_1 = Mock()
        self.mock_root1_2 = Mock()
        self.mock_root2_1 = Mock()
        self.mock_root2_2 = Mock()
        self.mock_root1_1.lexeme = mock_lexeme1_1
        self.mock_root1_2.lexeme = mock_lexeme1_2
        self.mock_root2_1.lexeme = mock_lexeme2_1
        self.mock_root2_2.lexeme = mock_lexeme2_2
        lexeme_map = {u'root1' : [self.mock_root1_1, self.mock_root1_2], u'root2': [self.mock_root2_1, self.mock_root2_2]}
        self.root_finder = WordRootFinder(lexeme_map)

    def test_should_find_roots(self):
        # 'root1' maps to two NOUN roots: both are returned
        roots = self.root_finder.find_roots_for_partial_input(u"root1")
        assert_that(roots, has_length(2))
        assert_that(roots, has_items(self.mock_root1_1, self.mock_root1_2))
        # 'root2' maps to one NOUN and one NUMERAL root: only the NOUN survives
        roots = self.root_finder.find_roots_for_partial_input(u"root2")
        assert_that(roots, has_length(1))
        assert_that(roots, has_items(self.mock_root2_1))
        # unknown input yields no roots
        roots = self.root_finder.find_roots_for_partial_input(u"UNDEFINED")
        assert_that(roots, has_length(0))
class TextNumeralRootFinderTest(unittest.TestCase):
    """Tests for TextNumeralRootFinder against a stubbed lexeme map
    (mirror image of WordRootFinderTest: here NUMERAL roots survive)."""

    def setUp(self):
        # four mock lexemes: all NUMERAL except mock_lexeme2_2, which is
        # NOUN — TextNumeralRootFinder is expected to filter that one out
        mock_lexeme1_1 = Mock()
        mock_lexeme1_2 = Mock()
        mock_lexeme2_1 = Mock()
        mock_lexeme2_2 = Mock()
        mock_lexeme1_1.syntactic_category = SyntacticCategory.NUMERAL
        mock_lexeme1_2.syntactic_category = SyntacticCategory.NUMERAL
        mock_lexeme2_1.syntactic_category = SyntacticCategory.NUMERAL
        mock_lexeme2_2.syntactic_category = SyntacticCategory.NOUN
        # mock roots, each wired to one of the lexemes above
        self.mock_root1_1 = Mock()
        self.mock_root1_2 = Mock()
        self.mock_root2_1 = Mock()
        self.mock_root2_2 = Mock()
        self.mock_root1_1.lexeme = mock_lexeme1_1
        self.mock_root1_2.lexeme = mock_lexeme1_2
        self.mock_root2_1.lexeme = mock_lexeme2_1
        self.mock_root2_2.lexeme = mock_lexeme2_2
        lexeme_map = {u'root1' : [self.mock_root1_1, self.mock_root1_2], u'root2': [self.mock_root2_1, self.mock_root2_2]}
        self.root_finder = TextNumeralRootFinder(lexeme_map)

    def test_should_find_roots(self):
        # 'root1' maps to two NUMERAL roots: both are returned
        roots = self.root_finder.find_roots_for_partial_input(u"root1")
        assert_that(roots, has_length(2))
        assert_that(roots, has_items(self.mock_root1_1, self.mock_root1_2))
        # 'root2' maps to one NUMERAL and one NOUN root: only the NUMERAL survives
        roots = self.root_finder.find_roots_for_partial_input(u"root2")
        assert_that(roots, has_length(1))
        assert_that(roots, has_items(self.mock_root2_1))
        # unknown input yields no roots
        roots = self.root_finder.find_roots_for_partial_input(u"UNDEFINED")
        assert_that(roots, has_length(0))
class DigitNumeralRootFinderTest(unittest.TestCase):
    """DigitNumeralRootFinder must recognize digit-based numerals and
    echo the input text back as the root."""

    def setUp(self):
        self.root_finder = DigitNumeralRootFinder()

    def test_should_recognize_number_roots(self):
        # plain digits, signed values, comma decimals, and dot-grouped
        # thousands must all be recognized verbatim
        number_inputs = [
            u'3',
            u'0',
            u'-1',
            u'+3',
            u'3,5',
            u'-999999999999,12345678901',
            u'+2.999.999.999.999,12345678901',
        ]
        for number_input in number_inputs:
            roots = self.root_finder.find_roots_for_partial_input(number_input)
            assert_that(roots[0].str, equal_to(number_input))
class ProperNounFromApostropheRootFinderTest(unittest.TestCase):
    """ProperNounFromApostropheRootFinder handles words terminated by an
    apostrophe: all-uppercase words become abbreviations, capitalized
    words become proper nouns, and everything else is rejected."""

    def setUp(self):
        self.root_finder = ProperNounFromApostropheRootFinder()

    def _find(self, partial_input):
        return self.root_finder.find_roots_for_partial_input(partial_input)

    def _check_single_root(self, word, expected_category):
        # the recognized root text is the word without its trailing apostrophe
        roots = self._find(word + u"'")
        assert_that(roots[0].str, equal_to(word))
        assert_that(roots[0].lexeme.secondary_syntactic_category, equal_to(expected_category))

    def test_should_recognize_abbreviations(self):
        # uppercase words (with or without digits, including Turkish
        # uppercase letters) are abbreviations
        for abbreviation in [u'TR', u'MB', u'POL', u'KAFA1500', u'1500KAFA', u'İŞÇĞÜÖ']:
            self._check_single_root(abbreviation, SecondarySyntacticCategory.ABBREVIATION)
        # digits alone are not an abbreviation
        assert_that(self._find(u"123'"), has_length(0))

    def test_should_recognize_proper_nouns(self):
        # capitalized words (including Turkish uppercase initials and
        # embedded digits/mixed case) are proper nouns
        for proper_noun in [u'Ahmet', u'Mehmed', u'A123a', u'AvA', u'AAxxAA',
                            u'İstanbul', u'Çanakkale', u'Ömer', u'Şaban', u'Ümmühan']:
            self._check_single_root(proper_noun, SecondarySyntacticCategory.PROPER_NOUN)
        # lowercase-initial or digit-initial words are rejected
        for rejected_input in [u"aaa'", u"aAAAA'", u"1aa'", u"a111'", u"şaa'"]:
            assert_that(self._find(rejected_input), has_length(0))
class ProperNounWithoutApostropheRootFinderTest(unittest.TestCase):
    """ProperNounWithoutApostropheRootFinder recognizes capitalized words
    as proper-noun roots given the whole surface form, without requiring
    an apostrophe."""

    def setUp(self):
        self.root_finder = ProperNounWithoutApostropheRootFinder()

    def test_should_recognize_proper_nouns(self):
        # (partial input, whole surface) pairs that must each yield one
        # proper-noun root whose text equals the partial input
        recognized_cases = [
            (u"A", u"Ali"),
            (u"Al", u"Ali"),
            (u"Ali", u"Ali"),
            (u"Ali8", u"Ali8912"),
        ]
        for partial_input, whole_surface in recognized_cases:
            roots = self.root_finder.find_roots_for_partial_input(partial_input, whole_surface)
            assert_that(roots[0].str, equal_to(partial_input))
            assert_that(roots[0].lexeme.secondary_syntactic_category,
                        equal_to(SecondarySyntacticCategory.PROPER_NOUN))

    def test_should_not_recognize_proper_nouns_when_the_input_is_not(self):
        # apostrophes in the surface form, lowercase words, and
        # digit-initial words must all be rejected
        rejected_cases = [
            (u"A", u"Ali'nin"),
            (u"Al", u"Ali'nin"),
            (u"Ali", u"Ali'nin"),
            (u"Ali8", u"Ali8912'nin"),
            (u"a", u"aa"),
            (u"Ali'nin", u"Ali'nin"),
            (u"123A", u"123A"),
        ]
        for partial_input, whole_surface in rejected_cases:
            roots = self.root_finder.find_roots_for_partial_input(partial_input, whole_surface)
            assert_that(roots, has_length(0))
# run the whole suite when this module is executed directly
if __name__ == '__main__':
    unittest.main()
| 45.721805
| 196
| 0.735816
| 1,686
| 12,162
| 4.961447
| 0.100237
| 0.083682
| 0.125523
| 0.089898
| 0.847101
| 0.845786
| 0.785057
| 0.777047
| 0.704244
| 0.691811
| 0
| 0.033552
| 0.159431
| 12,162
| 265
| 197
| 45.89434
| 0.78431
| 0.047936
| 0
| 0.469274
| 0
| 0
| 0.044421
| 0.009507
| 0
| 0
| 0
| 0
| 0.391061
| 1
| 0.067039
| false
| 0
| 0.027933
| 0
| 0.122905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b3012d106372b72e9888ee946ff09fafe9394bf1
| 163
|
py
|
Python
|
terra_sdk/util/converter.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | null | null | null |
terra_sdk/util/converter.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | null | null | null |
terra_sdk/util/converter.py
|
fabio-nukui/terra.py
|
adee2e1abf41a05a1c39d52b664bd7cf7c9bc975
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def to_isoformat(dt: datetime) -> str:
    """Format ``dt`` as an ISO 8601 / RFC 3339 timestamp with a ``Z`` suffix.

    Non-zero milliseconds are kept (e.g. ``...T12:30:45.500Z``); a zero
    fractional part is dropped entirely (``...T12:30:45Z``).

    Note: only a UTC offset rendered as ``+00:00`` is normalized to ``Z``;
    a naive or non-UTC ``dt`` passes through with its own (or no) offset.
    """
    # isoformat() renders a UTC offset as "+00:00"; normalize it to "Z",
    # then strip a zero millisecond field INCLUDING its leading dot.
    # (The previous replace("000Z", "Z") left a dangling dot, producing
    # invalid timestamps like "...T00:00:00.Z".)
    return dt.isoformat(timespec="milliseconds").replace("+00:00", "Z").replace(".000Z", "Z")
| 32.6
| 92
| 0.705521
| 22
| 163
| 5.181818
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048276
| 0.110429
| 163
| 5
| 92
| 32.6
| 0.737931
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
b318b9b7d5fc786b6110867d33de9ddb143886a2
| 30
|
py
|
Python
|
app/__init__.py
|
matheusnalmeida/Sistema-de-transporte-de-passageiros
|
e7c67586af0f814def990690a8389ca90d64fba0
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
matheusnalmeida/Sistema-de-transporte-de-passageiros
|
e7c67586af0f814def990690a8389ca90d64fba0
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
matheusnalmeida/Sistema-de-transporte-de-passageiros
|
e7c67586af0f814def990690a8389ca90d64fba0
|
[
"MIT"
] | null | null | null |
from app.app import create_app
| 30
| 30
| 0.866667
| 6
| 30
| 4.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2fd8c746105bb3cdbdbf4a26ea7db1d91dd65b0a
| 27,327
|
py
|
Python
|
messengerext/gallery/tests.py
|
groupsome/groupsome
|
4edcf30d66ff458c4df37d3198ef187219a768d7
|
[
"MIT"
] | 6
|
2016-10-07T13:43:17.000Z
|
2017-10-07T22:34:44.000Z
|
messengerext/gallery/tests.py
|
groupsome/groupsome
|
4edcf30d66ff458c4df37d3198ef187219a768d7
|
[
"MIT"
] | null | null | null |
messengerext/gallery/tests.py
|
groupsome/groupsome
|
4edcf30d66ff458c4df37d3198ef187219a768d7
|
[
"MIT"
] | 1
|
2020-07-15T04:29:31.000Z
|
2020-07-15T04:29:31.000Z
|
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import resolve, reverse
from django.test.client import Client
from django.contrib.auth.models import User
from home import models
from bot.models import TelegramUser
from gallery.models import Album
from gallery import queries
from groups.tests import create_user, create_group, create_photo, create_album
import mock
import json
class TestAlbumsOverview(TestCase):
    """View tests for the per-group album overview page: listing,
    album creation, album deletion, and admin-permission checks."""

    user = None
    group = None
    album = None

    def create_album_and_photos(self, user):
        # fixture: two photos in self.group collected into one album
        photo = models.Photo.create_and_save(user=self.user, group=self.group,
                                             timestamp="2016-05-25 12:59:10",
                                             file="1.jpg",
                                             thumbnail="TODO")
        photo_2 = models.Photo.create_and_save(user=self.user, group=self.group,
                                               timestamp="2016-05-25 12:59:10",
                                               file="2.jpg",
                                               thumbnail="TODO")
        self.album = Album.create_and_save(name='Best of Croatia', description='only the best', group=self.group)
        self.album.photos.add(photo)
        self.album.photos.add(photo_2)

    def create_empty_album(self, user):
        # fixture: an album with no photos attached
        self.album = Album.create_and_save(name='Best of Croatia', description='only the best', group=self.group)

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        # a group member without admin rights, used for permission tests
        self.unprivileged_user = create_user("Unprivileged", "unpriv@test.test")
        self.group = create_group(self.user, is_admin=True, users=[self.unprivileged_user])

    def test_gallery_shows_template(self):
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_overview', kwargs={"group_id": self.group.id}))
        self.assertTemplateUsed(response=response, template_name='gallery/group/overview.html')

    def test_gallery_returns_album(self):
        self.create_album_and_photos(self.user)
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_album', kwargs={"group_id": self.group.id,
                                                                         "album_id": self.album.id}))
        self.assertEquals(response.context['album'], self.album)

    def test_gallery_counts_photos_in_an_album(self):
        self.create_album_and_photos(self.user)
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_overview', kwargs={"group_id": self.group.id}))
        self.assertEquals(response.context['albums'][0]['photo_count'], 2)

    def test_gallery_serves_media_url_for_title_photo(self):
        self.create_album_and_photos(self.user)
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_overview', kwargs={"group_id": self.group.id}))
        # the album's title photo must be served from the media root
        self.assertTrue(response.context['albums'][0]['photo_file'].find('/media/photo/1') != -1)

    def test_gallery_uses_placeholder_for_empty_albums(self):
        self.create_empty_album(self.user)
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_overview', kwargs={"group_id": self.group.id}))
        # an album without photos falls back to the static placeholder image
        self.assertEquals(response.context['albums'][0]['photo_file'], '/static/img/add-pictures.jpg')

    @mock.patch("gallery.models.Album.create_and_save")
    def test_create_album_redirect(self, create_and_save):
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'new_album': 'true', 'album_group': '1', 'album_name': 'Awesome Album',
             'album_description': 'this is a album description'},
            follow=True)
        self.assertRedirects(response, reverse('groups:photo_overview', kwargs={"group_id": self.group.id}))

    @mock.patch("gallery.models.Album.create_and_save")
    def test_create_album_unprivileged(self, create_and_save):
        # a non-admin group member may not create albums
        self.create_album_and_photos(user=self.user)
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'new_album': 'true', 'album_group': self.group.id, 'album_name': 'Awesome Album',
             'album_description': 'this is a album description'},
            follow=True)
        self.assertEquals(403, response.status_code)

    @mock.patch("gallery.models.Album.create_and_save")
    def test_create_album_works_with_valid_input(self, create_and_save):
        self.create_album_and_photos(user=self.user)
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'new_album': 'true', 'album_group': self.group.id, 'album_name': 'Awesome Album',
             'album_description': 'this is a album description'},
            follow=True)
        create_and_save.assert_called()

    @mock.patch("gallery.models.Album.create_and_save")
    def test_no_album_created_with_too_short_name(self, create_and_save):
        # a one-character album name must fail validation
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'new_album': 'true', 'album_group': '1', 'album_name': 'A',
             'album_description': 'this is a album description'},
            follow=True)
        create_and_save.assert_not_called()

    @mock.patch("gallery.models.Album.create_and_save")
    def test_no_album_created_without_description(self, create_and_save):
        # an empty description must fail validation
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'new_album': 'true', 'album_group': '1', 'album_name': 'A',
             'album_description': ''},
            follow=True)
        create_and_save.assert_not_called()

    @mock.patch("gallery.models.Album.delete", )
    def test_delete_album_unprivileged(self, delete):
        # a non-admin group member may not delete albums
        self.create_album_and_photos(user=self.user)
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'delete_album': 'true', 'album_id': self.album.id},
            follow=True)
        self.assertEquals(403, response.status_code)

    @mock.patch("gallery.models.Album.delete", )
    def test_delete_album_works(self, delete):
        self.create_album_and_photos(user=self.user)
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'delete_album': 'true', 'album_id': self.album.id},
            follow=True)
        delete.assert_called()

    @mock.patch("gallery.models.Album.delete")
    def test_delete_album_is_forbidden_when_album_is_wrong(self, delete):
        # a nonexistent album id must not trigger a delete
        self.client.force_login(user=self.user)
        response = self.client.post(
            reverse('groups:photo_overview', kwargs={"group_id": self.group.id}),
            {'delete_album': 'true', 'album_id': '-1'},
            follow=True)
        delete.assert_not_called()
class TestPhotoAlbum(TestCase):
    """View tests for the album detail page, including access control
    across groups the user does not belong to."""

    group = None
    group2 = None
    album = None
    album2 = None

    def create_user(self):
        user = User.objects.create_user('Superuser',
                                        'superuser@super.com',
                                        'Password')
        user.save()
        TelegramUser.create_and_save(user=user, telegram_id=1)
        return user

    def create_album_and_photos(self, user):
        # self.group/self.album belong to self.user;
        # self.group2/self.album2 belong to a different group entirely
        self.group = models.Group.create_and_save(name="Croatia 2016",
                                                  picture="", description="abc", telegram_id=3)
        self.group.users.add(self.user)
        self.group2 = models.Group.create_and_save(name="Croatia 2016",
                                                   picture="", description="abc", telegram_id=4)
        photo = models.Photo.create_and_save(user=self.user, group=self.group,
                                             timestamp="2016-05-25 12:59:10",
                                             file="1.jpg",
                                             thumbnail="TODO")
        photo_2 = models.Photo.create_and_save(user=self.user, group=self.group,
                                               timestamp="2016-05-25 12:59:10",
                                               file="2.jpg",
                                               thumbnail="TODO")
        self.album = Album.create_and_save(name='Best of Croatia', description='only the best', group=self.group)
        self.album.photos.add(photo)
        self.album.photos.add(photo_2)
        user2 = User.objects.create_user('user2',
                                         'user2@super.com',
                                         'Password')
        user2.save()
        self.album2 = Album.create_and_save(name='Not this album', description='never', group=self.group2)

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = self.create_user()
        self.create_album_and_photos(self.user)

    def test_album_detail_uses_template(self):
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_album', kwargs={"group_id": self.group.id,
                                                                         "album_id": self.album.id}))
        self.assertTemplateUsed(response=response, template_name='gallery/group/album.html')

    def test_album_from_other_user_is_not_accessible(self):
        # self.user is not a member of group2, so its album is forbidden
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_album', kwargs={"group_id": self.group2.id,
                                                                         "album_id": self.album2.id}))
        self.assertEquals(response.status_code, 403)

    def test_view_returns_pictures(self):
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_album', kwargs={"group_id": self.group.id,
                                                                         "album_id": self.album.id}))
        self.assertEquals(len(response.context['photos']), 2)

    def test_view_returns_only_other_albums_than_itself(self):
        # the sidebar album list must exclude the album being viewed
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('groups:photo_album', kwargs={"group_id": self.group.id,
                                                                         "album_id": self.album.id}))
        self.assertEquals(len(response.context['albums']), 0)
class TestAddPhotoView(TestCase):
    """Tests for POST ``/gallery/<group>/<album>/add/<photo>``.

    Bug fixed: the "already in album" test used to be a second definition of
    ``test_add_photo_to_album``, shadowing the first so it never ran; it is
    now a distinct method.
    """

    # Class-level placeholders; the real objects are created per test in setUp().
    group = None
    album = None
    photo = None
    photo_2 = None

    def create_album_and_photos(self, user):
        """Create self.group (``user`` is admin), a foreign group owning
        self.photo_2, and an empty album in self.group."""
        self.group = models.Group.create_and_save(name="Croatia 2016",
                                                  picture="", description="abc", telegram_id=3, everyone_is_admin=False)
        self.group.users.add(user)
        self.group.users.add(self.unprivileged_user)
        self.group.admins.add(user)
        group_2 = models.Group.create_and_save(name="Not allowed to add photo from here",
                                               picture="", description="abc", telegram_id=4)
        self.photo = models.Photo.create_and_save(user=user, group=self.group,
                                                  timestamp="2016-05-25 12:59:10",
                                                  file="1.jpg",
                                                  thumbnail="TODO")
        # photo_2 lives in a group the test user is *not* a member of.
        self.photo_2 = models.Photo.create_and_save(user=user, group=group_2,
                                                    timestamp="2016-05-25 12:59:10",
                                                    file="2.jpg",
                                                    thumbnail="TODO")
        self.album = Album.create_and_save(name='Best of Croatia', description='only the best', group=self.group)

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.unprivileged_user = create_user("Unprivileged", "unpriv@test.test")
        self.create_album_and_photos(self.user)

    def _add_url(self, photo):
        """Build the add-photo-to-album URL for ``photo``."""
        return '/gallery/{}/{}/add/{}'.format(self.group.id, self.album.id, photo.id)

    def test_add_photo_to_album_unprivileged(self):
        """Non-admin members may not add photos (403)."""
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post(self._add_url(self.photo))
        self.assertEqual(response.status_code, 403)

    def test_add_photo_to_album(self):
        """An admin can add a group photo to the album."""
        self.client.force_login(user=self.user)
        response = self.client.post(self._add_url(self.photo))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.content,
            b'{"message": "Added image successfully"}'
        )

    def test_can_not_add_photo_to_album_from_a_group_user_is_not_in(self):
        """Photos of foreign groups are rejected with a generic error."""
        self.client.force_login(user=self.user)
        response = self.client.post(self._add_url(self.photo_2))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.content,
            b'{"message": "Something went wrong"}'
        )

    def test_add_photo_to_album_twice(self):
        """Adding the same photo a second time reports it is already there.

        Renamed from a duplicate ``test_add_photo_to_album`` definition that
        silently shadowed the first one.
        """
        self.client.force_login(user=self.user)
        response = self.client.post(self._add_url(self.photo))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(self._add_url(self.photo))
        self.assertEqual(
            response.content,
            b'{"message": "Already in album"}'
        )
class TestDeletePhotoFromAlbumView(TestCase):
    """Tests for POST ``/gallery/<group>/<album>/delete_from_album/<photo>``."""

    # Class-level placeholders; the real objects are created per test in setUp().
    group = None
    album = None
    album_2 = None
    photo = None
    photo_2 = None

    def create_album_and_photos(self, user):
        """Create self.group (``user`` is admin) with two photos in self.album,
        plus a foreign group owning self.album_2 which also contains photo_2."""
        self.group = models.Group.create_and_save(name="Croatia 2016",
                                                  picture="", description="abc", telegram_id=3, everyone_is_admin=False)
        self.group.users.add(user)
        self.group.users.add(self.unprivileged_user)
        self.group.admins.add(user)
        group_2 = models.Group.create_and_save(name="Not allowed to add photo from here",
                                               picture="", description="abc", telegram_id=4)
        self.photo = models.Photo.create_and_save(user=user, group=self.group,
                                                  timestamp="2016-05-25 12:59:10",
                                                  file="1.jpg",
                                                  thumbnail="TODO")
        self.photo_2 = models.Photo.create_and_save(user=user, group=self.group,
                                                    timestamp="2016-05-25 12:59:11",
                                                    file="2.jpg",
                                                    thumbnail="TODO")
        self.album = Album.create_and_save(name='Best of Croatia', description='only the best', group=self.group)
        self.album.photos.add(self.photo)
        self.album.photos.add(self.photo_2)
        # photo_2 is additionally referenced from an album of a foreign group.
        self.album_2 = Album.create_and_save(name='Another album',
                                             description='no photo removed from here',
                                             group=group_2)
        self.album_2.photos.add(self.photo_2)

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.unprivileged_user = create_user("Unprivileged", "unpriv@test.test")
        self.create_album_and_photos(self.user)

    def _delete_url(self, album, photo):
        """Build the delete-from-album URL for ``photo`` in ``album``."""
        return '/gallery/{}/{}/delete_from_album/{}'.format(self.group.id, album.id, photo.id)

    def test_delete_photo_from_album_unprivileged(self):
        """Non-admin members may not remove photos (403)."""
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post(self._delete_url(self.album, self.photo))
        self.assertEqual(response.status_code, 403)

    def test_delete_photo_from_album(self):
        """An admin can remove a photo; renamed from the misleading
        ``test_delete_photo_to_album``."""
        self.client.force_login(user=self.user)
        response = self.client.post(self._delete_url(self.album, self.photo))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.content,
            b'{"message": "Successfully removed from album"}'
        )

    def test_can_not_delete_photo_from_album_of_a_group_the_user_is_not_in(self):
        """Foreign albums are rejected with a generic error."""
        self.client.force_login(user=self.user)
        response = self.client.post(self._delete_url(self.album_2, self.photo_2))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.content,
            b'{"message": "Something went wrong"}'
        )

    def test_photo_is_removed_from_album(self):
        """A successful request shrinks the album by exactly one photo."""
        self.client.force_login(user=self.user)
        count_before = self.album.photos.count()
        self.client.post(self._delete_url(self.album, self.photo))
        self.assertEqual(self.album.photos.count(), count_before - 1)

    def test_photo_is_removed_from_only_one_album(self):
        """Removing a photo from one album leaves other albums untouched."""
        self.client.force_login(user=self.user)
        count_before = self.album_2.photos.count()
        self.client.post(self._delete_url(self.album, self.photo))
        self.assertEqual(self.album_2.photos.count(), count_before)
class TestGroupView(TestCase):
    """Tests for the group photo overview page (``/groups/<id>/photos``)."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user)
        self.photo = create_photo(self.user, self.group)
        self.album = create_album(self.group, self.photo)

    def test_overview(self):
        """The overview renders its template and exposes photos and albums."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos")
        self.assertTemplateUsed(response, "gallery/group/overview.html")
        self.assertIn(self.photo, response.context["photos"])
        # "albums" context entries are dicts keyed by album attributes.
        self.assertEqual(self.album.id, response.context["albums"][0]["id"])
class TestGroupAlbumDetailView(TestCase):
    """Tests for the album detail view addressed via hard-coded URL ids."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user)
        self.photo = create_photo(self.user, self.group)
        self.album = create_album(self.group, self.photo)
        # A group/album the logged-in user is not a member of.
        self.other_group = create_group(telegram_id=2)
        self.other_album = create_album(self.other_group)
        # A photo in the group that belongs to no album.
        self.uncategorized_photo = create_photo(self.user, self.group)

    def test_other_album(self):
        """A foreign album is not found under this group's URL (404)."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos/albums/2")
        self.assertEqual(response.status_code, 404)

    def test_album(self):
        """The album view exposes album, sibling albums, cover and photos."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos/albums/1")
        self.assertTemplateUsed(response, "gallery/group/album.html")
        self.assertEqual(self.album, response.context["album"])
        # The album itself must not be listed among the "other" albums.
        self.assertNotIn(self.album, response.context["albums"])
        self.assertEqual(self.photo.media_url, response.context["cover"])
        self.assertIn(self.photo, response.context["photos"])
        # Photos outside the album stay out of the album's photo list.
        self.assertNotIn(self.uncategorized_photo, response.context["photos"])
class TestSetCoverView(TestCase):
    """Tests for setting an album cover and for ``queries.get_album_cover``."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.unprivileged_user = create_user("Unprivileged", "unpriv@test.test")
        self.group = create_group(self.user, is_admin=True, users=[self.unprivileged_user])
        self.photo = create_photo(self.user, self.group)
        self.album = create_album(self.group, self.photo)
        self.other_photo = create_photo(self.user, self.group)
        self.album.photos.add(self.other_photo)

    def test_set_cover_unprivileged(self):
        """Non-admin members may not change the cover (403)."""
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post("/gallery/1/1/cover", follow=True)
        self.assertEqual(403, response.status_code)

    def test_set_cover(self):
        """An admin can set the cover; the view redirects back to the album."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/1/1/cover", follow=True)
        self.assertRedirects(response, reverse('groups:photo_album', kwargs={"group_id": self.group.id,
                                                                             "album_id": self.album.id}))
        self.album.refresh_from_db()
        self.assertEqual(self.album.cover, self.photo)

    def test_default_cover(self):
        """Without an explicit cover the first photo's media_url is used."""
        cover = queries.get_album_cover(self.album, self.album.photos)
        self.assertEqual(cover, self.photo.media_url)

    def test_explicit_cover(self):
        """An explicitly assigned cover wins.

        An in-memory assignment suffices because the album object itself is
        handed to the query; no save() is required here.
        """
        self.album.cover = self.other_photo
        cover = queries.get_album_cover(self.album, self.album.photos)
        self.assertEqual(cover, self.other_photo.media_url)
class TestDeletePhotoView(TestCase):
    """Tests for POST ``/gallery/photos/<id>/delete``."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.unprivileged_user = create_user("Unprivileged", "unpriv@test.test")
        self.group = create_group(self.user, is_admin=True, users=[self.unprivileged_user])
        self.photo = create_photo(self.user, self.group)

    def test_delete_photo_unprivileged(self):
        """Non-admin members may not delete photos (403)."""
        self.client.force_login(user=self.unprivileged_user)
        response = self.client.post("/gallery/photos/1/delete")
        self.assertEqual(response.status_code, 403)

    def test_delete_photo(self):
        """An admin deletes a photo; JSON confirms and the row is gone."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/1/delete")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Content-Type"), "application/json")
        data = json.loads(response.content.decode("utf-8"))
        self.assertEqual(data["message"], "Photo deleted")
        self.assertEqual(models.Photo.objects.filter(pk=self.photo.id).count(), 0)

    def test_delete_non_existing_photo(self):
        """Unknown photo ids answer 404 (renamed from
        ``test_delete_non_existingphoto``)."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/2/delete")
        self.assertEqual(response.status_code, 404)
class TestPhotoDetailView(TestCase):
    """Tests for the single-photo detail view (``/groups/<g>/photos/<p>``)."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user)
        self.photo = create_photo(self.user, self.group)

    def test_view(self):
        """The photo page renders its template with the photo in context."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos/1")
        self.assertTemplateUsed(response, "gallery/group/photo.html")
        self.assertEqual(self.photo, response.context["photo"])
class TestAlbumPhotoDetailView(TestCase):
    """Tests for the single-photo view inside an album, incl. prev/next paging."""

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user)
        self.photo = create_photo(self.user, self.group)
        self.album = create_album(self.group, self.photo)
        # Two more photos so the pager has a previous and a next neighbour.
        self.photo2 = create_photo(self.user, self.group)
        self.photo3 = create_photo(self.user, self.group)
        self.album.photos.add(self.photo2)
        self.album.photos.add(self.photo3)

    def test_view(self):
        """The album-photo page exposes both the photo and its album."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos/albums/1/1")
        self.assertTemplateUsed(response, "gallery/group/album_photo.html")
        self.assertEqual(self.photo, response.context["photo"])
        self.assertEqual(self.album, response.context["album"])

    def test_pager(self):
        """For the middle photo, prev is photo 1 and next is photo 3."""
        self.client.force_login(user=self.user)
        response = self.client.get("/groups/1/photos/albums/1/2")
        self.assertTemplateUsed(response, "gallery/group/album_photo.html")
        self.assertEqual(self.photo, response.context["prev"])
        self.assertEqual(self.photo3, response.context["next"])
class TestRotatePhotoLeftView(TestCase):
    """Tests for POST ``/gallery/photos/<id>/rotate/left``.

    The actual rotation is performed asynchronously, so ``django_rq.enqueue``
    is mocked and only the enqueueing is asserted.
    """

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user, is_admin=True)
        self.photo = create_photo(self.user, self.group)

    @mock.patch("django_rq.enqueue")
    def test_rotate_photo(self, enqueue):
        """A rotate request answers JSON and enqueues the rotation job."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/1/rotate/left")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Content-Type"), "application/json")
        data = json.loads(response.content.decode("utf-8"))
        self.assertEqual(data["message"], "Photo rotated")
        enqueue.assert_called()

    @mock.patch("django_rq.enqueue")
    def test_rotate_non_existing_photo(self, enqueue):
        """Unknown photo ids answer 404 and enqueue nothing."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/2/rotate/left")
        self.assertEqual(response.status_code, 404)
        enqueue.assert_not_called()
class TestRotatePhotoRightView(TestCase):
    """Tests for POST ``/gallery/photos/<id>/rotate/right``.

    Mirror image of TestRotatePhotoLeftView; ``django_rq.enqueue`` is mocked
    because the rotation itself runs asynchronously.
    """

    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()
        self.group = create_group(self.user, is_admin=True)
        self.photo = create_photo(self.user, self.group)

    @mock.patch("django_rq.enqueue")
    def test_rotate_photo(self, enqueue):
        """A rotate request answers JSON and enqueues the rotation job."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/1/rotate/right")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Content-Type"), "application/json")
        data = json.loads(response.content.decode("utf-8"))
        self.assertEqual(data["message"], "Photo rotated")
        enqueue.assert_called()

    @mock.patch("django_rq.enqueue")
    def test_rotate_non_existing_photo(self, enqueue):
        """Unknown photo ids answer 404 and enqueue nothing."""
        self.client.force_login(user=self.user)
        response = self.client.post("/gallery/photos/2/rotate/right")
        self.assertEqual(response.status_code, 404)
        enqueue.assert_not_called()
| 47.442708
| 120
| 0.623376
| 3,282
| 27,327
| 5.00457
| 0.066423
| 0.04968
| 0.035068
| 0.049924
| 0.844992
| 0.833059
| 0.813638
| 0.798782
| 0.781918
| 0.766149
| 0
| 0.01375
| 0.252168
| 27,327
| 575
| 121
| 47.525217
| 0.789978
| 0
| 0
| 0.682731
| 0
| 0
| 0.125846
| 0.038131
| 0
| 0
| 0
| 0
| 0.144578
| 1
| 0.12249
| false
| 0.004016
| 0.022088
| 0
| 0.202811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2fe61377eeb431b3fc0fba60524ff0696549b353
| 104
|
py
|
Python
|
testalign.py
|
TuxStory/Python3
|
4c1b2291d1613b32aa36b62b0b881ea40b423cce
|
[
"MIT"
] | null | null | null |
testalign.py
|
TuxStory/Python3
|
4c1b2291d1613b32aa36b62b0b881ea40b423cce
|
[
"MIT"
] | null | null | null |
testalign.py
|
TuxStory/Python3
|
4c1b2291d1613b32aa36b62b0b881ea40b423cce
|
[
"MIT"
] | null | null | null |
def format_price_line(item, price, width=20):
    """Return ``item`` left-justified to ``width`` with dot padding,
    followed by the price and a trailing '$'.

    ``width`` generalizes the previously hard-coded column width of 20.
    """
    return item.ljust(width, ".") + str(price) + "$"


def main():
    """Print the aligned price list (same output as the original script)."""
    for item, price in (("Test", 20), ("Pear", 99), ("Apple", 120)):
        print(format_price_line(item, price))


if __name__ == "__main__":
    main()
| 26
| 35
| 0.538462
| 15
| 104
| 3.733333
| 0.533333
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 0.028846
| 104
| 3
| 36
| 34.666667
| 0.425743
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
64093497cb0d8801f31ea89738a3fd86a0f22a9d
| 41,820
|
py
|
Python
|
src/frr/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
|
zhouhaifeng/vpe
|
9c644ffd561988e5740021ed26e0f7739844353d
|
[
"Apache-2.0"
] | null | null | null |
src/frr/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
|
zhouhaifeng/vpe
|
9c644ffd561988e5740021ed26e0f7739844353d
|
[
"Apache-2.0"
] | null | null | null |
src/frr/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
|
zhouhaifeng/vpe
|
9c644ffd561988e5740021ed26e0f7739844353d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation,
# Inc. ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Following tests are covered to test prefix-list functionality:
Test steps
- Create topology (setup module)
Creating 4 routers topology, r1, r2, r3 are in IBGP and
r3, r4 are in EBGP
- Bring up topology
- Verify for bgp to converge
IP prefix-list tests
- Test ip prefix-lists IN permit
- Test ip prefix-lists OUT permit
- Test ip prefix-lists IN deny and permit any
- Test delete ip prefix-lists
- Test ip prefix-lists OUT deny and permit any
- Test modify ip prefix-lists IN permit to deny
- Test modify ip prefix-lists IN deny to permit
- Test modify ip prefix-lists OUT permit to deny
- Test modify prefix-lists OUT deny to permit
- Test ip prefix-lists implicit deny
"""
import sys
import time
import os
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
reset_config_on_routers,
verify_rib,
create_static_routes,
create_prefix_lists,
verify_prefix_lists,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd]
# Global variables
# NOTE(review): this lowercase flag appears unused -- the tests below read the
# uppercase BGP_CONVERGENCE global assigned in setup_module(); confirm before
# removing.
bgp_convergence = False
def setup_module(mod):
    """
    Set up the pytest environment: build the topology from the JSON file,
    start the routers and verify BGP convergence before any test runs.

    * `mod`: module name
    """
    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)
    logger.info("Running setup_module to create topology")
    # This function initiates the topology build with Topogen...
    json_file = "{}/prefix_lists.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    # ``topo`` is shared with every test case in this module.
    global topo
    topo = tgen.json_topo
    # ... and here it calls Mininet initialization functions.
    # Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
    start_topology(tgen)
    # Creating configuration from JSON
    build_config_from_json(tgen, topo)
    # Checking BGP convergence
    global BGP_CONVERGENCE
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # Api call verify whether BGP is converged
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
        BGP_CONVERGENCE
    )
    logger.info("Running setup_module() done")
def teardown_module(mod):
    """
    Tear down the pytest environment: stop the topology started by
    setup_module and clean up its temporary files.

    * `mod`: module name
    """
    logger.info("Running teardown_module to delete topology")
    tgen = get_topogen()
    # Stop topology and remove tmp files
    tgen.stop_topology()
    logger.info(
        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
    )
    logger.info("=" * 40)
#####################################################
#
# Tests starting
#
#####################################################
def test_ip_prefix_lists_in_permit(request):
    """
    Create ip prefix list and test permit prefixes IN direction.

    r1 redistributes a static route; r3 applies a permit-any prefix list
    inbound on its session towards r1 and must install the route in its RIB.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Create Static routes
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Create ip prefix list: a single permit-any entry on r3.
    input_dict_2 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [{"seqid": 10, "network": "any", "action": "permit"}]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Configure bgp neighbor with prefix list: r1 redistributes, r3 filters
    # inbound on the r1 session with pf_list_1.
    input_dict_3 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {"name": "pf_list_1", "direction": "in"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: the permitted static route must be present on r3.
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    write_test_footer(tc_name)
def test_ip_prefix_lists_out_permit(request):
    """
    Create ip prefix list and test permit prefixes OUT direction.

    r1 redistributes two static routes but applies an outbound prefix list
    that permits only 20.0.20.1/32, so r3 must learn that route and must NOT
    learn 10.0.20.1/32.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Create Static routes (this one must be filtered out on the way to r3)
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Create Static routes (this one is permitted by the prefix list)
    input_dict_1 = {
        "r1": {
            "static_routes": [
                {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict_1)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    input_dict_5 = {
        "r3": {
            "static_routes": [
                {"network": "10.0.0.2/30", "no_of_ip": 1, "next_hop": "10.0.0.9"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict_5)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Api call to redistribute static routes
    # Create ip prefix list: permit only 20.0.20.1/32 (implicit deny applies
    # to everything else).
    input_dict_2 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {"seqid": 10, "network": "20.0.20.1/32", "action": "permit"}
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Configure prefix list to bgp neighbor
    # Configure bgp neighbor with prefix list (OUT direction on r1 -> r3)
    input_dict_3 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r3": {
                                    "dest_link": {
                                        "r1": {
                                            "prefix_lists": [
                                                {
                                                    "name": "pf_list_1",
                                                    "direction": "out",
                                                }
                                            ]
                                        }
                                    }
                                }
                            },
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ],
                        }
                    }
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: the permitted route is present ...
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # ... and the filtered route is absent (expected=False inverts the check).
    result = verify_rib(
        tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
    write_test_footer(tc_name)
def test_ip_prefix_lists_in_deny_and_permit_any(request):
    """
    Create ip prefix list and test permit/deny prefixes IN direction.

    The list denies 10.0.20.1/32 explicitly and permits everything else;
    r3 applies it inbound, so the denied static route must not reach its RIB.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Create Static Routes
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Api call to redistribute static routes
    # Create ip prefix list: seq 10 denies the specific route, seq 11 permits
    # anything else.
    input_dict_2 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"},
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Configure bgp neighbor with prefix list (IN direction on r3's session to r1)
    input_dict_3 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {"name": "pf_list_1", "direction": "in"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    # Configure prefix list to bgp neighbor
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: the denied route must be absent on r3.
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(
        tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
    write_test_footer(tc_name)
def test_delete_prefix_lists(request):
    """
    Delete ip prefix list.

    Create a prefix list, then delete it via the ``"delete": True`` flag and
    confirm it is gone.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Create ip prefix list
    input_dict_2 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"}
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    result = verify_prefix_lists(tgen, input_dict_2)
    # NOTE(review): verify_prefix_lists seems to return True when the lists
    # are ABSENT (it verifies deletion), hence "is not True" right after
    # creation and "is True" after the delete below -- confirm against
    # lib.common_config.
    assert result is not True, "Testcase {} : Failed \n Error: {}".format(
        tc_name, result
    )
    # Delete prefix list
    input_dict_2 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.20.1/32",
                            "action": "deny",
                            "delete": True,
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    result = verify_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    write_test_footer(tc_name)
def test_ip_prefix_lists_out_deny_and_permit_any(request):
    """
    Create ip prefix list and test deny/permit any prefixes OUT direction.

    r3 filters its outbound advertisements towards r4: everything in
    10.0.0.0/8 (le 32) is denied, anything else permitted. r4 must learn
    r2's 20.0.20.x routes but not r1's 10.0.20.x routes.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Create Static Routes (covered by the deny entry)
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Create Static Routes (outside 10.0.0.0/8, so permitted)
    input_dict_1 = {
        "r2": {
            "static_routes": [
                {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict_1)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Api call to redistribute static routes
    # Create ip prefix list: deny 10.0.0.0/8 le 32, then permit any.
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "deny",
                        },
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Configure prefix list to bgp neighbor: r1/r2 redistribute, r3 applies
    # the list outbound on its session towards r4.
    input_dict_4 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {
                                                    "name": "pf_list_1",
                                                    "direction": "out",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: permitted routes present on r4 ...
    dut = "r4"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # ... and denied routes absent (expected=False inverts the check).
    dut = "r4"
    protocol = "bgp"
    result = verify_rib(
        tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
    write_test_footer(tc_name)
def test_modify_prefix_lists_in_permit_to_deny(request):
    """
    Modify ip prefix list and test permit to deny prefixes IN direction.

    First r3 permits 10.0.0.0/8 inbound and must install the routes; then the
    same seqid is rewritten to deny, BGP sessions are cleared, and the routes
    must disappear from r3's RIB.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Create Static Routes
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Api call to redistribute static routes
    # Create ip prefix list: initially PERMIT 10.0.0.0/8 le 32.
    input_dict_2 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Configure prefix list to bgp neighbor (IN direction on r3's r1 session)
    input_dict_3 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {"name": "pf_list_1", "direction": "in"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: permitted routes must be present first.
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Modify prefix list: same seqid flipped to DENY, plus permit-any.
    input_dict_1 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "deny",
                        },
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_1)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Api call to clear bgp, so config changes would be reflected
    dut = "r3"
    result = clear_bgp_and_verify(tgen, topo, dut)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: the now-denied routes must be gone.
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(
        tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
    write_test_footer(tc_name)
def test_modify_prefix_lists_in_deny_to_permit(request):
    """
    Modify ip prefix list and test deny to permit prefixes IN direction
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    # Test case name, used in every assertion message below.
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from the base JSON-driven configuration.
    reset_config_on_routers(tgen)

    # Static routes originated on r1; these are the prefixes that the
    # prefix list on r3 filters.
    static_input = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, static_input)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Initial prefix list on r3: deny 10.0.0.0/8 le 32, permit anything else.
    pfx_deny_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "deny",
                        },
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_deny_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r1 redistributes static and connected routes; r3 applies the prefix
    # list inbound on its session towards r1.
    bgp_cfg = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {"name": "pf_list_1", "direction": "in"}
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, bgp_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # With the deny entry active, the routes must NOT be in r3's BGP RIB.
    dut = "r3"
    protocol = "bgp"
    result = verify_rib(
        tgen, "ipv4", dut, static_input, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still present in RIB".format(tc_name)

    # Rewrite the prefix list so the same prefixes are now permitted.
    pfx_permit_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_permit_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Clear BGP so the modified policy takes effect on the session.
    result = clear_bgp_and_verify(tgen, topo, dut)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # The routes must now show up in r3's BGP RIB.
    result = verify_rib(tgen, "ipv4", dut, static_input, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
def test_modify_prefix_lists_out_permit_to_deny(request):
    """
    Modify ip prefix list and test permit to deny prefixes OUT direction
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    # Test case name, used in every assertion message below.
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from the base JSON-driven configuration.
    reset_config_on_routers(tgen)

    # Static routes originated on r1; these are the prefixes that the
    # outbound prefix list on r3 filters towards r4.
    static_input = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, static_input)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Initial prefix list on r3: permit 10.0.0.0/8 le 32.
    pfx_permit_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_permit_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r1 redistributes static and connected routes; r3 applies the prefix
    # list outbound on its session towards r4.
    bgp_cfg = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {
                                                    "name": "pf_list_1",
                                                    "direction": "out",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, bgp_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # While the prefixes are permitted they must be present in r4's BGP RIB.
    dut = "r4"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, static_input, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Flip the list: deny 10.0.0.0/8 le 32, permit anything else.
    pfx_deny_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "deny",
                        },
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_deny_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Clear BGP on r3 so the modified outbound policy takes effect.
    result = clear_bgp_and_verify(tgen, topo, "r3")
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # The prefixes must no longer be advertised to r4.
    result = verify_rib(
        tgen, "ipv4", dut, static_input, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still present in RIB".format(tc_name)

    write_test_footer(tc_name)
def test_modify_prefix_lists_out_deny_to_permit(request):
    """
    Modify ip prefix list and test deny to permit prefixes OUT direction
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    # Test case name, used in every assertion message below.
    tc_name = request.node.name
    write_test_header(tc_name)

    # Start from the base JSON-driven configuration.
    reset_config_on_routers(tgen)

    # Static routes originated on r1; these are the prefixes that the
    # outbound prefix list on r3 filters towards r4.
    static_input = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, static_input)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Initial prefix list on r3: deny 10.0.0.0/8 le 32, permit anything else.
    pfx_deny_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "deny",
                        },
                        {"seqid": "11", "network": "any", "action": "permit"},
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_deny_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r1 redistributes static and connected routes; r3 applies the prefix
    # list outbound on its session towards r4.
    bgp_cfg = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {
                                                    "name": "pf_list_1",
                                                    "direction": "out",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, bgp_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # With the deny entry active, the routes must NOT reach r4's BGP RIB.
    dut = "r4"
    protocol = "bgp"
    result = verify_rib(
        tgen, "ipv4", dut, static_input, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still present in RIB".format(tc_name)

    # Rewrite the prefix list so the same prefixes are now permitted.
    pfx_permit_cfg = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, pfx_permit_cfg)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Clear BGP on r3 so the modified outbound policy takes effect.
    result = clear_bgp_and_verify(tgen, topo, "r3")
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # The routes must now show up in r4's BGP RIB.
    result = verify_rib(tgen, "ipv4", dut, static_input, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    write_test_footer(tc_name)
def test_ip_prefix_lists_implicit_deny(request):
    """
    Create ip prefix list and test implicit deny.

    r3's prefix list explicitly permits only 10.0.0.0/8 le 32, so r1's 10.x
    static routes must reach r4 while r2's 20.x static routes must be dropped
    by the implicit deny at the end of the list.
    """
    tgen = get_topogen()
    if BGP_CONVERGENCE is not True:
        pytest.skip("skipped because of BGP Convergence failure")

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Create Static Routes on r1 (10.x -- matched by the permit entry)
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create Static Routes on r2 (20.x -- only hit the implicit deny)
    input_dict_1 = {
        "r2": {
            "static_routes": [
                {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"}
            ]
        }
    }
    # BUGFIX: this call previously passed input_dict again, so r2's routes
    # were never configured and the implicit-deny check below was vacuous.
    result = create_static_routes(tgen, input_dict_1)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Api call to redistribute static routes
    # Create ip prefix list: a single explicit permit for 10.0.0.0/8 le 32;
    # everything else falls through to the implicit deny.
    input_dict_3 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": "10",
                            "network": "10.0.0.0/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r1 and r2 redistribute their static/connected routes; r3 applies the
    # prefix list outbound towards r4.
    input_dict_4 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    }
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r3": {
                                            "prefix_lists": [
                                                {
                                                    "name": "pf_list_1",
                                                    "direction": "out",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r1's permitted 10.x routes must be present in r4's BGP RIB.
    dut = "r4"
    protocol = "bgp"
    result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # r2's 20.x routes must be absent from r4's RIB (implicit deny).
    result = verify_rib(
        tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False
    )
    assert (
        result is not True
    ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)

    write_test_footer(tc_name)
if __name__ == "__main__":
    # Run this module under pytest, forwarding CLI args and disabling
    # output capture (-s) so router logs are visible.
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))
| 31.278983
| 88
| 0.433142
| 3,826
| 41,820
| 4.540512
| 0.071877
| 0.047663
| 0.041446
| 0.064529
| 0.851831
| 0.84642
| 0.827423
| 0.820746
| 0.811536
| 0.802153
| 0
| 0.02539
| 0.457532
| 41,820
| 1,336
| 89
| 31.302395
| 0.740369
| 0.123458
| 0
| 0.614228
| 0
| 0
| 0.178336
| 0
| 0
| 0
| 0
| 0
| 0.06012
| 1
| 0.012024
| false
| 0
| 0.009018
| 0
| 0.021042
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6432a6ba130dc1aa157faf59393b3506f384d9d8
| 10,786
|
py
|
Python
|
temporal-difference/TD_Exercise.py
|
albimc/deep-reinforcement-learning
|
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
|
[
"MIT"
] | null | null | null |
temporal-difference/TD_Exercise.py
|
albimc/deep-reinforcement-learning
|
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
|
[
"MIT"
] | null | null | null |
temporal-difference/TD_Exercise.py
|
albimc/deep-reinforcement-learning
|
e11a6c9d4c8991cf229e686b645ae22ec4cff4f5
|
[
"MIT"
] | null | null | null |
# TD Exercise #
import sys
import gym
import numpy as np
from collections import defaultdict, deque
import matplotlib.pyplot as plt
import check_test
from plot_utils import plot_values
# #############
# Environment #
# #############
# #############
# Environment #
# #############
# CliffWalking: a 4x12 grid world with 4 discrete actions.
env = gym.make('CliffWalking-v0')
print(env.action_space)
print(env.observation_space)

# ##############################
# Optimal state-value function #
# ##############################
# Build the known optimal V for CliffWalking: each row above the cliff is one
# step further from the goal, and the start state (bottom-left) has value -13.
V_opt = np.zeros((4, 12))
print(V_opt)
for row in range(3):
    V_opt[row][0:13] = -np.arange(3, 15)[::-1] + row
V_opt[3][0] = -13
print(V_opt)
plot_values(V_opt)
plt.show()
# ###########################
# Part 1: TD Control: Sarsa #
# ###########################
def update_Q_sarsa(Qsa, Qsa_next, reward, alpha, gamma):
    """Return the TD-updated estimate of Q(s, a) for one time step."""
    td_target = reward + (gamma * Qsa_next)
    return Qsa + alpha * (td_target - Qsa)
def epsilon_greedy_probs(env, Q_s, epsilon):
    """Return epsilon-greedy action probabilities for the action values Q_s."""
    n_actions = env.nA
    # Every action gets the exploration mass epsilon / nA ...
    probs = np.ones(n_actions) * epsilon / n_actions
    # ... and the greedy action additionally gets the remaining 1 - epsilon.
    greedy = np.argmax(Q_s)
    probs[greedy] = 1 - epsilon + (epsilon / n_actions)
    return probs
def sarsa(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05, plot_every=100):
    """Train an agent with Sarsa (on-policy TD control).

    Args:
        env: Gym environment exposing ``nA``, ``reset()`` and ``step()``.
        num_episodes: total number of training episodes.
        alpha: TD step size.
        gamma: discount factor.
        eps_start/eps_decay/eps_min: multiplicative epsilon decay schedule,
            applied once per episode and floored at eps_min.
        plot_every: window length of the moving-average reward plot.

    Returns:
        Q: defaultdict mapping state -> numpy array of per-action values.
    """
    Q = defaultdict(lambda: np.zeros(env.nA))  # initialize action-value function (empty dictionary of arrays)
    epsilon = eps_start  # initialize epsilon
    # initialize performance monitor: tmp_scores keeps the most recent
    # plot_every episode returns; scores collects their running means
    tmp_scores = deque(maxlen=plot_every)
    scores = deque(maxlen=num_episodes)
    # loop over episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # initialize score (undiscounted return of this episode)
        score = 0
        # begin an episode, observe S
        state = env.reset()
        # set value of epsilon (decayed once per episode, never below eps_min)
        epsilon = max(epsilon*eps_decay, eps_min)
        # get epsilon-greedy action probabilities
        policy_s = epsilon_greedy_probs(env, Q[state], epsilon)
        # pick action A
        action = np.random.choice(np.arange(env.nA), p=policy_s)
        # limit number of time steps per episode
        # for t_step in np.arange(300):
        while True:
            # take action A, observe R, S'
            next_state, reward, done, info = env.step(action)
            # add reward to score
            score += reward
            if not done:
                # get epsilon-greedy action probabilities
                policy_s = epsilon_greedy_probs(env, Q[next_state], epsilon)
                # pick next action A'
                next_action = np.random.choice(np.arange(env.nA), p=policy_s)
                # Sarsa update: bootstrap from Q(S', A')
                Q[state][action] = update_Q_sarsa(Q[state][action], Q[next_state][next_action], reward, alpha, gamma)
                # S <- S'
                state = next_state
                # A <- A'
                action = next_action
            if done:
                # terminal update: the successor value is taken to be 0
                # (note: state/action were NOT reassigned on this branch)
                Q[state][action] = update_Q_sarsa(Q[state][action], 0, reward, alpha, gamma)
                # append score
                tmp_scores.append(score)
                break
        if (i_episode % plot_every == 0):
            scores.append(np.mean(tmp_scores))
    # plot performance
    plt.plot(np.linspace(0, num_episodes, len(scores), endpoint=False), np.asarray(scores))
    plt.xlabel('Episode Number')
    plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
    plt.show()
    # print best 100-episode performance
    print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
    return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, num_episodes=5000, alpha=0.01, gamma=1.0, eps_start=1.0, eps_decay=0.5, eps_min=1/5000, plot_every=100)
# derive the greedy policy over the 48 CliffWalking states (-1 = unvisited)
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4, 12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function (max over actions)
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
# #########################################
# Part 2: TD Control: Q-learning Sarsamax #
# #########################################
def sarsamax(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05, plot_every=100):
    """Train an agent with Q-learning (Sarsamax, off-policy TD control).

    Args:
        env: Gym environment exposing ``nA``, ``reset()`` and ``step()``.
        num_episodes: total number of training episodes.
        alpha: TD step size.
        gamma: discount factor.
        eps_start/eps_decay/eps_min: multiplicative epsilon decay schedule,
            applied once per episode and floored at eps_min.
        plot_every: window length of the moving-average reward plot.

    Returns:
        Q: defaultdict mapping state -> numpy array of per-action values.
    """
    Q = defaultdict(lambda: np.zeros(env.nA))  # initialize action-value function (empty dictionary of arrays)
    epsilon = eps_start  # initialize epsilon
    # initialize performance monitor
    tmp_scores = deque(maxlen=plot_every)
    scores = deque(maxlen=num_episodes)
    # loop over episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # initialize score
        score = 0
        # begin an episode, observe S
        state = env.reset()
        # set value of epsilon
        epsilon = max(epsilon*eps_decay, eps_min)
        while True:
            # get epsilon-greedy action probabilities
            policy_s = epsilon_greedy_probs(env, Q[state], epsilon)
            # pick action A
            action = np.random.choice(np.arange(env.nA), p=policy_s)
            # take action A, observe R, S'
            next_state, reward, done, info = env.step(action)
            # add reward to score
            score += reward
            # pick next best action A'
            next_best_action = np.argmax(Q[next_state])
            # Q-learning update: bootstrap from max_a Q(S', a).  When S' is
            # terminal Q[S'] is all zeros (never written), so the target
            # correctly reduces to the reward alone -- no special case needed.
            Q[state][action] = update_Q_sarsa(Q[state][action], Q[next_state][next_best_action], reward, alpha, gamma)
            # S <- S'
            state = next_state
            # until S is terminal
            if done:
                # BUGFIX: the previous version applied a SECOND update here,
                # after `state = next_state`, writing reward into the terminal
                # state's Q-values.  That double-counted the final reward and
                # made later max_a Q(terminal, a) bootstraps non-zero.
                # append score
                tmp_scores.append(score)
                break
        if (i_episode % plot_every == 0):
            scores.append(np.mean(tmp_scores))
    # plot performance
    plt.plot(np.linspace(0, num_episodes, len(scores), endpoint=False), np.asarray(scores))
    plt.xlabel('Episode Number')
    plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
    plt.show()
    # print best 100-episode performance
    print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
    return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = sarsamax(env, num_episodes=5000, alpha=0.01, gamma=1.0, eps_start=1.0, eps_decay=0.1, eps_min=1/5000, plot_every=100)
# derive the greedy policy over the 48 CliffWalking states (-1 = unvisited)
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4, 12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function (max over actions)
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
# ####################################
# Part 3: TD Control: Expected Sarsa #
# ####################################
def expsarsa(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05, plot_every=100):
    """Train an agent with Expected Sarsa TD control.

    Args:
        env: Gym environment exposing ``nA``, ``reset()`` and ``step()``.
        num_episodes: total number of training episodes.
        alpha: TD step size.
        gamma: discount factor.
        eps_start/eps_decay/eps_min: multiplicative epsilon decay schedule,
            applied once per episode and floored at eps_min.
        plot_every: window length of the moving-average reward plot.

    Returns:
        Q: defaultdict mapping state -> numpy array of per-action values.
    """
    Q = defaultdict(lambda: np.zeros(env.nA))  # initialize action-value function (empty dictionary of arrays)
    epsilon = eps_start  # initialize epsilon
    # initialize performance monitor
    tmp_scores = deque(maxlen=plot_every)
    scores = deque(maxlen=num_episodes)
    # loop over episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # initialize score
        score = 0
        # begin an episode, observe S
        state = env.reset()
        # set value of epsilon
        epsilon = max(epsilon*eps_decay, eps_min)
        while True:
            # get epsilon-greedy action probabilities
            policy_s = epsilon_greedy_probs(env, Q[state], epsilon)
            # pick action A
            action = np.random.choice(np.arange(env.nA), p=policy_s)
            # take action A, observe R, S'
            next_state, reward, done, info = env.step(action)
            # add reward to score
            score += reward
            # Expected Sarsa target: E_pi[ Q(S', .) ] under the current
            # epsilon-greedy policy.  When S' is terminal Q[S'] is all zeros
            # (never written), so the expectation is correctly 0.
            policy_next_s = epsilon_greedy_probs(env, Q[next_state], epsilon)
            exp_next_Q = np.dot(Q[next_state], policy_next_s)
            # update TD estimate of Q
            Q[state][action] = update_Q_sarsa(Q[state][action], exp_next_Q, reward, alpha, gamma)
            # S <- S'
            state = next_state
            # until S is terminal
            if done:
                # BUGFIX: the previous version applied a SECOND update here,
                # after `state = next_state`, writing reward into the terminal
                # state's Q-values.  That double-counted the final reward and
                # biased every later expectation over Q(terminal, .).
                # append score
                tmp_scores.append(score)
                break
        if (i_episode % plot_every == 0):
            scores.append(np.mean(tmp_scores))
    # plot performance
    plt.plot(np.linspace(0, num_episodes, len(scores), endpoint=False), np.asarray(scores))
    plt.xlabel('Episode Number')
    plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
    plt.show()
    # print best 100-episode performance
    print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
    return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expsarsa(env, num_episodes=5000, alpha=0.1, gamma=1.0, eps_start=1.0, eps_decay=0.5, eps_min=1/5000, plot_every=100)
# derive the greedy policy over the 48 CliffWalking states (-1 = unvisited)
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4, 12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function (max over actions)
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
| 41.011407
| 130
| 0.613202
| 1,491
| 10,786
| 4.296445
| 0.120724
| 0.030909
| 0.009366
| 0.019669
| 0.838589
| 0.823915
| 0.820169
| 0.806744
| 0.797846
| 0.756478
| 0
| 0.028279
| 0.249212
| 10,786
| 262
| 131
| 41.167939
| 0.762781
| 0.216762
| 0
| 0.645833
| 0
| 0.020833
| 0.075709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034722
| false
| 0
| 0.048611
| 0
| 0.118056
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ff2764f973c6f06d6a5be2f0e783df51cc0e7727
| 29
|
py
|
Python
|
evaluate/previous_works/svsyn/dataset/__init__.py
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | 92
|
2019-09-08T09:55:05.000Z
|
2022-02-21T21:29:40.000Z
|
dataset/__init__.py
|
zjsprit/SphericalViewSynthesis
|
fcdec95bf3ad109767d27396434b51cf3aad2b4b
|
[
"BSD-2-Clause"
] | 4
|
2020-05-12T02:29:36.000Z
|
2021-11-26T07:49:43.000Z
|
dataset/__init__.py
|
zjsprit/SphericalViewSynthesis
|
fcdec95bf3ad109767d27396434b51cf3aad2b4b
|
[
"BSD-2-Clause"
] | 26
|
2019-09-16T02:26:33.000Z
|
2021-10-21T03:55:02.000Z
|
from .dataset_360D import *
| 14.5
| 28
| 0.758621
| 4
| 29
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.172414
| 29
| 1
| 29
| 29
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff2fccd516d5ca3d479310ad6a38bc80a0797bcc
| 26,098
|
py
|
Python
|
ConvNNet.py
|
wmorning/IndianaJones
|
6e69be7a146148a3c1a85f794900f4680d4e7065
|
[
"MIT"
] | null | null | null |
ConvNNet.py
|
wmorning/IndianaJones
|
6e69be7a146148a3c1a85f794900f4680d4e7065
|
[
"MIT"
] | 4
|
2015-11-09T05:25:36.000Z
|
2015-11-23T19:06:25.000Z
|
ConvNNet.py
|
wmorning/IndianaJones
|
6e69be7a146148a3c1a85f794900f4680d4e7065
|
[
"MIT"
] | null | null | null |
import numpy as np
import inDianajonES as InD
import tensorflow as tf
import sys
'''
ConvNNet implements a convolutional neural network
using the TensorFlow framework. It consists of a
ConvNNet class, which contains several functions:
- Train inputs a list of images and artifacts,
builds the design matrix, and implements
the neural net (outputting the training)
error as it goes
- Test runs the neural net on a test data set.
Can do whatever we make it do.
- Save_model saves the session to the input filename.
- Resume_from loads a saved session.
Also @Joe if you are wondering why I used a class, its
because the class allowed the session to be saved as a
global variable without it being a script.
'''
# ============================================================
class ConvNNet(object):
'''
ConvNNet implements a convolutional neural network
using the TensorFlow framework.
'''
    def __init__(self, nimg, farts, gridsize, cgfactor, mbsize=100,
                 mbpath='/home/jderose/scratch/des/data', batchsize=1,
                 cgafactor=1, Ncategories=29, Nstepspermb=20):
        """Configure the trainer.

        inputs:
            nimg        -- number of images in the total batch
            farts       -- fraction of artifacts (part of the minibatch filenames)
            gridsize    -- number of pixels to a side
            cgfactor    -- coarse-graining factor of the stored minibatches
            mbsize      -- images per minibatch file
            mbpath      -- directory holding the X_*/y_* minibatch .npy files
            batchsize   -- examples used per gradient-descent step
            cgafactor   -- additional on-the-fly coarse-graining factor
            Ncategories -- number of output classes (29, or 2 for binary)
            Nstepspermb -- gradient steps taken per minibatch
        """
        self.nimg = nimg
        self.farts = farts
        self.gridsize = gridsize
        self.cgfactor = cgfactor
        self.cgafactor = cgafactor
        # Number of training minibatches; minibatch index Nmb is held out and
        # loaded as the test set in Train().
        self.Nmb = (self.nimg+mbsize-1)//mbsize-1
        self.Ncategories = Ncategories
        self.mbpath = mbpath
        self.mbsize = mbsize
        self.batchsize = batchsize
        self.Nstepspermb = Nstepspermb
        # Save a model checkpoint every `savefreq` minibatches (see Train()).
        self.savefreq = 1000
        # Only 29-way or binary classification is supported; any other value
        # falls back to the 29-class default.
        if self.Ncategories == 29:
            self.twoclasses = False
        elif self.Ncategories == 2:
            self.twoclasses = True
        else:
            print 'You chose the wrong # of classes bro \n'
            print 'Switching to the default (29) classes \n'
            self.twoclasses = False
            self.Ncategories = 29
def convert_labels(self, y, twoclasses):
ey = InD.enumerate_labels(y)
ey2 = np.zeros([len(ey),self.Ncategories],float)
if twoclasses is True:
for i in range(len(ey)):
ey2[i,ey[i]//29] = 1.0
else:
for i in range(len(ey)):
ey2[i,ey[i]-1] = 1.0
return ey2
    def load_minibatch(self, filepath, nimg, farts, gridsize, cg, num,cg_additional=1,twoclasses=False):
        """
        Load a mini batch of images and their labels.
        Labels are converted to tensorflow (one-hot) format
        via convert_labels.
        inputs:
        filepath -- Path where the files are located
        nimg -- Number of images in the total batch
        farts -- Fraction of artifacts
        gridsize -- Number of pixels to a side
        cg -- Coarsegraining factor
        num -- The minibatch number
        cg_additional -- additional coursegraining to perform on the fly
        twoclasses -- forwarded to convert_labels (binary vs 29-way one-hot)
        returns:
        X  -- 2-D float array of flattened images, arcsinh-stretched to [0, 255]
        ey -- one-hot label array from convert_labels
        """
        X = np.load('{0}/X_{1}_{2}_{3}_{4}_mb{5}.npy'.format(filepath, nimg, farts, gridsize, cg, num))
        y = np.load('{0}/y_{1}_{2}_{3}_{4}_mb{5}.npy'.format(filepath, nimg, farts, gridsize, cg, num))
        # -99 is the on-disk sentinel for bad pixels; mark them NaN so the
        # nanmin/nanmax normalization below ignores them.
        X[X==-99] = np.nan
        if cg_additional!=1:
            # Block-average each image by cg_additional along both axes via a
            # reshape/mean/transpose chain; the result is re-flattened to
            # (gridsize//cg//cg_additional)**2 pixels per image.
            X = np.mean(np.mean(X.reshape([X.shape[0],gridsize//cg,gridsize//cg//cg_additional,cg_additional]),axis=3).T.reshape(gridsize//cg//cg_additional,gridsize//cg//cg_additional,cg_additional,X.shape[0]),axis=2).T.reshape([X.shape[0],(gridsize//cg//cg_additional)**2])
        # arcsinh stretch, then rescale each image independently to [0, 255].
        X = 255*(np.arcsinh(X)-np.atleast_2d(np.arcsinh(np.nanmin(X,axis=1))).T)/np.atleast_2d((np.arcsinh(np.nanmax(X,axis=1))-np.arcsinh(np.nanmin(X,axis=1)))).T
        # Bad pixels (still NaN after the stretch) are zeroed out.
        X[np.isnan(X)] = 0
        #X -= np.atleast_2d(np.mean(X,axis=1)).T
        #print(np.nanmean(X, axis=1))
        ey = self.convert_labels(y, twoclasses)
        return X, ey
    def Train(self, Nsteps, Nfeatures_conv1=32, Wsize_1=5, Nfeatures_conv2=64, \
              Wsize_2=5, Xlen_3=1024, gpu=False):
        '''
        This function creates the design matrix and loads the
        true clasifications (if they don't already exist).
        It then runs the neural net to train the optimal
        predicting scheme.
        * Currently the neural net is very similar to the
        one used in the MNIST tutorial from Tensorflow (except
        modified to use our images etc.). We
        should modify it further to fit our needs *
        Function inputs are below:
        - Nsteps is number of training steps to run
        - Nfeatures_conv1 is the number of convolution features (images)
          in the first layer
        - Wsize_1 is the size of the first convolution filter
          (assumed to be square)
        - Nfeatures_conv2 is the number of convolution features (images) in
          the second layer
        - Wsize_2 is the size of the second convolution filter
          (assumed to be square).
        - Xlen_3 is the length of the densely connected features vector.
        Returns (testerr, xentropy): per-minibatch test error and test
        cross-entropy histories.
        '''
        # start neural net: define x,y placeholders and create session
        #self.Session = tf.InteractiveSession() # useful if running from notebook
        print('Allocating placeholders')
        # Flattened input images and their reshaped 2-D counterpart; the side
        # length is gridsize reduced by both coarse-graining factors.
        self.x = tf.placeholder("float",shape=[None,(self.gridsize//(self.cgfactor*self.cgafactor))**2])
        self.x_image = tf.reshape(self.x,[-1,(self.gridsize//(self.cgfactor*self.cgafactor)),(self.gridsize//(self.cgfactor*self.cgafactor)),1])
        self.y_ = tf.placeholder("float",shape=[None,self.Ncategories])
        # create first layer
        # here we create 32 new images using a convolution with a
        # 5x5x32 weights filter plus a bias (one for each new image)
        # This is equivalent to measuring 32 features for each 5x5
        # pannel of the original image. We'll likely want many more
        # features, and to use more pixels. Keep that in mind.
        #self.W_conv0 = bias_variable([5,5,1,1])
        #self.h_conv0 = tf.nn.relu(conv2d(self.x_image,self.W_conv0))
        print('Creating first layer')
        self.W_conv1 = weight_variable([Wsize_1,Wsize_1,1,Nfeatures_conv1]) # play around with altering sizes
        self.b_conv1 = bias_variable([Nfeatures_conv1])# length should be same as last dimension of W_conv1
        self.h_conv1 = tf.nn.relu(conv2d(self.x_image, self.W_conv1)+self.b_conv1)
        # split each image into 4, and obtain the maximum quadrant
        self.h_pool1 = max_pool_2x2(self.h_conv1)
        print('Creating second layer')
        # create second layer
        # here each of our 32 intermediate images is convolved with
        # a 5x5x64 weights filter. We create 64 new images by summing
        # over all 32 convolutions. Each of the 64 images has its own bias
        # term. The shape of the result is the shape of the original image
        # divided by 4 on each axis by 64 (i.e. if you started with a
        # 2048x2048 image, you now have a 512x512x64 image)
        self.W_conv2 = weight_variable([Wsize_2,Wsize_2,Nfeatures_conv1,Nfeatures_conv2]) # again, play with altering sizes
        self.b_conv2 = bias_variable([Nfeatures_conv2]) # of the first two axes
        self.h_conv2 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2) + self.b_conv2)
        # split each image into 4, and obtain the maximum quadrant
        self.h_pool2 = max_pool_2x2(self.h_conv2)
        print('Creating densely connected layer')
        # Densely Connected layer
        # The pooled image tensor (side reduced by 4 after two 2x2 poolings)
        # is flattened, and we get a 1xXlen_3 vector via h_fc1 = h_2 * W + b
        self.W_fc1 = weight_variable([(self.gridsize//(self.cgfactor*self.cgafactor)//4)**2*Nfeatures_conv2, Xlen_3])
        self.b_fc1 = bias_variable([Xlen_3])
        self.h_pool2_flat = tf.reshape(self.h_pool2, [-1, \
            (self.gridsize//(self.cgfactor*self.cgafactor)//4) \
            *(self.gridsize//(self.cgfactor*self.cgafactor)//4)*Nfeatures_conv2])
        self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool2_flat, self.W_fc1)+self.b_fc1)
        print('Dropout')
        # avoid overfitting using tensorflows dropout function.
        # specifically, we keep each component of h_fc1 with
        # probability keep_prob.
        self.keep_prob = tf.placeholder("float")
        self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob)
        print('Softmax')
        # finally, a softmax regression to predict the output
        self.W_fc2 = weight_variable([Xlen_3,self.Ncategories])
        self.b_fc2 = bias_variable([self.Ncategories])
        print('Setting output format')
        # output of NN
        self.y_conv = tf.nn.softmax(tf.matmul(self.h_fc1_drop, self.W_fc2) + self.b_fc2)
        self.Session = tf.Session()
        print('Setting optimization parameters')
        # run the optimization. We'll minimize the cross entropy with Adam.
        #self.train_step = tf.train.AdamOptimizer(1e-2, epsilon=0.1).minimize(self.cross_entropy)
        self.cross_entropy = -tf.reduce_sum(self.y_*tf.log(self.y_conv))
        self.train_step = tf.train.AdamOptimizer(1e-5, epsilon=0.1).minimize(self.cross_entropy)
        #self.nfn = min_false_neg(self.y_conv, self.y_, self.Ncategories, session=self.Session)
        #self.chisq = tf.reduce_mean(tf.pow(tf.sub(self.y_,self.y_conv),2)+1e-4)
        #self.train_step = tf.train.GradientDescentOptimizer(1e-4).minimize(self.cross_entropy)
        self.correct_prediction = tf.equal(tf.argmax(self.y_conv,1), tf.argmax(self.y_,1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction,"float"))
        print('Running session')
        self.Session.run(tf.initialize_all_variables())
        # batch gradient descent ticker (rolling index into the minibatch)
        current_index = 0
        # minibatch index self.Nmb is held out as the test set
        testX, testy = self.load_minibatch(self.mbpath, self.nimg, self.farts, self.gridsize,
                                           self.cgfactor, self.Nmb, cg_additional=self.cgafactor,twoclasses=self.twoclasses)
        xentropy = []
        testerr = []
        for npass in range(Nsteps):
            for i in range(self.Nmb):
                #print('Minibatch {0}'.format(i))
                self.X, self.y = self.load_minibatch(self.mbpath, self.nimg, self.farts, self.gridsize,
                                                     self.cgfactor, i, cg_additional=self.cgafactor, twoclasses=self.twoclasses)
                for j in range(self.Nstepspermb):
                    #print('Batch {0}'.format(j))
                    # update the parameters using batch gradient descent,
                    # batchsize examples per iteration, cycling (via modulo)
                    # through the current minibatch
                    next_set = np.arange(current_index,current_index+self.batchsize,1)% self.mbsize
                    x_examples = self.X[next_set,:]
                    y_examples = self.y[next_set,:]
                    current_index = (current_index+self.batchsize) % self.mbsize
                    # for every thousandth step, print the training error
                    # (no dropout: keep_prob=1.0 for evaluation)
                    if (i*self.Nstepspermb+j)%1000 ==0:
                        train_accuracy = self.accuracy.eval(feed_dict={self.x:x_examples \
                            , self.y_: y_examples, self.keep_prob: 1.0},session=self.Session)
                        print "step %d, training accuracy %g"%(i, train_accuracy)
                    self.train_step.run(feed_dict={self.x: x_examples, self.y_: y_examples, self.keep_prob: 0.5},session=self.Session)
                    # debugging step --> dont keep
                    # (tracks how much W_conv1 moved between updates)
                    if (npass !=0) or (j != 0):
                        self.W1old = 1*self.W1curr
                    self.W1curr = self.W_conv1.eval(session=self.Session)
                    if (npass!=0) or (j!=0):
                        #print('model evolved by: ', np.sum(abs(self.W1curr-self.W1old)))
                        pass
                # periodic checkpoint; filename differs for gpu runs
                if (i%self.savefreq==0) & (i!=0):
                    if gpu:
                        self.Save_model('Trained_Model_{0}_{1}_{2}_{3}_gpu_mb{4}.tfm'.format(self.nimg, self.farts, self.gridsize, (self.cgfactor*self.cgafactor), i), i*self.Nstepspermb)
                    else:
                        self.Save_model('Trained_Model_{0}_{1}_{2}_{3}_mb{4}.tfm'.format(self.nimg, self.farts, self.gridsize, (self.cgfactor*self.cgafactor), i), i*self.Nstepspermb)
                # record held-out test error and cross-entropy per minibatch
                testerr.append(self.Test(testX, testy))
                xentropy.append(self.cross_entropy.eval(feed_dict = {self.x: testX, self.y_: testy, self.keep_prob:1.0},session=self.Session))
                print(xentropy[-1])
        return testerr, xentropy
def Train2(self, Nsteps, alpha=1e-3, Nfeatures_conv1=32, Wsize_1=5, Nfeatures_conv2=64, \
           Wsize_2=5, Xlen_3=1024, gpu=False):
    '''
    Build a two-conv-layer / two-dense-layer network with three dropout
    stages and train it by minibatch Adam, checkpointing periodically.
    * Currently the neural net is very similar to the
    one used in the MNIST tutorial from Tensorflow (except
    modified to use our images etc.). We
    should modify it further to fit our needs *
    Function inputs are below:
    - Nsteps is number of training passes over all minibatches to run
    - alpha is the Adam learning rate
    - Nfeatures_conv1 is the number of convolution features (images)
      in the first layer
    - Wsize_1 is the size of the first convolution filter
      (assumed to be square)
    - Nfeatures_conv2 is the number of convolution features (images) in
      the second layer
    - Wsize_2 is the size of the second convolution filter
      (assumed to be square).
    - Xlen_3 is the length of the densely connected features vector.
    - gpu only changes the name of the saved checkpoint files.
    Returns (testerr, xentropy): per-pass test accuracy and test-set
    cross entropy.
    '''
    # start neural net: define x,y placeholders and create session
    #self.Session = tf.InteractiveSession() # useful if running from notebook
    print('Allocating placeholders')
    # images are flattened to one row per example; side length after the
    # two coarse-graining factors is gridsize//(cgfactor*cgafactor)
    self.x = tf.placeholder("float",shape=[None,(self.gridsize//(self.cgfactor*self.cgafactor))**2])
    self.x_image = tf.reshape(self.x,[-1,(self.gridsize//(self.cgfactor*self.cgafactor)),(self.gridsize//(self.cgfactor*self.cgafactor)),1])
    self.y_ = tf.placeholder("float",shape=[None,self.Ncategories])
    # create first layer
    # here we create Nfeatures_conv1 new images using a convolution with a
    # Wsize_1 x Wsize_1 weights filter plus a bias (one for each new image)
    print('Creating first layer')
    self.W_conv1 = weight_variable([Wsize_1,Wsize_1,1,Nfeatures_conv1]) # play around with altering sizes
    self.b_conv1 = bias_variable([Nfeatures_conv1])# length should be same as last dimension of W_conv1
    self.h_conv1 = tf.nn.relu(conv2d(self.x_image, self.W_conv1)+self.b_conv1)
    # 2x2 max-pool halves each spatial axis
    self.h_pool1 = max_pool_2x2(self.h_conv1)
    print('Creating second layer')
    # create second layer
    # each of the Nfeatures_conv1 intermediate images is convolved with
    # a Wsize_2 x Wsize_2 filter; Nfeatures_conv2 outputs are produced by
    # summing over all input channels, each with its own bias term.
    self.W_conv2 = weight_variable([Wsize_2,Wsize_2,Nfeatures_conv1,Nfeatures_conv2]) # again, play with altering sizes
    self.b_conv2 = bias_variable([Nfeatures_conv2]) # of the first two axes
    self.h_conv2 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2) + self.b_conv2)
    # second 2x2 max-pool: spatial size is now original/4 on each axis
    self.h_pool2 = max_pool_2x2(self.h_conv2)
    # dropout applied directly on the pooled feature maps (stage 1 of 3)
    self.keep_prob1 = tf.placeholder("float")
    self.h_drop2 = tf.nn.dropout(self.h_pool2, self.keep_prob1)
    print('Creating densely connected layer')
    # Densely Connected layer
    # the pooled image tensor is flattened and mapped to a length-Xlen_3
    # vector via h_fc1 = relu(h_2 * W + b)
    self.W_fc1 = weight_variable([(self.gridsize//(self.cgfactor*self.cgafactor)//4)**2*Nfeatures_conv2, Xlen_3])
    self.b_fc1 = bias_variable([Xlen_3])
    self.h_pool2_flat = tf.reshape(self.h_drop2, [-1, \
        (self.gridsize//(self.cgfactor*self.cgafactor)//4) \
        *(self.gridsize//(self.cgfactor*self.cgafactor)//4)*Nfeatures_conv2])
    self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool2_flat, self.W_fc1)+self.b_fc1)
    print('Dropout')
    # avoid overfitting using tensorflows dropout function.
    # specifically, we keep each component of h_fc1 with
    # probability keep_prob2 (stage 2 of 3).
    self.keep_prob2 = tf.placeholder("float")
    self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob2)
    # second dense layer (Xlen_3 -> Xlen_3) with its own dropout (stage 3)
    self.W_fc2 = weight_variable([Xlen_3, Xlen_3])
    self.b_fc2 = bias_variable([Xlen_3])
    self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1_drop, self.W_fc2)+self.b_fc2)
    self.keep_prob3 = tf.placeholder("float")
    self.h_fc2_drop = tf.nn.dropout(self.h_fc2, self.keep_prob3)
    print('Softmax')
    # finally, a softmax regression to predict the output
    self.W_fc3 = weight_variable([Xlen_3,self.Ncategories])
    self.b_fc3 = bias_variable([self.Ncategories])
    print('Setting output format')
    # output of NN
    self.y_conv = tf.nn.softmax(tf.matmul(self.h_fc2_drop, self.W_fc3) + self.b_fc3)
    self.Session = tf.Session()
    print('Setting optimization parameters')
    # run the optimization. We'll minimize the cross entropy
    # clip_by_value keeps log() away from 0 (unlike Train's version)
    self.cross_entropy = -tf.reduce_sum(self.y_*tf.log(tf.clip_by_value(self.y_conv, 1e-10, 1.0)))
    self.train_step = tf.train.AdamOptimizer(alpha, epsilon=0.1).minimize(self.cross_entropy)
    #self.nfn = min_false_neg(self.y_conv, self.y_, self.Ncategories, session=self.Session)
    #self.chisq = tf.reduce_mean(tf.pow(tf.sub(self.y_,self.y_conv),2)+1e-4)
    #self.train_step = tf.train.GradientDescentOptimizer(1e-2).minimize(self.cross_entropy)
    self.correct_prediction = tf.equal(tf.argmax(self.y_conv,1), tf.argmax(self.y_,1))
    self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction,"float"))
    print('Running session')
    self.Session.run(tf.initialize_all_variables())
    # batch gradient descent ticker
    current_index = 0
    # minibatch index self.Nmb is held out as the test set
    testX, testy = self.load_minibatch(self.mbpath, self.nimg, self.farts, self.gridsize,
        self.cgfactor, self.Nmb, cg_additional=self.cgafactor,twoclasses=self.twoclasses)
    xentropy = []
    testerr = []
    for npass in range(Nsteps):
        for i in range(self.Nmb):
            #print('Minibatch {0}'.format(i))
            self.X, self.y = self.load_minibatch(self.mbpath, self.nimg, self.farts, self.gridsize,
                self.cgfactor, i, cg_additional=self.cgafactor, twoclasses=self.twoclasses)
            for j in range(self.Nstepspermb):
                #print('Batch {0}'.format(j))
                # update the parameters using batch gradient descent.
                # use batchsize examples per iteration, wrapping around
                # the minibatch with the modulus
                next_set = np.arange(current_index,current_index+self.batchsize,1)% self.mbsize
                x_examples = self.X[next_set,:]
                y_examples = self.y[next_set,:]
                current_index = (current_index+self.batchsize) % self.mbsize
                #for every thousandth step, print the training error.
                if (i*self.Nstepspermb+j)%1000 ==0:
                    # dropout disabled (keep_prob*=1.0) when measuring accuracy
                    train_accuracy = self.accuracy.eval(feed_dict={self.x:x_examples \
                        , self.y_: y_examples, self.keep_prob1: 1.0\
                        , self.keep_prob2: 1.0, self.keep_prob3: 1.0},session=self.Session)
                    print "step %d, training accuracy %g"%(i, train_accuracy)
                self.train_step.run(feed_dict={self.x: x_examples, self.y_: y_examples, self.keep_prob1: 0.25, \
                    self.keep_prob2: 0.5, self.keep_prob3:0.5},session=self.Session)
                #debugging step --> dont keep
                if (npass !=0) or (j != 0):
                    self.W1old = 1*self.W1curr
                self.W1curr = self.W_conv1.eval(session=self.Session)
                if (npass!=0) or (j!=0):
                    #print('model evolved by: ', np.sum(abs(self.W1curr-self.W1old)))
                    pass
            # checkpoint every savefreq minibatches (skip minibatch 0)
            if (i%self.savefreq==0) & (i!=0):
                if gpu:
                    self.Save_model('Trained_Model_{0}_{1}_{2}_{3}_gpu_mb{4}.tfm'.format(self.nimg, self.farts, self.gridsize, (self.cgfactor*self.cgafactor), i), i*self.Nstepspermb)
                else:
                    self.Save_model('Trained_Model_{0}_{1}_{2}_{3}_mb{4}.tfm'.format(self.nimg, self.farts, self.gridsize, (self.cgfactor*self.cgafactor), i), i*self.Nstepspermb)
        # after each full pass, record held-out accuracy and cross entropy
        testerr.append(self.Test(testX, testy,test2=True))
        xentropy.append(self.cross_entropy.eval(feed_dict = {self.x: testX, self.y_: testy,
            self.keep_prob1:1.0, self.keep_prob2:1.0,
            self.keep_prob3:1.0},session=self.Session))
        print(xentropy[-1])
    return testerr, xentropy
def Test(self,test_data_x,test_data_y,test2=False):
    '''
    Evaluate the current model's accuracy on a labelled data set.
    - test_data_x, test_data_y: design matrix and one-hot labels.
    - test2: True when the model was built by Train2 (three dropout
      placeholders) rather than Train (a single keep_prob).
    Dropout is disabled (keep probability 1.0) during evaluation.
    '''
    feed = {self.x: test_data_x, self.y_: test_data_y}
    if test2:
        feed[self.keep_prob1] = 1.0
        feed[self.keep_prob2] = 1.0
        feed[self.keep_prob3] = 1.0
    else:
        feed[self.keep_prob] = 1.0
    test_accuracy = self.accuracy.eval(feed_dict=feed, session=self.Session)
    print('Test Accuracy: ', test_accuracy)
    return test_accuracy
def Predict(self, data_x, test2=False):
    '''
    Return the predicted class index (argmax of y_conv) for each row
    of data_x. Set test2=True for models built by Train2, which use
    three dropout placeholders instead of one.
    '''
    predictions = tf.arg_max(self.y_conv,1)
    if test2:
        feed = {self.x: data_x, self.keep_prob1: 1.0,
                self.keep_prob2: 1.0, self.keep_prob3: 1.0}
    else:
        feed = {self.x: data_x, self.keep_prob: 1.0}
    return predictions.eval(feed_dict=feed, session=self.Session)
def Save_model(self, filename, Nsteps):
    '''
    Write a tensorflow checkpoint for the current session using
    tf.train.Saver.
    - filename: checkpoint path prefix.
    - Nsteps: number of training steps already run (recorded as the
      checkpoint's global step).
    '''
    checkpoint_writer = tf.train.Saver()
    checkpoint_writer.save(self.Session, filename, global_step=Nsteps)
    return
def Resume_from(self, filename):
    '''
    Use tensorflow's train.Saver to reload a saved
    checkpoint, and resume training.
    NOTE(review): this is a placeholder -- the unconditional raise below
    makes the restore code after it unreachable until it is removed.
    '''
    raise Exception('cannot resume training yet \n')
    # unreachable until the raise above is removed:
    self.Session = tf.Session()
    saver = tf.train.Saver()
    saver.restore(self.Session, filename)
    return
# ------------------------------------------------------------
'''
Neural net functions
'''
def weight_variable(shape):
    '''
    Create a tensorflow weight Variable of the given shape, drawn from
    a truncated normal with standard deviation 0.01.
    '''
    # note: a plain Variable won't let us spread across multiple GPUs.
    return tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
def bias_variable(shape):
    '''
    Create a tensorflow bias Variable of the given shape, initialized
    to the constant 0.001.
    '''
    #initial = tf.random_normal(shape, stddev=1e-3)
    return tf.Variable(tf.constant(0.001,shape=shape))
def conv2d(x,W):
    '''
    Apply a stride-1, same-padded 2d convolution of filter W over image x.
    '''
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    '''
    Downsample x by taking the max over non-overlapping 2x2 windows
    (halves each spatial axis).
    '''
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def min_false_neg(y, ytrue, nclass, naclass='last', session=None):
    '''
    Build a loss tensor that heavily penalizes false negatives: the
    row of the loss-weight matrix for the "no event" class (the last
    class when naclass='last', otherwise the first) is weighted 1000x.
    - y: predicted class probabilities, shape (batch, nclass).
    - ytrue: one-hot true labels, shape (batch, nclass).
    - session: unused; kept for interface compatibility.
    Returns the summed weighted loss tensor.
    '''
    Lweights = np.ones((nclass, nclass), dtype=np.float32)
    if naclass=='last':
        Lweights[-1,:] = 1000
    else:
        Lweights[0,:] = 1000
    # Zero the diagonal so correct classifications incur no loss.
    # BUG FIX: the original `dL = Lweights.diagonal(); dL = 0` only
    # rebound a local name and left the diagonal at its old values.
    np.fill_diagonal(Lweights, 0)
    Lweights = tf.constant(Lweights)
    print(Lweights.get_shape())
    print(y.get_shape())
    print(ytrue.get_shape())
    L = tf.matmul(ytrue, tf.matmul(Lweights, tf.transpose(y)))
    print(L.get_shape())
    L = tf.pack([L[i,i] for i in range(nclass)])
    return tf.reduce_sum(L)
# ------------------------------------------------------------
if __name__=='__main__':
    # any extra command-line argument switches on GPU checkpoint naming
    gpu = len(sys.argv) > 1
    cnn = ConvNNet(1000, 1.0, 2048, 2, cgafactor=8)
    cnn.Train(100, Nfeatures_conv1=16, Nfeatures_conv2=32, Xlen_3=10, gpu=gpu)
| 47.450909
| 275
| 0.595984
| 3,463
| 26,098
| 4.367023
| 0.136298
| 0.011572
| 0.027772
| 0.03174
| 0.769027
| 0.755802
| 0.733651
| 0.726774
| 0.715467
| 0.711367
| 0
| 0.032045
| 0.29213
| 26,098
| 549
| 276
| 47.537341
| 0.786565
| 0.179094
| 0
| 0.530075
| 0
| 0.022556
| 0.050136
| 0.014838
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.030075
| 0.015038
| null | null | 0.109023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ff55a601271d3caff5c0402f0dcc0e976c2f1049
| 7,251
|
py
|
Python
|
tests/functional/basic_tests.py
|
OpertusMundi/clustering-outliers-service
|
6d3d89eaa8d3c491c5c78d4c12b67aef01391e32
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/basic_tests.py
|
OpertusMundi/clustering-outliers-service
|
6d3d89eaa8d3c491c5c78d4c12b67aef01391e32
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/basic_tests.py
|
OpertusMundi/clustering-outliers-service
|
6d3d89eaa8d3c491c5c78d4c12b67aef01391e32
|
[
"Apache-2.0"
] | null | null | null |
from os import path, getenv, mkdir
import tempfile
import logging
from clustering_outliers.app import app
_tempdir: str = ""
def setup_module():
    """Enable Flask TESTING mode and choose a scratch directory.

    Uses $TEMPDIR when set (creating it if missing), otherwise the
    system temp directory.
    """
    print(f" == Setting up tests for {__name__}")
    app.config['TESTING'] = True
    global _tempdir
    _tempdir = getenv('TEMPDIR')
    if not _tempdir:
        _tempdir = tempfile.gettempdir()
    else:
        try:
            mkdir(_tempdir)
        except FileExistsError:
            pass
def teardown_module():
    """Log module teardown; no resources need explicit cleanup."""
    print(f" == Tearing down tests for {__name__}")
# Tests
# Fixture locations, resolved relative to this test module.
dirname = path.dirname(__file__)
csv_file = path.join(dirname, '..', 'test_data', 'luxembourg-pois.osm.csv')  # CSV point-of-interest fixture
shp_file = path.join(dirname, '..', 'test_data', 'get_pois_v02_corfu_2100.zip')  # zipped shapefile fixture
def test_get_documentation_1():
    """GET / returns 200 and a JSON body containing an OpenAPI document."""
    with app.test_client() as c:
        res = c.get('/', query_string={}, headers={})
        assert res.status_code == 200
        body = res.get_json()
        assert body.get('openapi') is not None
def test_get_health_check():
    """GET /_health returns 200 with status OK, logging any failure reason."""
    with app.test_client() as c:
        res = c.get('/_health', query_string={}, headers={})
        assert res.status_code == 200
        body = res.get_json()
        if 'reason' in body:
            logging.error('The service is unhealthy: %(reason)s\n%(detail)s', body)
        logging.debug("From /_health: %s" % body)
        assert body['status'] == 'OK'
def test_file_kmeans_csv():
    """POST the CSV fixture to /kmeans/file and check the clustering payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/kmeans/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'cluster_centers', 'ids', 'labels'}
def test_file_kmeans_shp():
    """POST the zipped-shapefile fixture to /kmeans/file and check the payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/kmeans/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'cluster_centers', 'ids', 'labels'}
def test_path_kmeans_csv():
    """POST a server-side CSV path to /kmeans/path and check the payload."""
    payload = {
        "resource": csv_file,
        "resource_type": "csv",
        "id_column": "ID",
        "columns-0": "LON",
        "columns-1": "LAT",
    }
    with app.test_client() as c:
        res = c.post('/kmeans/path', data=payload, content_type='multipart/form-data')
        assert res.status_code == 200
        body = res.get_json()
        assert set(body.keys()) == {'cluster_centers', 'ids', 'labels'}
def test_path_kmeans_shp():
    """POST a server-side shapefile path to /kmeans/path and check the payload."""
    payload = {"resource": shp_file, "resource_type": "shp"}
    with app.test_client() as c:
        res = c.post('/kmeans/path', data=payload, content_type='multipart/form-data')
        assert res.status_code == 200
        body = res.get_json()
        assert set(body.keys()) == {'cluster_centers', 'ids', 'labels'}
def test_file_dbscan_csv():
    """POST the CSV fixture to /dbscan/file and check the clustering payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/dbscan/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'core_sample_indices', 'components', 'ids', 'labels'}
def test_file_dbscan_shp():
    """POST the zipped-shapefile fixture to /dbscan/file and check the payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/dbscan/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'core_sample_indices', 'components', 'ids', 'labels'}
def test_file_agglomerative_csv():
    """POST the CSV fixture to /agglomerative/file and check the payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/agglomerative/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'n_clusters', 'n_leaves', 'n_connected_components', 'children', 'ids', 'labels'}
def test_file_agglomerative_shp():
    """POST the zipped-shapefile fixture to /agglomerative/file and check the payload."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/agglomerative/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert set(r.keys()) == {'n_clusters', 'n_leaves', 'n_connected_components', 'children', 'ids', 'labels'}
def test_file_isolation_forest_csv():
    """POST the CSV fixture to /isolation_forest/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/isolation_forest/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
def test_file_isolation_forest_shp():
    """POST the zipped-shapefile fixture to /isolation_forest/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/isolation_forest/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
def test_file_local_outlier_factor_csv():
    """POST the CSV fixture to /local_outlier_factor/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/local_outlier_factor/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
def test_file_local_outlier_factor_shp():
    """POST the zipped-shapefile fixture to /local_outlier_factor/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/local_outlier_factor/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
def test_file_one_class_svm_csv():
    """POST the CSV fixture to /one_class_svm/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(csv_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.csv'), "resource_type": "csv", "id_column": "ID", "columns-0": "LON",
                   "columns-1": "LAT"}
        with app.test_client() as client:
            res = client.post('/one_class_svm/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
def test_file_one_class_svm_shp():
    """POST the zipped-shapefile fixture to /one_class_svm/file and check a dict is returned."""
    # Context-manage the fixture handle so it is closed even on failure;
    # the original leaked the open() handle (ResourceWarning).
    with open(shp_file, 'rb') as fixture:
        payload = {'resource': (fixture, 'sample.zip'), "resource_type": "shp"}
        with app.test_client() as client:
            res = client.post('/one_class_svm/file', data=payload, content_type='multipart/form-data')
            assert res.status_code == 200
            r = res.get_json()
            assert isinstance(r, dict)
| 39.194595
| 127
| 0.627362
| 959
| 7,251
| 4.520334
| 0.131387
| 0.025836
| 0.0406
| 0.062745
| 0.853749
| 0.846828
| 0.825606
| 0.825606
| 0.825606
| 0.808535
| 0
| 0.011948
| 0.203558
| 7,251
| 184
| 128
| 39.407609
| 0.738701
| 0.00069
| 0
| 0.666667
| 0
| 0
| 0.244892
| 0.029266
| 0
| 0
| 0
| 0
| 0.22695
| 1
| 0.12766
| false
| 0.007092
| 0.028369
| 0
| 0.156028
| 0.014184
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
440febc339c3c2f7ac3da51fcf2c99c86ca1b5b6
| 49
|
py
|
Python
|
dask_cuda/__init__.py
|
paulhendricks/dask-cuda
|
a8b3d34d00752c57d5ab892b99a5d518dfa4c71d
|
[
"Apache-2.0"
] | 19
|
2019-01-04T17:50:22.000Z
|
2019-06-26T02:23:27.000Z
|
dask_cuda/__init__.py
|
paulhendricks/dask-cuda
|
a8b3d34d00752c57d5ab892b99a5d518dfa4c71d
|
[
"Apache-2.0"
] | 4
|
2019-01-04T17:47:44.000Z
|
2019-03-29T14:47:07.000Z
|
dask_cuda/__init__.py
|
paulhendricks/dask-cuda
|
a8b3d34d00752c57d5ab892b99a5d518dfa4c71d
|
[
"Apache-2.0"
] | 1
|
2021-09-20T15:55:35.000Z
|
2021-09-20T15:55:35.000Z
|
from .local_cuda_cluster import LocalCUDACluster
| 24.5
| 48
| 0.897959
| 6
| 49
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
443ffcd78c071822a07f85f42752b9be2825cffe
| 84
|
py
|
Python
|
_example/replace.py
|
flew-software/Dem
|
20b7eb9bc7c11f1baf23acfe7bfbab359ddd97fb
|
[
"MIT"
] | 1
|
2021-02-17T08:30:05.000Z
|
2021-02-17T08:30:05.000Z
|
_example/replace.py
|
flew-software/Dem
|
20b7eb9bc7c11f1baf23acfe7bfbab359ddd97fb
|
[
"MIT"
] | null | null | null |
_example/replace.py
|
flew-software/Dem
|
20b7eb9bc7c11f1baf23acfe7bfbab359ddd97fb
|
[
"MIT"
] | null | null | null |
import _2D

# Example: replace value 3 with 4 throughout a 2-D list.
# BUG FIX: the original literal [[1, 2, 3][1, 2, 3][1, 2, 3]] was missing
# commas between rows, so Python parsed it as repeated list indexing and
# raised TypeError at runtime.
a = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
print(a)
print(_2D.Replace(a, 3, 4))
| 12
| 33
| 0.511905
| 20
| 84
| 2.05
| 0.45
| 0.146341
| 0.219512
| 0.195122
| 0.219512
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 0.202381
| 84
| 6
| 34
| 14
| 0.41791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
92371fa6f6cf59f8dd0ad37fce6e01d457af270f
| 5,013
|
py
|
Python
|
models/base/init_utils.py
|
sAviOr287/imagenet_ICLR
|
1ac83d799f5335355161156aa9bba63e0d82a063
|
[
"MIT"
] | null | null | null |
models/base/init_utils.py
|
sAviOr287/imagenet_ICLR
|
1ac83d799f5335355161156aa9bba63e0d82a063
|
[
"MIT"
] | null | null | null |
models/base/init_utils.py
|
sAviOr287/imagenet_ICLR
|
1ac83d799f5335355161156aa9bba63e0d82a063
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import math
def weights_init_kaiming_xavier(m):
    """Per-module init: Kaiming-normal conv weights, N(0, 0.01) linear
    weights with zero bias, and identity (1/0) batch-norm affine params.
    Intended for use with ``model.apply``; unknown module types are left
    untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.zero_()
        return
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
        nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running_var/mean are already initialized to 1 and 0; only the
        # learnable affine parameters need resetting here.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def weights_init_kaiming_relu(m):
    """Per-module init: Kaiming-normal (relu gain) conv and linear weights,
    zero conv bias, identity (1/0) batch-norm affine params. Intended for
    use with ``model.apply``; other module types are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.zero_()
        return
    if isinstance(m, nn.Linear):
        # linear bias is deliberately left at its default here
        nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running stats are already 1/0; reset only the affine params.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def weights_init_kaiming_tanh(m):
    """Per-module init: Kaiming-normal (tanh gain) conv and linear weights,
    zero conv bias, identity (1/0) batch-norm affine params. Intended for
    use with ``model.apply``; other module types are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='tanh')
        if m.bias is not None:
            m.bias.data.zero_()
        return
    if isinstance(m, nn.Linear):
        # linear bias is deliberately left at its default here
        nn.init.kaiming_normal_(m.weight, nonlinearity='tanh')
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running stats are already 1/0; reset only the affine params.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def weights_init_xavier(m):
    """Per-module init: Xavier-normal conv and linear weights, zero conv
    bias, identity (1/0) batch-norm affine params. Intended for use with
    ``model.apply``; other module types are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
        return
    if isinstance(m, nn.Linear):
        # linear bias is deliberately left at its default here
        nn.init.xavier_normal_(m.weight)
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running stats are already 1/0; reset only the affine params.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def weights_init_EOC(m):
    """Per-module edge-of-chaos init: conv/linear weights and biases are
    delegated to the module-level EOC_weights/EOC_bias helpers; batch-norm
    affine params are set to identity (1/0). Intended for ``model.apply``.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        EOC_weights(m.weight)
        if m.bias is not None:
            EOC_bias(m.bias)
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running stats are already 1/0; reset only the affine params.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def weights_init_ord(m):
    """Per-module ordered-phase init: conv/linear weights and biases are
    delegated to the module-level ord_weights/ord_bias helpers; batch-norm
    affine params are set to identity (1/0). Intended for ``model.apply``.
    NOTE(review): ord_weights is called here without an explicit sigma_w2 --
    verify its signature supplies a default for that argument.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        ord_weights(m.weight)
        if m.bias is not None:
            ord_bias(m.bias)
        return
    if isinstance(m, nn.BatchNorm2d):
        # BN running stats are already 1/0; reset only the affine params.
        if m.weight is not None:
            m.weight.data.fill_(1.0)
        if m.bias is not None:
            m.bias.data.zero_()
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.dim()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def EOC_weights(tensor, act='relu'):
    """Fill *tensor* in place with N(0, sigma_w^2 / fan_in) samples using
    the edge-of-chaos weight variance for the given activation, and return
    it. Unknown activations fall back to sigma_w^2 = 1.
    The fixed-point q values the original stored in an unused local are
    kept as comments (removing dead code).
    """
    print('#' * 40)
    print('We are using {} activation on EOC'.format(act))
    print('#' * 40)
    fan_in, _ = _calculate_fan_in_and_fan_out(tensor)
    sigma_w2 = 1.
    if act == 'relu':
        sigma_w2 = 2.            # q: constant variance
    elif act == 'tanh':
        sigma_w2 = 1.2981 ** 2   # q = 0.49
    elif act == 'elu':
        sigma_w2 = 1.22459 ** 2  # q = 1.01
    std = math.sqrt(sigma_w2 / float(fan_in))
    with torch.no_grad():
        return tensor.normal_(0, std)
def EOC_bias(tensor, act='relu'):
    """Fill *tensor* in place with N(0, sigma_b^2) samples using the
    edge-of-chaos bias variance for the given activation, and return it.
    Unknown activations fall back to sigma_b^2 = 0.
    The fixed-point q values the original stored in an unused local are
    kept as comments (removing dead code).
    """
    print('#' * 40)
    print('We are using {} activation on EOC'.format(act))
    print('#' * 40)
    sigma_b2 = 0.
    if act == 'relu':
        sigma_b2 = 1e-16     # q: constant variance
    elif act == 'tanh':
        sigma_b2 = 0.2 ** 2  # q = 0.49
    elif act == 'elu':
        sigma_b2 = 0.2 ** 2  # q = 1.01
    std = math.sqrt(sigma_b2)
    with torch.no_grad():
        return tensor.normal_(0, std)
def ord_weights(tensor, sigma_w2=1.):
    """Ordered-phase init: fill *tensor* in place with
    N(0, sigma_w2 / fan_in) samples and return it.
    BUG FIX: sigma_w2 now defaults to 1. -- the module-level
    weights_init_ord calls ord_weights(m.weight) with no variance, which
    previously raised TypeError. The default is backward-compatible.
    NOTE(review): 1. mirrors ord_bias's sigma_b2; confirm the intended
    ordered-phase value.
    """
    print('#' * 40)
    print('Ordered phase with {}sigma_w2'.format(sigma_w2))
    print('#' * 40)
    fan_in, _ = _calculate_fan_in_and_fan_out(tensor)
    std = math.sqrt(sigma_w2 / float(fan_in))
    with torch.no_grad():
        return tensor.normal_(0, std)
def ord_bias(tensor):
    """Fill *tensor* in place with N(0, 1) samples (ordered-phase bias
    variance sigma_b^2 = 1) and return it.
    """
    std = math.sqrt(1.)
    with torch.no_grad():
        return tensor.normal_(0, std)
| 25.974093
| 100
| 0.689607
| 876
| 5,013
| 3.784247
| 0.116438
| 0.043741
| 0.054299
| 0.048265
| 0.788839
| 0.784615
| 0.7454
| 0.721267
| 0.704072
| 0.704072
| 0
| 0.031559
| 0.171953
| 5,013
| 192
| 101
| 26.109375
| 0.767044
| 0.137443
| 0
| 0.708333
| 0
| 0
| 0.066016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076389
| false
| 0
| 0.020833
| 0
| 0.131944
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92463d0790fd4f8568e0e8a342f8baf27bd768e9
| 170
|
py
|
Python
|
zerovl/utils/__init__.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 14
|
2022-01-19T08:08:29.000Z
|
2022-03-10T05:55:36.000Z
|
zerovl/utils/__init__.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 2
|
2022-02-25T14:35:47.000Z
|
2022-03-01T03:11:13.000Z
|
zerovl/utils/__init__.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 3
|
2022-02-09T01:23:11.000Z
|
2022-02-15T11:45:30.000Z
|
from . import logger
from .context import *
from .dist import *
from .misc import *
from .registry import *
from .checkpoint_utils import *
from .interpolate_pe import *
| 21.25
| 31
| 0.758824
| 23
| 170
| 5.521739
| 0.478261
| 0.393701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164706
| 170
| 8
| 32
| 21.25
| 0.894366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.