hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24e1ac4f666b8d35294b6791390859f3bd239115
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/past/utils/__init__.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/past/utils/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/past/utils/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/7b/c9/75/b0e7dd8832777647240562cd5af0b56a10b9057e616870b64c674d800f
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.479167
| 0
| 96
| 1
| 96
| 96
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
705fa12ee134dc049f2494a23c955ed94a7f8540
| 3,364
|
py
|
Python
|
dockerfiles/jdk17/s2ipf_stub_l2_dockerfile/scripts/WrapperCore/JobOrderInventoryReader.py
|
CSC-PS-S2/s2-workflow
|
4efadfb164e14f4a142d9e1d011f7a881d39250f
|
[
"Apache-2.0"
] | 7
|
2021-07-29T09:24:52.000Z
|
2021-12-15T17:23:58.000Z
|
dockerfiles/jdk17/s2ipf_stub_l2_dockerfile/scripts/WrapperCore/JobOrderInventoryReader.py
|
CSC-PS-S2/s2-workflow
|
4efadfb164e14f4a142d9e1d011f7a881d39250f
|
[
"Apache-2.0"
] | 34
|
2021-09-28T07:38:32.000Z
|
2022-01-25T13:59:03.000Z
|
dockerfiles/jdk17/s2ipf_stub_l2_dockerfile/scripts/WrapperCore/JobOrderInventoryReader.py
|
CSC-PS-S2/s2-workflow
|
4efadfb164e14f4a142d9e1d011f7a881d39250f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import copy
from lxml import etree as et
class JobOrderInventoryReader(object):
def __init__(self, filename):
self.tree_hdr = et.parse(filename)
self.root_hdr = self.tree_hdr.getroot()
self.filename = filename
def set_acquisition_station(self,station):
result = self.root_hdr.xpath(
'//Ipf_Job_Order/Ipf_Conf/Acquisition_Station')
result[0].text = station
def set_processing_station(self,station):
result = self.root_hdr.xpath(
'//Ipf_Job_Order/Ipf_Conf/Processing_Station')
result[0].text = station
def set_l2_ds_input(self, filename):
result = self.root_hdr.xpath(
'//Ipf_Job_Order/List_of_Ipf_Procs[1]/Ipf_Proc/List_of_Inputs/Input[File_Type/text()=\'DS\']'
'/List_of_File_Names[1]/File_Name')
result[0].text = filename
def set_l1_ds_input(self, filename):
result = self.root_hdr.xpath(
'//Ipf_Job_Order/List_of_Ipf_Procs[1]/Ipf_Proc/List_of_Inputs/Input[File_Type/text()=\'L1C_DS\']'
'/List_of_File_Names[1]/File_Name')
result[0].text = filename
def set_l2_tl_input(self, filenames):
base_xpath_filenames = '//Ipf_Job_Order/List_of_Ipf_Procs[1]/Ipf_Proc/List_of_Inputs/' \
'Input[File_Type/text()=\'TL\']/List_of_File_Names'
list_of_filenames = self.root_hdr.xpath(base_xpath_filenames)
filename_node_tpl = self.root_hdr.xpath(base_xpath_filenames + '[1]/File_Name')
for g in filenames:
new_node = copy.deepcopy(filename_node_tpl[0])
new_node.text = g
list_of_filenames[0].append(new_node)
list_of_filenames[0].remove(filename_node_tpl[0])
list_of_filenames[0].attrib['count'] = str(len(filenames))
def set_l1_tl_input(self, filenames):
base_xpath_filenames = '//Ipf_Job_Order/List_of_Ipf_Procs[1]/Ipf_Proc/List_of_Inputs/' \
'Input[File_Type/text()=\'TL_L1C\']/List_of_File_Names'
list_of_filenames = self.root_hdr.xpath(base_xpath_filenames)
filename_node_tpl = self.root_hdr.xpath(base_xpath_filenames + '[1]/File_Name')
for g in filenames:
new_node = copy.deepcopy(filename_node_tpl[0])
new_node.text = g
list_of_filenames[0].append(new_node)
list_of_filenames[0].remove(filename_node_tpl[0])
list_of_filenames[0].attrib['count'] = str(len(filenames))
def set_working_input(self, filename):
result = self.root_hdr.xpath(
'//Ipf_Job_Order/List_of_Ipf_Procs[1]/Ipf_Proc/List_of_Inputs/Input[File_Type/'
'text()=\'WORKING\']/List_of_File_Names[1]/File_Name')
result[0].text = filename
def set_l2_tl_sensing_start(self, sensing_start):
result = self.root_hdr.xpath('//Ipf_Job_Order/Ipf_Conf/Dynamic_Processing_Parameters/Processing_Parameter[Name/text()=\'SENSING_START\']/Value')
result[0].text = sensing_start
def set_l2_tl_sensing_stop(self, sensing_stop):
result = self.root_hdr.xpath('//Ipf_Job_Order/Ipf_Conf/Dynamic_Processing_Parameters/Processing_Parameter[Name/text()=\'SENSING_STOP\']/Value')
result[0].text = sensing_stop
def write_to_file(self, filename):
self.tree_hdr.write(filename, encoding="UTF-8")
| 44.263158
| 152
| 0.6739
| 476
| 3,364
| 4.371849
| 0.159664
| 0.066314
| 0.063431
| 0.084575
| 0.846708
| 0.790966
| 0.790966
| 0.761173
| 0.761173
| 0.761173
| 0
| 0.013827
| 0.204518
| 3,364
| 75
| 153
| 44.853333
| 0.763827
| 0.003567
| 0
| 0.5
| 0
| 0
| 0.263881
| 0.232537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.183333
| false
| 0
| 0.033333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
70877165d6eb7419d67348e97e49fd69895add2f
| 263
|
py
|
Python
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/ltfn/test_pak1/__init__.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/ltfn/test_pak1/__init__.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/ltfn/test_pak1/__init__.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from state import called
def setup():
called.append('test_pak1.setup')
def teardown():
called.append('test_pak1.teardown')
def test_one_one():
called.append('test_pak1.test_one_one')
def test_one_two():
called.append('test_pak1.test_one_two')
| 18.785714
| 43
| 0.726236
| 40
| 263
| 4.475
| 0.3
| 0.268156
| 0.357542
| 0.446927
| 0.301676
| 0.301676
| 0
| 0
| 0
| 0
| 0
| 0.017621
| 0.136882
| 263
| 13
| 44
| 20.230769
| 0.770925
| 0
| 0
| 0
| 0
| 0
| 0.292776
| 0.1673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| true
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7094832b7cb4be99f15cc22b4a9f8ac37513298e
| 229
|
py
|
Python
|
fdbk/data_tools/functions/__init__.py
|
kangasta/fdbk
|
426a04131869ceefd3bd2c80d327b60a3a8e2d7b
|
[
"MIT"
] | 1
|
2019-05-04T09:18:48.000Z
|
2019-05-04T09:18:48.000Z
|
fdbk/data_tools/functions/__init__.py
|
kangasta/fdbk
|
426a04131869ceefd3bd2c80d327b60a3a8e2d7b
|
[
"MIT"
] | 36
|
2018-10-25T13:29:12.000Z
|
2021-09-23T22:30:07.000Z
|
fdbk/data_tools/functions/__init__.py
|
kangasta/fdbk
|
426a04131869ceefd3bd2c80d327b60a3a8e2d7b
|
[
"MIT"
] | null | null | null |
from ._chart_funcs import *
from ._collection_funcs import *
from ._status_funcs import *
from ._value_funcs import *
# pylint: disable=invalid-name
functions = {**CHART_FUNCS, **COLLECTION_FUNCS, **STATUS_FUNCS, **VALUE_FUNCS}
| 28.625
| 78
| 0.772926
| 29
| 229
| 5.689655
| 0.413793
| 0.266667
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113537
| 229
| 7
| 79
| 32.714286
| 0.812808
| 0.122271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
70a2f3319224eb1140b7361b62eb61ae2cc003e5
| 15,154
|
py
|
Python
|
app/apigateway/tests/test_api.py
|
rodrigoalveslima/buzzblog
|
2b301ee363fbbebbe0ee31b1bf9538811d97b293
|
[
"Apache-2.0"
] | null | null | null |
app/apigateway/tests/test_api.py
|
rodrigoalveslima/buzzblog
|
2b301ee363fbbebbe0ee31b1bf9538811d97b293
|
[
"Apache-2.0"
] | null | null | null |
app/apigateway/tests/test_api.py
|
rodrigoalveslima/buzzblog
|
2b301ee363fbbebbe0ee31b1bf9538811d97b293
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Georgia Tech Center for Experimental Research in Computer
# Systems
import random
import string
import time
import unittest
import requests
from requests.auth import HTTPBasicAuth
# Constants
SERVER_HOSTNAME = "localhost"
SERVER_PORT = 8080
URL = "{hostname}:{port}".format(hostname=SERVER_HOSTNAME, port=SERVER_PORT)
def random_id(size=16, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class TestService(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestService, self).__init__(*args, **kwargs)
# Create test accounts.
self._accounts = [
{
"username": random_id(),
"password": "passwd",
"first_name": "George",
"last_name": "Burdell"
}
for i in range(4)
]
for account in self._accounts:
r = requests.post("http://{url}/account".format(url=URL),
params={"request_id": random_id()},
json={
"username": account["username"],
"password": account["password"],
"first_name": account["first_name"],
"last_name": account["last_name"]
}
)
response = r.json()
account["id"] = response["id"]
# Create test follow.
self._follow = {
"follower": self._accounts[0],
"followee": self._accounts[1]
}
r = requests.post("http://{url}/follow".format(url=URL),
auth=HTTPBasicAuth(self._follow["follower"]["username"],
self._follow["follower"]["password"]),
params={"request_id": random_id()},
json={"account_id": self._follow["followee"]["id"]}
)
response = r.json()
self._follow["id"] = response["id"]
# Create test posts.
self._posts = [
{
"text": "Lorem ipsum",
"author": self._accounts[0]
}
for i in range(2)
]
for post in self._posts:
r = requests.post("http://{url}/post".format(url=URL),
auth=HTTPBasicAuth(post["author"]["username"],
post["author"]["password"]),
params={"request_id": random_id()},
json={"text": post["text"]}
)
response = r.json()
post["id"] = response["id"]
# Create test like.
self._like = {
"account": self._accounts[1],
"post": self._posts[0]
}
r = requests.post("http://{url}/like".format(url=URL),
auth=HTTPBasicAuth(self._like["account"]["username"],
self._like["account"]["password"]),
params={"request_id": random_id()},
json={"post_id": self._like["post"]["id"]}
)
response = r.json()
self._like["id"] = response["id"]
def test_create_account_200(self):
r = requests.post("http://{url}/account".format(url=URL),
params={"request_id": random_id()},
json={
"username": "jane.doe",
"password": "passwd",
"first_name": "Jane",
"last_name": "Doe"
}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("account", response["object"])
self.assertEqual("standard", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertTrue(response["active"])
self.assertEqual("jane.doe", response["username"])
self.assertEqual("Jane", response["first_name"])
self.assertEqual("Doe", response["last_name"])
def test_retrieve_account_200(self):
r = requests.get("http://{url}/account/{account_id}".format(url=URL,
account_id=self._accounts[3]["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("account", response["object"])
self.assertEqual("expanded", response["mode"])
self.assertEqual(self._accounts[3]["id"], response["id"])
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertTrue(response["active"])
self.assertEqual(self._accounts[3]["username"], response["username"])
self.assertEqual(self._accounts[3]["first_name"], response["first_name"])
self.assertEqual(self._accounts[3]["last_name"], response["last_name"])
self.assertFalse(response["follows_you"])
self.assertFalse(response["followed_by_you"])
self.assertEqual(0, response["n_followers"])
self.assertEqual(0, response["n_following"])
self.assertEqual(0, response["n_posts"])
self.assertEqual(0, response["n_likes"])
def test_update_account_200(self):
self._accounts[0]["last_name"] = "P. Burdell"
r = requests.put("http://{url}/account/{account_id}".format(url=URL,
account_id=self._accounts[0]["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={
"password": self._accounts[0]["password"],
"first_name": self._accounts[0]["first_name"],
"last_name": self._accounts[0]["last_name"]
}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("account", response["object"])
self.assertEqual("standard", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertTrue(response["active"])
self.assertEqual(self._accounts[0]["username"], response["username"])
self.assertEqual(self._accounts[0]["first_name"], response["first_name"])
self.assertEqual(self._accounts[0]["last_name"], response["last_name"])
def test_delete_account_200(self):
# Create an account.
r = requests.post("http://{url}/account".format(url=URL),
params={"request_id": random_id()},
json={
"username": random_id(),
"password": "passwd",
"first_name": "George",
"last_name": "Burdell"
}
)
response = r.json()
# Delete that account.
r = requests.delete("http://{url}/account/{account_id}".format(url=URL,
account_id=response["id"]),
auth=HTTPBasicAuth(response["username"], "passwd"),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
def test_list_accounts_200(self):
r = requests.get("http://{url}/account".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={
"request_id": random_id(),
"username": random_id()
},
json={"limit": 10, "offset": 0}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual(0, len(response))
def test_follow_account_200(self):
r = requests.post("http://{url}/follow".format(url=URL),
auth=HTTPBasicAuth(self._accounts[1]["username"],
self._accounts[1]["password"]),
params={"request_id": random_id()},
json={"account_id": self._accounts[0]["id"]}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("follow", response["object"])
self.assertEqual("standard", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertEqual(self._accounts[1]["id"], response["follower_id"])
self.assertEqual(self._accounts[0]["id"], response["followee_id"])
def test_retrieve_follow_200(self):
r = requests.get("http://{url}/follow/{follow_id}".format(url=URL,
follow_id=self._follow["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("follow", response["object"])
self.assertEqual("expanded", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertEqual(self._follow["follower"]["id"], response["follower_id"])
self.assertEqual(self._follow["followee"]["id"], response["followee_id"])
self.assertEqual(self._follow["follower"]["id"], response["follower"]["id"])
self.assertEqual(self._follow["followee"]["id"], response["followee"]["id"])
def test_delete_follow_200(self):
# Follow an account.
r = requests.post("http://{url}/follow".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={"account_id": self._accounts[2]["id"]}
)
response = r.json()
# Delete that follow.
r = requests.delete("http://{url}/follow/{follow_id}".format(url=URL,
follow_id=response["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
def test_list_follows_200(self):
r = requests.get("http://{url}/follow".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={
"request_id": random_id(),
"follower_id": self._accounts[3]["id"]
},
json={"limit": 10, "offset": 0}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual(0, len(response))
def test_create_post_200(self):
r = requests.post("http://{url}/post".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={"text": "Lorem ipsum"}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("post", response["object"])
self.assertEqual("standard", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertTrue(response["active"])
self.assertEqual("Lorem ipsum", response["text"])
self.assertEqual(self._accounts[0]["id"], response["author_id"])
def test_retrieve_post_200(self):
r = requests.get("http://{url}/post/{post_id}".format(url=URL,
post_id=self._posts[0]["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("post", response["object"])
self.assertEqual("expanded", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertTrue(response["active"])
self.assertEqual(self._posts[0]["text"], response["text"])
self.assertEqual(self._posts[0]["author"]["id"], response["author_id"])
self.assertEqual(self._posts[0]["author"]["id"], response["author"]["id"])
def test_delete_post_200(self):
# Create a post.
r = requests.post("http://{url}/post".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={"text": "Lorem ipsum"}
)
response = r.json()
# Delete that post.
r = requests.delete("http://{url}/post/{post_id}".format(url=URL,
post_id=response["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
def test_list_posts_200(self):
r = requests.get("http://{url}/post".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={
"request_id": random_id(),
"author_id": self._accounts[3]["id"]
},
json={"limit": 10, "offset": 0}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual(0, len(response))
def test_like_post_200(self):
r = requests.post("http://{url}/like".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={"post_id": self._posts[1]["id"]}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("like", response["object"])
self.assertEqual("standard", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertEqual(self._accounts[0]["id"], response["account_id"])
self.assertEqual(self._posts[1]["id"], response["post_id"])
def test_retrieve_like_200(self):
r = requests.get("http://{url}/like/{like_id}".format(url=URL,
like_id=self._like["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual("like", response["object"])
self.assertEqual("expanded", response["mode"])
self.assertIsInstance(response["id"], int)
self.assertAlmostEqual(time.time(), response["created_at"], delta=60)
self.assertEqual(self._like["account"]["id"], response["account_id"])
self.assertEqual(self._like["post"]["id"], response["post_id"])
self.assertEqual(self._like["account"]["id"], response["account"]["id"])
self.assertEqual(self._like["post"]["id"], response["post"]["id"])
def test_delete_like_200(self):
# Like a post.
r = requests.post("http://{url}/like".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()},
json={"post_id": self._posts[0]["id"]}
)
response = r.json()
# Delete that like.
r = requests.delete("http://{url}/like/{like_id}".format(url=URL,
like_id=response["id"]),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={"request_id": random_id()}
)
self.assertEqual(200, r.status_code)
def test_list_likes_200(self):
r = requests.get("http://{url}/like".format(url=URL),
auth=HTTPBasicAuth(self._accounts[0]["username"],
self._accounts[0]["password"]),
params={
"request_id": random_id(),
"account_id": self._accounts[3]["id"]
},
json={"limit": 10, "offset": 0}
)
self.assertEqual(200, r.status_code)
response = r.json()
self.assertEqual(0, len(response))
if __name__ == "__main__":
unittest.main()
| 38.171285
| 80
| 0.615811
| 1,774
| 15,154
| 5.0823
| 0.07159
| 0.11646
| 0.06921
| 0.05823
| 0.82398
| 0.756433
| 0.74035
| 0.710958
| 0.680013
| 0.668256
| 0
| 0.018278
| 0.198495
| 15,154
| 396
| 81
| 38.267677
| 0.724024
| 0.020853
| 0
| 0.497175
| 0
| 0
| 0.178653
| 0
| 0
| 0
| 0
| 0
| 0.265537
| 1
| 0.053672
| false
| 0.076271
| 0.016949
| 0.002825
| 0.076271
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
563fa93b815aaed27fa895274bd437dc9564b383
| 368
|
py
|
Python
|
sudoku/example.py
|
mflood/whimsical
|
e39865193b232cc8fc280f371a76f7ac7d07d782
|
[
"Apache-2.0"
] | null | null | null |
sudoku/example.py
|
mflood/whimsical
|
e39865193b232cc8fc280f371a76f7ac7d07d782
|
[
"Apache-2.0"
] | null | null | null |
sudoku/example.py
|
mflood/whimsical
|
e39865193b232cc8fc280f371a76f7ac7d07d782
|
[
"Apache-2.0"
] | null | null | null |
from sudoku import solve
puzzle = [[0,0,0,0,0,3,9,0,0],
[5,0,0,0,0,0,4,1,0],
[0,0,8,7,5,0,0,0,0],
[0,0,7,0,0,0,5,9,1],
[0,4,0,0,2,0,0,6,0],
[6,8,5,0,0,0,7,0,0],
[0,0,0,0,4,2,1,0,0],
[0,7,4,0,0,0,0,0,2],
[0,0,2,6,0,0,0,0,0]]
solution = solve(puzzle)
for row in solution:
print(row)
| 21.647059
| 30
| 0.410326
| 95
| 368
| 1.589474
| 0.210526
| 0.503311
| 0.476821
| 0.370861
| 0.344371
| 0.192053
| 0.092715
| 0
| 0
| 0
| 0
| 0.313953
| 0.298913
| 368
| 16
| 31
| 23
| 0.271318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
567c0bdca0347b593c0ebc0e01dc47164dea5643
| 32,419
|
py
|
Python
|
Synthesis/post/plot/scatter.py
|
pablorutschmann/3DPopSynthesis
|
6c2206bef0cf0b0dc21aeb6cbda6386a525ffbc7
|
[
"MIT"
] | null | null | null |
Synthesis/post/plot/scatter.py
|
pablorutschmann/3DPopSynthesis
|
6c2206bef0cf0b0dc21aeb6cbda6386a525ffbc7
|
[
"MIT"
] | null | null | null |
Synthesis/post/plot/scatter.py
|
pablorutschmann/3DPopSynthesis
|
6c2206bef0cf0b0dc21aeb6cbda6386a525ffbc7
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib import colors
from matplotlib import patches
import os.path as path
from Synthesis.units import *
from tqdm import tqdm
from scipy.integrate import quad
def Power_Law(x, a, b):
return a * np.power(x, b)
def scatter_parameters(pop):
TotalMasses = []
SigmaCoeffs = []
Reference = []
for sim in pop.SIMS.values():
TotalMasses.append(sim.Total_Mass)
SigmaCoeffs.append(sim.Sigma_Exponent)
print(sim.Sigma_Exponent)
print(sim.Sigma_Norm * (R_S / au)**sim.Sigma_Exponent / denstos)
Reference.append(sim.Sigma_Norm / (R_S / au)**sim.Sigma_Exponent / denstos * pow(au/R_S, sim.Sigma_Exponent) / denstos)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use('seaborn-paper')
plt.rcParams.update({'font.size': pop.fontsize})
cmap = pop.cmap_standart
cmin = min(Reference)
cmax = max(Reference)
norm = colors.LogNorm(cmin, cmax)
fig, ax = plt.subplots(figsize=pop.figsize)
ax.scatter(SigmaCoeffs, TotalMasses, c=Reference, cmap=cmap, norm=norm, s=12)
x_labels = ax.get_xticklabels()
plt.setp(x_labels, horizontalalignment='center')
ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
ax2 = ax.twinx()
mn, mx = ax.get_ylim()
ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
label=r'Reference Value at $1 \mathrm{au}$ [$\mathrm{g}\mathrm{cm}^{-2}$]', ax=ax2, pad=0.12)
# ax.set_yscale('log')
if pop.plot_config == 'presentation':
ax.set(title=r'Synthesis Parameters')
fig.savefig(path.join(pop.PLOT, 'scatter_parameters.png'), transparent=False, dpi=pop.dpi,
bbox_inches="tight")
plt.close(fig)
def scatter_parameters_numbers(pop, m_low_lim=0, a_up_lim=30):
TotalMasses = []
SigmaCoeffs = []
Reference = []
Masses = []
Orb_Dist = []
Numbers = []
Means = []
Systems = []
for id,sim in pop.SIMS.items():
TotalMasses.append(sim.Total_Mass)
SigmaCoeffs.append(sim.Sigma_Exponent)
Masses = list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
Orb_Dist = list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
system = zip(Masses, Orb_Dist)
filtered = [item for item in system if item[0] >= m_low_lim and item[1] <= a_up_lim]
# mean = np.max([item[0] for item in filtered])/np.sum([item[0] for item in filtered])
# Means.append(mean)
Numbers.append(len(filtered))
#Means = np.array(Means) / np.sum(Means)
print(Numbers)
Numbers = np.array(Numbers)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use('seaborn-paper')
plt.rcParams.update({'font.size': pop.fontsize})
cmap = pop.cmap_standart
cmin = min(Numbers)
cmax = max(Numbers)
norm = colors.Normalize(cmin, cmax)
fig, ax = plt.subplots(figsize=pop.figsize)
ax.scatter(SigmaCoeffs, TotalMasses, c=Numbers, cmap=cmap, norm=norm, s=12)
x_labels = ax.get_xticklabels()
plt.setp(x_labels, horizontalalignment='center')
ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
ax2 = ax.twinx()
mn, mx = ax.get_ylim()
ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
label=r'Number of Planets', ax=ax2, pad=0.12)
# ax.set_yscale('log')
if pop.plot_config == 'presentation':
ax.set(title=r'Synthesis Parameters')
fig.savefig(path.join(pop.PLOT, 'scatter_parameters_numbers.png'), transparent=False, dpi=pop.dpi,
bbox_inches="tight")
plt.close(fig)
def scatter_parameters_lost_mass(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of the total mass lost per simulation (satellites
    removed without a recorded collision) against the reference surface
    density at 1 au, colored by the power-law exponent.

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: unused, kept for signature consistency with siblings.
        a_up_lim: unused, kept for signature consistency with siblings.
    """
    SigmaCoeffs = []
    Reference = []
    lost_mass = []
    for sim in pop.SIMS.values():
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # NOTE(review): this expression divides by denstos twice and also
        # multiplies by pow(au/R_S, exp) after dividing by (R_S/au)**exp;
        # the same formula appears elsewhere in this file, so it is kept
        # verbatim — confirm the intended unit conversion.
        Reference.append(sim.Sigma_Norm / (R_S / au)**sim.Sigma_Exponent / denstos * pow(au/R_S, sim.Sigma_Exponent) / denstos)
        masses = sim.lost_satellites['mass'].values * M_S / M_J
        cols = sim.lost_satellites['collision'].values
        # collision == 0.0 marks satellites lost without a collision record.
        lost_mass.append(np.sum(masses[cols == 0.0]))
    lost_mass = np.array(lost_mass)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    # Discrete colormap: one bin per distinct exponent value.
    cmap = plt.get_cmap(pop.cmap_standart, len(SigmaCoeffs))
    norm = colors.BoundaryNorm(np.linspace(-1.625, -0.375, len(np.unique(SigmaCoeffs)) + 1, endpoint=True), cmap.N)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Reference, lost_mass, c=SigmaCoeffs, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(ylabel='Total Lost Mass [$\mathrm{M_J}$]', xlabel=r'Reference Value at 1 $\mathrm{au}$ [$\mathrm{g cm^{-2}}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Power-Law Exponent', ax=ax, ticks=np.unique(SigmaCoeffs))
    ax.set_yscale('log')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_reference_lost_mass.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_AMD(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of the synthesis parameter grid colored by the angular
    momentum deficit (AMD) of each system.

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: lower mass cut forwarded to sim.get_AMD (Earth masses).
        a_up_lim: upper orbital-distance cut forwarded to sim.get_AMD (au).

    Note: the original version divided an empty `Means` list by its zero
    sum (NaN + RuntimeWarning) and printed debug output; both removed.
    """
    TotalMasses = []
    SigmaCoeffs = []
    AMDS = []
    for sim in tqdm(pop.SIMS.values()):
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # AMD restricted to planets passing the mass/distance cuts.
        AMD, N = sim.get_AMD(m_low_lim, a_up_lim)
        AMDS.append(AMD)
    AMDS = np.array(AMDS)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    # Log color scale: AMD values span orders of magnitude.
    norm = colors.LogNorm(AMDS.min(), AMDS.max())
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SigmaCoeffs, TotalMasses, c=AMDS, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
    # Secondary y-axis showing the same disk mass in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'AMD', ax=ax2, pad=0.12)
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_amd.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_RMC(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of the synthesis parameter grid colored by the radial
    mass concentration (RMC) of each system.

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: lower mass cut forwarded to sim.get_RMC (Earth masses).
        a_up_lim: upper orbital-distance cut forwarded to sim.get_RMC (au).

    Note: the original version divided an empty `Means` list by its zero
    sum (NaN + RuntimeWarning) and printed debug output; both removed.
    """
    TotalMasses = []
    SigmaCoeffs = []
    RMCS = []
    for sim in tqdm(pop.SIMS.values()):
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # RMC restricted to planets passing the mass/distance cuts.
        RMC, N = sim.get_RMC(m_low_lim, a_up_lim)
        RMCS.append(RMC)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    # Linear color scale (the saved file is named *_nonlog accordingly).
    norm = colors.Normalize(min(RMCS), max(RMCS))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SigmaCoeffs, TotalMasses, c=RMCS, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
    # Secondary y-axis showing the same disk mass in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'RMC', ax=ax2, pad=0.12)
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_rmc_nonlog.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_collision_number(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of the synthesis parameter grid (power-law exponent
    vs. total disk mass), colored by the number of recorded collisions
    in each simulation.

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: unused, kept for signature consistency with siblings.
        a_up_lim: unused, kept for signature consistency with siblings.
    """
    disk_masses, exponents, collision_counts = [], [], []
    for sim in tqdm(pop.SIMS.values()):
        disk_masses.append(sim.Total_Mass)
        exponents.append(sim.Sigma_Exponent)
        # One row per collision in the simulation's collision table.
        collision_counts.append(len(sim.collisions.index))
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    norm = colors.Normalize(min(collision_counts), max(collision_counts))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(exponents, disk_masses, c=collision_counts, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=exponents)
    # Twin axis re-expresses disk mass in Jupiter masses.
    ax2 = ax.twinx()
    lo, hi = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * lo, M_S / M_J * hi)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Number of Collisions', ax=ax2, pad=0.12)
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_collision_number.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_ecc_inc(pop, m_low_lim=0, a_up_lim=30):
    """Scatter eccentricity against sin(inclination) for all surviving
    satellites across the population, colored by orbital distance.

    Only objects with mass >= m_low_lim (Earth masses) and orbital
    distance <= a_up_lim (au) are included. Also prints the count of
    embryos whose mass stayed within 1% of 0.01 Earth masses (i.e. no
    significant accretion) among Type-1 objects.
    """
    masses, orb_dist, ecc, inc, obj_types = [], [], [], [], []
    for sim in pop.SIMS.values():
        final = sim.snaps[sim.N_snaps - 1].satellites
        masses.extend(final['M'].values * M_S / M_E)
        orb_dist.extend(final['a'].values * R_S / au)
        ecc.extend(final['e'].values)
        inc.extend(final['i'].values)
        obj_types.extend(final['Type'].values)
    rows = [row for row in zip(masses, orb_dist, ecc, inc, obj_types)
            if row[0] >= m_low_lim and row[1] <= a_up_lim]
    masses, orb_dist, ecc, inc, obj_types = zip(*rows)
    number_of_no_accretion = len([row for row in rows
                                  if np.abs(0.01 - row[0]) / row[0] < 0.01 and row[-1] == 1])
    print(f'Number of Object: {len(masses)}')
    print(f'Number of Embryos with no significant accretion: {number_of_no_accretion}, {number_of_no_accretion/len(masses)}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    norm = colors.LogNorm(min(orb_dist), max(orb_dist))
    fig, ax = plt.subplots(figsize=pop.figsize)
    # Inclination comes in degrees; convert to radians before sin().
    ax.scatter(ecc, np.sin(np.array(inc) / 360 * 2 * np.pi), c=orb_dist, cmap=cmap, norm=norm, s=3)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Eccentricity', ylabel=r'$\sin(\mathrm{inclination})$')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Orbital Distance [$\mathrm{au}$]',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Eccentricity and Inclination')
    save_name = 'scatter_ecc_inc'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
def scatter_a_mass(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of planet mass vs. orbital distance, colored by the
    total water mass fraction (liquid WMF + hydrated-solids SWMF).

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: lower mass cut (Earth masses).
        a_up_lim: upper orbital-distance cut (au).
    """
    Masses = []
    Orb_Dist = []
    WM = []
    SWM = []
    for sim in pop.SIMS.values():
        final = sim.snaps[sim.N_snaps - 1].satellites
        Masses += list(final['M'].values * M_S / M_E)
        Orb_Dist += list(final['a'].values * R_S / au)
        WM += list(final['WM'].values * M_S / M_E)
        SWM += list(final['SWM'].values * M_S / M_E)
    # Apply cuts and convert water masses to fractions of the planet mass.
    data = [(m, a, wm / m, swm / m) for (m, a, wm, swm) in zip(Masses, Orb_Dist, WM, SWM)
            if m >= m_low_lim and a <= a_up_lim]
    Masses, Orb_Dist, WMF, SWMF = zip(*data)
    TWMF = np.array(WMF) + np.array(SWMF)
    print(f'Number of Object: {len(Masses)}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    norm = colors.Normalize(min(TWMF), max(TWMF))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Orb_Dist, Masses, c=TWMF, cmap=cmap, norm=norm, s=2, alpha=1)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel=r'Orbital Distance [$\mathrm{au}$]', ylabel=r'Mass [$\mathrm{M_{\oplus}}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Total WMF',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        # Fixed copy-paste defect: title previously read
        # 'Eccentricity and Inclination' on this mass/distance plot.
        ax.set(title=r'Mass and Orbital Distance')
    save_name = 'scatter_a_mass'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
def scatter_radial_twmf(pop, m_low_lim=0, a_up_lim=30):
    """Analyse water delivery to roughly Earth-mass planets and emit a
    series of histograms and scatter plots.

    Terminology: WMF = liquid water mass fraction, SWMF = hydrated-solids
    water mass fraction, TWMF = WMF + SWMF. Planets are repeatedly
    re-filtered; the names Masses/Orb_Dist/WMF/... are rebound after each
    filter, so statement order matters throughout.

    Args:
        pop: population object providing SIMS, PLOT dir and plot settings.
        m_low_lim: lower mass limit in Earth masses (affects file naming).
        a_up_lim: upper orbital-distance limit in au.

    Side effects: prints population statistics and writes several PNG
    files into pop.PLOT.
    """
    # Gather final-snapshot properties from every simulation, tagging each
    # planet with its simulation key in System.
    Masses = []
    Orb_Dist = []
    WM = []
    SWM = []
    Ecc = []
    System = []
    for key, sim in pop.SIMS.items():
        Masses += list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
        # NOTE(review): eccentricity is dimensionless — the M_S / M_E factor
        # here looks like a copy-paste slip from the mass line. Ecc is carried
        # through the tuples but never plotted, so it is harmless; confirm.
        Ecc += list(sim.snaps[sim.N_snaps - 1].satellites['e'].values * M_S / M_E)
        Orb_Dist += list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
        WM += list(sim.snaps[sim.N_snaps - 1].satellites['WM'].values * M_S / M_E)
        SWM += list(sim.snaps[sim.N_snaps - 1].satellites['SWM'].values * M_S / M_E)
        System += [key for i in sim.snaps[sim.N_snaps - 1].satellites['M'].values]
    # Convert absolute water masses into fractions of the planet mass.
    WMF = np.array(WM) / np.array(Masses)
    SWMF = np.array(SWM) / np.array(Masses)
    TWMF = WMF + SWMF
    total_number = len(Masses)
    print(f'Total Number of planets: {total_number}')
    # Tuple layout used everywhere below:
    # (mass, orb_dist, WMF, SWMF, TWMF, ecc, system_key)
    data = zip(Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System)
    # Keep planets between 0.3 and 3 Earth masses (hard-coded; m_low_lim /
    # a_up_lim are not used for this cut).
    data = [item for item in data if item[0] >= 0.3 and item[0] <= 3]
    mass_lim_number = len(data)
    print(f'Number of planets in mass limit: {mass_lim_number}, {mass_lim_number/total_number}')
    # --- Histogram 1: liquid WMF for planets with WMF > 0 ----------------
    data_copy = data.copy()
    data_wmf = [item for item in data_copy if item[2] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_wmf)
    n_ea_ml_nz_wmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero liquid watermass fraction: {n_ea_ml_nz_wmf}, {n_ea_ml_nz_wmf/mass_lim_number}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    N_bins = 15
    # Logarithmically spaced bins spanning the observed WMF range.
    bins = 10 ** np.linspace(np.log10(min(WMF)), np.log10(max(WMF)), N_bins)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(WMF, bins=bins, rwidth=0.95)
    # Reference line: Earth's ocean mass as a fraction of an Earth mass.
    ax.axvline(OE/M_E, color='red', linewidth=1)
    ax.set(xlabel=r'Mass Fraction', ylabel=r'Counts')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Histrogram of Terrestrial Planets Orbital Distances')
    save_name = 'histogram_earth_analogs_wmf'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # --- Histogram 2: WMF for planets with WMF > 0 and SWMF > 0.00075 ----
    data_copy = data.copy()
    data_wmf_lim = [item for item in data_copy if item[2] > 0.0 and item[3] > 0.00075]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_wmf_lim)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    N_bins = 15
    bins = 10 ** np.linspace(np.log10(min(WMF)), np.log10(max(WMF)), N_bins)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(WMF, bins=bins, rwidth=0.95)
    ax.axvline(OE/M_E, color='red', linewidth=1)
    ax.set(xlabel=r'Mass Fraction', ylabel=r'Counts')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Histrogram of Terrestrial Planets Orbital Distances')
    # Base name already ends in _lim; the limit suffix below can double it.
    save_name = 'histogram_earth_analogs_wmf_lim'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # --- Histogram 3: planets with SWMF > 0 and WMF > 0.00025 ------------
    data_copy = data.copy()
    data_swmf_lim = [item for item in data_copy if item[3] > 0.0 and item[2] > 0.00025]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_swmf_lim)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    N_bins = 15
    # NOTE(review): file is named *_swmf_lim but both bins and histogram use
    # WMF here — possibly intended to be SWMF; confirm.
    bins = 10 ** np.linspace(np.log10(min(WMF)), np.log10(max(WMF)), N_bins)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(WMF, bins=bins, rwidth=0.95)
    ax.axvline(OE/M_E, color='red', linewidth=1)
    ax.set(xlabel=r'Mass Fraction', ylabel=r'Counts')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Histrogram of Terrestrial Planets Orbital Distances')
    save_name = 'histogram_earth_analogs_swmf_lim'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # --- Histogram 4: planets with any hydrated-solids water (SWMF > 0) --
    data_copy = data.copy()
    data_swmf = [item for item in data_copy if item[3] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_swmf)
    n_ea_ml_nz_swmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero hydrated solids watermass fraction: {n_ea_ml_nz_swmf}, {n_ea_ml_nz_swmf/mass_lim_number}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    N_bins = 15
    # NOTE(review): bins are built from SWMF but the histogram below plots
    # WMF — likely one of the two should change; confirm intent.
    bins = 10 ** np.linspace(np.log10(min(SWMF)), np.log10(max(SWMF)), N_bins)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(WMF, bins=bins, rwidth=0.95)
    # Reference line at three Earth-ocean masses per Earth mass.
    ax.axvline(3 * OE/M_E, color='red', linewidth=1)
    ax.set(xlabel=r'Mass Fraction', ylabel=r'Counts')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Histrogram of Terrestrial Planets Orbital Distances')
    save_name = 'histogram_earth_analogs_swmf'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # --- Histogram 5: ratio WMF/SWMF for planets with both nonzero -------
    data_copy = data.copy()
    data_twmf = [item for item in data_copy if item[2] > 0.0 and item[3] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_twmf)
    n_ea_ml_nz_twmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero wmf and swmf: {n_ea_ml_nz_twmf}, {n_ea_ml_nz_twmf/mass_lim_number}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    ratios = np.array(WMF)/np.array(SWMF)
    N_bins = 15
    bins = 10 ** np.linspace(np.log10(min(ratios)), np.log10(max(ratios)), N_bins)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(ratios, bins=bins, rwidth=0.95)
    # Reference line at a 1:3 liquid-to-hydrated-solids ratio.
    ax.axvline(1/3, color='red', linewidth=1)
    ax.set(xlabel=r'Ratio', ylabel=r'Counts')
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Histrogram of Terrestrial Planets Orbital Distances')
    save_name = 'histogram_earth_analogs_twmf'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # --- Radial TWMF scatter for planets with any water (TWMF > 0) -------
    data = [item for item in data if item[4] > 0]
    non_zero_wm = data.copy()
    non_zero_wmf_number = len(data)
    print(f'Number of planets in mass limit with non zero TWMF: {non_zero_wmf_number}, {non_zero_wmf_number/mass_lim_number} ({non_zero_wmf_number/total_number})')
    # NOTE(review): earth_analogs is computed but never used below
    # (scatter_pie is called with non_zero_wm instead); confirm.
    earth_analogs = [item for item in data if item[0] >= 0.101 and item[1] <= a_up_lim and item[2] > 0.001]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    # No-op self-assignment kept for byte-fidelity.
    TWMF = TWMF
    cmin = min(TWMF)
    cmax = max(TWMF)
    norm = colors.LogNorm(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Orb_Dist, Masses, c=TWMF, cmap=cmap, norm=norm, s=7, alpha=1)
    # Cross-hairs at 1 au / 1 Earth mass for orientation.
    ax.axvline(1, color='black', linewidth=0.7, linestyle='--')
    ax.axhline(1, color='black', linewidth=0.7, linestyle='--')
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center', )
    ax.set(xlabel='Orbital Distance [$\mathrm{au}$]', ylabel=r'Mass [$\mathrm{M_{\oplus}}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical', label=r'Total WMF', ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Total WMF Radial Distribution')
    save_name = 'scatter_radial_twmf'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    def scatter_pie(earth_analogs):
        """Plot each planet as a two-slice pie marker (liquid water vs.
        hydrated silica share of TWMF) at (orb_dist, TWMF), with Earth's
        reference composition drawn highlighted in green.

        Args:
            earth_analogs: list of the 7-tuples built above.
        """
        plt.rcParams.update({'figure.autolayout': True})
        plt.style.use('seaborn-paper')
        plt.rcParams.update({'font.size': pop.fontsize})
        plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
        fig, ax = plt.subplots(figsize=pop.figsize)
        # NOTE: this local `colors` shadows the matplotlib.colors import
        # inside this closure; it is unused apart from the legend patches.
        colors = ['red', 'blue']
        labels = ['Hydrated Silica', 'Water/Ice']
        red_patch = patches.Patch(color='red', label='Hydrated Silica')
        blue_patch = patches.Patch(color='blue', label='Water/Ice')
        handles = [red_patch, blue_patch]
        Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*earth_analogs)
        # mass_scaling is computed then immediately overwritten with a
        # constant and never read afterwards — leftover experimentation.
        mean_mass = np.min(Masses)
        mass_scaling = mean_mass / 90000
        mass_scaling = 0.000000001
        def pie_1d(r1, r2):
            """Build two pie-slice marker paths splitting the unit circle
            at fraction r1 (r2 is currently unused)."""
            # First wedge: origin plus points on the arc [0, r1).
            x1 = np.cos(2 * np.pi * np.linspace(0, r1))
            y1 = np.sin(2 * np.pi * np.linspace(0, r1))
            xy1 = np.row_stack([[0, 0], np.column_stack([x1, y1])])
            s1 = np.abs(xy1).max()
            # Second wedge: the remaining arc [r1, 1).
            x2 = np.cos(2 * np.pi * np.linspace(r1, 1))
            y2 = np.sin(2 * np.pi * np.linspace(r1, 1))
            xy2 = np.row_stack([[0, 0], np.column_stack([x2, y2])])
            s2 = np.abs(xy2).max()
            return xy1, s1, xy2, s2
        def NormalizeData(m):
            """Map mass m onto [0, 1] logarithmically over the mass range."""
            return (np.log10(m) - np.log10(np.min(Masses))) / (np.log10(np.max(Masses)) - np.log10(np.min(Masses)))
        # Earth's reference tuple in the same 7-field layout.
        earth_point = (1, 1, 0.00025, 0.00075, 0.001, 0, 0)
        def plot_one(row, earth=False):
            """Draw one pie marker at (orb_dist, TWMF); highlight Earth."""
            WMF_ratio = row[2]/row[4]
            SWMF_Ratio = 1  # unused leftover
            xy1, s1, xy2, s2 = pie_1d(WMF_ratio, 1)
            scale = NormalizeData(row[0]) * 50
            if earth == True:
                # Green halo behind the Earth reference marker.
                ax.scatter(row[1], row[4], s=s2 * scale * 2, facecolor='green')
            ax.scatter(row[1], row[4], marker=xy1, s=s1 * scale, facecolor='blue')
            ax.scatter(row[1], row[4], marker=xy2, s=s2 * scale, facecolor='red')
        for index, row in enumerate(earth_analogs):
            plot_one(row)
        plot_one(earth_point, True)
        ax.set_xlabel(r'Orbital Distance [$\mathrm{au}$]')
        ax.set_ylabel('Total Water Mass Fractions')
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.legend(handles=handles, title='Components')
        fig.savefig(path.join(pop.PLOT, 'scatter_ratios.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
        plt.close(fig)
    # filter twmf close to earth (WMF and SWMF at least Earth-like)
    data = [item for item in data if item[2] >= 0.00025 and item[3] >= 0.00075]
    systems_id = [sys[-1] for sys in data]
    print(f'Number of systems with Earth Candidate {len(np.unique(systems_id))}, {len(np.unique(systems_id))/pop.NSIMS} ')
    # Pie plot over ALL non-zero-TWMF planets (pre-Earth-candidate filter).
    scatter_pie(non_zero_wm)
    earth_analogs2 = data.copy()
    wmf_sim_number = len(data)
    print(f'Number of planets in mass limit and WMF above 0.00025 and SWMF above 0.00075: {len(data)}, {wmf_sim_number/mass_lim_number} ({wmf_sim_number/total_number})')
    # --- WMF vs SWMF scatter for the Earth-candidate sample --------------
    ms, obs, wmf, swmf, twmf, ecc, system = zip(*earth_analogs2)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    cmin = min(twmf)
    cmax = max(twmf)
    norm = colors.Normalize(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(wmf, swmf, c=twmf, cmap=cmap, norm=norm, s=2, alpha=1)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel=r'Water Mass Fraction]', ylabel=r'Solids Water Mass Fraction')
    # Threshold lines matching the Earth-candidate filter above.
    ax.axvline(0.00025, color='black', linewidth=0.7, linestyle='--')
    ax.axhline(0.00075, color='black', linewidth=0.7, linestyle='--')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Total WMF',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Eccentricity and Inclination')
    save_name = 'scatter_wmf_swmf'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
    # filter roughly earth mass already roughly in the right positions
    data = [item for item in data if item[1] <= 2]
    earth_like_number = len(data)
    print(f'Number of planets in mass limit and WMF above 0.00025 and SWMF above 0.00075 at correct positions: {earth_like_number}, {earth_like_number/wmf_sim_number} ({earth_like_number/total_number})')
    # --- Parameter-grid scatter for systems hosting Earth candidates -----
    SE = []
    TM = []
    RE = []
    for id in np.unique([sys[-1] for sys in earth_analogs2]):
        SE.append(pop.SIMS[id].Sigma_Exponent)
        TM.append(pop.SIMS[id].Total_Mass)
        # Same reference-density formula used elsewhere in this file.
        RE.append(pop.SIMS[id].Sigma_Norm / (R_S / au)**pop.SIMS[id].Sigma_Exponent / denstos * pow(au/R_S, pop.SIMS[id].Sigma_Exponent) / denstos)
    SE = np.array(SE)
    TM = np.array(TM)
    cmap = pop.cmap_standart
    cmin = min(RE)
    cmax = max(RE)
    norm = colors.LogNorm(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SE, TM, c=RE, cmap=cmap, norm=norm, s=12)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SE)
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Reference Value at $1 \mathrm{au}$ [$\mathrm{g}\mathrm{cm}^{-2}$]', ax=ax2, pad=0.12)
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_earth_analogs.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
| 39.875769
| 204
| 0.639162
| 4,895
| 32,419
| 4.088458
| 0.067416
| 0.01574
| 0.03143
| 0.019188
| 0.81467
| 0.794034
| 0.766902
| 0.741218
| 0.716884
| 0.698846
| 0
| 0.020237
| 0.202813
| 32,419
| 812
| 205
| 39.924877
| 0.75414
| 0.087109
| 0
| 0.624374
| 0
| 0.011686
| 0.165379
| 0.036721
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023372
| false
| 0
| 0.015025
| 0.003339
| 0.043406
| 0.028381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3b805a6cb67f708da9ca514771665167455f862c
| 133
|
py
|
Python
|
didcomm/core/__init__.py
|
alex-polosky/didcomm-python
|
955866024c9f6191df9c5a898cc77e1979781eae
|
[
"Apache-2.0"
] | 8
|
2021-09-04T19:28:18.000Z
|
2021-12-22T16:00:18.000Z
|
didcomm/core/__init__.py
|
alex-polosky/didcomm-python
|
955866024c9f6191df9c5a898cc77e1979781eae
|
[
"Apache-2.0"
] | 4
|
2021-07-27T23:44:33.000Z
|
2021-10-13T13:29:39.000Z
|
didcomm/core/__init__.py
|
alex-polosky/didcomm-python
|
955866024c9f6191df9c5a898cc77e1979781eae
|
[
"Apache-2.0"
] | 7
|
2021-07-22T08:19:13.000Z
|
2022-01-04T14:46:38.000Z
|
from authlib.jose import JsonWebEncryption
from authlib.jose.drafts import register_jwe_draft
register_jwe_draft(JsonWebEncryption)
| 26.6
| 50
| 0.887218
| 17
| 133
| 6.705882
| 0.529412
| 0.192982
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075188
| 133
| 4
| 51
| 33.25
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ea32f683004392b4d0a0eea3cf7f5934c30cd32
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/clikit/io/__init__.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/clikit/io/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/clikit/io/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/08/57/cc/d62b02ab43b1de37202b2fed8b7e8b8c420a6fe582a902d5e0493984fe
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ede3657a816cc37e5324bdb902a8687cc5cee73
| 290
|
py
|
Python
|
NCM/visualization/visualization.py
|
az10az/example_package
|
d21998069e947e41a71d2847db15d45b74a12e38
|
[
"MIT"
] | null | null | null |
NCM/visualization/visualization.py
|
az10az/example_package
|
d21998069e947e41a71d2847db15d45b74a12e38
|
[
"MIT"
] | null | null | null |
NCM/visualization/visualization.py
|
az10az/example_package
|
d21998069e947e41a71d2847db15d45b74a12e38
|
[
"MIT"
] | null | null | null |
def visualization():
"""Extract final adjust process values from BHDS tables.
Args:
pri_df {pyspark.sql.dataframe.DataFrame} -- Primary BHDS data
Returns:
[pyspark.sql.dataframe.DataFrame] -- Dataframe with features.
"""
print('visualizing data....')
| 24.166667
| 69
| 0.655172
| 31
| 290
| 6.096774
| 0.741935
| 0.285714
| 0.201058
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224138
| 290
| 11
| 70
| 26.363636
| 0.84
| 0.696552
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8ee83116db9a85c85009bca999166dce54bb7241
| 97
|
py
|
Python
|
terrascript/mysql/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/mysql/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/mysql/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/mysql/__init__.py
import terrascript
class mysql(terrascript.Provider):
pass
| 13.857143
| 34
| 0.783505
| 11
| 97
| 6.545455
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134021
| 97
| 6
| 35
| 16.166667
| 0.857143
| 0.298969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d906806bd6c9b301631f68150d3e607d3168058d
| 21,974
|
py
|
Python
|
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/test_compiler.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2016/09/15
# copy: (C) Copyright 2016-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import unittest
import os
import textwrap
from aadict import aadict
import fso
#------------------------------------------------------------------------------
class TestCompiler(unittest.TestCase):
maxDiff = None
#----------------------------------------------------------------------------
def test_fragments(self):
import jstc.compiler
compiler = jstc.compiler.Compiler()
hooks = aadict(name_transform=compiler._name_transform)
self.assertEqual(
list(compiler.fragments('foo/bar.jst', '', 'i am a template.', hooks)),
[('i am a template.', aadict(name='foo/bar', type='.jst'))])
self.assertEqual(
list(compiler.fragments('foo/bar.jst', '', '''\
##! zig
i am the zig template.
##! __here__
i am the root template.
''', hooks)),
[
(' i am the zig template.\n', aadict(name='foo/bar/zig', type='.jst')),
(' i am the root template.\n', aadict(name='foo/bar', type='.jst')),
])
  #----------------------------------------------------------------------------
  def test_attributes(self):
    '''Fragment headers parse `;`-separated attributes: `key: value`, bare flags, and `!`-negated flags.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler()
    hooks = aadict(name_transform=compiler._name_transform)
    self.assertEqual(
      list(compiler.fragments('foo/bar.jst', '', '''\
##! zig; channels: "public,protected"
i am the zig template.
##! __here__; public; protected
i am the root template.
##! zag; type: text/jst; !public; !protected
i am the zag template.
''', hooks)),
      [
        # quoted values are unquoted; bare flags become True, "!" flags False
        (' i am the zig template.\n', aadict(name='foo/bar/zig', type='.jst', channels='public,protected')),
        (' i am the root template.\n', aadict(name='foo/bar', type='.jst', public=True, protected=True)),
        (' i am the zag template.\n', aadict(name='foo/bar/zag', type='text/jst', public=False, protected=False)),
      ])
#----------------------------------------------------------------------------
def writecontent(self, files, dedent=True):
for name, content in files.items():
path = os.path.join(os.path.dirname(__file__), name)
pdir = os.path.dirname(path)
if not os.path.isdir(pdir):
os.makedirs(pdir)
with open(path, 'wb') as fp:
fp.write(textwrap.dedent(content))
  #----------------------------------------------------------------------------
  def test_render_simple(self):
    '''Rendering a multi-fragment asset emits one inline <script> tag per template.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    # fso.push() overlays the filesystem so fixture writes are transient
    with fso.push() as overlay:
      self.writecontent({
        'test/common/hello.hbs':
          '''\
##! __here__
Hello, world!
##! name
Hello, {{name}}!
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test/common/hello.hbs', 'test'),
        '''\
<script type="text/x-handlebars" data-template-name="common/hello">Hello, world!</script>\
<script type="text/x-handlebars" data-template-name="common/hello/name">Hello, {{name}}!</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_trim_deprecated(self):
    '''The deprecated `trim`/`!trim` fragment flags still control whitespace handling.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! 0-default
<span>
text
</span>
##! 1-trim; trim
<span>
text
</span>
##! 2-notrim; !trim
<span>
text
</span>
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        # default and `trim` are trimmed; `!trim` keeps surrounding whitespace
        '''\
<script type="text/x-handlebars" data-template-name="test/0-default"><span>
text
</span></script>\
<script type="text/x-handlebars" data-template-name="test/1-trim"><span>
text
</span></script>\
<script type="text/x-handlebars" data-template-name="test/2-notrim"> <span>
text
</span>
</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_default(self):
    '''Whitespace handling of a fragment that declares no `space` attribute.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! default
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        # internal newlines are kept; leading/trailing whitespace is dropped
        '''\
<script type="text/x-handlebars" data-template-name="test/default">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_preserve(self):
    '''`space: preserve` keeps the fragment's whitespace exactly as authored.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! preserve; space: preserve
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        # note the retained leading space and trailing newline
        '''\
<script type="text/x-handlebars" data-template-name="test/preserve"> {{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_trim(self):
    '''`space: trim` strips whitespace surrounding the fragment body.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! trim; space: trim
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        '''\
<script type="text/x-handlebars" data-template-name="test/trim">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_dedent(self):
    '''`space: dedent` removes common leading indentation (and trims the edges).'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! dedent; space: dedent
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        '''\
<script type="text/x-handlebars" data-template-name="test/dedent">{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_collapse_complete(self):
    '''`space: collapse` removes all inter-element whitespace, yielding one line.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! collapse/complete; space: collapse
{{#if value}}
<span>
{{value}}
</span>
{{else}}
<span>default</span>
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        '''\
<script type="text/x-handlebars" data-template-name="test/collapse/complete">{{#if value}}<span>{{value}}</span>{{else}}<span>default</span>{{/if}}</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_collapse_htmlSpace(self):
    '''`space: collapse` with whitespace inside HTML tags (e.g. "<span >").'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! collapse/htmlspace; space: collapse
{{#if value}}
<span >
{{value}}
</span >
{{else}}
<span>default</span >
{{/if}}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        # the tag-internal space survives as a single space after the tag
        '''\
<script type="text/x-handlebars" data-template-name="test/collapse/htmlspace">{{#if value}}<span> {{value}}</span> {{else}}<span>default</span> {{/if}}</script>\
''')
  #----------------------------------------------------------------------------
  def test_render_space_collapse_hbsSpace(self):
    '''`space: collapse` with whitespace inside handlebars expressions (e.g. "{{value }}").'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test.hbs':
          '''\
##! collapse/hbsspace; space: collapse
{{#if value }}
<span>
{{value }}
</span>
{{else }}
<span>default</span>
{{/if }}
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test.hbs'),
        # the expression-internal space survives as a single space after it
        '''\
<script type="text/x-handlebars" data-template-name="test/collapse/hbsspace">{{#if value}} <span>{{value}} </span>{{else}} <span>default</span>{{/if}} </script>\
''')
  #----------------------------------------------------------------------------
  def test_comments(self):
    '''Lines starting with "##" (but not "##!") are comments stripped from output.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test/application.hbs':
          '''\
<div>
## TODO: super-secret comment!
Nothing to see here.
</div>
'''
      })
      self.assertEqual(
        compiler.render_assets('jstc:test/application.hbs', 'test'),
        '''\
<script type="text/x-handlebars" data-template-name="application"><div>
Nothing to see here.
</div>\
</script>\
''')
  #----------------------------------------------------------------------------
  def test_root(self):
    '''Template names are the asset paths made relative to the given root.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test/one/template.hbs': 'template "one".',
        'test/two/template.hbs': 'template "two".',
      })
      self.assertEqual(
        compiler.render_assets('jstc:test/one/template.hbs', 'test/one'),
        '''\
<script type="text/x-handlebars" data-template-name="template">template "one".</script>\
''')
      self.assertEqual(
        compiler.render_assets('jstc:test/two/template.hbs', 'test/two'),
        '''\
<script type="text/x-handlebars" data-template-name="template">template "two".</script>\
''')
      # a shallower root keeps the distinguishing path components
      self.assertEqual(
        compiler.render_assets(
          ['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'], 'test'),
        '''\
<script type="text/x-handlebars" data-template-name="one/template">template "one".</script>\
<script type="text/x-handlebars" data-template-name="two/template">template "two".</script>\
''')
  #----------------------------------------------------------------------------
  def test_collision_error(self):
    '''Two assets mapping to the same template name raise TemplateCollision by default.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      overrides=dict(inline=True, precompile=False))
    with fso.push() as overlay:
      self.writecontent({
        'test/one/template.hbs': 'template "one".',
        'test/two/template.hbs': 'template "two".',
      })
      # per-asset roots strip the distinguishing path, forcing the collision
      with self.assertRaises(jstc.TemplateCollision) as cm:
        compiler.render_assets(
          ['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
          ['test/one', 'test/two'])
      self.assertEqual(
        str(cm.exception),
        ''''text/x-handlebars' template 'template' is already defined''')
  #----------------------------------------------------------------------------
  def test_collision_ignore(self):
    '''With default `collision: ignore`, the first colliding definition wins.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      defaults=dict(collision='ignore'),
      overrides=dict(inline=True, precompile=False),
    )
    with fso.push() as overlay:
      self.writecontent({
        'test/one/template.hbs': 'template "one".',
        'test/two/template.hbs': 'template "two".',
      })
      self.assertEqual(
        compiler.render_assets(
          ['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
          ['test/one', 'test/two']),
        '''\
<script type="text/x-handlebars" data-template-name="template">template "one".</script>\
''')
  #----------------------------------------------------------------------------
  def test_collision_override(self):
    '''With default `collision: override`, the last colliding definition wins.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      defaults=dict(collision='override'),
      overrides=dict(inline=True, precompile=False),
    )
    with fso.push() as overlay:
      self.writecontent({
        'test/one/template.hbs': 'template "one".',
        'test/two/template.hbs': 'template "two".',
      })
      self.assertEqual(
        compiler.render_assets(
          ['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
          ['test/one', 'test/two']),
        '''\
<script type="text/x-handlebars" data-template-name="template">template "two".</script>\
''')
  #----------------------------------------------------------------------------
  def test_collision_pertemplate(self):
    '''A per-fragment `collision` attribute overrides the compiler-wide default.'''
    import jstc.compiler
    compiler = jstc.compiler.Compiler(
      defaults=dict(collision='ignore'),
      overrides=dict(inline=True, precompile=False),
    )
    with fso.push() as overlay:
      self.writecontent({
        'test/one/template.hbs':
          '''\
##! a
template "one/a".
##! b
template "one/b".
''',
        'test/two/template.hbs':
          '''\
##! a; collision: ignore
template "two/a".
##! b; collision: override
template "two/b".
''',
      })
      self.assertEqual(
        compiler.render_assets(
          ['jstc:test/one/template.hbs', 'jstc:test/two/template.hbs'],
          ['test/one', 'test/two']),
        # "a" is ignored (first wins); "b" overrides (second wins)
        '''\
<script type="text/x-handlebars" data-template-name="template/a">template "one/a".</script>\
<script type="text/x-handlebars" data-template-name="template/b">template "two/b".</script>\
''')
  #----------------------------------------------------------------------------
  def test_precompile(self):
    '''Precompilation emits a single text/javascript <script> registering all templates.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello, world!',
        'test/hello/name.hbs': 'hello, {{name}}!',
      })
      compiled = jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=True)
      # fallback to raw x-handlebars output means the external `handlebars`
      # binary is missing — skip rather than fail
      if 'text/x-handlebars' in compiled:
        raise unittest.SkipTest(
          'handlebars executable not available (use "npm install handlebars")')
      self.assertMultiLineEqual(
        compiled,
        '''\
<script type="text/javascript" >(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["hello"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true});ts["hello/name"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "hello, "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)))
+ "!";
},"useData":true});})();</script>''')
  #----------------------------------------------------------------------------
  def test_asset_filter(self):
    '''`asset_filter` selects which asset files get rendered, by asset name.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello!',
        'test/goodbye.hbs': 'so long!',
      })
      # no filter: everything is rendered (sorted by name)
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False),
        '''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
      self.assertEqual(
        jstc.render_assets(
          'jstc:test/**.hbs', force_inline=True, force_precompile=False,
          asset_filter=lambda name: name == 'test/hello.hbs'),
        '''\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
          asset_filter=lambda name: name != 'test/hello.hbs'),
        '''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
''')
  #----------------------------------------------------------------------------
  def test_name_transform(self):
    '''`name_transform(name, root)` rewrites both the template name and its MIME type.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello!',
        'test/goodbye.hbs': 'so long!',
      })
      def mynt(name, root):
        # drop the first two chars, mangle "d" -> "d-", strip the extension
        return (name[2:].replace('d', 'd-').split('.')[0], 'text/x-mustache')
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
          name_transform=mynt),
        '''\
<script type="text/x-mustache" data-template-name="st/good-bye">so long!</script>\
<script type="text/x-mustache" data-template-name="st/hello">hello!</script>\
''')
  #----------------------------------------------------------------------------
  def test_template_transform(self):
    '''`template_transform(text, attrs)` may rewrite the text and attributes per template.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello!',
        'test/goodbye.hbs': 'so long!',
      })
      def mytt(text, attrs):
        if attrs.name == 'hello':
          text = 'hello, world!'
          attrs.id = 'HW'
        else:
          attrs.type = 'template/jst'
        return (text, attrs)
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
          template_transform=mytt),
        # extra attributes (id) surface on the <script> tag
        '''\
<script type="template/jst" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello" id="HW">hello, world!</script>\
''')
  #----------------------------------------------------------------------------
  def test_template_filter(self):
    '''`template_filter(text, attrs)` drops individual fragments when it returns False.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello!',
        'test/goodbye.hbs': '''\
##! __here__
so long!
##! friend
ciao!
'''
      })
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False),
        '''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="goodbye/friend">ciao!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
      self.assertEqual(
        jstc.render_assets('jstc:test/**.hbs', force_inline=True, force_precompile=False,
          template_filter=lambda text, attrs: 'ciao' not in text),
        '''\
<script type="text/x-handlebars" data-template-name="goodbye">so long!</script>\
<script type="text/x-handlebars" data-template-name="hello">hello!</script>\
''')
  #----------------------------------------------------------------------------
  def test_script_wrapper(self):
    '''`script_wrapper` replaces the default <script> wrapping of precompiled output.'''
    import jstc
    with fso.push() as overlay:
      self.writecontent({
        'test/hello.hbs': 'hello, world!',
        'test/hello/name.hbs': 'hello, {{name}}!',
      })
      compiled = jstc.render_assets(
        'jstc:test/**.hbs', force_inline=True, force_precompile=True,
        script_wrapper = lambda script, *args, **kw: '<SCRIPT>' + script + '</SCRIPT>')
      # fallback output means the `handlebars` binary is missing — skip
      if 'text/x-handlebars' in compiled:
        raise unittest.SkipTest(
          'handlebars executable not available (use "npm install handlebars")')
      self.assertMultiLineEqual(
        compiled,
        '''\
<SCRIPT>(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["hello"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true});ts["hello/name"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "hello, "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)))
+ "!";
},"useData":true});})();</SCRIPT>''')
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| 34.990446
| 276
| 0.51388
| 2,175
| 21,974
| 5.132874
| 0.095172
| 0.016571
| 0.06091
| 0.044339
| 0.844231
| 0.826585
| 0.814493
| 0.79631
| 0.792458
| 0.770244
| 0
| 0.002993
| 0.224538
| 21,974
| 627
| 277
| 35.046252
| 0.652171
| 0.109129
| 0
| 0.649275
| 0
| 0
| 0.197872
| 0.050553
| 0
| 0
| 0
| 0.001595
| 0.086957
| 1
| 0.075362
| false
| 0
| 0.081159
| 0.002899
| 0.168116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d92ecb84da9cb8b48f3870622cd21c4d6d2bd473
| 211
|
py
|
Python
|
payjp/account.py
|
wozozo/payjp-python
|
98c07d402c89bfba73009bd1de574ca17dc1f6e2
|
[
"MIT"
] | null | null | null |
payjp/account.py
|
wozozo/payjp-python
|
98c07d402c89bfba73009bd1de574ca17dc1f6e2
|
[
"MIT"
] | null | null | null |
payjp/account.py
|
wozozo/payjp-python
|
98c07d402c89bfba73009bd1de574ca17dc1f6e2
|
[
"MIT"
] | null | null | null |
class Account:
    """Thin client for the PAY.JP ``accounts`` resource.

    Delegates all HTTP work to the injected ``requestor`` object; this
    class only knows the resource path.
    """

    # URL path segment for this resource
    resource = 'accounts'

    def __init__(self, requestor):
        # `requestor` must expose request(method, path)
        self.requestor = requestor

    def retrieve(self):
        """Fetch the account via ``GET accounts`` and return the response."""
        return self.requestor.request('GET', '{}'.format(self.resource))
| 21.1
| 72
| 0.64455
| 22
| 211
| 6
| 0.590909
| 0.295455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222749
| 211
| 9
| 73
| 23.444444
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0.061611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d977c8ed87a80746c717a179a589834e7156fccd
| 69
|
py
|
Python
|
attax/__init__.py
|
jonasrauber/attax
|
952f2500c7ed675ba0f547570053f7163e572566
|
[
"MIT"
] | 2
|
2020-02-10T23:24:26.000Z
|
2021-09-27T01:45:04.000Z
|
attax/__init__.py
|
jonasrauber/attax
|
952f2500c7ed675ba0f547570053f7163e572566
|
[
"MIT"
] | null | null | null |
attax/__init__.py
|
jonasrauber/attax
|
952f2500c7ed675ba0f547570053f7163e572566
|
[
"MIT"
] | null | null | null |
from . import utils # noqa: F401
from .pgd import pgd # noqa: F401
| 23
| 34
| 0.681159
| 11
| 69
| 4.272727
| 0.545455
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 0.231884
| 69
| 2
| 35
| 34.5
| 0.773585
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79c4f164992ac85743973dd21a74e08e5fc6f055
| 8,485
|
py
|
Python
|
tests/test_hashes.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | 2
|
2015-07-17T06:45:10.000Z
|
2015-11-08T11:42:37.000Z
|
tests/test_hashes.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | null | null | null |
tests/test_hashes.py
|
methane/pip
|
ee44e13716cb8dad3b52f0ab222eb2c7ce107e48
|
[
"MIT"
] | null | null | null |
import os
from nose.tools import assert_raises
from pip.download import _get_hash_from_file, _check_hash
from pip.exceptions import InstallationError
from pip.index import Link
def test_get_hash_from_file_md5():
    """An ``#md5=`` URL fragment selects an MD5 hasher (digest_size 16)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 16
    assert download_hash.hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
def test_get_hash_from_file_sha1():
    """A ``#sha1=`` URL fragment selects a SHA-1 hasher (digest_size 20)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 20
    assert download_hash.hexdigest() == "da39a3ee5e6b4b0d3255bfef95601890afd80709"
def test_get_hash_from_file_sha224():
    """A ``#sha224=`` URL fragment selects a SHA-224 hasher (digest_size 28)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha224=d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 28
    assert download_hash.hexdigest() == "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f"
def test_get_hash_from_file_sha384():
    """A ``#sha384=`` URL fragment selects a SHA-384 hasher (digest_size 48)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha384=38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 48
    assert download_hash.hexdigest() == "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b"
def test_get_hash_from_file_sha256():
    """A ``#sha256=`` URL fragment selects a SHA-256 hasher (digest_size 32)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 32
    assert download_hash.hexdigest() == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
def test_get_hash_from_file_sha512():
    """A ``#sha512=`` URL fragment selects a SHA-512 hasher (digest_size 64)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha512=cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash.digest_size == 64
    assert download_hash.hexdigest() == "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
def test_get_hash_from_file_unknown():
    """An unrecognized hash name in the URL fragment yields no hasher (None)."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#unknown_hash=d41d8cd98f00b204e9800998ecf8427e")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert download_hash is None
def test_check_hash_md5_valid():
    """_check_hash passes when the link's md5 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_md5_invalid():
    """_check_hash raises InstallationError on an md5 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#md5=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
def test_check_hash_sha1_valid():
    """_check_hash passes when the link's sha1 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_sha1_invalid():
    """_check_hash raises InstallationError on a sha1 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha1=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
def test_check_hash_sha224_valid():
    """_check_hash passes when the link's sha224 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    # bugfix: the fragment previously ended with a stray apostrophe
    # ("...e42f'"), which is not valid hex and differs from every sibling
    # *_valid test; drop it so the URL carries the exact sha224 digest.
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha224=d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_sha224_invalid():
    """_check_hash raises InstallationError on a sha224 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha224=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
def test_check_hash_sha384_valid():
    """_check_hash passes when the link's sha384 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha384=38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_sha384_invalid():
    """_check_hash raises InstallationError on a sha384 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha384=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
def test_check_hash_sha256_valid():
    """_check_hash passes when the link's sha256 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_sha256_invalid():
    """_check_hash raises InstallationError on a sha256 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha256=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
def test_check_hash_sha512_valid():
    """_check_hash passes when the link's sha512 fragment matches the file digest."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha512=cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
    download_hash = _get_hash_from_file(file_path, file_link)
    _check_hash(download_hash, file_link)
def test_check_hash_sha512_invalid():
    """_check_hash raises InstallationError on a sha512 fragment mismatch."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha512=deadbeef")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, file_link)
# NOTE(review): the function name misspells "mismatch"; kept as-is since the
# name is this block's public identifier.
def test_check_hasher_mismsatch():
    """A hasher built for one hash type fails _check_hash against a link of another type."""
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "packages", "gmpy-1.15.tar.gz")
    file_link = Link("http://testserver/gmpy-1.15.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e")
    other_link = Link("http://testserver/gmpy-1.15.tar.gz#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    download_hash = _get_hash_from_file(file_path, file_link)
    assert_raises(InstallationError, _check_hash, download_hash, other_link)
| 43.512821
| 194
| 0.781025
| 1,137
| 8,485
| 5.447669
| 0.05365
| 0.058121
| 0.046335
| 0.066193
| 0.867614
| 0.864062
| 0.839199
| 0.839199
| 0.839199
| 0.839199
| 0
| 0.14241
| 0.097113
| 8,485
| 194
| 195
| 43.737113
| 0.666101
| 0
| 0
| 0.5625
| 0
| 0
| 0.327048
| 0.049028
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.178571
| false
| 0
| 0.044643
| 0
| 0.223214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
79c7a5c22851f98b8b7cd8a604bd7923c63fb737
| 41
|
py
|
Python
|
kujenga/__init__.py
|
macd/kujenga
|
27906d274565966a8da5918bed04ece8d3b9d70e
|
[
"MIT"
] | 3
|
2017-08-06T19:16:57.000Z
|
2017-08-17T15:50:35.000Z
|
kujenga/__init__.py
|
macd/kujenga
|
27906d274565966a8da5918bed04ece8d3b9d70e
|
[
"MIT"
] | null | null | null |
kujenga/__init__.py
|
macd/kujenga
|
27906d274565966a8da5918bed04ece8d3b9d70e
|
[
"MIT"
] | null | null | null |
from kujenga.kujenga import create_image
| 20.5
| 40
| 0.878049
| 6
| 41
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79d6bfc4185b9cad1df724cc33193310ee5ee478
| 3,581
|
py
|
Python
|
att_app/models.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 3
|
2019-07-05T16:03:39.000Z
|
2019-11-06T07:20:29.000Z
|
att_app/models.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 6
|
2020-06-05T17:53:31.000Z
|
2021-09-07T23:50:09.000Z
|
att_app/models.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 3
|
2018-04-30T15:09:04.000Z
|
2018-12-15T12:45:14.000Z
|
from django.db import models
import login
from django.conf import settings
# Create your models here.
class Student_Details(models.Model):
    """Profile data for a student; each row references a non-staff auth user."""
    # FK restricted to non-staff users, i.e. student accounts
    st_id = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,limit_choices_to={'is_staff': False})
    first_name=models.CharField(max_length=50, help_text="Enter the First-Name",verbose_name="First Name",null=True)
    last_name=models.CharField(max_length=50, help_text="Enter the Last Name",verbose_name="Last Name",null=True)
    dob=models.DateField(max_length=8,help_text="Enter Date of Birth",verbose_name="Date of Birth",null=True)
    address=models.CharField(max_length=50, help_text="Enter the Address",verbose_name="Address",null=True)
    g_name=models.CharField(max_length=50, help_text="Enter the Student Guardian Name",verbose_name="Guardian Name",null=True)
    phone=models.CharField(max_length=15, help_text="Enter Guardian Number",verbose_name="Guardian Phone",null=True)
    s_class=models.CharField(max_length=1, help_text="Enter Student Class",verbose_name="Student Class",null=True)
    sec=models.CharField(max_length=1, help_text="Enter Student Section",verbose_name="Student Section",null=True)
    gender=models.CharField(max_length=1, help_text="Enter Student Gender(M/F/T)",verbose_name="Student Gender",null=True,blank=True)
    email = models.EmailField(max_length=70,help_text="Enter Email",verbose_name="Email",blank=True,null=True,unique=True)
    def __str__(self):
        # display by the linked user reference
        return str(self.st_id)
class Teacher_Details(models.Model):
    """Profile data for a teacher; each row references a staff auth user."""
    # FK restricted to staff users, i.e. teacher accounts
    t_id=models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,limit_choices_to={'is_staff': True})
    first_name=models.CharField(max_length=50, help_text="Enter the First-Name",verbose_name="First Name",null=True)
    last_name=models.CharField(max_length=50, help_text="Enter the Last Name",verbose_name="Last Name",null=True)
    dob=models.DateField(max_length=8,help_text="Enter Date of Birth",verbose_name="Date of Birth",null=True)
    address=models.CharField(max_length=50, help_text="Enter the Address",verbose_name="Address",null=True)
    phone=models.CharField(max_length=15, help_text="Enter Phone Number",verbose_name="Phone No",null=True)
    gender=models.CharField(max_length=1, help_text="Enter Teacher Gender(M/F/T)",verbose_name="Teacher Gender",null=True,blank=True)
    email = models.EmailField(max_length=70,help_text="Enter Email",verbose_name="Email",blank=True,null=True,unique=True)
    def __str__(self):
        # display by the linked user reference
        return str(self.t_id)
class Student_Attendance(models.Model):
    """One attendance record (date, in/out times, duration, status) for a student user."""
    # FK to the auth user; choices limited to non-staff (student) accounts.
    st_id = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,limit_choices_to={'is_staff': False})
    # NOTE(review): date/time/duration are stored as free-form CharFields, not
    # Date/Time fields — format is enforced by the callers, not the schema.
    date=models.CharField(max_length=15, help_text="Enter the Date",verbose_name="Date",null=True)
    in_time= models.CharField(max_length=15, help_text="Enter the IN Time",verbose_name="IN Time",null=True,blank=True)
    out_time=models.CharField(max_length=15, help_text="Enter the OUT Time",verbose_name="OUT Time",null=True,blank=True)
    duration=models.CharField(max_length=15, help_text="Enter the Duration",verbose_name="Duration",null=True,blank=True)
    # Single-character status code.
    status = models.CharField(max_length=1, help_text="Enter the Status",verbose_name="Student Status",null=True)
    def __str__(self):
        """Return '<user id> <date>' as the display string."""
        return (str(self.st_id)+' '+str(self.date))
class Token(models.Model):
    """A token string associated with an auth user."""
    # Owning user; token rows are deleted with the user (CASCADE).
    uid = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    token= models.CharField(max_length=300,help_text="Enter the token",verbose_name="Token",null=True)
    def __str__(self):
        """Return the owning user id as the display string."""
        return str(self.uid)
| 81.386364
| 133
| 0.774365
| 560
| 3,581
| 4.732143
| 0.148214
| 0.078113
| 0.11283
| 0.172075
| 0.753962
| 0.738113
| 0.723019
| 0.723019
| 0.695472
| 0.613962
| 0
| 0.0123
| 0.091874
| 3,581
| 43
| 134
| 83.27907
| 0.802583
| 0.006702
| 0
| 0.380952
| 0
| 0
| 0.192405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.071429
| 0.095238
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
8ddc428193e23a4f5ee1196f2563ccc620e8d649
| 11,760
|
py
|
Python
|
python-client/swagger_client/__init__.py
|
tamalsaha/kube-openapi-generator
|
6607d1e208965e3a09a0ee6d1f2de7e462939150
|
[
"Apache-2.0"
] | 3
|
2018-04-23T09:07:04.000Z
|
2019-09-27T10:25:29.000Z
|
python-client/swagger_client/__init__.py
|
tamalsaha/kube-openapi-generator
|
6607d1e208965e3a09a0ee6d1f2de7e462939150
|
[
"Apache-2.0"
] | 2
|
2018-04-09T09:00:17.000Z
|
2021-03-01T11:23:11.000Z
|
python-client/swagger_client/__init__.py
|
tamalsaha/kube-openapi-generator
|
6607d1e208965e3a09a0ee6d1f2de7e462939150
|
[
"Apache-2.0"
] | 2
|
2018-12-12T11:43:54.000Z
|
2019-06-29T12:15:07.000Z
|
# coding: utf-8
# flake8: noqa
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.apis_api import ApisApi
from swagger_client.api.repositories_stash_appscode_com_api import RepositoriesStashAppscodeComApi
from swagger_client.api.repositories_stash_appscode_com_v1alpha1_api import RepositoriesStashAppscodeComV1alpha1Api
from swagger_client.api.stash_appscode_com_api import StashAppscodeComApi
from swagger_client.api.stash_appscode_com_v1alpha1_api import StashAppscodeComV1alpha1Api
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.com_github_appscode_stash_apis_repositories_v1alpha1_snapshot import ComGithubAppscodeStashApisRepositoriesV1alpha1Snapshot
from swagger_client.models.com_github_appscode_stash_apis_repositories_v1alpha1_snapshot_list import ComGithubAppscodeStashApisRepositoriesV1alpha1SnapshotList
from swagger_client.models.com_github_appscode_stash_apis_repositories_v1alpha1_snapshot_status import ComGithubAppscodeStashApisRepositoriesV1alpha1SnapshotStatus
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_azure_spec import ComGithubAppscodeStashApisStashV1alpha1AzureSpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_b2_spec import ComGithubAppscodeStashApisStashV1alpha1B2Spec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_backend import ComGithubAppscodeStashApisStashV1alpha1Backend
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_file_group import ComGithubAppscodeStashApisStashV1alpha1FileGroup
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_gcs_spec import ComGithubAppscodeStashApisStashV1alpha1GCSSpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_local_spec import ComGithubAppscodeStashApisStashV1alpha1LocalSpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_local_typed_reference import ComGithubAppscodeStashApisStashV1alpha1LocalTypedReference
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_recovery import ComGithubAppscodeStashApisStashV1alpha1Recovery
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_recovery_list import ComGithubAppscodeStashApisStashV1alpha1RecoveryList
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_recovery_spec import ComGithubAppscodeStashApisStashV1alpha1RecoverySpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_recovery_status import ComGithubAppscodeStashApisStashV1alpha1RecoveryStatus
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_repository import ComGithubAppscodeStashApisStashV1alpha1Repository
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_repository_list import ComGithubAppscodeStashApisStashV1alpha1RepositoryList
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_repository_spec import ComGithubAppscodeStashApisStashV1alpha1RepositorySpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_repository_status import ComGithubAppscodeStashApisStashV1alpha1RepositoryStatus
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_restic import ComGithubAppscodeStashApisStashV1alpha1Restic
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_restic_list import ComGithubAppscodeStashApisStashV1alpha1ResticList
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_restic_spec import ComGithubAppscodeStashApisStashV1alpha1ResticSpec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_restore_stats import ComGithubAppscodeStashApisStashV1alpha1RestoreStats
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_retention_policy import ComGithubAppscodeStashApisStashV1alpha1RetentionPolicy
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_s3_spec import ComGithubAppscodeStashApisStashV1alpha1S3Spec
from swagger_client.models.com_github_appscode_stash_apis_stash_v1alpha1_swift_spec import ComGithubAppscodeStashApisStashV1alpha1SwiftSpec
from swagger_client.models.io_k8s_api_core_v1_aws_elastic_block_store_volume_source import IoK8sApiCoreV1AWSElasticBlockStoreVolumeSource
from swagger_client.models.io_k8s_api_core_v1_azure_disk_volume_source import IoK8sApiCoreV1AzureDiskVolumeSource
from swagger_client.models.io_k8s_api_core_v1_azure_file_volume_source import IoK8sApiCoreV1AzureFileVolumeSource
from swagger_client.models.io_k8s_api_core_v1_ceph_fs_volume_source import IoK8sApiCoreV1CephFSVolumeSource
from swagger_client.models.io_k8s_api_core_v1_cinder_volume_source import IoK8sApiCoreV1CinderVolumeSource
from swagger_client.models.io_k8s_api_core_v1_config_map_projection import IoK8sApiCoreV1ConfigMapProjection
from swagger_client.models.io_k8s_api_core_v1_config_map_volume_source import IoK8sApiCoreV1ConfigMapVolumeSource
from swagger_client.models.io_k8s_api_core_v1_downward_api_projection import IoK8sApiCoreV1DownwardAPIProjection
from swagger_client.models.io_k8s_api_core_v1_downward_api_volume_file import IoK8sApiCoreV1DownwardAPIVolumeFile
from swagger_client.models.io_k8s_api_core_v1_downward_api_volume_source import IoK8sApiCoreV1DownwardAPIVolumeSource
from swagger_client.models.io_k8s_api_core_v1_empty_dir_volume_source import IoK8sApiCoreV1EmptyDirVolumeSource
from swagger_client.models.io_k8s_api_core_v1_fc_volume_source import IoK8sApiCoreV1FCVolumeSource
from swagger_client.models.io_k8s_api_core_v1_flex_volume_source import IoK8sApiCoreV1FlexVolumeSource
from swagger_client.models.io_k8s_api_core_v1_flocker_volume_source import IoK8sApiCoreV1FlockerVolumeSource
from swagger_client.models.io_k8s_api_core_v1_gce_persistent_disk_volume_source import IoK8sApiCoreV1GCEPersistentDiskVolumeSource
from swagger_client.models.io_k8s_api_core_v1_git_repo_volume_source import IoK8sApiCoreV1GitRepoVolumeSource
from swagger_client.models.io_k8s_api_core_v1_glusterfs_volume_source import IoK8sApiCoreV1GlusterfsVolumeSource
from swagger_client.models.io_k8s_api_core_v1_host_path_volume_source import IoK8sApiCoreV1HostPathVolumeSource
from swagger_client.models.io_k8s_api_core_v1_iscsi_volume_source import IoK8sApiCoreV1ISCSIVolumeSource
from swagger_client.models.io_k8s_api_core_v1_key_to_path import IoK8sApiCoreV1KeyToPath
from swagger_client.models.io_k8s_api_core_v1_local_object_reference import IoK8sApiCoreV1LocalObjectReference
from swagger_client.models.io_k8s_api_core_v1_nfs_volume_source import IoK8sApiCoreV1NFSVolumeSource
from swagger_client.models.io_k8s_api_core_v1_object_field_selector import IoK8sApiCoreV1ObjectFieldSelector
from swagger_client.models.io_k8s_api_core_v1_persistent_volume_claim_volume_source import IoK8sApiCoreV1PersistentVolumeClaimVolumeSource
from swagger_client.models.io_k8s_api_core_v1_photon_persistent_disk_volume_source import IoK8sApiCoreV1PhotonPersistentDiskVolumeSource
from swagger_client.models.io_k8s_api_core_v1_portworx_volume_source import IoK8sApiCoreV1PortworxVolumeSource
from swagger_client.models.io_k8s_api_core_v1_projected_volume_source import IoK8sApiCoreV1ProjectedVolumeSource
from swagger_client.models.io_k8s_api_core_v1_quobyte_volume_source import IoK8sApiCoreV1QuobyteVolumeSource
from swagger_client.models.io_k8s_api_core_v1_rbd_volume_source import IoK8sApiCoreV1RBDVolumeSource
from swagger_client.models.io_k8s_api_core_v1_resource_field_selector import IoK8sApiCoreV1ResourceFieldSelector
from swagger_client.models.io_k8s_api_core_v1_resource_requirements import IoK8sApiCoreV1ResourceRequirements
from swagger_client.models.io_k8s_api_core_v1_scale_io_volume_source import IoK8sApiCoreV1ScaleIOVolumeSource
from swagger_client.models.io_k8s_api_core_v1_secret_projection import IoK8sApiCoreV1SecretProjection
from swagger_client.models.io_k8s_api_core_v1_secret_volume_source import IoK8sApiCoreV1SecretVolumeSource
from swagger_client.models.io_k8s_api_core_v1_storage_os_volume_source import IoK8sApiCoreV1StorageOSVolumeSource
from swagger_client.models.io_k8s_api_core_v1_volume_mount import IoK8sApiCoreV1VolumeMount
from swagger_client.models.io_k8s_api_core_v1_volume_projection import IoK8sApiCoreV1VolumeProjection
from swagger_client.models.io_k8s_api_core_v1_vsphere_virtual_disk_volume_source import IoK8sApiCoreV1VsphereVirtualDiskVolumeSource
from swagger_client.models.io_k8s_apimachinery_pkg_api_resource_quantity import IoK8sApimachineryPkgApiResourceQuantity
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_api_group import IoK8sApimachineryPkgApisMetaV1APIGroup
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_api_group_list import IoK8sApimachineryPkgApisMetaV1APIGroupList
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_api_resource import IoK8sApimachineryPkgApisMetaV1APIResource
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_api_resource_list import IoK8sApimachineryPkgApisMetaV1APIResourceList
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_delete_options import IoK8sApimachineryPkgApisMetaV1DeleteOptions
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_group_version_for_discovery import IoK8sApimachineryPkgApisMetaV1GroupVersionForDiscovery
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_initializer import IoK8sApimachineryPkgApisMetaV1Initializer
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_initializers import IoK8sApimachineryPkgApisMetaV1Initializers
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_label_selector import IoK8sApimachineryPkgApisMetaV1LabelSelector
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_label_selector_requirement import IoK8sApimachineryPkgApisMetaV1LabelSelectorRequirement
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_list_meta import IoK8sApimachineryPkgApisMetaV1ListMeta
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_object_meta import IoK8sApimachineryPkgApisMetaV1ObjectMeta
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_owner_reference import IoK8sApimachineryPkgApisMetaV1OwnerReference
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_patch import IoK8sApimachineryPkgApisMetaV1Patch
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_preconditions import IoK8sApimachineryPkgApisMetaV1Preconditions
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_server_address_by_client_cidr import IoK8sApimachineryPkgApisMetaV1ServerAddressByClientCIDR
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_status import IoK8sApimachineryPkgApisMetaV1Status
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_status_cause import IoK8sApimachineryPkgApisMetaV1StatusCause
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_status_details import IoK8sApimachineryPkgApisMetaV1StatusDetails
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_time import IoK8sApimachineryPkgApisMetaV1Time
from swagger_client.models.io_k8s_apimachinery_pkg_apis_meta_v1_watch_event import IoK8sApimachineryPkgApisMetaV1WatchEvent
from swagger_client.models.io_k8s_apimachinery_pkg_runtime_raw_extension import IoK8sApimachineryPkgRuntimeRawExtension
| 102.26087
| 163
| 0.943027
| 1,383
| 11,760
| 7.456255
| 0.164136
| 0.099205
| 0.153317
| 0.191815
| 0.481381
| 0.467223
| 0.459562
| 0.445209
| 0.42756
| 0.337859
| 0
| 0.031648
| 0.038095
| 11,760
| 114
| 164
| 103.157895
| 0.879951
| 0.027721
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
30942aabf8c2e1f864b86deca63301214b973e1b
| 33
|
py
|
Python
|
test_29.py
|
ccie8030/pynet
|
84be459c6cb50a025a801e3d4b9bd237c698776a
|
[
"Apache-2.0"
] | 1
|
2016-01-30T03:36:15.000Z
|
2016-01-30T03:36:15.000Z
|
test_29.py
|
ccie8030/pynet
|
84be459c6cb50a025a801e3d4b9bd237c698776a
|
[
"Apache-2.0"
] | null | null | null |
test_29.py
|
ccie8030/pynet
|
84be459c6cb50a025a801e3d4b9bd237c698776a
|
[
"Apache-2.0"
] | null | null | null |
# Smoke-test script: emit a fixed marker line when executed.
# Fixed: original used the Python 2-only `print` statement, which is a
# SyntaxError under Python 3; print() emits the identical output on both.
print('this is a new test file')
| 16.5
| 32
| 0.69697
| 7
| 33
| 3.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 33
| 1
| 33
| 33
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
30c35727de92a7a31e043a0f7fbb03425e3ba42d
| 6,292
|
py
|
Python
|
tests/testGlobalEM1D_FD_jac_layers.py
|
igotchalk/simpegEM1D
|
8f2233fc86bf26f14fe9c45f28c6b22ff54fafdc
|
[
"MIT"
] | 11
|
2015-04-11T03:35:45.000Z
|
2022-02-26T02:04:18.000Z
|
tests/testGlobalEM1D_FD_jac_layers.py
|
igotchalk/simpegEM1D
|
8f2233fc86bf26f14fe9c45f28c6b22ff54fafdc
|
[
"MIT"
] | 38
|
2018-04-21T23:07:29.000Z
|
2022-01-11T07:22:27.000Z
|
tests/testGlobalEM1D_FD_jac_layers.py
|
igotchalk/simpegEM1D
|
8f2233fc86bf26f14fe9c45f28c6b22ff54fafdc
|
[
"MIT"
] | 13
|
2015-07-15T21:54:33.000Z
|
2021-11-30T09:18:54.000Z
|
from __future__ import print_function
import unittest
import numpy as np
from simpegEM1D import (
GlobalEM1DProblemFD, GlobalEM1DSurveyFD,
get_vertical_discretization_frequency
)
from SimPEG import (
regularization, Inversion, InvProblem,
DataMisfit, Utils, Mesh, Maps, Optimization,
Tests
)
# Fix the global NumPy RNG so the synthetic data and the random vectors used
# in the adjoint tests below are reproducible across runs.
np.random.seed(41)
class GlobalEM1DFD(unittest.TestCase):
    """Derivative and adjoint checks for the frequency-domain global EM1D
    problem on a layered (vertically discretized) conductivity model."""

    def setUp(self, parallel=True):
        """Build a small synthetic FD survey/problem pair plus an inversion.

        parallel: forwarded to GlobalEM1DProblemFD (run with n_cpu=2).
        """
        frequency = np.array([900, 7200, 56000], dtype=float)
        # Vertical cell sizes derived from the sounding frequencies.
        hz = get_vertical_discretization_frequency(
            frequency, sigma_background=1./10.
        )
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        mesh = Mesh.TensorMesh([hx, hz], x0='00')
        # Three-layer conductivity: 1/50 below 25, 1/10 below 50, 1/100 above.
        inds = mesh.gridCC[:, 1] < 25
        inds_1 = mesh.gridCC[:, 1] < 50
        sigma = np.ones(mesh.nC) * 1./100.
        sigma[inds_1] = 1./10.
        sigma[inds] = 1./50.
        sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten()
        # Model is log-conductivity (paired with the ExpMap below).
        mSynth = np.log(sigma_em1d)
        # Receivers/sources 30 above the topography line.
        x = mesh.vectorCCx
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        mapping = Maps.ExpMap(mesh)
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="Hz",
            field_type='secondary',
            topo=topo
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=mapping, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        # Generate synthetic observed data from the true model.
        survey.makeSyntheticData(mSynth)
        # Now set up the problem to do some minimization
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        # Keep handles for the test methods below.
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis

    def test_misfit(self):
        """Check J*v against a numerical derivative of dpred at m0."""
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        # Verify <w, J v> == <J^T w, v> to tight tolerance.
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        """Check the data-misfit derivative numerically at m0."""
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
class GlobalEM1DFD_Height(unittest.TestCase):
    """Derivative and adjoint checks for the FD global EM1D problem when the
    model includes both conductivity and a per-sounding height parameter."""

    def setUp(self, parallel=True):
        """Build a half-space survey/problem with sigma+height wires.

        parallel: forwarded to GlobalEM1DProblemFD (run with n_cpu=2).
        """
        frequency = np.array([900, 7200, 56000], dtype=float)
        # Single vertical cell (half-space; see half_switch=True below).
        hz = np.r_[1.]
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        e = np.ones(n_sounding)
        # Model = [log-conductivity per sounding, height (20) per sounding].
        mSynth = np.r_[e*np.log(1./100.), e*20]
        x = np.arange(n_sounding)
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        # Split the stacked model vector into sigma and h pieces.
        wires = Maps.Wires(('sigma', n_sounding),('h', n_sounding))
        expmap = Maps.ExpMap(nP=n_sounding)
        sigmaMap = expmap * wires.sigma
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="ppm",
            field_type='secondary',
            topo=topo,
            half_switch=True
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=sigmaMap, hMap=wires.h, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        # Generate synthetic observed data from the true model.
        survey.makeSyntheticData(mSynth)
        # Now set up the problem to do some minimization
        mesh = Mesh.TensorMesh([int(n_sounding * 2)])
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        # Keep handles for the test methods below; start derivative checks
        # away from the true model (m0 = 1.2 * mSynth).
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth * 1.2
        self.survey = survey
        self.dmis = dmis

    def test_misfit(self):
        """Check J*v against a numerical derivative of dpred at m0."""
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        # Verify <w, J v> == <J^T w, v> to tight tolerance.
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        """Check the data-misfit derivative numerically at m0."""
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
# Allow running this test module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| 30.692683
| 68
| 0.554673
| 765
| 6,292
| 4.460131
| 0.224837
| 0.021102
| 0.007034
| 0.008793
| 0.744431
| 0.729191
| 0.729191
| 0.729191
| 0.729191
| 0.729191
| 0
| 0.033247
| 0.325969
| 6,292
| 204
| 69
| 30.843137
| 0.77128
| 0.035442
| 0
| 0.674286
| 0
| 0
| 0.011547
| 0
| 0
| 0
| 0
| 0
| 0.034286
| 1
| 0.045714
| false
| 0.08
| 0.028571
| 0
| 0.085714
| 0.017143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
30d0ccf5eb015ef02caaf40aebf3152051cb2f81
| 108
|
py
|
Python
|
model_definitions/cnns/inceptions/inception_encoder.py
|
yyx1994/pytorch.repmet
|
847a2b71fa751e6d381c233df0107a53592d8ce5
|
[
"MIT"
] | 4
|
2019-09-29T08:57:09.000Z
|
2021-04-20T09:36:56.000Z
|
model_definitions/cnns/inceptions/inception_encoder.py
|
xtynwfn/pytorch.repmet
|
847a2b71fa751e6d381c233df0107a53592d8ce5
|
[
"MIT"
] | null | null | null |
model_definitions/cnns/inceptions/inception_encoder.py
|
xtynwfn/pytorch.repmet
|
847a2b71fa751e6d381c233df0107a53592d8ce5
|
[
"MIT"
] | 1
|
2021-04-12T06:58:06.000Z
|
2021-04-12T06:58:06.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.inception import inception_v3
| 27
| 53
| 0.842593
| 18
| 108
| 5
| 0.611111
| 0.244444
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010417
| 0.111111
| 108
| 4
| 53
| 27
| 0.927083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
30d4b8a19a6b78ac07f60bbbb4050d7854b92e3e
| 53
|
py
|
Python
|
test/test_step.py
|
codesaurus97/fruit
|
21f284cdf6ffd0b4484dc8133ef90b06d530b060
|
[
"MIT"
] | null | null | null |
test/test_step.py
|
codesaurus97/fruit
|
21f284cdf6ffd0b4484dc8133ef90b06d530b060
|
[
"MIT"
] | null | null | null |
test/test_step.py
|
codesaurus97/fruit
|
21f284cdf6ffd0b4484dc8133ef90b06d530b060
|
[
"MIT"
] | null | null | null |
from fruit.modules.step import Step
import unittest
| 13.25
| 35
| 0.830189
| 8
| 53
| 5.5
| 0.75
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 3
| 36
| 17.666667
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a50210b57e44eba3016e054032697c13ec5d4d58
| 43
|
py
|
Python
|
fancypages/assets/forms/__init__.py
|
ashiazed/django-fancypages
|
7587bac8f61ed8567f27ffee78c5dbedf039f345
|
[
"BSD-3-Clause"
] | 1
|
2018-05-28T09:50:13.000Z
|
2018-05-28T09:50:13.000Z
|
fancypages/assets/forms/__init__.py
|
ashiazed/django-fancypages
|
7587bac8f61ed8567f27ffee78c5dbedf039f345
|
[
"BSD-3-Clause"
] | null | null | null |
fancypages/assets/forms/__init__.py
|
ashiazed/django-fancypages
|
7587bac8f61ed8567f27ffee78c5dbedf039f345
|
[
"BSD-3-Clause"
] | null | null | null |
from .forms import *
from .fields import *
| 14.333333
| 21
| 0.72093
| 6
| 43
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 22
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eb75f24917ce4a4b667bc3a397b1c676c2316202
| 125
|
py
|
Python
|
node/interface/iblockchainmanager.py
|
tinker-coin/tinker-coin
|
3d599f642f4f49d30ba9bc58316a502e8a325e85
|
[
"MIT"
] | null | null | null |
node/interface/iblockchainmanager.py
|
tinker-coin/tinker-coin
|
3d599f642f4f49d30ba9bc58316a502e8a325e85
|
[
"MIT"
] | null | null | null |
node/interface/iblockchainmanager.py
|
tinker-coin/tinker-coin
|
3d599f642f4f49d30ba9bc58316a502e8a325e85
|
[
"MIT"
] | null | null | null |
import abc
from interface.irunnable import IRunnable
class IBlockchainManager(IRunnable, metaclass=abc.ABCMeta):
    """Abstract interface for blockchain managers.

    Extends IRunnable and currently declares no additional abstract methods;
    it exists as an ABC marker for concrete manager implementations.
    """
    pass
| 15.625
| 59
| 0.808
| 14
| 125
| 7.214286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136
| 125
| 7
| 60
| 17.857143
| 0.935185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ccf30f2a4f17173b44b65d05417b4749811c97cb
| 3,577
|
py
|
Python
|
python/test/graph/directed_graph.py
|
Kinnoo/cs404.1-1
|
0fd2e2fbf02953eb1b2192945ab4107034399a68
|
[
"MIT"
] | null | null | null |
python/test/graph/directed_graph.py
|
Kinnoo/cs404.1-1
|
0fd2e2fbf02953eb1b2192945ab4107034399a68
|
[
"MIT"
] | null | null | null |
python/test/graph/directed_graph.py
|
Kinnoo/cs404.1-1
|
0fd2e2fbf02953eb1b2192945ab4107034399a68
|
[
"MIT"
] | null | null | null |
import unittest
from python.test.util.utilities import Utilities
class DirectedGraphTest(unittest.TestCase):
    """Tests for the small directed graph built by Utilities.small_directed_graph():
    edge direction, in/out degrees, and edge reversal."""

    def setUp(self):
        self.graph = Utilities.small_directed_graph()

    def test_single_edge_direction(self):
        """Each stored edge is traversable only in its forward direction."""
        self.assertTrue(6 in self.graph.adjacent_vertices(0))
        self.assertTrue(2 in self.graph.adjacent_vertices(0))
        self.assertTrue(1 in self.graph.adjacent_vertices(0))
        self.assertTrue(5 in self.graph.adjacent_vertices(0))
        self.assertTrue(5 in self.graph.adjacent_vertices(3))
        self.assertTrue(4 in self.graph.adjacent_vertices(3))
        self.assertTrue(5 in self.graph.adjacent_vertices(4))
        self.assertTrue(6 in self.graph.adjacent_vertices(4))
        self.assertTrue(8 in self.graph.adjacent_vertices(7))
        self.assertTrue(10 in self.graph.adjacent_vertices(9))
        self.assertTrue(11 in self.graph.adjacent_vertices(9))
        # The reverse direction of every edge must be absent.
        self.assertFalse(0 in self.graph.adjacent_vertices(6))
        self.assertFalse(0 in self.graph.adjacent_vertices(2))
        self.assertFalse(0 in self.graph.adjacent_vertices(1))
        self.assertFalse(0 in self.graph.adjacent_vertices(5))
        self.assertFalse(3 in self.graph.adjacent_vertices(5))
        self.assertFalse(3 in self.graph.adjacent_vertices(4))
        self.assertFalse(4 in self.graph.adjacent_vertices(5))
        self.assertFalse(4 in self.graph.adjacent_vertices(6))
        self.assertFalse(7 in self.graph.adjacent_vertices(8))
        self.assertFalse(9 in self.graph.adjacent_vertices(10))
        self.assertFalse(9 in self.graph.adjacent_vertices(11))

    def test_outdegree(self):
        """Out-degree of every vertex matches the fixture's edge list."""
        expected = [4, 0, 0, 2, 2, 0, 0, 1, 0, 2, 0, 0]
        actual = [0] * len(expected)
        for i in range(self.graph.num_vertices()):
            actual[i] = self.graph.outdegree(i)
        self.assertEqual(expected, actual)

    def test_indegree(self):
        """In-degree of every vertex matches the fixture's edge list."""
        expected = [0, 1, 1, 0, 1, 3, 2, 0, 1, 0, 1, 1]
        actual = [0] * len(expected)
        for i in range(self.graph.num_vertices()):
            actual[i] = self.graph.indegree(i)
        self.assertEqual(expected, actual)

    def test_reversal(self):
        """reverse() flips every edge: forward edges disappear, back-edges appear."""
        reverse = self.graph.reverse()
        self.assertTrue(0 in reverse.adjacent_vertices(6))
        self.assertTrue(0 in reverse.adjacent_vertices(2))
        self.assertTrue(0 in reverse.adjacent_vertices(1))
        self.assertTrue(0 in reverse.adjacent_vertices(5))
        self.assertTrue(3 in reverse.adjacent_vertices(5))
        self.assertTrue(3 in reverse.adjacent_vertices(4))
        self.assertTrue(4 in reverse.adjacent_vertices(5))
        self.assertTrue(4 in reverse.adjacent_vertices(6))
        self.assertTrue(7 in reverse.adjacent_vertices(8))
        self.assertTrue(9 in reverse.adjacent_vertices(10))
        self.assertTrue(9 in reverse.adjacent_vertices(11))
        self.assertFalse(6 in reverse.adjacent_vertices(0))
        self.assertFalse(2 in reverse.adjacent_vertices(0))
        self.assertFalse(1 in reverse.adjacent_vertices(0))
        self.assertFalse(5 in reverse.adjacent_vertices(0))
        self.assertFalse(5 in reverse.adjacent_vertices(3))
        self.assertFalse(4 in reverse.adjacent_vertices(3))
        self.assertFalse(5 in reverse.adjacent_vertices(4))
        self.assertFalse(6 in reverse.adjacent_vertices(4))
        self.assertFalse(8 in reverse.adjacent_vertices(7))
        self.assertFalse(10 in reverse.adjacent_vertices(9))
        # Fixed: the original repeated the vertex-10 check twice; the second
        # assertion must cover edge 9->11 (mirroring test_single_edge_direction).
        self.assertFalse(11 in reverse.adjacent_vertices(9))
# Allow running this test module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| 43.096386
| 63
| 0.688286
| 493
| 3,577
| 4.868154
| 0.103448
| 0.293333
| 0.100833
| 0.174167
| 0.825
| 0.802083
| 0.80125
| 0.515417
| 0.239167
| 0.239167
| 0
| 0.042598
| 0.199329
| 3,577
| 82
| 64
| 43.621951
| 0.795391
| 0
| 0
| 0.121212
| 0
| 0
| 0.002237
| 0
| 0
| 0
| 0
| 0
| 0.69697
| 1
| 0.075758
| false
| 0
| 0.030303
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
691b6b61246e6607753f1df65657bfd7ce8e2efa
| 13,026
|
py
|
Python
|
tests/helpers/test_perturb_func.py
|
sebastian-lapuschkin/Quantus
|
c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f
|
[
"MIT"
] | null | null | null |
tests/helpers/test_perturb_func.py
|
sebastian-lapuschkin/Quantus
|
c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f
|
[
"MIT"
] | null | null | null |
tests/helpers/test_perturb_func.py
|
sebastian-lapuschkin/Quantus
|
c3b8a9fb2018f34bd89ba38efa2b2b8c38128b3f
|
[
"MIT"
] | null | null | null |
from typing import Union
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
from ..fixtures import *
from ...quantus.helpers import *
from ...quantus.helpers import utils
@pytest.fixture
def input_zeros_1d_1ch():
    """All-zero single-channel 1-D input, shape (1, 224)."""
    return np.zeros((1, 224))
@pytest.fixture
def input_zeros_1d_3ch():
    """All-zero three-channel 1-D input, shape (3, 224)."""
    return np.zeros((3, 224))
@pytest.fixture
def input_zeros_2d_1ch():
    """All-zero single-channel 2-D input, shape (1, 224, 224)."""
    return np.zeros((1, 224, 224))
@pytest.fixture
def input_zeros_2d_3ch():
    """All-zero three-channel 2-D input, shape (3, 224, 224)."""
    return np.zeros((3, 224, 224))
@pytest.fixture
def input_zeros_2d_3ch_flattened():
    """Flattened all-zero (3, 224, 224) input, shape (3*224*224,)."""
    return np.zeros((3, 224, 224)).flatten()
@pytest.fixture
def input_uniform_2d_3ch_flattened():
    """Flattened uniform[0, 0.1) (3, 224, 224) input."""
    return np.random.uniform(low=0, high=0.1, size=(3, 224, 224)).flatten()
@pytest.fixture
def input_ones_mnist():
    """All-ones MNIST-shaped input, shape (1, 28, 28)."""
    return np.ones((1, 28, 28))
@pytest.fixture
def input_ones_mnist_flattened():
    """Flattened all-ones MNIST-shaped input, shape (784,)."""
    return np.ones((1, 28, 28)).flatten()
@pytest.fixture
def input_zeros_mnist_flattened():
    """Flattened all-zero MNIST-shaped input, shape (784,)."""
    return np.zeros((1, 28, 28)).flatten()
@pytest.fixture
def input_uniform_1d_3ch():
    """Uniform[0, 0.1) three-channel 1-D input, shape (3, 224)."""
    return np.random.uniform(low=0, high=0.1, size=(3, 224))
@pytest.fixture
def input_uniform_2d_3ch():
    """Uniform[0, 0.1) three-channel 2-D input, shape (3, 224, 224)."""
    return np.random.uniform(low=0, high=0.1, size=(3, 224, 224))
# NOTE(review): this fixture name is defined twice in this module with an
# identical body; this later definition is the one pytest ends up using.
@pytest.fixture
def input_uniform_2d_3ch_flattened():
    """Flattened uniform[0, 0.1) (3, 224, 224) input."""
    return np.random.uniform(low=0, high=0.1, size=(3, 224, 224)).flatten()
@pytest.fixture
def input_uniform_3d_3ch():
    """Uniform[0, 0.1) three-channel 3-D input, shape (3, 224, 224, 224)."""
    return np.random.uniform(low=0, high=0.1, size=(3, 224, 224, 224))
@pytest.fixture
def input_uniform_mnist():
    """Uniform[0, 0.1) MNIST-shaped input, shape (1, 28, 28)."""
    return np.random.uniform(low=0, high=0.1, size=(1, 28, 28))
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_1d_3ch"),
            {},
            True,
        ),
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {},
            True,
        ),
        (
            lazy_fixture("input_uniform_2d_3ch_flattened"),
            {},
            True,
        ),
    ],
)
def test_gaussian_noise(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """Perturb `data` with gaussian_noise and check the output is non-constant
    and differs from the input (expected == True for all cases here)."""
    out = gaussian_noise(arr=data, **params)
    # Output should not be a constant array: some element differs from the first.
    assert any(out.flatten()[0] != out.flatten()), "Test failed."
    # Noise must actually change the input somewhere.
    assert any(out.flatten() != data.flatten()) == expected, "Test failed."
@pytest.mark.fixed
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "indices": [0, 2],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_zeros_2d_3ch_flattened"),
            {
                "indices": [0, 2],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_ones_mnist"),
            {
                "indices": np.arange(0, 784),
                "input_shift": -1.0,
            },
            0,  # TODO: verify expected
        ),
        (
            lazy_fixture("input_ones_mnist_flattened"),
            {
                "indices": np.arange(0, 784),
                "input_shift": -1.0,
            },
            0,
        ),
        (
            lazy_fixture("input_zeros_1d_1ch"),
            {
                "indices": [0, 2, 112, 113, 128, 223],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_zeros_1d_3ch"),
            {
                "indices": [0, 2, 112, 113, 128, 223],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_zeros_2d_1ch"),
            {
                "indices": [0, 2, 224, 226, 448, 450],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "indices": [0, 2, 224, 226, 448, 450],
                "fixed_values": 1.0,
            },
            1,
        ),
        (
            lazy_fixture("input_ones_mnist"),
            {
                "indices": np.arange(0, 784),
                "input_shift": -1.0,
                "nr_channels": 1,
            },
            0,
        ),
        (
            lazy_fixture("input_zeros_mnist_flattened"),
            {
                "indices": np.arange(0, 784),
                "input_shift": -1.0,
                "nr_channels": 1,
            },
            -1,
        ),
        (
            lazy_fixture("input_ones_mnist_flattened"),
            {
                "indices": np.arange(0, 784),
                "input_shift": 1.0,
                "nr_channels": 1,
            },
            2,
        ),
    ],
)
def test_baseline_replacement_by_indices(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """Perturb `data` at the given flat indices and check every perturbed
    entry equals `expected` (only checked when `expected` is numeric)."""
    out = baseline_replacement_by_indices(arr=data, **params)
    # Flat indices from the parametrization -> per-axis indices into `data`.
    indices = np.unravel_index(params["indices"], data.shape)
    if isinstance(expected, (int, float)):
        assert np.all([i == expected for i in out[indices]]), f"Test failed.{out}"
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_zeros_1d_1ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (0,),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_1d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (0,),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_2d_1ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 10,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (11, 22),
            },
            {},
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (11,),
            },
            {"exception": ValueError},
        ),
        (
            lazy_fixture("input_zeros_2d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (11, 11, 11),
            },
            {"exception": ValueError},
        ),
        (
            lazy_fixture("input_zeros_1d_3ch"),
            {
                "perturb_baseline": 1.0,
                "patch_size": 4,
                "coords": (11, 11),
            },
            {"exception": ValueError},
        ),
    ],
)
def test_baseline_replacement_by_patch(
    data: np.ndarray, params: dict, expected: dict
):
    """Patch replacement must change every value inside the patch and none outside.

    Cases whose `expected` carries an "exception" key use coords of the wrong
    dimensionality for the input and must raise that exception.
    """
    patch_slice = utils.create_patch_slice(
        patch_size=params["patch_size"],
        coords=params["coords"],
        expand_first_dim=True,
    )
    # Build the call arguments once so the failure and success paths invoke
    # the function identically (previously the call was duplicated and
    # interleaved with debug print()s).
    call_kwargs = dict(
        arr=data,
        patch_slice=patch_slice,
        perturb_baseline=params["perturb_baseline"],
    )
    if "exception" in expected:
        with pytest.raises(expected["exception"]):
            baseline_replacement_by_patch(**call_kwargs)
        return
    out = baseline_replacement_by_patch(**call_kwargs)
    patch_mask = np.zeros(data.shape, dtype=bool)
    patch_mask[patch_slice] = True
    # Inside the patch every value changes; outside it nothing does.
    assert np.all(out[patch_mask] != data[patch_mask]), "Test failed."
    assert np.all(out[~patch_mask] == data[~patch_mask]), "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_1d_3ch"),
            {"perturb_radius": 0.02},
            True,
        ),
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {"perturb_radius": 0.02},
            True,
        ),
        (
            lazy_fixture("input_uniform_2d_3ch_flattened"),
            {"perturb_radius": 0.02},
            True,
        ),
    ],
)
def test_uniform_sampling(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """Uniform sampling must produce a non-constant output that differs from the input."""
    out = uniform_sampling(arr=data, **params)
    out_flat = out.flatten()
    data_flat = data.flatten()
    # Output must not be a constant array ...
    assert any(out_flat[0] != out_flat), "Test failed."
    # ... and must actually deviate from the unperturbed input.
    assert any(out_flat != data_flat) == expected, "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {"perturb_angle": 30},
            True,
        ),
    ],
)
def test_rotation(
    # Fixed annotation: the lazy fixture yields an ndarray, not a dict
    # (matches the sibling tests' signatures).
    data: np.ndarray,
    params: dict,
    expected: Union[float, dict, bool],
):
    """A 30-degree rotation of a random image must change at least one value."""
    out = rotation(arr=data, **params)
    assert np.any(out != data) == expected, "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {"perturb_dx": 20, "perturb_baseline": "black"},
            True,
        )
    ],
)
def test_translation_x_direction(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """A 20-pixel x-translation with a black baseline must alter the image."""
    shifted = translation_x_direction(arr=data, **params)
    changed = np.any(shifted != data)
    assert changed == expected, "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_2d_3ch"),
            # NOTE(review): passes `perturb_dx` to the *y*-direction
            # translation — confirm the parameter name is correct.
            {"perturb_dx": 20, "perturb_baseline": "black"},
            True,
        )
    ],
)
def test_translation_y_direction(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """A 20-pixel y-translation with a black baseline must alter the image."""
    shifted = translation_y_direction(arr=data, **params)
    changed = np.any(shifted != data)
    assert changed == expected, "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {"perturb_dx": 20},
            True,
        ),
    ],
)
def test_no_perturbation(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """The identity perturbation must return the input completely unchanged."""
    out = no_perturbation(arr=data, **params)
    unchanged = (out == data).all()
    assert unchanged == expected, "Test failed."
@pytest.mark.perturb_func
@pytest.mark.parametrize(
    "data,params,expected",
    [
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {
                "blur_kernel_size": 15,
                "patch_size": 4,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_uniform_2d_3ch"),
            {
                "blur_kernel_size": 7,
                "patch_size": 4,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_uniform_mnist"),
            {
                "blur_kernel_size": 15,
                "patch_size": 4,
                "coords": (0, 0),
            },
            {},
        ),
        (
            lazy_fixture("input_uniform_1d_3ch"),
            {
                "blur_kernel_size": 15,
                "patch_size": 4,
                "coords": (0,),
            },
            {"exception": NotImplementedError},
        ),
        (
            lazy_fixture("input_uniform_3d_3ch"),
            {
                "blur_kernel_size": 15,
                "patch_size": 4,
                "coords": (0, 0, 0),
            },
            {"exception": ValueError},
        ),
    ],
)
def test_baseline_replacement_by_blur(
    data: np.ndarray, params: dict, expected: Union[float, dict, bool]
):
    """Blur replacement must change every value in the patch, nothing outside it.

    Cases whose `expected` carries an "exception" key use unsupported input
    dimensionality and must raise that exception.
    """
    patch_slice = utils.create_patch_slice(
        patch_size=params["patch_size"],
        coords=params["coords"],
        expand_first_dim=True,
    )
    # Build the call arguments once so the failure and success paths invoke
    # the function identically (previously the call was duplicated).
    call_kwargs = dict(
        arr=data,
        patch_slice=patch_slice,
        blur_kernel_size=params["blur_kernel_size"],
    )
    if "exception" in expected:
        with pytest.raises(expected["exception"]):
            baseline_replacement_by_blur(**call_kwargs)
        return
    out = baseline_replacement_by_blur(**call_kwargs)
    patch_mask = np.zeros(data.shape, dtype=bool)
    patch_mask[patch_slice] = True
    assert out.shape == data.shape, "Test failed."
    # Blurring inside the patch changes every value there; the rest is untouched.
    assert np.all(out[patch_mask] != data[patch_mask]), "Test failed."
    assert np.all(out[~patch_mask] == data[~patch_mask]), "Test failed."
| 25.293204
| 82
| 0.49762
| 1,343
| 13,026
| 4.568131
| 0.09382
| 0.064548
| 0.09128
| 0.054768
| 0.893562
| 0.864059
| 0.835697
| 0.780929
| 0.751752
| 0.719967
| 0
| 0.047342
| 0.36596
| 13,026
| 514
| 83
| 25.342412
| 0.695484
| 0.001612
| 0
| 0.6
| 0
| 0
| 0.159579
| 0.012843
| 0
| 0
| 0
| 0.001946
| 0.030435
| 1
| 0.05
| false
| 0
| 0.015217
| 0.030435
| 0.1
| 0.004348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6934411f1e4c15deb25b9b4f795dd5f539cddac9
| 303
|
py
|
Python
|
tools/Canvas/Canvas/data/__init__.py
|
Oshlack/Slinker
|
725d2c0861156034ef4d16293e2a3b74ac23c9e7
|
[
"MIT"
] | 15
|
2021-08-23T14:36:35.000Z
|
2022-03-17T06:56:17.000Z
|
tools/Canvas/Canvas/data/__init__.py
|
Oshlack/Slinker
|
725d2c0861156034ef4d16293e2a3b74ac23c9e7
|
[
"MIT"
] | 2
|
2021-08-17T03:00:23.000Z
|
2022-02-08T23:24:16.000Z
|
tools/Canvas/Canvas/data/__init__.py
|
Oshlack/Slinker
|
725d2c0861156034ef4d16293e2a3b74ac23c9e7
|
[
"MIT"
] | null | null | null |
#=======================================================================================================================
#
# CANVAS
# Author: Breon Schmidt
# License: MIT
#
#=======================================================================================================================
| 33.666667
| 122
| 0.112211
| 6
| 303
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066007
| 303
| 8
| 123
| 37.875
| 0.120141
| 0.947195
| 0
| null | 0
| null | 0
| 0
| null | 1
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
694139cf29b43ed0da35c60d491d5071f6a4580e
| 124
|
py
|
Python
|
blisk/run_blisk_py.py
|
suleymanmuti/CalculiX-Examples
|
3f5bc0247de90cfc312bf13a1d0e93b39da4b5e7
|
[
"MIT"
] | null | null | null |
blisk/run_blisk_py.py
|
suleymanmuti/CalculiX-Examples
|
3f5bc0247de90cfc312bf13a1d0e93b39da4b5e7
|
[
"MIT"
] | null | null | null |
blisk/run_blisk_py.py
|
suleymanmuti/CalculiX-Examples
|
3f5bc0247de90cfc312bf13a1d0e93b39da4b5e7
|
[
"MIT"
] | 1
|
2021-02-22T10:56:47.000Z
|
2021-02-22T10:56:47.000Z
|
#!/usr/bin/python
"""Run the blisk example pipeline: CGX pre-processing, CCX solve, CGX post."""
import subprocess

# subprocess.run with an argument list avoids invoking a shell, and
# check=True stops the pipeline immediately if any stage fails —
# os.system silently ignored non-zero exit codes.
subprocess.run(["cgx", "-bg", "blisk_pre.fbd"], check=True)
subprocess.run(["ccx", "blisk"], check=True)
subprocess.run(["cgx", "-bg", "blisk_post.fbd"], check=True)
| 15.5
| 35
| 0.701613
| 23
| 124
| 3.695652
| 0.565217
| 0.282353
| 0.258824
| 0.305882
| 0.423529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104839
| 124
| 7
| 36
| 17.714286
| 0.765766
| 0.129032
| 0
| 0
| 0
| 0
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15f9391d10aa798635201237c3b36d66afa91b10
| 2,279
|
py
|
Python
|
django/kisaan/master/models.py
|
AkshitOstwal/cfthacks2019
|
7260ff1b4c0ce8ee288bd3dc445e0465845410d2
|
[
"MIT"
] | 2
|
2019-08-24T16:50:37.000Z
|
2020-09-05T08:39:49.000Z
|
django/kisaan/master/models.py
|
AkshitOstwal/cfthacks2019
|
7260ff1b4c0ce8ee288bd3dc445e0465845410d2
|
[
"MIT"
] | null | null | null |
django/kisaan/master/models.py
|
AkshitOstwal/cfthacks2019
|
7260ff1b4c0ce8ee288bd3dc445e0465845410d2
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class State(models.Model):
    """State master record, keyed by a two-character state code."""

    # (stored value, human-readable label) pairs for the `active` field.
    ACTIVE_CHOICES = (
        (0,'Inactive'),
        (1, 'Active'),
    )
    statecode = models.CharField(max_length=2, unique=True, primary_key=True)
    name = models.CharField(max_length=50)
    active = models.IntegerField(choices=ACTIVE_CHOICES)
    # Soft-delete flag: rows are flagged rather than physically removed.
    is_deleted = models.BooleanField(default=False)
    def __str__(self):
        # Display the state's name in the admin and in shell reprs.
        return self.name
    class Meta:
        db_table = "master_state"
class Zone(models.Model):
    """Zone master record within a State, keyed by a four-character zone code."""

    # (stored value, human-readable label) pairs for the `active` field.
    ACTIVE_CHOICES = (
        (0,'Inactive'),
        (1, 'Active'),
    )
    zonecode = models.CharField(max_length=4, unique=True, primary_key=True)
    # Deleting a State cascades to its zones.
    statecode = models.ForeignKey(State, related_name="zone_belongs_to_state", on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    active = models.IntegerField(choices=ACTIVE_CHOICES)
    # Soft-delete flag: rows are flagged rather than physically removed.
    is_deleted = models.BooleanField(default=False)
    def __str__(self):
        # Display the zone's name in the admin and in shell reprs.
        return self.name
    class Meta:
        db_table = "master_zone"
class District(models.Model):
    """District master record linked to both its State and its Zone."""

    # (stored value, human-readable label) pairs for the `active` field.
    ACTIVE_CHOICES = (
        (0,'Inactive'),
        (1, 'Active'),
    )
    districtcode = models.CharField(max_length=4, unique=True, primary_key=True)
    # Deleting the parent State or Zone cascades to its districts.
    statecode = models.ForeignKey(State, related_name="district_belongs_to_state", on_delete=models.CASCADE)
    zonecode = models.ForeignKey(Zone, related_name="district_belongs_to_zone", on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    active = models.IntegerField(choices=ACTIVE_CHOICES)
    # Soft-delete flag: rows are flagged rather than physically removed.
    is_deleted = models.BooleanField(default=False)
    def __str__(self):
        # Display the district's name in the admin and in shell reprs.
        return self.name
    class Meta:
        db_table = "master_district"
class Language(models.Model):
    """Supported language record, keyed by a two-character id."""

    id = models.CharField(max_length=2, unique=True, primary_key=True)
    # NOTE(review): max_length=5 looks tight for language names — confirm intent.
    name = models.CharField(max_length=5)
    # Presumably Unix timestamps — confirm against writers of this table.
    timecreated = models.IntegerField()
    timemodified = models.IntegerField()
    def __str__(self):
        # Display the language's name in the admin and in shell reprs.
        return self.name
    class Meta:
        db_table = "languages"
| 28.848101
| 108
| 0.685388
| 278
| 2,279
| 5.406475
| 0.233813
| 0.07984
| 0.095808
| 0.127745
| 0.83167
| 0.807718
| 0.807718
| 0.775116
| 0.695276
| 0.695276
| 0
| 0.009377
| 0.204476
| 2,279
| 78
| 109
| 29.217949
| 0.819636
| 0.10882
| 0
| 0.576923
| 0
| 0
| 0.078519
| 0.034568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.019231
| 0.076923
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
15fa7ca859f1461554f88764f618035b3eca2aee
| 138
|
py
|
Python
|
geneal/genetic_algorithms/__init__.py
|
NeveIsa/geneal
|
064b0409912088886bf56fe9a729d74dac92a235
|
[
"MIT"
] | 47
|
2020-07-10T14:28:52.000Z
|
2022-03-25T17:20:52.000Z
|
geneal/genetic_algorithms/__init__.py
|
NeveIsa/geneal
|
064b0409912088886bf56fe9a729d74dac92a235
|
[
"MIT"
] | 10
|
2020-08-08T16:35:40.000Z
|
2022-03-08T00:07:19.000Z
|
geneal/genetic_algorithms/__init__.py
|
NeveIsa/geneal
|
064b0409912088886bf56fe9a729d74dac92a235
|
[
"MIT"
] | 14
|
2020-08-07T20:49:18.000Z
|
2022-03-31T17:55:47.000Z
|
from geneal.genetic_algorithms._binary import BinaryGenAlgSolver
from geneal.genetic_algorithms._continuous import ContinuousGenAlgSolver
| 46
| 72
| 0.913043
| 14
| 138
| 8.714286
| 0.642857
| 0.163934
| 0.278689
| 0.442623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 138
| 2
| 73
| 69
| 0.938462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c61d5d2cb8347bcae20c2bea737bd15f8e6ca93c
| 215
|
py
|
Python
|
api/admin.py
|
SergioLeguizamon/prueba_tecnica_quick
|
1d09afa6ba4ed60221a88f7f2bd0811482860733
|
[
"MIT"
] | null | null | null |
api/admin.py
|
SergioLeguizamon/prueba_tecnica_quick
|
1d09afa6ba4ed60221a88f7f2bd0811482860733
|
[
"MIT"
] | null | null | null |
api/admin.py
|
SergioLeguizamon/prueba_tecnica_quick
|
1d09afa6ba4ed60221a88f7f2bd0811482860733
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Clients, Products, Bills, BillsProducts
# Expose the API models in the Django admin site (same registration order
# as before).
for _model in (Clients, Products, Bills, BillsProducts):
    admin.site.register(_model)
| 26.875
| 59
| 0.827907
| 28
| 215
| 6.357143
| 0.428571
| 0.202247
| 0.382022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074419
| 215
| 8
| 60
| 26.875
| 0.894472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c64fa50b57778b2cbf189cd31620c08ccf546a72
| 58,861
|
py
|
Python
|
Processing.py
|
Christoper-Harvey/ecg-file-processing
|
85859b9892f242c3a07ce05364839cf3a174e039
|
[
"MIT"
] | null | null | null |
Processing.py
|
Christoper-Harvey/ecg-file-processing
|
85859b9892f242c3a07ce05364839cf3a174e039
|
[
"MIT"
] | null | null | null |
Processing.py
|
Christoper-Harvey/ecg-file-processing
|
85859b9892f242c3a07ce05364839cf3a174e039
|
[
"MIT"
] | null | null | null |
import base64
import glob
import warnings
import xml.etree.ElementTree as et
from datetime import date

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.integrate import trapz
from scipy.signal import medfilt
today = date.today()

# `np.warnings` was only an accidental re-export of the stdlib module and was
# removed in NumPy 1.25 — call the stdlib `warnings` module directly.
warnings.filterwarnings('ignore')
sns.set(style="darkgrid")

# Parse every XML export in the working directory up front, keeping the
# element-tree roots and their file names in parallel lists.
roots = []
root_names = []
for n in glob.glob('*.xml'):
    roots.append(et.parse(n).getroot())
    root_names.append(n)
def modified_z_score(intensity):
    """Return the modified (median/MAD-based) z-score of `intensity`.

    Uses the 0.6745 consistency constant; when the MAD is zero (constant
    input) it falls back to 1 to avoid dividing by zero.
    """
    center = np.median(intensity)
    mad = np.median([np.abs(intensity - center)])
    if mad == 0:
        mad = 1
    return 0.6745 * (intensity - center) / mad
def df_fixer(y,n):
    """Blank out spike artifacts in signal series `y` of record `n`.

    Searches for a modified-z-score threshold on the first difference that
    does not flag the Qonset..Qoffset+30 window itself, then replaces every
    sample whose score exceeds that threshold with None.

    NOTE(review): depends on a module-level `data` frame with Qonset/Qoffset
    columns indexed at n*12 (12 leads per record, presumably) that is defined
    later in this script — confirm it is populated before this is called.
    """
    # 0 doubles as the "no threshold found yet" sentinel for the search loop.
    threshold = 0
    # Offset that trims the left edge of the Q window by 5 samples per pass
    # when the window's own amplitude keeps exceeding the candidate cutoff.
    x = 0
    while threshold == 0:
        # Very spiky signals (max score > 150) use the looser .98 quantile;
        # cleaner ones use .992. Both add a +55 margin to the cutoff.
        if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
                x += 5
        elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
                x += 5
    # Any first-difference score above the found threshold counts as a spike.
    spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
    y_out = y.copy()
    for i in np.arange(len(spikes)):
        if spikes[i] != 0:
            # Offset by the series' first index label (pandas-style indexing).
            y_out[i+y_out.index[0]] = None
    return y_out
def half_df_fixer(y,n):
    """Variant of df_fixer for the half-rate data set.

    Same adaptive spike-blanking as df_fixer but reads the module-level
    `half_data` frame, widens the Q window in steps of 2 samples, and uses a
    +60 margin instead of +55.

    NOTE(review): depends on a module-level `half_data` frame with
    Qonset/Qoffset columns indexed at n*12 defined later in this script —
    confirm call order.
    """
    # 0 doubles as the "no threshold found yet" sentinel for the search loop.
    threshold = 0
    # Offset that trims the left edge of the Q window by 2 samples per pass.
    x = 0
    while threshold == 0:
        # Very spiky signals (max score > 150) use the looser .98 quantile;
        # cleaner ones use .992. Both add a +60 margin to the cutoff.
        if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
                x += 2
        elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
                x += 2
    # Any first-difference score above the found threshold counts as a spike.
    spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
    y_out = y.copy()
    for i in np.arange(len(spikes)):
        if spikes[i] != 0:
            # Offset by the series' first index label (pandas-style indexing).
            y_out[i+y_out.index[0]] = None
    return y_out
def hanging_line(point1, point2):
    """Bridge two (x, y) points with a cosh (catenary-style) arc.

    Fits ``y = a*cosh(x % 600) + b`` through both endpoints and samples it
    at every integer step between them. Returns the tuple ``(x, y)`` of
    sample positions and interpolated values.
    """
    x0, y0 = point1[0], point1[1]
    x1, y1 = point2[0], point2[1]
    # Solve for the scale ``a`` and offset ``b`` from the two endpoints.
    # The % 600 wraps positions onto one record's sample range.
    denom = np.cosh(x1 % 600) - np.cosh(x0 % 600)
    a = (y1 - y0) / denom
    b = y0 - a * np.cosh(x0 % 600)
    xs = np.linspace(x0, x1, (x1 - x0) + 1)
    ys = a * np.cosh(xs % 600) + b
    return (xs, ys)
# Parse every Philips XML ECG record in ``roots`` into per-lead dicts of
# timing measurements + base64 waveform. 1000 sps records go into ``tags``;
# any other rate (500 sps) into ``Tags`` for later upsampling.
# NOTE(review): the fixed child indexes (root[6][1][0][14], root[6][0][29],
# ...) assume one specific Philips export schema -- confirm against the XSD.
Tags = {'tags':[]}
tags = {'tags':[]}
for root in roots:
    if len(root.find('{http://www3.medical.philips.com}waveforms').getchildren()) == 2:
        if int(root.find('{http://www3.medical.philips.com}waveforms')[1].attrib['samplespersec']) == 1000:
            for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
                tag = {}
                tag['Lead'] = elem.attrib['leadname']
                # P-wave block: zero everything when PR / P-duration fields
                # are invalid or missing, otherwise derive Ponset/Poffset
                # from Qonset (globalmeasurements[5]) minus the PR interval.
                if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
                        tag['Ponset'] = 0
                        tag['Pdur'] = 0
                        tag['Print'] = 0
                        tag['Poffset'] = 0
                    else:
                        tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                        tag['Pdur'] = 0
                        tag['Print'] = int(root[6][1][0][14].text)
                        tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
                elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Failed' or root[6][1][0][14].text == 'Failed' or (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid'):
                    tag['Ponset'] = 0
                    tag['Pdur'] = 0
                    tag['Print'] = 0
                    tag['Poffset'] = 0
                else:
                    tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                    tag['Pdur'] = int(elem[0].text)
                    tag['Print'] = int(root[6][1][0][14].text)
                    tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
                # QRS/T block: all NaN when any of Qonset, QRS duration,
                # Tonset or QT interval is marked invalid.
                if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
                    tag['Qonset'] = np.nan
                    tag['Qrsdur'] = np.nan
                    tag['Qoffset'] = np.nan
                    tag['Tonset'] = np.nan
                    tag['Qtint'] = np.nan
                    tag['Toffset'] = np.nan
                    tag['Tdur'] = np.nan
                else:
                    tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
                    tag['Qrsdur'] = int(root[6][0][29].text)
                    tag['Qoffset'] = tag['Qonset'] + tag['Qrsdur']
                    tag['Tonset'] = int(elem[4].text)
                    tag['Qtint'] = int(root[6][1][0][18].text)
                    tag['Toffset'] = tag['Qonset'] + tag['Qtint']
                    tag['Tdur'] = tag['Qoffset'] - tag['Qonset']
                # Global rates/axes plus interpretation statements.
                if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): tag['HeartRate'] = int(root[7][0][1][0].text)
                    if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
                    if root[6][1][0][9].text != None: tag['AtrialRate'] = int(root[6][1][0][9].text)
                    if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': tag['QRSFrontAxis'] = int(root[6][0][15].text)
                    if root[6][0][31].text != None and root[6][0][31].text != 'Failed': tag['QTC'] = int(root[6][0][31].text)
                    tag['Target'] = []
                    # Diagnostic statements start at the first <statement>.
                    for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
                        tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
                else:
                    tag['HeartRate'] = np.nan
                    tag['RRint'] = np.nan
                    tag['AtrialRate'] = np.nan
                    tag['QRSFrontAxis'] = np.nan
                    tag['QTC'] = np.nan
                    tag['Target'] = []
                # Demographics from <reportinfo>/<patient>.
                if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
                    time = root[3].attrib
                    tag['Date'] = time['date']
                    tag['Time'] = time['time']
                    tag['Sex'] = root[5][0][6].text
                    tag['ID'] = root[5][0][0].text
                    tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
                    # Age may be stored as a birth date or directly as years.
                    if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
                        tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
                    if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
                        tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
                tag['Waveform'] = elem[6].text
                # tag['LongWaveform'] = root[8][0].text
                tags['tags'].append(tag)
        else:
            # Non-1000 sps (500 sps) records: same extraction into ``Tags``.
            # The conditions differ slightly from the 1000 sps branch
            # (extra None checks, no 'Failed' filter on QTC).
            for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
                Tag = {}
                Tag['Lead'] = elem.attrib['leadname']
                if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
                        Tag['Ponset'] = 0
                        Tag['Pdur'] = 0
                        Tag['Print'] = 0
                        Tag['Poffset'] = 0
                    else:
                        # NOTE(review): float() here vs int() in the 1000 sps
                        # branch -- presumably unintentional; confirm.
                        Tag['Ponset'] = float(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                        Tag['Pdur'] = 0
                        Tag['Print'] = int(root[6][1][0][14].text)
                        Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
                elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == None or root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    Tag['Ponset'] = 0
                    Tag['Pdur'] = 0
                    Tag['Print'] = 0
                    Tag['Poffset'] = 0
                else:
                    Tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                    Tag['Pdur'] = int(elem[0].text)
                    Tag['Print'] = int(root[6][1][0][14].text)
                    Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
                if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][18].text == None or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
                    Tag['Qonset'] = np.nan
                    Tag['Qrsdur'] = np.nan
                    Tag['Qoffset'] = np.nan
                    Tag['Tonset'] = np.nan
                    Tag['Qtint'] = np.nan
                    Tag['Toffset'] = np.nan
                    Tag['Tdur'] = np.nan
                else:
                    Tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
                    Tag['Qrsdur'] = int(root[6][0][29].text)
                    Tag['Qoffset'] = Tag['Qonset'] + Tag['Qrsdur']
                    Tag['Tonset'] = int(elem[4].text)
                    Tag['Qtint'] = int(root[6][1][0][18].text)
                    Tag['Toffset'] = Tag['Qonset'] + Tag['Qtint']
                    Tag['Tdur'] = Tag['Qoffset'] - Tag['Qonset']
                if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): Tag['HeartRate'] = int(root[7][0][1][0].text)
                    if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: Tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
                    if root[6][1][0][9].text != None: Tag['AtrialRate'] = int(root[6][1][0][9].text)
                    if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': Tag['QRSFrontAxis'] = int(root[6][0][15].text)
                    if root[6][0][31].text != None: Tag['QTC'] = int(root[6][0][31].text)
                    Tag['Target'] = []
                    for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
                        Tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
                else:
                    Tag['HeartRate'] = np.nan
                    Tag['RRint'] = np.nan
                    Tag['AtrialRate'] = np.nan
                    Tag['QRSFrontAxis'] = np.nan
                    Tag['QTC'] = np.nan
                    Tag['Target'] = []
                if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
                    time = root[3].attrib
                    Tag['Date'] = time['date']
                    Tag['Time'] = time['time']
                    Tag['Sex'] = root[5][0][6].text
                    Tag['ID'] = root[5][0][0].text
                    Tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
                    # Guard against an empty <age> element (this branch only).
                    if len(root[5][0].find('{http://www3.medical.philips.com}age')) > 0:
                        if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
                            Tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
                        if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
                            Tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
                Tag['Waveform'] = elem[6].text
                # Tag['LongWaveform'] = root[8][0].text
                Tags['tags'].append(Tag)
# Assemble the per-lead dicts into DataFrames: ``data`` holds 1000 sps
# records, ``half_data`` 500 sps. Each ECG contributes 12 consecutive rows
# (one per lead), hence the record counts are len/12.
half_data = pd.DataFrame(Tags['tags'])
data = pd.DataFrame(tags['tags'])
del roots
del root
del elem
count1000 = int(len(data)/12)
count500 = int(len(half_data)/12)
count = count1000 + count500
if len(data) > 0:
    # ---- 1000 sps pipeline ----
    # Drop records with NaN measurements: gather the offending ID/Date/Time
    # values and remove every row matching any of them.
    array = np.unique(data[data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
    missing_data = data.loc[data['ID'].isin(array) & data['Date'].isin(array) & data['Time'].isin(array)]
    data.drop(missing_data.index, axis=0,inplace=True)
    missing_data = missing_data.reset_index(drop=True)
    del tag
    del tags
    data = data.reset_index(drop=True)
    # Tonset/Pdur vary per lead; replace with the per-record mean so all
    # 12 leads of a record agree.
    for n in range(count1000):
        data.Tonset[n*12:(n+1)*12] = np.repeat(int(data.Tonset[n*12:(n+1)*12].sum()/12), 12)
        data.Pdur[n*12:(n+1)*12] = np.repeat(int(data.Pdur[n*12:(n+1)*12].sum()/12), 12)
    # Decode each base64 waveform payload to a raw byte sequence.
    x = 0
    p = []
    for x in range(len(data.Waveform)):
        t = base64.b64decode(data.Waveform[x])
        p.append(np.asarray(t))
        x+=1
    p = np.asarray(p)
    a = []
    for i in p:
        o = []
        for x in i:
            o.append(x)
        a.append(o)
    df = pd.DataFrame(a)
    df.insert(0, 'Lead', data['Lead'])
    # Pivot each record's 12 rows into columns keyed by lead name.
    blank = []
    for n in range(count1000):
        blank.append(pd.pivot_table(df[(n*12):(n+1)*12], columns=df.Lead))
    test = pd.concat(blank)
    # Reassemble 16-bit samples from (low, high) byte pairs; a high byte
    # above 128 marks a negative (two's-complement) value.
    new = []
    array = []
    for n in range(13):
        for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
            if num > 128:
                new.append(index - (256 * (256 - num)))
            elif num < 128:
                new.append(index + (256 * num))
            elif num == 0:
                new.append(index)
            else:
                new.append(index)
        # Append-then-fill: the empty list appended here is populated on the
        # NEXT iteration, so array[k] ends up holding column k's samples
        # (the n==0 pass fills the pre-loop ``new``, which is discarded).
        new = []
        array.append(new)
    array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
    df = pd.DataFrame(array)
    df = pd.pivot_table(df, columns=test.columns)
    df = df.fillna(0)
    del a
    del p
    del o
    del t
    del blank
    del new
    del array
    # Baseline correction: subtract an offset estimated from flat segments
    # outside the QRS complex (pre-P and post-T windows; the middle third
    # of each window is used to avoid wave edges).
    for n in range(count1000):
        for x in range(12):
            if (data.Toffset[n*12]-data.RRint[n*12]) >= data.Ponset[n*12] or (data.Ponset[n*12] + data.RRint[n*12]) - data.Toffset[n*12] == 1:
                df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
            else:
                rrint = data.RRint[n*12]
                # Four window layouts depending on where the previous/next
                # beat falls relative to the 1200-sample record.
                if (rrint + data.Ponset[n*12]) > 1200 and (data.Toffset[n*12]-rrint) < 0:
                    temp = df.iloc[:,x][int(n*1200):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        # NOTE(review): unreachable -- the two elifs above
                        # already consume every empty case.
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                elif (rrint + data.Ponset[n*12]) > 1200 and (data.Toffset[n*12]-rrint) > 0:
                    temp = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)-rrint):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                elif rrint + data.Ponset[n*12] < 1200 and (data.Toffset[n*12]-rrint) < 0:
                    temp = df.iloc[:,x][int(n*1200):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                else:
                    temp = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)-rrint):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
    # Spike removal + gap interpolation: df_fixer nulls spike samples, the
    # resulting NaN gaps are bridged with cosh arcs (hanging_line). If the
    # filtered trace kept <60% of the original QRS area, fall back to a
    # kernel-9 median filter instead.
    unfiltered_leads = df.copy()
    for n in range(count1000):
        for inx in range(12):
            test = df_fixer(df.iloc[:,inx][n*1200:(n+1)*1200], n)
            gaps = []
            lstOfNs = []
            gap = []
            # Group consecutive NaN indexes (within 5 samples) into gaps.
            for num in test[test.isna() == True].index:
                lstOfNs.append(num)
                if len(lstOfNs) == 1:
                    gap.append(lstOfNs[0])
                if len(lstOfNs) > 1:
                    if lstOfNs[-1] - lstOfNs[-2] < 5:
                        gap.append(num)
                    elif lstOfNs[-1] - lstOfNs[-2] > 5:
                        gaps.append(gap)
                        gap = []
                        gap.append(num)
            gaps.append(gap)
            if gaps != [[]]:
                x = []
                y = []
                for g in gaps:
                    if len(g) == 1:
                        x.append([g[-1]+1])
                        y.append(test[g[-1]+1])
                    if np.isnan(test.iloc[0]):
                        # Gap at the very start of the trace: anchor both
                        # ends at the first valid point after the gap.
                        point1 = [g[0], test[g[-1]+1]]
                        point2 = [g[-1]+1, test[g[-1]+1]]
                        x_temp,y_temp = hanging_line(point1, point2)
                        x.append(x_temp)
                        y.append(y_temp)
                    else:
                        point1 = [g[0]-1, test[g[0]-1]]
                        point2 = [g[-1]+1, test[g[-1]+1]]
                        x_temp,y_temp = hanging_line(point1, point2)
                        x.append(x_temp)
                        y.append(y_temp)
                for i in range(len(x)):
                    test[x[i]] = y[i]
            if (trapz(abs(test[int(data.Qonset[n*12]):int(data.Qoffset[n*12])]))/trapz(abs(df.iloc[:,inx][int(data.Qonset[12*n]+(1200*n)):int(data.Qoffset[12*n]+(1200*n))]))) < .60:
                test = df.iloc[:,inx][n*1200:(n+1)*1200]
                test = medfilt(test, kernel_size=9)
            df.iloc[:,inx][n*1200:(n+1)*1200] = test
    del gaps
    del lstOfNs
    del gap
    del test
    # VCG synthesis: derive orthogonal x/y/z leads from the 8 independent
    # leads via a fixed 3x8 transform (Dower-style coefficients).
    VTI_leads = df[['III', 'aVF', 'aVL', 'aVR']]
    df = df[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
    Unfiltered_VTI_leads = unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']]
    unfiltered_leads = unfiltered_leads[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
    matrix = [[.38, -.07, -.13, .05, -.01, .14, .06, .54],
              [-.07, .93, .06, -.02, -.05, .06, -.17, .13],
              [.11, -.23, -.43, -.06, -.14, -.20, -.11, .31]]
    x = matrix[0]
    y = matrix[1]
    z = matrix[2]
    n = 0
    xtemp = []
    ytemp = []
    ztemp = []
    for i in range(len(df)):
        xtemp.append((df.iloc[n].values * x).sum())
        ytemp.append((df.iloc[n].values * y).sum())
        ztemp.append((df.iloc[n].values * z).sum())
        n+=1
    df['x'] = xtemp
    df['y'] = ytemp
    df['z'] = ztemp
    # Same transform on the unfiltered copy.
    n = 0
    xtemp = []
    ytemp = []
    ztemp = []
    for i in range(len(unfiltered_leads)):
        xtemp.append((unfiltered_leads.iloc[n].values * x).sum())
        ytemp.append((unfiltered_leads.iloc[n].values * y).sum())
        ztemp.append((unfiltered_leads.iloc[n].values * z).sum())
        n+=1
    df['Unfiltered_x'] = xtemp
    df['Unfiltered_y'] = ytemp
    df['Unfiltered_z'] = ztemp
    del xtemp
    del ytemp
    del ztemp
    # Attach measurement/demographic columns, then broadcast each record's
    # first-row value across its 1200 samples.
    df['Date'] = data['Date']
    df['ID'] = data['ID']
    df['Time'] = data['Time']
    df['Print'] = data['Print']
    df['Ponset'] = data['Ponset']
    df['Pdur'] = data['Pdur']
    df['Poffset'] = data['Poffset']
    df['Qonset'] = data['Qonset']
    df['Qrsdur'] = data['Qrsdur']
    df['Qtint'] = data['Qtint']
    df['Qoffset'] = data['Qoffset']
    df['Tonset'] = data['Tonset']
    df['Tdur'] = data['Tdur']
    df['Toffset'] = data['Toffset']
    df['HeartRate'] = data['HeartRate']
    df['QRSFrontAxis'] = data['QRSFrontAxis']
    df['Sex'] = data['Sex']
    df['QTC'] = data['QTC']
    df['Age'] = data['Age']
    df['Name'] = data['Name']
    for n in range(count1000):
        df['Ponset'][(n*1200):(n+1)*1200] = data['Ponset'][n*12]
        df['Print'][(n*1200):(n+1)*1200] = data['Print'][n*12]
        df['Pdur'][(n*1200):(n+1)*1200] = data['Pdur'][n*12]
        df['Poffset'][(n*1200):(n+1)*1200] = data['Poffset'][n*12]
        df['Qonset'][(n*1200):(n+1)*1200] = data['Qonset'][n*12]
        df['Qrsdur'][(n*1200):(n+1)*1200] = data['Qrsdur'][n*12]
        df['Qtint'][(n*1200):(n+1)*1200] = data['Qtint'][n*12]
        df['Qoffset'][(n*1200):(n+1)*1200] = data['Qoffset'][n*12]
        df['Tonset'][(n*1200):(n+1)*1200] = data['Tonset'][n*12]
        df['Tdur'][(n*1200):(n+1)*1200] = data['Tdur'][n*12]
        df['Toffset'][(n*1200):(n+1)*1200] = data['Toffset'][n*12]
        df['HeartRate'][(n*1200):(n+1)*1200] = data['HeartRate'][n*12]
        df['QRSFrontAxis'][(n*1200):(n+1)*1200] = data['QRSFrontAxis'][n*12]
        df['Sex'][(n*1200):(n+1)*1200] = data['Sex'][n*12]
        df['QTC'][(n*1200):(n+1)*1200] = data['QTC'][n*12]
        df['Age'][(n*1200):(n+1)*1200] = data['Age'][n*12]
        df['Date'][(n*1200):(n+1)*1200] = data['Date'][12*n]
        df['Time'][(n*1200):(n+1)*1200] = data['Time'][12*n]
        df['ID'][(n*1200):(n+1)*1200] = data['ID'][12*n]
        df['Name'][(n*1200):(n+1)*1200] = data['Name'][12*n]
    # Restore the derived limb leads and keep the unfiltered versions too.
    df[['III', 'aVF', 'aVL', 'aVR']] = VTI_leads
    unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']] = Unfiltered_VTI_leads
    df[['Unfiltered_I', 'Unfiltered_II', 'Unfiltered_III', 'Unfiltered_V1', 'Unfiltered_V2', 'Unfiltered_V3', 'Unfiltered_V4', 'Unfiltered_V5', 'Unfiltered_V6', 'Unfiltered_aVF', 'Unfiltered_aVL', 'Unfiltered_aVR']] = unfiltered_leads[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVF', 'aVL', 'aVR']]
    del unfiltered_leads
    del VTI_leads
if len(half_data) > 0:
array = np.unique(half_data[half_data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_half_data = half_data.loc[half_data['ID'].isin(array) & half_data['Date'].isin(array) & half_data['Time'].isin(array)]
half_data.drop(missing_half_data.index, axis=0,inplace=True)
missing_half_data = missing_half_data.reset_index(drop=True)
del Tag
del Tags
half_data = half_data.reset_index(drop=True)
for n in range(count500):
half_data.Tonset[n*12:(n+1)*12] = np.repeat(int(half_data.Tonset[n*12:(n+1)*12].sum()/12), 12)
half_data.Pdur[n*12:(n+1)*12] = np.repeat(int(half_data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(half_data.Waveform)):
t = base64.b64decode(half_data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
half_df = pd.DataFrame(a)
half_df.insert(0, 'Lead', half_data['Lead'])
blank = []
for n in range(count500):
blank.append(pd.pivot_table(half_df[(n*12):(n+1)*12], columns=half_df.Lead))
test = pd.concat(blank)
new = []
array = []
for n in range(13):
for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
if num > 128:
new.append(index - (256 * (256 - num)))
elif num < 128:
new.append(index + (256 * num))
elif num == 0:
new.append(index)
else:
new.append(index)
new = []
array.append(new)
array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
half_df = pd.DataFrame(array)
half_df = pd.pivot_table(half_df, columns=test.columns)
half_df = half_df.fillna(0)
blank = []
for n in range(count500):
blank.append(half_df[(n*1200):((n+1)*1200)-600])
test = pd.concat(blank)
half_df = test
half_df = half_df.reset_index(drop=True)
half_df = pd.pivot_table(half_df, columns=half_df.index)
array = []
for i in range(count500):
for x in range(12):
temp = []
new = []
for n in half_df.iloc[x,i*600:(i+1)*600]:
temp.append(n)
if len(temp) > 1:
new.append(temp[-2])
if len(temp) < 601 and len(temp) > 1:
new.append((temp[-1]+temp[-2])/2)
if len(temp) == 600:
new.append(temp[-1])
new.append(temp[-1])
array.append(new)
I = (np.asarray(array[::12])).reshape(count500*1200)
II = (np.asarray(array[1::12])).reshape(count500*1200)
III = (np.asarray(array[2::12])).reshape(count500*1200)
V1 = (np.asarray(array[3::12])).reshape(count500*1200)
V2 = (np.asarray(array[4::12])).reshape(count500*1200)
V3 = (np.asarray(array[5::12])).reshape(count500*1200)
V4 = (np.asarray(array[6::12])).reshape(count500*1200)
V5 = (np.asarray(array[7::12])).reshape(count500*1200)
V6 = (np.asarray(array[8::12])).reshape(count500*1200)
aVF = (np.asarray(array[9::12])).reshape(count500*1200)
aVL = (np.asarray(array[10::12])).reshape(count500*1200)
aVR = (np.asarray(array[11::12])).reshape(count500*1200)
half_df = pd.pivot_table(pd.DataFrame([I, II, III, V1, V2, V3, V4, V5, V6, aVF, aVL, aVR]), columns=test.columns)
half_df = half_df.fillna(0)
del I
del II
del III
del V1
del V2
del V3
del V4
del V5
del V6
del aVF
del aVL
del aVR
del a
del p
del o
del t
del blank
del new
del array
del temp
for n in range(count500):
for x in range(12):
if ((half_data.Toffset[n*12]-half_data.RRint[n*12]) >= half_data.Ponset[n*12]) or ((half_data.Ponset[n*12] + half_data.RRint[n*12]) - half_data.Toffset[n*12] == 1):
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
rrint = half_data.RRint[n*12]
if (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) > 0:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif rrint + half_data.Ponset[n*12] < 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
for x in range(12):
half_df.iloc[:,x] = half_df.iloc[:,x]*2.5
unfiltered_half_leads = half_df.copy()
for n in range(count500):
for inx in range(12):
test = half_df_fixer(half_df.iloc[:,inx][n*1200:(n+1)*1200], n)
gaps = []
lstOfNs = []
gap = []
for num in test[test.isna() == True].index:
lstOfNs.append(num)
if len(lstOfNs) == 1:
gap.append(lstOfNs[0])
if len(lstOfNs) > 1:
if lstOfNs[-1] - lstOfNs[-2] < 5:
gap.append(num)
elif lstOfNs[-1] - lstOfNs[-2] > 5:
gaps.append(gap)
gap = []
gap.append(num)
gaps.append(gap)
if gaps != [[]]:
x = []
y = []
for g in gaps:
if len(g) == 1:
x.append([g[-1]+1])
y.append(test[g[-1]+1])
if np.isnan(test.iloc[0]):
point1 = [g[0], test[g[-1]+1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
else:
point1 = [g[0]-1, test[g[0]-1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
for i in range(len(x)):
test[x[i]] = y[i]
if (trapz(abs(test[int(half_data.Qonset[n*12]):int(half_data.Qoffset[n*12])]))/trapz(abs(half_df.iloc[:,inx][int(half_data.Qonset[12*n]+(1200*n)):int(half_data.Qoffset[12*n]+(1200*n))]))) < .60:
test = half_df.iloc[:,inx][n*1200:(n+1)*1200]
test = medfilt(test, kernel_size=9)
half_df.iloc[:,inx][n*1200:(n+1)*1200] = test
del gaps
del lstOfNs
del gap
del test
half_VTI_leads = half_df[['III', 'aVF', 'aVL', 'aVR']]
half_df = half_df[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
Unfiltered_half_VTI_leads = unfiltered_half_leads[['III', 'aVF', 'aVL', 'aVR']]
unfiltered_half_leads = unfiltered_half_leads[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
matrix = [[.38, -.07, -.13, .05, -.01, .14, .06, .54],
[-.07, .93, .06, -.02, -.05, .06, -.17, .13],
[.11, -.23, -.43, -.06, -.14, -.20, -.11, .31]]
x = matrix[0]
y = matrix[1]
z = matrix[2]
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(half_df)):
xtemp.append((half_df.iloc[n].values * x).sum())
ytemp.append((half_df.iloc[n].values * y).sum())
ztemp.append((half_df.iloc[n].values * z).sum())
n+=1
half_df['x'] = xtemp
half_df['y'] = ytemp
half_df['z'] = ztemp
x = matrix[0]
y = matrix[1]
z = matrix[2]
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(unfiltered_half_leads)):
xtemp.append((unfiltered_half_leads.iloc[n].values * x).sum())
ytemp.append((unfiltered_half_leads.iloc[n].values * y).sum())
ztemp.append((unfiltered_half_leads.iloc[n].values * z).sum())
n+=1
half_df['Unfiltered_x'] = xtemp
half_df['Unfiltered_y'] = ytemp
half_df['Unfiltered_z'] = ztemp
del xtemp
del ytemp
del ztemp
half_df['Date'] = half_data['Date']
half_df['ID'] = half_data['ID']
half_df['Time'] = half_data['Time']
half_df['Ponset'] = half_data['Ponset']
half_df['Print'] = half_data['Print']
half_df['Pdur'] = half_data['Pdur']
half_df['Poffset'] = half_data['Poffset']
half_df['Qonset'] = half_data['Qonset']
half_df['Qrsdur'] = half_data['Qrsdur']
half_df['Qtint'] = half_data['Qtint']
half_df['Qoffset'] = half_data['Qoffset']
half_df['Tonset'] = half_data['Tonset']
half_df['Tdur'] = half_data['Tdur']
half_df['Toffset'] = half_data['Toffset']
half_df['HeartRate'] = half_data['HeartRate']
half_df['QRSFrontAxis'] = half_data['QRSFrontAxis']
half_df['Sex'] = half_data['Sex']
half_df['QTC'] = half_data['QTC']
half_df['Age'] = half_data['Age']
half_df['Name'] = half_data['Name']
# For each of the count500 half-rate records, broadcast the measurement taken
# from the record's first lead row (index n*12 — presumably 12 rows per record,
# one per lead; confirm against the loader) across the record's 1200-sample
# slice of the frame.
# NOTE(review): these chained-indexing assignments (df[col][slice] = v) rely on
# pandas returning a view; under copy-on-write they would silently stop writing
# through — consider half_df.loc[slice, col] = v.
for n in range(count500):
    half_df['Ponset'][(n*1200):(n+1)*1200] = half_data['Ponset'][n*12]
    half_df['Print'][(n*1200):(n+1)*1200] = half_data['Print'][n*12]
    half_df['Pdur'][(n*1200):(n+1)*1200] = half_data['Pdur'][n*12]
    half_df['Poffset'][(n*1200):(n+1)*1200] = half_data['Poffset'][n*12]
    half_df['Qonset'][(n*1200):(n+1)*1200] = half_data['Qonset'][n*12]
    half_df['Qrsdur'][(n*1200):(n+1)*1200] = half_data['Qrsdur'][n*12]
    half_df['Qtint'][(n*1200):(n+1)*1200] = half_data['Qtint'][n*12]
    half_df['Qoffset'][(n*1200):(n+1)*1200] = half_data['Qoffset'][n*12]
    half_df['Tonset'][(n*1200):(n+1)*1200] = half_data['Tonset'][n*12]
    half_df['Tdur'][(n*1200):(n+1)*1200] = half_data['Tdur'][n*12]
    half_df['Toffset'][(n*1200):(n+1)*1200] = half_data['Toffset'][n*12]
    half_df['HeartRate'][(n*1200):(n+1)*1200] = half_data['HeartRate'][n*12]
    half_df['QRSFrontAxis'][(n*1200):(n+1)*1200] = half_data['QRSFrontAxis'][n*12]
    half_df['Sex'][(n*1200):(n+1)*1200] = half_data['Sex'][n*12]
    half_df['QTC'][(n*1200):(n+1)*1200] = half_data['QTC'][n*12]
    half_df['Name'][(n*1200):(n+1)*1200] = half_data['Name'][12*n]
    half_df['Age'][(n*1200):(n+1)*1200] = half_data['Age'][12*n]
    half_df['ID'][(n*1200):(n+1)*1200] = half_data['ID'][12*n]
    # NOTE(review): 'Date' is broadcast here but was never copied into half_df
    # in the column-copy block above — presumably created earlier; confirm the
    # column exists before this loop runs.
    half_df['Date'][(n*1200):(n+1)*1200] = half_data['Date'][12*n]
    half_df['Time'][(n*1200):(n+1)*1200] = half_data['Time'][12*n]
# Attach the derived augmented/limb leads to the filtered frame, mirror them
# into the unfiltered lead table, then copy every unfiltered lead into the
# frame under 'Unfiltered_*' column names.
half_df[['III', 'aVF', 'aVL', 'aVR']] = half_VTI_leads
unfiltered_half_leads[['III', 'aVF', 'aVL', 'aVR']] = Unfiltered_half_VTI_leads
half_df[['Unfiltered_I', 'Unfiltered_II', 'Unfiltered_III', 'Unfiltered_V1', 'Unfiltered_V2', 'Unfiltered_V3', 'Unfiltered_V4', 'Unfiltered_V5', 'Unfiltered_V6', 'Unfiltered_aVF', 'Unfiltered_aVL', 'Unfiltered_aVR']] = unfiltered_half_leads[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVF', 'aVL', 'aVR']]
# Free the intermediate lead tables; they are fully copied into half_df now.
del unfiltered_half_leads
del half_VTI_leads
# Merge the half-rate records into the main frame. The original code used three
# independent `if` statements, but the first branch deletes the very names
# (`half_data`, `data`, `half_df`) that the later conditions re-evaluate, so
# `len(half_data)` raised NameError whenever both sources were non-empty.
# The conditions are mutually exclusive, so an if/elif chain evaluates each
# name exactly once and preserves the intended selection logic.
if (len(half_data) > 0) and (len(data) > 0):
    # Both full-rate and half-rate records present: append and renumber rows.
    df = pd.concat([df, half_df])
    df = df.reset_index(drop=True)
    del half_data
    del data
    del half_df
elif (len(half_data) > 0) and (len(data) == 0):
    # Only half-rate records: they become the whole working frame.
    df = half_df
    del half_df
    del half_data
elif (len(half_data) == 0) and (len(data) > 0):
    # Only full-rate records: keep df as-is (original had a no-op `df = df`).
    del data
# Root-sum-of-squares magnitude of the vectorcardiographic X/Y/Z leads.
df['total_xyz'] = ((df.x)**2 + (df.y)**2 + (df.z)**2)**0.5
# QRS voltage-time integral: trapezoidal area of the RMS signal between Qonset
# and Qoffset for each 1200-sample record. Onset/offset values are stored as
# sample offsets relative to the start of the record, hence the +1200*n shift.
QRSVTI = []
for n in range(count):
    QRSVTI.append(trapz(df.total_xyz[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))]))
# One value per record -> broadcast back to one value per sample.
QRSVTI = np.repeat(QRSVTI, 1200)
df['QRSVTI'] = QRSVTI
del QRSVTI
# Same integral extended through the end of the T wave (Qonset..Toffset).
QRStVTI = []
for n in range(count):
    QRStVTI.append(trapz(df.total_xyz[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))]))
QRStVTI = np.repeat(QRStVTI, 1200)
df['QRStVTI'] = QRStVTI
del QRStVTI
# Per-axis rectified (absolute-value) integrals over the QRS window.
XVTI = []
for n in range(count):
    XVTI.append(trapz(abs(df.x[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
XVTI = np.repeat(XVTI, 1200)
df['XVTI'] = XVTI
del XVTI
YVTI = []
for n in range(count):
    YVTI.append(trapz(abs(df.y[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
YVTI = np.repeat(YVTI, 1200)
df['YVTI'] = YVTI
del YVTI
ZVTI = []
for n in range(count):
    ZVTI.append(trapz(abs(df.z[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
ZVTI = np.repeat(ZVTI, 1200)
df['ZVTI'] = ZVTI
del ZVTI
# Euclidean combination of the per-axis rectified QRS integrals.
df['QRS3DArea'] = ((df.XVTI)**2 + (df.YVTI)**2 + (df.ZVTI)**2)**0.5
# Per-axis rectified integrals extended through the T wave (Qonset..Toffset).
XtVTI = []
for n in range(count):
    XtVTI.append(trapz(abs(df.x[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
XtVTI = np.repeat(XtVTI, 1200)
df['XtVTI'] = XtVTI
del XtVTI
YtVTI = []
for n in range(count):
    YtVTI.append(trapz(abs(df.y[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
YtVTI = np.repeat(YtVTI, 1200)
df['YtVTI'] = YtVTI
del YtVTI
ZtVTI = []
for n in range(count):
    ZtVTI.append(trapz(abs(df.z[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
ZtVTI = np.repeat(ZtVTI, 1200)
df['ZtVTI'] = ZtVTI
del ZtVTI
# Euclidean combination of the per-axis rectified QRS-T integrals.
df['QRSt3DArea'] = ((df.XtVTI)**2 + (df.YtVTI)**2 + (df.ZtVTI)**2)**0.5
# Signed (non-rectified) per-axis integrals over the QRS window — unlike the
# 'XVTI'-style columns above, these omit abs(), so positive and negative
# deflections cancel (a "vector" area rather than a rectified area).
XVTI = []
for n in range(count):
    XVTI.append(trapz((df.x[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
XVTI = np.repeat(XVTI, 1200)
df['XVector_VTI'] = XVTI
del XVTI
YVTI = []
for n in range(count):
    YVTI.append(trapz((df.y[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
YVTI = np.repeat(YVTI, 1200)
df['YVector_VTI'] = YVTI
del YVTI
ZVTI = []
for n in range(count):
    ZVTI.append(trapz((df.z[int(df.Qonset[1200*n]+(1200*n)):int(df.Qoffset[1200*n]+(1200*n))])))
ZVTI = np.repeat(ZVTI, 1200)
df['ZVector_VTI'] = ZVTI
del ZVTI
# Magnitude of the signed QRS area vector.
df['QRS3DVector_Area'] = ((df.XVector_VTI)**2 + (df.YVector_VTI)**2 + (df.ZVector_VTI)**2)**0.5
# Signed per-axis integrals extended through the T wave (Qonset..Toffset).
XtVTI = []
for n in range(count):
    XtVTI.append(trapz((df.x[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
XtVTI = np.repeat(XtVTI, 1200)
df['XtVector_VTI'] = XtVTI
del XtVTI
YtVTI = []
for n in range(count):
    YtVTI.append(trapz((df.y[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
YtVTI = np.repeat(YtVTI, 1200)
df['YtVector_VTI'] = YtVTI
del YtVTI
ZtVTI = []
for n in range(count):
    ZtVTI.append(trapz((df.z[int(df.Qonset[1200*n]+(1200*n)):int(df.Toffset[1200*n]+(1200*n))])))
ZtVTI = np.repeat(ZtVTI, 1200)
df['ZtVector_VTI'] = ZtVTI
del ZtVTI
# Magnitude of the signed QRS-T area vector.
df['QRSt3DVector_Area'] = ((df.XtVector_VTI)**2 + (df.YtVector_VTI)**2 + (df.ZtVector_VTI)**2)**0.5
# Per-record T-wave peak amplitudes (per axis and for the RMS signal), the
# peak-to-end durations (TpTe: number of samples from the peak to the end of
# the measurement window), and the QRS peak-to-end duration (QpQe).
# NOTE(review): the loop variable `x` shadows the earlier lead series `x`; it
# is rebound again by the plotting loop at the end of the script, so this is
# harmless here but fragile. Each max/index pair recomputes the same list
# comprehension twice — an easy future cleanup. abs() on total_xyz is
# redundant since that column is a root-sum-of-squares (always >= 0).
Tamp = []
XTamp = []
YTamp = []
ZTamp = []
TpTe = []
XTpTe = []
YTpTe = []
ZTpTe = []
QpQe = []
for x in range(count):
    # Degenerate annotation (Tonset after Toffset): T-wave metrics are
    # undefined for this record; still compute QpQe from the QRS window.
    if int(df.Tonset[1200*x]+(1200*x)) > int(df.Toffset[1200*x]+(1200*x)):
        XTamp.append(np.nan)
        XTpTe.append(np.nan)
        YTamp.append(np.nan)
        YTpTe.append(np.nan)
        ZTamp.append(np.nan)
        ZTpTe.append(np.nan)
        Tamp.append(np.nan)
        TpTe.append(np.nan)
        Qa = [abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]]))
        QpQe.append(len([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]][Qa:]))
    # Zero-width T annotation: widen the window by +/-10 samples so a peak
    # can still be located.
    elif df.Tonset[1200*x] == df.Toffset[1200*x]:
        XTamp.append(max([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        Ta = [abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]].index(max([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        XTpTe.append(len([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]][Ta:]))
        YTamp.append(max([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        Ta = [abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]].index(max([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        YTpTe.append(len([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]][Ta:]))
        ZTamp.append(max([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        Ta = [abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]].index(max([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        ZTpTe.append(len([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]][Ta:]))
        Tamp.append(max([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        Ta = [abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]].index(max([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]]))
        TpTe.append(len([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x))-10:int(df.Toffset[1200*x]+(1200*x))+10]][Ta:]))
        Qa = [abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]]))
        QpQe.append(len([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]][Qa:]))
    # Normal case: search the annotated Tonset..Toffset window as-is.
    else:
        XTamp.append(max([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        Ta = [abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        XTpTe.append(len([abs(n) for n in df.x[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]][Ta:]))
        YTamp.append(max([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        Ta = [abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        YTpTe.append(len([abs(n) for n in df.y[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]][Ta:]))
        ZTamp.append(max([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        Ta = [abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        ZTpTe.append(len([abs(n) for n in df.z[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]][Ta:]))
        Tamp.append(max([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        Ta = [abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]]))
        TpTe.append(len([abs(n) for n in df.total_xyz[int(df.Tonset[1200*x]+(1200*x)):int(df.Toffset[1200*x]+(1200*x))]][Ta:]))
        Qa = [abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]].index(max([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]]))
        QpQe.append(len([abs(n) for n in df.total_xyz[int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]][Qa:]))
# Broadcast the per-record T-wave/QRS peak metrics to one value per sample
# (1200 samples per record) and attach them as frame columns, freeing the
# temporary lists as we go.
QpQe = np.repeat(QpQe, 1200)
df['QpQe'] = QpQe
Tamp = np.repeat(Tamp, 1200)
df['Tamp'] = Tamp
XTamp = np.repeat(XTamp, 1200)
df['XTamp'] = XTamp
YTamp = np.repeat(YTamp, 1200)
df['YTamp'] = YTamp
ZTamp = np.repeat(ZTamp, 1200)
df['ZTamp'] = ZTamp
XTpTe = np.repeat(XTpTe, 1200)
df['XTpTe'] = XTpTe
YTpTe = np.repeat(YTpTe, 1200)
df['YTpTe'] = YTpTe
ZTpTe = np.repeat(ZTpTe, 1200)
df['ZTpTe'] = ZTpTe
TpTe = np.repeat(TpTe, 1200)
df['TpTe'] = TpTe
del Tamp
del XTamp
del YTamp
del ZTamp
del XTpTe
del YTpTe
del ZTpTe
del TpTe
del QpQe
# QRS amplitude per lead: for each record, scan all 16 signal columns over the
# Qonset..Qoffset window and record a single amplitude per (record, lead) pair
# in row-major order (record-by-record, 16 leads each).
temp = df[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVR', 'aVL', 'aVF', 'x', 'y', 'z', 'total_xyz']]
Qamp = []
for x in range(count):
    for i in range(16):
        # Signal crosses zero: amplitude is peak-to-peak.
        if min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) < 0 and max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) > 0:
            Qamp.append(max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) - min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]))
        # NOTE(review): this branch is unreachable — min > 0 together with
        # max < 0 can never hold (min <= max). Dead code, kept verbatim.
        elif min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) > 0 and max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) < 0:
            Qamp.append(max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) - min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]))
        # Entirely negative deflection: report the (negative) minimum.
        elif min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) < 0 and max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]) < 0:
            Qamp.append(min(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]))
        # Entirely positive deflection: report the maximum.
        else:
            Qamp.append(max(temp.iloc[:,i][int(df.Qonset[1200*x]+(1200*x)):int(df.Qoffset[1200*x]+(1200*x))]))
del temp
# Strided slices pull out columns 12..15 of each 16-lead group:
# x-lead, y-lead, z-lead and the RMS amplitude, one per record.
XQamp = Qamp[12::16]
XQamp = np.repeat(XQamp, 1200)
YQamp = Qamp[13::16]
YQamp = np.repeat(YQamp, 1200)
ZQamp = Qamp[14::16]
ZQamp = np.repeat(ZQamp, 1200)
Qamp = Qamp[15::16]
Qamp = np.repeat(Qamp, 1200)
df['XQamp'] = XQamp
df['YQamp'] = YQamp
df['ZQamp'] = ZQamp
df['Qamp'] = Qamp
del XQamp
del YQamp
del ZQamp
del Qamp
# Build the one-row-per-ECG summary table: the broadcast columns are constant
# within each 1200-sample record, so every 1200th row is one record's summary.
text_df = df[['ID', 'Name', 'Age', 'Sex', 'Date', 'Time','HeartRate','Pdur','Print','Qrsdur','Qtint','QTC','TpTe','QRSFrontAxis',
              'QRSVTI','XVector_VTI', 'YVector_VTI','ZVector_VTI',
              'QRStVTI', 'XtVTI','YtVTI', 'ZtVTI',
              'Qamp','XQamp','YQamp', 'ZQamp','Tamp','XTamp', 'YTamp','ZTamp']]
text_df = text_df[::1200]
# text_df.to_csv('Entresto_Final_Data.csv', index=False)
# signal_df.to_pickle('Entresto_Final_ML.pkl')
text_df.to_csv('data.csv', index=False)
# Render a 2x2 panel (X, Y, Z, RMS traces) per record and save it as a PNG
# named after the source file (extension stripped by root_names[n][:-4]).
for n in range(count):
    # pd.DataFrame(text_df.iloc[n,:]).T.to_csv('{}.csv'.format(root_names[n][:-4]), index=False)
    x = df.x[n*1200:(n+1)*1200]
    y = df.y[n*1200:(n+1)*1200]
    z = df.z[n*1200:(n+1)*1200]
    rms = df.total_xyz[n*1200:(n+1)*1200]
    fig, ((ax, ax1), (ax2, ax3)) = plt.subplots(2, 2, figsize=(15, 8))
    ax.plot(x)
    ax.set_title('Lead X')
    ax1.plot(y)
    ax1.set_title('Lead Y')
    ax2.plot(z)
    ax2.set_title('Lead Z')
    ax3.plot(rms)
    ax3.set_title('XYZ RMS')
    fig.subplots_adjust(hspace=.3)
    fig.subplots_adjust(wspace=.1)
    # NOTE(review): figures are never closed — at dpi=1800 this accumulates
    # significant memory across records; consider plt.close(fig) per loop.
    fig.savefig('{}.png'.format(root_names[n][:-4]), dpi=1800, format='png')
# The working frame is no longer needed once all outputs are written.
del df
| 46.901195
| 381
| 0.518951
| 9,081
| 58,861
| 3.305913
| 0.034798
| 0.036308
| 0.025782
| 0.035975
| 0.818694
| 0.796676
| 0.778921
| 0.738949
| 0.724126
| 0.698145
| 0
| 0.102119
| 0.25601
| 58,861
| 1,255
| 382
| 46.901195
| 0.583417
| 0.004723
| 0
| 0.468973
| 0
| 0
| 0.097781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004069
| false
| 0
| 0.010173
| 0
| 0.018311
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d6adff9d311c47f0b9f78e43e24aa7eb48cd8acf
| 30
|
py
|
Python
|
src/export/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | 2
|
2021-04-15T15:47:33.000Z
|
2021-09-07T23:15:34.000Z
|
src/export/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | null | null | null |
src/export/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | 1
|
2020-09-28T01:48:09.000Z
|
2020-09-28T01:48:09.000Z
|
from .timeline import Timeline
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d6b2bde72f4c90235159cdd45776a32bac7e359a
| 27
|
py
|
Python
|
examples/sentences/__init__.py
|
nprezant/GAlgorithm
|
5259281fb7ed0efe1effcdc39ae1850c0a47b9a5
|
[
"MIT"
] | 1
|
2021-12-18T23:25:12.000Z
|
2021-12-18T23:25:12.000Z
|
examples/sentences/__init__.py
|
nprezant/GAlgorithm
|
5259281fb7ed0efe1effcdc39ae1850c0a47b9a5
|
[
"MIT"
] | 1
|
2022-03-12T01:04:13.000Z
|
2022-03-12T01:04:13.000Z
|
examples/sentences/__init__.py
|
nprezant/GAlgorithm
|
5259281fb7ed0efe1effcdc39ae1850c0a47b9a5
|
[
"MIT"
] | null | null | null |
from .sentences import run
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba70002b0dc10f81f84dbc1afaf276e7b86647a6
| 34
|
py
|
Python
|
discodo/server/__init__.py
|
eunwoo1104/discodo
|
699250d4fb62d970acd2573a5d967966872b7403
|
[
"MIT"
] | 105
|
2020-06-21T23:37:20.000Z
|
2022-02-11T14:27:07.000Z
|
discodo/server/__init__.py
|
eunwoo1104/discodo
|
699250d4fb62d970acd2573a5d967966872b7403
|
[
"MIT"
] | 116
|
2020-07-12T03:55:24.000Z
|
2022-03-31T23:02:54.000Z
|
discodo/server/__init__.py
|
eunwoo1104/discodo
|
699250d4fb62d970acd2573a5d967966872b7403
|
[
"MIT"
] | 32
|
2020-07-12T03:38:35.000Z
|
2022-02-02T23:03:29.000Z
|
from .server import app as server
| 17
| 33
| 0.794118
| 6
| 34
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 34
| 1
| 34
| 34
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bad5fe282253383257bd9aa423fef4be36fa68af
| 24
|
py
|
Python
|
dpy_cooldowns/psql/__init__.py
|
TheGabDooSan/dpy-psql-cooldowns
|
413d1dc536c70c256722d8649e4ced94debb8b30
|
[
"MIT"
] | 1
|
2021-04-05T16:29:32.000Z
|
2021-04-05T16:29:32.000Z
|
dpy_cooldowns/psql/__init__.py
|
gabriel-dahan/dpy-cooldowns
|
413d1dc536c70c256722d8649e4ced94debb8b30
|
[
"MIT"
] | null | null | null |
dpy_cooldowns/psql/__init__.py
|
gabriel-dahan/dpy-cooldowns
|
413d1dc536c70c256722d8649e4ced94debb8b30
|
[
"MIT"
] | null | null | null |
from .cooldowns import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
24042e88e3f05abb52b25897f4087c1761558f65
| 40
|
py
|
Python
|
w2_pscr/phw.py
|
polde-live/python-mich-2
|
f5890ca366451bde93b58e3b5ee167ee68f0aa6f
|
[
"Unlicense"
] | null | null | null |
w2_pscr/phw.py
|
polde-live/python-mich-2
|
f5890ca366451bde93b58e3b5ee167ee68f0aa6f
|
[
"Unlicense"
] | null | null | null |
w2_pscr/phw.py
|
polde-live/python-mich-2
|
f5890ca366451bde93b58e3b5ee167ee68f0aa6f
|
[
"Unlicense"
] | null | null | null |
# Python 3 requires print to be called as a function; the bare Python 2
# statement form is a SyntaxError on any modern interpreter.
print("Hello world from Linux shell :)")
| 20
| 39
| 0.725
| 6
| 40
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 1
| 40
| 40
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
79e8d11fa27da8d18cdfc4d15beba9e4eb65501e
| 39
|
py
|
Python
|
learning/encoders/__init__.py
|
jesse-michael-han/oracle
|
654c8e3aa27ab48a0b3533b102536d2a33cd701a
|
[
"Apache-2.0"
] | null | null | null |
learning/encoders/__init__.py
|
jesse-michael-han/oracle
|
654c8e3aa27ab48a0b3533b102536d2a33cd701a
|
[
"Apache-2.0"
] | null | null | null |
learning/encoders/__init__.py
|
jesse-michael-han/oracle
|
654c8e3aa27ab48a0b3533b102536d2a33cd701a
|
[
"Apache-2.0"
] | null | null | null |
from .text_encoders import TextEncoder
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79f1c52c4aa435759bd824ef1901192cadf183c4
| 29
|
py
|
Python
|
school/schema/__init__.py
|
iPalmTech/django-ariadne-starter
|
5930b6ca13c9d2a726d3889ce899f49fb6d5301c
|
[
"MIT"
] | null | null | null |
school/schema/__init__.py
|
iPalmTech/django-ariadne-starter
|
5930b6ca13c9d2a726d3889ce899f49fb6d5301c
|
[
"MIT"
] | null | null | null |
school/schema/__init__.py
|
iPalmTech/django-ariadne-starter
|
5930b6ca13c9d2a726d3889ce899f49fb6d5301c
|
[
"MIT"
] | null | null | null |
from .school import type_defs
| 29
| 29
| 0.862069
| 5
| 29
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0306ab6bcea018b13da0950e665833ecd5252f64
| 160
|
py
|
Python
|
fixtures/python/random-names/tests/function.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | 4
|
2021-12-22T18:07:10.000Z
|
2021-12-29T09:22:44.000Z
|
fixtures/python/random-names/tests/function.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | null | null | null |
fixtures/python/random-names/tests/function.py
|
guiloga/scalade
|
fd59b239fb35e8a7028baea3ed6d4b23282c200d
|
[
"MIT"
] | null | null | null |
from src.function import generate_random_names
def test_generate_random_names():
    """generate_random_names(n) should yield a comma-separated string of n names."""
    generated = generate_random_names(12)
    name_count = len(generated.split(","))
    assert name_count == 12
| 22.857143
| 46
| 0.75
| 22
| 160
| 5.136364
| 0.590909
| 0.371681
| 0.504425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029197
| 0.14375
| 160
| 6
| 47
| 26.666667
| 0.79562
| 0
| 0
| 0
| 1
| 0
| 0.00625
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0323316ced880dd2e480e12e3f966e612afedddc
| 43
|
py
|
Python
|
smartblinds_client/__init__.py
|
zhangquan0126/smartblinds-client
|
940ae7a1d99d7e0172686b277c9195bd1bba76c1
|
[
"MIT"
] | null | null | null |
smartblinds_client/__init__.py
|
zhangquan0126/smartblinds-client
|
940ae7a1d99d7e0172686b277c9195bd1bba76c1
|
[
"MIT"
] | null | null | null |
smartblinds_client/__init__.py
|
zhangquan0126/smartblinds-client
|
940ae7a1d99d7e0172686b277c9195bd1bba76c1
|
[
"MIT"
] | null | null | null |
from .smartblinds import SmartBlindsClient
| 21.5
| 42
| 0.883721
| 4
| 43
| 9.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
034a8d4b81d044394349125761ab8c5bc639bf80
| 29
|
py
|
Python
|
nn/transformer/__init__.py
|
ollmer/clickbait
|
3dd54e6b6c804b97d9d955c2f4bea7bfbcadbfc7
|
[
"MIT"
] | 22
|
2018-07-27T13:50:34.000Z
|
2021-01-05T08:46:34.000Z
|
nn/transformer/__init__.py
|
ollmer/clickbait
|
3dd54e6b6c804b97d9d955c2f4bea7bfbcadbfc7
|
[
"MIT"
] | 1
|
2020-06-07T23:06:10.000Z
|
2020-06-07T23:06:10.000Z
|
nn/transformer/__init__.py
|
ollmer/clickbait
|
3dd54e6b6c804b97d9d955c2f4bea7bfbcadbfc7
|
[
"MIT"
] | 7
|
2018-08-06T23:12:35.000Z
|
2020-05-09T08:46:33.000Z
|
from .decoder import Decoder
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
034f059f0fa3e01996a1f4415b3d6cfea37a0d0a
| 243
|
py
|
Python
|
pymtl3/dsl/Placeholder.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 152
|
2020-06-03T02:34:11.000Z
|
2022-03-30T04:16:45.000Z
|
pymtl3/dsl/Placeholder.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 139
|
2019-05-29T00:37:09.000Z
|
2020-05-17T16:49:26.000Z
|
pymtl3/dsl/Placeholder.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 22
|
2020-05-18T13:42:05.000Z
|
2022-03-11T08:37:51.000Z
|
"""
========================================================================
Placeholder.py
========================================================================
Author : Shunning Jiang
Date : June 1, 2019
"""
class Placeholder:
    # Intentionally empty marker class — presumably subclassed or tested with
    # isinstance elsewhere in the DSL; confirm usage at the import sites.
    pass
| 20.25
| 72
| 0.26749
| 12
| 243
| 5.416667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.09465
| 243
| 11
| 73
| 22.090909
| 0.272727
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
035bc141ad5b616760dea191363435d4fc89d497
| 138
|
py
|
Python
|
pythainlp/tokenize/ssg.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/tokenize/ssg.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/tokenize/ssg.py
|
Subarna578/pythainlp
|
9650a40396719284add17bb09f50e948dea41053
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import List
import ssg
def segment(text: str) -> List[str]:
    """Tokenize *text* into syllables by delegating to ssg.syllable_tokenize."""
    return ssg.syllable_tokenize(text)
| 15.333333
| 38
| 0.673913
| 20
| 138
| 4.6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 0.181159
| 138
| 8
| 39
| 17.25
| 0.80531
| 0.152174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
cee5dda684f675e865540b98daeb3dfbcf846de7
| 19,332
|
py
|
Python
|
autolens/data/array/util/grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
autolens/data/array/util/grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
autolens/data/array/util/grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
from autolens import decorator_util
import numpy as np
from autolens.data.array.util import mask_util
@decorator_util.jit()
def centres_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin):
    """Return the (y, x) arc-second central coordinates of a 2D array.

    The coordinate system has the positive y axis pointing up and the positive
    x axis pointing right; the origin shifts the centre accordingly.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel conversion factors.
    origin : (float, float)
        The (y, x) origin the centre is shifted to.

    Returns
    -------
    tuple (float, float)
        The (y, x) arc-second central coordinates.

    Examples
    --------
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centre_y = float(shape[0] - 1) / 2 + origin[0] / pixel_scales[0]
    centre_x = float(shape[1] - 1) / 2 - origin[1] / pixel_scales[1]
    return (centre_y, centre_x)
@decorator_util.jit()
def regular_grid_2d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)):
    """Compute the (y, x) arc-second coordinates at the centre of every pixel
    of an array of shape (rows, columns).

    Coordinates run from the top-left corner, so the pixel at [0, 0] has
    negative x and y arc-second values. The result has shape
    (rows, columns, 2) with y coordinates in index 0 of the last dimension
    and x coordinates in index 1.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel conversion factors.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        Regular grid of (y, x) arc-second pixel-centre coordinates, with
        dimensions (rows, columns, 2).

    Examples
    --------
    regular_grid_1d = regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    grid = np.zeros((shape[0], shape[1], 2))
    for row in range(shape[0]):
        for col in range(shape[1]):
            grid[row, col, 0] = -(row - centres[0]) * pixel_scales[0]
            grid[row, col, 1] = (col - centres[1]) * pixel_scales[1]
    return grid
@decorator_util.jit()
def regular_grid_1d_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin=(0.0, 0.0)):
    """Compute the (y, x) arc-second coordinates at the centre of every pixel
    of an array of shape (rows, columns), flattened to one dimension.

    Coordinates run from the top-left corner, so the pixel at [0, 0] has
    negative x and y arc-second values. The result has shape
    (rows * columns, 2) with y coordinates in index 0 of the second dimension
    and x coordinates in index 1; pixels are ordered row by row.

    Parameters
    ----------
    shape : (int, int)
        The (y, x) shape of the 2D array.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel conversion factors.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        Regular grid of (y, x) arc-second pixel-centre coordinates, with
        dimensions (rows * columns, 2).

    Examples
    --------
    regular_grid_1d = regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centres = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)
    grid = np.zeros((shape[0] * shape[1], 2))
    index = 0
    for row in range(shape[0]):
        for col in range(shape[1]):
            grid[index, 0] = -(row - centres[0]) * pixel_scales[0]
            grid[index, 1] = (col - centres[1]) * pixel_scales[1]
            index += 1
    return grid
@decorator_util.jit()
def regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask, pixel_scales, origin=(0.0, 0.0)):
    """Compute the (y, x) arc-second coordinates at the centre of every
    unmasked pixel of a 2D mask array of shape (rows, columns).

    Coordinates run from the top-left corner, with the first unmasked pixel
    taking index 0. The result has shape (total_unmasked_pixels, 2) with y
    coordinates in index 0 of the second dimension and x coordinates in
    index 1.

    Parameters
    ----------
    mask : ndarray
        2D array of bools; *False* entries are unmasked and included in the
        computed grid.
    pixel_scales : (float, float)
        The (y, x) arc-second to pixel conversion factors of the mask array.
    origin : (float, float)
        The (y, x) origin the grid is shifted around.

    Returns
    -------
    ndarray
        Regular grid of (y, x) arc-second coordinates of the unmasked pixel
        centres, with dimensions (total_unmasked_pixels, 2).

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False]
                     [True, False, True]])
    regular_grid_1d = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    grid_2d = regular_grid_2d_from_shape_pixel_scales_and_origin(mask.shape, pixel_scales, origin)
    total_unmasked = mask_util.total_regular_pixels_from_mask(mask)
    grid_1d = np.zeros(shape=(total_unmasked, 2))
    index = 0
    for row in range(mask.shape[0]):
        for col in range(mask.shape[1]):
            if not mask[row, col]:
                grid_1d[index, :] = grid_2d[row, col]
                index += 1
    return grid_1d
@decorator_util.jit()
def sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask, pixel_scales, sub_grid_size, origin=(0.0, 0.0)):
    """For the sub-grid, every unmasked pixel of a 2D mask array of shape (rows, columns) is divided
    into a finer uniform grid of shape (sub_grid_size, sub_grid_size). This routine computes the
    (y,x) arc-second coordinates at the centre of every sub-pixel defined by this grid.

    Coordinates are defined from the top-left corner, where the first unmasked sub-pixel corresponds
    to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one
    another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has
    index 2, and so forth.

    The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_grid_size**2, 2).
    y coordinates are stored in index 0 of the second dimension, x coordinates in index 1.

    Parameters
    ----------
    mask : ndarray
        A 2D array of bools, where *False* values are unmasked and therefore included as part of the
        calculated sub-grid.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the 2D mask array.
    sub_grid_size : int
        The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
        The (y,x) origin of the 2D array, which the sub-grid is shifted around.

    Returns
    -------
    ndarray
        A sub-grid of (y,x) arc-second coordinates at the centre of every sub-pixel of every
        unmasked pixel on the 2D mask array, with dimensions
        (total_unmasked_pixels*sub_grid_size**2, 2).

    Examples
    --------
    mask = np.array([[True, False, True],
                     [False, False, False],
                     [True, False, True]])
    sub_grid_1d = sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(
        mask=mask, pixel_scales=(0.5, 0.5), sub_grid_size=2, origin=(0.0, 0.0))
    """
    total_sub_pixels = mask_util.total_sub_pixels_from_mask_and_sub_grid_size(mask, sub_grid_size)
    sub_grid = np.zeros(shape=(total_sub_pixels, 2))
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=mask.shape, pixel_scales=pixel_scales,
                                                                     origin=origin)
    sub_index = 0
    # Half-width of a full pixel and the spacing between successive sub-pixel centres.
    # NOTE(review): the step divides by (sub_grid_size + 1), placing sub-pixel centres at fractions
    # 1/(n+1) .. n/(n+1) of the pixel width rather than at the centres of n equal sub-cells
    # (which would be (2i+1)/(2n)) — confirm this spacing is intentional.
    y_sub_half = pixel_scales[0] / 2
    y_sub_step = pixel_scales[0] / (sub_grid_size + 1)
    x_sub_half = pixel_scales[1] / 2
    x_sub_step = pixel_scales[1] / (sub_grid_size + 1)
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if not mask[y, x]:
                # Un-negated pixel-centre offsets in arc seconds; the y value is negated on the
                # final assignment so y increases upwards while array rows increase downwards.
                y_arcsec = (y - centres_arc_seconds[0]) * pixel_scales[0]
                x_arcsec = (x - centres_arc_seconds[1]) * pixel_scales[1]
                for y1 in range(sub_grid_size):
                    for x1 in range(sub_grid_size):
                        sub_grid[sub_index, 0] = -(y_arcsec - y_sub_half + (y1 + 1) * y_sub_step)
                        sub_grid[sub_index, 1] = x_arcsec - x_sub_half + (x1 + 1) * x_sub_step
                        sub_index += 1
    return sub_grid
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert a grid of (y,x) arc-second coordinates to a grid of (y,x) pixel coordinate values.

    Pixel coordinates are returned as floats, so they include the decimal offset from each pixel's
    top-left corner relative to the input arc-second coordinate.

    The pixel-coordinate origin is at the top-left corner of the grid: pixel [0,0] corresponds to
    the highest y arc-second coordinate and the lowest x arc-second coordinate on the grid. The
    arc-second grid is defined by an origin, and coordinates are shifted to this origin before
    computing their pixel coordinate values.

    The input and output grids are both of shape (total_pixels, 2).

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The grid of (y,x) coordinates in arc seconds which is converted to pixel value coordinates.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        A grid of (y,x) pixel-value coordinates with dimensions (total_pixels, 2).

    Examples
    --------
    grid_arc_seconds_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_1d = grid_arc_seconds_1d_to_grid_pixels_1d(
        grid_arc_seconds_1d=grid_arc_seconds_1d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales,
                                                                     origin=origin)
    total_points = grid_arc_seconds_1d.shape[0]
    grid_pixels = np.zeros((total_points, 2))

    for point in range(total_points):
        # y is negated because pixel rows increase downwards while arc-second y increases upwards;
        # +0.5 moves from pixel-corner to pixel-centre convention.
        grid_pixels[point, 0] = (-grid_arc_seconds_1d[point, 0] / pixel_scales[0]) + centres_arc_seconds[0] + 0.5
        grid_pixels[point, 1] = (grid_arc_seconds_1d[point, 1] / pixel_scales[1]) + centres_arc_seconds[1] + 0.5

    return grid_pixels
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert a grid of (y,x) arc-second coordinates to a grid of (y,x) pixel values.

    Values are truncated to integers so they map directly to the pixel they are contained within
    (note the returned array itself has float dtype, as allocated by np.zeros).

    The pixel-coordinate origin is at the top-left corner of the grid: pixel [0,0] corresponds to
    the highest y arc-second coordinate and the lowest x arc-second coordinate. The arc-second grid
    is defined by an origin, and coordinates are shifted to this origin before computing their
    pixel values.

    The input and output grids are both of shape (total_pixels, 2).

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The grid of (y,x) coordinates in arc seconds which is converted to pixel indexes.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        A grid of (y,x) pixel indexes with dimensions (total_pixels, 2).

    Examples
    --------
    grid_arc_seconds_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_1d = grid_arc_seconds_1d_to_grid_pixel_centres_1d(
        grid_arc_seconds_1d=grid_arc_seconds_1d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales,
                                                                     origin=origin)
    total_points = grid_arc_seconds_1d.shape[0]
    grid_pixels = np.zeros((total_points, 2))

    for point in range(total_points):
        # Same mapping as grid_arc_seconds_1d_to_grid_pixels_1d, truncated to the containing pixel.
        grid_pixels[point, 0] = int((-grid_arc_seconds_1d[point, 0] / pixel_scales[0]) + centres_arc_seconds[0] + 0.5)
        grid_pixels[point, 1] = int((grid_arc_seconds_1d[point, 1] / pixel_scales[1]) + centres_arc_seconds[1] + 0.5)

    return grid_pixels
@decorator_util.jit()
def grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert a grid of (y,x) arc-second coordinates to a grid of 1D pixel indexes.

    Indexes are returned as integers counting from the top-left of the 2D grid going right and then
    downwards (row-major order).

    For example, if a row has 10 pixels:
    - The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.
    - The fifth pixel on the top row, whose 2D index is [0,4], corresponds to 1D index 4.
    - The first pixel on the second row, whose 2D index is [1,0], corresponds to 1D index 10.

    The arc-second coordinate grid is defined by an origin, and coordinates are shifted to this
    origin before computing their 1D grid pixel indexes.

    Parameters
    ----------
    grid_arc_seconds_1d : ndarray
        The grid of (y,x) coordinates in arc seconds which is converted to 1D pixel indexes.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        A 1D array of pixel indexes with dimensions (total_pixels,).
        NOTE(review): the array has float dtype (np.zeros default); the int() casts below are
        stored back as floats.

    Examples
    --------
    grid_arc_seconds_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_1d = grid_arc_seconds_1d_to_grid_pixel_indexes_1d(
        grid_arc_seconds_1d=grid_arc_seconds_1d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    grid_pixels = grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=shape,
                                                               pixel_scales=pixel_scales, origin=origin)
    grid_pixel_indexes = np.zeros(grid_pixels.shape[0])
    for i in range(grid_pixels.shape[0]):
        # Row-major flattening: index = row * row_length + column.
        grid_pixel_indexes[i] = int(grid_pixels[i,0] * shape[1] + grid_pixels[i,1])
    return grid_pixel_indexes
@decorator_util.jit()
def grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Convert a grid of (y,x) pixel coordinates to a grid of (y,x) arc-second values.

    The pixel-coordinate origin is at the top-left corner of the grid: pixel [0,0] corresponds to
    the highest y arc-second coordinate and the lowest x arc-second coordinate. The arc-second
    origin shifts the coordinates after they are computed from the pixel coordinates.

    The input and output grids are both of shape (total_pixels, 2).

    Parameters
    ----------
    grid_pixels_1d : ndarray
        The grid of (y,x) coordinates in pixel values which is converted to arc-second coordinates.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the arc-second grid is shifted to.

    Returns
    -------
    ndarray
        A grid of (y,x) arc-second coordinates with dimensions (total_pixels, 2).

    Examples
    --------
    grid_pixels_1d = np.array([[0,0], [0,1], [1,0], [1,1]])
    grid_arc_seconds_1d = grid_pixels_1d_to_grid_arc_seconds_1d(
        grid_pixels_1d=grid_pixels_1d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    centres_arc_seconds = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales,
                                                                     origin=origin)
    total_points = grid_pixels_1d.shape[0]
    grid_arc_seconds = np.zeros((total_points, 2))

    for point in range(total_points):
        # Inverse of grid_arc_seconds_1d_to_grid_pixels_1d: -0.5 undoes the pixel-centre offset,
        # and y is negated to flip from downward rows to upward arc-second y.
        grid_arc_seconds[point, 0] = -(grid_pixels_1d[point, 0] - centres_arc_seconds[0] - 0.5) * pixel_scales[0]
        grid_arc_seconds[point, 1] = (grid_pixels_1d[point, 1] - centres_arc_seconds[1] - 0.5) * pixel_scales[1]

    return grid_arc_seconds
| 43.936364
| 125
| 0.650372
| 3,022
| 19,332
| 3.976506
| 0.058901
| 0.079637
| 0.045436
| 0.045269
| 0.829075
| 0.784139
| 0.766581
| 0.74378
| 0.712407
| 0.692685
| 0
| 0.032439
| 0.253724
| 19,332
| 440
| 126
| 43.936364
| 0.800513
| 0.63713
| 0
| 0.350515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092784
| false
| 0
| 0.030928
| 0
| 0.216495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3061fe14ede5d5973d4e49b21690e046e878a7c7
| 141
|
py
|
Python
|
users/models.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 2
|
2016-12-05T03:31:47.000Z
|
2017-02-13T20:10:39.000Z
|
users/models.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 1
|
2016-12-14T10:53:15.000Z
|
2016-12-17T18:52:25.000Z
|
users/models.py
|
oangervuori/namubufferi
|
b9353b1d1a32e18e93cb1e9bd2b591950d54269a
|
[
"MIT"
] | 1
|
2017-01-14T10:56:28.000Z
|
2017-01-14T10:56:28.000Z
|
from django.contrib.auth.models import AbstractUser
from uuidmodels.models import UUIDModel
class User(AbstractUser, UUIDModel):
    """Project user model: Django's AbstractUser with a UUID primary key from UUIDModel.

    No extra fields or behavior are added here; the class exists so the project
    controls its own AUTH_USER_MODEL from the start.
    """
    pass
| 17.625
| 51
| 0.808511
| 17
| 141
| 6.705882
| 0.705882
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134752
| 141
| 7
| 52
| 20.142857
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
3081381faca5799acc811fdd47eb2da69eaad970
| 37
|
py
|
Python
|
clustergram/__init__.py
|
mwielondek/clustergram
|
9906b433493f965c0168477bbd3d83c7c4ec03bf
|
[
"MIT"
] | 1
|
2020-09-05T10:58:45.000Z
|
2020-09-05T10:58:45.000Z
|
clustergram/__init__.py
|
mwielondek/clustergram
|
9906b433493f965c0168477bbd3d83c7c4ec03bf
|
[
"MIT"
] | null | null | null |
clustergram/__init__.py
|
mwielondek/clustergram
|
9906b433493f965c0168477bbd3d83c7c4ec03bf
|
[
"MIT"
] | null | null | null |
from .clustergram import Clustergram
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
067ba05f307e084995250f1f32cb3c3a656b97e6
| 1,870
|
py
|
Python
|
ray_tracer/tests/test_checker_board.py
|
jjason/RayTracerChallenge
|
ab3cea8968407426bddfa9e11319664fc0b595f6
|
[
"MIT"
] | 1
|
2020-05-13T03:54:00.000Z
|
2020-05-13T03:54:00.000Z
|
ray_tracer/tests/test_checker_board.py
|
jjason/RayTracerChallenge
|
ab3cea8968407426bddfa9e11319664fc0b595f6
|
[
"MIT"
] | null | null | null |
ray_tracer/tests/test_checker_board.py
|
jjason/RayTracerChallenge
|
ab3cea8968407426bddfa9e11319664fc0b595f6
|
[
"MIT"
] | null | null | null |
import unittest
from color import Color
from point import Point
from patterns.checker_board import CheckerBoard
class TestCheckerBoard(unittest.TestCase):
    """Verify CheckerBoard.color_at alternates between its two colors along each axis."""

    @classmethod
    def setUpClass(cls):
        # Palette shared by every test case.
        cls.white = Color(red=1, green=1, blue=1)
        cls.black = Color(red=0, green=0, blue=0)

    def _check_points(self, points_to_colors):
        """Assert that color_at returns the expected color for each sample point."""
        board = CheckerBoard(color_a=TestCheckerBoard.white,
                             color_b=TestCheckerBoard.black)
        for point, expected in points_to_colors:
            self.assertEqual(board.color_at(position=point), expected)

    def test_color_should_repeat_in_x(self):
        self._check_points([
            (Point(x=0, y=0, z=0), TestCheckerBoard.white),
            (Point(x=0.99, y=0, z=0), TestCheckerBoard.white),
            (Point(x=1.01, y=0, z=0), TestCheckerBoard.black),
        ])

    def test_color_should_repeat_in_y(self):
        self._check_points([
            (Point(x=0, y=0, z=0), TestCheckerBoard.white),
            (Point(x=0, y=0.99, z=0), TestCheckerBoard.white),
            (Point(x=0, y=1.01, z=0), TestCheckerBoard.black),
        ])

    def test_color_should_repeat_in_z(self):
        self._check_points([
            (Point(x=0, y=0, z=0), TestCheckerBoard.white),
            (Point(x=0, y=0, z=0.99), TestCheckerBoard.white),
            (Point(x=0, y=0, z=1.01), TestCheckerBoard.black),
        ])
if __name__ == '__main__':
    # Allow running this test module directly with the standard unittest runner.
    unittest.main()
| 39.787234
| 70
| 0.590909
| 257
| 1,870
| 3.914397
| 0.163424
| 0.134195
| 0.125249
| 0.187873
| 0.73161
| 0.73161
| 0.703777
| 0.703777
| 0.703777
| 0.703777
| 0
| 0.033632
| 0.284492
| 1,870
| 46
| 71
| 40.652174
| 0.718236
| 0
| 0
| 0.473684
| 0
| 0
| 0.004278
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0691e6b397f33e306d5327b51135e85cfb385e40
| 11,944
|
py
|
Python
|
app/forms.py
|
CS-Hunt/Get-Placed
|
f79f79f2dd37510405a24578b3a91acea00f9244
|
[
"MIT"
] | 14
|
2021-08-28T04:05:55.000Z
|
2022-02-20T07:03:16.000Z
|
app/forms.py
|
CS-Hunt/Get-Placed
|
f79f79f2dd37510405a24578b3a91acea00f9244
|
[
"MIT"
] | null | null | null |
app/forms.py
|
CS-Hunt/Get-Placed
|
f79f79f2dd37510405a24578b3a91acea00f9244
|
[
"MIT"
] | 9
|
2021-08-28T04:06:03.000Z
|
2021-09-26T16:45:28.000Z
|
from django import forms
from .models import Placement_Company_Detail,Profile,StudentBlogModel,ResorcesModel
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.utils.translation import gettext,gettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm,UserChangeForm,PasswordChangeForm
from allauth.account.forms import LoginForm
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class Job_Post_Form(forms.ModelForm):
    """ModelForm for creating a Placement_Company_Detail (a job post).

    All overridden widgets attach the Bootstrap ``form-control`` CSS class.
    The ``author`` field is rendered as a hidden, empty text input with id
    ``elder`` — presumably populated client-side with the current user;
    confirm against the template/view that renders this form.
    """
    class Meta:
        model = Placement_Company_Detail
        # Company_image has no widget override, so it uses the model field's default widget.
        fields = ('title','snippet','author','Company_image','Job_Description','apply_link','job_type')
        widgets = {
            'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Job Post'}),
            'apply_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of apply button'}),
            'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
            # 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
            'job_type' : forms.Select(attrs={'class':'form-control','placeholder':"Job Type"}),
            'Job_Description' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
            'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add short detail of job'}),
        }
class Edit_Post_Form(forms.ModelForm):
    """ModelForm for editing an existing Placement_Company_Detail.

    Same widget set as Job_Post_Form, except ``author`` is not listed in
    ``fields``, so the author is not editable through this form.
    """
    class Meta:
        model = Placement_Company_Detail
        fields = ('title','snippet','Company_image','Job_Description','apply_link','job_type')
        widgets = {
            'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Job Post'}),
            'apply_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of apply button'}),
            # 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
            'job_type' : forms.Select(attrs={'class':'form-control','placeholder':"Job Type"}),
            'Job_Description' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
            'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add short detail of job'}),
        }
class Blog_Post_Form(forms.ModelForm):
    """ModelForm for creating a StudentBlogModel entry.

    ``author`` is a hidden, empty text input with id ``elder`` — presumably
    populated client-side; confirm against the submitting template/view.
    """
    class Meta:
        model = StudentBlogModel
        fields = ('title','author','body','snippet')
        widgets = {
            'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
            'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
            # 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
            'body' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
            'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add snippet of Blog'}),
        }
class ResorcesModelForm(forms.ModelForm):
    """ModelForm for a ResorcesModel entry: a docs link plus five courses,
    five channels and five websites, each with title/image/link fields.

    Fix: the hand-written placeholders for items 4 and 5 of each group were
    copy-pasted from item 3 ("Title of course 3", etc.); they now show the
    correct item number.

    ``author`` is a hidden, empty text input with id ``elder`` — presumably
    populated client-side; confirm against the submitting template/view.
    NOTE(review): the ``title`` placeholder still reads "Title of the Blog
    Post" on this resources form — confirm the intended wording.
    """
    class Meta:
        model = ResorcesModel
        fields = ('title','docs','author','course1_title','course1_Img','course1_link','course2_title','course2_Img','course2_link','course3_title','course3_Img','course3_link','course4_title','course4_Img','course4_link','course5_title','course5_Img','course5_link',
        'channel1_title','channel1_Img','channel1_link','channel2_title','channel2_Img','channel2_link','channel3_title','channel3_Img','channel3_link','channel4_title','channel4_Img','channel4_link','channel5_title','channel5_Img','channel5_link',
        'Website1_title','Website1_Img','Website1_link','Website2_title','Website2_Img','Website2_link','Website3_title','Website3_Img','Website3_link','Website4_title','Website4_Img','Website4_link','Website5_title','Website5_Img','Website5_link',)
        # The *_Img fields keep their default model-field widgets.
        widgets = {
            'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
            'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
            'docs' : forms.TextInput(attrs={'class':'form-control','placeholder':'Link of documentation'}),
            'course1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 1'}),
            'course1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 1'}),
            'course2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 2'}),
            'course2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 2'}),
            'course3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 3'}),
            'course3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 3'}),
            'course4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 4'}),
            'course4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 4'}),
            'course5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 5'}),
            'course5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 5'}),
            'channel1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 1'}),
            'channel1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 1'}),
            'channel2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 2'}),
            'channel2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 2'}),
            'channel3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 3'}),
            'channel3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 3'}),
            'channel4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 4'}),
            'channel4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 4'}),
            'channel5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 5'}),
            'channel5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 5'}),
            'Website1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 1'}),
            'Website1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 1'}),
            'Website2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 2'}),
            'Website2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 2'}),
            'Website3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 3'}),
            'Website3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 3'}),
            'Website4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 4'}),
            'Website4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 4'}),
            'Website5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 5'}),
            'Website5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 5'}),
        }
class Edit_Blog_Post_Form(forms.ModelForm):
    """ModelForm for editing an existing StudentBlogModel entry.

    ``author`` is not listed in ``fields``, so the author is not editable
    through this form.
    """
    class Meta:
        model = StudentBlogModel
        fields = ('title','snippet','body')
        widgets = {
            'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
            # 'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
            # 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
            'body' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
            'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add snippet of Blog'}),
        }
class UserLoginForm(LoginForm):
    """django-allauth login form restyled with Bootstrap ``form-control`` classes.

    NOTE(review): both fields set ``autofocus=True``; browsers honour only one
    autofocus element per page — confirm which field should receive it.
    """
    username=forms.CharField(widget=forms.TextInput(attrs={'autofocus':True,'class':'form-control'}))
    password=forms.CharField(label=_('Password'),strip=False,widget=forms.PasswordInput(attrs={'autocomplete':'current-password','autofocus':True,'class':'form-control'}))
class ProfilePageView(forms.ModelForm):
    """ModelForm for creating/updating a user's Profile.

    NOTE(review): despite the "View" suffix this is a form, not a view —
    consider renaming it (and its call sites) for clarity.
    """
    class Meta:
        model = Profile
        # profile_pic keeps its default model-field widget.
        fields = ('bio','Gender','Mobile_Number','city','state','profile_pic','twitter_url','instagram_url','linkdin_url','github_url')
        widgets = {
            'bio': forms.Textarea(attrs={'class':'form-control','placeholder':'Write a summary about you...'}),
            # 'profile_pic': forms.ImageField(),
            'Gender': forms.Select(attrs={'class':'form-control'}),
            'Mobile_Number': forms.TextInput(attrs={'class':'form-control','placeholder':'Enter your Mobile number'}),
            'city': forms.TextInput(attrs={'class':'form-control'}),
            'state': forms.Select(attrs={'class':'form-control'}),
            'twitter_url': forms.TextInput(attrs={'class':'form-control'}),
            'instagram_url': forms.TextInput(attrs={'class':'form-control'}),
            'linkdin_url': forms.TextInput(attrs={'class':'form-control'}),
            'github_url': forms.TextInput(attrs={'class':'form-control'}),
        }
class EditProfileFormPage(forms.ModelForm):
    """ModelForm for editing a user's Profile.

    NOTE(review): nearly identical to ProfilePageView — the only difference is
    that ``Mobile_Number`` here has no placeholder text. Consider merging the
    two form classes.
    """
    class Meta:
        model = Profile
        fields = ('bio','Gender','Mobile_Number','city','state','profile_pic','twitter_url','instagram_url','linkdin_url','github_url')
        widgets = {
            'bio': forms.Textarea(attrs={'class':'form-control','placeholder':'Write a summary about you...'}),
            # 'profile_pic': forms.ImageField(),
            'Gender': forms.Select(attrs={'class':'form-control'}),
            'Mobile_Number': forms.TextInput(attrs={'class':'form-control'}),
            'city': forms.TextInput(attrs={'class':'form-control'}),
            'state': forms.Select(attrs={'class':'form-control'}),
            'twitter_url': forms.TextInput(attrs={'class':'form-control'}),
            'instagram_url': forms.TextInput(attrs={'class':'form-control'}),
            'linkdin_url': forms.TextInput(attrs={'class':'form-control'}),
            'github_url': forms.TextInput(attrs={'class':'form-control'}),
        }
class EditProfileForm(UserChangeForm):
    """Form for editing the built-in User's basic account details.

    ``date_joined`` is rendered but ``disabled``, so any submitted value is
    ignored. ``password`` shows Django's read-only hash widget whose help text
    links to the separate password-change page.
    """
    date_joined = forms.CharField(max_length=100,disabled=True)
    password = ReadOnlyPasswordHashField(label=("Password"),
        help_text=("Raw passwords are not stored, so there is no way to see "
                   "this user's password, but you can change the password "
                   "using <a href=\"../accounts/password/change/\">this form</a>."))
    class Meta:
        model =User
        fields = ['username','first_name','last_name','email','date_joined']
        labels={
            'first_name' : 'First Name',
            'last_name':'Last Name',
            'email': 'Email',
        }
        widgets = {
            'username': forms.TextInput(attrs={'class':'form-control'}),
            'first_name': forms.TextInput(attrs={'class':'form-control'}),
            'last_name': forms.TextInput(attrs={'class':'form-control'}),
            'email': forms.EmailInput(attrs={'class':'form-control'}),
            'date_joined': forms.TextInput(attrs={'class':'form-control'}),
        }
| 71.951807
| 267
| 0.638228
| 1,319
| 11,944
| 5.671721
| 0.11903
| 0.09865
| 0.175378
| 0.224569
| 0.750835
| 0.73934
| 0.725705
| 0.700976
| 0.700976
| 0.694025
| 0
| 0.010952
| 0.174397
| 11,944
| 165
| 268
| 72.387879
| 0.747693
| 0.050569
| 0
| 0.413793
| 0
| 0
| 0.42344
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.048276
| 0.055172
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
06a0368a88d8af76595a8af881a8e66f694de51e
| 4,221
|
py
|
Python
|
hail_scripts/v02/utils/computed_fields/test_flags.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 15
|
2017-11-22T14:48:04.000Z
|
2020-10-05T18:22:24.000Z
|
hail_scripts/v02/utils/computed_fields/test_flags.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 94
|
2020-10-21T17:37:57.000Z
|
2022-03-29T14:59:46.000Z
|
hail_scripts/v02/utils/computed_fields/test_flags.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | 7
|
2019-01-29T09:08:10.000Z
|
2020-02-25T16:22:57.000Z
|
import unittest
import hail as hl
from .flags import (
get_expr_for_consequence_lc_lof_flag,
get_expr_for_variant_lc_lof_flag,
get_expr_for_genes_with_lc_lof_flag,
get_expr_for_consequence_loftee_flag_flag,
get_expr_for_variant_loftee_flag_flag,
get_expr_for_genes_with_loftee_flag_flag,
)
class TestFlags(unittest.TestCase):
    """Tests for the LC-LoF and LOFTEE-flag expression helpers."""

    @staticmethod
    def _csq(gene_id, lof, lof_flags=""):
        # One transcript-consequence struct; lof_info is always empty in these fixtures.
        return hl.struct(gene_id=gene_id, lof=lof, lof_flags=lof_flags, lof_info="")

    def setUp(self):
        csq = TestFlags._csq

        # No consequence carries a high-confidence ("HC") LoF annotation.
        self.all_lc_lof = hl.literal([
            csq("foo", "LC"),
            csq("foo", "NC"),
            csq("bar", "LC"),
            csq("baz", ""),
            csq("baz", "LC"),
        ])

        # As above, but "baz" additionally has an HC consequence.
        self.some_lc_lof = hl.literal([
            csq("foo", "LC"),
            csq("foo", ""),
            csq("bar", "LC"),
            csq("baz", "HC"),
            csq("baz", "LC"),
        ])

        # Every HC consequence carries a LOFTEE flag.
        self.all_loftee_flags = hl.literal([
            csq("foo", "HC", "flag1"),
            csq("foo", "HC", "flag2"),
            csq("bar", "LC", "flag1"),
            csq("baz", "HC", "flag2"),
            csq("baz", ""),
        ])

        # "foo" has one HC consequence with no flag.
        self.some_loftee_flags = hl.literal([
            csq("foo", "HC", "flag1"),
            csq("foo", "HC"),
            csq("bar", "", "flag1"),
            csq("bar", "LC", "flag1"),
            csq("baz", "HC", "flag2"),
            csq("baz", "HC", "flag3"),
        ])

    def test_consequence_lc_lof_flag(self):
        for lof, expected in (("LC", True), ("HC", False), ("", False)):
            flagged = hl.eval(get_expr_for_consequence_lc_lof_flag(hl.struct(lof=lof)))
            self.assertEqual(flagged, expected)

    def test_variant_lc_lof_flag(self):
        self.assertTrue(hl.eval(get_expr_for_variant_lc_lof_flag(self.all_lc_lof)))
        self.assertFalse(hl.eval(get_expr_for_variant_lc_lof_flag(self.some_lc_lof)))

    def test_genes_with_lc_lof_flag(self):
        self.assertSetEqual(hl.eval(get_expr_for_genes_with_lc_lof_flag(self.all_lc_lof)),
                            {"foo", "bar", "baz"})
        self.assertSetEqual(hl.eval(get_expr_for_genes_with_lc_lof_flag(self.some_lc_lof)),
                            {"foo", "bar"})

    def test_consequence_loftee_flag_flag(self):
        cases = (
            (hl.struct(lof="HC", lof_flags="foo"), True),
            (hl.struct(lof="", lof_flags=""), False),
            (hl.struct(lof="", lof_flags="bar"), False),
        )
        for struct, expected in cases:
            self.assertEqual(hl.eval(get_expr_for_consequence_loftee_flag_flag(struct)), expected)

    def test_variant_loftee_flag_flag(self):
        self.assertTrue(hl.eval(get_expr_for_variant_loftee_flag_flag(self.all_loftee_flags)))
        self.assertFalse(hl.eval(get_expr_for_variant_loftee_flag_flag(self.some_loftee_flags)))

    def test_genes_with_loftee_flag_flag(self):
        self.assertSetEqual(hl.eval(get_expr_for_genes_with_loftee_flag_flag(self.all_loftee_flags)),
                            {"foo", "bar", "baz"})
        self.assertSetEqual(hl.eval(get_expr_for_genes_with_loftee_flag_flag(self.some_loftee_flags)),
                            {"bar", "baz"})
if __name__ == "__main__":
    # Allow running this test module directly with the standard unittest runner.
    unittest.main()
| 46.384615
| 118
| 0.625918
| 602
| 4,221
| 3.973422
| 0.079734
| 0.056438
| 0.105351
| 0.12291
| 0.908027
| 0.896739
| 0.877926
| 0.790552
| 0.750418
| 0.744983
| 0
| 0.002707
| 0.212272
| 4,221
| 90
| 119
| 46.9
| 0.716692
| 0
| 0
| 0.213333
| 0
| 0
| 0.045487
| 0
| 0
| 0
| 0
| 0
| 0.186667
| 1
| 0.093333
| false
| 0
| 0.04
| 0
| 0.146667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2332081121773228b60742f99365108ae2a26db4
| 273
|
py
|
Python
|
Chapter_9/backAndRename.py
|
TravisLeeWolf/ATBS
|
9075eabaa0a788f58435eff9e0df488806a0770b
|
[
"Apache-2.0"
] | null | null | null |
Chapter_9/backAndRename.py
|
TravisLeeWolf/ATBS
|
9075eabaa0a788f58435eff9e0df488806a0770b
|
[
"Apache-2.0"
] | null | null | null |
Chapter_9/backAndRename.py
|
TravisLeeWolf/ATBS
|
9075eabaa0a788f58435eff9e0df488806a0770b
|
[
"Apache-2.0"
] | null | null | null |
#! python3
# - backAndRename.py - Learning the copytree and move functions of shutil
import shutil, os

# Work from the local GitHub directory (hard-coded Windows-style absolute path).
os.chdir('S:\\Documents\\GitHub')

# Copies all folder content to new folder
# NOTE(review): shutil.copytree raises FileExistsError if the destination already
# exists, so re-running this script fails unless ATBS_C9 is removed first.
shutil.copytree('S:\\Documents\\GitHub\\ATBS\\Chapter_9', 'S:\\Documents\\GitHub\\ATBS_C9')
| 30.333333
| 91
| 0.736264
| 39
| 273
| 5.102564
| 0.692308
| 0.150754
| 0.241206
| 0.201005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.10989
| 273
| 8
| 92
| 34.125
| 0.806584
| 0.443223
| 0
| 0
| 0
| 0
| 0.597315
| 0.597315
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
235bee18a3ffbdb1005be299c155afbe6b6c73b1
| 347
|
py
|
Python
|
utils/config.py
|
Elishanto/HarryBotter
|
e1977dbade44840288145f08aef60746ac66982b
|
[
"MIT"
] | 3
|
2016-06-12T19:37:05.000Z
|
2016-06-12T20:23:33.000Z
|
utils/config.py
|
Elishanto/HarryBotter
|
e1977dbade44840288145f08aef60746ac66982b
|
[
"MIT"
] | null | null | null |
utils/config.py
|
Elishanto/HarryBotter
|
e1977dbade44840288145f08aef60746ac66982b
|
[
"MIT"
] | null | null | null |
import yaml
class Config:
    """Thin read-only wrapper around a YAML configuration file.

    Parses the file once at construction time and exposes dict-like
    access (`config[key]`, `config.keys()`, `config.items()`).
    """

    def __init__(self, config_file='config.yml'):
        """Load and parse *config_file* (path kept on ``self.config_file``).

        Raises:
            OSError: if the file cannot be opened.
            yaml.YAMLError: if the file is not valid YAML.
        """
        self.config_file = config_file
        # Use a context manager so the file handle is closed deterministically
        # (the original `yaml.load(open(...))` leaked it), and safe_load so
        # arbitrary Python objects cannot be constructed from the file —
        # loaderless yaml.load is deprecated in PyYAML >= 5.1 and unsafe.
        with open(config_file) as handle:
            self.config = yaml.safe_load(handle)

    def __getitem__(self, item):
        """Return the configuration value stored under *item*."""
        return self.config[item]

    def keys(self):
        """Return the top-level configuration keys."""
        return self.config.keys()

    def items(self):
        """Return the top-level (key, value) pairs."""
        return self.config.items()
| 20.411765
| 50
| 0.636888
| 45
| 347
| 4.644444
| 0.355556
| 0.287081
| 0.229665
| 0.191388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25072
| 347
| 16
| 51
| 21.6875
| 0.803846
| 0
| 0
| 0
| 0
| 0
| 0.028818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0.272727
| 0.818182
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2362ac8054549506bb5d7927a58398f0274a4c52
| 29
|
py
|
Python
|
testes e exercícios/sistemas/teste01.py
|
LightSnow17/exercicios-Python
|
3ac016ce284860f45d71cfb396d33a73ec06c25d
|
[
"MIT"
] | null | null | null |
testes e exercícios/sistemas/teste01.py
|
LightSnow17/exercicios-Python
|
3ac016ce284860f45d71cfb396d33a73ec06c25d
|
[
"MIT"
] | null | null | null |
testes e exercícios/sistemas/teste01.py
|
LightSnow17/exercicios-Python
|
3ac016ce284860f45d71cfb396d33a73ec06c25d
|
[
"MIT"
] | null | null | null |
# Smoke test: print a fixed marker line for "sistema 01".
mensagem = 'teste do sistema 01'
print(mensagem)
| 14.5
| 28
| 0.724138
| 5
| 29
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.137931
| 29
| 1
| 29
| 29
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
2383507e8bb580884ed53df332f90f74bca68ed3
| 38
|
py
|
Python
|
python/packages/isce3/antenna/__init__.py
|
isce3-testing/isce3-circleci-poc
|
ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3
|
[
"Apache-2.0"
] | null | null | null |
python/packages/isce3/antenna/__init__.py
|
isce3-testing/isce3-circleci-poc
|
ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3
|
[
"Apache-2.0"
] | 1
|
2021-12-23T00:00:31.000Z
|
2021-12-23T00:00:31.000Z
|
python/packages/isce3/antenna/__init__.py
|
isce3-testing/isce3-circleci-poc
|
ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3
|
[
"Apache-2.0"
] | 1
|
2021-12-02T21:10:11.000Z
|
2021-12-02T21:10:11.000Z
|
from isce3.ext.isce3.antenna import *
| 19
| 37
| 0.789474
| 6
| 38
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.105263
| 38
| 1
| 38
| 38
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc6bc9a0939dd857f5efb48bd5b22d3556c069b0
| 134
|
py
|
Python
|
camera.py
|
Zarchan/IoT_lockbox
|
2c39e8caaf35f176b15c496fb18c64add3b91eff
|
[
"MIT"
] | null | null | null |
camera.py
|
Zarchan/IoT_lockbox
|
2c39e8caaf35f176b15c496fb18c64add3b91eff
|
[
"MIT"
] | null | null | null |
camera.py
|
Zarchan/IoT_lockbox
|
2c39e8caaf35f176b15c496fb18c64add3b91eff
|
[
"MIT"
] | null | null | null |
import picamera
# https://picamera.readthedocs.io/en/release-1.10/recipes1.html provides recipes
# for using the raspberry pi camera
| 26.8
| 80
| 0.798507
| 20
| 134
| 5.35
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.104478
| 134
| 5
| 81
| 26.8
| 0.858333
| 0.835821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ccc48042b50a24725d97810cdd07772b3ca59030
| 604
|
py
|
Python
|
sdk/python/pulumi_aws/chime/__init__.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/chime/__init__.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/chime/__init__.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .voice_connector import *
from .voice_connector_group import *
from .voice_connector_logging import *
from .voice_connector_organization import *
from .voice_connector_streaming import *
from .voice_connector_termination import *
from .voice_connector_termination_credentials import *
from ._inputs import *
from . import outputs
| 35.529412
| 87
| 0.786424
| 83
| 604
| 5.53012
| 0.566265
| 0.174292
| 0.27451
| 0.313725
| 0.152505
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001923
| 0.139073
| 604
| 16
| 88
| 37.75
| 0.880769
| 0.362583
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e3648723131b1401c5d3d2978c53737452d7cb3
| 89
|
py
|
Python
|
gensim/untitled.py
|
Abas-Khan/thesis
|
b733bd4382371203cc4992571890619a2e314047
|
[
"MIT"
] | null | null | null |
gensim/untitled.py
|
Abas-Khan/thesis
|
b733bd4382371203cc4992571890619a2e314047
|
[
"MIT"
] | null | null | null |
gensim/untitled.py
|
Abas-Khan/thesis
|
b733bd4382371203cc4992571890619a2e314047
|
[
"MIT"
] | null | null | null |
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow, FAST_VERSION
| 29.666667
| 87
| 0.876404
| 14
| 89
| 5.142857
| 0.857143
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.078652
| 89
| 2
| 88
| 44.5
| 0.865854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9db217e56d9eca5152affca1ee18a4f7deba1e35
| 2,730
|
py
|
Python
|
unit/api/supervised/test_supervised_api.py
|
ONSdigital/dp-fastText
|
f21949e6c499e08b24423dfe75600bde96b055be
|
[
"MIT"
] | null | null | null |
unit/api/supervised/test_supervised_api.py
|
ONSdigital/dp-fastText
|
f21949e6c499e08b24423dfe75600bde96b055be
|
[
"MIT"
] | null | null | null |
unit/api/supervised/test_supervised_api.py
|
ONSdigital/dp-fastText
|
f21949e6c499e08b24423dfe75600bde96b055be
|
[
"MIT"
] | 2
|
2021-04-11T08:01:20.000Z
|
2021-12-20T12:52:16.000Z
|
"""
Tests all routes on the /supervised route
"""
from json import dumps
from unit.utils.test_app import FastTextTestApp
class TestSupervisedApi(FastTextTestApp):
    """Tests all routes on the /supervised route."""

    def _post_expecting_json(self, target, data, expected_keys):
        """POST *data* to *target*, assert a 200 JSON response, and check that
        each key in *expected_keys* is present with a non-None value."""
        request, response = self.post(target, 200, data=dumps(data))
        # Check if response JSON is valid
        self.assertTrue(hasattr(response, 'json'), "response should contain JSON")
        json = response.json
        self.assertIsInstance(json, dict, "JSON should be instanceof dict")
        for key in expected_keys:
            self.assertIn(key, json, "JSON should contain key '{0}'".format(key))
            self.assertIsNotNone(json.get(key), "value for key '{0}' should not be None")

    def test_get_sentence_vector(self):
        """
        Tests the /supervised/sentence/vector API
        :return:
        """
        self._post_expecting_json('/supervised/vector', {"query": "rpi"}, ["query", "vector"])

    def test_get_sentence_vector_bad_request(self):
        """
        Tests the /supervised/sentence/vector API returns a 400 for an invalid request
        :return:
        """
        # Empty request params are invalid -> expect a 400 response.
        # (The original comment here claimed "Assert 200 response", which
        # contradicted the actual 400 assertion below.)
        request, response = self.post('/supervised/vector', 400, data=dumps({}))

    def test_predict(self):
        """
        Tests the /supervised/sentence/vector API
        :return:
        """
        data = {
            "query": "rpi",
            "num_labels": 5,
            "threshold": 0.0
        }
        self._post_expecting_json('/supervised/predict', data, ["labels", "probabilities"])

    def test_predict_bad_request(self):
        """
        Tests the /supervised/sentence/vector API returns a 400 for an invalid request
        :return:
        """
        # Empty request params are invalid -> expect a 400 response.
        request, response = self.post('/supervised/predict', 400, data=dumps({}))
| 29.354839
| 89
| 0.589744
| 307
| 2,730
| 5.185668
| 0.237785
| 0.052764
| 0.030151
| 0.055276
| 0.872487
| 0.846734
| 0.846734
| 0.846734
| 0.846734
| 0.846734
| 0
| 0.019525
| 0.305861
| 2,730
| 92
| 90
| 29.673913
| 0.82058
| 0.224176
| 0
| 0.615385
| 0
| 0
| 0.201216
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 1
| 0.102564
| false
| 0
| 0.051282
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9dfb9a3f304731c4c7222dd34cf09486ba442f27
| 27
|
py
|
Python
|
covalent_api/__init__.py
|
pradeeptadas/uniswap-v3-project
|
8f938dc5602fdb6e58b2cf42393a01994f48682d
|
[
"MIT"
] | null | null | null |
covalent_api/__init__.py
|
pradeeptadas/uniswap-v3-project
|
8f938dc5602fdb6e58b2cf42393a01994f48682d
|
[
"MIT"
] | null | null | null |
covalent_api/__init__.py
|
pradeeptadas/uniswap-v3-project
|
8f938dc5602fdb6e58b2cf42393a01994f48682d
|
[
"MIT"
] | null | null | null |
from .covalent_api import *
| 27
| 27
| 0.814815
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d179c9c36ec554d566ae39407fcdfe54c05cb00c
| 123
|
py
|
Python
|
src/routes/setup.py
|
BlackSugarMilkyTea/tasks-list
|
51b2fa642de04d202b591d767815679e74b35a21
|
[
"MIT"
] | null | null | null |
src/routes/setup.py
|
BlackSugarMilkyTea/tasks-list
|
51b2fa642de04d202b591d767815679e74b35a21
|
[
"MIT"
] | null | null | null |
src/routes/setup.py
|
BlackSugarMilkyTea/tasks-list
|
51b2fa642de04d202b591d767815679e74b35a21
|
[
"MIT"
] | null | null | null |
from app import app
from .conf import tasks_api
from . import tasks  # initialize routes of tasks
# Register the tasks API (with the routes imported above) on the app instance.
tasks_api.init_app(app)
| 20.5
| 49
| 0.788618
| 21
| 123
| 4.47619
| 0.47619
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162602
| 123
| 5
| 50
| 24.6
| 0.912621
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d17f850450525d03b5929ae15016e3ebef12d92f
| 15,617
|
py
|
Python
|
env/lib/python3.6/site-packages/torch/nn/_functions/thnn/upsampling.py
|
bopopescu/smart_contracts7
|
40a487cb3843e86ab5e4cb50b1aafa2095f648cd
|
[
"Apache-2.0"
] | null | null | null |
env/lib/python3.6/site-packages/torch/nn/_functions/thnn/upsampling.py
|
bopopescu/smart_contracts7
|
40a487cb3843e86ab5e4cb50b1aafa2095f648cd
|
[
"Apache-2.0"
] | null | null | null |
env/lib/python3.6/site-packages/torch/nn/_functions/thnn/upsampling.py
|
bopopescu/smart_contracts7
|
40a487cb3843e86ab5e4cb50b1aafa2095f648cd
|
[
"Apache-2.0"
] | 1
|
2020-07-24T17:53:25.000Z
|
2020-07-24T17:53:25.000Z
|
from numbers import Integral
import torch
from torch.autograd.function import Function
from torch._thnn import type2backend
from . import _all_functions
from ...modules.utils import _single, _pair, _triple
import warnings
def _check_size_scale_factor(size, scale_factor):
if size is None and scale_factor is None:
raise ValueError('either size or scale_factor should be defined')
if scale_factor is not None and not isinstance(scale_factor, (Integral, tuple)):
raise ValueError('scale_factor must be of integer type or a tuple of integer types')
def _check_linear_scale_factor(scale_factor, dim=2):
if dim == 1:
scale_factor = _single(scale_factor)
elif dim == 2:
scale_factor = _pair(scale_factor)
elif dim == 3:
scale_factor = _triple(scale_factor)
else:
raise ValueError("dim has to be 1, 2 or 3")
try:
assert len(scale_factor) == 1 or len(scale_factor) == 2 or len(scale_factor) == 3
assert all(isinstance(s, Integral) and s >= 1 for s in scale_factor)
except AssertionError as e:
raise ValueError('scale_factor must be a non-negative integer, '
'or a tuple of non-negative integers for linear, bilinear and trilinear upsampling, but got: '
'{}'.format(scale_factor))
return scale_factor
class UpsamplingNearest1d(Function):
    """Autograd Function for 1-D nearest-neighbour upsampling.

    Legacy implementation that dispatches to the THNN
    TemporalUpSamplingNearest kernels via type2backend.
    """

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 3-D input tensor (presumably (N, C, W) — confirm).
        assert input.dim() == 3
        _check_size_scale_factor(size, scale_factor)
        ctx.size = size
        ctx.scale_factor = scale_factor
        # Nearest-neighbour only supports a single integral factor.
        if ctx.scale_factor is not None and not isinstance(ctx.scale_factor, Integral):
            raise ValueError('scale_factor must be a single Integer value for nearest neighbor sampling')
        if ctx.scale_factor is None:
            # Derive the integral factor from the requested output size; the
            # output width must be an exact multiple of the input width.
            if (ctx.size[0] % input.size(2) != 0):
                raise RuntimeError("output size specified in UpsamplingNearest "
                                   "({}) has to be divisible by the input size, but got: "
                                   "{}".format('x'.join(map(str, ctx.size)),
                                               'x'.join(map(str, input.size()))))
            ctx.scale_factor = ctx.size[0] // input.size(2)
        output = input.new()
        backend = type2backend[type(input)]
        # Input is saved so backward can re-dispatch on its tensor type.
        ctx.save_for_backward(input)
        backend.TemporalUpSamplingNearest_updateOutput(
            backend.library_state,
            input,
            output,
            ctx.scale_factor
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_variables
        # Gradient is computed by a dedicated Function so it is itself
        # differentiable (double backward).
        grad_input = UpsamplingNearest1dBackward.apply(input, grad_output, ctx.scale_factor)
        return grad_input, None, None
class UpsamplingNearest1dBackward(Function):
    """Gradient of UpsamplingNearest1d; its own backward yields double backward."""

    @staticmethod
    def forward(ctx, input, grad_output, scale_factor):
        assert grad_output.dim() == 3
        ctx.scale_factor = scale_factor
        grad_input = grad_output.new()
        backend = type2backend[type(input)]
        backend.TemporalUpSamplingNearest_updateGradInput(
            backend.library_state,
            input,
            grad_output,
            grad_input,
            ctx.scale_factor
        )
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        gI = None
        # The op is linear in grad_output, so its derivative w.r.t. grad_output
        # is the forward upsampling applied to ggI.
        ggO = UpsamplingNearest1d.apply(ggI, None, ctx.scale_factor)
        return gI, ggO, None
class UpsamplingLinear1d(Function):
    """Autograd Function for 1-D linear upsampling (THNN TemporalUpSamplingLinear)."""

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 3-D input tensor.
        assert input.dim() == 3
        ctx.size = size
        ctx.scale_factor = scale_factor
        if ctx.scale_factor is not None:
            # Normalize to a 1-tuple of positive ints.
            ctx.scale_factor = _check_linear_scale_factor(ctx.scale_factor, dim=1)
        if ctx.scale_factor is not None:
            # Output width = input width * factor.
            ctx.output_size = (
                input.size(2) * ctx.scale_factor[0],
            )
        else:
            ctx.output_size = ctx.size
        ctx.input_size = input.size()
        output = input.new()
        backend = type2backend[type(input)]
        backend.TemporalUpSamplingLinear_updateOutput(
            backend.library_state,
            input,
            output,
            ctx.output_size[0]
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = UpsamplingLinear1dBackward.apply(grad_output, ctx.input_size, ctx.output_size)
        return grad_input, None, None
class UpsamplingLinear1dBackward(Function):
    """Gradient of UpsamplingLinear1d."""

    @staticmethod
    def forward(ctx, grad_output, input_size, output_size):
        assert grad_output.dim() == 3
        ctx.input_size = input_size
        ctx.output_size = output_size
        # Make the gradient contiguous before handing it to the backend kernel.
        grad_output = grad_output.contiguous()
        grad_input = grad_output.new()
        backend = type2backend[type(grad_output)]
        backend.TemporalUpSamplingLinear_updateGradInput(
            backend.library_state,
            grad_output,
            grad_input,
            ctx.input_size[0],
            ctx.input_size[1],
            ctx.input_size[2],
            ctx.output_size[0],
        )
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        # Linear upsampling is linear, so double backward is the forward op on ggI.
        ggO = UpsamplingLinear1d.apply(ggI, ctx.output_size, None)
        return ggO, None, None
class UpsamplingNearest2d(Function):
    """Autograd Function for 2-D nearest-neighbour upsampling (spatial)."""

    @staticmethod
    def symbolic(g, input, size=None, scale_factor=None):
        # ONNX export path: only scale_factor-based resizing is exportable;
        # a concrete `size` triggers a warning and is ignored by the op.
        if scale_factor is None:
            scale_factor = 1.0
        if size is not None and set(size) != set([None]):
            warnings.warn("ONNX export failed on UpsamplingNearest2d because size is not supported")
        return g.op("ResizeNearest", input, width_scale_f=scale_factor, height_scale_f=scale_factor)

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 4-D input tensor; axes 2 and 3 are treated as spatial.
        assert input.dim() == 4
        _check_size_scale_factor(size, scale_factor)
        ctx.size = size
        ctx.scale_factor = scale_factor
        if ctx.scale_factor is not None and not isinstance(ctx.scale_factor, Integral):
            raise ValueError('scale_factor must be a single Integer value for nearest neighbor sampling')
        if ctx.scale_factor is None:
            # Derive the integral factor from the requested output size; both
            # spatial dims must be exact multiples of the input dims and must
            # imply the same factor (aspect ratio preserved).
            if (ctx.size[0] % input.size(2) != 0 or
                    ctx.size[1] % input.size(3) != 0):
                raise RuntimeError("output size specified in UpsamplingNearest "
                                   "({}) has to be divisible by the input size, but got: "
                                   "{}".format('x'.join(map(str, ctx.size)),
                                               'x'.join(map(str, input.size()))))
            ctx.scale_factor = ctx.size[0] // input.size(2)
            if ctx.scale_factor != ctx.size[1] // input.size(3):
                raise RuntimeError("input aspect ratio doesn't match the "
                                   "output ratio")
        output = input.new()
        backend = type2backend[type(input)]
        ctx.save_for_backward(input)
        backend.SpatialUpSamplingNearest_updateOutput(
            backend.library_state,
            input,
            output,
            ctx.scale_factor
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_variables
        grad_input = UpsamplingNearest2dBackward.apply(input, grad_output, ctx.scale_factor)
        return grad_input, None, None
class UpsamplingNearest2dBackward(Function):
    """Gradient of UpsamplingNearest2d."""

    @staticmethod
    def forward(ctx, input, grad_output, scale_factor):
        assert grad_output.dim() == 4
        ctx.scale_factor = scale_factor
        grad_input = grad_output.new()
        backend = type2backend[type(input)]
        backend.SpatialUpSamplingNearest_updateGradInput(
            backend.library_state,
            input,
            grad_output,
            grad_input,
            ctx.scale_factor
        )
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        gI = None
        # Double backward: re-apply the forward upsampling to ggI.
        ggO = UpsamplingNearest2d.apply(ggI, None, ctx.scale_factor)
        return gI, ggO, None
class UpsamplingBilinear2d(Function):
    """Autograd Function for 2-D bilinear upsampling (THNN SpatialUpSamplingBilinear)."""

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 4-D input tensor.
        assert input.dim() == 4
        ctx.size = size
        ctx.scale_factor = scale_factor
        if ctx.scale_factor is not None:
            # Normalize to a 2-tuple of positive ints.
            ctx.scale_factor = _check_linear_scale_factor(ctx.scale_factor, dim=2)
        if ctx.scale_factor is not None:
            # Per-axis output size = input size * factor.
            ctx.output_size = (
                input.size(2) * ctx.scale_factor[0],
                input.size(3) * ctx.scale_factor[1],
            )
        else:
            ctx.output_size = ctx.size
        ctx.input_size = input.size()
        output = input.new()
        backend = type2backend[type(input)]
        backend.SpatialUpSamplingBilinear_updateOutput(
            backend.library_state,
            input,
            output,
            ctx.output_size[0],
            ctx.output_size[1],
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = UpsamplingBilinear2dBackward.apply(grad_output, ctx.input_size, ctx.output_size)
        return grad_input, None, None
class UpsamplingBilinear2dBackward(Function):
    """Gradient of UpsamplingBilinear2d."""

    @staticmethod
    def forward(ctx, grad_output, input_size, output_size):
        assert grad_output.dim() == 4
        ctx.input_size = input_size
        ctx.output_size = output_size
        # Make the gradient contiguous before handing it to the backend kernel.
        grad_output = grad_output.contiguous()
        grad_input = grad_output.new()
        backend = type2backend[type(grad_output)]
        backend.SpatialUpSamplingBilinear_updateGradInput(
            backend.library_state,
            grad_output,
            grad_input,
            ctx.input_size[0],
            ctx.input_size[1],
            ctx.input_size[2],
            ctx.input_size[3],
            ctx.output_size[0],
            ctx.output_size[1],
        )
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        # Double backward: re-apply the forward upsampling to ggI.
        ggO = UpsamplingBilinear2d.apply(ggI, ctx.output_size, None)
        return ggO, None, None
class UpsamplingNearest3d(Function):
    """Autograd Function for 3-D nearest-neighbour upsampling (volumetric).

    NOTE(review): unlike the 1-D/2-D variants, this forward does not call
    _check_size_scale_factor — confirm whether that omission is intentional.
    """

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 5-D input tensor; axes 2, 3 and 4 are treated as spatial.
        assert input.dim() == 5
        ctx.size = size
        ctx.scale_factor = scale_factor
        if ctx.scale_factor is not None and not isinstance(ctx.scale_factor, Integral):
            raise ValueError('scale_factor must be a single Integer value for nearest neighbor sampling')
        if ctx.scale_factor is None:
            # All three spatial dims must be exact multiples of the input dims
            # and must imply the same integral factor.
            if (ctx.size[0] % input.size(2) != 0 or ctx.size[1] % input.size(3) != 0 or
                    ctx.size[2] % input.size(4) != 0):
                raise RuntimeError("output size specified in UpSamplingNearest "
                                   "({}) has to be divisible by the input size, but got: "
                                   "{}".format('x'.join(map(str, ctx.size)),
                                               'x'.join(map(str, input.size()))))
            ctx.scale_factor = ctx.size[0] // input.size(2)
            if (ctx.scale_factor != ctx.size[1] // input.size(3) or
                    ctx.scale_factor != ctx.size[2] // input.size(4)):
                raise RuntimeError("input aspect ratio doesn't match the "
                                   "output ratio")
        output = input.new()
        backend = type2backend[type(input)]
        ctx.save_for_backward(input)
        backend.VolumetricUpSamplingNearest_updateOutput(backend.library_state,
                                                         input,
                                                         output,
                                                         ctx.scale_factor)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_variables
        grad_input = UpsamplingNearest3dBackward.apply(input, grad_output, ctx.scale_factor)
        return grad_input, None, None
class UpsamplingNearest3dBackward(Function):
    """Gradient of UpsamplingNearest3d."""

    @staticmethod
    def forward(ctx, input, grad_output, scale_factor):
        assert grad_output.dim() == 5
        ctx.scale_factor = scale_factor
        grad_input = grad_output.new()
        backend = type2backend[type(input)]
        backend.VolumetricUpSamplingNearest_updateGradInput(backend.library_state,
                                                            input,
                                                            grad_output,
                                                            grad_input,
                                                            ctx.scale_factor)
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        gI = None
        # Double backward: re-apply the forward upsampling to ggI.
        ggO = UpsamplingNearest3d.apply(ggI, None, ctx.scale_factor)
        return gI, ggO, None
class UpsamplingTrilinear3d(Function):
    """Autograd Function for 3-D trilinear upsampling (THNN VolumetricUpSamplingTrilinear)."""

    @staticmethod
    def forward(ctx, input, size=None, scale_factor=None):
        # Requires a 5-D input tensor.
        assert input.dim() == 5
        ctx.size = size
        ctx.scale_factor = scale_factor
        if ctx.scale_factor is not None:
            # Normalize to a 3-tuple of positive ints.
            ctx.scale_factor = _check_linear_scale_factor(ctx.scale_factor, dim=3)
        if ctx.scale_factor is not None:
            # Per-axis output size = input size * factor.
            ctx.output_size = (
                input.size(2) * ctx.scale_factor[0],
                input.size(3) * ctx.scale_factor[1],
                input.size(4) * ctx.scale_factor[2],
            )
        else:
            ctx.output_size = ctx.size
        ctx.input_size = input.size()
        output = input.new()
        backend = type2backend[type(input)]
        backend.VolumetricUpSamplingTrilinear_updateOutput(
            backend.library_state,
            input,
            output,
            ctx.output_size[0],
            ctx.output_size[1],
            ctx.output_size[2]
        )
        return output

    @staticmethod
    def backward(ctx, grad_output):
        grad_input = UpsamplingTrilinear3dBackward.apply(grad_output, ctx.input_size, ctx.output_size)
        return grad_input, None, None
class UpsamplingTrilinear3dBackward(Function):
    """Gradient of UpsamplingTrilinear3d."""

    @staticmethod
    def forward(ctx, grad_output, input_size, output_size):
        assert grad_output.dim() == 5
        ctx.input_size = input_size
        ctx.output_size = output_size
        # Make the gradient contiguous before handing it to the backend kernel.
        grad_output = grad_output.contiguous()
        grad_input = grad_output.new()
        backend = type2backend[type(grad_output)]
        backend.VolumetricUpSamplingTrilinear_updateGradInput(
            backend.library_state,
            grad_output,
            grad_input,
            ctx.input_size[0],
            ctx.input_size[1],
            ctx.input_size[2],
            ctx.input_size[3],
            ctx.input_size[4],
            ctx.output_size[0],
            ctx.output_size[1],
            ctx.output_size[2]
        )
        return grad_input

    @staticmethod
    def backward(ctx, ggI):
        # Double backward: re-apply the forward upsampling to ggI.
        ggO = UpsamplingTrilinear3d.apply(ggI, ctx.output_size, None)
        return ggO, None, None
# Register every Function (forward and backward variants) in the module-wide
# _all_functions list imported at the top of the file.
_all_functions.append(UpsamplingNearest1d)
_all_functions.append(UpsamplingNearest1dBackward)
_all_functions.append(UpsamplingLinear1d)
_all_functions.append(UpsamplingLinear1dBackward)
_all_functions.append(UpsamplingNearest2d)
_all_functions.append(UpsamplingNearest2dBackward)
_all_functions.append(UpsamplingBilinear2d)
_all_functions.append(UpsamplingBilinear2dBackward)
_all_functions.append(UpsamplingNearest3d)
_all_functions.append(UpsamplingNearest3dBackward)
_all_functions.append(UpsamplingTrilinear3d)
_all_functions.append(UpsamplingTrilinear3dBackward)
| 33.22766
| 119
| 0.604469
| 1,728
| 15,617
| 5.269097
| 0.086227
| 0.131686
| 0.083031
| 0.024602
| 0.736518
| 0.731796
| 0.720593
| 0.713125
| 0.709171
| 0.691598
| 0
| 0.013372
| 0.310431
| 15,617
| 469
| 120
| 33.298507
| 0.832111
| 0
| 0
| 0.702703
| 0
| 0
| 0.06224
| 0
| 0
| 0
| 0
| 0
| 0.040541
| 1
| 0.072973
| false
| 0
| 0.018919
| 0
| 0.194595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d199323ada9485947bbfa69536ab9ce4873ea68b
| 104
|
py
|
Python
|
hcl_translator/__init__.py
|
clearcare/cc_hcl_translator
|
2515356fc75fe6adfa6ac0b1ceb51f588e0ee2a8
|
[
"Apache-2.0"
] | null | null | null |
hcl_translator/__init__.py
|
clearcare/cc_hcl_translator
|
2515356fc75fe6adfa6ac0b1ceb51f588e0ee2a8
|
[
"Apache-2.0"
] | 1
|
2018-12-06T15:34:12.000Z
|
2018-12-06T15:34:13.000Z
|
hcl_translator/__init__.py
|
clearcare/cc_hcl_translator
|
2515356fc75fe6adfa6ac0b1ceb51f588e0ee2a8
|
[
"Apache-2.0"
] | null | null | null |
from .dynamodb2 import dynamodb2_translator # NOQA
from .dynamodb3 import dynamodb3_translator # NOQA
| 34.666667
| 51
| 0.826923
| 12
| 104
| 7
| 0.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0.134615
| 104
| 2
| 52
| 52
| 0.888889
| 0.086538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d1b9b4d92f386b67008f851afd77e2d8d5eef4a8
| 40
|
py
|
Python
|
shorttext/metrics/embedfuzzy/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 481
|
2016-10-07T16:48:40.000Z
|
2022-03-16T12:44:12.000Z
|
shorttext/metrics/embedfuzzy/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 56
|
2017-02-02T17:50:14.000Z
|
2021-12-15T05:14:28.000Z
|
shorttext/metrics/embedfuzzy/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 70
|
2017-01-28T15:20:46.000Z
|
2021-09-30T15:08:41.000Z
|
from .jaccard import jaccardscore_sents
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 2
| 39
| 20
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d1c647b99897254969f20de4b1a8d08a0ab0ebcf
| 141
|
py
|
Python
|
gentelella/app/forms.py
|
horoyoii/admin_dashboard_edgex
|
9aea5e43eeb3da17d9e9c65c3ed0337fe7694cb8
|
[
"MIT"
] | 2
|
2020-05-24T20:34:41.000Z
|
2021-08-28T07:27:45.000Z
|
dashboard/app/forms.py
|
horoyoii/graduation_piece
|
4f907a10636e3862d09e950c6eb5f12e95b1a8e5
|
[
"MIT"
] | 5
|
2021-03-19T09:14:10.000Z
|
2021-06-10T19:54:28.000Z
|
dashboard/app/forms.py
|
horoyoii/graduation_piece
|
4f907a10636e3862d09e950c6eb5f12e95b1a8e5
|
[
"MIT"
] | 1
|
2021-08-28T07:27:48.000Z
|
2021-08-28T07:27:48.000Z
|
from django import forms
class DeviceForm(forms.Form):
    """Django form with two short free-text fields for a device."""

    # Both fields are plain CharFields capped at 30 characters.
    name = forms.CharField(max_length=30)
    # NOTE(review): "you" is an unusual field name — confirm the intended meaning.
    you = forms.CharField(max_length=30)
| 17.625
| 41
| 0.737589
| 20
| 141
| 5.1
| 0.65
| 0.27451
| 0.333333
| 0.45098
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.163121
| 141
| 7
| 42
| 20.142857
| 0.830508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
d1cc1eb43e5bf17d8024714b3dd40552826ea81d
| 43
|
py
|
Python
|
scripts/Scripts.py
|
joelcarlson/OpenKE
|
256b360b7920808911358fd06b33b1b77ae60cb4
|
[
"MIT"
] | null | null | null |
scripts/Scripts.py
|
joelcarlson/OpenKE
|
256b360b7920808911358fd06b33b1b77ae60cb4
|
[
"MIT"
] | null | null | null |
scripts/Scripts.py
|
joelcarlson/OpenKE
|
256b360b7920808911358fd06b33b1b77ae60cb4
|
[
"MIT"
] | null | null | null |
def convert_ecad_to_required_files():
    # TODO: unimplemented stub — currently a deliberate no-op. The name
    # suggests it should convert ECAD data into required input files;
    # confirm the intended behavior before relying on it.
    pass
| 21.5
| 37
| 0.860465
| 7
| 43
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 43
| 2
| 38
| 21.5
| 0.825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d1ec32c7260d071f6cc9bfd6b5426f1d15f761a9
| 39
|
py
|
Python
|
datasets/__init__.py
|
bozliu/E2E-Keyword-Spotting
|
64fc6fe414370a12a22fdf8ca5c8379d2c60b64e
|
[
"MIT"
] | 2
|
2021-04-19T06:42:04.000Z
|
2021-05-05T04:07:12.000Z
|
datasets/__init__.py
|
bozliu/E2E-Keyword-Spotting
|
64fc6fe414370a12a22fdf8ca5c8379d2c60b64e
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
bozliu/E2E-Keyword-Spotting
|
64fc6fe414370a12a22fdf8ca5c8379d2c60b64e
|
[
"MIT"
] | null | null | null |
from .speech_commands_dataset import *
| 19.5
| 38
| 0.846154
| 5
| 39
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
060e2635ebab08fe6512eb8f1f4cdba098026d81
| 109
|
py
|
Python
|
gsfarc/gptool/parameter/templates/ulong64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | 1
|
2021-11-06T18:36:28.000Z
|
2021-11-06T18:36:28.000Z
|
gsfarc/gptool/parameter/templates/ulong64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | null | null | null |
gsfarc/gptool/parameter/templates/ulong64.py
|
geospatial-services-framework/gsfpyarc
|
5ef69299fbc0b763ad4c1857ceac3ff087c0dc14
|
[
"MIT"
] | null | null | null |
"""
"""
from .basic import BASIC
class ULONG64(BASIC):
    """Parameter template type for unsigned 64-bit longs; all behavior comes from BASIC."""
    pass
def template():
    """Factory: return a new ULONG64 instance constructed with 'GPLong'."""
    return ULONG64('GPLong')
| 9.909091
| 28
| 0.651376
| 13
| 109
| 5.461538
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.192661
| 109
| 11
| 28
| 9.909091
| 0.761364
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
ae0fe3d826e27a3d2fa9cde0308fc3ce2411d52b
| 37
|
py
|
Python
|
yo_extensions/alg/__init__.py
|
okulovsky/yo_ds
|
9e1fa2e7a1b9746c3982afc152c024169fec45ca
|
[
"MIT"
] | 16
|
2019-09-26T09:05:42.000Z
|
2021-02-04T01:39:09.000Z
|
yo_extensions/alg/__init__.py
|
okulovsky/yo_ds
|
9e1fa2e7a1b9746c3982afc152c024169fec45ca
|
[
"MIT"
] | 2
|
2019-10-23T19:01:23.000Z
|
2020-06-11T09:08:45.000Z
|
yo_extensions/alg/__init__.py
|
okulovsky/yo_ds
|
9e1fa2e7a1b9746c3982afc152c024169fec45ca
|
[
"MIT"
] | 2
|
2019-09-26T09:05:50.000Z
|
2019-10-23T18:46:11.000Z
|
from yo_fluq_ds._fluq.pandas import *
| 37
| 37
| 0.837838
| 7
| 37
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae4fbf22afde19d6073678ffaa41cf42837e69cf
| 20
|
py
|
Python
|
MMO/external/__init__.py
|
laloc2496/cdn_configuration_optimization
|
58cf2278456d0ef8796570f12f1d00fd68aec686
|
[
"MIT"
] | 41
|
2020-10-21T01:17:45.000Z
|
2022-02-07T01:42:44.000Z
|
MMO/external/__init__.py
|
laloc2496/cdn_configuration_optimization
|
58cf2278456d0ef8796570f12f1d00fd68aec686
|
[
"MIT"
] | 2
|
2020-11-06T19:28:22.000Z
|
2021-03-11T15:19:45.000Z
|
MMO/external/__init__.py
|
laloc2496/cdn_configuration_optimization
|
58cf2278456d0ef8796570f12f1d00fd68aec686
|
[
"MIT"
] | 9
|
2020-11-16T05:24:49.000Z
|
2022-01-21T08:19:17.000Z
|
from .lhs import lhs
| 20
| 20
| 0.8
| 4
| 20
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae5b9c39157ec21f374799d98144d2c63bff5dc5
| 36
|
py
|
Python
|
src/pydev_conda/__init__.py
|
terasakisatoshi/pydev_conda
|
dc26fed9d329a06151354e692c6d18ac342cf08c
|
[
"MIT"
] | null | null | null |
src/pydev_conda/__init__.py
|
terasakisatoshi/pydev_conda
|
dc26fed9d329a06151354e692c6d18ac342cf08c
|
[
"MIT"
] | null | null | null |
src/pydev_conda/__init__.py
|
terasakisatoshi/pydev_conda
|
dc26fed9d329a06151354e692c6d18ac342cf08c
|
[
"MIT"
] | null | null | null |
from pydev_conda.greet import greet
| 18
| 35
| 0.861111
| 6
| 36
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8828085770cdce55775689dcc45185fa7221941b
| 77
|
py
|
Python
|
common/trainers/trecqa_trainer.py
|
karkaroff/castor
|
881673f3dadb4f757fdfdf5d2ab9031e08512406
|
[
"Apache-2.0"
] | 132
|
2017-04-02T12:31:55.000Z
|
2019-03-09T07:53:29.000Z
|
common/trainers/trecqa_trainer.py
|
sudipta90/castor
|
fa2f59535c71a0fb4586afbe543b81ba812c8630
|
[
"Apache-2.0"
] | 111
|
2017-04-01T23:00:24.000Z
|
2019-03-10T08:29:20.000Z
|
common/trainers/trecqa_trainer.py
|
karkaroff/Castor
|
881673f3dadb4f757fdfdf5d2ab9031e08512406
|
[
"Apache-2.0"
] | 53
|
2017-04-06T01:17:18.000Z
|
2019-02-27T03:10:35.000Z
|
from .qa_trainer import QATrainer
class TRECQATrainer(QATrainer):
pass
| 12.833333
| 33
| 0.779221
| 9
| 77
| 6.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168831
| 77
| 5
| 34
| 15.4
| 0.921875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
88a711508576854d420d1777216c31f9c52db2cf
| 46
|
py
|
Python
|
2021/examples-in-class-2021-09-24/helloworld1.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | 1
|
2021-02-04T16:59:11.000Z
|
2021-02-04T16:59:11.000Z
|
2021/examples-in-class-2021-09-24/helloworld1.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | null | null | null |
2021/examples-in-class-2021-09-24/helloworld1.py
|
ati-ozgur/course-python
|
38237d120043c07230658b56dc3aeb01c3364933
|
[
"Apache-2.0"
] | 1
|
2019-10-30T14:37:48.000Z
|
2019-10-30T14:37:48.000Z
|
print("Hello, I am Atilla, I am from Turkey")
| 23
| 45
| 0.695652
| 9
| 46
| 3.555556
| 0.777778
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 1
| 46
| 46
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
31f24458b47ca5609afdc3b297db01ad63d221bb
| 86
|
py
|
Python
|
customers/managers.py
|
moshthepitt/probsc
|
9b8cab206bb1c41238e36bd77f5e0573df4d8e2d
|
[
"MIT"
] | null | null | null |
customers/managers.py
|
moshthepitt/probsc
|
9b8cab206bb1c41238e36bd77f5e0573df4d8e2d
|
[
"MIT"
] | null | null | null |
customers/managers.py
|
moshthepitt/probsc
|
9b8cab206bb1c41238e36bd77f5e0573df4d8e2d
|
[
"MIT"
] | null | null | null |
from core.managers import CoreManager
class CustomerManager(CoreManager):
pass
| 12.285714
| 37
| 0.790698
| 9
| 86
| 7.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 86
| 6
| 38
| 14.333333
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ee07e5dd05abddd6a9c5bd9b0d0870dbb6657f22
| 148
|
py
|
Python
|
python/lib/davis/measures/__init__.py
|
flyingleafe/davis
|
4f90f1095761a062ab4f22781864a035fec568e7
|
[
"BSD-3-Clause"
] | null | null | null |
python/lib/davis/measures/__init__.py
|
flyingleafe/davis
|
4f90f1095761a062ab4f22781864a035fec568e7
|
[
"BSD-3-Clause"
] | null | null | null |
python/lib/davis/measures/__init__.py
|
flyingleafe/davis
|
4f90f1095761a062ab4f22781864a035fec568e7
|
[
"BSD-3-Clause"
] | null | null | null |
from .jaccard import db_eval_iou
# from .t_stability import db_eval_t_stab // tstab does not work
from .f_boundary import db_eval_boundary
| 24.666667
| 66
| 0.777027
| 25
| 148
| 4.24
| 0.6
| 0.226415
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182432
| 148
| 5
| 67
| 29.6
| 0.876033
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ee4941bf4c2024ed4b6ea863c72d581555d2c36d
| 174
|
py
|
Python
|
helpers/custom_exceptions.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | 1
|
2020-10-05T14:19:35.000Z
|
2020-10-05T14:19:35.000Z
|
helpers/custom_exceptions.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | null | null | null |
helpers/custom_exceptions.py
|
LavinaVRovine/hazard
|
e0408374dc0b76f8b9a0107f5f12cca2d4c033ef
|
[
"MIT"
] | null | null | null |
class TeamNotFound(Exception):
"""Raise when the team is not found in the database"""
class NoMatchData(Exception):
"""Raise when data for match cant be created"""
| 24.857143
| 58
| 0.712644
| 24
| 174
| 5.166667
| 0.791667
| 0.225806
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 174
| 6
| 59
| 29
| 0.873239
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ee5fb8bfdb0575fadea6249e37d89844ced705d6
| 136
|
py
|
Python
|
schemas/user_schema.py
|
JhonArroyo/fastapi-python
|
b6ddb250cd9d34534a576bb8948ce9f63458a73f
|
[
"MIT"
] | null | null | null |
schemas/user_schema.py
|
JhonArroyo/fastapi-python
|
b6ddb250cd9d34534a576bb8948ce9f63458a73f
|
[
"MIT"
] | null | null | null |
schemas/user_schema.py
|
JhonArroyo/fastapi-python
|
b6ddb250cd9d34534a576bb8948ce9f63458a73f
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
from typing import Optional
class User(BaseModel):
id: Optional[str]
name: str
password: str
| 19.428571
| 30
| 0.735294
| 18
| 136
| 5.555556
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 136
| 7
| 31
| 19.428571
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ee6e5b387a0cf283520091b6af9d7a1acc1e012a
| 42
|
py
|
Python
|
tests/roots/test-ext-autodoc/target/imported_members.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 4,973
|
2015-01-03T15:44:00.000Z
|
2022-03-31T03:11:51.000Z
|
tests/roots/test-ext-autodoc/target/imported_members.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 7,850
|
2015-01-02T08:09:25.000Z
|
2022-03-31T18:57:40.000Z
|
tests/roots/test-ext-autodoc/target/imported_members.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 2,179
|
2015-01-03T15:26:53.000Z
|
2022-03-31T12:22:44.000Z
|
from .partialfunction import func2, func3
| 21
| 41
| 0.833333
| 5
| 42
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.119048
| 42
| 1
| 42
| 42
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ee7b839672690424818093c14803c5cad1d05102
| 173
|
py
|
Python
|
diofant/printing/pretty/__init__.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 57
|
2016-09-13T23:16:26.000Z
|
2022-03-29T06:45:51.000Z
|
diofant/printing/pretty/__init__.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 402
|
2016-05-11T11:11:47.000Z
|
2022-03-31T14:27:02.000Z
|
diofant/printing/pretty/__init__.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | 20
|
2016-05-11T08:17:37.000Z
|
2021-09-10T09:15:51.000Z
|
"""ASCII-ART 2D pretty-printer"""
from .pretty import pprint, pprint_use_unicode, pretty, pretty_print
__all__ = 'pprint', 'pprint_use_unicode', 'pretty', 'pretty_print'
| 24.714286
| 68
| 0.751445
| 23
| 173
| 5.217391
| 0.521739
| 0.2
| 0.25
| 0.366667
| 0.65
| 0.65
| 0.65
| 0
| 0
| 0
| 0
| 0.006494
| 0.109827
| 173
| 6
| 69
| 28.833333
| 0.772727
| 0.156069
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
c990488bd2d79a3cb156178cf80764f9537d9b83
| 23
|
py
|
Python
|
engine/physics/__init__.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | 1
|
2020-10-11T15:34:56.000Z
|
2020-10-11T15:34:56.000Z
|
engine/physics/__init__.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | null | null | null |
engine/physics/__init__.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | null | null | null |
from . import collider
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c9b476d5ddaea068ff57cfa0cfa5517133c802a2
| 133
|
py
|
Python
|
dblinea/__init__.py
|
linea-it/lineadb
|
42e782e73691c5378a7a1182c70e2134b4409552
|
[
"MIT"
] | null | null | null |
dblinea/__init__.py
|
linea-it/lineadb
|
42e782e73691c5378a7a1182c70e2134b4409552
|
[
"MIT"
] | 9
|
2022-02-07T21:59:08.000Z
|
2022-03-18T18:15:45.000Z
|
dblinea/__init__.py
|
linea-it/lineadb
|
42e782e73691c5378a7a1182c70e2134b4409552
|
[
"MIT"
] | null | null | null |
from dblinea.dblinea import DBBase
from dblinea.db_postgresql import DBPostgresql
from dblinea.scienceserver import ScienceServerApi
| 33.25
| 50
| 0.887218
| 16
| 133
| 7.3125
| 0.5625
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090226
| 133
| 3
| 51
| 44.333333
| 0.966942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c9f33fb35eeafba4862326eaa1c7906d9e9a1c0a
| 119
|
py
|
Python
|
utils/__init__.py
|
SeaWakeYT/SeaWake-Bot-FIXED-
|
995d8a9ad9a045d42aca8fec78e04946f442db32
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
SeaWakeYT/SeaWake-Bot-FIXED-
|
995d8a9ad9a045d42aca8fec78e04946f442db32
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
SeaWakeYT/SeaWake-Bot-FIXED-
|
995d8a9ad9a045d42aca8fec78e04946f442db32
|
[
"MIT"
] | null | null | null |
from .buttons import *
from .extra import *
#this imports everything from buttons as it's the top of the buttons class.
| 39.666667
| 75
| 0.773109
| 20
| 119
| 4.6
| 0.7
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168067
| 119
| 3
| 75
| 39.666667
| 0.929293
| 0.621849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a01ea5bb2f14b4882d96813aaf5c358c76d09c2c
| 49
|
py
|
Python
|
DutchDraw/__init__.py
|
joris-pries/DutchDraw
|
7bc81fc5fb456a27bc977dc201c75c9caa2c6996
|
[
"MIT"
] | null | null | null |
DutchDraw/__init__.py
|
joris-pries/DutchDraw
|
7bc81fc5fb456a27bc977dc201c75c9caa2c6996
|
[
"MIT"
] | null | null | null |
DutchDraw/__init__.py
|
joris-pries/DutchDraw
|
7bc81fc5fb456a27bc977dc201c75c9caa2c6996
|
[
"MIT"
] | null | null | null |
from . import DutchDraw
from .DutchDraw import *
| 16.333333
| 24
| 0.77551
| 6
| 49
| 6.333333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 2
| 25
| 24.5
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e590ef1a5d6f87821e1561b8cb6bba36cc0f6ea
| 43
|
py
|
Python
|
src/ctc/db/schemas/block_gas_stats/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 94
|
2022-02-15T19:34:49.000Z
|
2022-03-26T19:26:22.000Z
|
src/ctc/db/schemas/block_gas_stats/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-03-03T02:58:47.000Z
|
2022-03-11T18:41:05.000Z
|
src/ctc/db/schemas/block_gas_stats/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-02-15T17:53:07.000Z
|
2022-03-17T19:14:17.000Z
|
from .block_gas_stats_schema_defs import *
| 21.5
| 42
| 0.860465
| 7
| 43
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e6b2a614b1c3d092e97ac12ea0c1f8d1fb2f3ce
| 1,766
|
py
|
Python
|
test_flood.py
|
TK594/IA-flood-risk-project
|
17902a255d1e76b4085760d5ab655aa5dce762dc
|
[
"MIT"
] | null | null | null |
test_flood.py
|
TK594/IA-flood-risk-project
|
17902a255d1e76b4085760d5ab655aa5dce762dc
|
[
"MIT"
] | null | null | null |
test_flood.py
|
TK594/IA-flood-risk-project
|
17902a255d1e76b4085760d5ab655aa5dce762dc
|
[
"MIT"
] | null | null | null |
from floodsystem.station import *
from floodsystem.station import MonitoringStation
from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level
def test_stations_level_over_thershold():
station_A = MonitoringStation('ID A', 'Measurement ID A', 'Name A', (0,1), (1,10), 'river 1', 'Town 1')
station_B = MonitoringStation('ID B', 'Measurement ID B', 'Name B', (5,5), (1,3), 'river 2', 'Town 2')
station_C = MonitoringStation('ID C', 'Measurement ID C', 'Name C', (2,5), (4,7), 'river 3', 'Town 3')
station_D = MonitoringStation('ID D', 'Measurement ID D', 'Name D', (4,9), (2,8), 'river 4', 'Town 4')
station_A.latest_level = 5.8
station_B.latest_level = 1.7
station_C.latest_level = 6
station_D.latest_level = 6.7
list = stations_level_over_threshold((station_A, station_B, station_C, station_D), 0.4)
A, B, C, D = 8/15, 0.35, 2/3, 47/60
assert list == [(station_D, D), (station_C, C), (station_A, A)]
def test_stations_highest_rel_level():
station_A = MonitoringStation('ID A', 'Measurement ID A', 'Name A', (0,1), (1,10), 'river 1', 'Town 1')
station_B = MonitoringStation('ID B', 'Measurement ID B', 'Name B', (5,5), (1,3), 'river 2', 'Town 2')
station_C = MonitoringStation('ID C', 'Measurement ID C', 'Name C', (2,5), (4,7), 'river 3', 'Town 3')
station_D = MonitoringStation('ID D', 'Measurement ID D', 'Name D', (4,9), (2,8), 'river 4', 'Town 4')
station_A.latest_level = 5.8
station_B.latest_level = 1.7
station_C.latest_level = 6
station_D.latest_level = 6.7
list = stations_highest_rel_level((station_A, station_B, station_C, station_D), 2)
A, B, C, D = 8/15, 0.35, 2/3, 47/60
assert list == [(station_D, D), (station_C, C)]
| 50.457143
| 107
| 0.652888
| 295
| 1,766
| 3.715254
| 0.142373
| 0.138686
| 0.043796
| 0.062956
| 0.801095
| 0.801095
| 0.766423
| 0.766423
| 0.708029
| 0.708029
| 0
| 0.062629
| 0.177237
| 1,766
| 35
| 108
| 50.457143
| 0.691672
| 0
| 0
| 0.666667
| 0
| 0
| 0.17657
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.074074
| false
| 0
| 0.111111
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
14c8b337b1a866be138ce87f8287153a7f5720b0
| 13,092
|
py
|
Python
|
Scikit_Extensions_For_Stocks/time_series_scaler/_TimeSeriesScaler.py
|
uNRealCoder/Scikit-Extensions-Stock
|
0968f2319373b91218589119cb06c561aa315e58
|
[
"MIT"
] | null | null | null |
Scikit_Extensions_For_Stocks/time_series_scaler/_TimeSeriesScaler.py
|
uNRealCoder/Scikit-Extensions-Stock
|
0968f2319373b91218589119cb06c561aa315e58
|
[
"MIT"
] | null | null | null |
Scikit_Extensions_For_Stocks/time_series_scaler/_TimeSeriesScaler.py
|
uNRealCoder/Scikit-Extensions-Stock
|
0968f2319373b91218589119cb06c561aa315e58
|
[
"MIT"
] | null | null | null |
import numpy
from sklearn.base import BaseEstimator,TransformerMixin
from copy import deepcopy
class LinearAutoRegressiveScaler(TransformerMixin,BaseEstimator):
"""
Transformers Time Series/Linear Data by calculating AR(1) and dividing by max delta.
"""
def __init__(self):
self._initialValue = None
self.MaxDiff = None
def __reset_state(self):
self._initialValue = None
self.MaxDiff = None
def __isInitialized(self):
if(self._initialValue is None or self.MaxDiff is None):
return False
else:
return True
def __partial_fit(self,X,y=None,n=1,prepend=True,forceReshape=False,**fit_params):
"""Fit the Linear Time Series Transformer on X. Expects a 1D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
prepend (bool, optional): Prepend a 0 to data. Defaults to False.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
assert n==1, "Not Supported"
X = numpy.array(deepcopy(X)).astype(float) #Paranoia
if(forceReshape==True):
X = numpy.ravel(X) #OMG ravel has flattened them. Will someone stop the match already?!
assert X.ndim == 1, "Array should be 1D"
if(self.__isInitialized()==False):
self._initialValue = deepcopy(X[0])
DiffArray = numpy.diff(X, int(n))
if(prepend==True):
DiffArray = numpy.insert(DiffArray,[0],0,axis=None)
if(self.__isInitialized()==False):
self.MaxDiff = numpy.max(numpy.abs(DiffArray))
if(int(self.MaxDiff)==0): #0 protection
return DiffArray
DiffArray = DiffArray/self.MaxDiff
return DiffArray
def fit(self,X,y=None, **fit_params):
"""Fit the Linear Time Series Transformer on X. Expects a 1D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
prepend (bool, optional): Prepend a 0 to data. Defaults to False.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
self.__reset_state()
self.__partial_fit(X, y, **fit_params)
pass
def transform(self,X,prepend = True,forceReshape=False):
"""
Transform 1D array X.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to transform
forceReshape (bool, optional): [description]. Defaults to False.
"""
return self.__partial_fit(X,y=None,n=1,prepend=prepend, forceReshape=forceReshape)
def fit_transform(self, X, y=None,prepend = True,forceReshape=False, **fit_params):
if y is None:
self.fit(X, **fit_params)
return self.transform(X,prepend=prepend,forceReshape=forceReshape)
else:
self.fit(X, y, **fit_params)
return self.transform(X,prepend=prepend,forceReshape=forceReshape)
def inverse_transform(self,X,prepend=False):
arr = deepcopy(X)
arr = arr*self.MaxDiff
if(prepend==False):
arr = numpy.insert(arr,[0],0,axis=None)
else:
arr[0] = self._initialValue
arr = numpy.cumsum(arr)
return arr
class Linear2DAutoRegressiveScaler(TransformerMixin,BaseEstimator):
def __init__(self):
self.Transformers = []
def fit(self, X, y=None,axis=-1,**fit_params):
"""Fit the Linear Auto Regressive Transformer on X. Expects a 1D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
prepend (bool, optional): Prepend a 0 to data. Defaults to False.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
X = numpy.array(deepcopy(X))
for val in numpy.rollaxis(X,axis):
LAR = LinearAutoRegressiveScaler()
LAR.fit(val)
self.Transformers.append(LAR)
def transform(self,X,y=None,axis=-1,prepend = True,forceReshape=False):
"""[summary]
Args:
X ([type]): [description]
y ([type], optional): [description]. Defaults to None.
"""
X = numpy.array(deepcopy(X))
Res = []
assert len(self.Transformers) == X.shape[axis]
n = X.shape[axis]
for ind,val in zip(range(0,n),numpy.rollaxis(X,axis)):
Res.append(self.Transformers[ind].transform(val,prepend=prepend,forceReshape=forceReshape))
return numpy.array(Res)
def fit_transform(self, X, y=None,axis=-1,prepend = True,forceReshape=False, **fit_params):
X = numpy.array(deepcopy(X))
n = X.shape[axis]
Res = []
for val in numpy.rollaxis(X,axis):
LAR = LinearAutoRegressiveScaler()
Res.append(LAR.fit_transform(val,y,prepend=prepend,forceReshape=forceReshape,**fit_params))
self.Transformers.append(LAR)
return numpy.array(Res)
class Linear1DGainScaler(TransformerMixin,BaseEstimator):
"""
Transformers Time Series/Linear Data by calculating AR(1) and dividing by previous value.
"""
def __partial_fit(self,X,y=None,forceReshape=False,**fit_params):
"""Fit the Linear Time Series Transformer on X. Expects a 1D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
X = numpy.array(deepcopy(X)).astype(float) #Paranoia
if(forceReshape==True):
X = numpy.ravel(X) #OMG ravel has flattened them. Will someone stop the match already?!
assert X.ndim == 1, "Array should be 1D"
if(self.__isInitialized()==False):
self._initialValue = deepcopy(X[0])
DiffArray = numpy.diff(X, 1)
DiffArray = numpy.insert(DiffArray,[0],0,axis=None)
DiffArray[1:] = numpy.divide(DiffArray[1:],X[:-1])
return DiffArray
def inverse_transform(self,X,prepend=True):
f = []
X = numpy.array(deepcopy(X)).astype(float)
assert X.ndim == 1, "Array should be 1D"
if(prepend==False):
X = numpy.insert(X,[0],0)
assert int(X[0]) == 0, "First element should be zero, pass prepend=True"
f.append(self._initialValue)
for i,v in zip(range(1,len(X[1:])+1),X[1:]):
val = v*f[i-1]
val += f[i-1]
f.append(val)
return numpy.array(deepcopy(f)).astype(float)
class Linear2DGainScaler(TransformerMixin,BaseEstimator):
def __init__(self):
self.Scalers = []
def fit(self, X, y=None,axis=-1,**fit_params):
"""Fit the Linear Auto Regressive Transformer on X. Expects a 2D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
prepend (bool, optional): Prepend a 0 to data. Defaults to False.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
X = numpy.array(deepcopy(X))
for val in numpy.rollaxis(X,axis):
L1DG = Linear1DGainScaler()
L1DG.fit(val)
self.Transformers.append(L1DG)
def transform(self,X,y=None,axis=-1,prepend = True,forceReshape=False):
"""[summary]
Args:
X ([type]): [description]
y ([type], optional): [description]. Defaults to None.
"""
X = numpy.array(deepcopy(X))
Res = []
assert len(self.Transformers) == X.shape[axis]
n = X.shape[axis]
for ind,val in zip(range(0,n),numpy.rollaxis(X,axis)):
Res.append(self.Transformers[ind].transform(val,prepend=prepend,forceReshape=forceReshape))
return numpy.array(Res)
def fit_transform(self, X, y=None,axis=-1,prepend = True,forceReshape=False, **fit_params):
self.fit(X=X,y=y,axis=axis,**fit_params)
return self.transform(X=X,y=y,axis=axis,prepend=prepend,forceReshape=forceReshape,**fit_params)
class Linear1DLogGainScaler(TransformerMixin,BaseEstimator):
def __init__(self):
self._initialValue = None
def __reset_state(self):
self._initialValue = None
def __isInitialized(self):
if(self._initialValue is None):
return False
else:
return True
def __partial_fit(self, X, y=None, forceReshape=False,logbase=10 ,**fit_params):
"""
"""
X = numpy.array(deepcopy(X)).astype(float) #Paranoia
if(forceReshape==True):
X = numpy.ravel(X) #OMG ravel has flattened them. Will someone stop the match already?!
assert X.ndim == 1, "Array should be 1D"
self._initialValue = deepcopy(X[0])
Xshift = numpy.roll(X,1)
Xshift[0] = Xshift[1]
return deepcopy(numpy.log10(X/Xshift))
def inverse_transform(self,X):
f = []
X = numpy.array(deepcopy(X)).astype(float)
assert X.ndim == 1, "Array should be 1D"
return deepcopy(numpy.cumprod(10**X)*self._initialValue)
def fit(self,X,y=None, **fit_params):
"""Fit the Linear Time Series Transformer on X. Expects a 1D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
self.__partial_fit(X, y, **fit_params)
pass
def transform(self,X,**fit_params):
"""
Transform 1D array X.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to transform
forceReshape (bool, optional): [description]. Defaults to False.
"""
if(not self.__isInitialized()):
raise Exception("Not initialized")
return self.__partial_fit(X,y=None, **fit_params)
def fit_transform(self, X, y=None, **fit_params):
if y is None:
self.fit(X, **fit_params)
return self.transform(X,**fit_params)
else:
self.fit(X, y, **fit_params)
return self.transform(X,**fit_params)
class Linear2DLogGainScaler(TransformerMixin,BaseEstimator):
def __init__(self):
self.LogScalers = []
def fit(self, X, y=None,axis=-1,**fit_params):
"""Fit the Linear Auto Regressive Transformer on X. Expects a 2D numerical Array.
y will be ignored.
Args:
X {1D nd.array like} of shape (n,): Data for transformer to fit
y Will be ignored.
n (int, optional): NOT SUPPORTED . nth difference. Defaults to 1.
prepend (bool, optional): Prepend a 0 to data. Defaults to False.
forceReshape (bool, optional): Tries to force the shape of the array to 1D. Defaults to False.
"""
X = numpy.array(deepcopy(X))
for val in numpy.rollaxis(X,axis):
L1DG = Linear1DLogGainScaler()
L1DG.fit(val)
self.LogScalers.append(L1DG)
def transform(self,X,y=None,axis=-1):
"""[summary]
Args:
X ([type]): [description]
y ([type], optional): [description]. Defaults to None.
"""
X = numpy.array(deepcopy(X))
Res = []
assert len(self.LogScalers) == X.shape[axis]
n = X.shape[axis]
for ind,val in zip(range(0,n),numpy.rollaxis(X,axis)):
Res.append(self.LogScalers[ind].transform(val))
return deepcopy(numpy.array(Res).T)
def fit_transform(self, X, y=None,axis=-1,**fit_params):
self.fit(X=X,y=y,axis=axis,**fit_params)
return self.transform(X=X,y=y,axis=axis,**fit_params)
def inverse_transform(self,X,y=None,axis=-1,**fit_params):
X = numpy.array(deepcopy(X))
Res = []
assert len(self.LogScalers) == X.shape[axis]
n = X.shape[axis]
for ind,val in zip(range(0,n),numpy.rollaxis(X,axis)):
Res.append(self.LogScalers[ind].inverse_transform(val))
return deepcopy(numpy.array(Res).T)
| 43.207921
| 106
| 0.604109
| 1,703
| 13,092
| 4.577217
| 0.085731
| 0.033483
| 0.014625
| 0.021809
| 0.847338
| 0.822579
| 0.772675
| 0.75433
| 0.714689
| 0.678897
| 0
| 0.012072
| 0.27872
| 13,092
| 302
| 107
| 43.350993
| 0.813407
| 0.303391
| 0
| 0.643979
| 0
| 0
| 0.019657
| 0
| 0
| 0
| 0
| 0
| 0.057592
| 1
| 0.162304
| false
| 0.015707
| 0.015707
| 0
| 0.335079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
14dc685cda7f743f589218c9ae90117f5c9bd950
| 220
|
py
|
Python
|
plumbum/path/__init__.py
|
weka-io/plumbum
|
2d244c02f38498cacfb3519bdebe42e4c5dc72b3
|
[
"MIT"
] | 1
|
2019-06-12T19:42:55.000Z
|
2019-06-12T19:42:55.000Z
|
src/plumbum/path/__init__.py
|
ownport/playbook
|
6d3196ddf68f2c3c3efc4a52e26719c3e5596dca
|
[
"MIT"
] | null | null | null |
src/plumbum/path/__init__.py
|
ownport/playbook
|
6d3196ddf68f2c3c3efc4a52e26719c3e5596dca
|
[
"MIT"
] | null | null | null |
from plumbum.path.local import LocalPath, LocalWorkdir
from plumbum.path.remote import RemotePath, RemoteWorkdir
from plumbum.path.base import Path, FSUser, RelativePath
from plumbum.path.utils import copy, move, delete
| 44
| 57
| 0.836364
| 30
| 220
| 6.133333
| 0.566667
| 0.23913
| 0.326087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 220
| 4
| 58
| 55
| 0.929293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0942d6913393d350496dce103a858261d5a38ac2
| 76,180
|
py
|
Python
|
System/EnergySystem.py
|
fnbillimoria/OPEN
|
b63fb2e7bc5e43cc32034ed5f8b7df715b435461
|
[
"Apache-2.0"
] | 1
|
2020-05-14T01:56:23.000Z
|
2020-05-14T01:56:23.000Z
|
System/EnergySystem.py
|
fnbillimoria/OPEN
|
b63fb2e7bc5e43cc32034ed5f8b7df715b435461
|
[
"Apache-2.0"
] | null | null | null |
System/EnergySystem.py
|
fnbillimoria/OPEN
|
b63fb2e7bc5e43cc32034ed5f8b7df715b435461
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OPEN Energy System Module.
The EnergySystem Class has two types of methods
i) energy management system (EMS) methods which implement algorithms to
calculate Asset control references, and
ii) simulation methods which call an EMS method to obtain control
references for Asset objects, update the state of Asset objects by calling
their updatecontrol method and update the state of the Network by calling
its power flow method.
An EnergySystem has two separate time series, one for the EMS, and the
other for simulation.
OPEN includes two EMS methods for controllable Asset objects:
(i) one for multi-period optimisation
with a simple ‘copper plate’ network model, and
(ii) one for multi-period optimisation with a linear multi-phase
distribution network model which includes voltage and current flow
constraints.
Open has simulation methods for:
(i) open-loop optimisation, where the EMS method is run ahead of operation
to obtain controllable Asset references over the EMS time-series; and
(ii) for MPC, where the EMS method is implemented with a receding horizon
so that the flexible Asset references are updated at each step of the EMS
time series.
"""
#import modules
import copy
import pandas as pd
import pandapower as pp
import pandapower.networks as pn
import numpy as np
import picos as pic
import matplotlib.pyplot as plt
from System.Network_3ph_pf import Network_3ph
import cvxopt
__version__ = "1.0.2"
class EnergySystem:
"""
Base Energy System Class
Parameters
----------
storage_assets : list of objects
Containing details of each storage asset
building_assets : list of objects
Containing details of each building asset
nondispatch_assets : list of objects
Containing details of each nondispatchable asset
network : object
Object containing information about the network
market : object
Object containing information about the market
dt_ems : float
EMS time interval duration (hours)
T_ems : int
Number of EMS time intervals
dt : float
time interval duration (hours)
T : int
number of time intervals
Returns
-------
EnergySystem
"""
def __init__(self, storage_assets, nondispatch_assets, network, market,
dt, T, dt_ems, T_ems, building_assets=[]):
self.storage_assets = storage_assets
self.building_assets = building_assets
self.nondispatch_assets = nondispatch_assets
self.network = network
self.market = market
self.dt_ems = dt_ems
self.T_ems = T_ems
self.dt = dt
self.T = T
#######################################
### Open Loop Control Methods
#######################################
    def EMS_copper_plate(self):
        """
        Energy management system optimization assuming all assets connected to
        a single node.

        Parameters
        ----------
        self : EnergySystem object
            Object containing information on assets, market, network and time
            resolution.

        Returns
        -------
        Output : dictionary
            The following numpy.ndarrays are present depending upon asset mix:
                P_ES_val : Charge/discharge power for storage assets (kW)
                P_BLDG_val : Building power consumption (kW)
                P_import_val : Power imported from central grid (kW)
                P_export_val : Power exported to central grid (kW)
                P_demand_val : System power demand at energy management time
                    resolution

        Raises
        ------
        ValueError
            If there are neither storage nor building assets to dispatch.
        """
        #setup and run a basic energy optimisation
        #(single copper plate network model)
        #######################################
        ### STEP 0: setup variables
        #######################################
        prob = pic.Problem()
        N_ES = len(self.storage_assets)
        N_BLDG = len(self.building_assets)
        # total number of controllable (dispatchable) assets
        N_INDEPENDENT = N_ES + N_BLDG
        N_nondispatch = len(self.nondispatch_assets)
        # aggregate nondispatchable demand on the simulation time scale
        P_demand_actual = np.zeros(self.T)
        P_demand = np.zeros(self.T_ems)
        for i in range(N_nondispatch):
            P_demand_actual += self.nondispatch_assets[i].Pnet
        #convert P_demand_actual to EMS time series scale
        # (each EMS interval is the mean of the dt_ems/dt simulation
        # intervals it spans)
        for t_ems in range(self.T_ems):
            t_indexes = (t_ems*self.dt_ems/self.dt\
                         + np.arange(0,self.dt_ems/self.dt)).astype(int)
            P_demand[t_ems] = np.mean(P_demand_actual[t_indexes])
        #######################################
        ### STEP 1: set up decision variables
        #######################################
        #controllable asset input powers
        # (columns 0..N_BLDG-1 are buildings, N_BLDG..N_INDEPENDENT-1 storage)
        P_ctrl_asset = prob.add_variable('P_ctrl_asset',(self.T_ems,\
                                                         N_INDEPENDENT),\
                                         vtype='continuous')
        if N_BLDG > 0:
            # cooling power
            P_cooling = prob.add_variable('P_cooling',(self.T_ems,N_BLDG),\
                                          vtype='continuous')
            # heating power
            P_heating = prob.add_variable('P_heating',(self.T_ems,N_BLDG),\
                                          vtype='continuous')
            # internal temperature
            T_bldg = prob.add_variable('T_bldg',(self.T_ems,N_BLDG),\
                                       vtype='continuous')
        #(positive) net power imports
        P_import = prob.add_variable('P_import',(self.T_ems,1),\
                                     vtype='continuous')
        #(positive) net power exports
        P_export = prob.add_variable('P_export',(self.T_ems,1),\
                                     vtype='continuous')
        #(positive) maximum demand dummy variable
        # (epigraph variable for the demand charge in the objective)
        P_max_demand = prob.add_variable('P_max_demand',1,\
                                         vtype='continuous')
        #######################################
        ### STEP 2: set up constraints
        #######################################
        Asum_np = np.tril(np.ones([self.T_ems,self.T_ems])).astype('double')
        #lower triangle matrix summing powers
        # (Asum @ P gives cumulative sums, i.e. energy up to each interval)
        Asum = pic.new_param('Asum',Asum_np)
        #building thermal model constraints
        for i in range(N_BLDG):
            #maximum heating constraint
            prob.add_constraint(P_heating[:,i] <= self.building_assets[i].Hmax)
            #maximum cooling constraint
            prob.add_constraint(P_cooling[:,i] <= self.building_assets[i].Cmax)
            #minimum heating constraint
            prob.add_constraint(P_heating[:,i] >= 0)
            #minimum cooling constraint
            prob.add_constraint(P_cooling[:,i] >= 0)
            #maximum temperature constraint
            prob.add_constraint(T_bldg[:,i] <= self.building_assets[i].Tmax)
            #minimum temperature constraint
            prob.add_constraint(T_bldg[:,i] >= self.building_assets[i].Tmin)
            #power consumption is the sum of heating and cooling
            prob.add_constraint(P_ctrl_asset[:,i] == P_cooling[:,i]\
                                + P_heating[:,i])
            for t in range(self.T_ems):
                if t == 0:
                    # initial temperature constraint
                    prob.add_constraint(T_bldg[t,i] ==\
                                        self.building_assets[i].T0)
                else:
                    # Inside temperature is a function of heating/cooling and
                    # outside temperature. Alpha, beta and gamma are parameters
                    # derived from the R and C values of the building.
                    # Relation between alpha, beta, gamma, R and C can be found
                    # in the BuildingAsset class in the Assets.py file
                    prob.add_constraint(T_bldg[t,i] ==\
                                        self.building_assets[i].\
                                        alpha*T_bldg[t-1,i] \
                                        - self.building_assets[i].\
                                        beta*P_cooling[t-1,i] \
                                        + self.building_assets[i].\
                                        beta*self.building_assets[i].\
                                        CoP*P_heating[t-1,i] \
                                        + self.building_assets[i].\
                                        gamma*self.building_assets[i].\
                                        Ta[t-1])
        #linear battery model constraints
        for i in range(N_ES):
            #maximum power constraint
            prob.add_constraint(P_ctrl_asset[:,N_BLDG+i] <=\
                                self.storage_assets[i].Pmax)
            #minimum power constraint
            prob.add_constraint(P_ctrl_asset[:,N_BLDG+i] >=\
                                self.storage_assets[i].Pmin)
            #maximum energy constraint
            # (cumulative energy relative to initial energy E0)
            prob.add_constraint(self.dt_ems*Asum*P_ctrl_asset[:,N_BLDG+i] <=\
                                self.storage_assets[i].Emax\
                                -self.storage_assets[i].E0)
            #minimum energy constraint
            prob.add_constraint(self.dt_ems*Asum*P_ctrl_asset[:,N_BLDG+i] >=\
                                self.storage_assets[i].Emin\
                                -self.storage_assets[i].E0)
            #final energy constraint
            prob.add_constraint(self.dt_ems*Asum[self.T_ems-1,:]\
                                *P_ctrl_asset[:,N_BLDG+i] ==\
                                self.storage_assets[i].ET\
                                -self.storage_assets[i].E0)
        #import/export constraints
        for t in range(self.T_ems):
            # power balance
            prob.add_constraint(sum(P_ctrl_asset[t,:]) + P_demand[t] ==\
                                P_import[t]-P_export[t])
            #maximum import constraint
            prob.add_constraint(P_import[t] <= self.market.Pmax[t])
            #minimum import constraint (imports are non-negative)
            prob.add_constraint(P_import[t] >= 0)
            #maximum export constraint (market Pmin is negative)
            prob.add_constraint(P_export[t] <= -self.market.Pmin[t])
            #minimum export constraint (exports are non-negative)
            prob.add_constraint(P_export[t] >= 0)
            #maximum demand dummy variable constraint
            prob.add_constraint(P_max_demand >= P_import[t]-P_export[t])
        # frequency response (FR) state-of-charge constraints: keep each
        # storage asset's energy inside the FR band during FR windows
        if self.market.FR_window is not None:
            FR_window = self.market.FR_window
            FR_SoC_max = self.market.FR_SOC_max
            FR_SoC_min = self.market.FR_SOC_min
            for t in range(self.T_ems):
                if FR_window[t] ==1:
                    for i in range(N_ES):
                        # FR state-of-charge upper bound
                        prob.add_constraint(self.dt_ems
                                            * Asum[t,:]
                                            * P_ctrl_asset[:,N_BLDG+i]
                                            <= (FR_SoC_max
                                                * self.storage_assets[i].Emax)
                                            - self.storage_assets[i].E0)
                        # FR state-of-charge lower bound
                        prob.add_constraint(self.dt_ems
                                            * Asum[t,:]
                                            * P_ctrl_asset[:,N_BLDG+i]
                                            >= (FR_SoC_min
                                                * self.storage_assets[i].Emax)
                                            - self.storage_assets[i].E0)
        #######################################
        ### STEP 3: set up objective
        #######################################
        # demand charge on peak net import plus energy cost
        # NOTE(review): unlike the MPC methods below, the price terms here are
        # not multiplied by dt_ems — confirm whether that is intended.
        prob.set_objective('min',self.market.demand_charge*P_max_demand+\
                           sum(self.market.prices_import[t]*P_import[t]+\
                               -self.market.prices_export[t]*P_export[t]\
                               for t in range(self.T_ems)))
        #######################################
        ### STEP 3: solve the optimisation
        #######################################
        print('*** SOLVING THE OPTIMISATION PROBLEM ***')
        prob.solve(verbose = 0)
        print('*** OPTIMISATION COMPLETE ***')
        P_ctrl_asset_val = P_ctrl_asset.value
        P_import_val = P_import.value
        P_export_val = P_export.value
        P_demand_val = P_demand
        if N_BLDG > 0:
            #Store internal temperature inside object
            T_bldg_val = T_bldg.value
            for b in range(N_BLDG):
                self.building_assets[b].T_int = T_bldg_val[:,b]
        # assemble the output dictionary depending on the asset mix
        # (building columns come first in P_ctrl_asset, then storage)
        if N_ES > 0 and N_BLDG > 0:
            output = {'P_BLDG_val':P_ctrl_asset_val[:,:N_BLDG],\
                      'P_ES_val':P_ctrl_asset_val[:,N_BLDG:N_ES+N_BLDG],\
                      'P_import_val':P_import_val,\
                      'P_export_val':P_export_val,\
                      'P_demand_val':P_demand_val}
        elif N_ES == 0 and N_BLDG > 0:
            output = {'P_BLDG_val':P_ctrl_asset_val[:,:N_BLDG],\
                      'P_import_val':P_import_val,\
                      'P_export_val':P_export_val,\
                      'P_demand_val':P_demand_val}
        elif N_ES > 0 and N_BLDG == 0:
            output = {'P_ES_val':P_ctrl_asset_val[:,:N_ES],\
                      'P_import_val':P_import_val,\
                      'P_export_val':P_export_val,\
                      'P_demand_val':P_demand_val}
        else:
            raise ValueError('No dispatchable assets.')
        return output
    def simulate_network(self):
        """
        Run the Energy Management System in open loop and simulate a pandapower
        network.

        Parameters
        ----------
        self : EnergySystem object
            Object containing information on assets, market, network and time
            resolution.

        Returns
        -------
        Output : dictionary
            The following numpy.ndarrays are present depending upon asset mix:
                buses_Vpu : Voltage magnitude at bus (V)
                buses_Vang : Voltage angle at bus (rad)
                buses_Pnet : Real power at bus (kW)
                buses_Qnet : Reactive power at bus (kVAR)
                Pnet_market : Real power seen by the market (kW)
                Qnet_market : Reactive power seen by the market (kVAR)
                P_ES_ems : Charge/discharge power for storage assets at energy
                    management time resolution (kW)
                P_BLDG_ems : Building power consumption at energy management
                    time resolution (kW)
                P_import_ems : Power imported from central grid at energy
                    management time resolution (kW)
                P_export_ems : Power exported to central grid at energy
                    management time resolution (kW)
                P_demand_ems : System power demand at energy management time
                    resolution (kW)

        Raises
        ------
        ValueError
            If there are neither storage nor building assets to dispatch.
        """
        #######################################
        ### STEP 1: solve the optimisation
        #######################################
        t0 = 0  # (unused) open-loop simulation always starts at interval 0
        output_ems = self.EMS_copper_plate()
        N_ESs = len(self.storage_assets) #number of storage assets
        N_BLDGs = len(self.building_assets) #number of buildings
        N_nondispatch = len(self.nondispatch_assets) #nondispatchable assets
        P_import_ems = output_ems['P_import_val']
        P_export_ems = output_ems['P_export_val']
        # keys below only exist when the corresponding asset type is present
        if N_ESs > 0:
            P_ES_ems = output_ems['P_ES_val']
        if N_BLDGs > 0:
            P_BLDG_ems = output_ems['P_BLDG_val']
        P_demand_ems = output_ems['P_demand_val']
        #convert P_ES and P_BLDG signals to system time-series scale
        # (each EMS set-point is held constant over its dt_ems/dt
        # simulation intervals)
        if N_ESs > 0:
            P_ESs = np.zeros([self.T,N_ESs])
            for t in range(self.T):
                t_ems = int(t/(self.dt_ems/self.dt))
                P_ESs[t,:] = P_ES_ems[t_ems,:]
        if N_BLDGs > 0:
            P_BLDGs = np.zeros([self.T,N_BLDGs])
            for t in range(self.T):
                t_ems = int(t/(self.dt_ems/self.dt))
                P_BLDGs[t,:] = P_BLDG_ems[t_ems,:]
        #######################################
        ### STEP 2: update the controllable assets
        #######################################
        # push the control references into each asset so the asset objects
        # update their own Pnet/Qnet state
        if N_ESs > 0:
            for i in range(N_ESs):
                self.storage_assets[i].update_control(P_ESs[:,i])
        if N_BLDGs > 0:
            for i in range(N_BLDGs):
                self.building_assets[i].update_control(P_BLDGs[:,i])
        #######################################
        ### STEP 3: simulate the network
        #######################################
        N_buses = self.network.bus['name'].size
        P_demand_buses = np.zeros([self.T,N_buses])
        Q_demand_buses = np.zeros([self.T,N_buses])
        if N_ESs > 0:
            #calculate the total real and reactive power demand at each bus
            for i in range(N_ESs):
                bus_id = self.storage_assets[i].bus_id
                P_demand_buses[:,bus_id] += self.storage_assets[i].Pnet
                Q_demand_buses[:,bus_id] += self.storage_assets[i].Qnet
        if N_BLDGs > 0:
            #calculate the total real and reactive power demand at each bus
            for i in range(N_BLDGs):
                bus_id = self.building_assets[i].bus_id
                P_demand_buses[:,bus_id] += self.building_assets[i].Pnet
                Q_demand_buses[:,bus_id] += self.building_assets[i].Qnet
        for i in range(N_nondispatch):
            bus_id = self.nondispatch_assets[i].bus_id
            P_demand_buses[:,bus_id] += self.nondispatch_assets[i].Pnet
            Q_demand_buses[:,bus_id] += self.nondispatch_assets[i].Qnet
        buses_Vpu = np.zeros([self.T,N_buses])
        buses_Vang = np.zeros([self.T,N_buses])
        buses_Pnet = np.zeros([self.T,N_buses])
        buses_Qnet = np.zeros([self.T,N_buses])
        Pnet_market = np.zeros(self.T)
        Qnet_market = np.zeros(self.T)
        #print(P_demand_buses)
        print('*** SIMULATING THE NETWORK ***')
        for t in range(self.T):
            #for each time interval:
            #set up a copy of the network for simulation interval t
            # (deepcopy so per-interval loads don't accumulate)
            network_t = copy.deepcopy(self.network)
            for bus_id in range(N_buses):
                P_t = P_demand_buses[t,bus_id]
                Q_t = Q_demand_buses[t,bus_id]
                #add P,Q loads to the network copy
                # pandapower loads are in MW/MVAr; asset powers are kW/kVAr
                pp.create_load(network_t,bus_id,P_t/1e3,Q_t/1e3)
            #run the power flow simulation
            # (Newton-Raphson by default; generous iteration limit)
            pp.runpp(network_t,max_iteration=100) # or “nr”
            if t % 100 == 0:
                print('network sim complete for t = '\
                      + str(t) + ' of ' + str(self.T))
            # convert pandapower MW/MVAr results back to kW/kVAr
            Pnet_market[t] = network_t.res_ext_grid['p_mw'][0]*1e3
            Qnet_market[t] = network_t.res_ext_grid['q_mvar'][0]*1e3
            for bus_i in range(N_buses):
                buses_Vpu[t,bus_i] = network_t.res_bus['vm_pu'][bus_i]
                buses_Vang[t,bus_i] = network_t.res_bus['va_degree'][bus_i]
                buses_Pnet[t,bus_i] = network_t.res_bus['p_mw'][bus_i]*1e3
                buses_Qnet[t,bus_i] = network_t.res_bus['q_mvar'][bus_i]*1e3
        print('*** NETWORK SIMULATION COMPLETE ***')
        # assemble the output dictionary depending on the asset mix
        if N_ESs > 0 and N_BLDGs > 0:
            output = {'buses_Vpu':buses_Vpu,\
                      'buses_Vang':buses_Vang,\
                      'buses_Pnet':buses_Pnet,\
                      'buses_Qnet':buses_Qnet,\
                      'Pnet_market':Pnet_market,\
                      'Qnet_market':Qnet_market,\
                      'P_ES_ems':P_ES_ems,\
                      'P_BLDG_ems':P_BLDG_ems,\
                      'P_import_ems':P_import_ems,\
                      'P_export_ems':P_export_ems,\
                      'P_demand_ems':P_demand_ems}
        elif N_ESs == 0 and N_BLDGs > 0:
            output = {'buses_Vpu':buses_Vpu,\
                      'buses_Vang':buses_Vang,\
                      'buses_Pnet':buses_Pnet,\
                      'buses_Qnet':buses_Qnet,\
                      'Pnet_market':Pnet_market,\
                      'Qnet_market':Qnet_market,\
                      'P_BLDG_ems':P_BLDG_ems,\
                      'P_import_ems':P_import_ems,\
                      'P_export_ems':P_export_ems,\
                      'P_demand_ems':P_demand_ems}
        elif N_ESs > 0 and N_BLDGs == 0:
            output = {'buses_Vpu':buses_Vpu,\
                      'buses_Vang':buses_Vang,\
                      'buses_Pnet':buses_Pnet,\
                      'buses_Qnet':buses_Qnet,\
                      'Pnet_market':Pnet_market,\
                      'Qnet_market':Qnet_market,\
                      'P_ES_ems':P_ES_ems,\
                      'P_import_ems':P_import_ems,\
                      'P_export_ems':P_export_ems,\
                      'P_demand_ems':P_demand_ems}
        else:
            raise ValueError('No dispatchable assets.')
        return output
# NEEDED FOR OXEMF EV CASE STUDY
    # NOTE(review): the mutable default arguments ([]) are only read and
    # forwarded here, but replacing them with None sentinels would be safer.
    def simulate_network_3phPF(self, ems_type = '3ph',
                               i_unconstrained_lines=[],
                               v_unconstrained_buses = []):
        """
        Run the Energy Management System in open loop and simulate an IEEE 13
        bus network either copper plate or 3ph.

        Parameters
        ----------
        self : EnergySystem object
            Object containing information on assets, market, network and time
            resolution.
        ems_type : string
            Identifies whether the system is copper plate or 3ph. Default 3ph
        i_unconstrained_lines : list
            List of network lines which have unconstrained current
        v_unconstrained_buses : list
            List of buses at which the voltage is not constrained

        Returns
        -------
        Output : dictionary
            PF_network_res : Network power flow results stored as a list of
                objects (one Network_3ph copy per simulation interval)
            P_ES_ems : Charge/discharge power for storage assets at energy
                management time resolution (kW)
            P_import_ems : Power imported from central grid at energy
                management time resolution (kW)
            P_export_ems : Power exported to central grid at energy
                management time resolution (kW)
            P_demand_ems : System power demand at energy management time
                resolution (kW)
        """
        #######################################
        ### STEP 1: solve the optimisation
        #######################################
        t0 = 0  # open-loop: optimise once from the first EMS interval
        if ems_type == 'copper_plate':
            output_ems = self.EMS_copper_plate_t0(t0)
        else:
            # any other value falls through to the 3-phase linear EMS
            output_ems = self.EMS_3ph_linear_t0(t0,
                                                i_unconstrained_lines,
                                                v_unconstrained_buses)
        P_import_ems = output_ems['P_import_val']
        P_export_ems = output_ems['P_export_val']
        P_ES_ems = output_ems['P_ES_val']
        P_demand_ems = output_ems['P_demand_val']
        #convert P_EV signals to system time-series scale
        # (each EMS set-point is held over its dt_ems/dt simulation intervals)
        N_ESs = len(self.storage_assets)
        N_nondispatch = len(self.nondispatch_assets)
        P_ESs = np.zeros([self.T,N_ESs])
        for t in range(self.T):
            t_ems = int(t/(self.dt_ems/self.dt))
            P_ESs[t,:] = P_ES_ems[t_ems,:]
        #######################################
        ### STEP 2: update the controllable assets
        #######################################
        for i in range(N_ESs):
            self.storage_assets[i].update_control(P_ESs[:,i])
        #######################################
        ### STEP 3: simulate the network
        #######################################
        N_buses = self.network.N_buses
        N_phases = self.network.N_phases
        P_demand_buses = np.zeros([self.T,N_buses,N_phases])
        Q_demand_buses = np.zeros([self.T,N_buses,N_phases])
        #calculate the total real and reactive power demand at each bus phase
        # (an asset's power is split evenly across its connected phases)
        for i in range(N_ESs):
            bus_id = self.storage_assets[i].bus_id
            phases_i = self.storage_assets[i].phases
            N_phases_i = np.size(phases_i)
            for ph_i in np.nditer(phases_i):
                P_demand_buses[:,bus_id,ph_i] +=\
                    self.storage_assets[i].Pnet/N_phases_i
                Q_demand_buses[:,bus_id,ph_i] +=\
                    self.storage_assets[i].Qnet/N_phases_i
        for i in range(N_nondispatch):
            bus_id = self.nondispatch_assets[i].bus_id
            phases_i = self.nondispatch_assets[i].phases
            N_phases_i = np.size(phases_i)
            for ph_i in np.nditer(phases_i):
                P_demand_buses[:,bus_id,ph_i] +=\
                    self.nondispatch_assets[i].Pnet/N_phases_i
                Q_demand_buses[:,bus_id,ph_i] +=\
                    self.nondispatch_assets[i].Qnet/N_phases_i
        #Store power flow results as a list of network objects
        PF_network_res = []
        print('*** SIMULATING THE NETWORK ***')
        for t in range(self.T):
            #for each time interval:
            #set up a copy of the network for simulation interval t
            # (deepcopy so per-interval loads don't accumulate)
            network_t = copy.deepcopy(self.network)
            network_t.clear_loads()
            for bus_id in range(N_buses):
                for ph_i in range(N_phases):
                    Pph_t = P_demand_buses[t,bus_id,ph_i]
                    Qph_t = Q_demand_buses[t,bus_id,ph_i]
                    #add P,Q loads to the network copy
                    network_t.set_load(bus_id,ph_i,Pph_t,Qph_t)
            #run the power flow simulation
            # (Z-bus power flow method of the Network_3ph class)
            network_t.zbus_pf()
            PF_network_res.append(network_t)
        print('*** NETWORK SIMULATION COMPLETE ***')
        return {'PF_network_res' :PF_network_res,\
                'P_ES_ems':P_ES_ems,\
                'P_import_ems':P_import_ems,\
                'P_export_ems':P_export_ems,\
                'P_demand_ems':P_demand_ems}
#######################################
### Model Predictive Control Methods
#######################################
def EMS_copper_plate_t0(self, t0):
"""
Setup and run a basic energy optimisation (single copper plate network
model) for MPC interval t0
"""
#######################################
### STEP 0: setup variables
#######################################
t0_dt = int(t0*self.dt_ems/self.dt)
T_mpc = self.T_ems-t0
T_range = np.arange(t0,self.T_ems)
prob = pic.Problem()
N_ES = len(self.storage_assets)
N_nondispatch = len(self.nondispatch_assets)
P_demand_actual = np.zeros(self.T)
P_demand_pred = np.zeros(self.T)
P_demand = np.zeros(T_mpc)
for i in range(N_nondispatch):
P_demand_actual += self.nondispatch_assets[i].Pnet
P_demand_pred += self.nondispatch_assets[i].Pnet_pred
# Assemble P_demand out of P actual and P predicted and convert to EMS
# time series scale
for t_ems in T_range:
t_indexes = ((t_ems * self.dt_ems / self.dt
+ np.arange(0, self.dt_ems / self.dt)).astype(int))
if t_ems == t0:
P_demand[t_ems-t0] = np.mean(P_demand_actual[t_indexes])
else:
P_demand[t_ems-t0] = np.mean(P_demand_pred[t_indexes])
# get total ES system demand (before optimisation)
Pnet_ES_sum = np.zeros(self.T)
for i in range(N_ES):
Pnet_ES_sum += self.storage_assets[i].Pnet
#get the maximum (historical) demand before t0
if t0 > 0:
P_max_demand_pre_t0 = np.max(P_demand_actual[0:t0_dt]\
+ Pnet_ES_sum[0:t0_dt])
else:
P_max_demand_pre_t0 = 0
#######################################
### STEP 1: set up decision variables
#######################################
# energy storage system input powers
P_ES = prob.add_variable('P_ES', (T_mpc,N_ES), vtype='continuous')
# energy storage system input powers
P_ES_ch = prob.add_variable('P_ES_ch', (T_mpc,N_ES),
vtype='continuous')
# energy storage system output powers
P_ES_dis = prob.add_variable('P_ES_dis', (T_mpc,N_ES),
vtype='continuous')
# (positive) net power imports
P_import = prob.add_variable('P_import', (T_mpc,1), vtype='continuous')
# (positive) net power exports
P_export = prob.add_variable('P_export', (T_mpc,1), vtype='continuous')
# (positive) maximum demand dummy variable
P_max_demand = prob.add_variable('P_max_demand', 1, vtype='continuous')
# (positive) minimum terminal energy dummy variable
E_T_min = prob.add_variable('E_T_min', 1, vtype='continuous')
#######################################
### STEP 2: set up constraints
#######################################
#lower triangle matrix summing powers
Asum = pic.new_param('Asum',np.tril(np.ones([T_mpc,T_mpc])))
eff_opt = self.storage_assets[i].eff_opt
# linear battery model constraints
for i in range(N_ES):
# maximum power constraint
prob.add_constraint((P_ES_ch[:,i] - P_ES_dis[:,i])\
<= self.storage_assets[i].Pmax[T_range])
# minimum power constraint
prob.add_constraint((P_ES_ch[:,i] - P_ES_dis[:,i])\
>= self.storage_assets[i].Pmin[T_range])
# maximum energy constraint
prob.add_constraint((self.dt_ems
* Asum
* (P_ES_ch[:,i] - P_ES_dis[:,i]))\
<= (self.storage_assets[i].Emax[T_range]
- self.storage_assets[i].E[t0_dt]))
# minimum energy constraint
prob.add_constraint((self.dt_ems
* Asum
* (P_ES_ch[:,i] - P_ES_dis[:,i]))\
>= (self.storage_assets[i].Emin[T_range]
- self.storage_assets[i].E[t0_dt]))
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[T_mpc-1,:]
* (P_ES_ch[:,i] - P_ES_dis[:,i])
+ E_T_min)\
>= (self.storage_assets[i].ET
- self.storage_assets[i].E[t0_dt]))
eff_opt = self.storage_assets[i].eff_opt
# P_ES_ch & P_ES_dis dummy variables
for t in range(T_mpc):
prob.add_constraint(P_ES[t, i] == (P_ES_ch[t, i]
/ eff_opt
- P_ES_dis[t, i]
* eff_opt))
prob.add_constraint(P_ES_ch[t, i] >= 0)
prob.add_constraint(P_ES_dis[t, i] >= 0)
# import/export constraints
for t in range(T_mpc):
# net import variables
prob.add_constraint((sum(P_ES[t, :]) + P_demand[t])\
== (P_import[t] - P_export[t]))
# maximum import constraint
prob.add_constraint(P_import[t] <= self.market.Pmax[t0+t])
# maximum import constraint
prob.add_constraint(P_import[t] >= 0)
# maximum import constraint
prob.add_constraint(P_export[t] <= -self.market.Pmin[t0+t])
# maximum import constraint
prob.add_constraint(P_export[t] >= 0)
#maximum demand dummy variable constraint
prob.add_constraint((P_max_demand + P_max_demand_pre_t0)\
>= (P_import[t] - P_export[t]) )
# maximum demand dummy variable constraint
prob.add_constraint(P_max_demand >= 0)
if self.market.FR_window is not None:
FR_window = self.market.FR_window
FR_SoC_max = self.market.FR_SOC_max
FR_SoC_min = self.market.FR_SOC_min
for t in range(t0,self.T_ems):
if FR_window[t] ==1:
for i in range(N_ES):
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t, :]
* (P_ES_ch[:, i]
- P_ES_dis[:, i]))\
<= (FR_SoC_max
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt])
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t, :]
* (P_ES_ch[:,i] - P_ES_dis[:,i]))\
>= (FR_SoC_min
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt])
# minimum terminal energy dummy variable constraint
prob.add_constraint(E_T_min >= 0)
#######################################
### STEP 3: set up objective
#######################################
prices_import = pic.new_param('prices_import',
self.market.prices_import)
prices_export = pic.new_param('prices_export',
self.market.prices_export)
terminal_const = 1e12 # coeff for objective terminal soft constraint
prob.set_objective('min', (self.market.demand_charge * P_max_demand +\
sum(sum(self.dt_ems
* self.storage_assets[i].c_deg_lin
* (P_ES_ch[t, i] + P_ES_dis[t,i])\
for i in range(N_ES))
+ self.dt_ems
* prices_import[t0 + t]
* P_import[t]
- self.dt_ems
* prices_export[t0 + t]
* P_export[t]\
for t in range(T_mpc))
+ terminal_const * E_T_min))
#######################################
### STEP 3: solve the optimisation
#######################################
print('*** SOLVING THE OPTIMISATION PROBLEM ***')
prob.solve(verbose = 0)
print('*** OPTIMISATION COMPLETE ***')
P_ES_val = np.array(P_ES.value)
P_import_val = np.array(P_import.value)
P_export_val = np.array(P_export.value)
P_demand_val = np.array(P_demand)
E_T_min_val = np.array(E_T_min.value)
return {'P_ES_val':P_ES_val,\
'P_import_val':P_import_val,\
'P_export_val':P_export_val,\
'P_demand_val':P_demand_val,\
'E_T_min_val':E_T_min_val}
def EMS_copper_plate_t0_c1deg(self, t0):
"""
setup and run a basic energy optimisation (single copper plate network
model) for MPC interval t0
"""
#######################################
### STEP 0: setup variables
#######################################
t0_dt = int(t0 * self.dt_ems / self.dt)
T_mpc = self.T_ems - t0
T_range = np.arange(t0,self.T_ems)
prob = pic.Problem()
N_ES = len(self.storage_assets)
N_nondispatch = len(self.nondispatch_assets)
P_demand_actual = np.zeros(self.T)
P_demand_pred = np.zeros(self.T)
P_demand = np.zeros(T_mpc)
for i in range(N_nondispatch):
P_demand_actual += self.nondispatch_assets[i].Pnet
P_demand_pred += self.nondispatch_assets[i].Pnet_pred
# Assemble P_demand out of P actual and P predicted and convert to
# EMS time series scale
for t_ems in T_range:
t_indexes = (t_ems
* self.dt_ems
/ self.dt
+ np.arange(0, self.dt_ems / self.dt)).astype(int)
if t_ems == t0:
P_demand[t_ems-t0] = np.mean(P_demand_actual[t_indexes])
else:
P_demand[t_ems-t0] = np.mean(P_demand_pred[t_indexes])
#get total ES system demand (before optimisation)
Pnet_ES_sum = np.zeros(self.T)
for i in range(N_ES):
Pnet_ES_sum += self.storage_assets[i].Pnet
#get the maximum (historical) demand before t0
if t0 > 0:
P_max_demand_pre_t0 = (np.max(P_demand_actual[0:t0_dt]
+ Pnet_ES_sum[0: t0_dt]))
else:
P_max_demand_pre_t0 = 0
#######################################
### STEP 1: set up decision variables
#######################################
# energy storage system input powers
P_ES = prob.add_variable('P_ES', (T_mpc,N_ES), vtype='continuous')
# energy storage system input powers
P_ES_ch = prob.add_variable('P_ES_ch', (T_mpc,N_ES),
vtype='continuous')
# energy storage system output powers
P_ES_dis = prob.add_variable('P_ES_dis', (T_mpc,N_ES),
vtype='continuous')
# (positive) net power imports
P_import = prob.add_variable('P_import', (T_mpc,1), vtype='continuous')
# (positive) net power exports
P_export = prob.add_variable('P_export', (T_mpc,1), vtype='continuous')
# (positive) maximum demand dummy variable
P_max_demand = prob.add_variable('P_max_demand', 1, vtype='continuous')
# (positive) minimum terminal energy dummy variable
E_T_min = prob.add_variable('E_T_min', 1, vtype='continuous')
#######################################
### STEP 2: set up constraints
#######################################
# lower triangle matrix summing powers
Asum = pic.new_param('Asum', np.tril(np.ones([T_mpc,T_mpc])))
# Asum = cvxopt.matrix(np.tril(np.ones([T_mpc,T_mpc])), (T_mpc,T_mpc),
# 'd')
# linear battery model constraints
for i in range(N_ES):
# maximum power constraint
prob.add_constraint((P_ES_ch[:, i] - P_ES_dis[:, i])\
<= self.storage_assets[i].Pmax[T_range])
# minimum power constraint
prob.add_constraint((P_ES_ch[:, i] - P_ES_dis[:, i])\
>= self.storage_assets[i].Pmin[T_range])
# maximum energy constraint
prob.add_constraint((self.dt_ems
* Asum
* (P_ES_ch[:,i] - P_ES_dis[:,i]))\
<= (self.storage_assets[i].Emax[T_range]
- self.storage_assets[i].E[t0_dt]))
# minimum energy constraint
prob.add_constraint((self.dt_ems
* Asum
* (P_ES_ch[:,i] - P_ES_dis[:,i]))\
>= (self.storage_assets[i].Emin[T_range]
- self.storage_assets[i].E[t0_dt]))
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[T_mpc-1, :]
* (P_ES_ch[:, i] - P_ES_dis[:,i])
+ E_T_min)\
>= (self.storage_assets[i].ET
- self.storage_assets[i].E[t0_dt]))
eff_opt = self.storage_assets[i].eff_opt
#P_ES_ch & P_ES_dis dummy variables
for t in range(T_mpc):
prob.add_constraint(P_ES[t, i] == (P_ES_ch[t, i]
/ eff_opt
- P_ES_dis[t, i]
* eff_opt))
prob.add_constraint(P_ES_ch[t, i] >= 0)
prob.add_constraint(P_ES_dis[t, i] >= 0)
#import/export constraints
for t in range(T_mpc):
# net import variables
prob.add_constraint(sum(P_ES[t, :]) + P_demand[t]\
== P_import[t] - P_export[t])
# maximum import constraint
prob.add_constraint(P_import[t] <= self.market.Pmax[t0+t])
# maximum import constraint
prob.add_constraint(P_import[t] >= 0)
# maximum import constraint
prob.add_constraint(P_export[t] <= -self.market.Pmin[t0 + t])
# maximum import constraint
prob.add_constraint(P_export[t] >= 0)
# maximum demand dummy variable constraint
prob.add_constraint(P_max_demand + P_max_demand_pre_t0\
>= P_import[t] - P_export[t])
# maximum demand dummy variable constraint
prob.add_constraint(P_max_demand >= 0)
# minimum terminal energy dummy variable constraint
prob.add_constraint(E_T_min[:] >= 0)
#if FFR energy constraints
if self.market.FR_window is not None:
FR_window = self.market.FR_window
FR_SoC_max = self.market.FR_SOC_max
FR_SoC_min = self.market.FR_SOC_min
for t in range(len(T_mpc)):
if FR_window[t] == 1:
for i in range(N_ES):
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t, :]
* P_ES[:, i])\
<= ((FR_SoC_max
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt]))
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t, :]
* P_ES[:, i])\
>= ((FR_SoC_min
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt]))
#######################################
### STEP 3: set up objective
#######################################
prices_import = pic.new_param('prices_import',
self.market.prices_import)
prices_export = pic.new_param('prices_export',
self.market.prices_export)
terminal_const = 1e12 #coeff for objective terminal soft constraint
prob.set_objective('min', (self.market.demand_charge
* P_max_demand
+ sum(sum(self.dt_ems
* self.storage_assets[i].c_deg_lin
* (P_ES_ch[t,i] + P_ES_dis[t,i])\
for i in range(N_ES))
+ self.dt_ems
* prices_import[t0 + t]
* P_import[t]
+ -self.dt_ems
* prices_export[t0 + t]
* P_export[t]\
for t in range(T_mpc))
+ terminal_const
* E_T_min))
#######################################
### STEP 3: solve the optimisation
#######################################
print('*** SOLVING THE OPTIMISATION PROBLEM ***')
#prob.solve(verbose = 0,solver='cvxopt')
prob.solve(verbose = 0)
print('*** OPTIMISATION COMPLETE ***')
P_ES_val = np.array(P_ES.value)
P_import_val = np.array(P_import.value)
P_export_val = np.array(P_export.value)
P_demand_val = np.array(P_demand)
return {'opt_prob':prob,\
'P_ES_val':P_ES_val,\
'P_import_val':P_import_val,\
'P_export_val':P_export_val,\
'P_demand_val':P_demand_val}
# NEEDED FOR OXEMF EV CASE
def EMS_3ph_linear_t0(self, t0, i_unconstrained_lines=[],
v_unconstrained_buses = []):
"""
Energy management system optimization assuming 3 phase linear network
model for Model Predictive Control interval t0
Parameters
----------
self : EnergySystem object
Object containing information on assets, market, network and time
resolution.
t0 : int
Interval in Model Predictive Control. If open loop, t0 = 0
i_unconstrained_lines : list
List of network lines which have unconstrained current
v_unconstrained_buses : list
List of buses at which the voltage is not constrained
Returns
-------
Output : dictionary
The following numpy.ndarrays are present depending upon asset mix:
P_ES_val : Charge/discharge power for storage assets (kW)
P_import_val : Power imported from central grid (kW)
P_export_val : Power exported to central grid (kW)
P_demand_val : System power demand at energy management time
resolution (kW)
PF_networks_lin : Network 3ph list of objects, one for each
optimisation interval, storing the linear power
flow model used to formulate netowrk
constraints
"""
#######################################
### STEP 0: setup variables
#######################################
prob = pic.Problem()
t0_dt = int(t0*self.dt_ems/self.dt)
T_mpc = self.T_ems-t0
T_range = np.arange(t0,self.T_ems)
N_buses = self.network.N_buses
N_phases = self.network.N_phases
N_ES = len(self.storage_assets)
N_nondispatch = len(self.nondispatch_assets)
P_demand_actual = np.zeros([self.T,N_nondispatch])
P_demand_pred = np.zeros([self.T,N_nondispatch])
P_demand = np.zeros([T_mpc,N_nondispatch])
Q_demand_actual = np.zeros([self.T,N_nondispatch])
Q_demand_pred = np.zeros([self.T,N_nondispatch])
Q_demand = np.zeros([T_mpc,N_nondispatch])
for i in range(N_nondispatch):
P_demand_actual[:,i] = self.nondispatch_assets[i].Pnet
P_demand_pred[:,i] = self.nondispatch_assets[i].Pnet_pred
Q_demand_actual[:,i] = self.nondispatch_assets[i].Qnet
Q_demand_pred[:,i] = self.nondispatch_assets[i].Qnet_pred
#Assemble P_demand out of P actual and P predicted and convert to EMS
#time series scale
for i in range(N_nondispatch):
for t_ems in T_range:
t_indexes = (t_ems*self.dt_ems/self.dt +
np.arange(0,self.dt_ems/self.dt)).astype(int)
if t_ems == t0:
P_demand[t_ems-t0,i] =\
np.mean(P_demand_actual[t_indexes,i])
Q_demand[t_ems-t0,i] = \
np.mean(Q_demand_actual[t_indexes,i])
else:
P_demand[t_ems-t0,i] = np.mean(P_demand_pred[t_indexes,i])
Q_demand[t_ems-t0,i] = np.mean(Q_demand_pred[t_indexes,i])
#get total ES system demand (before optimisation)
Pnet_ES_sum = np.zeros(self.T)
for i in range(N_ES):
Pnet_ES_sum += self.storage_assets[i].Pnet
#get the maximum (historical) demand before t0
if t0 == 0:
P_max_demand_pre_t0 = 0
else:
if N_nondispatch == 0: P_max_demand_pre_t0 = Pnet_ES_sum[0:t0_dt]
else:
P_demand_act_sum = sum(P_demand_actual[0:t0_dt,i] \
for i in range(N_nondispatch))
P_max_demand_pre_t0 = np.max(P_demand_act_sum +
Pnet_ES_sum[0:t0_dt])
#Set up Matrix linking nondispatchable assets to their bus and phase
G_wye_nondispatch = np.zeros([3*(N_buses-1),N_nondispatch])
G_del_nondispatch = np.zeros([3*(N_buses-1),N_nondispatch])
for i in range(N_nondispatch):
asset_N_phases = self.nondispatch_assets[i].phases.size
bus_id = self.nondispatch_assets[i].bus_id
# check if Wye connected
wye_flag = self.network.bus_df[self.\
network.bus_df['number']==\
bus_id]['connect'].values[0]=='Y'
for ph in np.nditer(self.nondispatch_assets[i].phases):
bus_ph_index = 3*(bus_id-1) + ph
if wye_flag is True:
G_wye_nondispatch[bus_ph_index,i] = 1/asset_N_phases
else:
G_del_nondispatch[bus_ph_index,i] = 1/asset_N_phases
#Set up Matrix linking energy storage assets to their bus and phase
G_wye_ES = np.zeros([3*(N_buses-1),N_ES])
G_del_ES = np.zeros([3*(N_buses-1),N_ES])
for i in range(N_ES):
asset_N_phases = self.storage_assets[i].phases.size
bus_id = self.storage_assets[i].bus_id
# check if Wye connected
wye_flag = self.network.bus_df[self.\
network.bus_df['number']==\
bus_id]['connect'].values[0]=='Y'
for ph in np.nditer(self.storage_assets[i].phases):
bus_ph_index = 3*(bus_id-1) + ph
if wye_flag is True:
G_wye_ES[bus_ph_index,i] = 1/asset_N_phases
else:
G_del_ES[bus_ph_index,i] = 1/asset_N_phases
G_wye_nondispatch_PQ = np.concatenate((G_wye_nondispatch,
G_wye_nondispatch),axis=0)
G_del_nondispatch_PQ = np.concatenate((G_del_nondispatch,
G_del_nondispatch),axis=0)
G_wye_ES_PQ = np.concatenate((G_wye_ES,G_wye_ES),axis=0)
G_del_ES_PQ = np.concatenate((G_del_ES,G_del_ES),axis=0)
#######################################
### STEP 1: set up decision variables
#######################################
# energy storage system input powers
P_ES = prob.add_variable('P_ES',
(T_mpc,N_ES), vtype='continuous')
# energy storage system input powers
P_ES_ch = prob.add_variable('P_ES_ch',
(T_mpc,N_ES), vtype='continuous')
# energy storage system output powers
P_ES_dis = prob.add_variable('P_ES_dis',
(T_mpc,N_ES), vtype='continuous')
# (positive) net power imports
P_import = prob.add_variable('P_import',
(T_mpc,1), vtype='continuous')
# (positive) net power exports
P_export = prob.add_variable('P_export',
(T_mpc,1), vtype='continuous')
# (positive) maximum demand dummy variable
P_max_demand = prob.add_variable('P_max_demand',
1, vtype='continuous')
# (positive) minimum terminal energy dummy variable
E_T_min = prob.add_variable('E_T_min',
N_ES, vtype='continuous')
#######################################
### STEP 2: set up linear power flow models
#######################################
PF_networks_lin = []
P_lin_buses = np.zeros([T_mpc,N_buses,N_phases])
Q_lin_buses = np.zeros([T_mpc,N_buses,N_phases])
for t in range(T_mpc):
#Setup linear power flow model:
for i in range(N_nondispatch):
bus_id = self.nondispatch_assets[i].bus_id
phases_i = self.nondispatch_assets[i].phases
for ph_i in np.nditer(phases_i):
bus_ph_index = 3*(bus_id-1) + ph_i
P_lin_buses[t,bus_id,ph_i] +=\
(G_wye_nondispatch[bus_ph_index,i]+\
G_del_nondispatch[bus_ph_index,i])*P_demand[t,i]
Q_lin_buses[t,bus_id,ph_i] +=\
(G_wye_nondispatch[bus_ph_index,i]+\
G_del_nondispatch[bus_ph_index,i])*Q_demand[t,i]
#set up a copy of the network for MPC interval t
network_t = copy.deepcopy(self.network)
network_t.clear_loads()
for bus_id in range(N_buses):
for ph_i in range(N_phases):
Pph_t = P_lin_buses[t,bus_id,ph_i]
Qph_t = Q_lin_buses[t,bus_id,ph_i]
#add P,Q loads to the network copy
network_t.set_load(bus_id,ph_i,Pph_t,Qph_t)
network_t.zbus_pf()
v_lin0 = network_t.v_net_res
S_wye_lin0 = network_t.S_PQloads_wye_res
S_del_lin0 = network_t.S_PQloads_del_res
network_t.linear_model_setup(v_lin0,S_wye_lin0,S_del_lin0)
# note that phases need to be 120degrees out for good results
network_t.linear_pf()
PF_networks_lin.append(network_t)
#######################################
### STEP 3: set up constraints
#######################################
# lower triangle matrix summing powers
Asum = pic.new_param('Asum',np.tril(np.ones([T_mpc,T_mpc])))
# energy storage asset constraints
for i in range(N_ES):
# maximum power constraint
prob.add_constraint(P_ES[:,i] <=
self.storage_assets[i].Pmax[T_range])
# minimum power constraint
prob.add_constraint(P_ES[:,i] >=
self.storage_assets[i].Pmin[T_range])
# maximum energy constraint
prob.add_constraint(self.dt_ems * Asum * (P_ES_ch[:,i] -
P_ES_dis[:,i]) <=
self.storage_assets[i].Emax[T_range] -
self.storage_assets[i].E[t0_dt])
# minimum energy constraint
prob.add_constraint(self.dt_ems * Asum * (P_ES_ch[:,i] -
P_ES_dis[:,i]) >=
self.storage_assets[i].Emin[T_range] -
self.storage_assets[i].E[t0_dt])
# final energy constraint
prob.add_constraint(self.dt_ems * Asum[T_mpc-1,:] * (P_ES_ch[:,i] -
P_ES_dis[:,i]) + E_T_min[i] >=
self.storage_assets[i].ET -
self.storage_assets[i].E[t0_dt])
eff_opt = self.storage_assets[i].eff_opt
#P_ES_ch & P_ES_dis dummy variables
for t in range(T_mpc):
prob.add_constraint(P_ES[t,i] == P_ES_ch[t,i]/eff_opt -
P_ES_dis[t,i] * eff_opt)
prob.add_constraint(P_ES_ch[t,i] >= 0)
prob.add_constraint(P_ES_dis[t,i] >= 0)
#import/export constraints
for t in range(T_mpc):
# maximum import constraint
prob.add_constraint(P_import[t] <= self.market.Pmax[t0 + t])
# maximum import constraint
prob.add_constraint(P_import[t] >= 0)
# maximum import constraint
prob.add_constraint(P_export[t] <= -self.market.Pmin[t0 + t])
# maximum import constraint
prob.add_constraint(P_export[t] >= 0)
# maximum demand dummy variable constraint
prob.add_constraint(P_max_demand + P_max_demand_pre_t0 >=
P_import[t]-P_export[t])
# maximum demand dummy variable constraint
prob.add_constraint(P_max_demand >= 0)
# Network constraints
for t in range(T_mpc):
network_t = PF_networks_lin[t]
# Note that linear power flow matricies are in units of W (not kW)
PQ0_wye = np.concatenate((np.real(network_t.S_PQloads_wye_res),\
np.imag(network_t.S_PQloads_wye_res)))\
*1e3
PQ0_del = np.concatenate((np.real(network_t.S_PQloads_del_res),\
np.imag(network_t.S_PQloads_del_res)))\
*1e3
A_Pslack = (np.matmul\
(np.real(np.matmul\
(network_t.vs.T,\
np.matmul(np.conj(network_t.Ysn),\
np.conj(network_t.M_wye)))),\
G_wye_ES_PQ)\
+ np.matmul\
(np.real(np.matmul\
(network_t.vs.T,\
np.matmul(np.conj(network_t.Ysn),\
np.conj(network_t.M_del)))),\
G_del_ES_PQ))
b_Pslack = np.real(np.matmul\
(network_t.vs.T,\
np.matmul(np.conj\
(network_t.Ysn),\
np.matmul(np.conj\
(network_t.M_wye),\
PQ0_wye))))\
+np.real(np.matmul\
(network_t.vs.T,\
np.matmul(np.conj\
(network_t.Ysn),\
np.matmul(np.conj\
(network_t.M_del),
PQ0_del))))\
+np.real(np.matmul\
(network_t.vs.T,\
(np.matmul(np.conj\
(network_t.Yss),\
np.conj(network_t.vs))\
+ np.matmul(np.conj\
(network_t.Ysn),\
np.conj(network_t.M0)))))
# net import variables
prob.add_constraint(P_import[t]-P_export[t] ==\
(np.sum(A_Pslack[i]*P_ES[t,i]\
*1e3 for i in range(N_ES))\
+ b_Pslack)/1e3)
# Voltage magnitude constraints
A_vlim = np.matmul(network_t.K_wye,G_wye_ES_PQ)\
+ np.matmul(network_t.K_del,G_del_ES_PQ)
b_vlim = network_t.v_lin_abs_res
#get max/min bus voltages, removing slack and reshaping in a column
v_abs_max_vec = network_t.v_abs_max[1:,:].reshape(-1,1)
v_abs_min_vec = network_t.v_abs_min[1:,:].reshape(-1,1)
for bus_ph_index in range(0,N_phases*(N_buses-1)):
if int(bus_ph_index/3) not in (np.array\
(v_unconstrained_buses)-1):
prob.add_constraint(sum(A_vlim[bus_ph_index,i]\
*(P_ES[t,i])\
*1e3 for i in range(N_ES))\
+ b_vlim[bus_ph_index] <=\
v_abs_max_vec[bus_ph_index])
prob.add_constraint(sum(A_vlim[bus_ph_index,i]\
*(P_ES[t,i])\
*1e3 for i in range(N_ES))\
+ b_vlim[bus_ph_index] >=\
v_abs_min_vec[bus_ph_index])
# Line current magnitude constraints:
for line_ij in range(network_t.N_lines):
if line_ij not in i_unconstrained_lines:
iabs_max_line_ij = network_t.i_abs_max[line_ij,:] #3 phases
# maximum current magnitude constraint
A_line = np.matmul(network_t.Jabs_dPQwye_list[line_ij],\
G_wye_ES_PQ)\
+ np.matmul(network_t.\
Jabs_dPQdel_list[line_ij],\
G_del_ES_PQ)
for ph in range(N_phases):
prob.add_constraint(sum(A_line[ph,i]\
* P_ES[t,i]\
* 1e3 for i in range(N_ES))\
+ network_t.\
Jabs_I0_list[line_ij][ph] <=\
iabs_max_line_ij[ph])
#if FFR energy constraints
if self.market.FR_window is not None:
FR_window = self.market.FR_window
FR_SoC_max = self.market.FR_SOC_max
FR_SoC_min = self.market.FR_SOC_min
for t in range(len(T_mpc)):
if FR_window[t] ==1:
for i in range(N_ES):
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t, :]
* P_ES[:,i])\
<= ((FR_SoC_max
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt]))
# final energy constraint
prob.add_constraint((self.dt_ems
* Asum[t,:]
* P_ES[:,i])\
>= ((FR_SoC_min
* self.storage_assets[i].Emax)
- self.storage_assets[i].E[t0_dt]))
#######################################
### STEP 4: set up objective
#######################################
# minimum terminal energy dummy variable constraint
prob.add_constraint(E_T_min[i] >= 0)
#coeff for objective terminal soft constraint
terminal_const = 1e12
prices_import = pic.new_param('prices_import',
self.market.prices_import)
prices_export = pic.new_param('prices_export',
self.market.prices_export)
prob.set_objective('min', self.market.demand_charge*\
(P_max_demand+P_max_demand_pre_t0) +
sum(sum(self.dt_ems*self.storage_assets[i].\
c_deg_lin*(P_ES_ch[t,i]+
P_ES_dis[t,i])\
for i in range(N_ES))\
+ self.dt_ems*prices_import[t0+t]*P_import[t]\
- self.dt_ems*prices_export[t0+t]*P_export[t]
for t in range(T_mpc))\
+ sum(terminal_const*E_T_min[i]\
for i in range(N_ES)))
#######################################
### STEP 5: solve the optimisation
#######################################
print('*** SOLVING THE OPTIMISATION PROBLEM ***')
prob.solve(verbose = 0)
print('*** OPTIMISATION COMPLETE ***')
P_ES_val = np.array(P_ES.value)
P_import_val = np.array(P_import.value)
P_export_val = np.array(P_export.value)
P_demand_val = np.array(P_demand)
return {'P_ES_val':P_ES_val,
'P_import_val':P_import_val,
'P_export_val':P_export_val,
'P_demand_val':P_demand_val,
'PF_networks_lin':PF_networks_lin}
# NEEDED FOR OXEMF EV CASE
    def simulate_network_mpc_3phPF(self, ems_type = '3ph',
                                   i_unconstrained_lines=[],
                                   v_unconstrained_buses = []):
        """
        Run the Energy Management System using Model Predictive Control (MPC)
        and simulate an IEEE 13 bus network either copper plate or 3ph

        Parameters
        ----------
        self : EnergySystem object
            Object containing information on assets, market, network and time
            resolution.
        ems_type : string
            Identifies whether the system is copper plate or 3ph. Default 3ph
        i_unconstrained_lines : list
            List of network lines which have unconstrained current
        v_unconstrained_buses : list
            List of buses at which the voltage is not constrained

        Returns
        -------
        Output : dictionary
            PF_network_res : Network power flow results stored as a list of
                             objects
            P_ES_ems : Charge/discharge power for storage assets at energy
                       management time resolution (kW)
            P_import_ems : Power imported from central grid at energy
                           management time resolution (kW)
            P_export_ems : Power exported to central grid at energy
                           management time resolution (kW)
            P_demand_ems : System power demand at energy management time
                           resolution (kW)
        """
        #######################################
        ### STEP 0: setup variables
        #######################################
        N_ESs = len(self.storage_assets) # number of storage (EV) assets
        N_nondispatch = len(self.nondispatch_assets) # number of nondispatchable assets
        P_import_ems = np.zeros(self.T_ems)
        P_export_ems = np.zeros(self.T_ems)
        P_ES_ems = np.zeros([self.T_ems,N_ESs])
        # copper plate EMS returns one aggregate demand series; 3ph returns
        # one series per nondispatchable asset
        if ems_type == 'copper_plate':
            P_demand_ems = np.zeros(self.T_ems)
        else:
            P_demand_ems = np.zeros([self.T_ems,N_nondispatch])
        N_buses = self.network.N_buses
        N_phases = self.network.N_phases
        # bus-phase demand accumulated on the (finer) simulation time scale
        P_demand_buses = np.zeros([self.T,N_buses,N_phases])
        Q_demand_buses = np.zeros([self.T,N_buses,N_phases])
        PF_network_res = []
        #######################################
        ### STEP 1: MPC Loop
        #######################################
        print('*** MPC SIMULATION START ***')
        for t_mpc in range(self.T_ems):
            print('************************')
            print('MPC Interval '+ str(t_mpc)+ ' of '+ str(self.T_ems))
            print('************************')
            #######################################
            ### STEP 1.1: Optimisation
            #######################################
            # re-optimise at each EMS interval; only the first interval of
            # each optimised schedule (index 0) is implemented
            if ems_type == 'copper_plate':
                output_ems = self.EMS_copper_plate_t0_c1deg(t_mpc)
                P_demand_ems[t_mpc] = output_ems['P_demand_val'][0]
            else:
                output_ems = self.EMS_3ph_linear_t0(t_mpc,
                                                    i_unconstrained_lines,
                                                    v_unconstrained_buses)
                P_demand_ems[t_mpc,:] = output_ems['P_demand_val'][0,:]
            P_import_ems[t_mpc] = output_ems['P_import_val'][0]
            P_export_ems[t_mpc] = output_ems['P_export_val'][0]
            P_ES_ems[t_mpc,:] = output_ems['P_ES_val'][0,:]
            # convert P_EV signals to system time-series scale (hold the EMS
            # setpoint constant across the T_interval simulation steps)
            T_interval = int(self.dt_ems/self.dt)
            P_ESs = np.zeros([T_interval,N_ESs])
            for t in range(T_interval):
                P_ESs[t,:] = P_ES_ems[t_mpc,:]
            #######################################
            ### STEP 1.2: update the controllable assets
            #######################################
            # first simulation step covered by this EMS interval
            t0 = int(t_mpc*(self.dt_ems/self.dt))
            # get the simulation time intervals within each EMS time interval
            # and implement the ES system control for them
            t_range = np.arange(t0,t0+T_interval)
            for i in range(N_ESs):
                for t_index in range(T_interval):
                    t = t_range[t_index]
                    self.storage_assets[i].update_control_t(P_ESs[t_index,i],t)
            #######################################
            ### STEP 1.3: simulate the network
            #######################################
            # total real and reactive power demand at each bus phase,
            # split equally across each asset's connected phases
            for t_index in range(T_interval):
                t = t_range[t_index]
                for i in range(N_ESs):
                    bus_id = self.storage_assets[i].bus_id
                    phases_i = self.storage_assets[i].phases
                    N_phases_i = np.size(phases_i)
                    for ph_i in phases_i:
                        P_demand_buses[t,bus_id,ph_i] +=\
                        self.storage_assets[i].Pnet[t]/N_phases_i
                        Q_demand_buses[t,bus_id,ph_i] +=\
                        self.storage_assets[i].Qnet[t]/N_phases_i
                for i in range(N_nondispatch):
                    bus_id = self.nondispatch_assets[i].bus_id
                    phases_i = self.nondispatch_assets[i].phases
                    N_phases_i = np.size(phases_i)
                    for ph_i in np.nditer(phases_i):
                        P_demand_buses[t,bus_id,ph_i] +=\
                        self.nondispatch_assets[i].Pnet[t]/N_phases_i
                        Q_demand_buses[t,bus_id,ph_i] +=\
                        self.nondispatch_assets[i].Qnet[t]/N_phases_i
                # set up a copy of the network for simulation interval t
                network_t = copy.deepcopy(self.network)
                network_t.clear_loads()
                for bus_id in range(N_buses):
                    for ph_i in range(N_phases):
                        Pph_t = P_demand_buses[t,bus_id,ph_i]
                        Qph_t = Q_demand_buses[t,bus_id,ph_i]
                        # add P,Q loads to the network copy
                        network_t.set_load(bus_id,ph_i,Pph_t,Qph_t)
                # run the (nonlinear, Z-bus) power flow simulation
                network_t.zbus_pf()
                # store power flow results as a list of network objects
                PF_network_res.append(network_t)
        print('*** MPC SIMULATION COMPLETE ***')
        return {'PF_network_res' :PF_network_res,\
                'P_ES_ems':P_ES_ems,\
                'P_import_ems':P_import_ems,\
                'P_export_ems':P_export_ems,\
                'P_demand_ems':P_demand_ems}
def simulate_network_3phPF_lean(self, ems_type = '3ph'):
"""
run the EMS in open loop and simulate a 3-phase AC network
"""
#######################################
### STEP 1: solve the optimisation
#######################################
t0 = 0
if ems_type == 'copper_plate':
# self.EMS_copper_plate()
output_ems = self.EMS_copper_plate_t0_c1deg(t0)
else:
# self.EMS_copper_plate()
output_ems = self.EMS_3ph_linear_t0(t0)
#output_ems = self.EMS_copper_plate
P_import_ems = output_ems['P_import_val']
P_export_ems = output_ems['P_export_val']
P_ES_ems = output_ems['P_ES_val']
P_demand_ems = output_ems['P_demand_val']
#convert P_EV signals to system time-series scale
N_ESs = len(self.storage_assets) #number of EVs
N_nondispatch = len(self.nondispatch_assets) #number of EVs
P_ESs = np.zeros([self.T,N_ESs])
for t in range(self.T):
t_ems = int(t/(self.dt_ems/self.dt))
P_ESs[t,:] = P_ES_ems[t_ems,:]
#######################################
### STEP 2: update the controllable assets
#######################################
for i in range(N_ESs):
self.storage_assets[i].update_control(P_ESs[:,i])
#######################################
### STEP 3: simulate the network
#######################################
N_buses = self.network.N_buses
N_phases = self.network.N_phases
P_demand_buses = np.zeros([self.T,N_buses,N_phases])
Q_demand_buses = np.zeros([self.T,N_buses,N_phases])
#calculate the total real and reactive power demand at each bus phase
for i in range(N_ESs):
bus_id = self.storage_assets[i].bus_id
phases_i = self.storage_assets[i].phases
N_phases_i = np.size(phases_i)
for ph_i in np.nditer(phases_i):
P_demand_buses[:,bus_id,ph_i] += (self.storage_assets[i].Pnet
/ N_phases_i)
Q_demand_buses[:,bus_id,ph_i] += (self.storage_assets[i].Qnet
/ N_phases_i)
for i in range(N_nondispatch):
bus_id = self.nondispatch_assets[i].bus_id
phases_i = self.nondispatch_assets[i].phases
N_phases_i = np.size(phases_i)
for ph_i in np.nditer(phases_i):
P_demand_buses[:, bus_id, ph_i]\
+= (self.nondispatch_assets[i].Pnet / N_phases_i)
Q_demand_buses[:, bus_id, ph_i]\
+= (self.nondispatch_assets[i].Qnet / N_phases_i)
#Store power flow results as a list of network objects
PF_network_res = []
print('*** SIMULATING THE NETWORK ***')
for t in range(self.T):
#for each time interval:
#set up a copy of the network for simulation interval t
network_t = copy.deepcopy(self.network)
network_t.clear_loads()
for bus_id in range(N_buses):
for ph_i in range(N_phases):
Pph_t = P_demand_buses[t,bus_id,ph_i]
Qph_t = Q_demand_buses[t,bus_id,ph_i]
#add P,Q loads to the network copy
network_t.set_load(bus_id,ph_i,Pph_t,Qph_t)
#run the power flow simulation
network_t.zbus_pf()
if t % 1 == 0:
print('network sim complete for t = '
+ str(t) + ' of ' + str(self.T))
PF_network_res.append(network_t.res_bus_df)
print('*** NETWORK SIMULATION COMPLETE ***')
return {'PF_network_res' :PF_network_res,\
'P_ES_ems':P_ES_ems,\
'P_import_ems':P_import_ems,\
'P_export_ems':P_export_ems,\
'P_demand_ems':P_demand_ems}
| 47.642276
| 79
| 0.49152
| 8,968
| 76,180
| 3.889831
| 0.050624
| 0.01118
| 0.04386
| 0.04128
| 0.842077
| 0.814184
| 0.786894
| 0.753268
| 0.714425
| 0.697999
| 0
| 0.00805
| 0.383591
| 76,180
| 1,598
| 80
| 47.67209
| 0.734827
| 0.198582
| 0
| 0.639165
| 0
| 0
| 0.043502
| 0.000848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008946
| false
| 0
| 0.06163
| 0
| 0.079523
| 0.020875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
117eb4816a1a30cf1db2218352208c5aa8b9ca49
| 12
|
py
|
Python
|
Python/Tests/TestData/EditorTests/BackslashCompletion.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/EditorTests/BackslashCompletion.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/EditorTests/BackslashCompletion.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
x = 42
x\
| 4
| 7
| 0.333333
| 3
| 12
| 1.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.5
| 12
| 2
| 8
| 6
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
11af74f8b58d6ea6fe15337fd57dce4618151dd1
| 48
|
py
|
Python
|
pyxb/bundles/opengis/misc/xAL.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 123
|
2015-01-12T06:43:22.000Z
|
2022-03-20T18:06:46.000Z
|
pyxb/bundles/opengis/misc/xAL.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 103
|
2015-01-08T18:35:57.000Z
|
2022-01-18T01:44:14.000Z
|
pyxb/bundles/opengis/misc/xAL.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 54
|
2015-02-15T17:12:00.000Z
|
2022-03-07T23:02:32.000Z
|
from pyxb.bundles.opengis.misc.raw.xAL import *
| 24
| 47
| 0.791667
| 8
| 48
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
11c2d2a035761b9b372e50166d8ee83d855a877b
| 117
|
py
|
Python
|
springust/command/entity_config.py
|
VEINHORN/springust
|
b53ac5b877824359b9d75a81a02cb4ddd987a1a8
|
[
"MIT"
] | null | null | null |
springust/command/entity_config.py
|
VEINHORN/springust
|
b53ac5b877824359b9d75a81a02cb4ddd987a1a8
|
[
"MIT"
] | null | null | null |
springust/command/entity_config.py
|
VEINHORN/springust
|
b53ac5b877824359b9d75a81a02cb4ddd987a1a8
|
[
"MIT"
] | null | null | null |
class EntityConfig:
def __init__(self, templates_folder = None):
self.templates_folder = templates_folder
| 39
| 48
| 0.752137
| 13
| 117
| 6.230769
| 0.615385
| 0.555556
| 0.469136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 117
| 3
| 49
| 39
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
11c75dec7f6822eb9fa730525b156e644265e4f1
| 98
|
py
|
Python
|
packages/routines/about_to_Quit.py
|
robmanganelly/PyJournal
|
dcf0e6e69a62ad5c6019b099104ae64880825814
|
[
"MIT"
] | 1
|
2021-02-02T03:58:56.000Z
|
2021-02-02T03:58:56.000Z
|
packages/routines/about_to_Quit.py
|
rlothbrock/PyJournal
|
e44bca524c46364a6931375d8ac3ab8b90f71ad2
|
[
"MIT"
] | null | null | null |
packages/routines/about_to_Quit.py
|
rlothbrock/PyJournal
|
e44bca524c46364a6931375d8ac3ab8b90f71ad2
|
[
"MIT"
] | null | null | null |
from packages.modules.app_clock import kill_clock
def about_to_quit_routine():
kill_clock()
| 16.333333
| 49
| 0.795918
| 15
| 98
| 4.8
| 0.8
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 98
| 5
| 50
| 19.6
| 0.847059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
11d8236fd82771a2cb954a723d80a984ac390aab
| 136
|
py
|
Python
|
setup.py
|
ervitis/vswitch
|
95e6ec173b3028e0f379d9b8834fbee4108a8ce0
|
[
"MIT"
] | null | null | null |
setup.py
|
ervitis/vswitch
|
95e6ec173b3028e0f379d9b8834fbee4108a8ce0
|
[
"MIT"
] | null | null | null |
setup.py
|
ervitis/vswitch
|
95e6ec173b3028e0f379d9b8834fbee4108a8ce0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from vswitch import install_params
setup(**install_params)
| 17
| 34
| 0.735294
| 19
| 136
| 5.157895
| 0.736842
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.132353
| 136
| 7
| 35
| 19.428571
| 0.822034
| 0.308824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e10a42e8615985c893f944d091e6f4371b9a17fc
| 122
|
py
|
Python
|
apps/ask_codex/history/admin.py
|
joy-void-joy/jarvis-codex-server
|
10b3d5dfbc958361ea5cbb085079456ee2b502ca
|
[
"MIT"
] | null | null | null |
apps/ask_codex/history/admin.py
|
joy-void-joy/jarvis-codex-server
|
10b3d5dfbc958361ea5cbb085079456ee2b502ca
|
[
"MIT"
] | null | null | null |
apps/ask_codex/history/admin.py
|
joy-void-joy/jarvis-codex-server
|
10b3d5dfbc958361ea5cbb085079456ee2b502ca
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Log
@admin.register(Log)
class LogAdmin(admin.ModelAdmin):
pass
| 17.428571
| 33
| 0.778689
| 17
| 122
| 5.588235
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139344
| 122
| 6
| 34
| 20.333333
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
01507b7d940698d7cc13a3c71dd5a4dff80efc5f
| 147
|
py
|
Python
|
lesson-01/01/hello_world.py
|
minimum-hsu/tutorial-python
|
667692e7cd13a8a4d061a4da530dc2dfe25ac1de
|
[
"MIT"
] | null | null | null |
lesson-01/01/hello_world.py
|
minimum-hsu/tutorial-python
|
667692e7cd13a8a4d061a4da530dc2dfe25ac1de
|
[
"MIT"
] | null | null | null |
lesson-01/01/hello_world.py
|
minimum-hsu/tutorial-python
|
667692e7cd13a8a4d061a4da530dc2dfe25ac1de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
print('hello world')
print('hello', 'world')
## use "sep" parameter to change output
print('hello', 'world', sep = '_')
| 14.7
| 39
| 0.639456
| 20
| 147
| 4.65
| 0.65
| 0.322581
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.14966
| 147
| 9
| 40
| 16.333333
| 0.736
| 0.394558
| 0
| 0
| 0
| 0
| 0.376471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6d6e317f1decc36fb28039fecf19e241c3a2e975
| 1,101
|
py
|
Python
|
cryptodataaccess/Transactions/TransactionStore.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
cryptodataaccess/Transactions/TransactionStore.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
cryptodataaccess/Transactions/TransactionStore.py
|
athanikos/cryptodataaccess
|
6189a44c65a9b03c02822a534e865740ab488809
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
class TransactionStore(metaclass=ABCMeta):
@abstractmethod
def fetch_distinct_user_ids(self):
pass
@abstractmethod
def fetch_distinct_user_ids(self):
pass
@abstractmethod
def fetch_transaction(self, id):
pass
@abstractmethod
def fetch_transactions(self, user_id):
pass
@abstractmethod
def fetch_transactions(self, user_id):
pass
@abstractmethod
def fetch_transactions_before_date(self, user_id, date):
pass
@abstractmethod
def insert_transaction(self, user_id, volume, symbol, value, price, currency, date, source, source_id, operation):
pass
@abstractmethod
def update_transaction(self, id, user_id, volume, symbol, value, price, currency, date, source, source_id,
operation):
pass
@abstractmethod
def delete_transaction(self, id, throw_if_does_not_exist):
pass
@abstractmethod
def delete_transaction_by_source_id(self, source_id, throw_if_does_not_exist):
pass
| 24.466667
| 118
| 0.682107
| 124
| 1,101
| 5.774194
| 0.290323
| 0.23743
| 0.263966
| 0.181564
| 0.722067
| 0.650838
| 0.650838
| 0.581006
| 0.581006
| 0.581006
| 0
| 0
| 0.249773
| 1,101
| 44
| 119
| 25.022727
| 0.866828
| 0
| 0
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.30303
| false
| 0.30303
| 0.030303
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
6d7726ef07ea613510b793192de6063e6e113415
| 7,799
|
py
|
Python
|
ngraph/frontends/caffe2/tests/test_ops_constant.py
|
NervanaSystems/ngraph-python
|
ac032c83c7152b615a9ad129d54d350f9d6a2986
|
[
"Apache-2.0"
] | 18
|
2018-03-19T04:16:49.000Z
|
2021-02-08T14:44:58.000Z
|
ngraph/frontends/caffe2/tests/test_ops_constant.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 2
|
2019-04-16T06:41:49.000Z
|
2019-05-06T14:08:13.000Z
|
ngraph/frontends/caffe2/tests/test_ops_constant.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 11
|
2018-06-16T15:59:08.000Z
|
2021-03-06T00:45:30.000Z
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from __future__ import division
from caffe2.python import core, workspace
from ngraph.frontends.caffe2.c2_importer.importer import C2Importer
from ngraph.testing import ExecutorFactory
import numpy as np
import random as random
def test_constant():
workspace.ResetWorkspace()
shape = [10, 10]
val = random.random()
net = core.Net("net")
net.ConstantFill([], ["Y"], shape=shape, value=val, run_once=0, name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.ma.allequal(f_result, workspace.FetchBlob("Y")))
assert(np.isclose(f_result[0][0], val, atol=1e-6, rtol=0))
def test_gaussianfill():
workspace.ResetWorkspace()
# Size of test matrix
N = 100
shape = [N, N]
net = core.Net("net")
net.GaussianFill([], ["Y"], shape=shape, mean=0.0, std=1.0, name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# get caffe result
caffe_res = workspace.FetchBlob("Y")
# Elementwise difference of the two random matrixes
difference_res = caffe_res - f_result
# standard deviation of Difference Matrix
diffe_res_std = difference_res.std()
# testing can only be approximate (so in rare cases may fail!!)
# if fails once try to re-run a couple of times to make sure there is a problem)
# the difference must be still gaussian and P(|m'-m|)<3*std = 99.73%, and
# std(m) = std/N, having N*N elements
assert(np.isclose(difference_res.mean(), 0, atol=3 * diffe_res_std / N, rtol=0))
def test_uniformfill():
workspace.ResetWorkspace()
# Size of test matrix
N = 100
shape = [N, N]
net = core.Net("net")
net.UniformFill([], ["Y"], shape=shape, min=-2., max=2., name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# get caffe result
caffe_res = workspace.FetchBlob("Y")
# Elementwise difference of the two random matrixes
difference_res = caffe_res - f_result
# standard deviation of Difference Matrix
diffe_res_std = difference_res.std()
# testing can only be approximated, so sometimes can fail!!
# approach mimicking gaussian test, and this time the multiplier is set to 5
# to account for distorsion from gaussian
# if fails once try to re-run a couple of times to make sure there is a problem)
assert(np.isclose(difference_res.mean(), 0, atol=5 * diffe_res_std / N, rtol=0))
def test_uniformintfill():
    """Compare Caffe2's UniformIntFill against the ngraph import of the same net.

    The two sides sample independently, so the assertion is statistical.
    """
    workspace.ResetWorkspace()
    # Side length of the square test matrix.
    size = 100
    net = core.Net("net")
    net.UniformIntFill([], ["Y"], shape=[size, size], min=-2, max=2, name="Y")
    # Execute via Caffe2.
    workspace.RunNetOnce(net)
    # Import the Caffe2 network into ngraph and obtain the op handle.
    c2_importer = C2Importer()
    c2_importer.parse_net_def(net.Proto(), verbose=False)
    y_op = c2_importer.get_op_handle("Y")
    # Execute the imported graph.
    with ExecutorFactory() as factory:
        ng_result = factory.executor(y_op)()
    # Caffe2-side result.
    c2_result = workspace.FetchBlob("Y")
    # Elementwise difference of the two random matrices.
    residual = c2_result - ng_result
    # Standard deviation of the difference matrix.
    residual_std = residual.std()
    # Testing can only be approximate, so this may occasionally fail!!
    # The approach mimics the gaussian test; the multiplier is 8 here to
    # account for the distortion from a gaussian distribution.
    # If it fails once, re-run a couple of times to confirm a real problem.
    assert np.isclose(residual.mean(), 0, atol=8 * residual_std / size, rtol=0)
def test_xavierfill():
    """Compare Caffe2's XavierFill against the ngraph import of the same net.

    Both fills are random, so the comparison is statistical, not exact.
    """
    workspace.ResetWorkspace()
    # Side length of the square test matrix.
    size = 100
    net = core.Net("net")
    net.XavierFill([], ["Y"], shape=[size, size], name="Y")
    # Execute via Caffe2.
    workspace.RunNetOnce(net)
    # Import the Caffe2 network into ngraph and obtain the op handle.
    c2_importer = C2Importer()
    c2_importer.parse_net_def(net.Proto(), verbose=False)
    y_op = c2_importer.get_op_handle("Y")
    # Execute the imported graph.
    with ExecutorFactory() as factory:
        ng_result = factory.executor(y_op)()
    # Caffe2-side result.
    c2_result = workspace.FetchBlob("Y")
    # Elementwise difference of the two random matrices.
    residual = c2_result - ng_result
    # Standard deviation of the difference matrix.
    residual_std = residual.std()
    # Testing can only be approximate, so this may occasionally fail!!
    # The approach mimics the gaussian test.
    # If it fails once, re-run a couple of times to confirm a real problem.
    assert np.isclose(residual.mean(), 0, atol=3 * residual_std / size, rtol=0)
def test_giventensorfill():
    """GivenTensorFill is deterministic: ngraph must reproduce Caffe2 exactly."""
    workspace.ResetWorkspace()
    shape = [10, 10]
    expected = np.random.random(shape)
    net = core.Net("net")
    net.GivenTensorFill([], ["Y"], shape=shape, values=expected, name="Y")
    # Execute via Caffe2.
    workspace.RunNetOnce(net)
    # Import the Caffe2 network into ngraph and obtain the op handle.
    c2_importer = C2Importer()
    c2_importer.parse_net_def(net.Proto(), verbose=False)
    y_op = c2_importer.get_op_handle("Y")
    # Execute the imported graph.
    with ExecutorFactory() as factory:
        ng_result = factory.executor(y_op)()
    # ngraph must match Caffe2 exactly, and match the source data to float
    # tolerance (the fill stores the values as float32).
    assert np.ma.allequal(ng_result, workspace.FetchBlob("Y"))
    assert np.ma.allclose(ng_result, expected, atol=1e-6, rtol=0)
def test_giventensorintfill():
    """GivenTensorIntFill is deterministic: ngraph must reproduce Caffe2 exactly.

    Checks that the imported ngraph op returns the same integer tensor as
    both the Caffe2 execution and the source data.
    """
    workspace.ResetWorkspace()
    shape = [10, 10]
    # np.random.random_integers was deprecated in NumPy 1.11 and removed in
    # modern releases; randint's upper bound is exclusive, so use 101 to keep
    # the original inclusive range [-100, 100].
    data1 = np.random.randint(-100, 101, shape)
    net = core.Net("net")
    net.GivenTensorIntFill([], ["Y"], shape=shape, values=data1, name="Y")
    # Execute via Caffe2.
    workspace.RunNetOnce(net)
    # Import caffe2 network into ngraph.
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)
    # Get handle.
    f_ng = importer.get_op_handle("Y")
    # Execute.
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()
    # Compare Caffe2 and ngraph results; integers must match exactly.
    assert np.ma.allequal(f_result, workspace.FetchBlob("Y"))
    assert np.ma.allequal(f_result, data1)
| 29.881226
| 88
| 0.647134
| 1,040
| 7,799
| 4.743269
| 0.185577
| 0.024123
| 0.017839
| 0.018447
| 0.778026
| 0.768295
| 0.75674
| 0.749037
| 0.739307
| 0.71863
| 0
| 0.018657
| 0.230286
| 7,799
| 260
| 89
| 29.996154
| 0.803098
| 0.347993
| 0
| 0.743363
| 0
| 0
| 0.009782
| 0
| 0
| 0
| 0
| 0
| 0.088496
| 1
| 0.061947
| false
| 0
| 0.247788
| 0
| 0.309735
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6dda9dca07ec1377d3bd38e50bdabaae79a0dd40
| 46
|
py
|
Python
|
src/scrawl/moves/core.py
|
astromancer/graphical
|
2d72407c53967714953485dd52ad72e34e549ef5
|
[
"MIT"
] | null | null | null |
src/scrawl/moves/core.py
|
astromancer/graphical
|
2d72407c53967714953485dd52ad72e34e549ef5
|
[
"MIT"
] | null | null | null |
src/scrawl/moves/core.py
|
astromancer/graphical
|
2d72407c53967714953485dd52ad72e34e549ef5
|
[
"MIT"
] | null | null | null |
from matplotlib.offsetbox import DraggableBase
| 46
| 46
| 0.913043
| 5
| 46
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
61e1c5a955a2ebe49ed9824bbd8ba7695260bf14
| 48,542
|
py
|
Python
|
stv/generators/ispl/bridge.py
|
wp777/stv-compute
|
313b574c43ef87b629e70c25c38dbb7b24d1f130
|
[
"MIT"
] | 2
|
2021-07-11T09:52:59.000Z
|
2022-02-13T17:34:59.000Z
|
stv/generators/ispl/bridge.py
|
wp777/stv-compute
|
313b574c43ef87b629e70c25c38dbb7b24d1f130
|
[
"MIT"
] | 3
|
2020-07-26T13:49:59.000Z
|
2021-01-19T18:04:10.000Z
|
stv/generators/ispl/bridge.py
|
wp777/stv-compute
|
313b574c43ef87b629e70c25c38dbb7b24d1f130
|
[
"MIT"
] | null | null | null |
from stv.generators.ispl.ispl_generator import IsplGenerator
import itertools
import random
class BridgeModelIsplGenerator(IsplGenerator):
    """Generate an ISPL (MCMAS) specification for a simplified bridge card game.

    Four seats are modeled (S, W, N, E).  S, W and E are ISPL agents; the
    North hand is exposed through the Environment and is played on North's
    behalf by SPlayer.  Each ``_create_*`` method emits one section of the
    ISPL file as a plain string.

    Fix over the previous revision: the SecondTeamWin clause in
    ``_create_evaluation`` was a non-f-string referencing a mis-mangled
    attribute, so the literal text ``{self.__number_of_cards_in_hand}`` was
    emitted into the model instead of the hand size.
    """

    @property
    def card_names(self) -> [str]:
        # Ranks ordered from strongest (Ace) to weakest (two).
        return ["Ace", "King", "Queen", "Jack", "ten", "nine", "eight", "seven", "six", "five", "four", "three", "two"]

    @property
    def card_colors(self) -> [str]:
        # The four suits ("colors" in the model's terminology).
        return ["Spade", "Heart", "Diamond", "Club"]

    @property
    def player_names(self) -> [str]:
        # Seat identifiers in play order: South, West, North, East.
        return ["SPlayer", "WPlayer", "NPlayer", "EPlayer"]

    def __init__(self, number_of_cards, number_of_cards_in_hand, card_ordering=None):
        """Prepare the card tables used by all section generators.

        :param number_of_cards: number of ranks in play (4 * this cards total)
        :param number_of_cards_in_hand: cards dealt to each of the 4 players
        :param card_ordering: optional deal, given as indices into the card
            array; a random permutation is generated when omitted
        """
        super().__init__()
        if card_ordering is None:
            card_ordering = self.generate_random_card_array(4 * number_of_cards_in_hand)
        self._number_of_cards = number_of_cards
        self._number_of_cards_in_hand = number_of_cards_in_hand
        self._card_ordering = card_ordering
        self._cards = []
        self._available_cards = []
        self._cards_values = {}
        self._cards_colors = {}
        self._create_cards_array()
        self._create_available_cards_array()
        self._assign_cards_values()
        self._assign_cards_colors()

    def _create_cards_array(self) -> None:
        """Build the full card list, rank-major (AceSpade, AceHeart, ...)."""
        self._cards = []
        for card_name in self.card_names:
            for card_color in self.card_colors:
                self._cards.append(card_name + card_color)

    def _create_available_cards_array(self) -> None:
        """Restrict play to the strongest 4 * number_of_cards cards."""
        self._available_cards = []
        for j in range(0, 4 * self._number_of_cards):
            self._available_cards.append(self._cards[j])

    def _assign_cards_values(self) -> None:
        """Assign numeric strength to every card: Ace=13 down to two=1."""
        i = 0
        for card_value in range(0, 13):
            for j in range(0, 4):
                self._cards_values[self._cards[i]] = 13 - card_value
                i += 1

    def _assign_cards_colors(self) -> None:
        """Record the suit of every card (cards are rank-major, suits cycle)."""
        i = 0
        for _ in range(0, 13):
            for color in self.card_colors:
                self._cards_colors[self._cards[i]] = color
                i += 1

    def _create_agents(self) -> str:
        """Emit Agent sections for S, W and E; N lives in the Environment."""
        agents = ""
        players_ids = [0, 1, 3]
        for player_id in players_ids:
            agents += self._create_player(player_id)
        return agents

    def _define_semantics(self) -> str:
        """Use SingleAssignment semantics for this variant of the model."""
        semantics = "Semantics=SingleAssignment;\n\n"
        return semantics

    def _create_environment_obsvars(self) -> str:
        """Emit the Environment's observable variables (scores, clock, table)."""
        obsvars = "\tObsvars:\n" \
                  f"\t\tfirstTeamScore: 0..{self._number_of_cards_in_hand};\n" \
                  f"\t\tsecondTeamScore: 0..{self._number_of_cards_in_hand};\n" \
                  f"\t\tbeginningPlayer: 0..3;\n" \
                  f"\t\tcurrentPlayer: 0..4;\n" \
                  f"\t\tclock: 0..4;\n"
        obsvars += self._create_env_player_cards_obsvars()
        obsvars += self._create_env_n_cards_obsvars()
        obsvars += self._create_env_history_cards_obsvars()
        obsvars += "\t\tsuit: {Spade, Heart, Diamond, Club, None};\n"
        obsvars += self._create_env_has_color_obsvars()
        obsvars += "\tend Obsvars\n"
        return obsvars

    def _create_env_player_cards_obsvars(self) -> str:
        """One <player>Card variable per seat: the card currently on the table."""
        obsvars = ""
        for player in self.player_names:
            obsvars += f"\t\t{player}Card: {{"
            for j in range(0, 4 * self._number_of_cards):
                obsvars += f"{self._cards[j]}, "
            obsvars += "None};\n"
        return obsvars

    def _create_env_n_cards_obsvars(self) -> str:
        """cardN<i> variables: North's hand, visible through the Environment."""
        obsvars = ""
        for i in range(1, self._number_of_cards_in_hand + 1):
            obsvars += f"\t\tcardN{i}: {{"
            for j in range(0, 4 * self._number_of_cards):
                obsvars += f"N{self._cards[j]}, "
            obsvars += "None};\n"
        return obsvars

    def _create_env_history_cards_obsvars(self) -> str:
        """One boolean <card>H per card: whether it has already been played."""
        obsvars = ""
        for i in range(0, self._number_of_cards * 4):
            obsvars += f"\t\t{self._cards[i]}H: boolean;\n"
        return obsvars

    def _create_env_has_color_obsvars(self) -> str:
        """has<Color> counters for North's hand, used for follow-suit checks."""
        obsvars = ""
        for color in self.card_colors:
            obsvars += f"\t\thas{color}: 0..{self._number_of_cards_in_hand};\n"
        return obsvars

    def _create_environment_vars(self) -> str:
        """Emit the Environment's private variables."""
        vars_block = "\tVars:\n" \
                     "\t\tsmc: 0..1;\n" \
                     "\tend Vars\n"
        return vars_block

    def _create_environment_actions(self) -> str:
        """The Environment itself performs no real action."""
        actions = "\tActions = {none};\n"
        return actions

    def _create_environment_protocol(self) -> str:
        """The Environment always selects its single no-op action."""
        protocol = "\tProtocol:\n" \
                   "\t\tOther:{none};\n" \
                   "\tend Protocol\n"
        return protocol

    def _create_environment_evolution(self) -> str:
        """Assemble the Environment's full Evolution section."""
        evolution = "\tEvolution:\n"
        evolution += self._create_env_first_team_score_evolution()
        evolution += self._create_env_second_team_score_evolution()
        evolution += self._create_env_beginning_player_evolution()
        evolution += self._create_env_current_player_evolution()
        evolution += self._create_env_clock_evolution()
        evolution += self._create_env_player_cards_evolution()
        evolution += self._create_env_suit_evolution()
        evolution += self._create_env_history_evolution()
        evolution += self._create_env_n_cards_evolution()
        evolution += self._create_env_has_color_evolution()
        evolution += "\tend Evolution\n"
        return evolution

    def _create_env_first_team_score_evolution(self) -> str:
        """Score for team S/N: enumerate all tricks won by seat 0 or 2."""
        evolution = "\t\tfirstTeamScore=firstTeamScore+1 if\n"
        for combination in itertools.permutations(self._available_cards, 4):
            for beginning_player in range(0, 4):
                # Determine the trick winner: highest card in the led suit.
                winning_player_number = beginning_player
                for i in range(0, 4):
                    if i == beginning_player:
                        continue
                    if self._cards_colors[combination[i]] == self._cards_colors[
                            combination[winning_player_number]]:
                        if self._cards_values[combination[i]] > self._cards_values[
                                combination[winning_player_number]]:
                            winning_player_number = i
                if not (winning_player_number == 0 or winning_player_number == 2):
                    continue
                evolution += "\t\t\t(\n"
                for player in range(0, 4):
                    evolution += f"\t\t\t\t{self.player_names[player]}Card={combination[player]} and\n"
                evolution += f"\t\t\t\tbeginningPlayer={beginning_player}) or\n"
        # Drop the dangling " or\n" left by the last clause.
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
        return evolution

    def _create_env_second_team_score_evolution(self) -> str:
        """Score for team W/E: enumerate all tricks won by seat 1 or 3."""
        evolution = "\t\tsecondTeamScore=secondTeamScore+1 if\n"
        for combination in itertools.permutations(self._available_cards, 4):
            for beginning_player in range(0, 4):
                # Determine the trick winner: highest card in the led suit.
                winning_player_number = beginning_player
                for i in range(0, 4):
                    if i == beginning_player:
                        continue
                    if self._cards_colors[combination[i]] == self._cards_colors[
                            combination[winning_player_number]]:
                        if self._cards_values[combination[i]] > self._cards_values[
                                combination[winning_player_number]]:
                            winning_player_number = i
                if not (winning_player_number == 1 or winning_player_number == 3):
                    continue
                evolution += "\t\t\t(\n"
                for player in range(0, 4):
                    evolution += f"\t\t\t\t{self.player_names[player]}Card={combination[player]} and\n"
                evolution += f"\t\t\t\tbeginningPlayer={beginning_player}) or\n"
        evolution = evolution.rstrip("\nro ")
        evolution += ";\n"
        return evolution

    def _create_env_beginning_player_evolution(self) -> str:
        """The winner of a trick leads the next one."""
        evolution = ""
        for winning_player in range(0, 4):
            evolution += f"\t\tbeginningPlayer={winning_player} if\n"
            for combination in itertools.permutations(self._available_cards, 4):
                for beginning_player in range(0, 4):
                    winning_player_number = beginning_player
                    for i in range(0, 4):
                        if i == beginning_player:
                            continue
                        if self._cards_colors[combination[i]] == self._cards_colors[
                                combination[winning_player_number]]:
                            if self._cards_values[combination[i]] > self._cards_values[
                                    combination[winning_player_number]]:
                                winning_player_number = i
                    if not (winning_player_number == winning_player):
                        continue
                    evolution += "\t\t\t(\n"
                    for player in range(0, 4):
                        evolution += f"\t\t\t\t{self.player_names[player]}Card={combination[player]} and\n"
                    evolution += f"\t\t\t\tbeginningPlayer={beginning_player}) or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ";\n"
        return evolution

    def _create_env_current_player_evolution(self) -> str:
        """currentPlayer moves to the trick winner, or just advances in turn."""
        evolution = ""
        for winning_player in range(0, 4):
            evolution += f"\t\tcurrentPlayer={winning_player} if\n"
            for combination in itertools.permutations(self._available_cards, 4):
                for beginning_player in range(0, 4):
                    winning_player_number = beginning_player
                    for i in range(0, 4):
                        if i == beginning_player:
                            continue
                        if self._cards_colors[combination[i]] == self._cards_colors[
                                combination[winning_player_number]]:
                            if self._cards_values[combination[i]] > self._cards_values[
                                    combination[winning_player_number]]:
                                winning_player_number = i
                    if not (winning_player_number == winning_player):
                        continue
                    evolution += "\t\t\t(\n"
                    for player in range(0, 4):
                        evolution += f"\t\t\t\t{self.player_names[player]}Card={combination[player]} and\n"
                    evolution += f"\t\t\t\tbeginningPlayer={beginning_player}) or\n"
            # Mid-trick: the seat after the previous player takes the turn.
            previous_player = winning_player - 1
            if previous_player == -1:
                previous_player = 3
            evolution += f"\t\t\t(currentPlayer={previous_player} and clock<4);\n"
        return evolution

    def _create_env_clock_evolution(self) -> str:
        """Advance the trick clock and clear the table after the fourth card."""
        evolution = "\t\tsuit=None if clock=4;\n" \
                    "\t\tclock=0 if clock=4;\n"
        for clock in range(1, 5):
            evolution += f"\t\tclock={clock} if clock={clock - 1};\n"
        for player in self.player_names:
            evolution += f"\t\t{player}Card=None if clock=4;\n"
        return evolution

    def _create_env_player_cards_evolution(self) -> str:
        """Place a card on the table when the matching Play action fires."""
        evolution = ""
        for i in range(0, self._number_of_cards * 4):
            card = self._cards[i]
            for player_number in range(0, 4):
                player = self.player_names[player_number]
                if player == self.player_names[2]:
                    # North's card is played on his behalf by SPlayer.
                    evolution += f"\t\t{player}Card={card} if {self.player_names[0]}.Action=Play{card} " \
                                 f"and currentPlayer=2;\n"
                else:
                    evolution += f"\t\t{player}Card={card} if {player}.Action=Play{card} and " \
                                 f"currentPlayer={player_number};\n"
        return evolution

    def _create_env_suit_evolution(self) -> str:
        """The first card of a trick (clock=0) sets the suit to follow."""
        evolution = ""
        for color in self.card_colors:
            evolution += f"\t\tsuit={color} if clock=0 and (\n"
            for i in range(0, self._number_of_cards * 4):
                card = self._cards[i]
                if self._cards_colors[card] != color:
                    continue
                for player in self.player_names:
                    if player == self.player_names[2]:
                        continue
                    evolution += f"\t\t\t{player}.Action=Play{card} or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ");\n"
        return evolution

    def _create_env_history_evolution(self) -> str:
        """Mark a card as played (<card>H=true) once any agent plays it."""
        evolution = ""
        for i in range(0, self._number_of_cards * 4):
            card = self._cards[i]
            evolution += f"\t\t{card}H=true if\n"
            for player in self.player_names:
                if player == self.player_names[2]:
                    continue
                evolution += f"\t\t\t{player}.Action=Play{card} or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ";\n"
        return evolution

    def _create_env_n_cards_evolution(self) -> str:
        """Remove a card from North's hand when SPlayer plays it for North."""
        evolution = ""
        for i in range(0, self._number_of_cards * 4):
            card = self._cards[i]
            for j in range(1, self._number_of_cards_in_hand + 1):
                evolution += f"\t\tcardN{j}=None if {self.player_names[0]}.Action=Play{card} and cardN{j}=N{card};\n"
        return evolution

    def _create_env_has_color_evolution(self) -> str:
        """Decrement North's has<Color> counter when a card of that suit leaves."""
        evolution = ""
        for color in self.card_colors:
            evolution += f"\t\thas{color}=has{color}+-1 if (\n"
            for i in range(0, self._number_of_cards * 4):
                card = self._cards[i]
                if self._cards_colors[card] != color:
                    continue
                evolution += f"\t\t\t({self.player_names[0]}.Action=Play{card} and ("
                for j in range(1, self._number_of_cards_in_hand + 1):
                    evolution += f"cardN{j}=N{card} or "
                evolution = evolution.rstrip(" ro ")
                evolution += ")) or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ");\n"
        return evolution

    def _create_player(self, player_number) -> str:
        """Emit one full Agent section for the given seat."""
        player = f"Agent {self.player_names[player_number]}\n"
        player += self._create_player_vars(player_number)
        player += self._create_player_actions()
        player += self._create_player_protocol(player_number)
        player += self._create_player_evolution(player_number)
        player += "end Agent\n\n"
        return player

    def _create_player_lobsvars(self) -> str:
        """Emit a Lobsvars clause exposing ThirdPlayer's hand (currently unused)."""
        lobsvars = "\tLobsvars = {"
        for i in range(1, self._number_of_cards_in_hand + 1):
            lobsvars += f"ThirdPlayer.card{i}"
            if i != self._number_of_cards_in_hand:
                lobsvars += ", "
        lobsvars += "};\n"
        return lobsvars

    def _create_player_vars(self, player_number) -> str:
        """Per-agent hand slots (<initial>card<i>) and suit counters."""
        vars_block = "\tVars:\n"
        for i in range(1, self._number_of_cards_in_hand + 1):
            vars_block += f"\t\t{self.player_names[player_number][0]}card{i}: {{"
            for j in range(0, 4 * self._number_of_cards):
                vars_block += f"{self.player_names[player_number][0]}{self._cards[j]}, "
            vars_block += "None};\n"
        for color in self.card_colors:
            vars_block += f"\t\thas{color}: 0..{self._number_of_cards_in_hand};\n"
        vars_block += "\tend Vars\n"
        return vars_block

    def _create_player_actions(self) -> str:
        """Every agent can play any card, or Wait."""
        actions = "\tActions = {"
        for i in range(0, 4 * self._number_of_cards):
            actions += f"Play{self._cards[i]}, "
        actions += "Wait};\n"
        return actions

    def _create_player_protocol(self, player_number) -> str:
        """Legal moves: follow suit when possible; SPlayer also plays for North."""
        protocol = "\tProtocol:\n"
        for i in range(1, self._number_of_cards_in_hand + 1):
            for j in range(0, 4 * self._number_of_cards):
                protocol += f"\t\t{self.player_names[player_number][0]}card{i}=" \
                            f"{self.player_names[player_number][0]}{self._cards[j]} and " \
                            f"Environment.currentPlayer={player_number} and Environment.clock<4 and " \
                            f"(Environment.suit=None or Environment.suit={self._cards_colors[self._cards[j]]} or " \
                            f"((hasSpade<=0 and Environment.suit=Spade) or (hasClub<=0 and " \
                            f"Environment.suit=Club) or (hasDiamond<=0 and Environment.suit=Diamond) or " \
                            f"(hasHeart<=0 and Environment.suit=Heart))): {{Play" + \
                            self._cards[j] + "};\n"
        if player_number == 0:
            # SPlayer additionally plays North's cards when currentPlayer=2.
            for i in range(1, self._number_of_cards_in_hand + 1):
                for j in range(0, 4 * self._number_of_cards):
                    protocol += "\t\tEnvironment.cardN" + str(i) + "=N"
                    protocol += f"{self._cards[j]} and Environment.currentPlayer=2 and " \
                                "Environment.clock<4 and " \
                                "(Environment.suit=None or Environment.suit=" \
                                f"{self._cards_colors[self._cards[j]]} or " \
                                "((Environment.hasSpade<=0 and Environment.suit=Spade) or " \
                                "(Environment.hasClub<=0 and Environment.suit=Club) or " \
                                "(Environment.hasDiamond<=0 and Environment.suit=Diamond) or " \
                                "(Environment.hasHeart<=0 and Environment.suit=Heart))): " \
                                f"{{Play{self._cards[j]}}};\n"
        if player_number != 0:
            protocol += f"\t\t!(Environment.currentPlayer={player_number}) or Environment.clock=4: " \
                        f"{{Wait}};\n"
        else:
            protocol += f"\t\t(!(Environment.currentPlayer={player_number}) and " \
                        f"!(Environment.currentPlayer=2)) or Environment.clock=4: {{Wait}};\n"
        protocol += "\tend Protocol\n"
        return protocol

    def _create_player_evolution(self, player_number) -> str:
        """Clear played cards from the hand and decrement the suit counters."""
        evolution = "\tEvolution:\n"
        for i in range(1, self._number_of_cards_in_hand + 1):
            evolution += f"\t\t{self.player_names[player_number][0]}card{i}=None if\n"
            for j in range(0, 4 * self._number_of_cards):
                card = self._cards[j]
                evolution += f"\t\t\t({self.player_names[player_number][0]}card{i}=" \
                             f"{self.player_names[player_number][0]}{card} and Action=Play{card}) or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ";\n"
        for color in self.card_colors:
            evolution += f"\t\thas{color}=has{color}+-1 if\n"
            for i in range(1, self._number_of_cards_in_hand + 1):
                for j in range(0, 4 * self._number_of_cards):
                    card = self._cards[j]
                    if self._cards_colors[card] != color:
                        continue
                    evolution += f"\t\t\t({self.player_names[player_number][0]}card{i}=" \
                                 f"{self.player_names[player_number][0]}{card} and Action=Play{card}) or\n"
            evolution = evolution.rstrip("\nro ")
            evolution += ";\n"
        evolution += "\tend Evolution\n"
        return evolution

    def _create_evaluation(self) -> str:
        """Define FirstTeamWin/SecondTeamWin once all tricks have been played.

        Bug fix: the SecondTeamWin line previously lacked the f-string prefix
        and referenced ``self.__number_of_cards_in_hand`` (a name-mangled,
        nonexistent attribute), emitting the placeholder text literally.
        """
        evaluation = "Evaluation\n" \
                     "\tFirstTeamWin if Environment.firstTeamScore>Environment.secondTeamScore and " \
                     f"Environment.firstTeamScore+Environment.secondTeamScore={self._number_of_cards_in_hand};\n" \
                     "\tSecondTeamWin if Environment.firstTeamScore<Environment.secondTeamScore and " \
                     f"Environment.firstTeamScore+Environment.secondTeamScore={self._number_of_cards_in_hand};\n" \
                     "end Evaluation\n\n"
        return evaluation

    def _create_init_states(self) -> str:
        """Enumerate initial states: every split of the W/E cards between W and E.

        S's and N's hands are fixed by the deal; only the opponents' cards are
        unknown, so one initial state is emitted per way of choosing W's hand.
        """
        init_states = "InitStates\n"
        oponents_cards = []
        for k in range(self._number_of_cards_in_hand, self._number_of_cards_in_hand * 2):
            oponents_cards.append(self._card_ordering[k])
        for k in range(self._number_of_cards_in_hand * 3, self._number_of_cards_in_hand * 4):
            oponents_cards.append(self._card_ordering[k])
        oponents_cards.sort()
        number_of_beginning_states = 0
        for combination in itertools.combinations(oponents_cards, self._number_of_cards_in_hand):
            second_player_cards = combination
            fourth_player_cards = oponents_cards[:]
            for card in second_player_cards:
                fourth_player_cards.remove(card)
            # Rebuild the deal with this W/E split.
            new_card_ordering = self._card_ordering[:]
            i = 0
            for k in range(self._number_of_cards_in_hand, self._number_of_cards_in_hand * 2):
                new_card_ordering[k] = second_player_cards[i]
                i += 1
            i = 0
            for k in range(self._number_of_cards_in_hand * 3, self._number_of_cards_in_hand * 4):
                new_card_ordering[k] = fourth_player_cards[i]
                i += 1
            init_states += "\t(Environment.smc=0 and Environment.firstTeamScore=0 and " \
                           "Environment.secondTeamScore=0 and Environment.beginningPlayer=0 and " \
                           "Environment.currentPlayer=0 and Environment.clock=0 and " \
                           "Environment.SPlayerCard=None and Environment.WPlayerCard=None and " \
                           "Environment.NPlayerCard=None and Environment.EPlayerCard=None and " \
                           "Environment.suit=None"
            # Count cards of each suit per hand for the has<Color> counters.
            colors_count = {}
            i = 0
            for player in self.player_names:
                colors_count[player] = {}
                for color in self.card_colors:
                    colors_count[player][color] = 0
                for j in range(1, self._number_of_cards_in_hand + 1):
                    colors_count[player][self._cards_colors[self._cards[new_card_ordering[i]]]] += 1
                    i += 1
            i = 0
            for player in self.player_names:
                for color in self.card_colors:
                    if player == "NPlayer":
                        init_states += f" and Environment.has{color}={colors_count[player][color]}"
                    else:
                        init_states += f" and {player}.has{color}={colors_count[player][color]}"
            for player in self.player_names:
                for j in range(1, self._number_of_cards_in_hand + 1):
                    if player == "NPlayer":
                        init_states += f" and Environment.cardN{j}=N{self._cards[new_card_ordering[i]]}"
                    else:
                        init_states += f" and {player}.{player[0]}card{j}=" \
                                       f"{player[0]}{self._cards[new_card_ordering[i]]}"
                    i += 1
            for j in range(0, self._number_of_cards * 4):
                init_states += f" and Environment.{self._cards[j]}H=false"
            init_states += ") or\n"
            number_of_beginning_states += 1
        print(f"Number of beginning states: {number_of_beginning_states}")
        init_states = init_states.rstrip("\nro ")
        init_states += ";\nend InitStates\n\n"
        return init_states

    def _create_groups(self) -> str:
        """The coalition checked by the formulae: SPlayer alone."""
        groups = "Groups\n" \
                 "\tg1={SPlayer};\n" \
                 "end Groups\n\n"
        return groups

    def _create_formulae(self) -> str:
        """Property to check: can g1 force a first-team win."""
        formulae = "Formulae\n" \
                   "\t<g1>F FirstTeamWin;\n" \
                   "end Formulae\n\n"
        return formulae

    @staticmethod
    def generate_random_card_array(length: int) -> [int]:
        """Return a random permutation of 0..length-1 (rejection sampling)."""
        array = []
        used = []
        for i in range(0, length):
            used.append(False)
        for i in range(0, length):
            number = random.randrange(length)
            # Re-draw until an unused index is found.
            while used[number]:
                number = random.randrange(length)
            array.append(number)
            used[number] = True
        return array
class AbsentMindedBridgeModelIsplGenerator(BridgeModelIsplGenerator):
def __init__(self, number_of_cards, number_of_cards_in_hand, card_ordering=None):
super().__init__(number_of_cards, number_of_cards_in_hand, card_ordering)
def _define_semantics(self) -> str:
semantics = "Semantics=MultiAssignment;\n\n"
return semantics
def _create_environment_obsvars(self):
obsvars = "\tObsvars:\n" \
f"\t\tfirstTeamScore: 0..{self._number_of_cards_in_hand};\n" \
f"\t\tsecondTeamScore: 0..{self._number_of_cards_in_hand};\n" \
"\t\tbeginningPlayer: 0..3;\n" \
"\t\tcurrentPlayer: 0..4;\n" \
"\t\tclock: 0..5;\n"
obsvars += self._create_env_player_cards_obsvars()
obsvars += self._create_env_n_cards_obsvars()
obsvars += self._create_env_history_cards_obsvars()
obsvars += "\tend Obsvars\n"
return obsvars
def _create_env_player_cards_obsvars(self) -> str:
obsvars = f"\t\t{self.player_names[0]}Card: {{"
for j in range(0, 4 * self._number_of_cards):
obsvars += f"{self._cards[j]}, "
obsvars += "None};\n"
return obsvars
def _create_env_n_cards_obsvars(self) -> str:
obsvars = ""
for i in range(1, self._number_of_cards_in_hand + 1):
obsvars += f"\t\tcardN{i}: {{"
for j in range(0, 4 * self._number_of_cards):
obsvars += f"{self._cards[j]}, "
obsvars += "None};\n"
return obsvars
def _create_environment_vars(self):
vars = "\tVars:\n"
vars += self._create_env_card_vars()
vars += self._create_env_n_card_vars()
vars += "\t\tsuit: {Spade, Heart, Diamond, Club, None};\n" \
"\tend Vars\n"
return vars
def _create_env_card_vars(self) -> str:
vars = ""
for player in self.player_names:
if player == self.player_names[0]:
continue
vars += f"\t\t{player}Card: {{"
for j in range(0, 4 * self._number_of_cards):
vars += f"{self._cards[j]}, "
vars += "None};\n"
return vars
def _create_env_n_card_vars(self) -> str:
vars = ""
for i in range(1, self._number_of_cards_in_hand + 1):
vars += f"\t\tcurrentCardN{i}: {{"
for j in range(0, 4 * self._number_of_cards):
vars += f"{self._cards[j]}, "
vars += "None};\n"
return vars
def _create_environment_evolution(self) -> str:
evolution = "\tEvolution:\n"
for winning_player in range(0, 4):
if winning_player % 2 == 0:
evolution += "\t\tfirstTeamScore=firstTeamScore+1"
else:
evolution += "\t\tsecondTeamScore=secondTeamScore+1"
evolution += f" and beginningPlayer={winning_player}" \
f" and clock=0 and suit=None and currentPlayer={winning_player}"
for player in self.player_names:
evolution += f" and {player}Card=None"
for j in range(1, self._number_of_cards_in_hand + 1):
evolution += f" and cardN{j}=currentCardN{j}"
evolution += " if\n"
add_or = False
for combination in itertools.permutations(self._available_cards, 4):
for beginning_player in range(0, 4):
winning_player_number = beginning_player
for i in range(0, 4):
if i == beginning_player:
continue
if self._cards_colors[combination[i]] == self._cards_colors[combination[winning_player_number]]:
if self._cards_values[combination[i]] > self._cards_values[
combination[winning_player_number]]:
winning_player_number = i
if not (winning_player_number == winning_player):
continue
if add_or:
evolution += " or\n"
else:
add_or = True
evolution += "\t\t\t("
for player in range(0, 4):
evolution += f"{self.player_names[player]}Card={combination[player]} and "
evolution += f"beginningPlayer={beginning_player} and clock>=4)"
evolution += ";\n"
for i in range(0, self._number_of_cards * 4):
card = self._cards[i]
# Player S plays
evolution += f"\t\tcurrentPlayer=1 and clock=clock+1 and SPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=0 and clock<4 and clock>0 and SPlayer.Action=Play{card};\n" \
f"\t\tcurrentPlayer=1 and clock=clock+1 and SPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=0 and clock<4 and clock=0 and SPlayer.Action=Play{card};\n"
# Player S should play, but play Player N card
for j in range(1, self._number_of_cards_in_hand + 1):
evolution += f"\t\tNPlayerCard={card} and {card}H=true and currentCardN{j}=None if\n" \
f"\t\t\tcurrentPlayer=0 and clock<4 and SPlayer.Action=PlayN{card} and " \
f"currentCardN{j}={card} and NPlayerCard=None;\n"
# Player W plays, Player S Wait
evolution += f"\t\tcurrentPlayer=2 and clock=clock+1 and WPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=1 and clock>0 and WPlayer.Action=Play{card} and SPlayer.Action=Wait and NPlayerCard=None;\n" \
f"\t\tcurrentPlayer=2 and clock=clock+1 and WPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and SPlayer.Action=Wait and NPlayerCard=None;\n" \
f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=1 and clock>0 and WPlayer.Action=Play{card} and SPlayer.Action=Wait and !(NPlayerCard=None);\n" \
f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and SPlayer.Action=Wait and !(NPlayerCard=None);\n"
# Player W plays, Player S Play his card
for i2 in range(0, self._number_of_cards * 4):
card2 = self._cards[i2]
if card == card2:
continue
evolution += f"\t\tcurrentPlayer=2 and clock=clock+1 and WPlayerCard={card} and {card}H=true" \
f" and SPlayerCard={card2} and {card2}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and SPlayer.Action=Play{card2} and NPlayerCard=None;\n" \
f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true" \
f" and SPlayerCard={card2} and {card2}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and SPlayer.Action=Play{card2} and !(NPlayerCard=None);\n"
# Player W plays, Player S Play N card
for i2 in range(0, self._number_of_cards * 4):
card2 = self._cards[i2]
if card == card2:
continue
for j in range(1, self._number_of_cards_in_hand + 1):
evolution += f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true" \
f" and NPlayerCard={card2} and {card2}H=true" \
f" and currentCardN{j}=None if\n" \
f"\t\t\tcurrentPlayer=1 and clock>0 and WPlayer.Action=Play{card} " \
f"and SPlayer.Action=PlayN{card2} and NPlayerCard=None" \
f" and currentCardN{j}={card2};\n" \
f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true" \
f" and NPlayerCard={card2} and {card2}H=true" \
f" and currentCardN{j}=None" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and " \
f"SPlayer.Action=PlayN{card2} and NPlayerCard=None" \
f" and currentCardN{j}={card2};\n"
evolution += f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=1 and clock>0 and WPlayer.Action=Play{card} and SPlayer.Action=PlayN{card2} " \
f"and !(NPlayerCard=None);\n" \
f"\t\tcurrentPlayer=3 and clock=clock+2 and WPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=1 and clock=0 and WPlayer.Action=Play{card} and SPlayer.Action=PlayN{card2} " \
f"and !(NPlayerCard=None);\n"
# Player N Plays
for j in range(1, self._number_of_cards_in_hand + 1):
evolution += f"\t\tcurrentPlayer=3 and clock=clock+1 and NPlayerCard={card} and {card}H=true " \
f"and currentCardN{j}=None if\n" \
f"\t\t\tcurrentPlayer=2 and clock>0 and clock<4 and SPlayer.Action=PlayN{card} " \
f"and currentCardN{j}={card} and NPlayerCard=None;\n" \
f"\t\tcurrentPlayer=3 and clock=clock+1 and NPlayerCard={card} and {card}H=true and " \
f"currentCardN{j}=None and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=2 and clock=0 and SPlayer.Action=PlayN{card} and " \
f"currentCardN{j}={card} and NPlayerCard=None;\n"
# Player N should Play, Player S play his own card
evolution += f"\t\tSPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=2 and clock<4 and SPlayer.Action=Play{card};\n"
# Player E Plays, Player S Wait
evolution += f"\t\tcurrentPlayer=0 and clock=clock+1 and EPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=3 and clock>0 and EPlayer.Action=Play{card} and SPlayer.Action=Wait " \
f"and SPlayerCard=None;\n" \
f"\t\tcurrentPlayer=0 and clock=clock+1 and EPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=3 and clock=0 and EPlayer.Action=Play{card} and SPlayer.Action=Wait " \
f"and SPlayerCard=None;\n" \
f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=3 and clock>0 and EPlayer.Action=Play{card} and SPlayer.Action=Wait " \
f"and !(SPlayerCard=None);\n" \
f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=3 and clock=0 and EPlayer.Action=Play{card} and SPlayer.Action=Wait and !(SPlayerCard=None);\n"
# Player E Plays, Player S Play his card
for i2 in range(0, self._number_of_cards * 4):
card2 = self._cards[i2]
if card == card2:
continue
evolution += f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true" \
f" and SPlayerCard={card2} and {card2}H=true" \
f" and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=3 and clock=0 and EPlayer.Action=Play{card} and SPlayer.Action=Play{card2};\n" \
f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true" \
f" and SPlayerCard={card2} and {card2}H=true if\n" \
f"\t\t\tcurrentPlayer=3 and clock>0 and EPlayer.Action=Play{card} and SPlayer.Action=Play{card2};\n"
# Player E Plays, Player S Play N card
for i2 in range(0, self._number_of_cards * 4):
card2 = self._cards[i2]
if card == card2:
continue
for j in range(1, self._number_of_cards_in_hand + 1):
evolution += f"\t\tcurrentPlayer=0 and clock=clock+1 and EPlayerCard={card} and {card}H=true " \
f"and NPlayerCard={card2} and {card2}H=true and currentCardN{j}=None " \
f"and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=3 and clock=0 and EPlayer.Action=Play{card} " \
f"and SPlayer.Action=PlayN{card2} and NPlayerCard=None " \
f"and currentCardN{j}={card2} and SPlayerCard=None;\n" \
f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true " \
f"and NPlayerCard={card2} and {card2}H=true " \
f"and currentCardN{j}=None and suit={self._cards_colors[card]} if\n" \
f"\t\t\tcurrentPlayer=3 and clock=0 and EPlayer.Action=Play{card} " \
f"and SPlayer.Action=PlayN{card2} and NPlayerCard=None and currentCardN{j}={card2} " \
f"and !(SPlayerCard=None);\n"
evolution += f"\t\tcurrentPlayer=0 and clock=clock+1 and EPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=3 and clock>0 and EPlayer.Action=Play{card} and SPlayer.Action=PlayN{card2} " \
f"and !(NPlayerCard=None) and SPlayerCard=None;\n" \
f"\t\tcurrentPlayer=1 and clock=clock+2 and EPlayerCard={card} and {card}H=true if\n" \
f"\t\t\tcurrentPlayer=3 and clock>0 and EPlayer.Action=Play{card} and SPlayer.Action=PlayN{card2} " \
f"and !(NPlayerCard=None) and !(SPlayerCard=None);\n"
evolution += "\tend Evolution\n"
return evolution
def _create_player(self, player_number) -> str:
player = f"Agent {self.player_names[player_number]}\n"
if player_number != 0:
player += self._create_player_lobsvars()
player += self._create_player_vars(player_number)
player += self._create_player_actions(player_number)
player += self._create_player_protocol(player_number)
player += self._create_player_evolution(player_number)
player += "end Agent\n\n"
return player
def _create_player_lobsvars(self) -> str:
lobsvars = "\tLobsvars = {"
for player in self.player_names:
if player == self.player_names[0]:
continue
lobsvars += f"{player}Card, "
for i in range(1, self._number_of_cards_in_hand + 1):
lobsvars += f"currentCardN{i}, "
lobsvars += "suit};\n"
return lobsvars
def _create_player_vars(self, player_number: int) -> str:
vars = "\tVars:\n"
for i in range(1, self._number_of_cards_in_hand + 1):
vars += f"\t\tcard{i}: {{"
for j in range(0, 4 * self._number_of_cards):
vars += f"{self._cards[j]}, "
vars += "None};\n"
if player_number != 0:
for color in self.card_colors:
vars += f"\t\thas{color}: 0..{self._number_of_cards_in_hand};\n"
vars += "\tend Vars\n"
return vars
def _create_player_actions(self, player_number: int) -> str:
actions = "\tActions = {"
for i in range(0, 4 * self._number_of_cards):
actions += f"Play{self._cards[i]}, "
if player_number == 0:
for i in range(0, 4 * self._number_of_cards):
actions += f"PlayN{self._cards[i]}, "
actions += "Wait};\n"
return actions
def _create_player_protocol(self, player_number: int) -> str:
protocol = "\tProtocol:\n"
for i in range(1, self._number_of_cards_in_hand + 1):
for j in range(0, 4 * self._number_of_cards):
protocol += f"\t\tcard{i}={self._cards[j]}"
if player_number != 0:
protocol += f" and Environment.currentPlayer={player_number} and Environment.clock<4 and " \
f"(Environment.suit=None or Environment.suit={self._cards_colors[self._cards[j]]} " \
f"or ((hasSpade<=0 and Environment.suit=Spade) or " \
f"(hasClub<=0 and Environment.suit=Club) or (hasDiamond<=0 and " \
f"Environment.suit=Diamond) or (hasHeart<=0 and Environment.suit=Heart))):"
else:
protocol += " and Environment.SPlayerCard=None:"
protocol += f" {{Play{self._cards[j]}"
if player_number == 0:
protocol += ", Wait"
protocol += "};\n"
if player_number == 0:
for i in range(1, self._number_of_cards_in_hand + 1):
for j in range(0, 4 * self._number_of_cards):
protocol += f"\t\tEnvironment.cardN{i}={self._cards[j]}: {{PlayN{self._cards[j]}, Wait}};\n"
protocol += "\t\tOther: {Wait};\n" \
"\tend Protocol\n"
return protocol
def _create_player_evolution(self, player_number: int) -> str:
evolution = "\tEvolution:\n"
for i in range(1, self._number_of_cards_in_hand + 1):
for j in range(0, 4 * self._number_of_cards):
evolution += f"\t\tcard{i}=None"
if player_number != 0:
evolution += f" and has{self._cards_colors[self._cards[j]]}=" \
f"has{self._cards_colors[self._cards[j]]}-1"
evolution += f" if card{i}={self._cards[j]} and Action=Play{self._cards[j]}"
if player_number == 0:
evolution += " and Environment.SPlayerCard=None;\n"
else:
evolution += f" and Environment.currentPlayer={player_number};\n"
evolution += "\tend Evolution\n"
return evolution
def _create_evaluation(self) -> str:
evaulation = f"Evaluation\n" \
f"\tFirstTeamWin if Environment.firstTeamScore>Environment.secondTeamScore " \
f"and Environment.firstTeamScore+Environment.secondTeamScore={self._number_of_cards_in_hand};\n" \
f"\tSecondTeamWin if Environment.firstTeamScore<Environment.secondTeamScore and " \
f"Environment.firstTeamScore+Environment.secondTeamScore={self._number_of_cards_in_hand};\n" \
f"end Evaluation\n\n"
return evaulation
def _create_init_states(self) -> str:
init_states = "InitStates\n"
oponents_cards = []
for k in range(self._number_of_cards_in_hand, self._number_of_cards_in_hand * 2):
oponents_cards.append(self._card_ordering[k])
for k in range(self._number_of_cards_in_hand * 3, self._number_of_cards_in_hand * 4):
oponents_cards.append(self._card_ordering[k])
oponents_cards.sort()
number_of_beginning_states = 0
for combination in itertools.combinations(oponents_cards, self._number_of_cards_in_hand):
second_player_cards = combination
fourth_player_cards = oponents_cards[:]
for card in second_player_cards:
fourth_player_cards.remove(card)
new_card_ordering = self._card_ordering[:]
i = 0
for k in range(self._number_of_cards_in_hand, self._number_of_cards_in_hand * 2):
new_card_ordering[k] = second_player_cards[i]
i += 1
i = 0
for k in range(self._number_of_cards_in_hand * 3, self._number_of_cards_in_hand * 4):
new_card_ordering[k] = fourth_player_cards[i]
i += 1
init_states += "\t(Environment.firstTeamScore=0 and Environment.secondTeamScore=0 and " \
"Environment.beginningPlayer=0 and Environment.currentPlayer=0 and " \
"Environment.clock=0 and Environment.SPlayerCard=None and " \
"Environment.WPlayerCard=None and Environment.NPlayerCard=None and " \
"Environment.EPlayerCard=None and Environment.suit=None"
colors_count = {}
i = 0
for player in self.player_names:
colors_count[player] = {}
for color in self.card_colors:
colors_count[player][color] = 0
for j in range(1, self._number_of_cards_in_hand + 1):
colors_count[player][self._cards_colors[self._cards[new_card_ordering[i]]]] += 1
i += 1
i = 0
for player in self.player_names:
for color in self.card_colors:
if player != "NPlayer" and player != 'SPlayer':
init_states += f" and {player}.has{color}={colors_count[player][color]}"
for player in self.player_names:
for j in range(1, self._number_of_cards_in_hand + 1):
if player == "NPlayer":
init_states += f" and Environment.cardN{j}={self._cards[new_card_ordering[i]]}" \
f" and Environment.currentCardN{j}={self._cards[new_card_ordering[i]]}"
else:
init_states += f" and {player}.card{j}={self._cards[new_card_ordering[i]]}"
i += 1
for j in range(0, self._number_of_cards * 4):
init_states += f" and Environment.{self._cards[j]}H=false"
init_states += ") or\n"
number_of_beginning_states += 1
init_states = init_states.rstrip("\nro ")
init_states += ";\nend InitStates\n\n"
return init_states
def _create_groups(self) -> str:
groups = "Groups\n"
groups += "\tg1={SPlayer};\n"
groups += "end Groups\n\n"
return groups
def _create_formulae(self) -> str:
formulae = "Formulae\n"
formulae += "\t<g1>F FirstTeamWin;\n"
formulae += "end Formulae\n\n"
return formulae
if __name__ == "__main__":
    # Generate a bridge model with 2 cards per suit / per hand and write it out.
    n = 2
    ispl_generator = BridgeModelIsplGenerator(n, n)
    model_txt = ispl_generator.create_model()
    # Context manager guarantees the file is closed even if the write fails
    # (the original open()/close() pair leaked the handle on exception).
    with open("bridge.ispl", "w") as file:
        file.write(model_txt)
| 47.543585
| 153
| 0.547567
| 5,817
| 48,542
| 4.351728
| 0.035929
| 0.032867
| 0.050328
| 0.061112
| 0.879158
| 0.837955
| 0.802204
| 0.765821
| 0.731295
| 0.716639
| 0
| 0.014869
| 0.337728
| 48,542
| 1,021
| 154
| 47.543585
| 0.772552
| 0.008529
| 0
| 0.620853
| 0
| 0.078199
| 0.299007
| 0.153005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069905
| false
| 0
| 0.003555
| 0.003555
| 0.138626
| 0.001185
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61fa2a85412e3dc12da42668a285f9756a7abb1a
| 845
|
py
|
Python
|
tests/test_hello_svc_acceptance_scenarios.py
|
pppillai/armageddon
|
6c9780a3f3e2a7ea50bc1d62e9140dd7067efb7f
|
[
"MIT"
] | null | null | null |
tests/test_hello_svc_acceptance_scenarios.py
|
pppillai/armageddon
|
6c9780a3f3e2a7ea50bc1d62e9140dd7067efb7f
|
[
"MIT"
] | null | null | null |
tests/test_hello_svc_acceptance_scenarios.py
|
pppillai/armageddon
|
6c9780a3f3e2a7ea50bc1d62e9140dd7067efb7f
|
[
"MIT"
] | null | null | null |
def test_hello_svc_without_param(hello_svc_client):
"""
Given: hello svc running on cluster
And: I have cluster ip address
And: I have service port
When: I do a get call
Then: I should get back 200 Ok
And: I should get back string Hi there, !"
"""
status, response = hello_svc_client.get()
assert status == 200
assert response == f"Hi there, !"
def test_hello_svc_with_param(hello_svc_client):
"""
Given: hello svc running on cluster
And: I have cluster ip address
And: I have service port
When: I do a get call with string parameter
Then: I should get back 200 Ok
And: I should get back string Hi there, <param>!"
"""
name = "pradeep"
status, response = hello_svc_client.get(name=name)
assert status == 200
assert response == f"Hi there, {name}!"
| 28.166667
| 54
| 0.660355
| 131
| 845
| 4.137405
| 0.29771
| 0.118081
| 0.103321
| 0.103321
| 0.837638
| 0.837638
| 0.723247
| 0.723247
| 0.586716
| 0.586716
| 0
| 0.019108
| 0.256805
| 845
| 30
| 55
| 28.166667
| 0.843949
| 0.47929
| 0
| 0.222222
| 0
| 0
| 0.095109
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.222222
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61fbb837045dd9d06e93d9588bbbc7840e374e34
| 277
|
py
|
Python
|
ann/fme/fme_renderer.py
|
yt7589/iching
|
6673da38f4c80e7fd297c86fedc5616aee8ac09b
|
[
"Apache-2.0"
] | 32
|
2020-04-14T08:32:18.000Z
|
2022-02-09T07:05:08.000Z
|
ann/fme/fme_renderer.py
|
trinh-hoang-hiep/iching
|
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
|
[
"Apache-2.0"
] | 1
|
2020-04-08T10:42:15.000Z
|
2020-04-15T01:38:03.000Z
|
ann/fme/fme_renderer.py
|
trinh-hoang-hiep/iching
|
e1feae5741c3cbde535d7a275b01d4f0cf9e21ed
|
[
"Apache-2.0"
] | 4
|
2020-08-25T03:56:46.000Z
|
2021-05-11T05:55:51.000Z
|
#
class FmeRenderer(object):
    """Renderer for FME observations, selectable between two back-ends."""

    # Supported rendering modes.
    RENDER_MODE_CONSOLE = 1
    RENDER_MODE_GRAPH = 2

    def __init__(self, render_mode=RENDER_MODE_CONSOLE):
        """Create a renderer; defaults to console mode."""
        self.name = 'apps.fme.FmeRender'
        self.render_mode = render_mode

    def render_obs(self, obs):
        """Render a single observation — currently a no-op placeholder."""
        pass
| 23.083333
| 57
| 0.646209
| 35
| 277
| 4.714286
| 0.514286
| 0.363636
| 0.206061
| 0.242424
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.270758
| 277
| 12
| 58
| 23.083333
| 0.806931
| 0
| 0
| 0
| 0
| 0
| 0.067669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.125
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.