hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff2a5bf6dbeb1144b91a070950e79c6e061e6015
| 42
|
py
|
Python
|
l10ndiff/__init__.py
|
zbraniecki/l10ndiff
|
93435ea9bab40cfcc15edf31b5928e2bc9c32954
|
[
"BSD-3-Clause"
] | 1
|
2017-04-04T06:55:27.000Z
|
2017-04-04T06:55:27.000Z
|
l10ndiff/__init__.py
|
zbraniecki/l10ndiff
|
93435ea9bab40cfcc15edf31b5928e2bc9c32954
|
[
"BSD-3-Clause"
] | null | null | null |
l10ndiff/__init__.py
|
zbraniecki/l10ndiff
|
93435ea9bab40cfcc15edf31b5928e2bc9c32954
|
[
"BSD-3-Clause"
] | null | null | null |
from .entity import *
from .list import *
| 14
| 21
| 0.714286
| 6
| 42
| 5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 22
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ff462f2789e6a2a436b3c028283a2706cf3b0cb9
| 25
|
py
|
Python
|
trainer/__init__.py
|
JasonJYang/GraphSynergy
|
e2e69e9b55c09ec62af1e51f01988b7d7c46c616
|
[
"MIT"
] | 1
|
2022-03-04T06:48:43.000Z
|
2022-03-04T06:48:43.000Z
|
trainer/__init__.py
|
JasonJYang/GraphSynergy
|
e2e69e9b55c09ec62af1e51f01988b7d7c46c616
|
[
"MIT"
] | null | null | null |
trainer/__init__.py
|
JasonJYang/GraphSynergy
|
e2e69e9b55c09ec62af1e51f01988b7d7c46c616
|
[
"MIT"
] | 2
|
2021-05-21T01:23:50.000Z
|
2021-06-28T04:36:50.000Z
|
# from .trainer import *
| 12.5
| 24
| 0.68
| 3
| 25
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 25
| 1
| 25
| 25
| 0.85
| 0.88
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ff4f1a7c0b17dcbb8a5a9ce1cb3a6a7dfeb870c4
| 121
|
py
|
Python
|
src/metaDMG/fit/__init__.py
|
metaDMG-dev/metaDMG-core
|
8894a2069e4fe4261ca3d96c7dae7d0a580228fc
|
[
"MIT"
] | null | null | null |
src/metaDMG/fit/__init__.py
|
metaDMG-dev/metaDMG-core
|
8894a2069e4fe4261ca3d96c7dae7d0a580228fc
|
[
"MIT"
] | null | null | null |
src/metaDMG/fit/__init__.py
|
metaDMG-dev/metaDMG-core
|
8894a2069e4fe4261ca3d96c7dae7d0a580228fc
|
[
"MIT"
] | null | null | null |
from metaDMG.fit.workflow import run_workflow
from metaDMG.loggers.loggers import get_logger_port_and_path, setup_logger
| 40.333333
| 74
| 0.884298
| 19
| 121
| 5.315789
| 0.684211
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07438
| 121
| 2
| 75
| 60.5
| 0.901786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ff4fb3383856e7a3479f4553607ce259615f7d93
| 1,908
|
py
|
Python
|
test/unit/test_template_selector.py
|
izakp/hokusai
|
a54778755c4b0db43a1e5773677c3e40b2ac0578
|
[
"MIT"
] | 85
|
2017-01-05T11:50:37.000Z
|
2022-02-05T13:22:50.000Z
|
test/unit/test_template_selector.py
|
izakp/hokusai
|
a54778755c4b0db43a1e5773677c3e40b2ac0578
|
[
"MIT"
] | 192
|
2017-01-26T18:06:55.000Z
|
2022-03-29T16:28:36.000Z
|
test/unit/test_template_selector.py
|
izakp/hokusai
|
a54778755c4b0db43a1e5773677c3e40b2ac0578
|
[
"MIT"
] | 23
|
2016-11-29T17:18:02.000Z
|
2021-08-23T16:51:31.000Z
|
import os
import yaml
from test import HokusaiUnitTestCase
from hokusai import CWD
from hokusai.lib.exceptions import HokusaiError
from hokusai.lib.template_selector import TemplateSelector
class TestTemplateSelector(HokusaiUnitTestCase):
def setUp(self):
self.template_path = os.path.join(CWD, 'test/fixtures/project/hokusai')
def test_finds_yml_file(self):
test_file = os.path.join(self.template_path, 'test.yml')
open(test_file, 'a').close()
self.assertEqual(TemplateSelector().get(os.path.join(self.template_path, 'test')), test_file)
os.remove(test_file)
def test_finds_yaml_file(self):
test_file = os.path.join(self.template_path, 'test.yaml')
open(test_file, 'a').close()
self.assertEqual(TemplateSelector().get(os.path.join(self.template_path, 'test')), test_file)
os.remove(test_file)
def test_finds_yml_j2_file(self):
test_file = os.path.join(self.template_path, 'test.yml.j2')
open(test_file, 'a').close()
self.assertEqual(TemplateSelector().get(os.path.join(self.template_path, 'test')), test_file)
os.remove(test_file)
def test_finds_yaml_j2_file(self):
test_file = os.path.join(self.template_path, 'test.yaml.j2')
open(test_file, 'a').close()
self.assertEqual(TemplateSelector().get(os.path.join(self.template_path, 'test')), test_file)
os.remove(test_file)
def test_finds_explicit_file_or_errors(self):
with self.assertRaises(HokusaiError):
TemplateSelector().get(os.path.join(self.template_path, 'test.yml'))
test_file = os.path.join(self.template_path, 'test.yml')
open(test_file, 'a').close()
self.assertEqual(TemplateSelector().get(os.path.join(self.template_path, 'test.yml')), test_file)
os.remove(test_file)
def test_errors_with_no_template_found(self):
with self.assertRaises(HokusaiError):
TemplateSelector().get(os.path.join(self.template_path, 'test'))
| 37.411765
| 101
| 0.740566
| 274
| 1,908
| 4.945255
| 0.145985
| 0.118081
| 0.153506
| 0.123985
| 0.749816
| 0.749816
| 0.749816
| 0.749816
| 0.734317
| 0.734317
| 0
| 0.00238
| 0.118973
| 1,908
| 50
| 102
| 38.16
| 0.803688
| 0
| 0
| 0.461538
| 0
| 0
| 0.061845
| 0.015199
| 0
| 0
| 0
| 0
| 0.179487
| 1
| 0.179487
| false
| 0
| 0.153846
| 0
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ff59a83557658312e508613aa3caaa840d65c188
| 10,869
|
py
|
Python
|
tests/test_browser_user.py
|
pekrau/Pleko
|
63797835cea24e228710530111ae6c132f6fda94
|
[
"MIT"
] | null | null | null |
tests/test_browser_user.py
|
pekrau/Pleko
|
63797835cea24e228710530111ae6c132f6fda94
|
[
"MIT"
] | 235
|
2019-02-18T14:58:04.000Z
|
2019-04-23T15:19:09.000Z
|
tests/test_browser_user.py
|
pekrau/Pleko
|
63797835cea24e228710530111ae6c132f6fda94
|
[
"MIT"
] | null | null | null |
"""Test operations for an ordinary logged-in user in a browser.
Requires a user account specified in the file 'settings.json' given by
- USER_USERNAME
- USER_PASSWORD
After installing from PyPi using the 'requirements.txt' file, one must do:
$ playwright install
To run while displaying browser window:
$ pytest --headed
Much of the code below was created using the playwright code generation feature:
$ playwright codegen http://localhost:5001/
"""
import http.client
import urllib.parse
import pytest
import playwright.sync_api
import utils
@pytest.fixture(scope="module")
def settings():
"Get the settings from file 'settings.json' in this directory."
return utils.get_settings(
BASE_URL="http://localhost:5001", USER_USERNAME=None, USER_PASSWORD=None
)
def login_user(settings, page):
"Login to the system as ordinary user."
page.goto(settings["BASE_URL"])
page.click("text=Login")
assert page.url == f"{settings['BASE_URL']}/user/login?"
page.click('input[name="username"]')
page.fill('input[name="username"]', settings["USER_USERNAME"])
page.press('input[name="username"]', "Tab")
page.fill('input[name="password"]', settings["USER_PASSWORD"])
page.click("id=login")
assert page.url == f"{settings['BASE_URL']}/"
def test_table_data(settings, page): # 'page' fixture from 'pytest-playwright'
"Test login, creating a table, inserting data 'by hand'."
login_user(settings, page)
# Create a database 'test'.
page.goto(f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}")
page.click("text=Create")
assert page.url == f"{settings['BASE_URL']}/db/"
page.click('input[name="name"]')
page.fill('input[name="name"]', "test")
page.click('textarea[name="description"]')
page.fill('textarea[name="description"]', "test database")
page.click('button:has-text("Create")')
assert page.url == f"{settings['BASE_URL']}/db/test"
# Create a table 't1'.
page.click("text=Create table")
assert page.url == f"{settings['BASE_URL']}/table/test"
page.click('input[name="name"]')
page.fill('input[name="name"]', "t1")
page.click('input[name="column0name"]')
page.fill('input[name="column0name"]', "i")
page.check("#column0primarykey")
page.click('input[name="column1name"]')
page.fill('input[name="column1name"]', "f")
page.select_option('select[name="column1type"]', "REAL")
page.click('input[name="column2name"]')
page.fill('input[name="column2name"]', "s")
page.select_option('select[name="column2type"]', "TEXT")
page.click('input[name="column3name"]')
page.fill('input[name="column3name"]', "r")
page.select_option('select[name="column3type"]', "REAL")
page.check('input[name="column3notnull"]')
page.click('button:has-text("Create")')
assert page.url == f"{settings['BASE_URL']}/table/test/t1"
# Insert a row into the table.
page.click("text=Insert row")
assert page.url == f"{settings['BASE_URL']}/table/test/t1/row"
page.click('input[name="i"]')
page.fill('input[name="i"]', "1")
page.click('input[name="f"]')
page.fill('input[name="f"]', "3.0")
page.click('input[name="s"]')
page.fill('input[name="s"]', "apa")
page.click('input[name="r"]')
page.fill('input[name="r"]', "3.141")
page.click('button:has-text("Insert")')
assert page.url == f"{settings['BASE_URL']}/table/test/t1/row"
# Insert another row into the table.
page.click('input[name="i"]')
page.fill('input[name="i"]', "2")
page.click('input[name="f"]')
page.click('input[name="s"]')
page.fill('input[name="s"]', "blah")
page.click('input[name="r"]')
page.fill('input[name="r"]', "-1.0")
page.click('button:has-text("Insert")')
assert page.url == f"{settings['BASE_URL']}/table/test/t1/row"
# Delete the table.
page.click("text=2 rows")
table_url = f"{settings['BASE_URL']}/table/test/t1"
assert page.url == table_url
page.once("dialog", lambda dialog: dialog.accept()) # Callback for next click.
page.click("text=Delete")
assert page.url == f"{settings['BASE_URL']}/db/test"
page.goto(table_url)
locator = page.locator("text=No such table")
playwright.sync_api.expect(locator).to_have_count(1)
# Delete the database.
page.once("dialog", lambda dialog: dialog.accept()) # Callback for next click.
page.click("text=Delete")
assert page.url == f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}"
def test_table_csv(settings, page): # 'page' fixture from 'pytest-playwright'
"Test login, creating a table, inserting data from a CSV file."
login_user(settings, page)
# Create a database 'test'.
page.goto(f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}")
page.click("text=Create")
assert page.url == f"{settings['BASE_URL']}/db/"
page.click('input[name="name"]')
page.fill('input[name="name"]', "test")
page.click('button:has-text("Create")')
assert page.url == f"{settings['BASE_URL']}/db/test"
# Create a table 't1'.
page.click("text=Create table")
assert page.url == f"{settings['BASE_URL']}/table/test"
page.click('input[name="name"]')
page.fill('input[name="name"]', "t1")
page.click('input[name="column0name"]')
page.fill('input[name="column0name"]', "i")
page.check("#column0primarykey")
page.click('input[name="column1name"]')
page.fill('input[name="column1name"]', "r")
page.select_option('select[name="column1type"]', "REAL")
page.click('input[name="column2name"]')
page.fill('input[name="column2name"]', "j")
page.click('input[name="column3name"]')
page.fill('input[name="column3name"]', "t")
page.select_option('select[name="column3type"]', "TEXT")
page.check('input[name="column3notnull"]')
page.click('button:has-text("Create")')
assert page.url == f"{settings['BASE_URL']}/table/test/t1"
# Insert data from file.
page.click("text=Insert from file")
assert page.url == "http://localhost:5001/table/test/t1/insert"
with page.expect_file_chooser() as fc_info:
page.click('input[name="csvfile"]')
file_chooser = fc_info.value
file_chooser.set_files("test.csv")
page.click("text=Insert from CSV file")
assert page.url == "http://localhost:5001/table/test/t1"
page.click("text=Database test")
assert page.url == f"{settings['BASE_URL']}/db/test"
# Query the database.
page.click("text=Query")
assert page.url == "http://localhost:5001/query/test"
page.click('textarea[name="select"]')
page.fill('textarea[name="select"]', "i,r")
page.click('textarea[name="from"]')
page.fill('textarea[name="from"]', "t1")
page.click('textarea[name="where"]')
page.fill('textarea[name="where"]', 't = "blah"')
page.click("text=Execute query")
assert page.url == "http://localhost:5001/query/test/rows"
assert page.locator("#nrows").text_content() == "1"
locator = page.locator("#rows > tbody > tr")
playwright.sync_api.expect(locator).to_have_count(1)
# Modify the query.
page.click("text=Edit query")
assert page.url.startswith("http://localhost:5001/query/test")
page.click('textarea[name="where"]')
page.fill('textarea[name="where"]', "j = 3")
page.click("text=Execute query")
assert page.url == "http://localhost:5001/query/test/rows"
assert page.locator("#nrows").text_content() == "2"
locator = page.locator("#rows > tbody > tr")
playwright.sync_api.expect(locator).to_have_count(2)
# Delete the database.
page.click("text=Database test")
assert page.url == "http://localhost:5001/db/test"
page.once("dialog", lambda dialog: dialog.accept()) # Callback for next click.
page.click("text=Delete")
assert page.url == f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}"
def test_db_upload(settings, page):
"Test uploading a Sqlite3 database file."
login_user(settings, page)
# Upload database file.
page.goto(f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}")
page.click("text=Upload")
assert page.url == "http://localhost:5001/dbs/upload"
page.once("filechooser", lambda fc: fc.set_files("test.sqlite3"))
page.click('input[name="sqlite3file"]')
page.click("text=Upload SQLite3 file")
assert page.url == "http://localhost:5001/db/test"
page.click("text=3 rows")
assert page.url == "http://localhost:5001/table/test/t1"
locator = page.locator("#rows > tbody > tr")
playwright.sync_api.expect(locator).to_have_count(3)
page.click('a[role="button"]:has-text("Schema")')
assert page.url == "http://localhost:5001/table/test/t1/schema"
locator = page.locator("#columns > tbody > tr")
playwright.sync_api.expect(locator).to_have_count(7)
# Delete the database.
page.click("text=Database test")
assert page.url == "http://localhost:5001/db/test"
page.once("dialog", lambda dialog: dialog.accept()) # Callback for next click.
page.click("text=Delete")
assert page.url == f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}"
def test_view(settings, page):
"Test view creation, based on a table in an uploaded Sqlite3 database file."
login_user(settings, page)
# Upload database file.
page.goto(f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}")
page.click("text=Upload")
assert page.url == "http://localhost:5001/dbs/upload"
page.once("filechooser", lambda fc: fc.set_files("test.sqlite3"))
page.click('input[name="sqlite3file"]')
page.click("text=Upload SQLite3 file")
assert page.url == "http://localhost:5001/db/test"
# Create the view.
page.click("text=Query")
assert page.url == "http://localhost:5001/query/test"
page.click('textarea[name="select"]')
page.fill('textarea[name="select"]', "i, r1")
page.click('textarea[name="from"]')
page.fill('textarea[name="from"]', "t1")
page.click('textarea[name="where"]')
page.fill('textarea[name="where"]', "i2 < 0")
page.click("text=Execute query")
assert page.url == "http://localhost:5001/query/test/rows"
page.click("text=Create view")
page.click('input[name="name"]')
page.fill('input[name="name"]', "v1")
page.click('button:has-text("Create")')
page.wait_for_timeout(3000)
locator = page.locator("#rows > tbody > tr")
playwright.sync_api.expect(locator).to_have_count(2)
# Delete the database.
page.click("text=Database test")
assert page.url == "http://localhost:5001/db/test"
page.once("dialog", lambda dialog: dialog.accept()) # Callback for next click.
page.click("text=Delete")
assert page.url == f"{settings['BASE_URL']}/dbs/owner/{settings['USER_USERNAME']}"
# page.wait_for_timeout(3000)
| 39.667883
| 86
| 0.658662
| 1,495
| 10,869
| 4.725084
| 0.121739
| 0.091733
| 0.068092
| 0.063703
| 0.782135
| 0.758918
| 0.732729
| 0.732729
| 0.715317
| 0.687571
| 0
| 0.017055
| 0.147668
| 10,869
| 273
| 87
| 39.813187
| 0.745466
| 0.128347
| 0
| 0.616505
| 0
| 0
| 0.460413
| 0.23752
| 0
| 0
| 0
| 0
| 0.18932
| 1
| 0.029126
| false
| 0.009709
| 0.024272
| 0
| 0.058252
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ffa60f1f8af18963c45ed7e712c185286768893f
| 217
|
wsgi
|
Python
|
CTF/HSCTF/apache/django.wsgi
|
IAryan/NULLify-HSCTF-2014
|
9e1c2aca9fc6b0cb98e73f8abd76299cb3cc0fb7
|
[
"MIT"
] | 1
|
2016-03-20T19:35:33.000Z
|
2016-03-20T19:35:33.000Z
|
CTF/HSCTF/apache/django.wsgi
|
IAryan/NULLify-HSCTF-2014
|
9e1c2aca9fc6b0cb98e73f8abd76299cb3cc0fb7
|
[
"MIT"
] | null | null | null |
CTF/HSCTF/apache/django.wsgi
|
IAryan/NULLify-HSCTF-2014
|
9e1c2aca9fc6b0cb98e73f8abd76299cb3cc0fb7
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append('/home/nullify/CTF/HSCTF')
os.environ['DJANGO_SETTINGS_MODULE'] = 'HSCTF.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| 24.111111
| 56
| 0.760369
| 29
| 217
| 5.62069
| 0.62069
| 0.122699
| 0.220859
| 0.269939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105991
| 217
| 9
| 57
| 24.111111
| 0.840206
| 0
| 0
| 0
| 0
| 0
| 0.280952
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4402bc9c4a874e29d4c4f848093f9ecf53668762
| 658
|
py
|
Python
|
easyrec/models/__init__.py
|
xu-zhiwei/easyrec
|
4e42a356efe799bcd469a568d356852e4230bbc8
|
[
"MIT"
] | 5
|
2021-08-12T22:54:07.000Z
|
2022-03-27T11:46:48.000Z
|
easyrec/models/__init__.py
|
xu-zhiwei/pyrec
|
4e42a356efe799bcd469a568d356852e4230bbc8
|
[
"MIT"
] | null | null | null |
easyrec/models/__init__.py
|
xu-zhiwei/pyrec
|
4e42a356efe799bcd469a568d356852e4230bbc8
|
[
"MIT"
] | null | null | null |
from easyrec.models.afm import AFM
from easyrec.models.autoint import AutoInt
from easyrec.models.dcn import DCN
from easyrec.models.deep_crossing import DeepCrossing
from easyrec.models.deepfm import DeepFM
from easyrec.models.dssm import DSSM
from easyrec.models.ffm import FFM
from easyrec.models.fm import FM
from easyrec.models.fnn import FNN
from easyrec.models.lr import LR
from easyrec.models.mlp import MLP
from easyrec.models.mmoe import MMOE
from easyrec.models.neumf import NeuMF
from easyrec.models.nfm import NFM
from easyrec.models.pnn import PNN
from easyrec.models.wide_and_deep import WideAndDeep
from easyrec.models.xdeepfm import xDeepFM
| 36.555556
| 53
| 0.844985
| 105
| 658
| 5.266667
| 0.238095
| 0.338156
| 0.522604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103343
| 658
| 17
| 54
| 38.705882
| 0.937288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
440cc911d4a24c89013e1a56756fa37966a6a574
| 32
|
py
|
Python
|
CoinMktCap/__init__.py
|
sarthakkimtani/CoinMktCap
|
325ea27cb09440d325aa532e086054804f181366
|
[
"MIT"
] | 1
|
2021-06-09T12:38:04.000Z
|
2021-06-09T12:38:04.000Z
|
CoinMktCap/__init__.py
|
sarthakkimtani/CoinMktCap
|
325ea27cb09440d325aa532e086054804f181366
|
[
"MIT"
] | null | null | null |
CoinMktCap/__init__.py
|
sarthakkimtani/CoinMktCap
|
325ea27cb09440d325aa532e086054804f181366
|
[
"MIT"
] | null | null | null |
from .core import CoinMarketCap
| 16
| 31
| 0.84375
| 4
| 32
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4437ad23b732a2d6e8ab0de7c1c961c5d80ddd4b
| 65
|
py
|
Python
|
snake_games/circlez/__init__.py
|
cclauss/snakeware
|
9b857d132dcabc2aedb407d694b8c1d9e13cad1e
|
[
"MIT"
] | null | null | null |
snake_games/circlez/__init__.py
|
cclauss/snakeware
|
9b857d132dcabc2aedb407d694b8c1d9e13cad1e
|
[
"MIT"
] | null | null | null |
snake_games/circlez/__init__.py
|
cclauss/snakeware
|
9b857d132dcabc2aedb407d694b8c1d9e13cad1e
|
[
"MIT"
] | null | null | null |
from .circ import CirclezApp
def load():
CirclezApp().run()
| 13
| 28
| 0.676923
| 8
| 65
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 4
| 29
| 16.25
| 0.830189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4453276425c153e5e1c6052fecce86e9896f21f6
| 40
|
py
|
Python
|
boa3_test/test_sc/variable_test/MismatchedTypeAugAssign.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/variable_test/MismatchedTypeAugAssign.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/variable_test/MismatchedTypeAugAssign.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
def Main():
a: int = 1
a += '2'
| 10
| 14
| 0.35
| 7
| 40
| 2
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.425
| 40
| 3
| 15
| 13.333333
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
445b3fd78610197862f170bee8136e37049c1dca
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/parso/python/tree.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/parso/python/tree.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/parso/python/tree.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c4/0e/2a/6f50bd15fe507d5bd9458501ecace9282a8d3bc0cc765fb7e1c22c283b
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0
| 96
| 1
| 96
| 96
| 0.520833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4463bbb1b8349c07ee0a96280bf9dd61c02ce966
| 41
|
py
|
Python
|
catalyst/contrib/dl/__init__.py
|
denyhoof/catalyst
|
a340450076f7846007bc5695e5163e15b7ad9575
|
[
"Apache-2.0"
] | 1
|
2020-09-24T00:34:06.000Z
|
2020-09-24T00:34:06.000Z
|
catalyst/contrib/dl/__init__.py
|
denyhoof/catalyst
|
a340450076f7846007bc5695e5163e15b7ad9575
|
[
"Apache-2.0"
] | null | null | null |
catalyst/contrib/dl/__init__.py
|
denyhoof/catalyst
|
a340450076f7846007bc5695e5163e15b7ad9575
|
[
"Apache-2.0"
] | 1
|
2020-09-24T00:34:07.000Z
|
2020-09-24T00:34:07.000Z
|
# flake8: noqa
from .callbacks import *
| 10.25
| 24
| 0.707317
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.195122
| 41
| 3
| 25
| 13.666667
| 0.848485
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
44799b8f43072e0545ac854032ffc219475ec03a
| 188
|
py
|
Python
|
DNA_strand.py
|
lluxury/codewars
|
2cacc9d5411a248c199ad21949617c5acc9c7f24
|
[
"MIT"
] | null | null | null |
DNA_strand.py
|
lluxury/codewars
|
2cacc9d5411a248c199ad21949617c5acc9c7f24
|
[
"MIT"
] | null | null | null |
DNA_strand.py
|
lluxury/codewars
|
2cacc9d5411a248c199ad21949617c5acc9c7f24
|
[
"MIT"
] | null | null | null |
pairs = {'A':'T','T':'A','C':'G','G':'C'}
def DNA_strand(dna):
'''
DNA_strand("AAAA")
"TTTT"
'''
return ''.join([pairs[x] for x in dna])
# 标准的解法,构造一个表,用值来替代键返回
| 20.888889
| 44
| 0.478723
| 28
| 188
| 3.142857
| 0.642857
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239362
| 188
| 9
| 45
| 20.888889
| 0.615385
| 0.25
| 0
| 0
| 0
| 0
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
44934720143d8e56e5a86c09e91f767f70d0f5a7
| 91
|
py
|
Python
|
tabitha/__init__.py
|
robladbrook/tabitha
|
7beb87cc313eb841eb4a1da6c1116b8627590a92
|
[
"MIT"
] | 1
|
2017-01-10T00:43:26.000Z
|
2017-01-10T00:43:26.000Z
|
tabitha/__init__.py
|
robladbrook/tabitha
|
7beb87cc313eb841eb4a1da6c1116b8627590a92
|
[
"MIT"
] | 1
|
2019-10-31T22:01:42.000Z
|
2019-10-31T22:01:42.000Z
|
tabitha/__init__.py
|
robladbrook/tabitha
|
7beb87cc313eb841eb4a1da6c1116b8627590a92
|
[
"MIT"
] | null | null | null |
""" init """
from __future__ import absolute_import
from .voiceclient import VoiceClient
| 15.166667
| 38
| 0.78022
| 10
| 91
| 6.6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 5
| 39
| 18.2
| 0.846154
| 0.043956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9240633c551d1d88e59700e54c4969c97090ad93
| 114
|
py
|
Python
|
pbo1-project/Models/__init__.py
|
hifra01/PBO1-Project
|
82b9abbdf1cab4da18e0f9514e92d298d9661b26
|
[
"MIT"
] | null | null | null |
pbo1-project/Models/__init__.py
|
hifra01/PBO1-Project
|
82b9abbdf1cab4da18e0f9514e92d298d9661b26
|
[
"MIT"
] | null | null | null |
pbo1-project/Models/__init__.py
|
hifra01/PBO1-Project
|
82b9abbdf1cab4da18e0f9514e92d298d9661b26
|
[
"MIT"
] | null | null | null |
from .CustomerModel import CustomerModel
from .AdminModel import AdminModel
from .OrderModel import OrderModel
| 28.5
| 41
| 0.842105
| 12
| 114
| 8
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 114
| 3
| 42
| 38
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9250dfbc33c90b4778b3efcc044337f4ae57fbc6
| 131
|
py
|
Python
|
Geometry/tests/test_rectangle.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 23
|
2015-10-28T15:21:41.000Z
|
2022-03-29T13:52:41.000Z
|
Geometry/tests/test_rectangle.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 7
|
2021-01-26T11:57:25.000Z
|
2022-02-07T11:00:06.000Z
|
Geometry/tests/test_rectangle.py
|
liuxiang0/Geometry
|
3500f815fa56c535b36d1b6fd0afe69ce5d055be
|
[
"MIT"
] | 16
|
2016-07-17T12:47:05.000Z
|
2021-06-21T21:02:48.000Z
|
import unittest
from .. import Point, Triangle
from ..exceptions import *
class RectangleTestCase(unittest.TestCase):
    """Placeholder test case for rectangle geometry; no tests implemented yet."""
| 13.1
| 43
| 0.755725
| 14
| 131
| 7.071429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167939
| 131
| 9
| 44
| 14.555556
| 0.908257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
9255ce7fa25b1d5ce466954f3bd99a5838a635be
| 50
|
py
|
Python
|
lc_classifier/features/preprocess/__init__.py
|
alercebroker/late_classifier
|
72fb640ee37bff67f865945499b417f3ca36c3cb
|
[
"MIT"
] | 6
|
2021-04-27T03:12:43.000Z
|
2022-01-23T06:36:48.000Z
|
lc_classifier/features/preprocess/__init__.py
|
alercebroker/late_classifier
|
72fb640ee37bff67f865945499b417f3ca36c3cb
|
[
"MIT"
] | 8
|
2020-11-27T04:39:14.000Z
|
2022-01-13T17:47:45.000Z
|
lc_classifier/features/preprocess/__init__.py
|
alercebroker/lc_classifier
|
72fb640ee37bff67f865945499b417f3ca36c3cb
|
[
"MIT"
] | 2
|
2021-08-10T08:06:23.000Z
|
2022-01-14T12:31:43.000Z
|
from .base import *
from .preprocess_ztf import *
| 16.666667
| 29
| 0.76
| 7
| 50
| 5.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 30
| 25
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
926d28c67fbc975ff39f397e1695245872e036bd
| 23
|
py
|
Python
|
models/__init__.py
|
Sacinandan/flask-api
|
f73a45a8ac2bd5aa2184866225dceaf78187f2f0
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Sacinandan/flask-api
|
f73a45a8ac2bd5aa2184866225dceaf78187f2f0
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Sacinandan/flask-api
|
f73a45a8ac2bd5aa2184866225dceaf78187f2f0
|
[
"MIT"
] | null | null | null |
from .note import Note
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92756f38936311d66073dba333ec145ed7c6dbe7
| 174
|
py
|
Python
|
bnf/test/utils.py
|
Nikita-Boyarskikh/bnf
|
1293b0f2187593989e2484a7af9612477fa8bbe0
|
[
"MIT"
] | null | null | null |
bnf/test/utils.py
|
Nikita-Boyarskikh/bnf
|
1293b0f2187593989e2484a7af9612477fa8bbe0
|
[
"MIT"
] | null | null | null |
bnf/test/utils.py
|
Nikita-Boyarskikh/bnf
|
1293b0f2187593989e2484a7af9612477fa8bbe0
|
[
"MIT"
] | null | null | null |
from typing import Iterator
from bnf.rule import Rule
def assert_all_tests_is_valid(rule: "Rule", tests: Iterator[str]):
    """Assert that *rule* accepts every test string in *tests*.

    Uses a lazy generator expression inside ``all`` so evaluation stops at
    the first invalid test, instead of materialising the full result list
    first as the original list comprehension did.

    :param rule: the grammar rule; ``rule(test)`` must return an object
        exposing ``is_valid()``
    :param tests: iterable of test strings to validate
    :raises AssertionError: if any test is not valid for *rule*
    """
    assert all(rule(test).is_valid() for test in tests)
| 21.75
| 64
| 0.747126
| 30
| 174
| 4.166667
| 0.533333
| 0.144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 174
| 7
| 65
| 24.857143
| 0.844595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
927968c937e65ca96fbdefdc5f63531d8f9151fb
| 160
|
py
|
Python
|
xlib/onnxruntime/__init__.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | 4
|
2021-07-23T16:34:24.000Z
|
2022-03-01T18:31:59.000Z
|
xlib/onnxruntime/__init__.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | 1
|
2022-02-08T01:29:03.000Z
|
2022-02-08T01:29:03.000Z
|
xlib/onnxruntime/__init__.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | 1
|
2021-12-14T09:18:15.000Z
|
2021-12-14T09:18:15.000Z
|
from .device import (ORTDeviceInfo, get_available_devices_info,
get_cpu_device)
from .InferenceSession import InferenceSession_with_device
| 40
| 63
| 0.76875
| 17
| 160
| 6.823529
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19375
| 160
| 3
| 64
| 53.333333
| 0.899225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92915c50d9cb8eb973748cd01dfd2bad96533da2
| 231
|
py
|
Python
|
simple_bson/__init__.py
|
DeltaLaboratory/simple-bson
|
fed15745e9cbc700b8573994041a0faca575d2d4
|
[
"Apache-2.0"
] | 77
|
2021-07-01T16:29:27.000Z
|
2021-09-25T03:34:04.000Z
|
simple_bson/__init__.py
|
DeltaLaboratory/simple-bson
|
fed15745e9cbc700b8573994041a0faca575d2d4
|
[
"Apache-2.0"
] | 1
|
2021-07-02T02:41:07.000Z
|
2021-07-02T05:02:55.000Z
|
simple_bson/__init__.py
|
DeltaLaboratory/simple-bson
|
fed15745e9cbc700b8573994041a0faca575d2d4
|
[
"Apache-2.0"
] | null | null | null |
import typing
from . import encoder, decoder
def dumps(document: typing.Dict) -> bytes:
    """Serialize *document* into its BSON byte representation.

    Thin wrapper that delegates to ``encoder.encode_document``.
    """
    encoded = encoder.encode_document(document)
    return encoded
def loads(document: bytes) -> typing.Dict:
    """Deserialize BSON bytes back into a dictionary.

    Thin wrapper that delegates to ``decoder.decode_root_document``.
    """
    decoded = decoder.decode_root_document(document)
    return decoded
| 19.25
| 49
| 0.757576
| 29
| 231
| 5.931034
| 0.517241
| 0.116279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147186
| 231
| 11
| 50
| 21
| 0.873096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
92a0091a9fcc2e2776e26a98baed3d15155b44b6
| 89
|
py
|
Python
|
solutions/Leetcode_1689/leetcode_1689.py
|
YuhanShi53/Leetcode_solutions
|
cdcad34656d25d6af09b226e17250c6070305ab0
|
[
"MIT"
] | null | null | null |
solutions/Leetcode_1689/leetcode_1689.py
|
YuhanShi53/Leetcode_solutions
|
cdcad34656d25d6af09b226e17250c6070305ab0
|
[
"MIT"
] | null | null | null |
solutions/Leetcode_1689/leetcode_1689.py
|
YuhanShi53/Leetcode_solutions
|
cdcad34656d25d6af09b226e17250c6070305ab0
|
[
"MIT"
] | null | null | null |
class Solution1:
    """Solution for LeetCode 1689 (partitioning into minimum deci-binary numbers)."""

    def min_partitions(self, n: str) -> int:
        """Return the answer for digit-string *n*.

        The minimum number of deci-binary numbers needed equals the
        largest single digit in *n*.
        """
        return max(int(digit) for digit in n)
| 22.25
| 44
| 0.617978
| 13
| 89
| 4.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.247191
| 89
| 3
| 45
| 29.666667
| 0.791045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
92a12011ba78b814ab1e55cf2facfb6c5a9947c0
| 559
|
py
|
Python
|
app.py
|
hmyhehe/hmy.git.io
|
15d6fd6204af8b1be05dd7738390aa61e72d3399
|
[
"MIT"
] | null | null | null |
app.py
|
hmyhehe/hmy.git.io
|
15d6fd6204af8b1be05dd7738390aa61e72d3399
|
[
"MIT"
] | null | null | null |
app.py
|
hmyhehe/hmy.git.io
|
15d6fd6204af8b1be05dd7738390aa61e72d3399
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route("/")
def index():
    """Render the login form for the site root."""
    login_page = render_template("login.html")
    return login_page
@app.route("/login",methods=['POST','GET'])
def login():
    """Show the login form on GET; validate submitted credentials on POST.

    On a successful POST the welcome page is rendered with the user's name;
    otherwise the login form is shown again.
    """
    if request.method == 'GET':
        return render_template("login.html")
    else:
        name = request.form['id']
        password = request.form['pwd']
        # Fix: the original line was missing the trailing ':' (SyntaxError).
        # NOTE(review): credentials are hard-coded and compared in plain
        # text — acceptable only for a demo, never for production.
        if name == 'zhangsan' and password == '123':
            return render_template("welcome.html",name = name)
        else:
            return render_template("login.html")
| 26.619048
| 62
| 0.617174
| 67
| 559
| 5.014925
| 0.462687
| 0.208333
| 0.238095
| 0.223214
| 0.258929
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009217
| 0.223614
| 559
| 20
| 63
| 27.95
| 0.764977
| 0.030411
| 0
| 0.3125
| 0
| 0
| 0.138632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.125
| 0.0625
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
92b19077d649f9a75064da9c0b2b6f257669231b
| 334
|
py
|
Python
|
routing/exceptions.py
|
Atari4800/fast-backend
|
4864300351f5cb93638deb6c18017149eb7f17ca
|
[
"MIT"
] | 1
|
2021-11-25T22:57:59.000Z
|
2021-11-25T22:57:59.000Z
|
routing/exceptions.py
|
Capstone-Team-Fast/fast-backend
|
7854a2731ba0923e89581c1fbb6cc95ea210fed0
|
[
"MIT"
] | null | null | null |
routing/exceptions.py
|
Capstone-Team-Fast/fast-backend
|
7854a2731ba0923e89581c1fbb6cc95ea210fed0
|
[
"MIT"
] | 3
|
2022-01-29T21:32:54.000Z
|
2022-03-09T09:30:44.000Z
|
class GeocodeError(Exception):
    """Exception raised for geocoding failures."""


class MatrixServiceError(Exception):
    """Exception raised for matrix-service failures."""


class RelationshipError(Exception):
    """Exception raised for relationship errors."""


class EmptyRouteException(Exception):
    """Exception raised when a route is empty."""


class RouteStateException(Exception):
    """Exception raised for an invalid route state."""


class LocationStateException(Exception):
    """Exception raised for an invalid location state."""


class LanguageOptionError(Exception):
    """Exception raised for an invalid language option."""
| 12.37037
| 40
| 0.754491
| 28
| 334
| 9
| 0.357143
| 0.361111
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182635
| 334
| 26
| 41
| 12.846154
| 0.923077
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
92ba436788c4aef1d68009261d0b4834ceb1f22a
| 265
|
py
|
Python
|
firmware/gen_oldnewball.py
|
brainsmoke/hex2811-penta
|
449614ab6be273e92e8bffbd066acdeb4da7154b
|
[
"MIT"
] | 4
|
2017-09-06T18:58:58.000Z
|
2021-08-05T00:04:21.000Z
|
firmware/gen_oldnewball.py
|
brainsmoke/hex2811-penta
|
449614ab6be273e92e8bffbd066acdeb4da7154b
|
[
"MIT"
] | null | null | null |
firmware/gen_oldnewball.py
|
brainsmoke/hex2811-penta
|
449614ab6be273e92e8bffbd066acdeb4da7154b
|
[
"MIT"
] | 1
|
2018-06-08T10:56:27.000Z
|
2018-06-08T10:56:27.000Z
|
# Emit the LED channel-order table: one tab-indented output line per LED
# index, each line holding 15 comma-terminated values (5 groups of 3
# channel offsets, applied in the order 1, 0, 2).
for led in [21,22,1,19,15,16,17,18,2,6,7,8,9,5,3,52,53,54,50,51,4,12,13,14,10,11,0,23,24,20,34,30,44,35,36,37,38,39,40,49,45,46,47,48,41,59,55,56,57,58,42,26,27,28,29,25,43,31,32,33]:
    cells = [str(led*15 + group*3 + channel) + "," for group in range(5) for channel in (1, 0, 2)]
    print("\t" + "".join(cells))
| 66.25
| 181
| 0.603774
| 83
| 265
| 1.927711
| 0.843373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.46988
| 0.060377
| 265
| 3
| 182
| 88.333333
| 0.172691
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
2b7bf6ad248f61c6c95b7425e9770c842f5b4dd9
| 174,862
|
py
|
Python
|
CalculationSK.py
|
atranel/resqdb
|
76b8a5089732ae63c867b734c5053908687122bc
|
[
"MIT"
] | null | null | null |
CalculationSK.py
|
atranel/resqdb
|
76b8a5089732ae63c867b734c5053908687122bc
|
[
"MIT"
] | null | null | null |
CalculationSK.py
|
atranel/resqdb
|
76b8a5089732ae63c867b734c5053908687122bc
|
[
"MIT"
] | null | null | null |
#### Filename: CalculationSK.py
#### Version: v1.0
#### Author: Marie Jankujova
#### Date: March 4, 2019
#### Description: Connect to database, export Slovakia data and calculate statistics.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
from datetime import datetime, time, date
import time
import sqlite3
from numpy import inf
import pytz
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell, xl_col_to_name
class Connection:
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
"""
    def __init__(self, nprocess=1):
        """Connect to the datamix database, export both Slovakia tables,
        prepare them and merge them into ``self.df``.

        :param nprocess: number of processes (passed through to ``connect``)
        :type nprocess: int
        """
        start = time.time()
        # Log into a per-day file created in the current working directory.
        debug = 'debug_' + datetime.now().strftime('%d-%m-%Y') + '.log'
        log_file = os.path.join(os.getcwd(), debug)
        logging.basicConfig(filename=log_file,
                            filemode='a',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
        logging.info('CalculationSK: Connecting to datamix database!')
        # Get absolute path of this module; database.ini lives next to it.
        path = os.path.dirname(__file__)
        self.database_ini = os.path.join(path, 'database.ini')
        # Section name inside database.ini holding the connection parameters.
        datamix = 'datamix-backup'
        # SQL exports to run, one per source table.
        self.sqls = ['SELECT * from slovakia', 'SELECT * from slovakia_2018']
        #self.sqls = ['SELECT * from slovakia']
        # Dataframe names used as dictionary keys (parallel to self.sqls).
        self.names = ['slovakia', 'slovakia_2018']
        #self.names = ['slovakia']
        # Raw dataframes keyed by name, filled by connect().
        self.dictdb_df = {}
        # Dictionary initialization - prepared dataframes keyed by name.
        self.dict_df = {}
        # Export data from the database for slovakia
        df_name = self.names[0]
        self.connect(self.sqls[0], datamix, nprocess, df_name=df_name)
        # Export data from the database for slovakia_2018
        df_name = self.names[1]
        self.connect(self.sqls[1], datamix, nprocess, df_name=df_name)
        for k, v in self.dictdb_df.items():
            self.prepare_df(df=v, name=k)
        self.df = pd.DataFrame()
        # NOTE(review): debug print left in place; consider logging instead.
        print(self.dict_df)
        # NOTE(review): DataFrame.append is deprecated in recent pandas;
        # works with the pandas versions this file targets — confirm.
        for i in range(0, len(self.names)):
            self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
            logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
        self.countries = self._get_countries(df=self.df)
        # Get preprocessed data
        # self.preprocessed_data = self.check_data(df=self.df)
        # Read temporary csv file with CZ report names and Angels Awards report names
        path = os.path.join(os.path.dirname(__file__), 'tmp', 'sk_mapping.csv')
        with open(path, 'r') as csv_file:
            sk_names_dict = pd.read_csv(csv_file)
        def change_name(name):
            # Map a raw hospital name onto its Angels Awards report name;
            # missing names map to the empty string.
            if pd.isna(name):
                return ''
            else:
                changed_name = sk_names_dict.loc[
                    sk_names_dict['Hospital name'].str.contains(name), 'Angels Awards name'].iloc[0]
                return changed_name
        dateForm = '%Y-%m-%d'
        self.df['HOSPITAL_DATE'] = pd.to_datetime(self.df['HOSPITAL_DATE'], format=dateForm, errors="coerce")
        self.df['DISCHARGE_DATE'] = pd.to_datetime(self.df['DISCHARGE_DATE'], format=dateForm, errors="coerce")
        self.df['CT_DATE'] = pd.to_datetime(self.df['CT_DATE'], format=dateForm, errors="ignore")
        # raw_df = raw_df.loc[raw_df['ROK_SPRAC'] == 2019].copy()
        # Protocol ID / Site Name both carry the mapped report name.
        self.df['Protocol ID'] = self.df['HOSPITAL_NAME']
        self.df['Protocol ID'] = self.df.apply(
            lambda x: change_name(x['HOSPITAL_NAME']), axis=1)
        self.df['Site Name'] = self.df['Protocol ID']
        end = time.time()
        tdelta = (end-start)/60
        logging.info('The conversion and merging run {0} minutes.'.format(tdelta))
def config(self, section):
""" The function reading and parsing the config of database file.
:param section: the name of the section in database.ini file
:type section: str
:returns: the dictionary with the parsed section values
:raises: Exception
"""
parser = ConfigParser()
parser.read(self.database_ini)
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
logging.error('CalculationSK: Section {0} not found in the {1} file'.format(section, self.database_ini))
raise Exception('Section {0} not found in the {1} file'.format(section, self.database_ini))
return db
    def connect(self, sql, section, nprocess, df_name=None):
        """ The function connecting to the database and running one export query.
        :param sql: the sql query
        :type sql: str
        :param section: the section from the database.ini
        :type section: str
        :param nprocess: the number of processes run simultaneously
        :type nprocess: int
        :param df_name: the name of the dataframe used as key in the dictionary
        :type df_name: str
        :raises: Exception

        NOTE(review): all errors (including DatabaseError) are only logged,
        never re-raised, so a failed export leaves ``self.dictdb_df`` without
        the expected key — callers must tolerate that.
        """
        conn = None
        try:
            params = self.config(section) # Get parameters from config file
            logging.info('Process{0}: Connecting to the PostgreSQL database... '.format(nprocess))
            conn = psycopg2.connect(**params) # Connect to server
            if df_name is not None:
                # For each sql query create new dataframe in the dictionary using df_name as key
                self.dictdb_df[df_name] = pd.read_sql_query(sql, conn)
                logging.info('CalculationSK: Process{0}: Dataframe {1} has been created created.'.format(nprocess, df_name))
            else:
                logging.info('CalculationSK: Process{0}: Name of dataframe is missing.'.format(nprocess))
        except (Exception, psycopg2.DatabaseError) as error:
            logging.error(error)
        finally:
            if conn is not None:
                conn.close() # Close connection
                logging.info('Process{0}: Database connection has been closed.'.format(nprocess))
    def _calculate_time(self, ct_date, hospital_date, rec_date, used_col=None):
        """ The function calculating difference between two times in minutes. The function checking if hospital date is after recanalization date, and if it's TRUE then CT date is used as hospitalization date.
        :param ct_date: the date when CT/MRI was performed
        :type ct_date: date
        :param hospital_date: the date of hospitalization
        :type hospital_date: date
        :param rec_date: the date when recanalization procedure was performed
        :type rec_date: date
        :param used_col: the column which was used for calculation of DTN
        :type used_col: str
        :returns: tdeltamin, used_col

        NOTE(review): the except branch returns a bare ``None`` instead of a
        ``(tdeltamin, used_col)`` tuple, which would break the ``zip`` in
        prepare_df — confirm whether ValueError can actually occur here.
        NOTE(review): the local ``col`` is updated throughout but never
        returned; the second return value is always the incoming
        ``used_col`` argument — looks like ``col`` was intended.
        """
        rec_time = rec_date - hospital_date
        tdeltamin = rec_time.total_seconds()/60.0
        col = 'HOSPITAL_DATE'
        if used_col is None:
            # Implausibly small delta (<= 1 min): fall back to the CT date
            # when the dates disagree.
            if tdeltamin <= 1:
                try:
                    if hospital_date.strftime('%Y-%m-%d') > rec_date.strftime('%Y-%m-%d'):
                        rec_time = rec_date - ct_date
                        tdeltamin = rec_time.total_seconds()/60.0
                        col = 'CT_TIME'
                    else:
                        if hospital_date.strftime('%Y-%m-%d') == rec_date.strftime('%Y-%m-%d'):
                            rec_time = rec_date - hospital_date
                            tdeltamin = rec_time.total_seconds()/60.0
                            col = 'HOSPITAL_DATE'
                            if tdeltamin <= 1:
                                rec_time = rec_date - ct_date
                                tdeltamin = rec_time.total_seconds()/60.0
                                col = 'CT_TIME'
                except ValueError:
                    return None
            # Delta between 1 and 10 minutes: prefer the larger of the
            # hospital-based and CT-based deltas.
            elif tdeltamin > 1 and tdeltamin <= 10:
                hosp_time = rec_date - hospital_date
                hosp_time_mins = hosp_time.total_seconds()/60.0
                rec_time = rec_date - ct_date
                tdeltamin = rec_time.total_seconds()/60.0
                col = 'CT_TIME'
                if hosp_time_mins > tdeltamin:
                    tdeltamin = hosp_time_mins
                    col = 'HOSPITAL_DATE'
        else:
            # A reference column was already chosen by an earlier call:
            # recompute the delta against that same column.
            if used_col == 'HOSPITAL_DATE':
                rec_time = rec_date - hospital_date
                tdeltamin = rec_time.total_seconds()/60.0
            elif used_col == 'CT_TIME':
                rec_time = rec_date - ct_date
                tdeltamin = rec_time.total_seconds()/60.0
        return tdeltamin, used_col
def _calculate_ct_time(self, hospital_date, ct_date):
""" The function calculating door to CT date time in minutes.
:param hospital_date: the date of hospitalization
:type hospital_date: timestamp
:param ct_date: the date when the CT/MRI was performed
:type ct_date: timestamp
:returns: 1 if datetime > 0 and < 60, else returns 2
"""
ct_diff = ct_date - hospital_date
tdeltamin = ct_diff.total_seconds()/60.0
if tdeltamin < 0 or tdeltamin > 60:
return 2
else:
return 1
def prepare_df(self, df, name):
""" The function preparing the raw data from the database to be used for statistic calculation. The prepared dataframe is entered into dict_df and the name is used as key.
:param df: the raw dataframe exported from the database
:type df: pandas dataframe
:param name: the name of the database
:type name: str
"""
if name == 'slovakia':
res = df.copy()
# Remove _en suffix from column names
cols = res.columns
new_cols = []
for c in cols:
if c == 'anonym':
new_cols.append("Protocol ID")
elif c == 'subject_id':
new_cols.append("Subject ID")
else:
new_cols.append(c.upper())
res.rename(columns=dict(zip(res.columns[0:], new_cols)), inplace=True)
# Calculate the needle time in the minutes from hospital date and needle time. If hospital date is > needle time then as hospital time ct time is used
res['NEEDLE_TIME_MIN'], res['USED_COL'] = zip(*res.apply(lambda x: self._calculate_time(x['CT_TIME'], x['HOSPITAL_DATE'], x['NEEDLE_TIME']) if x['NEEDLE_TIME'].date else (np.nan, None), axis=1))
# Calculate the groin time in the minutes from hospital date and groin time. If hospital date is > groin time then as hospital time ct time is used
res['GROIN_TIME_MIN'], res['USED_COL'] = zip(*res.apply(lambda x: self._calculate_time(x['CT_TIME'], x['HOSPITAL_DATE'], x['GROIN_TIME'], x['USED_COL']) if x['GROIN_TIME'].date else (np.nan, None), axis=1))
# Get values if CT was performed within 1 hour after admission or after
res['CT_TIME_WITHIN'] = res.apply(lambda x: self._calculate_ct_time(x['HOSPITAL_DATE'], x['CT_TIME']) if x['CT_MRI'] == 2 else np.nan, axis=1)
res.drop(['USED_COL'], inplace=True, axis=1)
res.rename(columns={'DOOR_TO_NEEDLE': 'DOOR_TO_NEEDLE_OLD', 'NEEDLE_TIME_MIN': 'DOOR_TO_NEEDLE', 'DOOR_TO_GROIN': 'DOOR_TO_GROIN_OLD', 'GROIN_TIME_MIN': 'DOOR_TO_GROIN', 'CT_TIME': 'CT_DATE', 'CT_TIME_WITHIN': 'CT_TIME'}, inplace=True)
logging.info("CalculationSK: Connection: Column names in Slovakia were changed successfully.")
self.dict_df[name] = res
elif name == 'slovakia_2018':
res = df.copy()
# Remove _en suffix from column names
cols = res.columns
new_cols = []
for c in cols:
if c == 'anonym':
new_cols.append("Protocol ID")
elif c == 'subject_id':
new_cols.append("Subject ID")
else:
new_cols.append(c.upper())
res.rename(columns=dict(zip(res.columns[0:], new_cols)), inplace=True)
# Calculate the needle time in the minutes from hospital date and needle time. If hospital date is > needle time then as hospital time ct time is used
res['NEEDLE_TIME_MIN'], res['USED_COL'] = zip(*res.apply(lambda x: self._calculate_time(x['CT_TIME'], x['HOSPITAL_DATE'], x['NEEDLE_TIME']) if x['NEEDLE_TIME'].date else (np.nan, None), axis=1))
# Calculate the groin time in the minutes from hospital date and groin time. If hospital date is > groin time then as hospital time ct time is used
res['GROIN_TIME_MIN'], res['USED_COL'] = zip(*res.apply(lambda x: self._calculate_time(x['CT_TIME'], x['HOSPITAL_DATE'], x['GROIN_TIME'], x['USED_COL']) if x['GROIN_TIME'].date else (np.nan, None), axis=1))
# Get values if CT was performed within 1 hour after admission or after
res['CT_TIME_WITHIN'] = res.apply(lambda x: self._calculate_ct_time(x['HOSPITAL_DATE'], x['CT_TIME']) if x['CT_MRI'] == 2 else np.nan, axis=1)
res.drop(['USED_COL'], inplace=True, axis=1)
res.rename(columns={'DOOR_TO_NEEDLE': 'DOOR_TO_NEEDLE_OLD', 'NEEDLE_TIME_MIN': 'DOOR_TO_NEEDLE', 'DOOR_TO_GROIN': 'DOOR_TO_GROIN_OLD', 'GROIN_TIME_MIN': 'DOOR_TO_GROIN', 'CT_TIME': 'CT_DATE', 'CT_TIME_WITHIN': 'CT_TIME'}, inplace=True)
logging.info("Connection: Column names in Slovakia_2018 were changed successfully.")
self.dict_df[name] = res
def _get_countries(self, df):
""" The function obtaining all possible countries in the dataframe.
:param df: the preprossed dataframe
:type df: pandas dataframe
:returns: the list of countries
"""
# site_ids = df['Protocol ID'].apply(lambda x: pd.Series(str(x).split("_")))
# countries_list = list(set(site_ids[0]))
countries_list = ['SK']
logging.info("calculationSK: Data: Countries in the dataset: {0}.".format(countries_list))
return countries_list
class FilterDataset:
    """ The class filtering preprocessed data by country or by date.
    :param df: the preprocessed dataframe
    :type df: pandas dataframe
    :param country: the country code of country included in the resulted dataframe
    :type country: str
    :param date1: the first date included in the filtered dataframe
    :type date1: date object
    :param date2: the last date included in the filtered dataframe
    :type date2: date object
    """
    def __init__(self, df, country=None, date1=None, date2=None):
        # Log to the same per-day file used by the other classes in this module.
        debug = 'debug_' + datetime.now().strftime('%d-%m-%Y') + '.log'
        log_file = os.path.join(os.getcwd(), debug)
        logging.basicConfig(filename=log_file,
                            filemode='a',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
        # Work on a copy so the caller's dataframe is never mutated.
        self.fdf = df.copy()
        self.country = country
        self.date1 = date1
        self.date2 = date2
        if self.country is not None:
            # Append "_" to the country code, because e.g. ES_MD was included in dataset for MD as well.
            # NOTE(review): this assigns a *local* that is never read;
            # _filter_by_country uses self.country, so the "_" suffix is
            # effectively dropped — confirm intended behaviour before fixing.
            country = self.country + "_"
            self.fdf = self._filter_by_country()
            logging.info('CalculationSK: FilterDataset: Data have been filtered for country {0}!'.format(self.country))
        if self.date1 is not None and self.date2 is not None:
            self.fdf = self._filter_by_date()
            logging.info('CalculationSK: FilterDataset: Data have been filtered for date {0} - {1}!'.format(self.date1, self.date2))
    def _filter_by_country(self):
        """ The function filtering the dataframe by country.
        :returns: filtered dataframe including only rows belongs to the country
        """
        df = self.fdf[self.fdf['Protocol ID'].str.startswith(self.country) == True]
        return df
    def _filter_by_date(self):
        """ The function filtering the dataframe by time period.

        Note: normalizes self.date1/self.date2 in place from datetime to
        date before comparing against DISCHARGE_DATE.
        :returns: filtered dataframe including only rows where discharge date is between date1 and date2
        """
        if isinstance(self.date1, datetime):
            self.date1 = self.date1.date()
        if isinstance(self.date2, datetime):
            self.date2 = self.date2.date()
        df = self.fdf[(self.fdf['DISCHARGE_DATE'] >= self.date1) & (self.fdf['DISCHARGE_DATE'] <= self.date2)].copy()
        return df
class GeneratePreprocessedData:
    """ The class generating the preprocessed data and legend data in the excel file.
    :param df: the preprocessed data dataframe
    :type df: pandas dataframe
    :param split_sites: True if for each site should be generated individual reports including whole country
    :type split_sites: bool
    :param site: site ID
    :type site: str
    :param report: the type of the report (quater, year, half)
    :type report: str
    :param quarter: the type of the period (Q1_2019, H1_2019, ...)
    :type quarter: str
    """
    def __init__(self, df, split_sites=False, site=None, report=None, quarter=None, country_code=None):
        debug = 'debug_' + datetime.now().strftime('%d-%m-%Y') + '.log'
        log_file = os.path.join(os.getcwd(), debug)
        logging.basicConfig(filename=log_file,
                            filemode='a',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
        self.df = df
        self.split_sites = split_sites
        self.report = report
        self.quarter = quarter
        self.country_code = country_code
        # NOTE(review): per-site generation driven by `site`/`split_sites`
        # was commented out in the original; all data go into one file.
        self._generate_preprocessed_data(self.df, site_code=None)
        logging.info('CalculationSK: Preprocessed data: The preprocessed data were generate for all data.')
    @staticmethod
    def convert_to_string(datetime, format):
        """ The function converting the date, timestamp or time to the string.

        Declared as a static method (fix): the original defined it without
        ``self`` and the callers referenced the bare name, which raised
        NameError at runtime; it is now invoked via ``self.convert_to_string``.
        :param datetime: the date, timestamp or time value to be converted
            (the name shadows the datetime module inside this function)
        :type datetime: date/timestamp/time
        :param format: the format of the date, timestamp or time
        :type format: the string
        :returns: the datetime argument in the string
        """
        if datetime is None or datetime is np.nan:
            return datetime
        else:
            return datetime.strftime(format)
    def _generate_preprocessed_data(self, df, site_code):
        """ The function creating the workbook and generating the preprocessed data in the excel file.
        :param df: the dataframe with preprocessed data
        :type df: pandas dataframe
        :param site_code: the site code if split sites is True
        :type site_code: str
        """
        # Output name: <report>_<site or country>_<quarter>_preprocessed_data.xlsx
        if site_code is not None:
            output_file = self.report + "_" + site_code + "_" + self.quarter + "_preprocessed_data.xlsx"
        else:
            output_file = self.report + "_" + self.country_code + "_" + self.quarter + "_preprocessed_data.xlsx"
        # Copy so the stringification below does not alter the caller's data.
        df = df.copy()
        # Timestamp format used for the exported date columns.
        timestamp = "%Y-%m-%d %H:%M"
        # Stringify the datetime columns (fix: call through the class — the
        # original used the bare name `convert_to_string`, a NameError).
        df['HOSPITAL_DATE'] = df.apply(lambda x: self.convert_to_string(x['HOSPITAL_DATE'], timestamp), axis=1)
        df['DISCHARGE_DATE'] = df.apply(lambda x: self.convert_to_string(x['DISCHARGE_DATE'], timestamp), axis=1)
        df.fillna(value="", inplace=True)
        workbook = xlsxwriter.Workbook(output_file)
        logging.info('Preprocessed data: The workbook was created.')
        preprocessed_data_sheet = workbook.add_worksheet('Preprocessed_raw_data')
        ### PREPROCESSED DATA ###
        preprocessed_data = df.values.tolist()
        # Set width of columns
        preprocessed_data_sheet.set_column(0, 150, 30)
        ncol = len(df.columns) - 1
        nrow = len(df)
        # One header entry per column for the worksheet table.
        col = [{'header': header} for header in df.columns.tolist()]
        options = {'data': preprocessed_data,
                   'header_row': True,
                   'columns': col,
                   'style': 'Table Style Light 1'
                   }
        preprocessed_data_sheet.add_table(0, 0, nrow, ncol, options)
        logging.info('Preprocessed data: The sheet "Preprocessed data" was added.')
        workbook.close()
class ComputeStats:
""" The class calculating the statistics from Slovakia data.
:param df: the dataframe with preprocessed data
:type df: pandas dataframe
:param country: `True` if country should be included as site into results
:type country: bool
:param country_code: the country code of country
:type country_code: str
:param comparison: `True` if comparison statistic is generated
:type comparison: bool
"""
def __init__(self, df, country = False, country_code = "", comparison=False):
self.df = df.copy()
self.df.fillna(0, inplace=True)
# def get_country_name(value):
# """ The function obtaining the country name for the given country code.
# :param value: the country code
# :type value: str
# :returns: country name
# """
# if value == "UZB":
# value = 'UZ'
# country_name = pytz.country_names[value]
# return country_name
# if comparison == False:
# self.df['Protocol ID'] = self.df.apply(lambda row: row['Protocol ID'].split()[2] if (len(row['Protocol ID'].split()) == 3) else row['Protocol ID'].split()[0], axis=1)
# # uncomment if you want stats between countries and set comparison == True
# #self.df['Protocol ID'] = self.df.apply(lambda x: x['Protocol ID'].split("_")[0], axis=1)
# # If you want to compare, instead of Site Names will be Country names.
# if comparison:
# if self.df['Protocol ID'].dtype == np.object:
# self.df['Site Name'] = self.df.apply(lambda x: get_country_name(x['Protocol ID']) if get_country_name(x['Protocol ID']) != "" else x['Protocol ID'], axis=1)
# if (country):
# country = self.df.copy()
# self.country_name = pytz.country_names[country_code]
# country['Protocol ID'] = self.country_name
# country['Site Name'] = self.country_name
# self.df = pd.concat([self.df, country])
# else:
# self.country_name = ""
# if comparison == False:
# self.statsDf = self.df.groupby(['Protocol ID']).size().reset_index(name="Total Patients")
# self.statsDf['Site Name'] = 'Slovakia'
# self.statsDf = self.statsDf[['Protocol ID', 'Site Name', 'Total Patients']]
# else:
# self.statsDf = self.df.groupby(['Protocol ID', 'Site Name']).size().reset_index(name="Total Patients")
self.statsDf = self.df.groupby(['Protocol ID']).size().reset_index(name="Total Patients")
self.statsDf['Median patient age'] = self.df.groupby(['Protocol ID']).AGE.agg(['median']).rename(columns={'median': 'Median patient age'})['Median patient age'].tolist()
self.df.drop(['ANTITHROMBOTICS'], inplace=True, axis=1)
# get patietns with ischemic stroke (ISch) (1)
isch = self.df[self.df['STROKE_TYPE'].isin([1])]
self.statsDf['isch_patients'] = self._count_patients(dataframe=isch)
# get patietns with ischemic stroke (IS), intracerebral hemorrhage (ICH), transient ischemic attack (TIA) or cerebral venous thrombosis (CVT) (1, 2, 3, 5)
is_ich_tia_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 3, 5])]
self.statsDf['is_ich_tia_cvt_patients'] = self._count_patients(dataframe=is_ich_tia_cvt)
# get patietns with ischemic stroke (IS), intracerebral hemorrhage (ICH), or cerebral venous thrombosis (CVT) (1, 2, 5)
is_ich_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 5])]
self.statsDf['is_ich_cvt_patients'] = self._count_patients(dataframe=is_ich_cvt)
# Get dataframe with patients who had ischemic stroke (IS) or intracerebral hemorrhage (ICH)
is_ich = self.df[self.df['STROKE_TYPE'].isin([1,2])]
self.statsDf['is_ich_patients'] = self._count_patients(dataframe=is_ich)
# get patietns with ischemic stroke (IS) and transient ischemic attack (TIA) (1, 3)
is_tia = self.df[self.df['STROKE_TYPE'].isin([1, 3])]
self.statsDf['is_tia_patients'] = self._count_patients(dataframe=is_tia)
# get patietns with ischemic stroke (IS), intracerebral hemorrhage (ICH), subarrachnoid hemorrhage (SAH) or cerebral venous thrombosis (CVT) (1, 2, 4, 5)
is_ich_sah_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 4, 5])]
self.statsDf['is_ich_sah_cvt_patients'] = self._count_patients(dataframe=is_ich_sah_cvt)
# get patietns with ischemic stroke (IS), transient ischemic attack (TIA) or cerebral venous thrombosis (CVT) (1, 3, 5)
is_tia_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 3, 5])]
self.statsDf['is_tia_cvt_patients'] = self._count_patients(dataframe=is_tia_cvt)
# get patients with cerebral venous thrombosis (CVT) (5)
cvt = self.df[self.df['STROKE_TYPE'].isin([5])]
self.statsDf['cvt_patients'] = self._count_patients(dataframe=cvt)
# get patietns with intracerebral hemorrhage (ICH) and subarrachnoid hemorrhage (SAH) (2, 4)
ich_sah = self.df[self.df['STROKE_TYPE'].isin([2, 4])]
self.statsDf['ich_sah_patients'] = self._count_patients(dataframe=ich_sah)
# get patietns with intracerebral hemorrhage (ICH) (2)
ich = self.df[self.df['STROKE_TYPE'].isin([2])]
self.statsDf['ich_patients'] = self._count_patients(dataframe=ich)
# get patietns with subarrachnoid hemorrhage (SAH) (4)
sah = self.df[self.df['STROKE_TYPE'].isin([4])]
self.statsDf['sah_patients'] = self._count_patients(dataframe=sah)
# create subset with no referrals (RECANALIZATION_PROCEDURE != [5,6]) AND (HEMICRANIECTOMY != 3)
discharge_subset = self.df[~self.df['RECANALIZATION_PROCEDURES'].isin([5, 6])]
self.statsDf['discharge_subset_patients'] = self._count_patients(dataframe=discharge_subset)
# Create discharge subset alive
discharge_subset_alive = self.df[~self.df['DISCHARGE_DESTINATION'].isin([5])]
self.statsDf['discharge_subset_alive_patients'] = self._count_patients(dataframe=discharge_subset_alive)
##########
# GENDER #
##########
# self.tmp = self.df.groupby(['Protocol ID', 'GENDER']).size().to_frame('count').reset_index()
# self.statsDf = self._get_values_for_factors(column_name="GENDER", value=2, new_column_name='# patients female')
# self.statsDf['% patients female'] = self.statsDf.apply(lambda x: round(((x['# patients female']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
# self.statsDf = self._get_values_for_factors(column_name="GENDER", value=1, new_column_name='# patients male')
# self.statsDf['% patients male'] = self.statsDf.apply(lambda x: round(((x['# patients male']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
###################
# HOSPITALIZED IN #
###################
self.tmp = self.df.groupby(['Protocol ID', 'HOSPITALIZED_IN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=1, new_column_name='# patients hospitalized in stroke unit / ICU')
self.statsDf['% patients hospitalized in stroke unit / ICU'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in stroke unit / ICU']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=2, new_column_name='# patients hospitalized in monitored bed with telemetry')
self.statsDf['% patients hospitalized in monitored bed with telemetry'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in monitored bed with telemetry']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=3, new_column_name='# patients hospitalized in standard bed')
self.statsDf['% patients hospitalized in standard bed'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in standard bed']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
###############
# STROKE TYPE #
###############
self.tmp = self.df.groupby(['Protocol ID', 'STROKE_TYPE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=1, new_column_name='# stroke type - ischemic stroke')
self.statsDf['% stroke type - ischemic stroke'] = self.statsDf.apply(lambda x: round(((x['# stroke type - ischemic stroke']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=2, new_column_name='# stroke type - intracerebral hemorrhage')
self.statsDf['% stroke type - intracerebral hemorrhage'] = self.statsDf.apply(lambda x: round(((x['# stroke type - intracerebral hemorrhage']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=3, new_column_name='# stroke type - transient ischemic attack')
self.statsDf['% stroke type - transient ischemic attack'] = self.statsDf.apply(lambda x: round(((x['# stroke type - transient ischemic attack']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=4, new_column_name='# stroke type - subarrachnoid hemorrhage')
self.statsDf['% stroke type - subarrachnoid hemorrhage'] = self.statsDf.apply(lambda x: round(((x['# stroke type - subarrachnoid hemorrhage']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=5, new_column_name='# stroke type - cerebral venous thrombosis')
self.statsDf['% stroke type - cerebral venous thrombosis'] = self.statsDf.apply(lambda x: round(((x['# stroke type - cerebral venous thrombosis']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=6, new_column_name='# stroke type - undetermined stroke')
self.statsDf['% stroke type - undetermined stroke'] = self.statsDf.apply(lambda x: round(((x['# stroke type - undetermined stroke']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
#########
# NIHSS #
#########
# Get Median of NIHSS score
# tmpDf = is_ich_cvt.groupby(['Protocol ID']).NIHSS_SCORE.agg(['median']).rename(columns={'median': 'NIHSS median score'})
# factorDf = self.statsDf.merge(tmpDf, how='outer', left_on='Protocol ID', right_on='Protocol ID')
# factorDf.fillna(0, inplace=True)
# self.statsDf['NIHSS median score'] = factorDf['NIHSS median score']
##########
# CT/MRI #
##########
self.tmp = is_ich_tia_cvt.groupby(['Protocol ID', 'CT_MRI']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=3, new_column_name='# CT/MRI - In other hospital')
self.statsDf['% CT/MRI - In other hospital'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - In other hospital']/x['is_ich_tia_cvt_patients']) * 100), 2) if x['is_ich_tia_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=2, new_column_name='# CT/MRI - performed')
self.statsDf['% CT/MRI - performed'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - performed']/(x['is_ich_tia_cvt_patients'] - x['# CT/MRI - In other hospital'])) * 100), 2) if (x['is_ich_tia_cvt_patients'] - x['# CT/MRI - In other hospital']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=1, new_column_name='# CT/MRI - Not performed')
self.statsDf['% CT/MRI - Not performed'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Not performed']/(x['is_ich_tia_cvt_patients'] - x['# CT/MRI - In other hospital'])) * 100), 2) if (x['is_ich_tia_cvt_patients'] - x['# CT/MRI - In other hospital']) > 0 else 0, axis=1)
# Get CT/MRI performed subset
ct_mri = is_ich_tia_cvt[is_ich_tia_cvt['CT_MRI'].isin([2])]
self.tmp = ct_mri.groupby(['Protocol ID', 'CT_TIME']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CT_TIME", value=1, new_column_name='# CT/MRI - Performed within 1 hour after admission')
self.statsDf['% CT/MRI - Performed within 1 hour after admission'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Performed within 1 hour after admission']/x['# CT/MRI - performed']) * 100), 2) if x['# CT/MRI - performed'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_TIME", value=2, new_column_name='# CT/MRI - Performed later than 1 hour after admission')
self.statsDf['% CT/MRI - Performed later than 1 hour after admission'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Performed later than 1 hour after admission']/x['# CT/MRI - performed']) * 100), 2) if x['# CT/MRI - performed'] > 0 else 0, axis=1)
#############################
# RECANALIZATION PROCEDURES #
#############################
# Filter negative or too high door to needle times
needle = isch.loc[(isch['DOOR_TO_NEEDLE'] < 0) | (isch['DOOR_TO_NEEDLE'] > 400)].copy()
# Filter negative and too high door to groin time
groin = isch.loc[(isch['DOOR_TO_NEEDLE'] == 0) & ((isch['DOOR_TO_GROIN'] < 0) | (isch['DOOR_TO_GROIN'] > 700))].copy()
number_of_patients = len(needle.index.values) + len(groin.index.values)
recan_tmp = isch.drop(needle.index.values)
recan_tmp.drop(groin.index.values, inplace=True)
self.tmp = recan_tmp.groupby(['Protocol ID', 'RECANALIZATION_PROCEDURES']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=1, new_column_name='# recanalization procedures - Not done')
self.statsDf['% recanalization procedures - Not done'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Not done']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=2, new_column_name='# recanalization procedures - IV tPa')
self.statsDf['% recanalization procedures - IV tPa'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=3, new_column_name='# recanalization procedures - IV tPa + endovascular treatment')
self.statsDf['% recanalization procedures - IV tPa + endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa + endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=4, new_column_name='# recanalization procedures - Endovascular treatment alone')
self.statsDf['% recanalization procedures - Endovascular treatment alone'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Endovascular treatment alone']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=5, new_column_name='# recanalization procedures - IV tPa + referred to another centre for endovascular treatment')
self.statsDf['% recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=6, new_column_name='# recanalization procedures - Referred to another centre for endovascular treatment')
self.statsDf['% recanalization procedures - Referred to another centre for endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred to another centre for endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=7, new_column_name='# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre')
self.statsDf['% recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=8, new_column_name='# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre')
self.statsDf['% recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=9, new_column_name='# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre')
self.statsDf['% recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf['# patients recanalized'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'], axis=1)
self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']) > 0 else 0, axis=1)
##############
# MEDIAN DTN #
##############
# Get patients receiving IV tpa
self.statsDf.loc[:, '# IV tPa'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'], axis=1)
self.statsDf['% IV tPa'] = self.statsDf.apply(lambda x: round(((x['# IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Get only patients recanalized
# recanalization_procedure_iv_tpa = isch.loc[(isch['RECANALIZATION_PROCEDURES'].isin([2, 3, 5])) & (isch['DOOR_TO_NEEDLE'] > 0) & (isch['DOOR_TO_NEEDLE'] <= 400)]
recanalization_procedure_iv_tpa = isch[isch['RECANALIZATION_PROCEDURES'].isin([2, 3, 5])].copy()
recanalization_procedure_iv_tpa.fillna(0, inplace=True)
recanalization_procedure_iv_tpa['IVTPA'] = recanalization_procedure_iv_tpa['DOOR_TO_NEEDLE']
tmp = recanalization_procedure_iv_tpa.groupby(['Protocol ID']).IVTPA.agg(['median']).rename(columns={'median': 'Median DTN (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
##############
# MEDIAN DTG #
##############
# Get patients receiving TBY
self.statsDf.loc[:, '# TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + endovascular treatment'], axis=1)
self.statsDf['% TBY'] = self.statsDf.apply(lambda x: round(((x['# TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Get only patients recanalized TBY
recanalization_procedure_tby_dtg = isch[isch['RECANALIZATION_PROCEDURES'].isin([4, 3])].copy()
#recanalization_procedure_tby_dtg = isch.loc[(isch['RECANALIZATION_PROCEDURES'].isin([4, 3])) & (isch['DOOR_TO_GROIN'] > 0) & (isch['DOOR_TO_GROIN'] <= 700)]
recanalization_procedure_tby_dtg.fillna(0, inplace=True)
recanalization_procedure_tby_dtg['TBY'] = recanalization_procedure_tby_dtg['DOOR_TO_GROIN']
tmp = recanalization_procedure_tby_dtg.groupby(['Protocol ID']).TBY.agg(['median']).rename(columns={'median': 'Median DTG (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
#######################
# DYPSHAGIA SCREENING #
#######################
self.tmp = is_ich_cvt.groupby(['Protocol ID', 'DYSPHAGIA_SCREENING']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=6, new_column_name='# dysphagia screening - not known')
self.statsDf['% dysphagia screening - not known'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - not known']/x['is_ich_cvt_patients']) * 100), 2) if x['is_ich_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=1, new_column_name='# dysphagia screening - Guss test')
self.statsDf['% dysphagia screening - Guss test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Guss test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=2, new_column_name='# dysphagia screening - Other test')
self.statsDf['% dysphagia screening - Other test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Other test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=3, new_column_name='# dysphagia screening - Another centre')
self.statsDf['% dysphagia screening - Another centre'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Another centre']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=4, new_column_name='# dysphagia screening - Not done')
self.statsDf['% dysphagia screening - Not done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Not done']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=5, new_column_name='# dysphagia screening - Unable to test')
self.statsDf['% dysphagia screening - Unable to test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Unable to test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf['# dysphagia screening done'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test'] + self.statsDf['# dysphagia screening - Another centre']
self.statsDf['% dysphagia screening done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening done']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
############################
# DYPSHAGIA SCREENING TIME #
############################
self.tmp = self.df.groupby(['Protocol ID', 'DYSPHAGIA_SCREENING_TIME']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING_TIME", value=1, new_column_name='# dysphagia screening time - Within first 24 hours')
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING_TIME", value=2, new_column_name='# dysphagia screening time - After first 24 hours')
self.statsDf['% dysphagia screening time - Within first 24 hours'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening time - Within first 24 hours']/(x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours'])) * 100), 2) if (x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours']) > 0 else 0, axis=1)
self.statsDf['% dysphagia screening time - After first 24 hours'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening time - After first 24 hours']/(x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours'])) * 100), 2) if (x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours']) > 0 else 0, axis=1)
########
# AFIB #
########
# patients not reffered
not_reffered = is_tia[~is_tia['RECANALIZATION_PROCEDURES'].isin([7])].copy()
self.statsDf['not_reffered_patients'] = self._count_patients(dataframe=not_reffered)
# patients referred to another hospital
reffered = is_tia[is_tia['RECANALIZATION_PROCEDURES'].isin([7])].copy()
self.statsDf['reffered_patients'] = self._count_patients(dataframe=reffered)
self.tmp = not_reffered.groupby(['Protocol ID', 'AFIB_FLUTTER']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=1, new_column_name='# afib/flutter - Known')
self.statsDf['% afib/flutter - Known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=2, new_column_name='# afib/flutter - Newly-detected at admission')
self.statsDf['% afib/flutter - Newly-detected at admission'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Newly-detected at admission']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=3, new_column_name='# afib/flutter - Detected during hospitalization')
self.statsDf['% afib/flutter - Detected during hospitalization'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Detected during hospitalization']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=4, new_column_name='# afib/flutter - Not detected')
self.statsDf['% afib/flutter - Not detected'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not detected']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=5, new_column_name='# afib/flutter - Not known')
self.statsDf['% afib/flutter - Not known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
############################
# CAROTID ARTERIES IMAGING #
############################
self.tmp = is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
###############################
# ANTITHROMBOTICS WITHOUT CVT #
###############################
def get_antithrombotics(vals):
    """Collapse the per-drug antithrombotics answers into a single code.

    Per-drug values follow the convention visible in the original logic:
    1 = prescribed, 2 = not prescribed, 0 = value not filled in.

    :param vals: the list of values for antithrombotics
    :type vals: list
    :returns: 1 if any drug was prescribed (we don't care which one),
        2 if at least one drug was explicitly not prescribed and none
        was prescribed, or None when every value is missing (0)
    :rtype: int or None
    """
    unique_vals = set(vals)  # remove duplicate values
    if 1 in unique_vals:
        # At least one antithrombotic prescribed.
        return 1
    if 2 in unique_vals:
        # No antithrombotics prescribed. NOTE: the previous version
        # returned 1 ("prescribed") for a mixture of 0 (missing) and 2
        # (no), because any multi-valued set fell into the "prescribed"
        # branch; only a 1 among the values means prescribed.
        return 2
    # All values missing/unfilled.
    return None
# Collapse the ten per-drug columns into one ANTITHROMBOTICS code per patient.
is_tia.loc[:, 'ANTITHROMBOTICS'] = is_tia.apply(lambda x: get_antithrombotics([x['UKON_WARFARIN'], x['UKON_DABIGATRAN'], x['UKON_RIVAROXABAN'], x['UKON_APIXABAN'], x['UKON_EDOXABAN'], x['UKON_LMW'],x['UKON_ANTIKOAGULANCIA'], x['UKON_HEPARIN_VTE'], x['UKON_ASA'], x['UKON_CLOPIDOGREL']]), axis=1)
# filter not dead patient with ischemic and transient CMP
# (DISCHARGE_DESTINATION 5 = Dead, per the discharge-destination section below)
antithrombotics = is_tia[~is_tia['DISCHARGE_DESTINATION'].isin([5])].copy()
# calculate antithrombotics df patients
self.statsDf['antithrombotics_patients'] = self._count_patients(dataframe=antithrombotics)
# Filter dead patients with ischemic and transient CMP
ischemic_transient_dead = is_tia[is_tia['DISCHARGE_DESTINATION'].isin([5])].copy()
# Count patients
self.statsDf['ischemic_transient_dead_patients'] = self._count_patients(dataframe=ischemic_transient_dead)
# Dead patients who nevertheless had antithrombotics prescribed; used later
# to correct the "prescribed or recommended" numerator.
ischemic_transient_dead_prescribed = is_tia[is_tia['DISCHARGE_DESTINATION'].isin([5]) & is_tia['ANTITHROMBOTICS'].isin([1])].copy()
self.statsDf['ischemic_transient_dead_patients_prescribed'] = self._count_patients(dataframe=ischemic_transient_dead_prescribed)
# Calculate antiplatelets (ASA and clopidogrel)
# 2 ("no") only when BOTH ASA and clopidogrel are 2; otherwise 1.
antithrombotics['ANTIPLATELETS'] = antithrombotics.apply(lambda x: 2 if x['UKON_ASA'] == 2 and x['UKON_CLOPIDOGREL'] == 2 else 1, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'ANTIPLATELETS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIPLATELETS", value=1, new_column_name='# patients receiving antiplatelets')
# Denominator for all the percentages below: IS/TIA patients who survived.
self.statsDf['% patients receiving antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# patients receiving antiplatelets']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
# Per-drug counts; the matching % columns are intentionally commented out
# here (only the # columns are needed, and they are dropped again below).
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_WARFARIN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_WARFARIN", value=1, new_column_name='# patients receiving Vit. K antagonist')
# self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_DABIGATRAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_DABIGATRAN", value=1, new_column_name='# patients receiving dabigatran')
# self.statsDf['% patients receiving dabigatran'] = self.statsDf.apply(lambda x: round(((x['# patients receiving dabigatran']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_RIVAROXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_RIVAROXABAN", value=1, new_column_name='# patients receiving rivaroxaban')
# self.statsDf['% patients receiving rivaroxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving rivaroxaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_APIXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_APIXABAN", value=1, new_column_name='# patients receiving apixaban')
# self.statsDf['% patients receiving apixaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving apixaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_EDOXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_EDOXABAN", value=1, new_column_name='# patients receiving edoxaban')
# self.statsDf['% patients receiving edoxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving edoxaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_HEPARIN_VTE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_HEPARIN_VTE", value=1, new_column_name='# patients receiving LMWH or heparin in prophylactic dose')
# self.statsDf['% patients receiving LMWH or heparin in prophylactic dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in prophylactic dose']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
# Combined LMWH/anticoagulant flag: 2 ("no") only when both are 2.
antithrombotics['UKON_LMW_ANTICOAGULACNI'] = antithrombotics.apply(lambda x: 2 if x['UKON_LMW'] == 2 and x['UKON_ANTIKOAGULANCIA'] == 2 else 1, axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'UKON_LMW_ANTICOAGULACNI']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_LMW_ANTICOAGULACNI", value=1, new_column_name='# patients receiving LMWH or heparin in full anticoagulant dose')
# self.statsDf['% patients receiving LMWH or heparin in full anticoagulant dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in full anticoagulant dose']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
# "Recommended but not prescribed" is not captured in this dataset, so the
# counts are hard-coded to 0 (the _get_values_for_factors calls are kept
# commented out for reference).
self.statsDf['# patients not prescribed antithrombotics, but recommended'] = 0
# self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=9, new_column_name='# patients not prescribed antithrombotics, but recommended')
self.statsDf['% patients not prescribed antithrombotics, but recommended'] = self.statsDf.apply(lambda x: round(((x['# patients not prescribed antithrombotics, but recommended']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf['# patients neither receiving antithrombotics nor recommended'] = 0
# self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=10, new_column_name='# patients neither receiving antithrombotics nor recommended')
self.statsDf['% patients neither receiving antithrombotics nor recommended'] = self.statsDf.apply(lambda x: round(((x['# patients neither receiving antithrombotics nor recommended']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
## ANTITHROMBOTICS - PATIENTS PRESCRIBED + RECOMMENDED
# self.statsDf.loc[:, '# patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: x['# patients receiving antiplatelets'] + x['# patients receiving Vit. K antagonist'] + x['# patients receiving dabigatran'] + x['# patients receiving rivaroxaban'] + x['# patients receiving apixaban'] + x['# patients receiving edoxaban'] + x['# patients receiving LMWH or heparin in prophylactic dose'] + x['# patients receiving LMWH or heparin in full anticoagulant dose'], axis=1)
self.tmp = antithrombotics.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# patients prescribed antithrombotics')
# self.statsDf['% patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics']/(x['is_tia_cvt_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended'])) * 100), 2) if (x['is_tia_cvt_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended']) > 0 else 0, axis=1)
self.statsDf['% patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# patients prescribed or recommended antithrombotics')
# From patients prescribed or recommended antithrombotics remove patient who had prescribed antithrombotics and were dead (nominator)
# self.statsDf['% patients prescribed or recommended antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed or recommended antithrombotics'] - x['ischemic_transient_dead_patients_prescribed'])/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended'])) * 100, 2) if ((x['is_tia_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended']) > 0) else 0, axis=1)
self.statsDf['% patients prescribed or recommended antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed or recommended antithrombotics'] - x['ischemic_transient_dead_patients_prescribed'])/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100, 2) if ((x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0) else 0, axis=1)
# The per-drug # columns were only intermediates; they are recreated with
# different denominators in the aFib section below, so drop them here.
self.statsDf.drop(['# patients receiving Vit. K antagonist', '# patients receiving dabigatran', '# patients receiving rivaroxaban', '# patients receiving apixaban', '# patients receiving edoxaban', '# patients receiving LMWH or heparin in prophylactic dose','# patients receiving LMWH or heparin in full anticoagulant dose'], axis=1, inplace=True)
self.statsDf.fillna(0, inplace=True)
###########################################
# ANTIPLATELETS - PRESCRIBED WITHOUT AFIB #
###########################################
# Collapse ASA + clopidogrel into a single ANTIPLATELETS code.
is_tia['ANTIPLATELETS'] = is_tia.apply(lambda x: get_antithrombotics([x['UKON_ASA'], x['UKON_CLOPIDOGREL']]), axis=1)
# patients not referred
# (AFIB_FLUTTER 4 = Not detected, 5 = Not known — see the afib section above)
afib_flutter_not_detected_or_not_known = is_tia[is_tia['AFIB_FLUTTER'].isin([4, 5])].copy()
self.statsDf['afib_flutter_not_detected_or_not_known_patients'] = self._count_patients(dataframe=afib_flutter_not_detected_or_not_known)
afib_flutter_not_detected_or_not_known_dead = afib_flutter_not_detected_or_not_known[afib_flutter_not_detected_or_not_known['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_not_detected_or_not_known_dead_patients'] = self._count_patients(dataframe=afib_flutter_not_detected_or_not_known_dead)
prescribed_antiplatelets_no_afib = afib_flutter_not_detected_or_not_known[afib_flutter_not_detected_or_not_known['ANTIPLATELETS'].isin([1])].copy()
self.statsDf['prescribed_antiplatelets_no_afib_patients'] = self._count_patients(dataframe=prescribed_antiplatelets_no_afib)
prescribed_antiplatelets_no_afib_dead = prescribed_antiplatelets_no_afib[prescribed_antiplatelets_no_afib['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['prescribed_antiplatelets_no_afib_dead_patients'] = self._count_patients(dataframe=prescribed_antiplatelets_no_afib_dead)
self.tmp = afib_flutter_not_detected_or_not_known.groupby(['Protocol ID', 'ANTIPLATELETS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIPLATELETS", value=1, new_column_name='# patients prescribed antiplatelets without aFib')
# Numerator and denominator both exclude dead patients.
self.statsDf['% patients prescribed antiplatelets without aFib'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antiplatelets without aFib'] - x['prescribed_antiplatelets_no_afib_dead_patients'])/(x['afib_flutter_not_detected_or_not_known_patients'] - x['afib_flutter_not_detected_or_not_known_dead_patients'])) * 100, 2) if ((x['afib_flutter_not_detected_or_not_known_patients'] - x['afib_flutter_not_detected_or_not_known_dead_patients']) > 0) else 0, axis=1)
#########################################
# ANTICOAGULANTS - PRESCRIBED WITH AFIB #
#########################################
# patients not referred
# (AFIB_FLUTTER 1-3 = aFib/flutter detected in some form — TODO confirm
# the exact meaning of each code against the form definition)
afib_flutter_detected = is_tia[is_tia['AFIB_FLUTTER'].isin([1, 2, 3])].copy()
self.statsDf['afib_flutter_detected_patients'] = self._count_patients(dataframe=afib_flutter_detected)
# Get patients with prescribed anticoagulants
afib_flutter_detected['ANTICOAGULANTS'] = afib_flutter_detected.apply(lambda x: get_antithrombotics([x['UKON_WARFARIN'], x['UKON_DABIGATRAN'], x['UKON_RIVAROXABAN'], x['UKON_APIXABAN'], x['UKON_EDOXABAN'], x['UKON_LMW'], x['UKON_ANTIKOAGULANCIA'], x['UKON_HEPARIN_VTE']]), axis=1)
afib_flutter_detected_not_dead = afib_flutter_detected[~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_detected_patients_not_dead'] = self._count_patients(dataframe=afib_flutter_detected_not_dead)
# Surviving aFib patients with at least one anticoagulant prescribed.
anticoagulants_prescribed = afib_flutter_detected[afib_flutter_detected['ANTICOAGULANTS'].isin([1]) & ~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['# patients prescribed anticoagulants with aFib'] = self._count_patients(dataframe=anticoagulants_prescribed)
# Per-drug breakdown; each % uses surviving aFib patients as denominator
# (the older per-prescribed-patient denominator is kept commented out).
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_WARFARIN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_WARFARIN", value=1, new_column_name='# patients receiving Vit. K antagonist')
# self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/x['# patients prescribed anticoagulants with aFib']) * 100), 2) if x['# patients prescribed anticoagulants with aFib'] > 0 else 0, axis=1)
self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_DABIGATRAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_DABIGATRAN", value=1, new_column_name='# patients receiving dabigatran')
self.statsDf['% patients receiving dabigatran'] = self.statsDf.apply(lambda x: round(((x['# patients receiving dabigatran']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_RIVAROXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_RIVAROXABAN", value=1, new_column_name='# patients receiving rivaroxaban')
self.statsDf['% patients receiving rivaroxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving rivaroxaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_APIXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_APIXABAN", value=1, new_column_name='# patients receiving apixaban')
self.statsDf['% patients receiving apixaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving apixaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_EDOXABAN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_EDOXABAN", value=1, new_column_name='# patients receiving edoxaban')
self.statsDf['% patients receiving edoxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving edoxaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_HEPARIN_VTE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_HEPARIN_VTE", value=1, new_column_name='# patients receiving LMWH or heparin in prophylactic dose')
self.statsDf['% patients receiving LMWH or heparin in prophylactic dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in prophylactic dose']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
# Combined LMWH/anticoagulant flag: 2 ("no") only when both are 2.
anticoagulants_prescribed['UKON_LMW_ANTICOAGULACNI'] = anticoagulants_prescribed.apply(lambda x: 2 if x['UKON_LMW'] == 2 and x['UKON_ANTIKOAGULANCIA'] == 2 else 1, axis=1)
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'UKON_LMW_ANTICOAGULACNI']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="UKON_LMW_ANTICOAGULACNI", value=1, new_column_name='# patients receiving LMWH or heparin in full anticoagulant dose')
self.statsDf['% patients receiving LMWH or heparin in full anticoagulant dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in full anticoagulant dose']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
# "Recommended" anticoagulants are not captured in this dataset; hard-coded 0.
# anticoagulants_recommended = afib_flutter_detected[afib_flutter_detected['ANTITHROMBOTICS'].isin([9])].copy()
# self.statsDf['anticoagulants_recommended_patients'] = self._count_patients(dataframe=anticoagulants_recommended)
self.statsDf['anticoagulants_recommended_patients'] = 0
afib_flutter_detected_dead = afib_flutter_detected[afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_detected_dead_patients'] = self._count_patients(dataframe=afib_flutter_detected_dead)
self.statsDf['% patients prescribed anticoagulants with aFib'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed anticoagulants with aFib']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients']) > 0 else 0, axis=1)
##########################################
# ANTITHROMBOTICS - PRESCRIBED WITH AFIB #
##########################################
# patients not referred
antithrombotics_prescribed = afib_flutter_detected[afib_flutter_detected['ANTITHROMBOTICS'].isin([1]) & ~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['# patients prescribed antithrombotics with aFib'] = self._count_patients(dataframe=antithrombotics_prescribed)
# "Recommended" is not captured in this dataset; hard-coded 0 so the
# denominator below degenerates to surviving aFib patients.
# recommended_antithrombotics_with_afib_alive = afib_flutter_detected[afib_flutter_detected['ANTITHROMBOTICS'].isin([9]) & ~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
# self.statsDf['recommended_antithrombotics_with_afib_alive_patients'] = self._count_patients(dataframe=recommended_antithrombotics_with_afib_alive)
self.statsDf['recommended_antithrombotics_with_afib_alive_patients'] = 0
self.statsDf['% patients prescribed antithrombotics with aFib'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics with aFib']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'] - x['recommended_antithrombotics_with_afib_alive_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'] - x['recommended_antithrombotics_with_afib_alive_patients']) > 0 else 0, axis=1)
###########
# STATINS #
###########
# Statin prescription breakdown; percentages are out of all IS/TIA patients.
self.tmp = is_tia.groupby(['Protocol ID', 'STATIN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=1, new_column_name='# patients prescribed statins - Yes')
self.statsDf['% patients prescribed statins - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Yes']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=2, new_column_name='# patients prescribed statins - No')
self.statsDf['% patients prescribed statins - No'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - No']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=3, new_column_name='# patients prescribed statins - Not known')
self.statsDf['% patients prescribed statins - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
#########################
# DISCHARGE DESTINATION #
#########################
# DISCHARGE_DESTINATION codes: 1 Home, 2 transferred within the same
# centre, 3 transferred to another centre, 4 social care facility, 5 Dead.
# Percentages are out of the discharge subset (discharge_subset_patients).
self.tmp = discharge_subset.groupby(['Protocol ID', 'DISCHARGE_DESTINATION']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=1, new_column_name='# discharge destination - Home')
self.statsDf['% discharge destination - Home'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Home']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=2, new_column_name='# discharge destination - Transferred within the same centre')
self.statsDf['% discharge destination - Transferred within the same centre'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Transferred within the same centre']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=3, new_column_name='# discharge destination - Transferred to another centre')
self.statsDf['% discharge destination - Transferred to another centre'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Transferred to another centre']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=4, new_column_name='# discharge destination - Social care facility')
self.statsDf['% discharge destination - Social care facility'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Social care facility']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=5, new_column_name='# discharge destination - Dead')
self.statsDf['% discharge destination - Dead'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Dead']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
#######################################
# DISCHARGE DESTINATION - SAME CENTRE #
#######################################
# This whole sub-section is disabled; kept for reference.
# discharge_subset_same_centre = discharge_subset[discharge_subset['DISCHARGE_DESTINATION'].isin([2])].copy()
# self.statsDf['discharge_subset_same_centre_patients'] = self._count_patients(dataframe=discharge_subset_same_centre)
# self.tmp = discharge_subset_same_centre.groupby(['Protocol ID', 'DISCHARGE_SAME_FACILITY']).size().to_frame('count').reset_index()
# self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=1, new_column_name='# transferred within the same centre - Acute rehabilitation')
# self.statsDf['% transferred within the same centre - Acute rehabilitation'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Acute rehabilitation']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
# self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=2, new_column_name='# transferred within the same centre - Post-care bed')
# self.statsDf['% transferred within the same centre - Post-care bed'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Post-care bed']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
# self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=3, new_column_name='# transferred within the same centre - Another department')
# self.statsDf['% transferred within the same centre - Another department'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Another department']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
################
# ANGEL AWARDS #
################
#### TOTAL PATIENTS #####
# Boolean award criterion: site has at least 30 patients.
self.statsDf['# total patients >= 30'] = self.statsDf['Total Patients'] >= 30
#### DOOR TO THROMBOLYSIS THERAPY - MINUTES ####
# Patients eligible for recanalization = every recanalization procedure
# involving IV tPa and/or endovascular treatment ("Not done" excluded;
# the older variant that included it is kept commented out).
# self.statsDf.loc[:, 'patients_eligible_recanalization'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Not done'] + x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'], axis=1)
self.statsDf.loc[:, 'patients_eligible_recanalization'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'], axis=1)
self.statsDf.loc[:, '# patients eligible thrombolysis'] = self.statsDf.apply(lambda x: x['# IV tPa'], axis=1)
self.statsDf.loc[:, '# patients eligible thrombectomy'] = self.statsDf.apply(lambda x: (x['# TBY']), axis=1)
# patients treated with door to recanalization therapy < 60 minutes
# for tby, we are only looking at patients that have had ONLY tby, not tpa + tby, as we would be counting those patients twice (penalizing twice)
# recanalization_procedure_tby_only_dtg = recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['RECANALIZATION_PROCEDURES'].isin([4])]
########### OLD
# RECANALIZATION_PROCEDURES 4 = endovascular treatment alone (TBY only),
# per the "Endovascular treatment alone" columns — TODO confirm code map.
recanalization_procedure_tby_only_dtg = recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['RECANALIZATION_PROCEDURES'].isin([4])]
# Door-to-needle/groin times of 0 are treated as missing, hence the > 0 guard.
recanalization_procedure_iv_tpa_under_60 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 60)]
recanalization_procedure_tby_only_dtg_under_60 = recanalization_procedure_tby_only_dtg.loc[(recanalization_procedure_tby_only_dtg['TBY'] > 0) & (recanalization_procedure_tby_only_dtg['TBY'] <= 60)]
self.statsDf['# patients treated with door to recanalization therapy < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_60)
self.statsDf['% patients treated with door to recanalization therapy < 60 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 60 minutes']/x['patients_eligible_recanalization']) * 100), 2) if x['patients_eligible_recanalization'] > 0 else 0, axis=1)
recanalization_procedure_iv_tpa_under_45 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 45)]
recanalization_procedure_tby_only_dtg_under_45 = recanalization_procedure_tby_only_dtg.loc[(recanalization_procedure_tby_only_dtg['TBY'] > 0) & (recanalization_procedure_tby_only_dtg['TBY'] <= 45)]
self.statsDf['# patients treated with door to recanalization therapy < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_45)
self.statsDf['% patients treated with door to recanalization therapy < 45 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 45 minutes']/x['patients_eligible_recanalization']) * 100), 2) if x['patients_eligible_recanalization'] > 0 else 0, axis=1)
########### OLD
# Door-to-thrombolysis metrics (denominator: # patients eligible thrombolysis).
recanalization_procedure_iv_tpa_under_60 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 60)]
self.statsDf['# patients treated with door to thrombolysis < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60)
self.statsDf['% patients treated with door to thrombolysis < 60 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombolysis < 60 minutes']/x['# patients eligible thrombolysis']) * 100), 2) if x['# patients eligible thrombolysis'] > 0 else 0, axis=1)
del recanalization_procedure_iv_tpa_under_60
recanalization_procedure_iv_tpa_under_45 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 45)]
self.statsDf['# patients treated with door to thrombolysis < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45)
self.statsDf['% patients treated with door to thrombolysis < 45 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombolysis < 45 minutes']/x['# patients eligible thrombolysis']) * 100), 2) if x['# patients eligible thrombolysis'] > 0 else 0, axis=1)
del recanalization_procedure_iv_tpa_under_45
# Door-to-thrombectomy metrics use ALL thrombectomy patients
# (recanalization_procedure_tby_dtg), not the TBY-only subset above.
recanalization_procedure_tby_only_dtg_under_120 = recanalization_procedure_tby_dtg.loc[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 120)]
recanalization_procedure_tby_only_dtg_under_90 = recanalization_procedure_tby_dtg.loc[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 90)]
self.statsDf['# patients treated with door to thrombectomy < 120 minutes'] = self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_120)
self.statsDf['% patients treated with door to thrombectomy < 120 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombectomy < 120 minutes']/x['# patients eligible thrombectomy']) * 100), 2) if x['# patients eligible thrombectomy'] > 0 else 0, axis=1)
del recanalization_procedure_tby_only_dtg_under_120
self.statsDf['# patients treated with door to thrombectomy < 90 minutes'] = self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_90)
self.statsDf['% patients treated with door to thrombectomy < 90 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombectomy < 90 minutes']/x['# patients eligible thrombectomy']) * 100), 2) if x['# patients eligible thrombectomy'] > 0 else 0, axis=1)
del recanalization_procedure_tby_only_dtg_under_90
# self.statsDf['# patients treated with door to recanalization therapy < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_60)
# # self.statsDf['# patients treated with door to recanalization therapy < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60)
# self.statsDf['% patients treated with door to recanalization therapy < 60 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 60 minutes']/x['patients_eligible_recanalization']) * 100), 2) if x['patients_eligible_recanalization'] > 0 else 0, axis=1)
# recanalization_procedure_iv_tpa_under_45 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 45)]
# # recanalization_procedure_iv_tpa_under_45 = recanalization_procedure_iv_tpa[recanalization_procedure_iv_tpa['IVTPA'] <= 45]
# recanalization_procedure_tby_only_dtg_under_45 = recanalization_procedure_tby_only_dtg.loc[(recanalization_procedure_tby_only_dtg['TBY'] > 0) & (recanalization_procedure_tby_only_dtg['TBY'] <= 45)]
# # recanalization_procedure_tby_only_dtg_under_45 = recanalization_procedure_tby_only_dtg[recanalization_procedure_tby_only_dtg['TBY'] <= 45]
# self.statsDf['# patients treated with door to recanalization therapy < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_45)
# # self.statsDf['# patients treated with door to recanalization therapy < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45)
# self.statsDf['% patients treated with door to recanalization therapy < 45 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 45 minutes']/x['patients_eligible_recanalization']) * 100), 2) if x['patients_eligible_recanalization'] > 0 else 0, axis=1)
# Get % patients recanalized
# self.statsDf['patient_recan_%'] = self.statsDf.apply(lambda x: round(((x['patients_eligible_recanalization']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Endovascular treatment alone'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Endovascular treatment alone']) > 0 else 0, axis=1)
self.statsDf['patient_recan_%'] = self.statsDf.apply(lambda x: round(((x['patients_eligible_recanalization']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']) > 0 else 0, axis=1)
#### RECANALIZATION RATE ####
self.statsDf['# recanalization rate out of total ischemic incidence'] = self.statsDf['patients_eligible_recanalization']
self.statsDf['% recanalization rate out of total ischemic incidence'] = self.statsDf['patient_recan_%']
self.statsDf.drop(['patient_recan_%'], inplace=True, axis=1)
#### CT/MRI ####
self.statsDf['# suspected stroke patients undergoing CT/MRI'] = self.statsDf['# CT/MRI - performed']
self.statsDf['% suspected stroke patients undergoing CT/MRI'] = self.statsDf['% CT/MRI - performed']
#### DYSPHAGIA SCREENING ####
self.statsDf['# all stroke patients undergoing dysphagia screening'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test']
self.statsDf['% all stroke patients undergoing dysphagia screening'] = self.statsDf.apply(lambda x: round(((x['# all stroke patients undergoing dysphagia screening']/(x['# all stroke patients undergoing dysphagia screening'] + x['# dysphagia screening - Not done'])) * 100), 2) if (x['# all stroke patients undergoing dysphagia screening'] + x['# dysphagia screening - Not done']) > 0 else 0, axis=1)
#### ISCHEMIC STROKE + NO AFIB + ANTIPLATELETS ####
non_transferred_antiplatelets = antithrombotics[~antithrombotics['RECANALIZATION_PROCEDURES'].isin([5,6])]
#antithrombotics_discharged_home = antithrombotics[antithrombotics['DISCHARGE_DESTINATION'].isin([1])]
# Get temporary dataframe with patients who have prescribed antithrombotics and ischemic stroke
antiplatelets = non_transferred_antiplatelets[non_transferred_antiplatelets['STROKE_TYPE'].isin([1])]
#antiplatelets = antithrombotics[antithrombotics['STROKE_TYPE'].isin([1])]
#antiplatelets = antithrombotics_discharged_home[antithrombotics_discharged_home['STROKE_TYPE'].isin([1])]
# Filter temporary dataframe and get only patients who have not been detected or not known for aFib flutter.
antiplatelets = antiplatelets[antiplatelets['AFIB_FLUTTER'].isin([4, 5])]
# Get patients who have prescribed antithrombotics
except_recommended = antiplatelets[antiplatelets['ANTITHROMBOTICS'].isin([1,2])]
# Get number of patients who have prescribed antithrombotics and ischemic stroke, have not been detected or not known for aFib flutter.
self.statsDf['except_recommended_patients'] = self._count_patients(dataframe=except_recommended)
# Get temporary dataframe groupby protocol ID and antithrombotics column
self.tmp = antiplatelets.groupby(['Protocol ID', 'ANTIPLATELETS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIPLATELETS", value=1, new_column_name='# ischemic stroke patients discharged with antiplatelets')
self.statsDf['% ischemic stroke patients discharged with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged with antiplatelets']/x['except_recommended_patients']) * 100), 2) if x['except_recommended_patients'] > 0 else 0, axis=1)
# discharged home
antiplatelets_discharged_home = antiplatelets[antiplatelets['DISCHARGE_DESTINATION'].isin([1])]
if (antiplatelets_discharged_home.empty):
# Get temporary dataframe groupby protocol ID and antithrombotics column
self.tmp = antiplatelets.groupby(['Protocol ID', 'ANTIPLATELETS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIPLATELETS", value=1, new_column_name='# ischemic stroke patients discharged home with antiplatelets')
self.statsDf['% ischemic stroke patients discharged home with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged home with antiplatelets']/x['except_recommended_patients']) * 100), 2) if x['except_recommended_patients'] > 0 else 0, axis=1)
self.statsDf['except_recommended_discharged_home_patients'] = self.statsDf['except_recommended_patients']
else:
# Get temporary dataframe groupby protocol ID and antithrombotics column
self.tmp = antiplatelets_discharged_home.groupby(['Protocol ID', 'ANTIPLATELETS']).size().to_frame('count').reset_index()
# Get patients who have prescribed antithrombotics
except_recommended_discharged_home = except_recommended[except_recommended['DISCHARGE_DESTINATION'].isin([1])]
# Get number of patients who have prescribed antithrombotics and ischemic stroke, have not been detected or not known for aFib flutter.
self.statsDf['except_recommended_discharged_home_patients'] = self._count_patients(dataframe=except_recommended_discharged_home)
self.statsDf = self._get_values_for_factors(column_name="ANTIPLATELETS", value=1, new_column_name='# ischemic stroke patients discharged home with antiplatelets')
self.statsDf['% ischemic stroke patients discharged home with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged home with antiplatelets']/x['except_recommended_discharged_home_patients']) * 100), 2) if x['except_recommended_discharged_home_patients'] > 0 else 0, axis=1)
self.statsDf['# ischemic stroke patients discharged (home) with antiplatelets'] = self.statsDf.apply(lambda x: x['# ischemic stroke patients discharged with antiplatelets'] if x['# ischemic stroke patients discharged with antiplatelets'] > x['# ischemic stroke patients discharged home with antiplatelets'] else x['# ischemic stroke patients discharged home with antiplatelets'], axis=1)
self.statsDf['% ischemic stroke patients discharged (home) with antiplatelets'] = self.statsDf.apply(lambda x: x['% ischemic stroke patients discharged with antiplatelets'] if x['% ischemic stroke patients discharged with antiplatelets'] > x['% ischemic stroke patients discharged home with antiplatelets'] else x['% ischemic stroke patients discharged home with antiplatelets'], axis=1)
# afib patients discharged with anticoagulants
self.statsDf['# afib patients discharged with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed)
# Get temporary dataframe with patients who are not dead with detected aFib flutter and with prescribed antithrombotics
#afib_detected_discharged_home = afib_flutter_detected[(~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])) & (afib_flutter_detected['ANTICOAGULANTS'].isin([1]))]
afib_detected_discharged_home = afib_flutter_detected[(~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])) & (afib_flutter_detected['ANTICOAGULANTS'].isin([1, 2]))]
# Get afib patients discharged and not dead
self.statsDf['afib_detected_discharged_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
# % afib patients discharged with anticoagulants
#self.statsDf['% afib patients discharged with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged with anticoagulants']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients']) > 0 else 0, axis=1)
self.statsDf['% afib patients discharged with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged with anticoagulants']/x['afib_detected_discharged_patients']) * 100), 2) if (x['afib_detected_discharged_patients']) > 0 else 0, axis=1)
# Get temporary dataframe with patients who have prescribed anticoagulats and were discharged home
non_trasferred_anticoagulants = anticoagulants_prescribed[~anticoagulants_prescribed['RECANALIZATION_PROCEDURES'].isin([5,6])]
anticoagulants_prescribed_discharged_home = non_trasferred_anticoagulants[non_trasferred_anticoagulants['DISCHARGE_DESTINATION'].isin([1])]
#anticoagulants_prescribed_discharged_home = anticoagulants_prescribed[anticoagulants_prescribed['DISCHARGE_DESTINATION'].isin([1])]
# Get temporary dataframe with patients who have been discharge at home with detected aFib flutter and with prescribed antithrombotics
#afib_detected_discharged_home = afib_flutter_detected[(afib_flutter_detected['DISCHARGE_DESTINATION'].isin([1])) & (~afib_flutter_detected['ANTITHROMBOTICS'].isin([9]))]
afib_detected_discharged_home = afib_flutter_detected[(afib_flutter_detected['DISCHARGE_DESTINATION'].isin([1])) & (afib_flutter_detected['ANTICOAGULANTS'].isin([1, 2])) & (~afib_flutter_detected['RECANALIZATION_PROCEDURES'].isin([5,6]))]
# Check if temporary dataframe is empty. If yes, the value is calculated not only for discharged home, but only dead patients are excluded
if (anticoagulants_prescribed_discharged_home.empty):
# afib patients discharged home with anticoagulants
anticoagulants_prescribed_discharged_home = anticoagulants_prescribed[~anticoagulants_prescribed['DISCHARGE_DESTINATION'].isin([5])]
# Get temporary dataframe with patients who are not dead with detected aFib flutter and with prescribed antithrombotics
afib_detected_discharged_home = afib_flutter_detected[(~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])) & (afib_flutter_detected['ANTICOAGULANTS'].isin([1, 2]))]
# Get # afib patients discharged home with anticoagulants
self.statsDf['# afib patients discharged home with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed_discharged_home)
# Get afib patients discharged and not dead
self.statsDf['afib_detected_discharged_home_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
# Get % afib patients discharge with anticoagulants and not dead
self.statsDf['% afib patients discharged home with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged home with anticoagulants']/x['afib_detected_discharged_home_patients']) * 100), 2) if x['afib_detected_discharged_home_patients'] > 0 else 0, axis=1)
else:
# Get # afib patients discharged home with anticoagulants
self.statsDf['# afib patients discharged home with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed_discharged_home)
# Get afib patients discharged home
self.statsDf['afib_detected_discharged_home_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
# Get % afib patients discharged home with anticoagulants
self.statsDf['% afib patients discharged home with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged home with anticoagulants']/x['afib_detected_discharged_home_patients']) * 100), 2) if x['afib_detected_discharged_home_patients'] > 0 else 0, axis=1)
self.statsDf['# afib patients discharged (home) with anticoagulants'] = self.statsDf.apply(lambda x: x['# afib patients discharged with anticoagulants'] if x['% afib patients discharged with anticoagulants'] > x['% afib patients discharged home with anticoagulants'] else x['# afib patients discharged home with anticoagulants'], axis=1)
self.statsDf['% afib patients discharged (home) with anticoagulants'] = self.statsDf.apply(lambda x: x['% afib patients discharged with anticoagulants'] if x['% afib patients discharged with anticoagulants'] > x['% afib patients discharged home with anticoagulants'] else x['% afib patients discharged home with anticoagulants'], axis=1)
#### STROKE UNIT ####
# stroke patients treated in a dedicated stroke unit / ICU
self.statsDf['# stroke patients treated in a dedicated stroke unit / ICU'] = self.statsDf['# patients hospitalized in stroke unit / ICU']
self.statsDf['% stroke patients treated in a dedicated stroke unit / ICU'] = self.statsDf['% patients hospitalized in stroke unit / ICU']
# SK doesn't collect the stroke unit, then we put here always 1
self.statsDf['% stroke patients treated in a dedicated stroke unit / ICU'] = self.statsDf.apply(lambda x: x['% patients hospitalized in stroke unit / ICU'] if x['# patients hospitalized in stroke unit / ICU'] > 0 else 1, axis=1)
# Create temporary dataframe to calculate final award
self.total_patient_column = '# total patients >= {0}'.format(30)
# self.angels_awards_tmp = self.statsDf[[self.total_patient_column, '% patients treated with door to recanalization therapy < 60 minutes', '% patients treated with door to recanalization therapy < 45 minutes', '% recanalization rate out of total ischemic incidence', '% suspected stroke patients undergoing CT/MRI', '% all stroke patients undergoing dysphagia screening', '% ischemic stroke patients discharged (home) with antiplatelets', '% afib patients discharged (home) with anticoagulants', '% stroke patients treated in a dedicated stroke unit / ICU']]
# self.statsDf.fillna(0, inplace=True)
self.statsDf[self.total_patient_column] = self.statsDf['Total Patients'] >= 30
self.angels_awards_tmp = self.statsDf[[self.total_patient_column, '% patients treated with door to thrombolysis < 60 minutes', '% patients treated with door to thrombolysis < 45 minutes', '% patients treated with door to thrombectomy < 120 minutes', '% patients treated with door to thrombectomy < 90 minutes', '% recanalization rate out of total ischemic incidence', '% suspected stroke patients undergoing CT/MRI', '% all stroke patients undergoing dysphagia screening', '% ischemic stroke patients discharged (home) with antiplatelets', '% afib patients discharged (home) with anticoagulants', '% stroke patients treated in a dedicated stroke unit / ICU', '# patients eligible thrombectomy', '# patients eligible thrombolysis']]
self.statsDf.fillna(0, inplace=True)
self.angels_awards_tmp['Proposed Award'] = self.angels_awards_tmp.apply(lambda x: self._get_final_award(x), axis=1)
self.statsDf['Proposed Award'] = self.angels_awards_tmp['Proposed Award']
self.statsDf.fillna(0, inplace=True)
self.statsDf.rename(columns={"Protocol ID": "Site ID"}, inplace=True)
self.statsDf['Site Name'] = self.statsDf['Site ID']
# self.sites = self._get_sites(self.statsDf)
def _get_final_award(self, x):
""" The function calculating the proposed award.
:param x: the row from temporary dataframe
:type x: pandas series
:returns: award -- the proposed award
"""
# if x[self.total_patient_column] == False:
# award = "NONE"
# else:
# award = "TRUE"
# recan_therapy_lt_60min = x['% patients treated with door to recanalization therapy < 60 minutes']
# # Calculate award for thrombolysis, if no patients were eligible for thrombolysis and number of total patients was greater than minimum than the award is set to DIAMOND
# if award == "TRUE":
# if (float(recan_therapy_lt_60min) >= 50 and float(recan_therapy_lt_60min) <= 74.99):
# award = "GOLD"
# elif (float(recan_therapy_lt_60min) >= 75):
# award = "DIAMOND"
# else:
# award = "NONE"
# recan_therapy_lt_45min = x['% patients treated with door to recanalization therapy < 45 minutes']
# if award != "NONE":
# if (float(recan_therapy_lt_45min) <= 49.99):
# if (award != "GOLD" or award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(recan_therapy_lt_45min) >= 50):
# if (award != "GOLD"):
# award = "DIAMOND"
# else:
# award = "NONE"
# recan_rate = x['% recanalization rate out of total ischemic incidence']
# if award != "NONE":
# if (float(recan_rate) >= 5 and float(recan_rate) <= 14.99):
# if (award == "PLATINUM" or award == "DIAMOND"):
# award = "GOLD"
# elif (float(recan_rate) >= 15 and float(recan_rate) <= 24.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(recan_rate) >= 25):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# ct_mri = x['% suspected stroke patients undergoing CT/MRI']
# if award != "NONE":
# if (float(ct_mri) >= 80 and float(ct_mri) <= 84.99):
# if (award == "PLATINUM" or award == "DIAMOND"):
# award = "GOLD"
# elif (float(ct_mri) >= 85 and float(ct_mri) <= 89.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(ct_mri) >= 90):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# dysphagia_screening = x['% all stroke patients undergoing dysphagia screening']
# if award != "NONE":
# if (float(dysphagia_screening) >= 80 and float(dysphagia_screening) <= 84.99):
# if (award == "PLATINUM" or award == "DIAMOND"):
# award = "GOLD"
# elif (float(dysphagia_screening) >= 85 and float(dysphagia_screening) <= 89.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(dysphagia_screening) >= 90):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# discharged_with_antiplatelets_final = x['% ischemic stroke patients discharged (home) with antiplatelets']
# if award != "NONE":
# if (float(discharged_with_antiplatelets_final) >= 80 and float(discharged_with_antiplatelets_final) <= 84.99):
# if (award == "PLATINUM" or award == "DIAMOND"):
# award = "GOLD"
# elif (float(discharged_with_antiplatelets_final) >= 85 and float(discharged_with_antiplatelets_final) <= 89.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(discharged_with_antiplatelets_final) >= 90):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# discharged_with_anticoagulants_final = x['% afib patients discharged (home) with anticoagulants']
# if award != "NONE":
# if (float(discharged_with_anticoagulants_final) >= 80 and float(discharged_with_anticoagulants_final) <= 84.99):
# if (award == "PLATINUM" or award == "DIAMOND"):
# award = "GOLD"
# elif (float(discharged_with_anticoagulants_final) >= 85 and float(discharged_with_anticoagulants_final) <= 89.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(discharged_with_anticoagulants_final) >= 90):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# stroke_unit = x['% stroke patients treated in a dedicated stroke unit / ICU']
# if award != "NONE":
# if (float(stroke_unit) <= 0.99):
# if (award == "DIAMOND"):
# award = "PLATINUM"
# elif (float(stroke_unit) >= 1):
# if (award == "DIAMOND"):
# award = "DIAMOND"
# else:
# award = "NONE"
# return award
if x[self.total_patient_column] == False:
award = "STROKEREADY"
else:
thrombolysis_therapy_lt_60min = x['% patients treated with door to thrombolysis < 60 minutes']
# Calculate award for thrombolysis, if no patients were eligible for thrombolysis and number of total patients was greater than minimum than the award is set to DIAMOND
if (float(thrombolysis_therapy_lt_60min) >= 50 and float(thrombolysis_therapy_lt_60min) <= 74.99):
award = "GOLD"
elif (float(thrombolysis_therapy_lt_60min) >= 75):
award = "DIAMOND"
else:
award = "STROKEREADY"
thrombolysis_therapy_lt_45min = x['% patients treated with door to thrombolysis < 45 minutes']
if award != "STROKEREADY":
if (float(thrombolysis_therapy_lt_45min) <= 49.99):
if (award != "GOLD" or award == "DIAMOND"):
award = "PLATINUM"
elif (float(thrombolysis_therapy_lt_45min) >= 50):
if (award != "GOLD"):
award = "DIAMOND"
else:
award = "STROKEREADY"
# Calculate award for thrombectomy, if no patients were eligible for thrombectomy and number of total patients was greater than minimum than the award is set to the possible proposed award (eg. if in thrombolysis step award was set to GOLD then the award will be GOLD)
thrombectomy_pts = x['# patients eligible thrombectomy']
# if thrombectomy_pts != 0:
if thrombectomy_pts > 3:
thrombectomy_therapy_lt_120min = x['% patients treated with door to thrombectomy < 120 minutes']
if award != "STROKEREADY":
if (float(thrombectomy_therapy_lt_120min) >= 50 and float(thrombectomy_therapy_lt_120min) <= 74.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(thrombectomy_therapy_lt_120min) >= 75):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
thrombectomy_therapy_lt_90min = x['% patients treated with door to thrombectomy < 90 minutes']
if award != "STROKEREADY":
if (float(thrombectomy_therapy_lt_90min) <= 49.99):
if (award != "GOLD" or award == "DIAMOND"):
award = "PLATINUM"
elif (float(thrombectomy_therapy_lt_90min) >= 50):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
recan_rate = x['% recanalization rate out of total ischemic incidence']
if award != "STROKEREADY":
if (float(recan_rate) >= 5 and float(recan_rate) <= 14.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(recan_rate) >= 15 and float(recan_rate) <= 24.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(recan_rate) >= 25):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
ct_mri = x['% suspected stroke patients undergoing CT/MRI']
if award != "STROKEREADY":
if (float(ct_mri) >= 80 and float(ct_mri) <= 84.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(ct_mri) >= 85 and float(ct_mri) <= 89.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(ct_mri) >= 90):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
dysphagia_screening = x['% all stroke patients undergoing dysphagia screening']
if award != "STROKEREADY":
if (float(dysphagia_screening) >= 80 and float(dysphagia_screening) <= 84.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(dysphagia_screening) >= 85 and float(dysphagia_screening) <= 89.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(dysphagia_screening) >= 90):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
discharged_with_antiplatelets_final = x['% ischemic stroke patients discharged (home) with antiplatelets']
if award != "STROKEREADY":
if (float(discharged_with_antiplatelets_final) >= 80 and float(discharged_with_antiplatelets_final) <= 84.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(discharged_with_antiplatelets_final) >= 85 and float(discharged_with_antiplatelets_final) <= 89.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(discharged_with_antiplatelets_final) >= 90):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
discharged_with_anticoagulants_final = x['% afib patients discharged (home) with anticoagulants']
if award != "STROKEREADY":
if (float(discharged_with_anticoagulants_final) >= 80 and float(discharged_with_anticoagulants_final) <= 84.99):
if (award == "PLATINUM" or award == "DIAMOND"):
award = "GOLD"
elif (float(discharged_with_anticoagulants_final) >= 85 and float(discharged_with_anticoagulants_final) <= 89.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(discharged_with_anticoagulants_final) >= 90):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
stroke_unit = x['% stroke patients treated in a dedicated stroke unit / ICU']
if award != "STROKEREADY":
if (float(stroke_unit) <= 0.99):
if (award == "DIAMOND"):
award = "PLATINUM"
elif (float(stroke_unit) >= 1):
if (award == "DIAMOND"):
award = "DIAMOND"
else:
award = "STROKEREADY"
return award
def _count_patients(self, dataframe):
""" The function calculating the number of patients per site.
:param dataframe: the dataframe with preprocessed data
:type dataframe: pandas dataframe
:returns: the column with the number of patients
"""
tmpDf = dataframe.groupby(['Protocol ID']).size().reset_index(name='count_patients')
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.fillna(0, inplace=True)
return factorDf['count_patients']
def _get_values_only_columns(self, column_name, value, dataframe):
""" The function calculating the numbeer of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the value for which we would like to get number of patients from the specific column
:type value: int
:param dataframe: the dataframe with the raw data
:type dataframe: pandas dataframe
:returns: the column with the number of patients
"""
tmpDf = dataframe[dataframe[column_name] == value].reset_index()[['Protocol ID', 'count']]
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.fillna(0, inplace=True)
return factorDf['count']
def _get_values_for_factors(self, column_name, value, new_column_name, df=None):
""" The function calculating the numbeer of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the value for which we would like to get number of patients from the specific column
:type value: int
:param new_column_name: to this value will be renamed the created column containing the number of patients
:type new_column_name: str
:param df: the dataframe with the raw data
:type df: pandas dataframe
:returns: the dataframe with calculated statistics
"""
# Check if type of column name is type of number, if not convert value into string
if (np.issubdtype(self.tmp[column_name].dtype, np.number)):
value = value
else:
value = str(value)
tmpDf = self.tmp[self.tmp[column_name] == value].reset_index()[['Protocol ID', 'count']]
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
return factorDf
def _get_values_for_factors_more_values(self, column_name, value, new_column_name, df=None):
""" The function calculating the number of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the list of values for which we would like to get number of patients from the specific column
:type value: list
:param new_column_name: to this value will be renamed the created column containing the number of patients
:type new_column_name: str
:param df: the dataframe with the raw data
:type df: pandas dataframe
:returns: the dataframe with calculated statistics
"""
if df is None:
tmpDf = self.tmp[self.tmp[column_name].isin(value)].reset_index()[['Protocol ID', 'count']]
tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
else:
tmpDf = df[df[column_name].isin(value)].reset_index()[['Protocol ID', 'count']]
tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
return factorDf
def _get_values_for_factors_containing(self, column_name, value, new_column_name, df=None):
""" The function calculating the number of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the value of string type for which we would like to get number of patients from the specific column
:type value: str
:param new_column_name: to this value will be renamed the created column containing the number of patients
:type new_column_name: str
:param df: the dataframe with the raw data
:type df: pandas dataframe
:returns: the dataframe with calculated statistics
"""
if df is None:
tmpDf = self.tmp[self.tmp[column_name].str.contains(value)].reset_index()[['Protocol ID', 'count']]
tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
else:
tmpDf = df[df[column_name].str.contains(value)].reset_index()[['Protocol ID', 'count']]
tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
return factorDf
def _get_ctmri_delta(self, hosp_time, ct_time):
""" The function calculating the difference between two times in minutes.
:param hosp_time: the time of hospitalization
:type hosp_time: time
:param ct_time: the time when CT/MRI was performed
:type ct_time: time
:returns: tdelta between two times in minutes
"""
timeformat = '%H:%M:%S'
# Check if both time are not None if yes, return 0 else return tdelta
if hosp_time is None or ct_time is None or pd.isnull(hosp_time) or pd.isnull(ct_time):
tdeltaMin = 0
elif hosp_time == 0 or ct_time == 0:
tdeltaMin = 0
else:
if isinstance(ct_time, time) and isinstance(hosp_time, time):
tdelta = datetime.combine(date.today(), ct_time) - datetime.combine(date.today(), hosp_time)
elif isinstance(ct_time, time):
tdelta = datetime.combine(date.today(), ct_time) - datetime.strptime(hosp_time, timeformat)
elif isinstance(hosp_time, time):
tdelta = datetime.strptime(ct_time, timeformat) - datetime.strptime(hosp_time, timeformat)
else:
tdelta = datetime.strptime(ct_time, timeformat) - datetime.strptime(hosp_time, timeformat)
tdeltaMin = tdelta.total_seconds()/60.0
if tdeltaMin > 60:
res = 2
elif tdeltaMin <= 60 and tdeltaMin > 0:
res = 1
else:
res = -2
return res
def _return_dataset(self):
    """ The function returning the preprocessed dataframe stored on the object.

    :returns: the dataframe with the raw (preprocessed) data
    """
    return self.df
def _return_stats(self):
    """ The function returning the dataframe with the calculated statistics.

    :returns: the dataframe with the statistics
    """
    return self.statsDf
def _get_sites(self, df):
""" The function returning the list of sites in the preprocessed data.
:returns: the list of sites
"""
site_ids = df['Site ID'].tolist()
site_list = list(set(site_ids))
return site_list
def _return_sites(self):
    """ The function returning the list of sites detected during preprocessing.

    :returns: the list of sites
    """
    return self.sites
class GenerateFormattedStats:
    """ The class generating the formatted statistics in Excel format. Angels Awards columns are colored based on the meeting of the condition.

    :param df: the dataframe with calculated statistics
    :type df: pandas dataframe
    :param country: True if country should be included as site
    :type country: bool
    :param country_code: the code of country used in filenames
    :type country_code: str
    :param split_sites: `True` if the reports should be generated per sites
    :type split_sites: bool
    :param site: the site code
    :type site: str
    :param report: type of the report, eg. quarter
    :type report: str
    :param quarter: type of the period, eq. Q1_2019
    :type quarter: str
    :param comp: `True` if the comparison reports are calculated
    :type comp: bool
    """

    def __init__(self, df, country=False, country_code=None, split_sites=False, site=None, report=None, quarter=None, comp=False):
        # Keep an untouched copy for the CSV export; self.df has its helper
        # columns dropped below before it is written to Excel.
        self.df_unformatted = df.copy()
        self.df = df.copy()
        self.country_code = country_code
        self.report = report
        self.quarter = quarter
        self.comp = comp

        def delete_columns(columns):
            """ The function deleting the columns from the dataframe which should not be displayed in the excel statistics (temporary columns used to generate graphs).

            :param columns: the list of column names to be deleted
            :type columns: list
            """
            for i in columns:
                if i in self.df.columns:
                    self.df.drop([i], inplace=True, axis=1)

        # Drop every temporary/helper column that is only needed for graph generation.
        delete_columns(['isch_patients', 'is_ich_patients', 'is_ich_tia_cvt_patients', 'is_ich_cvt_patients', 'is_tia_patients',
                        'is_ich_sah_cvt_patients', 'is_tia_cvt_patients', 'cvt_patients', 'ich_sah_patients', 'ich_patients',
                        'sah_patients', 'discharge_subset_patients','discharge_subset_alive_patients', 'neurosurgery_patients',
                        'not_reffered_patients', 'reffered_patients', 'afib_detected_during_hospitalization_patients',
                        'afib_not_detected_or_not_known_patients', 'antithrombotics_patients', 'ischemic_transient_dead_patients',
                        'afib_flutter_not_detected_or_not_known_patients', 'afib_flutter_not_detected_or_not_known_dead_patients',
                        'prescribed_antiplatelets_no_afib_patients', 'prescribed_antiplatelets_no_afib_dead_patients',
                        'afib_flutter_detected_patients', 'anticoagulants_recommended_patients', 'afib_flutter_detected_dead_patients',
                        'recommended_antithrombotics_with_afib_alive_patients', 'discharge_subset_same_centre_patients',
                        'discharge_subset_another_centre_patients', 'patients_eligible_recanalization',
                        '# patients having stroke in the hospital - No', '% patients having stroke in the hospital - No',
                        '# recurrent stroke - No', '% recurrent stroke - No', '# patients assessed for rehabilitation - Not known',
                        '% patients assessed for rehabilitation - Not known', '# level of consciousness - not known',
                        '% level of consciousness - not known', '# CT/MRI - Performed later than 1 hour after admission',
                        '% CT/MRI - Performed later than 1 hour after admission', '# patients put on ventilator - Not known',
                        '% patients put on ventilator - Not known', '# patients put on ventilator - No',
                        '% patients put on ventilator - No', '# IV tPa', '% IV tPa', '# TBY', '% TBY', '# DIDO TBY',
                        '# dysphagia screening - not known', '% dysphagia screening - not known',
                        '# dysphagia screening time - After first 24 hours', '% dysphagia screening time - After first 24 hours',
                        '# other afib detection method - Not detected or not known',
                        '% other afib detection method - Not detected or not known', '# carotid arteries imaging - Not known',
                        '% carotid arteries imaging - Not known', '# carotid arteries imaging - No', '% carotid arteries imaging - No',
                        'vascular_imaging_cta_norm', 'vascular_imaging_mra_norm', 'vascular_imaging_dsa_norm',
                        'vascular_imaging_none_norm', 'bleeding_arterial_hypertension_perc_norm', 'bleeding_aneurysm_perc_norm',
                        'bleeding_arterio_venous_malformation_perc_norm', 'bleeding_anticoagulation_therapy_perc_norm',
                        'bleeding_amyloid_angiopathy_perc_norm', 'bleeding_other_perc_norm', 'intervention_endovascular_perc_norm',
                        'intervention_neurosurgical_perc_norm', 'intervention_other_perc_norm', 'intervention_referred_perc_norm',
                        'intervention_none_perc_norm', 'vt_treatment_anticoagulation_perc_norm', 'vt_treatment_thrombectomy_perc_norm',
                        'vt_treatment_local_thrombolysis_perc_norm', 'vt_treatment_local_neurological_treatment_perc_norm',
                        'except_recommended_patients', 'afib_detected_discharged_home_patients', '% dysphagia screening done',
                        '# dysphagia screening done', 'alert_all', 'alert_all_perc', 'drowsy_all', 'drowsy_all_perc', 'comatose_all',
                        'comatose_all_perc', 'antithrombotics_patients_with_cvt', 'ischemic_transient_cerebral_dead_patients',
                        '# patients receiving antiplatelets with CVT', '% patients receiving antiplatelets with CVT',
                        '# patients receiving Vit. K antagonist with CVT', '% patients receiving Vit. K antagonist with CVT',
                        '# patients receiving dabigatran with CVT', '% patients receiving dabigatran with CVT',
                        '# patients receiving rivaroxaban with CVT', '% patients receiving rivaroxaban with CVT',
                        '# patients receiving apixaban with CVT', '% patients receiving apixaban with CVT',
                        '# patients receiving edoxaban with CVT', '% patients receiving edoxaban with CVT',
                        '# patients receiving LMWH or heparin in prophylactic dose with CVT',
                        '% patients receiving LMWH or heparin in prophylactic dose with CVT',
                        '# patients receiving LMWH or heparin in full anticoagulant dose with CVT',
                        '% patients receiving LMWH or heparin in full anticoagulant dose with CVT',
                        '# patients not prescribed antithrombotics, but recommended with CVT',
                        '% patients not prescribed antithrombotics, but recommended with CVT',
                        '# patients neither receiving antithrombotics nor recommended with CVT',
                        '% patients neither receiving antithrombotics nor recommended with CVT',
                        '# patients prescribed antithrombotics with CVT', '% patients prescribed antithrombotics with CVT',
                        '# patients prescribed or recommended antithrombotics with CVT',
                        '% patients prescribed or recommended antithrombotics with CVT',
                        'afib_flutter_not_detected_or_not_known_patients_with_cvt',
                        'afib_flutter_not_detected_or_not_known_dead_patients_with_cvt',
                        'prescribed_antiplatelets_no_afib_patients_with_cvt', 'prescribed_antiplatelets_no_afib_dead_patients_with_cvt',
                        '# patients prescribed antiplatelets without aFib with CVT',
                        '% patients prescribed antiplatelets without aFib with CVT', 'afib_flutter_detected_patients_with_cvt',
                        '# patients prescribed anticoagulants with aFib with CVT', 'anticoagulants_recommended_patients_with_cvt',
                        'afib_flutter_detected_dead_patients_with_cvt', '% patients prescribed anticoagulants with aFib with CVT',
                        '# patients prescribed antithrombotics with aFib with CVT',
                        'recommended_antithrombotics_with_afib_alive_patients_with_cvt',
                        '% patients prescribed antithrombotics with aFib with CVT', 'afib_flutter_detected_patients_not_dead',
                        'except_recommended_discharged_home_patients', 'afib_detected_discharged_patients',
                        'ischemic_transient_dead_patients_prescribed', 'is_tia_discharged_home_patients'])

        def select_country(value):
            """ The function getting the country name from the pytz database using the country code.

            :param value: the country code
            :type value: str
            """
            country_name = pytz.country_names[value]
            return country_name

        # If country is used as site, the country name is selected from the countries dictionary by country code.
        if (country):
            # pytz uses ISO 3166 two-letter codes; remap the three-letter code for Uzbekistan.
            if self.country_code == 'UZB':
                self.country_code = 'UZ'
            self.country_name = select_country(self.country_code)
        else:
            self.country_name = None

        # If split_sites is True, then go through the dataframe and generate statistics for each site (the country will be included as site in each file).
        site_ids = self.df['Site ID'].tolist()
        # Delete the country name from the site ids list.
        try:
            site_ids.remove(self.country_name)
        except:
            # NOTE(review): bare except silently ignores the country name not being in the list.
            pass

        if site is not None:
            # Single-site report: keep only the requested site plus the country row.
            df = self.df[self.df['Site ID'].isin([site, self.country_name])].copy()
            df_unformatted = self.df_unformatted[self.df_unformatted['Site ID'].isin([site, self.country_name])].copy()
            self._generate_formatted_statistics(df=df, df_tmp=df_unformatted, site_code=site)

        # Generate formatted statistics for all sites individually + country as site is included
        if (split_sites) and site is None:
            for i in site_ids:
                df = self.df[self.df['Site ID'].isin([i, self.country_name])].copy()
                df_unformatted = self.df_unformatted[self.df_unformatted['Site ID'].isin([i, self.country_name])].copy()
                self._generate_formatted_statistics(df=df, df_tmp=df_unformatted, site_code=i)

        # Produce formatted statistics for all sites + country as site
        if site is None:
            self._generate_formatted_statistics(df=self.df, df_tmp=self.df_unformatted)

    def _generate_formatted_statistics(self, df, df_tmp, site_code=None):
        """ The function creating the new excel document with the statistic data.

        :param df: the dataframe with statistics with already deleted temporary columns
        :type df: pandas dataframe
        :param df_tmp: the dataframe with statistics containing temporary columns
        :type df_tmp: pandas dataframe
        :param site_code: the site code
        :type site_code: str
        """
        if self.country_code is None and site_code is None:
            # General report containing all sites in one document
            name_of_unformatted_stats = self.report + "_" + self.quarter + ".csv"
            name_of_output_file = self.report + "_" + self.quarter + ".xlsx"
        elif site_code is None:
            # General report for whole country
            name_of_unformatted_stats = self.report + "_" + self.country_code + "_" + self.quarter + ".csv"
            name_of_output_file = self.report + "_" + self.country_code + "_" + self.quarter + ".xlsx"
        else:
            # General report for site
            name_of_unformatted_stats = self.report + "_" + site_code + "_" + self.quarter + ".csv"
            name_of_output_file = self.report + "_" + site_code + "_" + self.quarter + ".xlsx"

        # Dump the unformatted statistics next to the Excel file.
        df_tmp.to_csv(name_of_unformatted_stats, sep=",", encoding='utf-8', index=False)

        workbook1 = xlsxwriter.Workbook(name_of_output_file, {'strings_to_numbers': True})
        worksheet = workbook1.add_worksheet()

        # set width of columns
        worksheet.set_column(0, 4, 15)
        worksheet.set_column(4, 350, 60)

        # The eligible-thrombectomy counts steer the thrombectomy award columns
        # below; the column itself is not displayed in the output.
        thrombectomy_patients = df['# patients eligible thrombectomy'].values
        df.drop(['# patients eligible thrombectomy'], inplace=True, axis=1)

        ncol = len(df.columns) - 1
        nrow = len(df) + 2

        col = []
        column_names = df.columns.tolist()
        # Set headers
        for i in range(0, ncol + 1):
            tmp = {}
            tmp['header'] = column_names[i]
            col.append(tmp)

        statistics = df.values.tolist()

        ########################
        # DICTIONARY OF COLORS #
        ########################
        colors = {
            "gender": "#477187",
            "stroke_hosp": "#535993",
            "recurrent_stroke": "#D4B86A",
            "department_type": "#D4A46A",
            "hospitalization": "#D4916A",
            "rehab": "#D4BA6A",
            "stroke": "#565595",
            "consciousness": "#468B78",
            "gcs": "#B9D6C1",
            "nihss": "#C5D068",
            "ct_mri": "#AA8739",
            "vasc_img": "#277650",
            "ventilator": "#AA5039",
            "recanalization_procedure": "#7F4C91",
            "median_times": "#BEBCBC",
            "dysphagia": "#F49B5B",
            "hemicraniectomy": "#A3E4D7",
            "neurosurgery": "#F8C471",
            "neurosurgery_type": "#CACFD2",
            "bleeding_reason": "#CB4335",
            "bleeding_source": "#9B59B6",
            "intervention": "#5DADE2",
            "vt_treatment": "#F5CBA7",
            "afib": "#A2C3F3",
            "carot": "#F1C40F",
            "antithrombotics": "#B5E59F",
            "statin": "#28B463",
            "carotid_stenosis": "#B9D6C1",
            "carot_foll": "#BFC9CA",
            "antihypertensive": "#7C7768",
            "smoking": "#F9C991",
            "cerebrovascular": "#91C09E",
            "discharge_destination": "#C0EFF5",
            "discharge_destination_same_centre": "#56A3A6",
            "discharge_destination_another_centre": "#E8DF9C",
            "discharge_destination_within_another_centre": "#538083",
            "angel_awards": "#B87333",
            "angel_resq_awards": "#341885",
            "columns": "#3378B8",
            "green": "#A1CCA1",
            "orange": "#DF7401",
            "gold": "#FFDF00",
            "platinum": "#c0c0c0",
            "black": "#ffffff",
            "red": "#F45D5D"
        }

        ################
        # angel awards #
        ################
        awards = workbook1.add_format({
            'bold': 2,
            'border': 0,
            'align': 'center',
            'valign': 'vcenter',
            'fg_color': colors.get("angel_awards")})

        awards_color = workbook1.add_format({
            'fg_color': colors.get("angel_awards")})

        # NOTE(review): the patient threshold is hard-coded to 30 here, while
        # GenerateFormattedAngelsAwards takes it as a parameter — confirm intent.
        self.total_patients_column = '# total patients >= {0}'.format(30)
        first_index = column_names.index(self.total_patients_column)
        last_index = column_names.index('% stroke patients treated in a dedicated stroke unit / ICU')
        first_cell = xl_rowcol_to_cell(0, first_index)
        last_cell = xl_rowcol_to_cell(0, last_index)
        # Banner row above the award-related columns.
        worksheet.merge_range(first_cell + ":" + last_cell, 'ESO ANGELS AWARDS', awards)
        for i in range(first_index, last_index+1):
            # NOTE(review): both branches are identical, so the startswith('%')
            # test currently has no effect.
            if column_names[i].startswith('%'):
                worksheet.write(xl_rowcol_to_cell(1, i), '', awards_color)
            else:
                worksheet.write(xl_rowcol_to_cell(1, i), '', awards_color)

        # Columns kept in the data but not shown in the spreadsheet.
        # NOTE(review): '# patients treated with door to thrombolysis < 60 minutes'
        # appears twice in this list — likely a copy-paste duplicate.
        hidden_columns = ['# patients treated with door to recanalization therapy < 60 minutes',
                          '% patients treated with door to recanalization therapy < 60 minutes',
                          '# patients treated with door to recanalization therapy < 45 minutes',
                          '% patients treated with door to recanalization therapy < 45 minutes',
                          '# patients treated with door to thrombolysis < 60 minutes',
                          '# patients treated with door to thrombolysis < 60 minutes',
                          '# patients treated with door to thrombolysis < 45 minutes',
                          '# patients treated with door to thrombectomy < 120 minutes',
                          '# patients treated with door to thrombectomy < 90 minutes',
                          '# recanalization rate out of total ischemic incidence',
                          '# suspected stroke patients undergoing CT/MRI',
                          '# all stroke patients undergoing dysphagia screening',
                          '# ischemic stroke patients discharged with antiplatelets',
                          '% ischemic stroke patients discharged with antiplatelets',
                          '# ischemic stroke patients discharged home with antiplatelets',
                          '% ischemic stroke patients discharged home with antiplatelets',
                          '# ischemic stroke patients discharged (home) with antiplatelets',
                          '# afib patients discharged with anticoagulants',
                          '% afib patients discharged with anticoagulants',
                          '# afib patients discharged home with anticoagulants',
                          '% afib patients discharged home with anticoagulants',
                          '# afib patients discharged (home) with anticoagulants',
                          '# stroke patients treated in a dedicated stroke unit / ICU']
        for i in hidden_columns:
            index = column_names.index(i)
            column = xl_col_to_name(index)
            worksheet.set_column(column + ":" + column, None, None, {'hidden': True})

        # format for green color
        green = workbook1.add_format({
            'bold': 2,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': colors.get("green")})

        # format for gold color
        gold = workbook1.add_format({
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': colors.get("gold")})

        # format for platinum color
        plat = workbook1.add_format({
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': colors.get("platinum")})

        # format for diamond: white text on black background
        black = workbook1.add_format({
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': '#000000',
            'color': colors.get("black")})

        # format for red color (currently unused in this method)
        red = workbook1.add_format({
            'bold': 1,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': colors.get("red")})

        # add table into worksheet
        options = {'data': statistics,
                   'header_row': True,
                   'columns': col,
                   'style': 'Table Style Light 8'
                   }

        #worksheet.set_column('E:V', 100)
        # Table is written at row index 2 (0-based) with a header row, so the
        # first data row is spreadsheet row 4 (1-based) — hence `row = 4` below.
        worksheet.add_table(2, 0, nrow, ncol, options)

        # total number of rows
        number_of_rows = len(statistics) + 2

        if not self.comp:
            # Color the total-patients column green where the cell contains TRUE.
            row = 4
            index = column_names.index(self.total_patients_column)
            while row < nrow + 2:
                cell_n = xl_col_to_name(index) + str(row)
                worksheet.conditional_format(cell_n, {'type': 'text',
                                                      'criteria': 'containing',
                                                      'value': 'TRUE',
                                                      'format': green})
                row += 1

            def angels_awards_ivt_60(column_name, tmp_column=None):
                """Add conditional formatting to angels awards for ivt < 60: 50-74.99% gold, >= 75% diamond."""
                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': 'between',
                                                          'minimum': 50,
                                                          'maximum': 74.99,
                                                          'format': gold})
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '>=',
                                                          'value': 75,
                                                          'format': black})
                    row += 1

                row = 4
                if tmp_column is not None:
                    # Sites with no thrombectomy-eligible patients get diamond for a 0 value.
                    while row < number_of_rows + 2:
                        cell_n = column_name + str(row)
                        tmp_value = thrombectomy_patients[row-4]
                        if (float(tmp_value) == 0.0):
                            worksheet.conditional_format(cell_n, {'type': 'cell',
                                                                  'criteria': '==',
                                                                  'value': 0.0,
                                                                  'format': black})
                        row += 1

            index = column_names.index('% patients treated with door to thrombolysis < 60 minutes')
            column = xl_col_to_name(index)
            angels_awards_ivt_60(column)

            index = column_names.index('% patients treated with door to thrombectomy < 120 minutes')
            column = xl_col_to_name(index)
            angels_awards_ivt_60(column, tmp_column='# patients eligible thrombectomy')

            def angels_awards_ivt_45(column_name, tmp_column=None):
                """Add conditional formatting to angels awards for ivt < 45: < 50% platinum, >= 50% diamond."""
                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    if tmp_column is not None:
                        worksheet.conditional_format(cell_n, {'type': 'cell',
                                                              'criteria': 'between',
                                                              'minimum': 0.99,
                                                              'maximum': 49.99,
                                                              'format': plat})
                    else:
                        worksheet.conditional_format(cell_n, {'type': 'cell',
                                                              'criteria': '<=',
                                                              'value': 49.99,
                                                              'format': plat})
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '>=',
                                                          'value': 50,
                                                          'format': black})
                    row += 1

                if tmp_column is not None:
                    # Sites with no thrombectomy-eligible patients get diamond for a 0 value.
                    row = 4
                    while row < number_of_rows + 2:
                        cell_n = column_name + str(row)
                        tmp_value = thrombectomy_patients[row-4]
                        if (float(tmp_value) == 0.0):
                            worksheet.conditional_format(cell_n, {'type': 'cell',
                                                                  'criteria': '<=',
                                                                  'value': 0.99,
                                                                  'format': black})
                        row += 1

            index = column_names.index('% patients treated with door to thrombolysis < 45 minutes')
            column = xl_col_to_name(index)
            angels_awards_ivt_45(column)

            index = column_names.index('% patients treated with door to thrombectomy < 90 minutes')
            column = xl_col_to_name(index)
            angels_awards_ivt_45(column, tmp_column='# patients eligible thrombectomy')

            # setting colors of cells according to their values
            def angels_awards_recan(column_name):
                """Add conditional formatting to angels awards for recanalization procedures: 5-14.99% gold, 15-24.99% platinum, >= 25% diamond."""
                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': 'between',
                                                          'minimum': 5,
                                                          'maximum': 14.99,
                                                          'format': gold})
                    row += 1

                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': 'between',
                                                          'minimum': 15,
                                                          'maximum': 24.99,
                                                          'format': plat})
                    row += 1

                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '>=',
                                                          'value': 25,
                                                          'format': black})
                    row += 1

            index = column_names.index('% recanalization rate out of total ischemic incidence')
            column = xl_col_to_name(index)
            angels_awards_recan(column)

            def angels_awards_processes(column_name, count=True):
                """Add conditional formatting to angels awards for processes: 80-84.99% gold, 85-89.99% platinum, >= 90% diamond."""
                count = count
                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': 'between',
                                                          'minimum': 80,
                                                          'maximum': 84.99,
                                                          'format': gold})
                    row += 1

                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': 'between',
                                                          'minimum': 85,
                                                          'maximum': 89.99,
                                                          'format': plat})
                    row += 1

                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '>=',
                                                          'value': 90,
                                                          'format': black})
                    row += 1

            index = column_names.index('% suspected stroke patients undergoing CT/MRI')
            column = xl_col_to_name(index)
            angels_awards_processes(column)

            index = column_names.index('% all stroke patients undergoing dysphagia screening')
            column = xl_col_to_name(index)
            angels_awards_processes(column)

            index = column_names.index('% ischemic stroke patients discharged (home) with antiplatelets')
            column = xl_col_to_name(index)
            angels_awards_processes(column)

            index = column_names.index('% afib patients discharged (home) with anticoagulants')
            column = xl_col_to_name(index)
            angels_awards_processes(column)

            # setting colors of cells according to their values
            def angels_awards_hosp(column_name):
                """Add conditional formatting to angels awards for hospitalization: <= 0 platinum, >= 0.99 diamond."""
                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '<=',
                                                          'value': 0,
                                                          'format': plat})
                    row += 1

                row = 4
                while row < number_of_rows + 2:
                    cell_n = column_name + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'cell',
                                                          'criteria': '>=',
                                                          'value': 0.99,
                                                          'format': black})
                    row += 1

            index = column_names.index('% stroke patients treated in a dedicated stroke unit / ICU')
            column = xl_col_to_name(index)
            angels_awards_hosp(column)

            # set color for proposed angel award
            def proposed_award(column_name):
                # NOTE(review): the body reads the enclosing-scope variable
                # `column` instead of the `column_name` parameter; this only
                # works because `column` is assigned right before the call below.
                row = 4
                while row < nrow + 2:
                    cell_n = column + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'text',
                                                          'criteria': 'containing',
                                                          'value': 'STROKEREADY',
                                                          'format': green})
                    row += 1

                row = 4
                while row < nrow + 2:
                    cell_n = column + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'text',
                                                          'criteria': 'containing',
                                                          'value': 'GOLD',
                                                          'format': gold})
                    row += 1

                row = 4
                while row < nrow + 2:
                    cell_n = column + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'text',
                                                          'criteria': 'containing',
                                                          'value': 'PLATINUM',
                                                          'format': plat})
                    row += 1

                row = 4
                while row < nrow + 2:
                    cell_n = column + str(row)
                    worksheet.conditional_format(cell_n, {'type': 'text',
                                                          'criteria': 'containing',
                                                          'value': 'DIAMOND',
                                                          'format': black})
                    row += 1

            index = column_names.index('Proposed Award')
            column = xl_col_to_name(index)
            proposed_award(column)
        else:
            pass

        workbook1.close()
class GenerateFormattedAngelsAwards:
""" This class generate formatted statistics only for angels awards.
:param df: the dataframe with angels awards statistics
:type df: pandas dataframe
:param report: the type of report, eg. quarter
:type report: str
:param quarter: the type of the period, eg. Q1_2019
:type quarter: str
"""
def __init__(self, df, report=None, quarter=None, minimum_patients=30):
self.df = df
self.report = report
self.quarter = quarter
self.minimum_patients = minimum_patients
self.formate(self.df)
def formate(self, df):
if self.report is None and self.quarter is None:
output_file = "angels_awards.xslx"
else:
output_file = self.report + "_" + self.quarter + "_angels_awards.xlsx"
workbook1 = xlsxwriter.Workbook(output_file, {'strings_to_numbers': True})
worksheet = workbook1.add_worksheet()
# set width of columns
worksheet.set_column(0, 2, 15)
worksheet.set_column(2, 20, 40)
thrombectomy_patients = df['# patients eligible thrombectomy'].values
df.drop(['# patients eligible thrombectomy'], inplace=True, axis=1)
ncol = len(df.columns) - 1
nrow = len(df) + 2
col = []
column_names = df.columns.tolist()
for i in range(0, ncol + 1):
tmp = {}
tmp['header'] = column_names[i]
col.append(tmp)
statistics = df.values.tolist()
colors = {
"angel_awards": "#B87333",
"angel_resq_awards": "#341885",
"columns": "#3378B8",
"green": "#A1CCA1",
"orange": "#DF7401",
"gold": "#FFDF00",
"platinum": "#c0c0c0",
"black": "#ffffff",
"red": "#F45D5D"
}
################
# angel awards #
################
awards = workbook1.add_format({
'bold': 2,
'border': 0,
'align': 'center',
'valign': 'vcenter',
'fg_color': colors.get("angel_awards")})
awards_color = workbook1.add_format({
'fg_color': colors.get("angel_awards")})
first_cell = xl_rowcol_to_cell(0, 2)
last_cell = xl_rowcol_to_cell(0, ncol)
worksheet.merge_range(first_cell + ":" + last_cell, 'ESO ANGELS AWARDS', awards)
for i in range(2, ncol + 1):
cell = xl_rowcol_to_cell(1, i)
worksheet.write(cell, '', awards_color)
# format for green color
green = workbook1.add_format({
'bold': 2,
'align': 'center',
'valign': 'vcenter',
'bg_color': colors.get("green")})
# format for gold color
gold = workbook1.add_format({
'bold': 1,
'align': 'center',
'valign': 'vcenter',
'bg_color': colors.get("gold")})
# format for platinum color
plat = workbook1.add_format({
'bold': 1,
'align': 'center',
'valign': 'vcenter',
'bg_color': colors.get("platinum")})
# format for gold black
black = workbook1.add_format({
'bold': 1,
'align': 'center',
'valign': 'vcenter',
'bg_color': '#000000',
'color': colors.get("black")})
# format for red color
red = workbook1.add_format({
'bold': 1,
'align': 'center',
'valign': 'vcenter',
'bg_color': colors.get("red")})
# add table into worksheet
options = {'data': statistics,
'header_row': True,
'columns': col,
'style': 'Table Style Light 8'
}
first_col = xl_col_to_name(0)
last_col = xl_col_to_name(ncol + 1)
worksheet.set_column(first_col + ":" + last_col, 30)
worksheet.add_table(2, 0, nrow, ncol, options)
# total number of rows
number_of_rows = len(statistics) + 2
self.total_patients_column = '# total patients >= {0}'.format(self.minimum_patients)
# if cell contain TRUE in column > 30 patients (DR) it will be colored to green
awards = []
row = 4
while row < nrow + 2:
index = column_names.index(self.total_patients_column)
cell_n = xl_col_to_name(index) + str(row)
worksheet.conditional_format(cell_n, {'type': 'text',
'criteria': 'containing',
'value': 'TRUE',
'format': green})
row += 1
def angels_awards_ivt_60(column_name, tmp_column=None):
"""Add conditional formatting to angels awards for ivt < 60."""
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 50,
'maximum': 74.99,
'format': gold})
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '>=',
'value': 75,
'format': black})
row += 1
row = 4
if tmp_column is not None:
while row < number_of_rows + 2:
cell_n = column_name + str(row)
tmp_value = thrombectomy_patients[row-4]
if (float(tmp_value) == 0.0):
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '==',
'value': 0.0,
'format': black})
row += 1
index = column_names.index('% patients treated with door to thrombolysis < 60 minutes')
column = xl_col_to_name(index)
angels_awards_ivt_60(column)
index = column_names.index('% patients treated with door to thrombectomy < 120 minutes')
column = xl_col_to_name(index)
angels_awards_ivt_60(column, tmp_column='# patients eligible thrombectomy')
def angels_awards_ivt_45(column_name, tmp_column=None):
"""Add conditional formatting to angels awards for ivt < 45."""
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
if tmp_column is not None:
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 0.99,
'maximum': 49.99,
'format': plat})
else:
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '<=',
'value': 49.99,
'format': plat})
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '>=',
'value': 50,
'format': black})
row += 1
if tmp_column is not None:
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
tmp_value = thrombectomy_patients[row-4]
if (float(tmp_value) == 0.0):
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '<=',
'value': 0.99,
'format': black})
row += 1
index = column_names.index('% patients treated with door to thrombolysis < 45 minutes')
column = xl_col_to_name(index)
angels_awards_ivt_45(column)
index = column_names.index('% patients treated with door to thrombectomy < 90 minutes')
column = xl_col_to_name(index)
angels_awards_ivt_45(column, tmp_column='# patients eligible thrombectomy')
# setting colors of cells according to their values
def angels_awards_recan(column_name, coln):
"""Add conditional formatting to angels awards for recaalization procedures."""
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 5,
'maximum': 14.99,
'format': gold})
row += 1
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 15,
'maximum': 24.99,
'format': plat})
row += 1
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '>=',
'value': 25,
'format': black})
row += 1
index = column_names.index('% recanalization rate out of total ischemic incidence')
angels_awards_recan(column_name=xl_col_to_name(index), coln=index)
#angels_awards_recan('F')
def angels_awards_processes(column_name, coln, count=True):
"""Add conditional formatting to angels awards for processes."""
count = count
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 80,
'maximum': 84.99,
'format': gold})
row += 1
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': 'between',
'minimum': 85,
'maximum': 89.99,
'format': plat})
row += 1
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '>=',
'value': 90,
'format': black})
row += 1
index = column_names.index('% suspected stroke patients undergoing CT/MRI')
angels_awards_processes(column_name=xl_col_to_name(index), coln=index)
index = column_names.index('% all stroke patients undergoing dysphagia screening')
angels_awards_processes(column_name=xl_col_to_name(index), coln=index)
index = column_names.index('% ischemic stroke patients discharged (home) with antiplatelets')
angels_awards_processes(column_name=xl_col_to_name(index), coln=index)
index = column_names.index('% afib patients discharged (home) with anticoagulants')
angels_awards_processes(column_name=xl_col_to_name(index), coln=index)
#angels_awards_processes('G', 4)
#angels_awards_processes('H', 5)
#angels_awards_processes('I', 6)
#angels_awards_processes('J', 7)
# setting colors of cells according to their values
def angels_awards_hosp(column_name, coln):
"""Add conditional formatting to angels awards for hospitalization."""
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '<=',
'value': 0,
'format': plat})
row += 1
row = 4
while row < number_of_rows + 2:
cell_n = column_name + str(row)
worksheet.conditional_format(cell_n, {'type': 'cell',
'criteria': '>=',
'value': 0.99,
'format': black})
row += 1
index = column_names.index('% stroke patients treated in a dedicated stroke unit / ICU')
angels_awards_hosp(column_name=xl_col_to_name(index), coln=index)
# set color for proposed angel award
def proposed_award(column_name, coln):
    """Colour the 'Proposed Award' text column by award tier.

    Each data cell (rows 4 .. number_of_rows + 1) gets a text-containing
    conditional format: STROKEREADY -> green, GOLD -> gold, PLATINUM -> plat,
    DIAMOND -> black.  `coln` is accepted for signature parity with the
    sibling helpers but is not used.

    Fixes: the body previously ignored the `column_name` parameter and read
    the module-level `column` variable (working only because the caller set
    it first), and bounded the loops with `nrow` instead of the
    `number_of_rows` used by every sibling helper.
    """
    tier_formats = (('STROKEREADY', green),
                    ('GOLD', gold),
                    ('PLATINUM', plat),
                    ('DIAMOND', black))
    for award_text, cell_format in tier_formats:
        for r in range(4, number_of_rows + 2):
            worksheet.conditional_format(column_name + str(r),
                                         {'type': 'text',
                                          'criteria': 'containing',
                                          'value': award_text,
                                          'format': cell_format})
# Colour the 'Proposed Award' column, then finalize and write the workbook.
index = column_names.index('Proposed Award')
column = xl_col_to_name(index)
proposed_award(column, coln=index)
workbook1.close()
| 65.861394
| 5,553
| 0.623492
| 20,438
| 174,862
| 5.136706
| 0.039583
| 0.043168
| 0.015317
| 0.02347
| 0.823192
| 0.782253
| 0.74056
| 0.699564
| 0.662396
| 0.619999
| 0
| 0.017358
| 0.259719
| 174,862
| 2,655
| 5,554
| 65.861394
| 0.793661
| 0.223033
| 0
| 0.53593
| 0
| 0
| 0.320709
| 0.053129
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030222
| false
| 0.001343
| 0.012089
| 0.000672
| 0.06313
| 0.000672
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2ba45c52a1df277d8243a7c389f1e7071d05b66d
| 104
|
py
|
Python
|
IHOPython.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 17
|
2018-09-19T09:44:33.000Z
|
2022-01-17T15:17:11.000Z
|
IHOPython.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 2
|
2020-02-24T15:28:33.000Z
|
2021-11-16T00:04:52.000Z
|
IHOPython.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 8
|
2020-02-20T00:02:06.000Z
|
2022-01-06T17:25:51.000Z
|
# Read how many pancakes were eaten and report appetite status.
pancakes = int(input())
if pancakes > 3:
    print('Yum!')
else:
    # Covers every pancakes <= 3 case, same as the original pair of ifs.
    print('Still hungry!')
| 17.333333
| 26
| 0.596154
| 14
| 104
| 4.428571
| 0.642857
| 0.322581
| 0.354839
| 0.516129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.211538
| 104
| 5
| 27
| 20.8
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0.163462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2bb0e646301d382fa19ef4ac3fe22bff82b972da
| 125
|
py
|
Python
|
PythonServer/server.py
|
MarkFranciscus/DevMIDbot
|
c50212488babce79f362954b689b92deda6ef30f
|
[
"MIT"
] | null | null | null |
PythonServer/server.py
|
MarkFranciscus/DevMIDbot
|
c50212488babce79f362954b689b92deda6ef30f
|
[
"MIT"
] | null | null | null |
PythonServer/server.py
|
MarkFranciscus/DevMIDbot
|
c50212488babce79f362954b689b92deda6ef30f
|
[
"MIT"
] | null | null | null |
import time  # NOTE(review): imported but never used here — confirm before removing
import daemon
import MIDBot
import BotInfo

# Detach from the controlling terminal and run the bot inside a daemon
# context so it keeps running after the launching shell exits.
with daemon.DaemonContext():
    MIDBot.midbot.run(BotInfo.BOT_TOKEN)
| 17.857143
| 40
| 0.808
| 17
| 125
| 5.882353
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 125
| 7
| 40
| 17.857143
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
921122a50ba8eb6b64f5809d777068d920c2699f
| 1,460
|
py
|
Python
|
tests/synapse/test_detect_tbars.py
|
jingpengw/reneu
|
f69a9ab53ea1f4852493f3d92ec142e60ad0812b
|
[
"Apache-2.0"
] | null | null | null |
tests/synapse/test_detect_tbars.py
|
jingpengw/reneu
|
f69a9ab53ea1f4852493f3d92ec142e60ad0812b
|
[
"Apache-2.0"
] | 13
|
2019-09-04T03:56:05.000Z
|
2020-04-28T00:37:42.000Z
|
tests/synapse/test_detect_tbars.py
|
jingpengw/reneu
|
f69a9ab53ea1f4852493f3d92ec142e60ad0812b
|
[
"Apache-2.0"
] | 1
|
2019-11-07T11:24:21.000Z
|
2019-11-07T11:24:21.000Z
|
import numpy as np
import fill_voids
from edt import edt
from reneu.lib.synapse import detect_points, get_object_average_intensity
def test_detect_tbars():
    """A centered 3x3x3 True cube yields a single detected point at (3,3,3),
    and the average intensity matches the mean EDT value over the object."""
    mask = np.zeros((7, 7, 7), dtype=bool)
    mask[2:5, 2:5, 2:5] = True
    distance_field = edt(mask)
    labels = mask.astype(np.uint64)

    detected = detect_points(labels, distance_field)
    expected_point = np.asarray([[3, 3, 3]], dtype=np.uint64)
    np.testing.assert_array_equal(detected, expected_point)

    measured = get_object_average_intensity(labels, distance_field)
    expected_avg = np.sum(distance_field[labels > 0]) / np.count_nonzero(labels)
    np.testing.assert_array_almost_equal(
        measured, np.asarray([expected_avg], dtype=np.float32))
# def test_fill_voids():
# seg[2, 3, 4] = False
# fill_voids.fill(seg, in_place=True)
# assert seg[2,3,4] == True
def test_detect_tbars_non_symmetric():
    """An off-center 3x3x3 cube (offset per axis) is detected at its
    center (2,3,4); average intensity again equals the object-mean EDT."""
    mask = np.zeros((7, 7, 7), dtype=bool)
    mask[1:4, 2:5, 3:6] = True
    # seg[2, 3, 4] = False
    # fill_voids.fill(seg, in_place=True)
    # assert seg[2,3,4] == True
    distance_field = edt(mask)
    labels = mask.astype(np.uint64)

    detected = detect_points(labels, distance_field)
    np.testing.assert_array_equal(
        detected, np.asarray([[2, 3, 4]], dtype=np.uint64))

    measured = get_object_average_intensity(labels, distance_field)
    expected_avg = np.sum(distance_field[labels > 0]) / np.count_nonzero(labels)
    np.testing.assert_array_almost_equal(
        measured, np.asarray([expected_avg], dtype=np.float32))
| 29.795918
| 96
| 0.669178
| 233
| 1,460
| 4.012876
| 0.227468
| 0.154011
| 0.016043
| 0.085562
| 0.752941
| 0.752941
| 0.752941
| 0.752941
| 0.752941
| 0.699465
| 0
| 0.041701
| 0.178767
| 1,460
| 48
| 97
| 30.416667
| 0.738115
| 0.205479
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9211ce7b19db9b4c6caab69e7ea3d94d0f74dee1
| 202
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/pyllist/compat.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/pyllist/compat.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/pyllist/compat.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
import sys

# Major-version flags used for Python 2/3 compatibility branching.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    def u(s):
        # Python 3: str is already unicode — return unchanged.
        return s
else:
    def u(s):
        # Python 2: double backslashes first so that only intentional escape
        # sequences survive the 'unicode_escape' decoding into a unicode object.
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
| 16.833333
| 67
| 0.554455
| 33
| 202
| 3.30303
| 0.575758
| 0.183486
| 0.256881
| 0.275229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046358
| 0.252475
| 202
| 11
| 68
| 18.363636
| 0.675497
| 0
| 0
| 0.222222
| 0
| 0
| 0.09901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.222222
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
921aaf96e0a9ba0fa54fbed38422036b910199f7
| 10,566
|
py
|
Python
|
ckanext-showcase/ckanext/showcase/tests/action/test_create.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 4
|
2017-06-12T15:18:30.000Z
|
2019-10-11T15:12:43.000Z
|
ckanext-showcase/ckanext/showcase/tests/action/test_create.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 64
|
2017-05-14T22:15:53.000Z
|
2020-03-08T15:26:49.000Z
|
ckanext-showcase/ckanext/showcase/tests/action/test_create.py
|
smallmedia/iod-ckan
|
dfd85b41286fe86924ec16b0a88efc7292848ceb
|
[
"Apache-2.0"
] | 2
|
2018-09-08T08:02:25.000Z
|
2020-04-24T13:02:06.000Z
|
from nose import tools as nosetools
from ckan.model.package import Package
import ckan.model as model
import ckan.plugins.toolkit as toolkit
try:
import ckan.tests.factories as factories
except ImportError: # for ckan <= 2.3
import ckan.new_tests.factories as factories
try:
import ckan.tests.helpers as helpers
except ImportError: # for ckan <= 2.3
import ckan.new_tests.helpers as helpers
from ckanext.showcase.model import ShowcasePackageAssociation, ShowcaseAdmin
from ckanext.showcase.tests import ShowcaseFunctionalTestBase
class TestCreateShowcase(ShowcaseFunctionalTestBase):
    """Tests for the ckanext_showcase_create action."""

    @staticmethod
    def _showcase_count():
        # Number of packages of type 'showcase' currently in the session.
        return model.Session.query(Package).filter(
            Package.type == 'showcase').count()

    def test_showcase_create_no_args(self):
        """Calling showcase create without args raises ValidationError."""
        admin_user = factories.Sysadmin()
        ctx = {'user': admin_user['name']}
        # No showcases exist beforehand.
        nosetools.assert_equal(self._showcase_count(), 0)
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_create',
                                context=ctx)
        # Still none after the failed call.
        nosetools.assert_equal(self._showcase_count(), 0)

    def test_showcase_create_with_name_arg(self):
        """Calling showcase create with a name arg creates a showcase package."""
        admin_user = factories.Sysadmin()
        ctx = {'user': admin_user['name']}
        nosetools.assert_equal(self._showcase_count(), 0)
        helpers.call_action('ckanext_showcase_create',
                            context=ctx, name='my-showcase')
        # Exactly one showcase (dataset of type 'showcase') was created.
        nosetools.assert_equal(self._showcase_count(), 1)

    def test_showcase_create_with_existing_name(self):
        """Calling showcase create with an existing name raises ValidationError."""
        admin_user = factories.Sysadmin()
        ctx = {'user': admin_user['name']}
        factories.Dataset(type='showcase', name='my-showcase')
        # The fixture showcase is the only one.
        nosetools.assert_equal(self._showcase_count(), 1)
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_create',
                                context=ctx, name='my-showcase')
        # The duplicate attempt created nothing.
        nosetools.assert_equal(self._showcase_count(), 1)
class TestCreateShowcasePackageAssociation(ShowcaseFunctionalTestBase):
    """Tests for the ckanext_showcase_package_association_create action."""

    def test_association_create_no_args(self):
        '''
        Calling sc/pkg association create with no args raises
        ValidationError.
        '''
        sysadmin = factories.User(sysadmin=True)
        context = {'user': sysadmin['name']}
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_package_association_create',
                                context=context)
        # The failed call created no association rows.
        nosetools.assert_equal(model.Session.query(ShowcasePackageAssociation).count(), 0)

    def test_association_create_missing_arg(self):
        '''
        Calling sc/pkg association create with a missing arg raises
        ValidationError.
        '''
        sysadmin = factories.User(sysadmin=True)
        package_id = factories.Dataset()['id']
        context = {'user': sysadmin['name']}
        # Only package_id is supplied; showcase_id is deliberately omitted.
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_package_association_create',
                                context=context, package_id=package_id)
        nosetools.assert_equal(model.Session.query(ShowcasePackageAssociation).count(), 0)

    def test_association_create_by_id(self):
        '''
        Calling sc/pkg association create with correct args (package ids)
        creates an association.
        '''
        sysadmin = factories.User(sysadmin=True)
        package_id = factories.Dataset()['id']
        showcase_id = factories.Dataset(type='showcase')['id']
        context = {'user': sysadmin['name']}
        association_dict = helpers.call_action('ckanext_showcase_package_association_create',
                                               context=context, package_id=package_id,
                                               showcase_id=showcase_id)
        # One association object created
        nosetools.assert_equal(model.Session.query(ShowcasePackageAssociation).count(), 1)
        # Association properties are correct
        nosetools.assert_equal(association_dict.get('showcase_id'), showcase_id)
        nosetools.assert_equal(association_dict.get('package_id'), package_id)

    def test_association_create_by_name(self):
        '''
        Calling sc/pkg association create with correct args (package names)
        creates an association.
        '''
        sysadmin = factories.User(sysadmin=True)
        package = factories.Dataset()
        package_name = package['name']
        showcase = factories.Dataset(type='showcase')
        showcase_name = showcase['name']
        context = {'user': sysadmin['name']}
        association_dict = helpers.call_action('ckanext_showcase_package_association_create',
                                               context=context, package_id=package_name,
                                               showcase_id=showcase_name)
        nosetools.assert_equal(model.Session.query(ShowcasePackageAssociation).count(), 1)
        # Even though names were passed in, the returned dict carries ids.
        nosetools.assert_equal(association_dict.get('showcase_id'), showcase['id'])
        nosetools.assert_equal(association_dict.get('package_id'), package['id'])

    def test_association_create_existing(self):
        '''
        Attempt to create association with existing details returns Validation
        Error.
        '''
        sysadmin = factories.User(sysadmin=True)
        package_id = factories.Dataset()['id']
        showcase_id = factories.Dataset(type='showcase')['id']
        context = {'user': sysadmin['name']}
        # Create association
        helpers.call_action('ckanext_showcase_package_association_create',
                            context=context, package_id=package_id,
                            showcase_id=showcase_id)
        # Attempted duplicate creation results in ValidationError
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_package_association_create',
                                context=context, package_id=package_id,
                                showcase_id=showcase_id)
class TestCreateShowcaseAdmin(ShowcaseFunctionalTestBase):
    """Tests for the ckanext_showcase_admin_add action."""

    def test_showcase_admin_add_creates_showcase_admin_user(self):
        '''
        Calling ckanext_showcase_admin_add adds user to showcase admin list.
        '''
        user_to_add = factories.User()
        # No showcase admins exist beforehand.
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 0)
        helpers.call_action('ckanext_showcase_admin_add', context={},
                            username=user_to_add['name'])
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 1)
        nosetools.assert_true(user_to_add['id'] in ShowcaseAdmin.get_showcase_admin_ids())

    def test_showcase_admin_add_multiple_users(self):
        '''
        Calling ckanext_showcase_admin_add for multiple users correctly adds
        them to showcase admin list.
        '''
        user_to_add = factories.User()
        second_user_to_add = factories.User()
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 0)
        helpers.call_action('ckanext_showcase_admin_add', context={},
                            username=user_to_add['name'])
        helpers.call_action('ckanext_showcase_admin_add', context={},
                            username=second_user_to_add['name'])
        # Both users end up in the admin list.
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 2)
        nosetools.assert_true(user_to_add['id'] in ShowcaseAdmin.get_showcase_admin_ids())
        nosetools.assert_true(second_user_to_add['id'] in ShowcaseAdmin.get_showcase_admin_ids())

    def test_showcase_admin_add_existing_user(self):
        '''
        Calling ckanext_showcase_admin_add twice for same user raises a
        ValidationError.
        '''
        user_to_add = factories.User()
        # Add once
        helpers.call_action('ckanext_showcase_admin_add', context={},
                            username=user_to_add['name'])
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 1)
        # Attempt second add
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_admin_add', context={},
                                username=user_to_add['name'])
        # Still only one ShowcaseAdmin object.
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 1)

    def test_showcase_admin_add_username_doesnot_exist(self):
        '''
        Calling ckanext_showcase_admin_add with non-existent username raises
        ValidationError and no ShowcaseAdmin object is created.
        '''
        nosetools.assert_raises(toolkit.ObjectNotFound, helpers.call_action,
                                'ckanext_showcase_admin_add', context={},
                                username='missing')
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 0)
        nosetools.assert_equal(ShowcaseAdmin.get_showcase_admin_ids(), [])

    def test_showcase_admin_add_no_args(self):
        '''
        Calling ckanext_showcase_admin_add with no args raises ValidationError
        and no ShowcaseAdmin object is created.
        '''
        nosetools.assert_raises(toolkit.ValidationError, helpers.call_action,
                                'ckanext_showcase_admin_add', context={})
        nosetools.assert_equal(model.Session.query(ShowcaseAdmin).count(), 0)
        nosetools.assert_equal(ShowcaseAdmin.get_showcase_admin_ids(), [])
| 41.928571
| 97
| 0.638842
| 1,070
| 10,566
| 6.066355
| 0.109346
| 0.080881
| 0.073949
| 0.069327
| 0.792482
| 0.756894
| 0.751656
| 0.70359
| 0.70359
| 0.621322
| 0
| 0.002847
| 0.268692
| 10,566
| 251
| 98
| 42.095618
| 0.837194
| 0.144615
| 0
| 0.654412
| 0
| 0
| 0.090499
| 0.05883
| 0
| 0
| 0
| 0
| 0.257353
| 1
| 0.095588
| false
| 0
| 0.088235
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a6263d7f1359691c24925f4520c1820b4adb4410
| 80
|
py
|
Python
|
torchOnVideo/super_resolution/SuperResolution.py
|
torchOnVideo/torchOnVideo
|
aa07d5661f772eca027ecc6b79e14bd68a515aa1
|
[
"MIT"
] | 2
|
2021-03-19T08:05:06.000Z
|
2021-05-22T21:54:10.000Z
|
torchOnVideo/super_resolution/SuperResolution.py
|
torchOnVideo/torchOnVideo
|
aa07d5661f772eca027ecc6b79e14bd68a515aa1
|
[
"MIT"
] | null | null | null |
torchOnVideo/super_resolution/SuperResolution.py
|
torchOnVideo/torchOnVideo
|
aa07d5661f772eca027ecc6b79e14bd68a515aa1
|
[
"MIT"
] | null | null | null |
class SuperResolution():
def __init__(self, scale):
self.scale = 4
| 16
| 30
| 0.625
| 9
| 80
| 5.111111
| 0.777778
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.2625
| 80
| 4
| 31
| 20
| 0.762712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a68e82144de35283ffec3359e93c3f724d52832f
| 47
|
py
|
Python
|
Game/gameRunner.py
|
AhmedAlzubairi1/COMS4995
|
c56c2549bf13538d89b001357f658ae04e5f3f8d
|
[
"MIT"
] | 2
|
2021-09-23T01:58:35.000Z
|
2022-01-30T00:34:52.000Z
|
Game/gameRunner.py
|
AhmedAlzubairi1/Chess
|
c56c2549bf13538d89b001357f658ae04e5f3f8d
|
[
"MIT"
] | 10
|
2020-10-02T00:37:52.000Z
|
2020-12-02T07:12:28.000Z
|
Game/gameRunner.py
|
AhmedAlzubairi1/COMS4995
|
c56c2549bf13538d89b001357f658ae04e5f3f8d
|
[
"MIT"
] | 2
|
2020-11-06T21:10:31.000Z
|
2020-12-08T19:27:57.000Z
|
from Game import Game
x = Game()
x.startGame()
| 11.75
| 21
| 0.702128
| 8
| 47
| 4.125
| 0.625
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 3
| 22
| 15.666667
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a68fa46eed4ed1e6e783afef10e305992fe31d5a
| 53
|
py
|
Python
|
planet/control/__init__.py
|
alec-tschantz/planet
|
bf68722993c93129263bb9606a582d24cb4f2a58
|
[
"MIT"
] | 7
|
2020-03-08T08:28:12.000Z
|
2022-01-23T17:19:56.000Z
|
planet/control/__init__.py
|
alec-tschantz/planet
|
bf68722993c93129263bb9606a582d24cb4f2a58
|
[
"MIT"
] | null | null | null |
planet/control/__init__.py
|
alec-tschantz/planet
|
bf68722993c93129263bb9606a582d24cb4f2a58
|
[
"MIT"
] | null | null | null |
from .agent import Agent
from .planner import Planner
| 26.5
| 28
| 0.830189
| 8
| 53
| 5.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 2
| 28
| 26.5
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a6aa63e521c191fe8439353ac9b539ce7d3e904f
| 141
|
py
|
Python
|
care/care/doctype/item_expiry/test_item_expiry.py
|
mohsinalimat/care
|
5b2f85839c5fa9882eb0d0097825e149402a6a8c
|
[
"MIT"
] | 1
|
2021-08-07T12:49:13.000Z
|
2021-08-07T12:49:13.000Z
|
care/care/doctype/item_expiry/test_item_expiry.py
|
mohsinalimat/care
|
5b2f85839c5fa9882eb0d0097825e149402a6a8c
|
[
"MIT"
] | null | null | null |
care/care/doctype/item_expiry/test_item_expiry.py
|
mohsinalimat/care
|
5b2f85839c5fa9882eb0d0097825e149402a6a8c
|
[
"MIT"
] | 1
|
2021-08-07T12:49:13.000Z
|
2021-08-07T12:49:13.000Z
|
# Copyright (c) 2021, RF and Contributors
# See license.txt
# import frappe
import unittest
class TestItemExpiry(unittest.TestCase):
pass
| 15.666667
| 41
| 0.77305
| 18
| 141
| 6.055556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.148936
| 141
| 8
| 42
| 17.625
| 0.875
| 0.489362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
a6fde74de1b3253ae0e88418fb7c3fd572921b2c
| 3,612
|
py
|
Python
|
solicitudes/admin.py
|
shiminasai/cantera
|
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
|
[
"MIT"
] | null | null | null |
solicitudes/admin.py
|
shiminasai/cantera
|
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
|
[
"MIT"
] | null | null | null |
solicitudes/admin.py
|
shiminasai/cantera
|
90f162351e1ad6ffaaf79cf90c361e302ab6e09f
|
[
"MIT"
] | 2
|
2019-04-10T19:45:42.000Z
|
2019-04-24T17:16:40.000Z
|
from django.contrib import admin
from .models import *
from users.models import *
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
# Register your models here.
class SolicitudesOrgAdmin(admin.ModelAdmin):
    """Admin for membership requests to an existing organisation.

    Approving a request attaches the requesting user to the organisation
    and sends a best-effort notification e-mail.
    """
    list_display = ('usuario', 'organizacion', 'aprobado')

    def add_view(self, request, form_url='', extra_context=None):
        # NOTE(review): mutating self.readonly_fields affects the shared
        # ModelAdmin instance across requests — confirm this is acceptable.
        self.readonly_fields = ('usuario', 'organizacion', 'aprobado')
        return super(SolicitudesOrgAdmin, self).add_view(request)

    def change_view(self, request, object_id, form_url='', extra_context=None):
        obj = SolicitudesOrg.objects.get(id=object_id)
        if obj.aprobado == False:
            # Pending request: keep 'aprobado' editable so it can be approved.
            self.readonly_fields = ('usuario', 'organizacion')
        else:
            self.readonly_fields = ('usuario', 'organizacion', 'aprobado')
        return super(SolicitudesOrgAdmin, self).change_view(request, object_id)

    def save_model(self, request, obj, form, change):
        """On approval, link the user to the organisation and notify them."""
        if obj.aprobado == True:
            user = User.objects.get(id=obj.usuario.id)
            user.organizacion = obj.organizacion
            user.save()
            try:
                subject, from_email = 'Plataforma Género y Metodologías', 'generoymetodologias@gmail.com'
                text_content = render_to_string('email/solicitud_aprobada.txt', {'obj': user,})
                # TODO confirm: the HTML alternative renders the same .txt
                # template — an .html template was probably intended.
                html_content = render_to_string('email/solicitud_aprobada.txt', {'obj': user,})
                list_mail = User.objects.filter(id=user.id).values_list('email', flat=True)
                msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                msg.attach_alternative(html_content, "text/html")
                msg.send()
            except Exception:
                # E-mail is best-effort: approval must not fail if the mail
                # cannot be rendered or sent.  Narrowed from a bare `except:`
                # so SystemExit/KeyboardInterrupt are no longer swallowed.
                pass
        super(SolicitudesOrgAdmin, self).save_model(request, obj, form, change)

    def has_delete_permission(self, request, obj=None):
        # Requests are never deletable from the admin (audit trail).
        return False
class SolicitudesNuevasOrgAdmin(admin.ModelAdmin):
    """Admin for requests to register a brand-new organisation.

    Approval creates the Contraparte, attaches the requesting user to it,
    and sends a best-effort notification e-mail.
    """
    list_display = ('usuario', 'nombre_org', 'aprobado')

    def add_view(self, request, form_url='', extra_context=None):
        # NOTE(review): mutating self.readonly_fields affects the shared
        # ModelAdmin instance across requests — confirm this is acceptable.
        self.readonly_fields = ('usuario', 'nombre_org', 'siglas_org', 'pais_org', 'aprobado')
        return super(SolicitudesNuevasOrgAdmin, self).add_view(request)

    def change_view(self, request, object_id, form_url='', extra_context=None):
        obj = SolicitudesNuevasOrg.objects.get(id=object_id)
        if obj.aprobado == False:
            # Pending request: keep 'aprobado' editable so it can be approved.
            self.readonly_fields = ('usuario', 'nombre_org', 'siglas_org', 'pais_org')
        else:
            self.readonly_fields = ('usuario', 'nombre_org', 'siglas_org', 'pais_org', 'aprobado')
        return super(SolicitudesNuevasOrgAdmin, self).change_view(request, object_id)

    def save_model(self, request, obj, form, change):
        """On approval, create the organisation, link the user, notify them."""
        if obj.aprobado == True:
            org = Contraparte(nombre=obj.nombre_org, siglas=obj.siglas_org, pais=obj.pais_org)
            org.save()
            user = User.objects.get(id=obj.usuario.id)
            user.organizacion = org
            user.save()
            try:
                subject, from_email = 'Plataforma Género y Metodologías', 'generoymetodologias@gmail.com'
                text_content = render_to_string('email/solicitud_aprobada.txt', {'obj': user,})
                # TODO confirm: the HTML alternative renders the same .txt
                # template — an .html template was probably intended.
                html_content = render_to_string('email/solicitud_aprobada.txt', {'obj': user,})
                list_mail = User.objects.filter(id=user.id).values_list('email', flat=True)
                msg = EmailMultiAlternatives(subject, text_content, from_email, list_mail)
                msg.attach_alternative(html_content, "text/html")
                msg.send()
            except Exception:
                # Best-effort notification; narrowed from a bare `except:` so
                # system-exiting exceptions are no longer swallowed.
                pass
        super(SolicitudesNuevasOrgAdmin, self).save_model(request, obj, form, change)

    def has_delete_permission(self, request, obj=None):
        # Requests are never deletable from the admin (audit trail).
        return False
# Register both request models with the default admin site.
admin.site.register(SolicitudesOrg,SolicitudesOrgAdmin)
admin.site.register(SolicitudesNuevasOrg,SolicitudesNuevasOrgAdmin)
| 38.83871
| 94
| 0.728682
| 447
| 3,612
| 5.695749
| 0.201342
| 0.034564
| 0.042419
| 0.058916
| 0.788295
| 0.754517
| 0.754517
| 0.754517
| 0.754517
| 0.754517
| 0
| 0
| 0.144795
| 3,612
| 92
| 95
| 39.26087
| 0.824215
| 0.007198
| 0
| 0.637681
| 0
| 0
| 0.148912
| 0.048683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0.028986
| 0.072464
| 0.028986
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
47176d5a9b25d878b434e125117311dab8121dcd
| 290
|
py
|
Python
|
jupyter_releaser/actions/draft_changelog.py
|
jupyter1029/jupyter_releaser
|
f7f896886d0cef2b83a6a61f3433dd2c10bb09ea
|
[
"BSD-3-Clause"
] | null | null | null |
jupyter_releaser/actions/draft_changelog.py
|
jupyter1029/jupyter_releaser
|
f7f896886d0cef2b83a6a61f3433dd2c10bb09ea
|
[
"BSD-3-Clause"
] | 59
|
2021-03-09T10:11:27.000Z
|
2021-04-13T09:06:46.000Z
|
jupyter_releaser/actions/draft_changelog.py
|
jupyter1029/jupyter_releaser
|
f7f896886d0cef2b83a6a61f3433dd2c10bb09ea
|
[
"BSD-3-Clause"
] | 1
|
2021-05-02T16:04:02.000Z
|
2021-05-02T16:04:02.000Z
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_releaser.util import run
# Run the changelog-drafting pipeline steps in order.
for _command in (
    "jupyter-releaser prep-git",
    "jupyter-releaser bump-version",
    "jupyter-releaser build-changelog",
    "jupyter-releaser draft-changelog",
):
    run(_command)
| 32.222222
| 58
| 0.793103
| 40
| 290
| 5.725
| 0.625
| 0.327511
| 0.31441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 290
| 8
| 59
| 36.25
| 0.877395
| 0.331034
| 0
| 0
| 0
| 0
| 0.617801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5b2f469d02d7221f07c83c36f4371dabd4b8e600
| 41
|
py
|
Python
|
python/myfunc.py
|
chaojie-fu/robotics_tutorial
|
12affebfe6cb3810cc1e8fde4c674ed077b926a5
|
[
"MIT"
] | 1
|
2021-12-23T13:05:26.000Z
|
2021-12-23T13:05:26.000Z
|
python/myfunc.py
|
cyoahs/robotics_tutorial
|
3aed846c5e95eb32dbcdeebac0b22e54cd74ea02
|
[
"MIT"
] | null | null | null |
python/myfunc.py
|
cyoahs/robotics_tutorial
|
3aed846c5e95eb32dbcdeebac0b22e54cd74ea02
|
[
"MIT"
] | 1
|
2020-04-06T11:25:51.000Z
|
2020-04-06T11:25:51.000Z
|
def my_func(a):
    """Print a short message that includes *a*."""
    text = f'This is {a}'
    print(text)
| 20.5
| 25
| 0.585366
| 9
| 41
| 2.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 25
| 20.5
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5b6a6275ef0c5566227c9bf1e2faebb0985e8444
| 157
|
py
|
Python
|
models/game/bots/__init__.py
|
zachdj/ultimate-tic-tac-toe
|
b8e6128d9d19628f6f889a3958d30854527a8645
|
[
"MIT"
] | null | null | null |
models/game/bots/__init__.py
|
zachdj/ultimate-tic-tac-toe
|
b8e6128d9d19628f6f889a3958d30854527a8645
|
[
"MIT"
] | null | null | null |
models/game/bots/__init__.py
|
zachdj/ultimate-tic-tac-toe
|
b8e6128d9d19628f6f889a3958d30854527a8645
|
[
"MIT"
] | null | null | null |
from .Bot import Bot
from .BogoBot import BogoBot
from .RandoMaxBot import RandoMaxBot
from .MCTSBot import MCTSBot
from .Heuristic1Bot import Heuristic1Bot
| 26.166667
| 40
| 0.840764
| 20
| 157
| 6.6
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0.127389
| 157
| 5
| 41
| 31.4
| 0.948905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5b73c47354ba360c1ed639c535a91bb55b97b735
| 126
|
py
|
Python
|
katas/kyu_8/barking_mad.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_8/barking_mad.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_8/barking_mad.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
class Dog:
    """A dog of a given breed; all dogs bark the same way."""

    def __init__(self, breed):
        # Remember which breed this dog is.
        self.breed = breed

    @staticmethod
    def bark():
        """Return the noise a barking dog makes."""
        return 'Woof'
| 15.75
| 30
| 0.563492
| 14
| 126
| 4.785714
| 0.714286
| 0.268657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 126
| 7
| 31
| 18
| 0.797619
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
5b7959ae72b93ac7365403ad2d24b31a120af54a
| 203
|
py
|
Python
|
FWCore/Framework/python/test/cmsExceptionsFatal_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
FWCore/Framework/python/test/cmsExceptionsFatal_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
FWCore/Framework/python/test/cmsExceptionsFatal_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
import FWCore.Framework.test.cmsExceptionsFatalOption_cff

# Untracked options PSet re-exporting the shared Rethrow list, so test
# configurations treat the listed exception categories as fatal.
options = cms.untracked.PSet(
    Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)
| 29
| 70
| 0.847291
| 23
| 203
| 7.391304
| 0.608696
| 0.141176
| 0.223529
| 0.505882
| 0.541176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078818
| 203
| 6
| 71
| 33.833333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5bb29eaea93a1ff514403563a4b1757dca026252
| 50
|
py
|
Python
|
Asymmetric/RSA/common-modulus/secret.py
|
killua4564/Symmetric
|
183ea2ec1d1342e9124e710a2de0fcad8b399f3d
|
[
"MIT"
] | 1
|
2021-05-05T14:03:10.000Z
|
2021-05-05T14:03:10.000Z
|
Asymmetric/RSA/common-modulus/secret.py
|
killua4564/Symmetric
|
183ea2ec1d1342e9124e710a2de0fcad8b399f3d
|
[
"MIT"
] | null | null | null |
Asymmetric/RSA/common-modulus/secret.py
|
killua4564/Symmetric
|
183ea2ec1d1342e9124e710a2de0fcad8b399f3d
|
[
"MIT"
] | null | null | null |
# CTF flag constant — presumably a redacted placeholder value; the literal
# must stay byte-identical for code that imports it.
FLAG = 'flag{xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}'
| 50
| 50
| 0.86
| 3
| 50
| 14.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 50
| 1
| 50
| 50
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0.803922
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5bbbebe6068539670bf06280e2dc47c0484d0409
| 40
|
py
|
Python
|
fairing/strategies/pbt/__init__.py
|
wbuchwalter/fairing-1
|
e6d459dc8413ffd3e8c4b0771a2ec79c74c383ab
|
[
"Apache-2.0"
] | 21
|
2018-08-09T19:13:47.000Z
|
2020-07-22T05:21:11.000Z
|
fairing/strategies/pbt/__init__.py
|
wbuchwalter/fairing-1
|
e6d459dc8413ffd3e8c4b0771a2ec79c74c383ab
|
[
"Apache-2.0"
] | 14
|
2018-08-02T18:44:09.000Z
|
2018-11-08T15:32:55.000Z
|
fairing/strategies/pbt/__init__.py
|
wbuchwalter/fairing-1
|
e6d459dc8413ffd3e8c4b0771a2ec79c74c383ab
|
[
"Apache-2.0"
] | 4
|
2018-08-09T19:13:59.000Z
|
2018-10-08T05:44:31.000Z
|
from .pbt import PopulationBasedTraining
| 40
| 40
| 0.9
| 4
| 40
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5bc28c8e5fe0d68a4178a18ff74bc1c5ee07698d
| 34
|
py
|
Python
|
bokeh-app/scripts/crossplot.py
|
rmsare/ec-dashboard
|
2be3315620ce50000357ed9b18099e0c41068107
|
[
"MIT"
] | null | null | null |
bokeh-app/scripts/crossplot.py
|
rmsare/ec-dashboard
|
2be3315620ce50000357ed9b18099e0c41068107
|
[
"MIT"
] | null | null | null |
bokeh-app/scripts/crossplot.py
|
rmsare/ec-dashboard
|
2be3315620ce50000357ed9b18099e0c41068107
|
[
"MIT"
] | null | null | null |
def crossplot_tab(data):
pass
| 11.333333
| 24
| 0.705882
| 5
| 34
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 2
| 25
| 17
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5be7b2ad4dce5f5889d787dfe5475bc8f5c1c0d6
| 86
|
py
|
Python
|
tests/event_log/test_event_log_aggregate.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/event_log/test_event_log_aggregate.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/event_log/test_event_log_aggregate.py
|
nadirhamid/protean
|
d31bc634e05c9221e82136bf18c2ceaa0982c1c8
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.mark.skip
def test_timestamps_are_logged_properly():
pass
| 12.285714
| 42
| 0.790698
| 12
| 86
| 5.333333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 86
| 6
| 43
| 14.333333
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7513992e21effe068998d9ebcc9323881e5ecab6
| 491
|
py
|
Python
|
tests/conftest.py
|
mwegrzynek/pysupla
|
b28ba23a551e70dcf9151ee70f252519e71b1f13
|
[
"Apache-2.0"
] | 2
|
2019-10-03T16:01:05.000Z
|
2020-05-09T19:34:16.000Z
|
tests/conftest.py
|
mwegrzynek/pysupla
|
b28ba23a551e70dcf9151ee70f252519e71b1f13
|
[
"Apache-2.0"
] | 1
|
2020-09-04T11:32:46.000Z
|
2020-09-04T11:32:46.000Z
|
tests/conftest.py
|
mwegrzynek/pysupla
|
b28ba23a551e70dcf9151ee70f252519e71b1f13
|
[
"Apache-2.0"
] | 2
|
2019-02-12T18:39:35.000Z
|
2020-09-04T06:26:12.000Z
|
# -*- coding: UTF-8 -*-
import pytest
import os
@pytest.fixture
def SERVER():
return os.environ['SUPLA_SERVER']
@pytest.fixture
def PERSONAL_ACCESS_TOKEN():
return os.environ['SUPLA_PERSONAL_ACCESS_TOKEN']
@pytest.fixture
def SHUTTER_ID():
return int(os.environ['SUPLA_SHUTTER_ID'])
@pytest.fixture
def api(SERVER, PERSONAL_ACCESS_TOKEN):
from pysupla import SuplaAPI
return SuplaAPI(
server=SERVER,
personal_access_token=PERSONAL_ACCESS_TOKEN
)
| 19.64
| 52
| 0.727088
| 63
| 491
| 5.428571
| 0.349206
| 0.204678
| 0.277778
| 0.116959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002445
| 0.167006
| 491
| 25
| 53
| 19.64
| 0.833741
| 0.04277
| 0
| 0.222222
| 0
| 0
| 0.117271
| 0.057569
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.166667
| 0.166667
| 0.611111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
75217400c8b3f96824962a12628baf8021aa718c
| 113
|
py
|
Python
|
backend/medtagger/ground_truth/algorithms/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 71
|
2019-01-31T19:50:31.000Z
|
2022-02-20T07:36:49.000Z
|
backend/medtagger/ground_truth/algorithms/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 379
|
2019-02-16T19:12:01.000Z
|
2022-03-11T23:12:24.000Z
|
backend/medtagger/ground_truth/algorithms/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 16
|
2019-01-31T16:44:39.000Z
|
2022-02-14T15:23:29.000Z
|
"""Module responsible for definition of all algorithms that may used during Ground Truth data set generation."""
| 56.5
| 112
| 0.79646
| 16
| 113
| 5.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 113
| 1
| 113
| 113
| 0.927835
| 0.938053
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7523c59c5baecb2bc1170b61e00177aad839ab9f
| 42
|
py
|
Python
|
src/strigiform/secrets/__init__.py
|
X-McKay/strigiform
|
5db74c99c6778303ec98f30f77097b9cb0cd7a36
|
[
"MIT"
] | null | null | null |
src/strigiform/secrets/__init__.py
|
X-McKay/strigiform
|
5db74c99c6778303ec98f30f77097b9cb0cd7a36
|
[
"MIT"
] | 76
|
2021-10-31T21:14:46.000Z
|
2022-03-30T18:32:49.000Z
|
src/strigiform/secrets/__init__.py
|
X-McKay/kingfisher
|
5db74c99c6778303ec98f30f77097b9cb0cd7a36
|
[
"MIT"
] | null | null | null |
"""Placeholder for secrets management."""
| 21
| 41
| 0.738095
| 4
| 42
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.815789
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
752dcb29f28f3f3cfa2d89a1d96860ce6a93b13e
| 93
|
py
|
Python
|
main.py
|
2022AC12SDD/Aurora_SDD_Template
|
aa0653ee917e68d8e9fdac8811403424b784835c
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
2022AC12SDD/Aurora_SDD_Template
|
aa0653ee917e68d8e9fdac8811403424b784835c
|
[
"CC0-1.0"
] | 1
|
2021-10-13T00:58:39.000Z
|
2021-10-13T00:58:39.000Z
|
main.py
|
2022AC12SDD/Aurora_SDD_Template
|
aa0653ee917e68d8e9fdac8811403424b784835c
|
[
"CC0-1.0"
] | null | null | null |
""" Don't forget your docstring."""
import helpers as h
print('main finished successfully')
| 18.6
| 35
| 0.731183
| 13
| 93
| 5.230769
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 93
| 5
| 36
| 18.6
| 0.85
| 0.301075
| 0
| 0
| 0
| 0
| 0.440678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
754525ccdb1cd01198e678ab30edcaffb65c5991
| 78
|
py
|
Python
|
scripts/quest/autogen_q57961s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/quest/autogen_q57961s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/quest/autogen_q57961s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
# Character field ID when accessed: 100010000
# ObjectID: 0
# ParentID: 57961
| 19.5
| 45
| 0.75641
| 10
| 78
| 5.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.166667
| 78
| 3
| 46
| 26
| 0.676923
| 0.910256
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f37be23d9f777d081baa3638605b096961f52fe7
| 233
|
py
|
Python
|
broker/__init__.py
|
davidkaggulire/kafkacli_interfaces
|
08dd42217b1ca0c3afa4a55f3e007b75a512c297
|
[
"MIT"
] | null | null | null |
broker/__init__.py
|
davidkaggulire/kafkacli_interfaces
|
08dd42217b1ca0c3afa4a55f3e007b75a512c297
|
[
"MIT"
] | null | null | null |
broker/__init__.py
|
davidkaggulire/kafkacli_interfaces
|
08dd42217b1ca0c3afa4a55f3e007b75a512c297
|
[
"MIT"
] | null | null | null |
# init.py
from .broker_interface import IMessageBroker
from .kafka_broker import KafkaBroker
from .kafka_broker_listener import KafkaBrokerListener
from .inmemory_broker import InMemoryBroker
from .kafka_mock import KafkaBrokerMock
| 29.125
| 54
| 0.871245
| 28
| 233
| 7.035714
| 0.535714
| 0.137056
| 0.152284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098712
| 233
| 7
| 55
| 33.285714
| 0.938095
| 0.030043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f38b22df905451c5cedd68786a309f544c358d2c
| 115
|
py
|
Python
|
owmeta_core/commands/__init__.py
|
mwatts15/owmeta-core
|
b072178f8e7b83cc8665a29f4d038554d18adc35
|
[
"MIT"
] | 2
|
2021-03-06T16:25:35.000Z
|
2022-03-24T15:00:03.000Z
|
owmeta_core/commands/__init__.py
|
mwatts15/owmeta-core
|
b072178f8e7b83cc8665a29f4d038554d18adc35
|
[
"MIT"
] | 39
|
2020-02-08T21:58:33.000Z
|
2022-01-03T15:28:18.000Z
|
owmeta_core/commands/__init__.py
|
openworm/owmeta-core
|
b072178f8e7b83cc8665a29f4d038554d18adc35
|
[
"MIT"
] | null | null | null |
'''
Various commands of the same kind as `~owmeta_core.command.OWM`, mostly intended as
sub-commands of `OWM`.
'''
| 23
| 83
| 0.721739
| 18
| 115
| 4.555556
| 0.777778
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13913
| 115
| 4
| 84
| 28.75
| 0.828283
| 0.921739
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
340e527984ffb3e4c0b55d79b0a8ca42cdbfa58f
| 97
|
py
|
Python
|
tests/integration/lambdas/python3/lambda1/handler1.py
|
cknave/localstack
|
67941331c74dded97284698aba64984ab69cdf43
|
[
"Apache-2.0"
] | 31,928
|
2017-07-04T03:06:28.000Z
|
2022-03-31T22:33:27.000Z
|
tests/integration/lambdas/python3/lambda1/handler1.py
|
cknave/localstack
|
67941331c74dded97284698aba64984ab69cdf43
|
[
"Apache-2.0"
] | 5,216
|
2017-07-04T11:45:41.000Z
|
2022-03-31T22:02:14.000Z
|
tests/integration/lambdas/python3/lambda1/handler1.py
|
cknave/localstack
|
67941331c74dded97284698aba64984ab69cdf43
|
[
"Apache-2.0"
] | 3,056
|
2017-06-05T13:29:11.000Z
|
2022-03-31T20:54:43.000Z
|
import settings
constant = settings.SETTING1
def handler(event, context):
return constant
| 12.125
| 28
| 0.762887
| 11
| 97
| 6.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.175258
| 97
| 7
| 29
| 13.857143
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
341250a0673882d87fb5d7a841e9efe66cbb2bf0
| 274
|
py
|
Python
|
niworkflows/anat/__init__.py
|
effigies/niworkflows
|
2b3d9aa8fa81d312bdf148a9af590ecacaea8c84
|
[
"BSD-3-Clause"
] | null | null | null |
niworkflows/anat/__init__.py
|
effigies/niworkflows
|
2b3d9aa8fa81d312bdf148a9af590ecacaea8c84
|
[
"BSD-3-Clause"
] | null | null | null |
niworkflows/anat/__init__.py
|
effigies/niworkflows
|
2b3d9aa8fa81d312bdf148a9af590ecacaea8c84
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-07-21 10:47:37
# @Last Modified by: oesteban
# @Last Modified time: 2016-09-23 15:17:32
from niworkflows.anat.mni import RobustMNINormalization
from niworkflows.anat.skullstrip import afni_wf as skullstrip_afni
| 30.444444
| 66
| 0.740876
| 41
| 274
| 4.902439
| 0.756098
| 0.119403
| 0.189055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122881
| 0.138686
| 274
| 8
| 67
| 34.25
| 0.728814
| 0.507299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3415cc121424e86c412fa8b7ced570431fa0a311
| 236
|
py
|
Python
|
pytorch_tabular/models/category_embedding/__init__.py
|
Actis92/pytorch_tabular
|
78dabf5e7b97d8ff24db4bc83d9d0a2273941bbe
|
[
"MIT"
] | 1
|
2021-12-11T03:18:36.000Z
|
2021-12-11T03:18:36.000Z
|
pytorch_tabular/models/category_embedding/__init__.py
|
Actis92/pytorch_tabular
|
78dabf5e7b97d8ff24db4bc83d9d0a2273941bbe
|
[
"MIT"
] | null | null | null |
pytorch_tabular/models/category_embedding/__init__.py
|
Actis92/pytorch_tabular
|
78dabf5e7b97d8ff24db4bc83d9d0a2273941bbe
|
[
"MIT"
] | null | null | null |
from .category_embedding_model import CategoryEmbeddingModel, CategoryEmbeddingBackbone
from .config import CategoryEmbeddingModelConfig
__all__ = ["CategoryEmbeddingModel", "CategoryEmbeddingModelConfig", "CategoryEmbeddingBackbone"]
| 47.2
| 97
| 0.877119
| 15
| 236
| 13.4
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063559
| 236
| 4
| 98
| 59
| 0.909502
| 0
| 0
| 0
| 0
| 0
| 0.317797
| 0.317797
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
343a0534e4ce6a86e3c440ac771d04db0a978161
| 62
|
py
|
Python
|
geopayment/providers/credo/__init__.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | 7
|
2020-07-18T16:11:45.000Z
|
2022-01-30T20:47:57.000Z
|
geopayment/providers/credo/__init__.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | 3
|
2017-12-01T05:55:39.000Z
|
2020-07-17T17:37:28.000Z
|
geopayment/providers/credo/__init__.py
|
Lh4cKg/tbcpay
|
481ef6148defc9897643919f7c47ce78d149acbf
|
[
"MIT"
] | 1
|
2021-12-18T02:42:09.000Z
|
2021-12-18T02:42:09.000Z
|
from geopayment.providers.credo.provider import CredoProvider
| 31
| 61
| 0.887097
| 7
| 62
| 7.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 1
| 62
| 62
| 0.948276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
caa3736f73450099dce590b19ac27adaf0b42016
| 1,232
|
py
|
Python
|
pysnowball/finance.py
|
xing2387/pysnowball
|
64e680b7d339cf8cdafdce6e5540725408bbb130
|
[
"Apache-2.0"
] | null | null | null |
pysnowball/finance.py
|
xing2387/pysnowball
|
64e680b7d339cf8cdafdce6e5540725408bbb130
|
[
"Apache-2.0"
] | null | null | null |
pysnowball/finance.py
|
xing2387/pysnowball
|
64e680b7d339cf8cdafdce6e5540725408bbb130
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import json
import os
import sys
from . import cons
from . import api_ref
from . import utls
def cash_flow(symbol, is_annals=0, count=10):
url = api_ref.finance_cash_flow_url+symbol
if is_annals == 1:
url = url + '&type=Q4'
url = url + '&count='+str(count)
return utls.fetch(url)
def indicator(symbol, is_annals=0, count=10):
url = api_ref.finance_indicator_url+symbol
if is_annals == 1:
url = url + '&type=Q4'
url = url + '&count='+str(count)
return utls.fetch(url)
def balance(symbol, is_annals=0, count=10):
url = api_ref.finance_balance_url+symbol
if is_annals == 1:
url = url + '&type=Q4'
url = url + '&count='+str(count)
return utls.fetch(url)
def income(symbol, is_annals=0, count=10):
url = api_ref.finance_income_url+symbol
if is_annals == 1:
url = url + '&type=Q4'
url = url + '&count='+str(count)
return utls.fetch(url)
def business(symbol, is_annals=0, count=10):
url = api_ref.finance_business_url+symbol
if is_annals == 1:
url = url + '&type=Q4'
url = url + '&count='+str(count)
return utls.fetch(url)
| 17.6
| 46
| 0.616883
| 183
| 1,232
| 3.972678
| 0.180328
| 0.110041
| 0.096286
| 0.103164
| 0.752407
| 0.752407
| 0.752407
| 0.752407
| 0.752407
| 0.752407
| 0
| 0.027233
| 0.25487
| 1,232
| 69
| 47
| 17.855072
| 0.764706
| 0
| 0
| 0.540541
| 0
| 0
| 0.060877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.189189
| 0
| 0.459459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cabbfab934c0bf057e98af356a7c83006140b262
| 4,517
|
py
|
Python
|
ProjectEuler/8.py
|
RobVor/Python
|
5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
|
[
"FSFAP"
] | null | null | null |
ProjectEuler/8.py
|
RobVor/Python
|
5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
|
[
"FSFAP"
] | 4
|
2021-06-02T03:44:24.000Z
|
2022-03-12T00:52:58.000Z
|
ProjectEuler/8.py
|
RobVor/Python
|
5cfcd9a72c3899a453c0ec8f4fadea71fe453c49
|
[
"FSFAP"
] | null | null | null |
"""The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?"""
Series =[7, 3, 1, 6, 7, 1, 7, 6, 5, 3, 1, 3, 3, 0, 6, 2, 4, 9, 1, 9, 2, 2, 5, 1, 1, 9, 6, 7, 4, 4, 2, 6, 5, 7, 4, 7, 4, 2, 3, 5, 5, 3, 4, 9, 1, 9, 4, 9, 3, 4, 9, 6, 9, 8, 3, 5, 2, 0, 3, 1, 2, 7, 7, 4, 5, 0, 6, 3, 2, 6, 2, 3, 9, 5, 7, 8, 3, 1, 8, 0, 1, 6, 9, 8, 4, 8, 0, 1, 8, 6, 9, 4, 7, 8, 8, 5, 1, 8, 4, 3, 8, 5, 8, 6, 1, 5, 6, 0, 7, 8, 9, 1, 1, 2, 9, 4, 9, 4, 9, 5, 4, 5, 9, 5, 0, 1, 7, 3, 7, 9, 5, 8, 3, 3, 1, 9, 5, 2, 8, 5, 3, 2, 0, 8, 8, 0, 5, 5, 1, 1, 1, 2, 5, 4, 0, 6, 9, 8, 7, 4, 7, 1, 5, 8, 5, 2, 3, 8, 6, 3, 0, 5, 0, 7, 1, 5, 6, 9, 3, 2, 9, 0, 9, 6, 3, 2, 9, 5, 2, 2, 7, 4, 4, 3, 0, 4, 3, 5, 5, 7, 6, 6, 8, 9, 6, 6, 4, 8, 9, 5, 0, 4, 4, 5, 2, 4, 4, 5, 2, 3, 1, 6, 1, 7, 3, 1, 8, 5, 6, 4, 0, 3, 0, 9, 8, 7, 1, 1, 1, 2, 1, 7, 2, 2, 3, 8, 3, 1, 1, 3, 6, 2, 2, 2, 9, 8, 9, 3, 4, 2, 3, 3, 8, 0, 3, 0, 8, 1, 3, 5, 3, 3, 6, 2, 7, 6, 6, 1, 4, 2, 8, 2, 8, 0, 6, 4, 4, 4, 4, 8, 6, 6, 4, 5, 2, 3, 8, 7, 4, 9, 3, 0, 3, 5, 8, 9, 0, 7, 2, 9, 6, 2, 9, 0, 4, 9, 1, 5, 6, 0, 4, 4, 0, 7, 7, 2, 3, 9, 0, 7, 1, 3, 8, 1, 0, 5, 1, 5, 8, 5, 9, 3, 0, 7, 9, 6, 0, 8, 6, 6, 7, 0, 1, 7, 2, 4, 2, 7, 1, 2, 1, 8, 8, 3, 9, 9, 8, 7, 9, 7, 9, 0, 8, 7, 9, 2, 2, 7, 4, 9, 2, 1, 9, 0, 1, 6, 9, 9, 7, 2, 0, 8, 8, 8, 0, 9, 3, 7, 7, 6, 6, 5, 7, 2, 7, 3, 3, 3, 0, 0, 1, 0, 5, 3, 3, 6, 7, 8, 8, 1, 2, 2, 0, 2, 3, 5, 4, 2, 1, 8, 0, 9, 7, 5, 1, 2, 5, 4, 5, 4, 0, 5, 9, 4, 7, 5, 2, 2, 4, 3, 5, 2, 5, 8, 4, 9, 0, 7, 7, 1, 1, 6, 7, 0, 5, 5, 6, 0, 1, 3, 6, 0, 4, 8, 3, 9, 5, 8, 6, 4, 4, 6, 7, 0, 6, 3, 2, 4, 4, 1, 5, 7, 2, 2, 1, 5, 5, 3, 9, 7, 5, 3, 6, 9, 7, 8, 1, 7, 9, 7, 7, 8, 4, 6, 1, 7, 4, 0, 6, 4, 9, 5, 5, 1, 4, 9, 2, 9, 0, 8, 6, 2, 5, 6, 9, 3, 2, 1, 9, 7, 8, 4, 6, 8, 6, 2, 2, 4, 8, 2, 8, 3, 9, 7, 2, 2, 4, 1, 3, 7, 5, 6, 5, 7, 0, 5, 6, 0, 5, 7, 4, 9, 0, 2, 6, 1, 4, 0, 7, 9, 7, 2, 9, 6, 8, 6, 5, 2, 4, 1, 4, 5, 3, 5, 1, 0, 0, 4, 7, 4, 8, 2, 1, 6, 6, 3, 7, 0, 4, 8, 4, 4, 0, 3, 1, 9, 9, 8, 9, 0, 0, 0, 8, 8, 9, 5, 2, 4, 3, 4, 5, 0, 6, 5, 8, 5, 4, 1, 2, 2, 7, 5, 8, 8, 6, 6, 6, 8, 8, 1, 1, 6, 4, 2, 7, 1, 7, 1, 4, 7, 9, 9, 2, 
4, 4, 4, 2, 9, 2, 8, 2, 3, 0, 8, 6, 3, 4, 6, 5, 6, 7, 4, 8, 1, 3, 9, 1, 9, 1, 2, 3, 1, 6, 2, 8, 2, 4, 5, 8, 6, 1, 7, 8, 6, 6, 4, 5, 8, 3, 5, 9, 1, 2, 4, 5, 6, 6, 5, 2, 9, 4, 7, 6, 5, 4, 5, 6, 8, 2, 8, 4, 8, 9, 1, 2, 8, 8, 3, 1, 4, 2, 6, 0, 7, 6, 9, 0, 0, 4, 2, 2, 4, 2, 1, 9, 0, 2, 2, 6, 7, 1, 0, 5, 5, 6, 2, 6, 3, 2, 1, 1, 1, 1, 1, 0, 9, 3, 7, 0, 5, 4, 4, 2, 1, 7, 5, 0, 6, 9, 4, 1, 6, 5, 8, 9, 6, 0, 4, 0, 8, 0, 7, 1, 9, 8, 4, 0, 3, 8, 5, 0, 9, 6, 2, 4, 5, 5, 4, 4, 4, 3, 6, 2, 9, 8, 1, 2, 3, 0, 9, 8, 7, 8, 7, 9, 9, 2, 7, 2, 4, 4, 2, 8, 4, 9, 0, 9, 1, 8, 8, 8, 4, 5, 8, 0, 1, 5, 6, 1, 6, 6, 0, 9, 7, 9, 1, 9, 1, 3, 3, 8, 7, 5, 4, 9, 9, 2, 0, 0, 5, 2, 4, 0, 6, 3, 6, 8, 9, 9, 1, 2, 5, 6, 0, 7, 1, 7, 6, 0, 6, 0, 5, 8, 8, 6, 1, 1, 6, 4, 6, 7, 1, 0, 9, 4, 0, 5, 0, 7, 7, 5, 4, 1, 0, 0, 2, 2, 5, 6, 9, 8, 3, 1, 5, 5, 2, 0, 0, 0, 5, 5, 9, 3, 5, 7, 2, 9, 7, 2, 5, 7, 1, 6, 3, 6, 2, 6, 9, 5, 6, 1, 8, 8, 2, 6, 7, 0, 4, 2, 8, 2, 5, 2, 4, 8, 3, 6, 0, 0, 8, 2, 3, 2, 5, 7, 5, 3, 0, 4, 2, 0, 7, 5, 2, 9, 6, 3, 4, 5, 0]
def GetProd_13(Num):
Track = 0
for i in Num:
Prod = 1
for j in range(13):
Prod *= Num[j]
if Prod > Track:
Track = Prod
del Num[0]
return Track
print(GetProd_13(Series))
| 115.820513
| 3,008
| 0.50963
| 1,100
| 4,517
| 2.093636
| 0.060909
| 0.014763
| 0.006513
| 0.0165
| 0.065132
| 0.052106
| 0.052106
| 0.052106
| 0.052106
| 0.052106
| 0
| 0.606106
| 0.26035
| 4,517
| 39
| 3,009
| 115.820513
| 0.082311
| 0.277618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cafc93f6a096e49c56bfe5b87cac989f899ad6a3
| 2,621
|
py
|
Python
|
test/test_phones.py
|
kesbo/python_training
|
5d7908c13bd00b06738d6f9eff93cff0041acab8
|
[
"Apache-2.0"
] | null | null | null |
test/test_phones.py
|
kesbo/python_training
|
5d7908c13bd00b06738d6f9eff93cff0041acab8
|
[
"Apache-2.0"
] | null | null | null |
test/test_phones.py
|
kesbo/python_training
|
5d7908c13bd00b06738d6f9eff93cff0041acab8
|
[
"Apache-2.0"
] | null | null | null |
import re
from random import randrange
from model.contact import Contact
def test_random_contact_on_home_page_from_edit_page(app):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Bruce", lastname="Wayne", address="Gotam",
home="a", mobile="b", work="c",
email="batman@32.32", email2="123", email3="2"))
contacts_list = app.contact.get_contact_list()
index = randrange(len(contacts_list))
contact_from_home_page = contacts_list[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address
assert contact_from_home_page.all_phones_home_page == merge_phones_like_one_home_page(contact_from_edit_page)
assert contact_from_home_page.all_emails_home_page == merge_emails_like_one_home_page(contact_from_edit_page)
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_home_page == merge_phones_like_one_home_page(contact_from_edit_page)
def test_phones_contact_view_page(app):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Bruce", lastname="Wayne", address="Gotam",
home="a", mobile="b", work="c",
email="batman@32.32", email2="123", email3="2"))
contacts_list = app.contact.get_contact_list()
index = randrange(len(contacts_list))
contact_from_home_page = contacts_list[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_emails_home_page == merge_emails_like_one_home_page(contact_from_edit_page)
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_one_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work]))))
def merge_emails_like_one_home_page(contact):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3])))
| 46.803571
| 113
| 0.682564
| 358
| 2,621
| 4.592179
| 0.175978
| 0.107056
| 0.10219
| 0.115572
| 0.788321
| 0.725669
| 0.725669
| 0.712287
| 0.712287
| 0.675182
| 0
| 0.01312
| 0.214804
| 2,621
| 55
| 114
| 47.654545
| 0.785714
| 0
| 0
| 0.571429
| 0
| 0
| 0.029771
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.142857
| false
| 0
| 0.071429
| 0.071429
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1b67f3317af7635fd307c92bb386b9a8704e933e
| 87
|
py
|
Python
|
ex001.py
|
amandagsilveira/Exercicios_cursoemvideo_Python
|
9055d24d44d8e195df1691767a635b0b54357672
|
[
"MIT"
] | null | null | null |
ex001.py
|
amandagsilveira/Exercicios_cursoemvideo_Python
|
9055d24d44d8e195df1691767a635b0b54357672
|
[
"MIT"
] | null | null | null |
ex001.py
|
amandagsilveira/Exercicios_cursoemvideo_Python
|
9055d24d44d8e195df1691767a635b0b54357672
|
[
"MIT"
] | null | null | null |
#Crie um programa que mostre "Olá, Mundo!" na tela.
print('\033[33mOlá, Mundo!')
| 17.4
| 52
| 0.643678
| 13
| 87
| 4.307692
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.195402
| 87
| 4
| 53
| 21.75
| 0.728571
| 0.574713
| 0
| 0
| 0
| 0
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1b6fc99b21c5d4ec2c8f8abc9afaa17f88ee2a0b
| 518
|
py
|
Python
|
pybutton/resources/__init__.py
|
button/button-client-python
|
82f9be86885ed87ec20dc20e87f3722cdba67fef
|
[
"MIT"
] | 8
|
2016-08-12T00:21:55.000Z
|
2019-04-21T12:22:05.000Z
|
pybutton/resources/__init__.py
|
button/button-client-python
|
82f9be86885ed87ec20dc20e87f3722cdba67fef
|
[
"MIT"
] | 16
|
2016-10-03T20:13:09.000Z
|
2019-09-23T17:34:43.000Z
|
pybutton/resources/__init__.py
|
button/button-client-python
|
82f9be86885ed87ec20dc20e87f3722cdba67fef
|
[
"MIT"
] | 2
|
2017-01-09T10:18:45.000Z
|
2017-02-03T01:29:30.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pybutton.resources.accounts import Accounts # noqa: 401
from pybutton.resources.customers import Customers # noqa: 401
from pybutton.resources.links import Links # noqa: 401
from pybutton.resources.merchants import Merchants # noqa: 401
from pybutton.resources.orders import Orders # noqa: 401
from pybutton.resources.transactions import Transactions # noqa: 401
| 43.166667
| 68
| 0.839768
| 67
| 518
| 6.208955
| 0.283582
| 0.173077
| 0.302885
| 0.228365
| 0.336538
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.1139
| 518
| 11
| 69
| 47.090909
| 0.867102
| 0.1139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.1
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1b7c1f0b62880000b1beaa6e93accde1060c3664
| 117
|
py
|
Python
|
Bots/Bot.py
|
dan1510123/stock-history-simulator
|
a970531f650513a4c76c250796aeecc4d7e4c39b
|
[
"MIT"
] | 1
|
2021-12-25T21:06:50.000Z
|
2021-12-25T21:06:50.000Z
|
Bots/Bot.py
|
dan1510123/stock-history-simulator
|
a970531f650513a4c76c250796aeecc4d7e4c39b
|
[
"MIT"
] | null | null | null |
Bots/Bot.py
|
dan1510123/stock-history-simulator
|
a970531f650513a4c76c250796aeecc4d7e4c39b
|
[
"MIT"
] | null | null | null |
class Bot:
# Returns orders to make with list of ticker pairs
def get_orders(self, date, limit):
pass
| 29.25
| 54
| 0.666667
| 18
| 117
| 4.277778
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273504
| 117
| 4
| 55
| 29.25
| 0.905882
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
1b7f4f5ce1d2ea657d3d2683c070a4bcb5c7f227
| 148
|
py
|
Python
|
Python Fundamentals/Data types and Variables/Exercise/Task06.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | 1
|
2022-03-16T10:23:04.000Z
|
2022-03-16T10:23:04.000Z
|
Python Fundamentals/Data types and Variables/Exercise/Task06.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | null | null | null |
Python Fundamentals/Data types and Variables/Exercise/Task06.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | null | null | null |
n = int(input())
for a in range(n):
for b in range(n):
for c in range(n):
print(f"{chr(a + 97)}{chr(b + 97)}{chr(c + 97)}")
| 24.666667
| 61
| 0.466216
| 29
| 148
| 2.37931
| 0.448276
| 0.304348
| 0.347826
| 0.318841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.310811
| 148
| 6
| 61
| 24.666667
| 0.617647
| 0
| 0
| 0
| 0
| 0
| 0.261745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1baacb4c5b1444f576f4c13e9436b73330758b08
| 145
|
py
|
Python
|
src/magnesium/path_processor/base_path_processor.py
|
kokaib/magnesium
|
0765ab89c30bfb8060c67826dd912ea26e4a4155
|
[
"MIT"
] | null | null | null |
src/magnesium/path_processor/base_path_processor.py
|
kokaib/magnesium
|
0765ab89c30bfb8060c67826dd912ea26e4a4155
|
[
"MIT"
] | null | null | null |
src/magnesium/path_processor/base_path_processor.py
|
kokaib/magnesium
|
0765ab89c30bfb8060c67826dd912ea26e4a4155
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class BasePathProcessor(ABC):
""""""
@abstractmethod
def process(self, x):
""""""
| 14.5
| 35
| 0.57931
| 13
| 145
| 6.461538
| 0.769231
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 145
| 9
| 36
| 16.111111
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
59ffe9f8964e6b973ab9db6f18186e91ccb061b8
| 53
|
py
|
Python
|
words_counter/test.py
|
dimaveshkin/py-practive
|
851f2c359645f1dd50ca9952625810b4188c88dc
|
[
"MIT"
] | null | null | null |
words_counter/test.py
|
dimaveshkin/py-practive
|
851f2c359645f1dd50ca9952625810b4188c88dc
|
[
"MIT"
] | null | null | null |
words_counter/test.py
|
dimaveshkin/py-practive
|
851f2c359645f1dd50ca9952625810b4188c88dc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
print(sys.argv[1])
| 10.6
| 21
| 0.698113
| 10
| 53
| 3.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.113208
| 53
| 5
| 22
| 10.6
| 0.765957
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
941bf0ee9b69c9898c1230e9c904c1d7b4f451c6
| 23
|
py
|
Python
|
template/example/main.py
|
cheetosysst/FDep
|
b7bae64db00121196544dc91d7c4b67d93d11af6
|
[
"MIT"
] | null | null | null |
template/example/main.py
|
cheetosysst/FDep
|
b7bae64db00121196544dc91d7c4b67d93d11af6
|
[
"MIT"
] | null | null | null |
template/example/main.py
|
cheetosysst/FDep
|
b7bae64db00121196544dc91d7c4b67d93d11af6
|
[
"MIT"
] | null | null | null |
print("Hello, wolrd!")
| 11.5
| 22
| 0.652174
| 3
| 23
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 23
| 1
| 23
| 23
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
846ee166fb5411fc42a10f2a3df6268352badb21
| 92
|
py
|
Python
|
pulsus/services/apns/__init__.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | 14
|
2015-01-16T07:48:43.000Z
|
2019-04-19T23:13:50.000Z
|
pulsus/services/apns/__init__.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | null | null | null |
pulsus/services/apns/__init__.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | 2
|
2015-08-06T12:52:56.000Z
|
2019-02-07T18:09:23.000Z
|
from .notification import APNSNotification # noqa
from .service import APNSService # noqa
| 30.666667
| 50
| 0.804348
| 10
| 92
| 7.4
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 92
| 2
| 51
| 46
| 0.948718
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
84a8f7bfa0dcd2366cb93a9f601ef514f65c39d3
| 131
|
py
|
Python
|
drdown/events/admin.py
|
fga-gpp-mds/2018.1-Cris-Down
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 11
|
2018-03-11T01:21:43.000Z
|
2018-06-19T21:51:33.000Z
|
drdown/events/admin.py
|
fga-gpp-mds/2018.1-Grupo12
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 245
|
2018-03-13T19:07:14.000Z
|
2018-07-07T22:46:00.000Z
|
drdown/events/admin.py
|
fga-gpp-mds/2018.1-Grupo12
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 12
|
2018-08-24T13:26:04.000Z
|
2021-03-27T16:28:22.000Z
|
from django.contrib import admin
from .models.model_events import Events
# Register your models here.
admin.site.register(Events)
| 21.833333
| 39
| 0.816794
| 19
| 131
| 5.578947
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114504
| 131
| 5
| 40
| 26.2
| 0.913793
| 0.198473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
84c8ee311c39ac0043e2715541bfe5b9861e8899
| 3,354
|
py
|
Python
|
setup.py
|
WRY-learning/k3http
|
095a49118d052c43eb0e1dd82b6764eee9fcb158
|
[
"MIT"
] | null | null | null |
setup.py
|
WRY-learning/k3http
|
095a49118d052c43eb0e1dd82b6764eee9fcb158
|
[
"MIT"
] | 2
|
2021-11-10T22:16:25.000Z
|
2022-03-23T06:59:52.000Z
|
setup.py
|
WRY-learning/k3http
|
095a49118d052c43eb0e1dd82b6764eee9fcb158
|
[
"MIT"
] | 1
|
2021-08-18T05:16:59.000Z
|
2021-08-18T05:16:59.000Z
|
# DO NOT EDIT!!! built with `python _building/build_setup.py`
import setuptools
setuptools.setup(
name="k3http",
packages=["k3http"],
version="0.1.0",
license='MIT',
description="We find that 'httplib' must work in blocking mode and it can not have a timeout when recving response.",
long_description="# k3http\n\n[](https://github.com/pykit3/k3http/actions/workflows/python-package.yml)\n[](https://travis-ci.com/pykit3/k3http)\n[](https://k3http.readthedocs.io/en/stable/?badge=stable)\n[](https://pypi.org/project/k3http)\n\nWe find that 'httplib' must work in blocking mode and it can not have a timeout when recving response.\n\nk3http is a component of [pykit3] project: a python3 toolkit set.\n\n\nHTTP/1.1 client\n\nUse this module, we can set timeout, if timeout raise a 'socket.timeout'.\n\n\n\n# Install\n\n```\npip install k3http\n```\n\n# Synopsis\n\n```python\n\nimport k3http\nimport urllib\nimport socket\n\nheaders = {\n 'Host': '127.0.0.1',\n 'Accept-Language': 'en, mi',\n}\n\ntry:\n h = k3http.Client('127.0.0.1', 80)\n\n # send http reqeust without body\n # read response status line\n # read response headers\n h.request('/test.txt', method='GET', headers=headers)\n\n status = h.status\n # response code return from http server, type is int\n # 200\n # 302\n # 404\n # ...\n\n res_headers = h.headers\n # response headers except status line\n # res_headers = {\n # 'Content-Type': 'text/html;charset=utf-8',\n # 'Content-Length': 1024,\n # ...\n # }\n\n # get response body\n print(h.read_body(None))\nexcept (socket.error, k3http.HttpError) as e:\n print(repr(e))\n\n\n\ncontent = urllib.urlencode({'f': 'foo', 'b': 'bar'})\nheaders = {\n 'Host': 'www.example.com',\n 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',\n 'Content-Length': len(content),\n}\n\ntry:\n h = k3http.Client('127.0.0.1', 80)\n\n # send http reqeust\n h.send_request('http://www.example.com', method='POST', headers=headers)\n\n # send http request body\n h.send_body(content)\n\n # read response status line and headers\n status, headers = h.read_response()\n\n # read response body\n print(h.read_body(None))\nexcept (socket.error, k3http.HttpError) as e:\n print(repr(e))\n\n```\n\n# Author\n\nZhang Yanpo (张炎泼) <drdr.xp@gmail.com>\n\n# Copyright and License\n\nThe 
MIT License (MIT)\n\nCopyright (c) 2015 Zhang Yanpo (张炎泼) <drdr.xp@gmail.com>\n\n\n[pykit3]: https://github.com/pykit3",
long_description_content_type="text/markdown",
author='Zhang Yanpo',
author_email='drdr.xp@gmail.com',
url='https://github.com/pykit3/k3http',
keywords=['python', 'http'],
python_requires='>=3.0',
install_requires=['k3ut>=0.1.15,<0.2', 'k3stopwatch>=0.1.1,<0.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
] + ['Programming Language :: Python :: 3'],
)
| 139.75
| 2,548
| 0.673822
| 531
| 3,354
| 4.225989
| 0.352166
| 0.026738
| 0.013369
| 0.035651
| 0.335116
| 0.30303
| 0.255793
| 0.255793
| 0.234403
| 0.234403
| 0
| 0.031608
| 0.141622
| 3,354
| 23
| 2,549
| 145.826087
| 0.747829
| 0.017591
| 0
| 0
| 1
| 0.095238
| 0.8843
| 0.153052
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.095238
| 0
| 0.095238
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
84f6ba19da782d7caf00b5db6ebe6cc541c95c13
| 19,939
|
py
|
Python
|
ncellapp/ncellapp.py
|
sanam1357/ncellsms
|
e3be1c4a87b4f55b2ef68ceec33124c4c5d836a6
|
[
"MIT"
] | null | null | null |
ncellapp/ncellapp.py
|
sanam1357/ncellsms
|
e3be1c4a87b4f55b2ef68ceec33124c4c5d836a6
|
[
"MIT"
] | null | null | null |
ncellapp/ncellapp.py
|
sanam1357/ncellsms
|
e3be1c4a87b4f55b2ef68ceec33124c4c5d836a6
|
[
"MIT"
] | null | null | null |
import requests
from base64 import (b64encode, b64decode)
from ast import literal_eval
from datetime import datetime
from Crypto.Cipher import AES
class AESCipher(object):
def __init__(self):
self.key = b'zSXdd0rx59ThQlul'
self.bs = AES.block_size
def encrypt(self, raw):
raw = self._pad(raw)
# zero based byte[16]
iv = b'\0'*16
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return b64encode(cipher.encrypt(raw.encode())).decode('UTF-8')
def decrypt(self, enc):
enc = b64decode(enc)
# zero based byte[16]
iv = b'\0'*16
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc)).decode('utf-8')
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
class register(AESCipher):
def __init__(self, msidn):
AESCipher.__init__(self)
self.msidn = msidn
self.baseUrl = 'http://ssa.ncell.com.np:8080/mc/selfcare/v2/proxy'
self.headers = {
'X-MobileCare-AppClientVersion': 'SHn7MOIW3T/R/OL8LsAvxw==',
'Cache-Control': 'no-cache',
'X-MobileCare-PreferredLocale': 'cAsAM2g0t7oB6OSJKH1ptQ==',
'Content-Type': 'application/xml',
'X-MobileCare-APIKey': 'ABC_KEY',
'X-MobileCare-AppResolution': 'iRRhXh87ipDTZpyEWGWteg==',
'X-MobileCare-AppPlatformVersion': 'QJ2ZR3DKpuBfBr7GuTQh7w==',
'ACCEPT': 'application/json',
'X-MobileCare-AppPlatformName': 'yEHXRN3mrQMvwG4bfE2ApQ==',
'Host': 'ssa.ncell.com.np:8080',
'Connection': 'Keep-Alive',
}
def sendOtp(self):
'''[Send OTP to the number for registration]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/register'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><serviceInstance>{self.msidn}</serviceInstance></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
response = literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
try:
self.deviceClientId = AESCipher.encrypt(self, response['deviceClientId'])
except KeyError:
self.deviceClientId = None
return response
def getToken(self, otp):
'''[Send the OTP to the Ncell server and return the token if successful]
Args:
otp ([string]): [OTP sent in the phone number]
Returns:
[dict]: [response from the Ncell server with token]
'''
self.headers.update({
'X-MobileCare-DeviceClientID': self.deviceClientId,
'X-MobileCare-MSISDN': self.msidn,
})
url = self.baseUrl + '/register'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><otp>{otp}</otp></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
response = literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
if response['opStatus'] == '0':
token = b64encode(str({'msidn':self.msidn, 'deviceClientId':self.deviceClientId}).encode()).decode()
response.update({'token':token})
return response
class ncell(AESCipher):
def __init__(self, token):
AESCipher.__init__(self)
self.token = token
self.baseUrl = 'http://ssa.ncell.com.np:8080/mc/selfcare/v2/proxy'
def login(self):
'''[Extract the msidn and client ID from the token and login]
Returns:
[dict]: [returns opStatus=0 if successful]
'''
try:
self.msidn = literal_eval(b64decode(self.token).decode())['msidn']
self.deviceClientId = literal_eval(b64decode(self.token).decode())['deviceClientId']
except Exception:
self.msidn = self.deviceClientId = None
return {'opStatus': 'invalid', 'errorMessage': 'The token you provided is not valid.'}
self.headers = {
'X-MobileCare-AppClientVersion': 'SHn7MOIW3T/R/OL8LsAvxw==',
'Cache-Control': 'no-cache',
'X-MobileCare-PreferredLocale': 'cAsAM2g0t7oB6OSJKH1ptQ==',
'Content-Type': 'application/xml',
'X-MobileCare-APIKey': 'ABC_KEY',
'X-MobileCare-AppResolution': 'iRRhXh87ipDTZpyEWGWteg==',
'X-MobileCare-DeviceClientID': self.deviceClientId,
'X-MobileCare-MSISDN': self.msidn,
'X-MobileCare-AppPlatformVersion': 'QJ2ZR3DKpuBfBr7GuTQh7w==',
'ACCEPT': 'application/json',
'X-MobileCare-AppPlatformName': 'yEHXRN3mrQMvwG4bfE2ApQ==',
'Host': 'ssa.ncell.com.np:8080',
'Connection': 'Keep-Alive',
}
profile = self.viewProfile()
try:
self.name = profile['myProfile']['name']
self.status = profile['myProfile']['status']
self.partyID = profile['myProfile']['partyID']
self.accountId = profile['myProfile']['accountID']
self.serviceFlag = profile['myProfile']['serviceFlag']
self.currentPlan = profile['myProfile']['currentPlan']
self.secureToken = profile['myProfile']['secureToken']
self.hubID = profile['myProfile']['hubID']
return {'opStatus': '0', 'errorMessage': 'SUCCESS'}
except KeyError:
self.name = self.status = self.partyID = self.accountId = self.serviceFlag = self.currentPlan = self.secureToken = self.hubID = None
return {'opStatus': 'expired', 'errorMessage': 'The token you provided has expired.'}
def viewProfile(self):
'''[View the profile of the account]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/viewMyProfile'
data = "<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData /></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def sendSms(self, destination, message, schedule=None):
'''[Send SMS with the currentPlan]
Args:
destination ([int]): [MSIDN of the destination]
message ([String]): [Message to send]
schedule ([int], optional): [Schedule date in order of YYYYMMDDHHMMSS format, eg.20201105124500]. Defaults to None.
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
schedule = schedule or datetime.now().strftime("%Y%m%d%H%M%S")
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><userId>{schedule}</userId><problemDesc>{message}</problemDesc><serviceId>SENDSMS</serviceId><accountId>{self.accountId}</accountId><code>{destination}</code><offerId>yes</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def sendFreeSms(self, destination, message, schedule=None):
'''[Send free 10 SMS]
Args:
destination ([int]): [MSIDN of the destination]
message ([String]): [Message to send]
schedule ([int], optional): [Schedule date in order of YYYYMMDDHHMMSS format, eg.20201105124500]. Defaults to None.
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
schedule = schedule or datetime.now().strftime("%Y%m%d%H%M%S")
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><userId>{schedule}</userId><problemDesc>{message}</problemDesc><serviceId>SENDSMS</serviceId><accountId>{self.accountId}</accountId><code>{destination}</code><offerId>no</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def viewBalance(self):
'''[View the current balance]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/myBalance'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><code>{self.accountId}</code><accountId>{self.accountId}</accountId><offerId>{self.hubID}</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def selfRecharge(self, rpin):
'''[Recharging the current account]
Args:
rpin ([int]): [16 digit PIN of the recharge card]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><alternateContactNumber></alternateContactNumber><contractId></contractId><customerId></customerId><serviceId>RECHARGENOW</serviceId><code>{rpin}</code></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def recharge(self, destination, rpin):
'''[Recharging other's account]
Args:
destination ([int]): [MSIDN of the destination]
rpin ([int]): [16 digit PIN of the recharge card]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><alternateContactNumber>{destination}</alternateContactNumber><contractId></contractId><customerId></customerId><serviceId>RECHARGENOW</serviceId><code>{rpin}</code></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def rechargeHistory(self):
'''[latest balance transfer history]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/rechargeHistory'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><userId>TransferHistory</userId><accountId>{self.accountId}</accountId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def balanceTransfer(self, destination, amount):
'''[Initiate the balance transformation to the destination number]
Args:
destination ([int]): [MSIDN of the destination]
amount ([int]): [Amount of balance to transfer]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><alternateContactNumber>{destination}</alternateContactNumber><contractId></contractId><customerId></customerId><action>NEW</action><serviceId>BALANCETRANSFER</serviceId><code>{amount}</code></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def confirmBalanceTransfer(self, otp):
'''[Confirm the balance transfer]
Args:
otp ([int]): [OTP sent in phone number]
Returns:
[type]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><password>{otp}</password><contractId></contractId><customerId></customerId><action>NEW</action><serviceId>BALANCETRANSFER</serviceId><offerId>validate</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def viewTransaction(self, transactionsFrom, transactionsTo):
'''[Initiate to view call history]
Args:
transactionsFrom ([int]): [From date in YYYYMMDDHHMMSS order]
transactionsTo ([int]): [To date in YYYYMMDDHHMMSS order]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/viewTransactions'
self.transactionsFrom = transactionsFrom
self.transactionsTo = transactionsTo
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>prepaid</lob><userId>{self.transactionsFrom}</userId><code>GET</code><accountId>{self.accountId}</accountId><offerId>{self.transactionsTo}</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def confirmViewTransaction(self, otp):
'''[Confirm to view call history]
Args:
otp ([int]): [OTP sent in phone number]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/viewTransactions'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>prepaid</lob><action>{otp}</action><userId>{self.transactionsFrom}</userId><code>VALIDATE</code><accountId>{self.accountId}</accountId><offerId>{self.transactionsTo}</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def viewService(self, serviceCategory=''):
'''[View the list of available services to activate]
Args:
serviceCategory ([str], optional): [Category of the service]. Defaults to None.
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/viewMyService'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><code>R3027</code><serviceCategory>{serviceCategory}</serviceCategory><accountId>{self.accountId}</accountId><offerId>{self.hubID}</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def activateService(self, serviceId):
'''[Activate the certain service]
Args:
serviceId ([int]): [Service ID found in isMandatory field of viewService()]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><serviceId>SUBSCRIBEAPRODUCT</serviceId><code>{serviceId}</code></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def viewOffer(self):
'''[View the available offer for the account]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/viewOffers'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><customerId></customerId><lob>{self.serviceFlag}</lob><accountId>{self.accountId}</accountId><contractId></contractId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def activateOffer(self, offerId):
'''[Activate the certain offer]
Args:
offerId ([int]): [offer ID found in offerID field of viewOffer()]
Returns:
[type]: [description]
'''
url = self.baseUrl + '/updateServiceRequest'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><serviceId>SUBSCRIBEAPRODUCT</serviceId><code>{offerId}</code></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
def view3gPlans(self):
'''[View available plans for 3G]
Returns:
[dict]: [response from the Ncell server]
'''
url = self.baseUrl + '/view3gPlans'
data = f"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?><mAppData><userOperationData><lob>{self.serviceFlag}</lob><contractId></contractId><customerId></customerId><code>{self.accountId}</code><accountId>{self.accountId}</accountId><offerId>{self.hubID}</offerId></userOperationData></mAppData>"
data = AESCipher.encrypt(self, data)
self.request = requests.post(url, headers=self.headers, data=data)
return literal_eval(AESCipher.decrypt(self, self.request.text))['businessOutput']
| 45.11086
| 354
| 0.62611
| 2,045
| 19,939
| 6.080196
| 0.128606
| 0.031848
| 0.030561
| 0.017372
| 0.742159
| 0.728326
| 0.71401
| 0.707737
| 0.707737
| 0.698488
| 0
| 0.011844
| 0.22935
| 19,939
| 442
| 355
| 45.11086
| 0.797345
| 0.154271
| 0
| 0.543269
| 0
| 0.086538
| 0.420656
| 0.286525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.004808
| 0.024038
| 0.009615
| 0.283654
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ca2e54cebeee275ffeb4db21314349e043a0bb32
| 58
|
py
|
Python
|
stockviewer/stockviewer/view/__init__.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
stockviewer/stockviewer/view/__init__.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
stockviewer/stockviewer/view/__init__.py
|
vyacheslav-bezborodov/skt
|
58551eed497687adec5b56336037613a78cc5b2d
|
[
"MIT"
] | null | null | null |
from viewmanager import viewmanager
from main import main
| 19.333333
| 35
| 0.862069
| 8
| 58
| 6.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 36
| 29
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ca4a01bee340fe08ea41c9d3772628e46d9874c7
| 185
|
py
|
Python
|
ExerciciosPython/ex013.py
|
LucasBalbinoSS/Exercicios-Python
|
2e9d3a8ec4ab24a2732c461a84f51bde54902a24
|
[
"MIT"
] | null | null | null |
ExerciciosPython/ex013.py
|
LucasBalbinoSS/Exercicios-Python
|
2e9d3a8ec4ab24a2732c461a84f51bde54902a24
|
[
"MIT"
] | null | null | null |
ExerciciosPython/ex013.py
|
LucasBalbinoSS/Exercicios-Python
|
2e9d3a8ec4ab24a2732c461a84f51bde54902a24
|
[
"MIT"
] | null | null | null |
s = float(input('\033[35mQual é o salário do funcionário?: R$ '))
print('Um funcionário que ganhava {:.2f}, com 15% de aumento, passa a receber: {:.2f}R$'.format(s, s + (s * 15/100)))
| 46.25
| 117
| 0.637838
| 32
| 185
| 3.6875
| 0.78125
| 0.033898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 0.156757
| 185
| 3
| 118
| 61.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0.5
| 0.675676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 5
|
ca4bb540e1da17445d44f02dcbecb4603a9904a6
| 2,074
|
py
|
Python
|
EvaMap/Metrics/disjointWith.py
|
benjimor/EvaMap
|
42e616abe9f15925b885797d30496e30615989a0
|
[
"MIT"
] | 1
|
2021-01-29T18:53:26.000Z
|
2021-01-29T18:53:26.000Z
|
EvaMap/Metrics/disjointWith.py
|
benjimor/EvaMap
|
42e616abe9f15925b885797d30496e30615989a0
|
[
"MIT"
] | 1
|
2021-06-06T17:56:00.000Z
|
2021-06-06T17:56:00.000Z
|
EvaMap/Metrics/disjointWith.py
|
benjimor/EvaMap
|
42e616abe9f15925b885797d30496e30615989a0
|
[
"MIT"
] | null | null | null |
import rdflib
from EvaMap.Metrics.metric import metric
def disjointWith(g_onto, liste_map, g_map, raw_data, g_link) :
result = metric()
result['name'] = "Misuse of disjointWith"
points = 0
nbPossible = 0
for s, _, o in g_map.triples((None, None, None)) :
nbPossible = nbPossible + 1
for _, _, o1 in g_onto.triples((s, rdflib.term.URIRef('https://www.w3.org/2002/07/owl#disjointWith'), None)) :
if g_onto.triples((o, (rdflib.term.URIRef('a')|rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')), o1)) is not None :
points = points + 1
result['feedbacks'].append(str(o) + "is disjoint with" + s)
else :
for s1, _, _ in g_onto.triples((None, rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#subClassOf') ,o)):
if g_onto.triples((s1, (rdflib.term.URIRef('a')|rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')), o1)) is not None :
points = points + 1
result['feedbacks'].append(str(o) + "is disjoint with" + s)
for _, _, o1 in g_onto.triples((o, rdflib.term.URIRef('https://www.w3.org/2002/07/owl#disjointWith'), None)) :
if g_onto.triples((s, (rdflib.term.URIRef('a')|rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')), o1)) is not None :
points = points + 1
result['feedbacks'].append(str(o) + "is disjoint with" + s)
else :
for s1, _, _ in g_onto.triples((None, rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#subClassOf') ,s)):
if g_onto.triples((s1, (rdflib.term.URIRef('a')|rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')), o1)) is not None :
points = points + 1
result['feedbacks'].append(str(o) + "is disjoint with" + s)
if nbPossible == 0:
result['score'] = 1
else:
result['score'] = 1-points/nbPossible
return result
| 57.611111
| 158
| 0.575699
| 294
| 2,074
| 3.982993
| 0.217687
| 0.102477
| 0.163962
| 0.102477
| 0.766866
| 0.766866
| 0.75491
| 0.730999
| 0.730999
| 0.730999
| 0
| 0.054299
| 0.254098
| 2,074
| 35
| 159
| 59.257143
| 0.70265
| 0
| 0
| 0.40625
| 0
| 0.125
| 0.244937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
04639d2f91fe6d8fc1a5eb8e5fe6a3de216d7361
| 153
|
py
|
Python
|
service/__init__.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
service/__init__.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
service/__init__.py
|
moshebeeri/datap
|
9ff99bb435728cd69f2589e3ee858a06768ea85e
|
[
"Apache-2.0"
] | null | null | null |
from .service import Service
from .mongodb import MongoDB
from .elasticsearch import Elasticsearch
from .druid import Druid
from .sqlite import SQLiteDB
| 25.5
| 40
| 0.836601
| 20
| 153
| 6.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130719
| 153
| 5
| 41
| 30.6
| 0.962406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0476cd74f2f47547bea6e40cd61ce2f275ccb4a0
| 6,415
|
py
|
Python
|
tests/test_userviews.py
|
kumaraditya303/Mess-Management-System
|
9930e2eae485c29100133e2e030bb979ef920fe1
|
[
"MIT"
] | 2
|
2021-02-26T03:04:37.000Z
|
2021-03-28T04:02:02.000Z
|
tests/test_userviews.py
|
kumaraditya303/Mess-Management-System
|
9930e2eae485c29100133e2e030bb979ef920fe1
|
[
"MIT"
] | 18
|
2020-09-11T15:49:52.000Z
|
2022-03-28T21:20:34.000Z
|
tests/test_userviews.py
|
kumaraditya303/Mess-Management-System
|
9930e2eae485c29100133e2e030bb979ef920fe1
|
[
"MIT"
] | null | null | null |
from tests import Test
class TestGet(Test):
    """Smoke tests: GET each public view and verify its response.

    Two repeated patterns are factored into private helpers: a normal
    page check (200 + body marker + template) and the unauthorized
    redirect-to-home check.
    """

    def _check_page(self, url, marker, template):
        """GET *url*; expect *marker* in the body, a 200, and *template* rendered."""
        resp = self.client.get(url)
        self.assertIn(marker, resp.data)
        self.assert200(resp)
        self.assertTemplateUsed(template)
        return resp

    def _check_unauthorized(self, url):
        """GET *url*; expect a redirect home plus the unauthorized flash."""
        resp = self.client.get(url)
        self.assertRedirects(resp, '/')
        self.assertMessageFlashed("You are unauthorized to access the page!",
                                  category='warning')

    def test_main_page(self):
        self._check_page('/', b'Welcome to Mess Management System', 'index.html')
        # An empty mess triggers a warning flash on the landing page.
        self.assertMessageFlashed(
            "No dishes are available in Mess!", category='warning')

    def test_login_page(self):
        self._check_page('/login', b'Login', 'login.html')

    def test_register_page(self):
        self._check_page('/register', b'Register', 'register.html')

    def test_balance_page(self):
        self._check_unauthorized('/balance')

    def test_order_page(self):
        self._check_unauthorized('/order')

    def test_forgot_page(self):
        self._check_page('/forgot', b'Reset Password', 'reset.html')

    def test_password_reset_page(self):
        # Deliberately does not follow redirects: the token URL itself
        # must serve the reset form directly.
        resp = self.client.get('/forgot/token', follow_redirects=False)
        self.assertIn(b'Reset Password', resp.data)
        self.assert200(resp)
        self.assertTemplateUsed('reset_password.html')

    def test_logout_page(self):
        self._check_unauthorized('/logout')
class TestPost(Test):
    """POST-request tests: registration, login, balance top-up, ordering,
    logout and password-reset email.

    Every scenario needs a registered, logged-in user first; the original
    copy-pasted that register-and-verify boilerplate into all seven tests,
    so it is factored into :meth:`_register_default_user`.
    """

    def _register_default_user(self):
        """Register the canonical test user and assert the dashboard loads.

        Returns the registration response for further assertions.
        Registration also logs the user in (the response lands on the
        dashboard template).
        """
        response = self.client.post(
            '/register',
            data=dict(name='Test',
                      email="test@test.com",
                      password="testingpassword"),
            follow_redirects=True,
        )
        self.assertIn(b'Welcome Test', response.data)
        self.assert200(response)
        self.assertTemplateUsed('dashboard.html')
        return response

    def test_register(self):
        # Registration itself is fully exercised by the shared helper.
        self._register_default_user()

    def test_dashboard(self):
        # Registering logs the user in and lands on the dashboard.
        self._register_default_user()

    def test_login(self):
        self._register_default_user()
        response = self.client.post(
            '/login',
            data=dict(
                email="test@test.com",
                password="testingpassword",
            ),
            follow_redirects=True,
        )
        self.assertIn(b'Welcome Test', response.data)
        self.assert200(response)
        self.assertTemplateUsed('dashboard.html')

    def test_balance(self):
        self._register_default_user()
        response = self.client.post(
            '/balance',
            data=dict(balance=1000),
            follow_redirects=True,
        )
        self.assert200(response)
        self.assertMessageFlashed(
            "₹ 1000 was added successfully to your Mess account!",
            category='success')
        self.assertIn(b'Welcome Test', response.data)

    def test_order(self):
        self._register_default_user()
        response = self.client.get('/order', follow_redirects=True)
        self.assertIn(b'Order Food', response.data)
        self.assert200(response)
        self.assertTemplateUsed('order.html')

    def test_logout(self):
        self._register_default_user()
        response = self.client.get('/logout')
        self.assertRedirects(response, '/')

    def test_password_reset_email(self):
        self._register_default_user()
        response = self.client.get('/logout')
        self.assertRedirects(response, '/')
        response = self.client.post(
            '/forgot',
            data=dict(email='test@test.com'),
        )
        self.assertRedirects(response, '/')
        self.assertMessageFlashed(
            'Email sent successfully!', category='success')
| 35.054645
| 88
| 0.561185
| 596
| 6,415
| 5.978188
| 0.115772
| 0.134718
| 0.10609
| 0.092619
| 0.802133
| 0.785013
| 0.724109
| 0.632613
| 0.625035
| 0.625035
| 0
| 0.012249
| 0.325487
| 6,415
| 182
| 89
| 35.247253
| 0.810954
| 0
| 0
| 0.631902
| 0
| 0
| 0.167888
| 0
| 0
| 0
| 0
| 0
| 0.343558
| 1
| 0.092025
| false
| 0.079755
| 0.006135
| 0
| 0.110429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
048d021364d03b9388d0f6314fd49dec08369504
| 80
|
py
|
Python
|
template_client/libs/sample.py
|
Braiiin/blog-client
|
4e0fe8768b1504c808ff56e1705904a01cb70907
|
[
"Apache-2.0"
] | null | null | null |
template_client/libs/sample.py
|
Braiiin/blog-client
|
4e0fe8768b1504c808ff56e1705904a01cb70907
|
[
"Apache-2.0"
] | null | null | null |
template_client/libs/sample.py
|
Braiiin/blog-client
|
4e0fe8768b1504c808ff56e1705904a01cb70907
|
[
"Apache-2.0"
] | null | null | null |
from client.libs.base import Entity
class Sample(Entity):
    """Sample object.

    Minimal subclass of :class:`client.libs.base.Entity`; adds no fields
    or methods of its own, so it inherits all behavior from Entity.
    """
| 16
| 35
| 0.7375
| 11
| 80
| 5.363636
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 80
| 5
| 36
| 16
| 0.842857
| 0.1625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
04beb3966b903f9b8bfa0034a861cded74dfeccf
| 59
|
py
|
Python
|
pypgapack/__init__.py
|
robertsj/pypgapack
|
c24b4a58f347ec02c20929aaaec25010fa603eb8
|
[
"MIT"
] | 4
|
2015-12-16T09:44:32.000Z
|
2021-05-23T23:52:33.000Z
|
pypgapack/__init__.py
|
robertsj/pypgapack
|
c24b4a58f347ec02c20929aaaec25010fa603eb8
|
[
"MIT"
] | null | null | null |
pypgapack/__init__.py
|
robertsj/pypgapack
|
c24b4a58f347ec02c20929aaaec25010fa603eb8
|
[
"MIT"
] | 1
|
2022-01-01T17:44:21.000Z
|
2022-01-01T17:44:21.000Z
|
# pypgapack/pypgapack/__init__.py
from pypgapack import *
| 14.75
| 33
| 0.79661
| 7
| 59
| 6.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 59
| 3
| 34
| 19.666667
| 0.826923
| 0.525424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b6d23ca10281dd46a28d9d4358ab420e34b1e56b
| 1,769
|
py
|
Python
|
test/test_homogeneity_checks.py
|
kilgore92/PyStatCheck
|
427744a4c98676630633362ed2a8e31f51189768
|
[
"MIT"
] | null | null | null |
test/test_homogeneity_checks.py
|
kilgore92/PyStatCheck
|
427744a4c98676630633362ed2a8e31f51189768
|
[
"MIT"
] | null | null | null |
test/test_homogeneity_checks.py
|
kilgore92/PyStatCheck
|
427744a4c98676630633362ed2a8e31f51189768
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import TestCase
from pystatcheck.tests import CheckHomogeneity
import numpy as np
__author__ = "Ishaan Bhat"
__copyright__ = "Ishaan Bhat"
__license__ = "mit"
class TestHomogeneityChecks(TestCase):
    """
    Test suite to check if the module can detect homogeneity in known cases.

    Fixes over the original:
    - The RNG is seeded in setUp, so the sampled data (and therefore the
      statistical test outcomes) are reproducible; the unseeded original
      could fail intermittently on unlucky draws.
    - ``self.assertIs(..., True/False)`` replaces bare ``assert`` statements,
      which are stripped when Python runs with ``-O``.
    """

    def setUp(self):
        # Fix the legacy global NumPy RNG so every test draws the same samples.
        np.random.seed(0)

    def test_same_distribution(self):
        # Identical normal distributions -> should be judged homogeneous.
        arr1 = np.random.normal(loc=0, scale=3.0, size=(1000,))
        arr2 = np.random.normal(loc=0, scale=3.0, size=(1000,))
        self.assertIs(
            CheckHomogeneity(arr1=arr1, arr2=arr2,
                             verbose=False).perform_homogeneity_tests(),
            True)

    def test_different_distribution_equal_variance(self):
        # Shifted mean, same variance -> not homogeneous.
        arr1 = np.random.normal(loc=0, scale=3.0, size=(1000,))
        arr2 = np.random.normal(loc=1.0, scale=3.0, size=(1000,))
        self.assertIs(
            CheckHomogeneity(arr1=arr1, arr2=arr2,
                             verbose=False).perform_homogeneity_tests(),
            False)

    def test_different_distribution_unequal_variance(self):
        # Shifted mean AND larger variance -> not homogeneous.
        arr1 = np.random.normal(loc=0, scale=3.0, size=(1000,))
        arr2 = np.random.normal(loc=1.0, scale=5.0, size=(1000,))
        self.assertIs(
            CheckHomogeneity(arr1=arr1, arr2=arr2,
                             verbose=False).perform_homogeneity_tests(),
            False)

    def test_same_distribution_non_normal(self):
        # Identical binomial (non-normal) distributions -> homogeneous.
        arr1 = np.random.binomial(n=10, p=0.5, size=(1000,))
        arr2 = np.random.binomial(n=10, p=0.5, size=(1000,))
        self.assertIs(
            CheckHomogeneity(arr1=arr1, arr2=arr2,
                             verbose=False).perform_homogeneity_tests(),
            True)

    def test_different_distribution_non_normal(self):
        # Different binomial success probabilities -> not homogeneous.
        arr1 = np.random.binomial(n=10, p=0.5, size=(1000,))
        arr2 = np.random.binomial(n=10, p=0.8, size=(1000,))
        self.assertIs(
            CheckHomogeneity(arr1=arr1, arr2=arr2,
                             verbose=False).perform_homogeneity_tests(),
            False)
| 36.102041
| 106
| 0.692482
| 250
| 1,769
| 4.74
| 0.26
| 0.067511
| 0.070886
| 0.086076
| 0.752743
| 0.752743
| 0.752743
| 0.752743
| 0.752743
| 0.752743
| 0
| 0.072789
| 0.169022
| 1,769
| 48
| 107
| 36.854167
| 0.733333
| 0.053137
| 0
| 0.37037
| 0
| 0
| 0.015161
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.185185
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6fa50065e786193998fcc21cd22b25363a341a5
| 149
|
py
|
Python
|
__init__.py
|
jordanvrtanoski/block
|
18372826d7c2808a46152eeb588931ed088a1154
|
[
"MIT"
] | null | null | null |
__init__.py
|
jordanvrtanoski/block
|
18372826d7c2808a46152eeb588931ed088a1154
|
[
"MIT"
] | null | null | null |
__init__.py
|
jordanvrtanoski/block
|
18372826d7c2808a46152eeb588931ed088a1154
|
[
"MIT"
] | null | null | null |
import sys, os
from .block_utils import (
__version__, __name__, __author__, __email__, __description__, __license__
)
from .block import core
| 18.625
| 78
| 0.771812
| 17
| 149
| 5.294118
| 0.764706
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161074
| 149
| 7
| 79
| 21.285714
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8e435cf6f3657f0a0c68d75bb668ddf8fd2ba71f
| 3,830
|
py
|
Python
|
gg_manager/utilities/gg_stacks.py
|
petrichor-ai/gg-group-manager
|
21fdc50b33a1c56dad44b4537d3a8b0aa2db4b60
|
[
"MIT"
] | null | null | null |
gg_manager/utilities/gg_stacks.py
|
petrichor-ai/gg-group-manager
|
21fdc50b33a1c56dad44b4537d3a8b0aa2db4b60
|
[
"MIT"
] | null | null | null |
gg_manager/utilities/gg_stacks.py
|
petrichor-ai/gg-group-manager
|
21fdc50b33a1c56dad44b4537d3a8b0aa2db4b60
|
[
"MIT"
] | null | null | null |
import boto3
import logging
from botocore.exceptions import ClientError
# Root logging config: "timestamp|name(<=10 chars)|LEVEL(<=5 chars): message",
# warnings and above for everything by default.
logging.basicConfig(
    format='%(asctime)s|%(name).10s|%(levelname).5s: %(message)s',
    level=logging.WARNING
)
# Module logger is more verbose (DEBUG) than the root threshold.
log = logging.getLogger('GroupStack')
log.setLevel(logging.DEBUG)
class Stack(object):
    """Thin wrapper around CloudFormation for Greengrass resource stacks.

    Every public method takes:
        config -- dict; whichever of the keys 'Group', 'thingName' or
                  'Alias' is present selects the stack name and
                  capabilities (see :meth:`_stack_params`).
        cfntmp -- template object exposing ``json_dump()`` returning the
                  CloudFormation template body.

    Improvements over the original: the config-to-stack-name if-chain was
    copy-pasted into all four methods and is now a single helper, and the
    CloudFormation responses are returned instead of being discarded
    (backward compatible -- callers that ignored the old ``None`` return
    are unaffected).
    """

    def __init__(self, s):
        # s is expected to be a boto3 Session-like object with .client().
        self._cfn = s.client('cloudformation')
        self._gg = s.client('greengrass')

    @staticmethod
    def _stack_params(config):
        """Map *config* to ``(stackName, stackCaps)``.

        Precedence mirrors the original inline blocks: 'Group', then
        'thingName', then 'Alias' -- the LAST matching key wins.  If none
        of the keys is present, raises UnboundLocalError, exactly as the
        original inline code did.
        """
        if config.get('Group', None):
            groupName = config['Group']['Name']
            stackName = '{}-GG-Stack'.format(groupName)
            stackCaps = ['CAPABILITY_IAM']
        if config.get('thingName', None):
            thingName = config['thingName']
            stackName = '{}-Thing-Stack'.format(thingName)
            stackCaps = ['CAPABILITY_IAM']
        if config.get('Alias', None):
            funcsName = config['Alias']
            stackName = '{}-Funcs-Stack'.format(funcsName)
            stackCaps = ['CAPABILITY_IAM', 'CAPABILITY_AUTO_EXPAND']
        return stackName, stackCaps

    def create(self, config, cfntmp):
        """Create a CloudFormation Greengrass resource stack."""
        stackName, stackCaps = self._stack_params(config)
        return self._cfn.create_stack(
            StackName=stackName,
            TemplateBody=cfntmp.json_dump(),
            Capabilities=stackCaps
        )

    def update(self, config, cfntmp):
        """Update a CloudFormation Greengrass resource stack."""
        stackName, stackCaps = self._stack_params(config)
        return self._cfn.update_stack(
            StackName=stackName,
            TemplateBody=cfntmp.json_dump(),
            Capabilities=stackCaps
        )

    def delete(self, config, cfntmp):
        """Delete a CloudFormation Greengrass resource stack.

        *cfntmp* is accepted for signature compatibility but unused,
        matching the original.
        """
        stackName, _ = self._stack_params(config)
        return self._cfn.delete_stack(
            StackName=stackName
        )

    def output(self, config):
        """Retrieve a stack's outputs as ``{OutputKey: OutputValue}``."""
        stackName, _ = self._stack_params(config)
        response = self._cfn.describe_stacks(
            StackName=stackName
        )
        outputs = response['Stacks'][0].get('Outputs', [])
        return {out['OutputKey']: out['OutputValue'] for out in outputs}
| 30.887097
| 72
| 0.575718
| 355
| 3,830
| 6.109859
| 0.208451
| 0.04426
| 0.060858
| 0.08852
| 0.749654
| 0.732135
| 0.732135
| 0.732135
| 0.732135
| 0.732135
| 0
| 0.001834
| 0.288251
| 3,830
| 123
| 73
| 31.138211
| 0.793837
| 0.062402
| 0
| 0.674699
| 0
| 0
| 0.196569
| 0.035996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.036145
| 0
| 0.120482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d014d3d0b767d1dfb62af7f16c5ae22c16bb9e3
| 438
|
py
|
Python
|
rumi/msg_rumi/__init__.py
|
rotationalio/rumi
|
313f4832e0e707443182f819268b509f651e7acb
|
[
"Apache-2.0"
] | null | null | null |
rumi/msg_rumi/__init__.py
|
rotationalio/rumi
|
313f4832e0e707443182f819268b509f651e7acb
|
[
"Apache-2.0"
] | 4
|
2021-12-13T06:54:19.000Z
|
2021-12-17T12:29:07.000Z
|
rumi/msg_rumi/__init__.py
|
rotationalio/rumi
|
313f4832e0e707443182f819268b509f651e7acb
|
[
"Apache-2.0"
] | null | null | null |
# rumi.msg_rumi
# Message-based translation monitoring
#
# Author: Tianshu Li
# Created: Nov.15 2021
"""
Reader and Reporter for message-based translation monitoring, especially for
react app projects set up with lingui.js.
"""
##########################################################################
# Imports
##########################################################################
from .reader import *
from .reporter import *
| 23.052632
| 76
| 0.493151
| 39
| 438
| 5.512821
| 0.74359
| 0.111628
| 0.213953
| 0.306977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015424
| 0.111872
| 438
| 18
| 77
| 24.333333
| 0.537275
| 0.497717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d0ed5d15cdf65d9666258009bff578fb570c03c
| 399
|
py
|
Python
|
textwiser/transformations/__init__.py
|
vishalbelsare/textwiser
|
da500f24c8d35d29e3ff77702b7c5ece244562cc
|
[
"Apache-2.0"
] | 24
|
2020-07-02T13:31:46.000Z
|
2022-03-25T02:27:24.000Z
|
textwiser/transformations/__init__.py
|
fmr-llc/textwiser
|
2c5bdd73c26bd3fb7bd2f324f57d99233aa9c17f
|
[
"Apache-2.0"
] | 5
|
2020-07-23T16:43:02.000Z
|
2022-02-23T17:05:38.000Z
|
textwiser/transformations/__init__.py
|
fmr-llc/textwiser
|
2c5bdd73c26bd3fb7bd2f324f57d99233aa9c17f
|
[
"Apache-2.0"
] | 6
|
2021-01-03T08:09:39.000Z
|
2022-03-25T02:18:59.000Z
|
# Copyright 2019 FMR LLC <opensource@fidelity.com>
# SPDX-License-Identifier: Apache-2.0
from textwiser.transformations.pool import _PoolTransformation
from textwiser.transformations.nmf import _NMFTransformation
from textwiser.transformations.lda import _LDATransformation
from textwiser.transformations.svd import _SVDTransformation
from textwiser.transformations.umap_ import _UMAPTransformation
| 44.333333
| 63
| 0.87218
| 43
| 399
| 7.953488
| 0.627907
| 0.190058
| 0.409357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.075188
| 399
| 8
| 64
| 49.875
| 0.910569
| 0.20802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d2147a86f023434d766c6ccac23cb017b63f75c
| 50
|
py
|
Python
|
amplpyfinance/efficient_frontier/__init__.py
|
ampl/amplpyfinance
|
9df026dbba9235b85a0a42d1c24768b8fb2c82e5
|
[
"MIT"
] | null | null | null |
amplpyfinance/efficient_frontier/__init__.py
|
ampl/amplpyfinance
|
9df026dbba9235b85a0a42d1c24768b8fb2c82e5
|
[
"MIT"
] | null | null | null |
amplpyfinance/efficient_frontier/__init__.py
|
ampl/amplpyfinance
|
9df026dbba9235b85a0a42d1c24768b8fb2c82e5
|
[
"MIT"
] | null | null | null |
from .efficient_frontier import EfficientFrontier
| 25
| 49
| 0.9
| 5
| 50
| 8.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 1
| 50
| 50
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d24729caf18c9f840ce63eee05fa1eb7f2db864
| 127
|
py
|
Python
|
apigw/services.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
apigw/services.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
apigw/services.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
from anthill.platform.services import APIGatewayService
class Service(APIGatewayService):
    """Anthill default service.

    Empty subclass of :class:`anthill.platform.services.APIGatewayService`;
    all behavior is inherited unchanged.
    """
| 21.166667
| 55
| 0.787402
| 12
| 127
| 8.333333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11811
| 127
| 5
| 56
| 25.4
| 0.892857
| 0.188976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d36107f07e45fe62007f715bf555f10f6ef82a6
| 128
|
py
|
Python
|
main/views.py
|
toskuef/kraft
|
e499fb5fd6c741c463bb49b5c223068be99d3521
|
[
"Apache-2.0"
] | null | null | null |
main/views.py
|
toskuef/kraft
|
e499fb5fd6c741c463bb49b5c223068be99d3521
|
[
"Apache-2.0"
] | null | null | null |
main/views.py
|
toskuef/kraft
|
e499fb5fd6c741c463bb49b5c223068be99d3521
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
def index(request):
    """Render the site's main landing page (main/index.html)."""
    return render(request, 'main/index.html')
| 18.285714
| 36
| 0.734375
| 16
| 128
| 5.875
| 0.75
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 128
| 6
| 37
| 21.333333
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0.117188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6d3b54f9a6bd52a43531bef10c3e5cbb8114e23d
| 26
|
py
|
Python
|
exercises/poker/poker.py
|
haithamk/python-exercism
|
8166a98ba771e0d527efdda421d3d9e741f0459b
|
[
"MIT"
] | null | null | null |
exercises/poker/poker.py
|
haithamk/python-exercism
|
8166a98ba771e0d527efdda421d3d9e741f0459b
|
[
"MIT"
] | null | null | null |
exercises/poker/poker.py
|
haithamk/python-exercism
|
8166a98ba771e0d527efdda421d3d9e741f0459b
|
[
"MIT"
] | 1
|
2021-12-29T19:26:23.000Z
|
2021-12-29T19:26:23.000Z
|
def poker(hand):
    """Return the best hand(s) from *hand*.

    Exercism 'poker' exercise stub -- not yet implemented; currently
    returns None for any input.
    """
    pass
| 8.666667
| 16
| 0.615385
| 4
| 26
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 26
| 2
| 17
| 13
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6d43b05e32306f4234f4eecd76dc6a8ebf66d8bf
| 35,698
|
py
|
Python
|
selenium_login.py
|
JuanLeee/Kbot
|
1013f086bec682c93b9104b31748d7c4bcd5a957
|
[
"MIT"
] | null | null | null |
selenium_login.py
|
JuanLeee/Kbot
|
1013f086bec682c93b9104b31748d7c4bcd5a957
|
[
"MIT"
] | null | null | null |
selenium_login.py
|
JuanLeee/Kbot
|
1013f086bec682c93b9104b31748d7c4bcd5a957
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC, wait
from selenium.webdriver.common.action_chains import ActionChains
from tesseract_ocr import *
from card_image import url_to_image
import tesseract_ocr
import sys
import random
import logging
import json
from discord_webhook import DiscordWebhook
from good_stuffs import dict_good_stuff_static
from good_stuffs import dict_good_stuff_addition
from good_stuffs import char_numbers
import time
import math
import time
from dotenv import load_dotenv
load_dotenv()
class Discord_Scraper:
def __init__(self, driver, action, user_name, password, server_name, channel_name, server_name_list, id_name, timer):
self.hash_table = {}
self.count_messages = 0
self.driver = driver
self.action = action
self.user_name = user_name
self.password = password
self.server_name = server_name
self.server_name_list = server_name_list
self.channel_name = channel_name
global flag_debug
global flag_cd_grab
global flag_cd_grab_long
global flag_global_clicked
global flag_refresh
global flag_stop
flag_debug = False
flag_refresh = False
flag_global_clicked = False
self.clicked_card = ''
self.flag_clicked = False
self.count_reset = 10
self.count_clicked = self.count_reset
self.id_name = id_name
self.curr_id = ''
if timer:
self.sleep_timer_grab = 300
self.sleep_timer_drop = 900
else:
self.sleep_timer_grab = 600
self.sleep_timer_drop = 1800
flag_stop = False
self.ocr = OCR_PyTes()
self.dict_good_stuff = dict_good_stuff_static
self.dict_good_stuff_addition = dict_good_stuff_addition
for key in self.dict_good_stuff.keys():
temp_value = self.dict_good_stuff[key].split(' ', 1)[0]
self.dict_good_stuff[key] = temp_value
for key in self.dict_good_stuff_addition.keys():
temp_value = self.dict_good_stuff_addition[key].split(' ', 1)[0]
self.dict_good_stuff_addition[key] = temp_value
self.time = time.time()
self.counter = 50
self.disc_webhook = os.getenv('DISCORD_LINK')
def login_sign(self):
error_count = 0
while error_count < 2:
try:
self.driver.get("https://discord.com/login")
print('Login')
username_input = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.NAME, 'email')))
username_input.send_keys(self.user_name)
password_input = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.NAME, 'password')))
password_input.send_keys(self.password)
print('Submit button')
login_button = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.XPATH, "//button[@type='submit']")))
self.driver.execute_script(
"arguments[0].click();", login_button)
server_name = "//*[@data-list-item-id=\"" + \
self.server_name + "\"]"
print("Trying if School Pop up")
try:
hcaptcha = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, 'flexCenter-3_1bcw')))
print("HCaptcha")
pop_up = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.CLASS_NAME, 'content-1LAB8Z')))
pop_up_button = WebDriverWait(pop_up, 20).until(
EC.presence_of_element_located((By.CLASS_NAME, 'close-hZ94c6')))
self.driver.execute_script(
"arguments[0].click();", pop_up_button)
print("School Pop up")
except:
print("No HCaptcha")
# logging.error("Exception occurred", exc_info=True)
try:
pop_up = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, 'content-1LAB8Z')))
pop_up_button = WebDriverWait(pop_up, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, 'close-hZ94c6')))
self.driver.execute_script(
"arguments[0].click();", pop_up_button)
print("School Pop up")
except:
print("School No Pop up")
# logging.error("Exception occurred", exc_info=True)
print('Server')
server_icon = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.XPATH, server_name)))
self.driver.execute_script(
"arguments[0].click();", server_icon)
print("Trying to see if pop up")
try:
pop_up = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, 'focusLock-Ns3yie')))
self.driver.execute_script("arguments[0].click();", WebDriverWait(pop_up, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, 'button-38aScr'))))
print("Pop up")
except:
print("No Pop up")
# logging.error("Exception occurred", exc_info=True)
print('Channel Scroll')
channel_list = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.CLASS_NAME, 'scroller-RmtA4e')))
self.action.move_to_element(channel_list.find_elements(
By.CLASS_NAME, 'containerDefault--pIXnN')[-1]).perform()
time.sleep(1)
self.action.move_to_element(channel_list.find_elements(
By.CLASS_NAME, 'containerDefault--pIXnN')[-1]).perform()
channel_name = "//*[@data-list-item-id=\"" + \
self.channel_name + "\"]"
print('Channel')
logging.info("Logged into " + self.server_name_list)
channel_icon = WebDriverWait(self.driver, 120).until(
EC.presence_of_element_located((By.XPATH, channel_name)))
self.driver.execute_script(
"arguments[0].click();", channel_icon)
webhook = DiscordWebhook(url=self.disc_webhook, rate_limit_retry=True,
content='Logged into ' + self.user_name + ' and into server ' + self.server_name_list)
response = webhook.execute()
break
except:
logging.warning("Starting up")
logging.error("Exception occurred", exc_info=True)
time.sleep(5)
def message_log(self, f_condition, f_action):
global flag_debug
global flag_refresh
global flag_global_clicked
global flag_cd_grab
global flag_cd_grab_long
global flag_stop
flag_cd_grab_long = True
flag_cd_grab = True
refresh_counter = 0
logs = WebDriverWait(self.driver, 120, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'message-2qnXI6')))
i = len(logs)-1
while i <= 0:
logs = WebDriverWait(self.driver, 120, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'message-2qnXI6')))
i = len(logs)-1
interval = 0.33
print('Message Scroll')
self.action.move_to_element(WebDriverWait(self.driver, 120, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'message-2qnXI6')))[-2]).perform()
for n in range(1, 15):
WebDriverWait(self.driver, 120, poll_frequency=0.05).until(EC.presence_of_all_elements_located(
(By.CLASS_NAME, 'message-2qnXI6')))[-1].send_keys(Keys.ARROW_DOWN)
n = 20
self.current_state_fill_hash(n)
count_clear = 0
while not flag_stop:
try:
for j in range(1, n):
z = 1 * j
if flag_debug:
print(z)
try:
logs = WebDriverWait(self.driver, 120, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'message-2qnXI6')))
data_list_str = logs[z].get_attribute('id')
self.curr_id = str(data_list_str)
except:
logging.info("NO ATTRIBUTES START" +
" " + self.server_name_list)
logging.info(data_list_str)
logging.info(str(z) + " FAILED ")
break
if data_list_str not in self.hash_table:
text = logs[z].text
self.hash_table[data_list_str] = self.count_messages
self.count_messages += 1
if self.flag_clicked:
self.count_clicked -= 1
if self.id_name + ', your Evasion' in text:
self.count_clicked = self.count_reset
self.flag_clicked = False
flag_cd_grab_long = True
flag_cd_grab = True
flag_global_clicked = False
webhook = DiscordWebhook(url=self.disc_webhook, rate_limit_retry=True,
content=self.user_name + ' got ' + self.clicked_card + ' in ' + self.server_name_list)
logging.info("Evasion Proc")
webhook.execute()
break
elif self.id_name + ' fought off' in text or self.id_name + ' took the' in text:
self.count_clicked = self.count_reset
self.flag_clicked = False
flag_cd_grab_long = False
logging.info("GOTTEM")
print("Gottem")
webhook = DiscordWebhook(url=self.disc_webhook, rate_limit_retry=True,
content=self.user_name + ' got ' + self.clicked_card + ' in ' + self.server_name_list)
webhook.execute()
self.clean_hash_table()
time.sleep(self.sleep_timer_grab)
self.current_state_fill_hash(n)
flag_cd_grab_long = True
flag_cd_grab = True
flag_global_clicked = False
print("Exit: " + self.server_name_list)
break
elif self.count_clicked <= 0 or self.clicked_card in text.lower():
print("Fled")
self.count_clicked = self.count_reset
self.flag_clicked = False
flag_cd_grab = False
self.clean_hash_table()
time.sleep(60)
self.current_state_fill_hash(n)
flag_cd_grab = True
flag_cd_grab_long = True
flag_global_clicked = False
print("Exit: " + self.server_name_list)
break
elif f_condition(text):
f_action()
else:
break
if not flag_cd_grab:
print("Enter: " + self.server_name_list)
self.count_clicked = self.count_reset
self.flag_clicked = False
self.clean_hash_table()
time.sleep(59)
print("Exit: " + self.server_name_list)
self.current_state_fill_hash(n)
elif not flag_cd_grab_long:
print("Enter: " + self.server_name_list)
self.count_clicked = self.count_reset
self.flag_clicked = False
self.clean_hash_table()
time.sleep(self.sleep_timer_grab-1)
print("Exit: " + self.server_name_list)
self.current_state_fill_hash(n)
time.sleep(interval)
except:
logging.warning("Error ml " + self.server_name_list)
logging.error("Exception occurred", exc_info=True)
if flag_stop:
self.driver.quit()
print("Quit " + self.server_name_list)
break
def clean_hash_table(self):
print("Cleaning Hash Table")
for key, value in list(self.hash_table.items()):
if value < (self.count_messages-100):
del self.hash_table[key]
def current_state_fill_hash(self, n):
try:
print("Filling hash table")
logs = WebDriverWait(self.driver, 120, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'message-2qnXI6')))
for j in range(1, n):
z = -1 * j
try:
data_list_str = logs[z].get_attribute('id')
if data_list_str not in self.hash_table:
self.hash_table[data_list_str] = self.count_messages
self.count_messages += 1
else:
break
except:
logging.info("NO ATTRIBUTES START" +
" " + self.server_name_list)
logging.info(str(z) + " FAILED " + self.curr_id)
logging.warning(
"NO ATTRIBUTES START END", exc_info=True)
break
except:
logging.warning(
"Failed Filling", exc_info=True)
def kd_every(self):
global flag_refresh
global flag_global_clicked
global flag_cd_grab
global flag_cd_grab_long
global flag_stop
flag_cd_grab_long = True
flag_cd_grab = True
flag_stop = False
sleep(90)
while not flag_stop:
if not flag_cd_grab or not flag_cd_grab_long or flag_global_clicked:
sleep(20)
else:
try:
logging.warning('Drop')
textbox = self.driver.find_element_by_class_name(
'markup-2BOw-j.slateTextArea-1Mkdgw.fontSize16Padding-3Wk7zP')
textbox.send_keys('kd')
textbox.send_keys(Keys.ENTER)
sleep(float(random.randrange(
self.sleep_timer_drop, self.sleep_timer_drop+300)))
except:
logging.error("Exception occurred", exc_info=True)
self.driver.quit()
print("Quit DROP " + self.server_name_list)
def condition_BOT_droppping(self, text):
try:
if "dropping 4 cards" in text or "dropping 3 cards" in text:
if self.driver.find_element_by_id(self.curr_id).find_element_by_class_name('anchor-3Z-8Bb'):
return True
except:
return False
return False
def condition_BOT_droppping_Server(self, text):
try:
if "cards since this server is currently active!" in text:
if self.driver.find_element_by_id(self.curr_id).find_element_by_class_name('anchor-3Z-8Bb'):
return True
except:
return False
return False
def flag_stop_true(self):
global flag_stop
flag_stop = True
self.driver.quit()
print("Quit " + self.server_name_list)
def action_href_img(self):
# Handle one card-drop message: download the drop image via its anchor
# href, OCR each card slot (right to left), and click the matching
# reaction for the first card that passes the wishlist / print-number
# checks. Retries the whole sequence up to 3 times on errors.
global flag_global_clicked
global flag_cd_grab
error_count = 0
flag_finished = False
while(error_count < 3 and not flag_finished):
try:
img_container = self.driver.find_element_by_id(
self.curr_id).find_element_by_class_name('anchor-3Z-8Bb')
href_link = img_container.get_attribute('href')
error_count_image = 0
try:
# Fetch the drop image; retry a few times if the URL is not ready yet.
image = url_to_image(href_link)
while image is None:
error_count_image += 1
if error_count_image > 3:
break
logging.info("Cant get image:" +
href_link + " " + str(error_count_image))
image = url_to_image(href_link)
h, w, c = image.shape
except:
logging.warning("Cant get image:" + href_link)
logging.warning("Unexpected error:", sys.exc_info()[0])
error_count += 1
if error_count_image > 3:
break
continue
# Wide images hold 4 cards, narrow ones 3; scan from rightmost slot.
max = 4 if w > 900 else 3
pos = max-1
try:
while pos >= 0 and not self.flag_clicked:
print_num = self.ocr.get_print_num(image, pos)
# Branch 1: low print number (> 100 reads as a valid numeric print).
if (math.log10(print_num))+1 > 0 and (int(print_num) > 100) and not flag_global_clicked:
name_card = print_num
read_series = print_num
series = print_num
try:
self.click_reactions(pos)
name_card = self.ocr.get_names_single(
image, pos)
print(str(name_card) + str(series) +
self.server_name_list)
logging.info("Got Name: " + str(name_card) + " Series: " + str(
read_series) + " Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition" + self.server_name_list, exc_info=True)
# Branch 2: match the OCR'd card name against the wishlists.
elif(not flag_global_clicked):
name_card = self.ocr.get_names_single(
image, pos)
if (name_card in self.dict_good_stuff) and not flag_global_clicked:
series = self.dict_good_stuff.get(
name_card, '-1')
read_series = self.ocr.get_names_bottom(
image, pos).split(' ', 1)[0]
# '123456' is a wildcard series that always matches.
if series == '123456' or read_series == series:
try:
self.click_reactions(pos)
print(name_card + " " + series +
" " + self.server_name_list)
logging.info("Got Name: " + name_card + " Series: " + read_series +
" Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[
0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition " + self.server_name_list, exc_info=True)
# Secondary wishlist: may additionally require a specific edition.
elif name_card in self.dict_good_stuff_addition and not flag_global_clicked:
series = self.dict_good_stuff_addition.get(
name_card, '-1')
read_series = self.ocr.get_names_bottom(
image, pos).split(' ', 1)[0]
if series == '123456' or read_series == series:
if name_card in char_numbers:
edition = self.ocr.get_edition_number(
image, pos)
if edition == char_numbers[name_card]:
try:
self.click_reactions(
pos)
print(
name_card + " " + series + " " + self.server_name_list)
logging.info("Got Name: " + name_card + " Series: " + read_series +
" Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[
0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition " + self.server_name_list, exc_info=True)
pos -= 1
flag_finished = True
except:
error_count += 1
logging.error("Exception occurred", exc_info=True)
logging.warning(
"Cant get card name:" + href_link + " pos:" + str(pos) + " w:" + str(w))
except:
logging.warning("Error")
logging.error("Exception occurred", exc_info=True)
error_count += 1
def debug_on(self):
    """Toggle the module-wide debug flag on/off."""
    global flag_debug
    flag_debug = not flag_debug
def get_webelement_id(self):
    """Return the message WebElement currently tracked by ``self.curr_id``."""
    browser = self.driver
    return browser.find_element_by_id(self.curr_id)
def click_reactions(self, pos):
# Click the reaction emoji at slot *pos* on the message tracked by
# self.curr_id. Waits for the reaction bar to appear, clicks via JS,
# and on any failure retries the whole sequence once more.
global flag_global_clicked
global flag_cd_grab
try:
# First attempt: wait (up to 600 s) for the reactions container and buttons.
reactions_container = WebDriverWait(self.driver.find_element_by_id(
self.curr_id), 600, poll_frequency=0.05).until(
EC.presence_of_element_located((By.CLASS_NAME, 'reactions-12N0jA')))
reactions = WebDriverWait(reactions_container, 600, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'reaction-1hd86g')))
# Poll until enough reaction buttons have rendered to cover slot pos.
while len(reactions) < pos+1:
reactions = reactions_container.find_elements(
By.CLASS_NAME, 'reaction-1hd86g')
if not flag_global_clicked:
# JS click avoids Selenium interactability/overlay issues.
self.driver.execute_script("arguments[0].click();", reactions[pos].find_element(
By.CLASS_NAME, 'reactionInner-15NvIl'))
flag_global_clicked = True
print("clicked 1")
except:
time.sleep(0.05)
logging.info("Failed to Click 1st " +
self.server_name_list, exc_info=True)
try:
# Second attempt: re-resolve the message element and repeat.
logs = self.get_webelement_id()
reactions_container = WebDriverWait(logs, 600, poll_frequency=0.05).until(
EC.presence_of_element_located((By.CLASS_NAME, 'reactions-12N0jA')))
reactions = WebDriverWait(reactions_container, 600, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'reaction-1hd86g')))
while len(reactions) < pos+1:
reactions = reactions_container.find_elements(
By.CLASS_NAME, 'reaction-1hd86g')
if not flag_global_clicked:
self.driver.execute_script("arguments[0].click();", reactions[pos].find_element(
By.CLASS_NAME, 'reactionInner-15NvIl'))
flag_global_clicked = True
print("clicked 2")
except:
logging.info("Failed to Click 2nd " +
self.server_name_list, exc_info=True)
def button_click(self, pos):
# Click the message button at slot *pos* on the message tracked by
# self.curr_id (newer drop UI uses buttons instead of reactions).
# Same two-attempt structure as click_reactions.
global flag_global_clicked
global flag_cd_grab
try:
logs = self.driver.find_element_by_id(
self.curr_id)
# Wait (up to 600 s) for the button container and its buttons.
reactions_container = WebDriverWait(logs, 600, poll_frequency=0.05).until(
EC.presence_of_element_located((By.CLASS_NAME, 'children-2goeSq')))
reactions = WebDriverWait(reactions_container, 600, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'button-38aScr')))
if not flag_global_clicked:
WebDriverWait(logs, 600, poll_frequency=0.05).until(
EC.element_to_be_clickable((By.CLASS_NAME, 'button-38aScr')))
# JS click avoids interactability/overlay issues.
self.driver.execute_script(
"arguments[0].click();", reactions[pos])
flag_global_clicked = True
print("clicked 1")
except:
time.sleep(0.05)
logging.info("Failed to Click 1st " +
self.server_name_list, exc_info=True)
try:
# Second attempt: re-resolve the message element and repeat.
logs = self.get_webelement_id()
reactions_container = WebDriverWait(logs, 600, poll_frequency=0.05).until(
EC.presence_of_element_located((By.CLASS_NAME, 'children-2goeSq')))
reactions = WebDriverWait(reactions_container, 600, poll_frequency=0.05).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, 'button-38aScr')))
if not flag_global_clicked:
WebDriverWait(logs, 600, poll_frequency=0.05).until(
EC.element_to_be_clickable((By.CLASS_NAME, 'button-38aScr')))
self.driver.execute_script(
"arguments[0].click();", reactions[pos])
flag_global_clicked = True
print("clicked 2")
except:
logging.info("Failed to Click 2nd " +
self.server_name_list, exc_info=True)
def action_href_img_button(self):
# Button-UI variant of action_href_img: download the drop image, OCR
# each card slot (right to left), and press the matching button for
# the first card that passes the wishlist / print-number checks.
# Retries the whole sequence up to 5 times on errors.
global flag_global_clicked
global flag_cd_grab
error_count = 0
flag_finished = False
while(error_count < 5 and not flag_finished):
try:
img_container = self.driver.find_element_by_id(
self.curr_id).find_element_by_class_name('anchor-3Z-8Bb')
href_link = img_container.get_attribute('href')
error_count_image = 0
try:
# Fetch the drop image; retry up to 10 times if not ready yet.
image = url_to_image(href_link)
while image is None:
error_count_image += 1
if error_count_image > 10:
break
logging.info("Cant get image:" +
href_link + " " + str(error_count_image))
image = url_to_image(href_link)
h, w, c = image.shape
except:
time.sleep(0.1)
logging.warning("Cant get image:" + href_link)
logging.warning("Unexpected error:", sys.exc_info()[0])
error_count += 1
continue
# Wide images hold 4 cards, narrow ones 3; scan from rightmost slot.
max = 4 if w > 900 else 3
pos = max-1
try:
while pos >= 0 and not self.flag_clicked:
print_num = self.ocr.get_print_num(image, pos)
# Branch 1: low print number (> 100 reads as a valid numeric print).
if (math.log10(print_num))+1 > 0 and (int(print_num) > 100) and not flag_global_clicked:
name_card = print_num
read_series = print_num
series = print_num
try:
self.button_click(pos)
name_card = self.ocr.get_names_single(
image, pos)
print(str(name_card) + str(series) +
self.server_name_list)
logging.info("Got Name: " + str(name_card) + " Series: " + str(
read_series) + " Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition " + self.server_name_list, exc_info=True)
# Branch 2: match the OCR'd card name against the wishlists.
else:
name_card = self.ocr.get_names_single(
image, pos)
if (name_card in self.dict_good_stuff and not flag_global_clicked):
series = self.dict_good_stuff.get(
name_card, '-1')
read_series = self.ocr.get_names_bottom(
image, pos).split(' ', 1)[0]
# '123456' is a wildcard series that always matches.
if series == '123456' or read_series == series:
try:
self.button_click(pos)
print(name_card + " " + series +
" " + self.server_name_list)
logging.info("Got Name: " + name_card + " Series: " + read_series +
" Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[
0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition " + self.server_name_list, exc_info=True)
# Secondary wishlist: may additionally require a specific edition.
elif (name_card in self.dict_good_stuff_addition and not flag_global_clicked):
series = self.dict_good_stuff_addition.get(
name_card, '-1')
read_series = self.ocr.get_names_bottom(
image, pos).split(' ', 1)[0]
if series == '123456' or read_series == series:
if name_card in char_numbers:
edition = self.ocr.get_edition_number(
image, pos)
if edition == char_numbers[name_card]:
try:
self.button_click(
pos)
print(
name_card + " " + series + " " + self.server_name_list)
logging.info("Got Name: " + name_card + " Series: " + read_series +
" Server: " + self.server_name_list + " URL: " + href_link)
self.clicked_card = name_card.split(' ', 1)[
0]
self.flag_clicked = True
except:
error_count += 1
logging.warning(
"Cant Click Edition " + self.server_name_list, exc_info=True)
pos -= 1
flag_finished = True
except:
error_count += 1
logging.error("Exception occurred", exc_info=True)
logging.warning(
"Cant get card name:" + href_link + " pos:" + str(pos) + " w:" + str(w))
except:
logging.warning("Error")
logging.error("Exception occurred", exc_info=True)
error_count += 1
def action_href_img_button_dropping(self):
# Like action_href_img_button, but every so often also types the 'kd'
# drop command itself: after 100+ invocations AND once sleep_timer_drop
# seconds have elapsed since the last drop, send 'kd' and reset counters.
if self.counter > 100:
if (time.time() - self.time) > self.sleep_timer_drop:
logging.warning('Drop')
textbox = self.driver.find_element_by_class_name(
'markup-2BOw-j.slateTextArea-1Mkdgw.fontSize16Padding-3Wk7zP')
textbox.send_keys('kd')
textbox.send_keys(Keys.ENTER)
self.time = time.time()
self.counter = 0
self.counter += 1
# Then handle the current drop message as usual.
self.action_href_img_button()
def start_up(self):
    """Log in, then watch the channel for bot drops, grabbing via reactions."""
    self.login_sign()
    # Pair the drop-detection predicate with the reaction-click handler.
    self.message_log(self.condition_BOT_droppping, self.action_href_img)
def start_up_button(self):
    """Log in, then watch the channel for bot drops, grabbing via buttons."""
    self.login_sign()
    # Same predicate as start_up, but the button-UI click handler.
    self.message_log(self.condition_BOT_droppping,
                     self.action_href_img_button)
def start_up_button_dropping(self):
    """Log in, then watch for bot drops, grabbing via buttons and
    periodically issuing the 'kd' drop command ourselves."""
    self.login_sign()
    self.message_log(self.condition_BOT_droppping,
                     self.action_href_img_button_dropping)
def start_up_server(self):
    """Log in, then watch for server-activity drops, grabbing via reactions."""
    self.login_sign()
    # Server-activity predicate paired with the reaction-click handler.
    self.message_log(self.condition_BOT_droppping_Server,
                     self.action_href_img)
def start_up_kd(self):
    """Log in, then run the periodic 'kd' drop loop (no grabbing)."""
    self.login_sign()
    self.kd_every()
| 48.436906
| 143
| 0.479747
| 3,507
| 35,698
| 4.601654
| 0.084403
| 0.029124
| 0.035568
| 0.0435
| 0.799294
| 0.771285
| 0.750899
| 0.719978
| 0.702441
| 0.681435
| 0
| 0.020497
| 0.441033
| 35,698
| 736
| 144
| 48.502717
| 0.788263
| 0.004258
| 0
| 0.686567
| 0
| 0
| 0.065894
| 0.012549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031343
| false
| 0.007463
| 0.034328
| 0.001493
| 0.077612
| 0.073134
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edb5d3d8d8806423b588bab17da1e3091d722870
| 91
|
py
|
Python
|
autogluon/core/__init__.py
|
zhanghang1989/autogluon
|
8bfe6b0da8915020eeb9895fd18d7688c0d604c1
|
[
"Apache-2.0"
] | 6
|
2020-06-16T19:17:36.000Z
|
2021-07-07T14:50:31.000Z
|
autogluon/core/__init__.py
|
zhanghang1989/autogluon
|
8bfe6b0da8915020eeb9895fd18d7688c0d604c1
|
[
"Apache-2.0"
] | null | null | null |
autogluon/core/__init__.py
|
zhanghang1989/autogluon
|
8bfe6b0da8915020eeb9895fd18d7688c0d604c1
|
[
"Apache-2.0"
] | 2
|
2020-12-13T16:40:04.000Z
|
2021-03-08T09:14:16.000Z
|
from .space import *
from .task import *
from .decorator import *
from . import optimizer
| 15.166667
| 24
| 0.736264
| 12
| 91
| 5.583333
| 0.5
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186813
| 91
| 5
| 25
| 18.2
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
edcf1173202e58c80335e9946bf92f418cbfb90c
| 177
|
py
|
Python
|
Scripts/using_name.py
|
StoneZhu2017/learning-python
|
f7aa7a0908cb4e156278494930e8be6a20aeba57
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/using_name.py
|
StoneZhu2017/learning-python
|
f7aa7a0908cb4e156278494930e8be6a20aeba57
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/using_name.py
|
StoneZhu2017/learning-python
|
f7aa7a0908cb4e156278494930e8be6a20aeba57
|
[
"bzip2-1.0.6"
] | null | null | null |
#!/usr/bin/python3
# filename: using_name.py
# Demonstrates the __name__ idiom: which message prints depends on
# whether this file is executed directly or imported as a module.
if __name__ == '__main__':
    print('This program is being run by itself')
else:
    print('I am being imported form another module')
| 22.125
| 52
| 0.717514
| 27
| 177
| 4.37037
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006711
| 0.158192
| 177
| 7
| 53
| 25.285714
| 0.785235
| 0.220339
| 0
| 0
| 0
| 0
| 0.602941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ede1ccae33fe50a0f877053257aac44a88ecfc97
| 159
|
py
|
Python
|
command_line_tools/__init__.py
|
NREL/rlmolecule
|
7e98dca49ea82bf9d14164955d82adfa8bbc2d64
|
[
"BSD-3-Clause"
] | 16
|
2020-12-28T21:45:09.000Z
|
2022-03-19T12:03:58.000Z
|
command_line_tools/__init__.py
|
NREL/rlmolecule
|
7e98dca49ea82bf9d14164955d82adfa8bbc2d64
|
[
"BSD-3-Clause"
] | 56
|
2020-12-30T16:12:33.000Z
|
2022-02-02T18:32:44.000Z
|
command_line_tools/__init__.py
|
NREL/rlmolecule
|
7e98dca49ea82bf9d14164955d82adfa8bbc2d64
|
[
"BSD-3-Clause"
] | 7
|
2021-01-05T01:34:04.000Z
|
2021-09-29T13:42:44.000Z
|
from .command_line_config import parse_config_from_args, merge_configs
from .run_tools import makedir_if_not_exists, setup_run, write_config_log, get_run_name
| 53
| 87
| 0.886792
| 27
| 159
| 4.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 159
| 2
| 88
| 79.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
610037c680f506aada84982212876a133919f396
| 106
|
py
|
Python
|
target_statistic_encoding/__init__.py
|
CircArgs/target_statistic_encoding
|
4ea812b15b595a9c4b53e9cddf41a9da1d48e3cb
|
[
"MIT"
] | 1
|
2020-06-11T02:09:49.000Z
|
2020-06-11T02:09:49.000Z
|
target_statistic_encoding/__init__.py
|
CircArgs/target_statistic_encoding
|
4ea812b15b595a9c4b53e9cddf41a9da1d48e3cb
|
[
"MIT"
] | null | null | null |
target_statistic_encoding/__init__.py
|
CircArgs/target_statistic_encoding
|
4ea812b15b595a9c4b53e9cddf41a9da1d48e3cb
|
[
"MIT"
] | null | null | null |
__package__ = "target_statistic_encoding"
from .cat2num import Cat2Num
from .stat_funcs import stat_funcs
| 26.5
| 41
| 0.849057
| 14
| 106
| 5.857143
| 0.642857
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.103774
| 106
| 3
| 42
| 35.333333
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.235849
| 0.235849
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
610b3ed9c957a3fc962ef6888bf0559d39337b4b
| 68
|
py
|
Python
|
exceptions/CloudTrailBucketMissingLogsTableException.py
|
houey/SkyWrapper
|
26d4d74c9aa389b4a9d6681949bd48770f745953
|
[
"MIT"
] | 106
|
2020-04-13T17:12:04.000Z
|
2022-02-16T13:39:46.000Z
|
exceptions/CloudTrailBucketMissingLogsTableException.py
|
houey/SkyWrapper
|
26d4d74c9aa389b4a9d6681949bd48770f745953
|
[
"MIT"
] | 1
|
2021-05-14T22:49:30.000Z
|
2021-05-14T22:49:30.000Z
|
exceptions/CloudTrailBucketMissingLogsTableException.py
|
houey/SkyWrapper
|
26d4d74c9aa389b4a9d6681949bd48770f745953
|
[
"MIT"
] | 16
|
2020-04-15T15:58:20.000Z
|
2021-09-02T22:40:46.000Z
|
class CloudTrailBucketMissingLogsTableException(Exception):
    """Raised when the CloudTrail bucket has no corresponding logs table."""
| 34
| 59
| 0.867647
| 4
| 68
| 14.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 68
| 2
| 60
| 34
| 0.951613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b626748e1ca5a1e9ce9ce72852c03f3f0aabefd8
| 323
|
py
|
Python
|
xga/samples/__init__.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 12
|
2020-05-16T09:45:45.000Z
|
2022-02-14T14:41:46.000Z
|
xga/samples/__init__.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 684
|
2020-05-28T08:52:09.000Z
|
2022-03-31T10:56:24.000Z
|
xga/samples/__init__.py
|
DavidT3/XGA
|
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
|
[
"BSD-3-Clause"
] | 2
|
2022-02-04T10:55:55.000Z
|
2022-02-04T11:30:56.000Z
|
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner (david.turner@sussex.ac.uk) 06/01/2021, 16:36. Copyright (c) David J Turner
from .base import BaseSample
from .extended import ClusterSample
from .general import PointSample
| 35.888889
| 110
| 0.758514
| 53
| 323
| 4.622642
| 0.792453
| 0.04898
| 0.097959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0.164087
| 323
| 8
| 111
| 40.375
| 0.862963
| 0.668731
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b679abe384179501966dba4869a7f664f87c00b2
| 130
|
py
|
Python
|
avaandmed/utils/__init__.py
|
mihhail-m/avaandmed-py
|
c64b07db989b9aff4cfa7f4e18efc0c47ae5e219
|
[
"MIT"
] | null | null | null |
avaandmed/utils/__init__.py
|
mihhail-m/avaandmed-py
|
c64b07db989b9aff4cfa7f4e18efc0c47ae5e219
|
[
"MIT"
] | 5
|
2022-03-17T15:00:23.000Z
|
2022-03-26T08:33:19.000Z
|
avaandmed/utils/__init__.py
|
mihhail-m/avaandmed-py
|
c64b07db989b9aff4cfa7f4e18efc0c47ae5e219
|
[
"MIT"
] | null | null | null |
from typing import List
def build_endpoint(base_url: str, resources: List[str]):
    """Join *base_url* and each resource segment with '/' into one URL path."""
    parts = [base_url, *resources]
    return "/".join(parts)
| 21.666667
| 56
| 0.707692
| 19
| 130
| 4.684211
| 0.736842
| 0.157303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130769
| 130
| 5
| 57
| 26
| 0.787611
| 0
| 0
| 0
| 0
| 0
| 0.246154
| 0.246154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
b697dfb37a0e69856dba889d05820e2f24985ad1
| 228
|
py
|
Python
|
mdtools/__init__.py
|
JGaed/openmm_constV
|
6564dacbb45870a4fd130fe778468d1fff871a61
|
[
"Unlicense"
] | null | null | null |
mdtools/__init__.py
|
JGaed/openmm_constV
|
6564dacbb45870a4fd130fe778468d1fff871a61
|
[
"Unlicense"
] | null | null | null |
mdtools/__init__.py
|
JGaed/openmm_constV
|
6564dacbb45870a4fd130fe778468d1fff871a61
|
[
"Unlicense"
] | null | null | null |
#!/usr/local/bin/env python
"""
Various Python utilities for OpenMM.
"""
# Define global version.
from mdtools import version
__version__ = version.version
# Import modules.
from mdtools import vvintegrator, velres, ljtools
| 16.285714
| 49
| 0.763158
| 28
| 228
| 6.071429
| 0.678571
| 0.247059
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144737
| 228
| 13
| 50
| 17.538462
| 0.871795
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fcf84ad2337911530a00875bd5988108936a64a5
| 159
|
py
|
Python
|
qhana/thirdparty/maxcut/_solvers/__init__.py
|
UST-QuAntiL/qhana
|
bf499d072dcc37f81efec1b8e17b7d5460db7a04
|
[
"Apache-2.0"
] | 1
|
2021-03-12T14:06:43.000Z
|
2021-03-12T14:06:43.000Z
|
qhana/thirdparty/maxcut/_solvers/__init__.py
|
UST-QuAntiL/qhana
|
bf499d072dcc37f81efec1b8e17b7d5460db7a04
|
[
"Apache-2.0"
] | null | null | null |
qhana/thirdparty/maxcut/_solvers/__init__.py
|
UST-QuAntiL/qhana
|
bf499d072dcc37f81efec1b8e17b7d5460db7a04
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""Max-cut problem solvers following a variety of approaches."""
from . import backend
from ._bm import MaxCutBM
from ._sdp import MaxCutSDP
| 19.875
| 64
| 0.754717
| 23
| 159
| 5.130435
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.157233
| 159
| 7
| 65
| 22.714286
| 0.873134
| 0.45912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1e4110bc6949cb0ab525311bb1a50d06196366b6
| 226
|
py
|
Python
|
tests/test_filesystem_misc.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | 5
|
2015-03-18T01:19:56.000Z
|
2020-10-23T12:44:47.000Z
|
tests/test_filesystem_misc.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_filesystem_misc.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
from rave import filesystem
from .support.filesystem import *
def test_filesystem_native_error():
    # NativeError must keep a reference to the underlying native exception.
    cause = RuntimeError('test')
    wrapped = filesystem.NativeError('Error', cause)
    assert wrapped.native_error == cause
| 22.6
| 50
| 0.787611
| 29
| 226
| 5.896552
| 0.448276
| 0.192982
| 0.245614
| 0.233918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119469
| 226
| 9
| 51
| 25.111111
| 0.859296
| 0
| 0
| 0
| 0
| 0
| 0.039823
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1eb7ca63675a4843a71d6b54c40a90c37e1652ed
| 60
|
py
|
Python
|
hero_db_utils/clients/__init__.py
|
AIO2020/hero-db-utils
|
037ace24a0934ca4df5354b6e90972a8b089e861
|
[
"Apache-2.0"
] | null | null | null |
hero_db_utils/clients/__init__.py
|
AIO2020/hero-db-utils
|
037ace24a0934ca4df5354b6e90972a8b089e861
|
[
"Apache-2.0"
] | null | null | null |
hero_db_utils/clients/__init__.py
|
AIO2020/hero-db-utils
|
037ace24a0934ca4df5354b6e90972a8b089e861
|
[
"Apache-2.0"
] | null | null | null |
from .postgres import SQLBaseClient, PostgresDatabaseClient
| 30
| 59
| 0.883333
| 5
| 60
| 10.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 60
| 1
| 60
| 60
| 0.963636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1ebf538e1715f589ee6b617173750400b91b3c5e
| 1,823
|
py
|
Python
|
test/test_conjugate_gradient.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
test/test_conjugate_gradient.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
test/test_conjugate_gradient.py
|
JohnReid/nolina
|
23894517ac60d27d167447871ef85a4a78cad630
|
[
"MIT"
] | null | null | null |
from codetiming import Timer
import logging
import pytest
import numpy as np
from nolina import random, conjugate_gradient as cg
_logger = logging.getLogger(__name__)
ds = [9, 21, 55]
# Solve A x = b with steepest descent for each test dimension and check
# that the returned x_star actually satisfies the system.
@pytest.mark.parametrize("d", ds)
def test_steepest_descent(d, rng, seed):
minimiser = cg.SteepestDescent(A=random.random_spsd_matrix(d=d, random_state=rng),
b=random.random_vector(d=d, random_state=rng))
with Timer(text='Steepest descent minimiser done in {:.4f} seconds', logger=_logger.info):
x_star = minimiser()
_logger.info('Steepest descent minimiser took %d iterations.', minimiser.niter)
np.testing.assert_allclose(minimiser.A @ x_star, minimiser.b)
# Same check as test_steepest_descent but for the preliminary
# conjugate-gradient implementation.
@pytest.mark.parametrize("d", ds)
def test_conjugate_gradient_preliminary(d, rng, seed):
minimiser = cg.ConjugateGradientPreliminary(A=random.random_spsd_matrix(d=d, random_state=rng),
b=random.random_vector(d=d, random_state=rng))
with Timer(text='Conjugate gradient preliminary minimiser done in {:.4f} seconds', logger=_logger.info):
x_star = minimiser()
_logger.info('Conjugate gradient preliminary minimiser took %d iterations.', minimiser.niter)
np.testing.assert_allclose(minimiser.A @ x_star, minimiser.b)
# Same check as above for the final conjugate-gradient implementation.
@pytest.mark.parametrize("d", ds)
def test_conjugate_gradient(d, rng, seed):
minimiser = cg.ConjugateGradient(A=random.random_spsd_matrix(d=d, random_state=rng),
b=random.random_vector(d=d, random_state=rng))
with Timer(text='Conjugate gradient minimiser done in {:.4f} seconds', logger=_logger.info):
x_star = minimiser()
_logger.info('Conjugate gradient minimiser took %d iterations.', minimiser.niter)
np.testing.assert_allclose(minimiser.A @ x_star, minimiser.b)
| 46.74359
| 108
| 0.704882
| 241
| 1,823
| 5.153527
| 0.232365
| 0.095813
| 0.038647
| 0.062802
| 0.775362
| 0.729469
| 0.729469
| 0.704509
| 0.704509
| 0.704509
| 0
| 0.005369
| 0.182666
| 1,823
| 38
| 109
| 47.973684
| 0.828188
| 0
| 0
| 0.387097
| 0
| 0
| 0.175535
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.096774
| false
| 0
| 0.16129
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1edb65ff392560a68a6a63b67e394c7758d94c04
| 150
|
py
|
Python
|
ai_coin_identifier/urls.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | null | null | null |
ai_coin_identifier/urls.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | 1
|
2022-01-10T16:50:44.000Z
|
2022-01-10T16:50:44.000Z
|
ai_coin_identifier/urls.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.urls import path
from . import views
# Route the app root to the AI coin identifier home page view.
urlpatterns = [
path('', views.ai_coin_identifier_home_page, name='ai-coin-identifier-home'),
]
| 21.428571
| 81
| 0.74
| 21
| 150
| 5.095238
| 0.619048
| 0.11215
| 0.299065
| 0.373832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 150
| 6
| 82
| 25
| 0.823077
| 0
| 0
| 0
| 0
| 0
| 0.153333
| 0.153333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.