Dataset schema (113 columns; column name and dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
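For orientation, here is a minimal sketch of how a dump with this schema could be inspected with pandas. The file name `sample.parquet` and the assumption that the dump is stored as Parquet are hypothetical; only the column names come from the schema above.

```python
import pandas as pd

# Hypothetical file name; assumes the dump is stored as Parquet
# (the storage format is not stated in this section).
df = pd.read_parquet("sample.parquet")

# The schema groups into repo metadata, the raw file `content`,
# and qsc_* quality signals; pick a few named columns to sanity-check records.
cols = ["hexsha", "size", "lang", "max_stars_repo_name", "alphanum_fraction"]
print(df[cols].head())

# Filter on quality signals, e.g. keep files that parse (cate_ast == 1)
# and are not dominated by duplicated 10-grams (threshold chosen arbitrarily).
clean = df[(df["qsc_codepython_cate_ast_quality_signal"] == 1)
           & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.3)]
print(len(clean))
```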
**Record 1 — hexsha `5f05a35db2bf24e5cd3d450829e44e1d6868265e`**

- size: 2,348 · ext: `py` · lang: Python
- max_stars: count 2 (events 2020-06-06T23:10:27.000Z – 2020-10-06T19:12:26.000Z), repo `victoraguilarc/agendas`, head `31b24a2d6350605b638b59062f297ef3f58e9879`, path `apps/agendas/tests/unit/selectors/test_doctor_profile_selector.py`, licenses `["MIT"]`
- max_issues: count 3 (2021-04-08T20:44:38.000Z – 2021-09-22T19:04:16.000Z), repo `victoraguilarc/medical-appointment`, same head and path, licenses `["MIT"]`
- max_forks: count 1 (2020-10-10T14:07:37.000Z – 2020-10-10T14:07:37.000Z), repo `victoraguilarc/agendas`, same head and path, licenses `["MIT"]`

content:

```python
# -*- coding: utf-8 -*-
import pytest
from django.db.models import QuerySet
from rest_framework.exceptions import NotFound

from apps.accounts.response_codes import INVALID_TOKEN
from apps.accounts.selectors.pending_action_selector import PendingActionSelector
from apps.accounts.tests.factories.pending_action import PendingActionFactory
from apps.accounts.tests.factories.user import UserFactory
from apps.agendas.models import DoctorProfile
from apps.agendas.response_codes import DOCTOR_NOT_FOUND
from apps.agendas.selectors.appointment import AppointmentSelector
from apps.agendas.selectors.doctor_profile import DoctorProfileSelector
from apps.agendas.tests.factories.doctor_profile import DoctorProfileFactory
from apps.contrib.api.exceptions import SimpleValidationError
from faker import Factory
from faker.providers import misc

faker = Factory.create()
faker.add_provider(misc)


@pytest.mark.django_db
class DoctorProfileSelectorTests:

    @staticmethod
    def test_get_by_uuid():
        doctor_profile = DoctorProfileFactory()
        selected_doctor_profile = DoctorProfileSelector.get_by_uuid(str(doctor_profile.uuid))
        assert isinstance(doctor_profile, DoctorProfile)
        assert selected_doctor_profile == doctor_profile

    @staticmethod
    def test_get_by_uuid_not_found():
        with pytest.raises(NotFound) as exec_info:
            DoctorProfileSelector.get_by_uuid(faker.uuid4())
        assert exec_info.value.detail.code == DOCTOR_NOT_FOUND['code']

    @staticmethod
    def test_get_enabled_doctors(test_user):
        inactive_user = UserFactory(is_active=False)
        active_doctor = DoctorProfileFactory(user=test_user)
        DoctorProfileFactory(user=inactive_user)
        doctors = DoctorProfileSelector.get_enabled_doctors()
        assert isinstance(doctors, QuerySet)
        assert doctors.count() == 1
        assert doctors.first() == active_doctor

    @staticmethod
    def test_get_enabled_doctors_empty():
        inactive_user = UserFactory(is_active=False)
        DoctorProfileFactory(user=inactive_user)
        doctors = DoctorProfileSelector.get_enabled_doctors()
        assert isinstance(doctors, QuerySet)
        assert doctors.count() == 0

    def test_get_by_username_or_email(self):
        pass

    def test_get_by_username_or_email_not_found(self):
        pass
```

Quality signals — avg_line_length 35.044776; max_line_length 93; alphanum_fraction 0.774702. qsc_code_* (suffix `_quality_signal` dropped): num_words 273; num_chars 2,348; mean_word_length 6.406593; frac_words_unique 0.322344; frac_chars_top_2grams 0.04574; top_3grams 0.034305; top_4grams 0.050314; frac_chars_dupe_{5..10}grams 0.328188 / 0.293882 / 0.179531 / 0.148656 / 0.148656 / 0.148656; frac_chars_replacement_symbols 0; frac_chars_digital 0.002024; frac_chars_whitespace 0.158433; size_file_byte 2,348; num_lines 66; num_chars_line_max 94; num_chars_line_mean 35.575758; frac_chars_alphabet 0.883097; frac_chars_comments 0.008944; cate_xml_start 0; frac_lines_dupe_lines 0.28; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.001721; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.16. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.12; cate_var_zero false; frac_lines_pass 0.04; frac_lines_import 0.3; frac_lines_simplefunc 0; score_lines_no_logic 0.44; frac_lines_print 0. The non-suffixed duplicate qsc_* columns are all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat: null); effective 1; hits 0.
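The three surface statistics that follow `content` in the schema can be recomputed from the file text alone. The definitions below are assumptions, not a documented specification, but they are consistent with record 1: 2,348 characters split on `"\n"` gives 67 parts, and 2,348 / 67 = 35.044776, matching the reported avg_line_length.

```python
def surface_stats(content: str):
    # Assumed definitions (consistent with record 1's values, not an official spec):
    # mean line length including the newline, longest line, and the
    # fraction of characters that are alphanumeric.
    lines = content.split("\n")
    avg_line_length = len(content) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction
```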
**Record 2 — hexsha `5f0795f4ecbd539f9866bfa75241bdbacd313bed`**

- size: 1,532 · ext: `py` · lang: Python
- max_stars / max_issues / max_forks: path `catalog/views/api.py`, repo `iraquitan/catalog-app-flask`, head `563981ddc8d55c62428cd4811bdea73ee8f8a846`, licenses `["MIT"]`; all counts and event datetimes null

content:

```python
# -*- coding: utf-8 -*-
"""
* Created by PyCharm.
* Project: catalog
* Author name: Iraquitan Cordeiro Filho
* Author login: pma007
* File: api
* Date: 2/26/16
* Time: 11:26
* To change this template use File | Settings | File Templates.
"""
from flask import Blueprint, jsonify

from catalog.models import Category, Item

# Define api Blueprint for JSON endpoints
api = Blueprint('api', __name__)


@api.route('/catalog.json')
def catalog_api():
    categories = Category.query.all()
    all_result = []
    for category in categories:
        items = Item.query.filter_by(category_id=category.id).all()
        result = category.serialize
        result['Item'] = [i.serialize for i in items]
        all_result.append(result)
    return jsonify(Category=all_result)


@api.route('/category/<string:category_slug>.json')
def category_api(category_slug):
    category = Category.query.filter_by(slugfield=category_slug).first_or_404()
    return jsonify(category=category.serialize)


@api.route('/category/<string:category_slug>/items.json')
def category_items_api(category_slug):
    category = Category.query.filter_by(slugfield=category_slug).first_or_404()
    items = Item.query.filter_by(category_id=category.id).all()
    result = category.serialize
    result['item'] = [i.serialize for i in items]
    return jsonify(category=result)


@api.route('/item/<string:item_slug>.json')
def item_api(item_slug):
    item = Item.query.filter_by(slugfield=item_slug).first_or_404()
    return jsonify(item=item.serialize)
```

Quality signals — avg_line_length 30.039216; max_line_length 79; alphanum_fraction 0.719321. qsc_code_*: num_words 208; num_chars 1,532; mean_word_length 5.134615; frac_words_unique 0.298077; frac_chars_top_2grams 0.067416; top_3grams 0.060861; top_4grams 0.047753; frac_chars_dupe_{5..10}grams 0.426966 / 0.426966 / 0.325843 / 0.325843 / 0.325843 / 0.325843; frac_chars_replacement_symbols 0; frac_chars_digital 0.016949; frac_chars_whitespace 0.152742; size_file_byte 1,532; num_lines 50; num_chars_line_max 80; num_chars_line_mean 30.64; frac_chars_alphabet 0.805855; frac_chars_comments 0.177546; cate_xml_start 0; frac_lines_dupe_lines 0.214286; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.106999; frac_chars_long_word_length 0.087691; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.142857; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.071429; frac_lines_simplefunc 0; score_lines_no_logic 0.357143; frac_lines_print 0.071429. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 3 — hexsha `5f0912cfcdcc52bdc014aa57f2387fcc7c7c1a0f`**

- size: 1,854 · ext: `py` · lang: Python
- path `tests/test_debug.py`, repo `HazemElAgaty/psycopg2-pgevents`, head `c0952608777052ea2cb90d8c78802ad03f8f3da1`, licenses `["MIT"]` (all three blocks)
- max_stars: count 11 (2019-07-12T17:25:36.000Z – 2021-06-07T12:51:31.000Z); max_issues: count 5 (2020-06-21T14:58:21.000Z – 2021-09-06T09:34:32.000Z); max_forks: count 4 (2019-07-12T17:25:37.000Z – 2021-07-13T13:26:58.000Z)

content:

```python
from pytest import raises

from psycopg2_pgevents import debug
from psycopg2_pgevents.debug import log, set_debug


class TestDebug:
    def test_set_debug_disabled(self):
        debug._DEBUG_ENABLED = True
        set_debug(False)
        assert not debug._DEBUG_ENABLED

    def test_set_debug_enabled(self):
        debug._DEBUG_ENABLED = False
        set_debug(True)
        assert debug._DEBUG_ENABLED

    def test_log_invalid_category(self, log_capture):
        with raises(ValueError):
            log("foo", category="warningwarningwarning")
        logs = log_capture.actual()
        assert len(logs) == 0

    def test_log_debug_disabled(self, log_capture):
        set_debug(False)
        log("foo")
        logs = log_capture.actual()
        # Only log should be the one notifying that logging is being disabled
        assert len(logs) == 1

    def test_log_info(self, log_capture):
        log("foo")
        logs = log_capture.actual()
        assert len(logs) == 1
        assert ("pgevents", "INFO", "foo") == logs.pop()

    def test_log_error(self, log_capture):
        log("foo", category="error")
        logs = log_capture.actual()
        assert len(logs) == 1
        assert ("pgevents", "ERROR", "foo") == logs.pop()

    def test_log_args(self, log_capture):
        log("foo %s %s %d", "bar", "baz", 1)
        log("foo %(word1)s %(word2)s %(num)d", {"word2": "baz", "num": 1, "word1": "bar"})
        logs = log_capture.actual()
        assert len(logs) == 2
        assert ("pgevents", "INFO", "foo bar baz 1") == logs.pop(0)
        assert ("pgevents", "INFO", "foo bar baz 1") == logs.pop(0)

    def test_log_custom_logger(self, log_capture):
        log("foo", logger_name="test")
        logs = log_capture.actual()
        assert len(logs) == 1
        assert ("test", "INFO", "foo") == logs.pop()
```

Quality signals — avg_line_length 27.264706; max_line_length 90; alphanum_fraction 0.600863. qsc_code_*: num_words 242; num_chars 1,854; mean_word_length 4.404959; frac_words_unique 0.235537; frac_chars_top_2grams 0.11257; top_3grams 0.056285; top_4grams 0.11257; frac_chars_dupe_{5..10}grams 0.439024 / 0.324578 / 0.257036 / 0.195122 / 0.195122 / 0.157599; frac_chars_replacement_symbols 0; frac_chars_digital 0.013206; frac_chars_whitespace 0.264833; size_file_byte 1,854; num_lines 67; num_chars_line_max 91; num_chars_line_mean 27.671642; frac_chars_alphabet 0.768892; frac_chars_comments 0.036138; cate_xml_start 0; frac_lines_dupe_lines 0.363636; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.114846; frac_chars_long_word_length 0.011765; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.295455. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.181818; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.068182; frac_lines_simplefunc 0; score_lines_no_logic 0.272727; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
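A plausible reading of the `frac_chars_dupe_*grams` signals, consistent with how RedPajama-style quality signals are usually defined, is the fraction of characters falling inside word n-grams that occur more than once. The sketch below implements that reading; both the whitespace tokenization and the definition itself are assumptions, not documented in this section.

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    # Assumed to mirror RedPajama-style "fraction of characters inside
    # duplicated word n-grams"; the exact tokenization used for the
    # qsc_code_* signals is not stated here.
    words = text.split()
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, ng in enumerate(ngrams):
        if counts[ng] > 1:
            for j in range(i, i + n):
                covered[j] = True
    chars_covered = sum(len(w) for w, c in zip(words, covered) if c)
    total_chars = sum(len(w) for w in words) or 1
    return chars_covered / total_chars
```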
**Record 4 — hexsha `5f0a52e79ef2f2c527b1cb664f5e0e589f53a413`**

- size: 1,148 · ext: `py` · lang: Python
- path `abstracto-application/installer/src/main/docker/deployment/python/templates_deploy.py`, repo `Sheldan/abstracto`, head `cef46737c5f34719c80c71aa9cd68bc53aea9a68`, licenses `["MIT"]` (all three blocks)
- max_stars: count 5 (2020-05-27T14:18:51.000Z – 2021-03-24T09:23:09.000Z); max_issues: count 5 (2020-05-29T21:53:53.000Z – 2021-05-26T12:19:16.000Z); max_forks: count and event datetimes null

content:

```python
import glob
import os

import sqlalchemy as db
from sqlalchemy.sql import text


def deploy_template_folder(db_config, folder):
    engine = db.create_engine('postgresql://%s:%s@%s:%s/%s' % (db_config.user, db_config.password, db_config.host, db_config.port, db_config.database))
    if not os.path.isdir(folder):
        print("Given path was not a folder. Exiting.")
        exit(1)
    files = glob.glob(folder + '/**/*.ftl', recursive=True)
    templates = []
    for file in files:
        with open(file) as template_file:
            file_content = template_file.read()
            template_key = os.path.splitext(os.path.basename(file))[0]
            template = {'key': template_key, 'content': file_content}
            templates.append(template)
    print('Deploying %s templates from folder %s' % (len(templates), folder))
    with engine.connect() as con:
        with con.begin():
            statement = text("""INSERT INTO template(key, content, last_modified) VALUES(:key, :content, NOW()) ON CONFLICT (key) DO UPDATE SET content = :content""")
            for line in templates:
                con.execute(statement, **line)
```

Quality signals — avg_line_length 37.032258; max_line_length 166; alphanum_fraction 0.642857. qsc_code_*: num_words 151; num_chars 1,148; mean_word_length 4.781457; frac_words_unique 0.463576; frac_chars_top_2grams 0.066482; top_3grams 0.012465; top_4grams 0.01108; frac_chars_dupe_{5..10}grams all 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.002252; frac_chars_whitespace 0.226481; size_file_byte 1,148; num_lines 30; num_chars_line_max 167; num_chars_line_mean 38.266667; frac_chars_alphabet 0.810811; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0.043478; frac_chars_string_length 0.21777; frac_chars_long_word_length 0.023519; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.043478; cate_var_zero false; frac_lines_pass 0.043478; frac_lines_import 0.173913; frac_lines_simplefunc 0; score_lines_no_logic 0.217391; frac_lines_print 0.086957. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 5 — hexsha `5f0a965a14ab29cfb59691e71680ca0613d8037e`**

- size: 8,466 · ext: `py` · lang: Python
- max_stars / max_issues / max_forks: path `src/external.py`, repo `erick-dsnk/Electric`, head `7e8aad1f792321d7839717ed97b641bee7a4a64e`, licenses `["Apache-2.0"]`; all counts and event datetimes null

content:

```python
######################################################################
#                             EXTERNAL                               #
######################################################################

from Classes.Metadata import Metadata
from subprocess import PIPE, Popen
from extension import *
from colorama import *
from utils import *
import mslex
import halo
import sys


def handle_python_package(package_name: str, mode: str, metadata: Metadata):
    command = ''
    valid = Popen(mslex.split('pip --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    _, err = valid.communicate()
    if err:
        click.echo(click.style('Python Is Not Installed. Exit Code [0011]', fg='red'))
        disp_error_msg(get_error_message('0011', 'install'))
        handle_exit('ERROR', None, metadata)
    if mode == 'install':
        command = 'python -m pip install --upgrade --no-input'
        command += f' {package_name}'
        proc = Popen(mslex.split(command), stdin=PIPE,
                     stdout=PIPE, stderr=PIPE)
        py_version = sys.version.split()
        for line in proc.stdout:
            line = line.decode('utf-8')
            if f'Collecting {package_name}' in line:
                write(f'Python v{py_version[0]} :: Collecting {package_name}', 'green', metadata)
            if 'Downloading' in line and package_name in line:
                write(
                    f'Python v{py_version[0]} :: Downloading {package_name}', 'green', metadata)
            if 'Installing collected packages' in line and package_name in line:
                write(
                    f'Python v{py_version[0]} :: Installing {package_name}', 'green', metadata)
            if f'Requirement already satisfied: {package_name} ' in line and package_name in line:
                write(
                    f'Python v{py_version[0]} :: {package_name} Is Already Installed And On The Latest Version ==> {line.split()[-1]}', 'yellow', metadata)
            if 'Successfully installed' in line and package_name in line:
                ver = line.split('-')[1]
                write(
                    f'Python v{py_version[0]} :: Successfully Installed {package_name} {ver}', 'green', metadata)
            if 'You should consider upgrading via' in line:
                wants = click.confirm(
                    'Would you like to upgrade your pip version?')
                if wants:
                    write('Updating Pip Version', 'green', metadata)
                    Popen(mslex.split('python -m pip install --upgrade pip'))
    elif mode == 'uninstall':
        command = 'python -m pip uninstall --no-input --yes'
        command += f' {package_name}'
        proc = Popen(mslex.split(command), stdin=PIPE,
                     stdout=PIPE, stderr=PIPE)
        py_version = sys.version.split()
        for line in proc.stdout:
            line = line.decode('utf-8')
            if 'Uninstalling' in line and package_name in line:
                write(
                    f'Python v{py_version[0]} :: Uninstalling {package_name}', 'green', metadata)
            if 'Successfully uninstalled' in line and package_name in line:
                ver = line.split('-')[1]
                write(
                    f'Python v{py_version[0]} :: Successfully Uninstalled {package_name} {ver}', 'green', metadata)
        _, err = proc.communicate()
        if err:
            err = err.decode('utf-8')
            if f'WARNING: Skipping {package_name}' in err:
                write(
                    f'Python v{py_version[0]} :: Could Not Find Any Installations Of {package_name}', 'yellow', metadata)


def handle_node_package(package_name: str, mode: str, metadata: Metadata):
    version_proc = Popen(mslex.split('npm --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    version, err = version_proc.communicate()
    version = version.decode().strip()
    if err:
        click.echo(click.style('npm Or node Is Not Installed. Exit Code [0011]', fg='bright_yellow'))
        disp_error_msg(get_error_message('0011', 'install'))
        handle_exit('ERROR', None, metadata)
    if mode == 'install':
        proc = Popen(mslex.split(f'npm i {package_name} -g'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        write(f'npm v{version} :: Collecting {package_name}', 'green', metadata)
        package_version = None
        for line in proc.stdout:
            line = line.decode()
            if 'node install.js' in line:
                write(f'npm v{version} :: Running `node install.js` for {package_name}', 'green', metadata)
            if package_name in line and '@' in line and 'install' in line or ' postinstall' in line:
                package_version = line.split()[1]
                write(f'npm v{version} :: {package_version} Installing To <=> "{line.split()[3]}"', 'green', metadata)
            if 'Success' in line and package_name in line or 'added' in line:
                write(f'npm v{version} :: Successfully Installed {package_version}', 'green', metadata)
            if 'updated' in line:
                if package_version:
                    write(f'npm v{version} :: Sucessfully Updated {package_version}', 'green', metadata)
                else:
                    write(f'npm v{version} :: Sucessfully Updated {package_name}', 'green', metadata)
    else:
        proc = Popen(mslex.split(f'npm uninstall -g {package_name}'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        for line in proc.stdout:
            line = line.decode()
            if 'up to date' in line:
                write(f'npm v{version} :: Could Not Find Any Existing Installations Of {package_name}', 'yellow', metadata)
            if 'removed' in line:
                number = line.split(' ')[1].strip()
                time = line.split(' ')[4].strip()
                write(f'npm v{version} :: Sucessfully Uninstalled {package_name} And {number} Other Dependencies in {time}', 'green', metadata)


def handle_vscode_extension(package_name: str, mode: str, metadata: Metadata):
    try:
        version_proc = Popen(mslex.split('code --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    except FileNotFoundError:
        click.echo(click.style('Visual Studio Code Or vscode Is Not Installed. Exit Code [0111]', fg='bright_yellow'))
        disp_error_msg(get_error_message('0111', 'install'))
        handle_exit('ERROR', None, metadata)
    version, err = version_proc.communicate()
    version = version.decode().strip().split('\n')[0]
    if err:
        click.echo(click.style('Visual Studio Code Or vscode Is Not Installed. Exit Code [0111]', fg='bright_yellow'))
        disp_error_msg(get_error_message('0111', 'install'))
        handle_exit('ERROR', None, metadata)
    if mode == 'install':
        command = f'code --install-extension {package_name} --force'
        proc = Popen(mslex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        for line in proc.stdout:
            line = line.decode()
            if 'Installing extensions' in line:
                write(f'Code v{version} :: Installing {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
            if 'is already installed' in line:
                write(f'{Fore.GREEN}Code v{version} :: {Fore.MAGENTA}{package_name}{Fore.YELLOW} is already installed!', 'white', metadata)
            if 'was successfully installed' in line:
                write(f'{Fore.GREEN}Code v{version} :: Successfully Installed {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
    if mode == 'uninstall':
        command = f'code --uninstall-extension {package_name} --force'
        proc = Popen(mslex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        for line in proc.stdout:
            line = line.decode()
            if 'Uninstalling' in line:
                write(f'Code v{version} :: Uninstalling {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
            if 'is not installed' in line:
                write(f'{Fore.GREEN}Code v{version} :: {Fore.MAGENTA}{package_name}{Fore.YELLOW} is not installed!', 'white', metadata)
            if 'was successfully uninstalled' in line:
                write(f'{Fore.GREEN}Code v{version} :: Successfully Uninstalled {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
```

Quality signals — avg_line_length 46.516484; max_line_length 155; alphanum_fraction 0.583038. qsc_code_*: num_words 1,007; num_chars 8,466; mean_word_length 4.812314; frac_words_unique 0.147964; frac_chars_top_2grams 0.088527; top_3grams 0.031779; top_4grams 0.034668; frac_chars_dupe_{5..10}grams 0.691911 / 0.605861 / 0.547049 / 0.486587 / 0.434173 / 0.372472; frac_chars_replacement_symbols 0; frac_chars_digital 0.008337; frac_chars_whitespace 0.277463; size_file_byte 8,466; num_lines 181; num_chars_line_max 156; num_chars_line_mean 46.773481; frac_chars_alphabet 0.783881; frac_chars_comments 0.000945; cate_xml_start 0; frac_lines_dupe_lines 0.379562; cate_autogen 0; frac_lines_long_string 0.043796; frac_chars_string_length 0.35126; frac_chars_long_word_length 0.031856; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.021898; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.058394; frac_lines_simplefunc 0; score_lines_no_logic 0.080292; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 6 — hexsha `5f0bcf77d0e89f7eeb81cfefde1fb86ef9a0fc3f`**

- size: 2,844 · ext: `py` · lang: Python
- path `LeetCode/Python3/HashTable/451. Sort Characters By Frequency.py`, head `dc057dd6ea2fc2034e14fd73e07e73e6364be2ae`, licenses `["MIT"]` (all three blocks)
- max_stars: repo `WatsonWangZh/CodingPractice`, count 11 (2019-09-01T22:36:00.000Z – 2021-11-08T08:57:20.000Z); max_issues: repo `WatsonWangZh/LeetCodePractice`, count and event datetimes null; max_forks: repo `WatsonWangZh/LeetCodePractice`, count 2 (2020-05-27T14:58:52.000Z – 2020-05-27T15:04:17.000Z)

content (Chinese comments translated to English):

```python
# Given a string, sort it in decreasing order based on the frequency of characters.

# Example 1:
# Input:
# "tree"
# Output:
# "eert"
# Explanation:
# 'e' appears twice while 'r' and 't' both appear once.
# So 'e' must appear before both 'r' and 't'. Therefore "eetr" is also a valid answer.

# Example 2:
# Input:
# "cccaaa"
# Output:
# "cccaaa"
# Explanation:
# Both 'c' and 'a' appear three times, so "aaaccc" is also a valid answer.
# Note that "cacaca" is incorrect, as the same characters must be together.

# Example 3:
# Input:
# "Aabb"
# Output:
# "bbAa"
# Explanation:
# "bbaA" is also a valid answer, but "Aabb" is incorrect.
# Note that 'A' and 'a' are treated as two different characters.

import collections
import heapq


class Solution:
    def frequencySort(self, s: str) -> str:
        # M1. Simulation  O(nlogn) O(n)
        if not s:
            return s
        # Convert s to a list.
        s = list(s)
        # Sort the characters in s.
        s.sort()
        # Make a list of strings, one for each unique char.
        all_strings = []
        cur_sb = [s[0]]
        for c in s[1:]:
            # If the last character on string builder is different...
            if cur_sb[-1] != c:
                all_strings.append("".join(cur_sb))
                cur_sb = []
            cur_sb.append(c)
        all_strings.append("".join(cur_sb))
        # Sort the strings by length from *longest* to shortest.
        all_strings.sort(key=lambda string: len(string), reverse=True)
        # Convert to a single string to return.
        # Converting a list of strings to a string is often done
        # using this rather strange looking python idiom.
        return "".join(all_strings)

        # ====================================
        # M2. Hash table + sorting  O(nlogn) O(n)
        # Count the occurence on each character
        cnt = collections.defaultdict(int)
        for c in s:
            cnt[c] += 1
        # Sort and Build string
        res = []
        for k, v in sorted(cnt.items(), key=lambda x: -x[1]):
            res += [k] * v
        return "".join(res)

        # ====================================
        # O(nlogk) O(n)
        # Count the occurence on each character
        cnt = collections.Counter(s)
        # Build string
        res = []
        for k, v in cnt.most_common():
            res += [k] * v
        return "".join(res)

        # ====================================
        # M3. Hash table + priority queue  O(nlogk) O(n)
        # Count the occurence on each character
        cnt = collections.Counter(s)
        # Build heap
        heap = [(-v, k) for k, v in cnt.items()]
        heapq.heapify(heap)
        # Build string
        res = []
        while heap:
            v, k = heapq.heappop(heap)
            res += [k] * -v
        return ''.join(res)
```

Quality signals — avg_line_length 27.61165; max_line_length 86; alphanum_fraction 0.522504. qsc_code_*: num_words 371; num_chars 2,844; mean_word_length 3.973046; frac_words_unique 0.38814; frac_chars_top_2grams 0.020353; top_3grams 0.014247; top_4grams 0.024423; frac_chars_dupe_{5..10}grams 0.265943 / 0.220488 / 0.183853 / 0.123474 / 0.123474 / 0.123474; frac_chars_replacement_symbols 0; frac_chars_digital 0.005777; frac_chars_whitespace 0.33052; size_file_byte 2,844; num_lines 103; num_chars_line_max 87; num_chars_line_mean 27.61165; frac_chars_alphabet 0.768382; frac_chars_comments 0.482068; cate_xml_start 0; frac_lines_dupe_lines 0.289474; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.026316; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.052632; frac_lines_simplefunc 0; score_lines_no_logic 0.236842; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 7 — hexsha `5f0bf2755cfa5ea302283c30bc9e0ccfd4f8893d`**

- size: 1,837 · ext: `py` · lang: Python
- max_stars / max_issues / max_forks: path `ttkinter_app.py`, repo `bombero2020/python_tools`, head `393092609c4555e47b9789eb3fcb614ea25fdef9`, licenses `["MIT"]`; all counts and event datetimes null

content (Spanish comment translated; UI string literals left as in the source):

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk


class HashCorpFrame(ttk.Frame):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.name_entry = ttk.Entry(self)
        self.name_entry.pack()
        self.greet_button = ttk.Button(
            self, text="Saludar", command=self.say_hello)
        self.greet_button.pack()
        self.greet_label = ttk.Label(self)
        self.greet_label.pack()

    def say_hello(self):
        self.greet_label["text"] = \
            "¡Hola, {}!".format(self.name_entry.get())


class AboutFrame(ttk.Frame):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.label = ttk.Label(self)
        self.label["text"] = ("Visitanos en recursospython.com y "
                              "foro.recursospython.com.")
        self.label.pack()
        self.web_button = ttk.Button(self, text="Visitar web")
        self.web_button.pack(pady=10)
        self.forum_button = ttk.Button(self, text="Visitar foro")
        self.forum_button.pack()


class Application(ttk.Frame):

    def __init__(self, main_window):
        super().__init__(main_window)
        main_window.title("Hashing Coorp.")
        main_window.geometry('700x400')  # width x height
        main_window.configure(bg='beige')
        self.notebook = ttk.Notebook(self)
        self.hashcorp_frame = HashCorpFrame(self.notebook)
        self.notebook.add(
            self.hashcorp_frame, text="Saludos", padding=10)
        self.about_frame = AboutFrame(self.notebook)
        self.notebook.add(
            self.about_frame, text="Acerca de", padding=10)
        self.notebook.pack(padx=10, pady=10)
        self.pack()


main_window = tk.Tk()
app = Application(main_window)
app.mainloop()
```

Quality signals — avg_line_length 27.41791; max_line_length 66; alphanum_fraction 0.616222. qsc_code_*: num_words 226; num_chars 1,837; mean_word_length 4.79646; frac_words_unique 0.327434; frac_chars_top_2grams 0.064576; top_3grams 0.030443; top_4grams 0.041513; frac_chars_dupe_{5..10}grams 0.285978 / 0.208487 / 0.095941 / 0.095941 / 0.095941 / 0.095941; frac_chars_replacement_symbols 0; frac_chars_digital 0.012266; frac_chars_whitespace 0.245509; size_file_byte 1,837; num_lines 67; num_chars_line_max 67; num_chars_line_mean 27.41791; frac_chars_alphabet 0.76912; frac_chars_comments 0.032118; cate_xml_start 0; frac_lines_dupe_lines 0.136364; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.083333; frac_chars_long_word_length 0.013514; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.090909; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.045455; frac_lines_simplefunc 0; score_lines_no_logic 0.204545; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 8 — hexsha `5f0dcd6e6a26bb27177e11fcbcba91b603bd720d`**

- size: 8,513 · ext: `py` · lang: Python
- path `api/src/dojo.py`, repo `mosoriob/dojo`, head `71bba04c4fdc4224320087b4c400fcba91b6597d`, licenses `["MIT"]` (all three blocks)
- max_stars: count 1 (2021-10-08T00:47:58.000Z – 2021-10-08T00:47:58.000Z); max_issues and max_forks: counts and event datetimes null

content:

```python
import uuid
from typing import List

from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from fastapi import APIRouter, Response, status

from validation import DojoSchema
from src.settings import settings

import logging

logger = logging.getLogger(__name__)

router = APIRouter()

es = Elasticsearch([settings.ELASTICSEARCH_URL], port=settings.ELASTICSEARCH_PORT)


def search_by_model(model_id):
    q = {"query": {"term": {"model_id.keyword": {"value": model_id, "boost": 1.0}}}}
    return q


def search_and_scroll(index, query=None, size=10, scroll_id=None):
    if query:
        q = {
            "query": {
                "query_string": {
                    "query": query,
                }
            },
        }
    else:
        q = {"query": {"match_all": {}}}
    if not scroll_id:
        # we need to kick off the query
        results = es.search(index=index, body=q, scroll="2m", size=size)
    else:
        # otherwise, we can use the scroll
        results = es.scroll(scroll_id=scroll_id, scroll="2m")
    # get count
    count = es.count(index=index, body=q)
    # if results are less than the page size (10) don't return a scroll_id
    if len(results["hits"]["hits"]) < size:
        scroll_id = None
    else:
        scroll_id = results.get("_scroll_id", None)
    return {
        "hits": count["count"],
        "scroll_id": scroll_id,
        "results": [i["_source"] for i in results["hits"]["hits"]],
    }


@router.post("/dojo/directive")
def create_directive(payload: DojoSchema.ModelDirective):
    """
    Create a `directive` for a model. This is the command which is used to execute
    the model container. The `directive` is templated out using Jinja, where each templated `{{ item }}`
    maps directly to the name of a specific `parameter.
    """
    try:
        es.update(index="directives", body={"doc": payload.dict()}, id=payload.model_id)
        return Response(
            status_code=status.HTTP_200_OK,
            headers={"location": f"/dojo/directive/{payload.model_id}"},
            content=f"Created directive for model with id = {payload.model_id}",
        )
    except NotFoundError:
        es.index(index="directives", body=payload.json(), id=payload.model_id)
        return Response(
            status_code=status.HTTP_201_CREATED,
            headers={"location": f"/dojo/directive/{payload.model_id}"},
            content=f"Created directive for model with id = {payload.model_id}",
        )


@router.get("/dojo/directive/{model_id}")
def get_directive(model_id: str) -> DojoSchema.ModelDirective:
    results = es.search(index="directives", body=search_by_model(model_id))
    try:
        directive = results["hits"]["hits"][-1]["_source"]
        return directive
    except:
        return Response(
            status_code=status.HTTP_404_NOT_FOUND,
            content=f"Directive for model {model_id} not found.",
        )


@router.post("/dojo/config")
def create_configs(payload: List[DojoSchema.ModelConfig]):
    """
    Create one or more model `configs`. A `config` is a settings file which is used by the model to
    set a specific parameter level. Each `config` is stored to S3, templated out using Jinja, where each templated `{{ item }}`
    maps directly to the name of a specific `parameter.
    """
    for p in payload:
        es.index(index="configs", body=p.json(), id=p.id)
    return Response(
        status_code=status.HTTP_201_CREATED,
        headers={"location": f"/api/dojo/config/{p.id}"},
        content=f"Created config(s) for model with id = {p.model_id}",
    )


@router.get("/dojo/config/{model_id}")
def get_configs(model_id: str) -> List[DojoSchema.ModelConfig]:
    results = es.search(index="configs", body=search_by_model(model_id))
    try:
        return [i["_source"] for i in results["hits"]["hits"]]
    except:
        return Response(
            status_code=status.HTTP_404_NOT_FOUND,
            content=f"Config(s) for model {model_id} not found.",
        )


@router.post("/dojo/outputfile")
def create_outputfiles(payload: List[DojoSchema.ModelOutputFile]):
    """
    Create an `outputfile` for a model. Each `outputfile` represents a single file that is created upon each model
    execution. Here we store key metadata about the `outputfile` which enables us to find it within the container and
    normalize it into a CauseMos compliant format.
    """
    for p in payload:
        es.index(index="outputfiles", body=p.json(), id=p.id)
    return Response(
        status_code=status.HTTP_201_CREATED,
        headers={"location": f"/api/dojo/outputfile/{p.id}"},
        content=f"Created outputfile(s) for model with id = {p.model_id}",
    )


@router.get("/dojo/outputfile/{model_id}")
def get_outputfiles(model_id: str) -> List[DojoSchema.ModelOutputFile]:
    results = es.search(index="outputfiles", body=search_by_model(model_id))
    try:
        return [i["_source"] for i in results["hits"]["hits"]]
    except:
        return Response(
            status_code=status.HTTP_404_NOT_FOUND,
            content=f"Outputfile(s) for model {model_id} not found.",
        )


### Accessories Endpoints

@router.get("/dojo/accessories/{model_id}")
def get_accessory_files(model_id: str) -> List[DojoSchema.ModelAccessory]:
    """
    Get the `accessory files` for a model.
    Each `accessory file` represents a single file that is created to be
    associated with the model. Here we store key metadata about the
    `accessory file` which enables us to find it within the container and
    provide it to Uncharted.
    """
    try:
        results = es.search(index="accessories", body=search_by_model(model_id))
        return [i["_source"] for i in results["hits"]["hits"]]
    except:
        return Response(
            status_code=status.HTTP_404_NOT_FOUND,
            content=f"Accessory file(s) for model {model_id} not found.",
        )


@router.post("/dojo/accessories")
def create_accessory_file(payload: DojoSchema.ModelAccessory):
    """
    Create or update an `accessory file` for a model.
    `id` is optional and will be assigned a uuid by the API.
    Each `accessory file` represents a single file that is created to be
    associated with the model. Here we store key metadata about the
    `accessory file` which enables us to find it within the container and
    provide it to Uncharted.
    """
    try:
        payload.id = uuid.uuid4()  # update payload with uuid
        es.update(index="accessories", body={"doc": payload.dict()}, id=payload.id)
        return Response(
            status_code=status.HTTP_200_OK,
            headers={"location": f"/dojo/accessory/{payload.model_id}"},
            content=f"Created accessory for model with id = {payload.model_id}",
        )
    except NotFoundError:
        es.index(index="accessories", body=payload.json(), id=payload.id)
        return Response(
            status_code=status.HTTP_201_CREATED,
            headers={"location": f"/dojo/accessory/{payload.model_id}"},
            content=f"Created accessory for model with id = {payload.model_id}",
        )


@router.put("/dojo/accessories")
def create_accessory_files(payload: List[DojoSchema.ModelAccessory]):
    """
    The PUT would overwrite the entire array with a new array.
    For each, create an `accessory file` for a model.
    `id` is optional and will be assigned a uuid by the API.
    Each `accessory file` represents a single file that is created to be
    associated with the model. Here we store key metadata about the
    `accessory file` which enables us to find it within the container and
    provide it to Uncharted.
    """
    if len(payload) == 0:
        return Response(status_code=status.HTTP_400_BAD_REQUEST, content=f"No payload")
    # Delete previous entries.
    try:
        results = es.search(index="accessories", body=search_by_model(payload[0].model_id))
        for i in results["hits"]["hits"]:
            es.delete(index="accessories", id=i["_source"]["id"])
    except Exception as e:
        logger.error(e)
    # Add the new entries.
    for p in payload:
        p.id = uuid.uuid4()  # update payload with uuid
        es.index(index="accessories", body=p.json(), id=p.id)
    return Response(
        status_code=status.HTTP_201_CREATED,
        headers={"location": f"/api/dojo/accessory/{p.id}"},
        content=f"Created accessories(s) for model with id = {p.model_id}",
    )
```

Quality signals — avg_line_length 35.470833; max_line_length 127; alphanum_fraction 0.649595. qsc_code_*: num_words 1,133; num_chars 8,513; mean_word_length 4.770521; frac_words_unique 0.169462; frac_chars_top_2grams 0.045328; top_3grams 0.044403; top_4grams 0.053284; frac_chars_dupe_{5..10}grams 0.583164 / 0.531915 / 0.513784 / 0.483256 / 0.459019 / 0.448844; frac_chars_replacement_symbols 0; frac_chars_digital 0.00765; frac_chars_whitespace 0.232233; size_file_byte 8,513; num_lines 240; num_chars_line_max 128; num_chars_line_mean 35.470833; frac_chars_alphabet 0.819308; frac_chars_comments 0.241278; cate_xml_start 0; frac_lines_dupe_lines 0.348993; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.218002; frac_chars_long_word_length 0.050431; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.073826; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.053691; frac_lines_simplefunc 0; score_lines_no_logic 0.248322; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 9 — hexsha `5f10c305acc4e613b5656eb25e050b130ecbb7b2`**

- size: 631 · ext: `py` · lang: Python
- max_stars / max_issues / max_forks: path `examples/house_prices_kaggle.py`, repo `ChillBoss/ml_automation`, head `50d42b3cd5a3bb2f7a91e4c53bf3bbfe7a3b1741`, licenses `["MIT"]`; all counts and event datetimes null

content:

```python
# Regression Task, assumption is that the data is in the right directory
# data can be taken from https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data
import os

import ml_automation

if __name__ == '__main__':
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    f_train = os.path.join(data_dir, 'train.csv')
    f_test = os.path.join(data_dir, 'test.csv')

    #training
    ml_automation.automate(path=f_train,
                           ignore_cols=['Id'],
                           out_dir='model')

    #predictions
    preds = ml_automation.predict(f_test, model_dir='model')
    print(preds)
```

Quality signals — avg_line_length 30.047619; max_line_length 98; alphanum_fraction 0.66561. qsc_code_*: num_words 89; num_chars 631; mean_word_length 4.438202; frac_words_unique 0.561798; frac_chars_top_2grams 0.060759; top_3grams 0.075949; top_4grams 0.070886; frac_chars_dupe_5grams 0.086076, dupe_{6..10}grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.213946; size_file_byte 631; num_lines 20; num_chars_line_max 99; num_chars_line_mean 31.55; frac_chars_alphabet 0.796371; frac_chars_comments 0.29477; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.093182; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.181818; frac_lines_simplefunc 0; score_lines_no_logic 0.181818; frac_lines_print 0.090909. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 10 — hexsha `5f140a8d48047d7ddb5cd0c7677d6976d0a7cec0`**

- size: 827 · ext: `py` · lang: Python
- path `threatstack/v1/client.py`, repo `giany/threatstack-python-client`, head `c9e0a4bed55685d3a032c6f1a03261d44de64c4a`, licenses `["MIT"]` (all three blocks)
- max_stars: count 4 (2018-03-14T21:51:46.000Z – 2020-01-06T17:25:53.000Z); max_issues: count 4 (2018-01-17T19:58:29.000Z – 2018-04-13T17:03:01.000Z); max_forks: count 6 (2018-01-15T18:46:25.000Z – 2022-02-17T10:13:35.000Z)

content:

```python
"""
V1 Client
"""

from threatstack.base import BaseClient
from threatstack.v1 import resources


class Client(BaseClient):

    BASE_URL = "https://app.threatstack.com/api/v1/"

    def __init__(self, api_key=None, org_id=None, user_id=None, timeout=None):
        BaseClient.__init__(self, api_key=api_key, timeout=timeout)
        self.org_id = org_id
        self.user_id = user_id
        self.agents = resources.Agents(self)
        self.alerts = resources.Alerts(self)
        self.logs = resources.Logs(self)
        self.organizations = resources.Organizations(self)
        self.policies = resources.Policies(self)

    def request_headers(self, _method, _url):
        headers = { "Authorization": self.api_key }
        if self.org_id:
            headers["Organization"] = self.org_id
        return headers
```

Quality signals — avg_line_length 27.566667; max_line_length 78; alphanum_fraction 0.665054. qsc_code_*: num_words 103; num_chars 827; mean_word_length 5.106796; frac_words_unique 0.339806; frac_chars_top_2grams 0.047529; top_3grams 0.057034; top_4grams 0.053232; frac_chars_dupe_{5..10}grams all 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.004695; frac_chars_whitespace 0.227328; size_file_byte 827; num_lines 29; num_chars_line_max 79; num_chars_line_mean 28.517241; frac_chars_alphabet 0.818466; frac_chars_comments 0.010883; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.074166; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.111111; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.111111; frac_lines_simplefunc 0; score_lines_no_logic 0.388889; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 11 — hexsha `5f14372008e665aac666215605a53db7e9d8af9a`**

- size: 5,930 · ext: `py` · lang: Python
- path `djangocms_newsletter/admin/mailinglist.py`, repo `nephila/djangocms-newsletter`, head `5ebd8d3e1e2c85b2791d0261a954469f2548c840`, licenses `["BSD-3-Clause"]` (all three blocks)
- max_stars and max_issues: counts and event datetimes null; max_forks: count 2 (2021-03-15T13:33:53.000Z – 2021-05-18T20:34:47.000Z)

content:

```python
"""ModelAdmin for MailingList"""
from datetime import datetime

from django.contrib import admin
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
from django.utils.encoding import smart_str
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponseRedirect

from emencia.django.newsletter.models import Contact
from emencia.django.newsletter.models import MailingList
from emencia.django.newsletter.settings import USE_WORKGROUPS
from emencia.django.newsletter.utils.workgroups import request_workgroups
from emencia.django.newsletter.utils.workgroups import request_workgroups_contacts_pk
from emencia.django.newsletter.utils.workgroups import request_workgroups_mailinglists_pk
from emencia.django.newsletter.utils.vcard import vcard_contacts_export_response
from emencia.django.newsletter.utils.excel import ExcelResponse


class MailingListAdmin(admin.ModelAdmin):
    date_hierarchy = 'creation_date'
    list_display = ('creation_date', 'name', 'description',
                    'subscribers_count', 'unsubscribers_count',
                    'exportation_links')
    list_editable = ('name', 'description')
    list_filter = ('creation_date', 'modification_date')
    search_fields = ('name', 'description',)
    filter_horizontal = ['subscribers', 'unsubscribers']
    fieldsets = ((None, {'fields': ('name', 'description',)}),
                 (None, {'fields': ('subscribers',)}),
                 (None, {'fields': ('unsubscribers',)}),
                 )
    actions = ['merge_mailinglist']
    actions_on_top = False
    actions_on_bottom = True

    def queryset(self, request):
        queryset = super(MailingListAdmin, self).queryset(request)
        if not request.user.is_superuser and USE_WORKGROUPS:
            mailinglists_pk = request_workgroups_mailinglists_pk(request)
            queryset = queryset.filter(pk__in=mailinglists_pk)
        return queryset

    def save_model(self, request, mailinglist, form, change):
        workgroups = []
        if not mailinglist.pk and not request.user.is_superuser \
                and USE_WORKGROUPS:
            workgroups = request_workgroups(request)
        mailinglist.save()
        for workgroup in workgroups:
            workgroup.mailinglists.add(mailinglist)

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        if 'subscribers' in db_field.name and not request.user.is_superuser \
                and USE_WORKGROUPS:
            contacts_pk = request_workgroups_contacts_pk(request)
            kwargs['queryset'] = Contact.objects.filter(pk__in=contacts_pk)
        return super(MailingListAdmin, self).formfield_for_manytomany(
            db_field, request, **kwargs)

    def merge_mailinglist(self, request, queryset):
        """Merge multiple mailing list"""
        if queryset.count() == 1:
            self.message_user(request, _('Please select a least 2 mailing list.'))
            return None

        subscribers = {}
        unsubscribers = {}
        for ml in queryset:
            for contact in ml.subscribers.all():
                subscribers[contact] = ''
            for contact in ml.unsubscribers.all():
                unsubscribers[contact] = ''

        when = str(datetime.now()).split('.')[0]
        new_mailing = MailingList(name=_('Merging list at %s') % when,
                                  description=_('Mailing list created by merging at %s') % when)
        new_mailing.save()
        new_mailing.subscribers = subscribers.keys()
        new_mailing.unsubscribers = unsubscribers.keys()

        if not request.user.is_superuser and USE_WORKGROUPS:
            for workgroup in request_workgroups(request):
                workgroup.mailinglists.add(new_mailing)

        self.message_user(request, _('%s succesfully created by merging.') % new_mailing)
        return HttpResponseRedirect(reverse('admin:newsletter_mailinglist_change',
                                            args=[new_mailing.pk]))
    merge_mailinglist.short_description = _('Merge selected mailinglists')

    def exportation_links(self, mailinglist):
        """Display links for exportation"""
        return u'<a href="%s">%s</a> / <a href="%s">%s</a>' % (
            reverse('admin:newsletter_mailinglist_export_excel',
                    args=[mailinglist.pk]), _('Excel'),
            reverse('admin:newsletter_mailinglist_export_vcard',
                    args=[mailinglist.pk]), _('VCard'))
    exportation_links.allow_tags = True
    exportation_links.short_description = _('Export')

    def exportion_vcard(self, request, mailinglist_id):
        """Export subscribers in the mailing in VCard"""
        mailinglist = get_object_or_404(MailingList, pk=mailinglist_id)
        name = 'contacts_%s' % smart_str(mailinglist.name)
        return vcard_contacts_export_response(mailinglist.subscribers.all(), name)

    def exportion_excel(self, request, mailinglist_id):
        """Export subscribers in the mailing in Excel"""
        mailinglist = get_object_or_404(MailingList, pk=mailinglist_id)
        name = 'contacts_%s' % smart_str(mailinglist.name)
        return ExcelResponse(mailinglist.subscribers.all(), name)

    def get_urls(self):
        urls = super(MailingListAdmin, self).get_urls()
        my_urls = patterns('',
                           url(r'^export/vcard/(?P<mailinglist_id>\d+)/$',
                               self.admin_site.admin_view(self.exportion_vcard),
                               name='newsletter_mailinglist_export_vcard'),
                           url(r'^export/excel/(?P<mailinglist_id>\d+)/$',
                               self.admin_site.admin_view(self.exportion_excel),
                               name='newsletter_mailinglist_export_excel'))
        return my_urls + urls
```

Quality signals — avg_line_length 47.063492; max_line_length 96; alphanum_fraction 0.663238. qsc_code_*: num_words 634; num_chars 5,930; mean_word_length 5.987382; frac_words_unique 0.22082; frac_chars_top_2grams 0.021075; top_3grams 0.035827; top_4grams 0.056902; frac_chars_dupe_{5..10}grams 0.301106 / 0.251054 / 0.204162 / 0.204162 / 0.204162 / 0.141201; frac_chars_replacement_symbols 0; frac_chars_digital 0.00265; frac_chars_whitespace 0.236425; size_file_byte 5,930; num_lines 125; num_chars_line_max 97; num_chars_line_mean 47.44; frac_chars_alphabet 0.835689; frac_chars_comments 0.028668; cate_xml_start 0; frac_lines_dupe_lines 0.076923; cate_autogen 0; frac_lines_long_string 0.009615; frac_chars_string_length 0.134112; frac_chars_long_word_length 0.046216; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.076923; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.163462; frac_lines_simplefunc 0; score_lines_no_logic 0.423077; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 12 — hexsha `5f18561e37fb0a33a844c99a5051aea7c8863cea`**

- size: 4,263 · ext: `py` · lang: Python
- path `lib/train/recorder.py`, repo `rurusasu/OrigNet`, head `3b3384cb3d09b52c7c98bb264901285f006e51c1`, licenses `["Apache-2.0"]` (all three blocks)
- max_stars and max_issues: counts and event datetimes null; max_forks: count 1 (2021-09-24T01:24:05.000Z – 2021-09-24T01:24:05.000Z)

content (Japanese docstrings and comments translated to English):

```python
import os
import sys
from collections import deque, defaultdict
from typing import Dict, Union

sys.path.append("../../")

import torch
import torchvision.utils as vutils
from tensorboardX import SummaryWriter


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0

    def update(self, value):
        self.deque.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque))
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count


class Recorder(object):
    def __init__(self, cfg):
        if "record_dir" not in cfg and "resume" not in cfg:
            raise ("The required parameter is not set.")

        # log_dir = os.path.join(pth.DATA_DIR, cfg.task, cfg.record_dir)
        log_dir = cfg.record_dir
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if not cfg.resume:
            os.system("rm -rf {}".format(log_dir))
        self.writer = SummaryWriter(log_dir)

        # scalars
        self.epoch = 0
        self.step = 0
        self.loss_stats = defaultdict(SmoothedValue)
        self.batch_time = SmoothedValue()
        # images
        self.image_stats = defaultdict(object)

    def VisualizeNetwork(self, network: torch.nn, inputs: torch.Tensor) -> None:
        """Builds the data needed to visualize the network structure on TensorBoard.

        Args:
            network (torch.nn): the model to visualize
            inputs (torch.Tensor): input data used to trace the model structure
        """
        self.writer.add_graph(network, inputs)

    def update_image_stats(self, image_stats: Dict) -> None:
        """
        Arg:
            image_stats(Dict[batch_imgs]):
                Each value stored in the dict is either
                * a mini-batch tensor of 4D shape (B x C x H x W), or
                * a list of images that are all the same size.
        """
        for k, v in image_stats.items():
            self.image_stats[k] = v.detach().cpu()

    def update_loss_stats(self, loss_dict: Dict) -> None:
        for k, v in loss_dict.items():
            self.loss_stats[k].update(v.detach().cpu())

    def record(
        self,
        prefix,
        step: int = -1,
        loss_stats: Union[Dict, None] = None,
        image_stats: Union[Dict, None] = None,
    ):
        pattern = prefix + "/{}"
        step = step if step >= 0 else self.step
        loss_stats = loss_stats if loss_stats else self.loss_stats
        image_stats = image_stats if image_stats else self.image_stats

        for k, v in loss_stats.items():
            if isinstance(v, SmoothedValue):
                self.writer.add_scalar(pattern.format(k), v.median, step)
            else:
                self.writer.add_scalar(pattern.format(k), v, step)

        for k, v in self.image_stats.items():
            # When the image is RGB with values in the [0, 1] range
            if len(v.size()) == 3:
                b_size, h, w = v.size()[0], v.size()[1], v.size()[2]
                v = v.view(b_size, -1, h, w)
            v = v.float() if v.dtype != torch.float32 else v
            self.writer.add_image(
                pattern.format(k), vutils.make_grid(v, value_range=[0, 1]), step
            )
        del loss_stats

    def state_dict(self):
        scalar_dict = {}
        scalar_dict["step"] = self.step
        return scalar_dict

    def load_state_dict(self, scalar_dict):
        self.step = scalar_dict["step"]

    def __str__(self):
        loss_state = []
        for k, v in self.loss_stats.items():
            loss_state.append("{}: {:.4f}".format(k, v.avg))
        loss_state = " ".join(loss_state)

        recording_state = " ".join(
            ["epoch: {}", "step: {}", "{}", "batch_time: {:.3f} sec."]
        )
        return recording_state.format(
            self.epoch,
            self.step,
            loss_state,
            # self.data_time.avg,
            self.batch_time.avg,
        )


def make_recorder(cfg):
    return Recorder(cfg)
```

Quality signals — avg_line_length 29; max_line_length 80; alphanum_fraction 0.569317. qsc_code_*: num_words 539; num_chars 4,263; mean_word_length 4.352505; frac_words_unique 0.274583; frac_chars_top_2grams 0.051151; top_3grams 0.029838; top_4grams 0.014919; frac_chars_dupe_{5..10}grams 0.116795 / 0.059676 / 0.059676 / 0.059676 / 0.030691 / 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.008172; frac_chars_whitespace 0.311049; size_file_byte 4,263; num_lines 146; num_chars_line_max 81; num_chars_line_mean 29.19863; frac_chars_alphabet 0.790603; frac_chars_comments 0.113535; cate_xml_start 0; frac_lines_dupe_lines 0.05102; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.035997; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.142857; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.071429; frac_lines_simplefunc 0.020408; score_lines_no_logic 0.295918; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
**Record 13 — hexsha `5f1970116641c3a579674b0a3cde7a6940267ce4`**

- size: 5,642 · ext: `py` · lang: Python
- max_stars / max_issues / max_forks: path `scrapytest/spiders.py`, repo `coderatchet/scrapy-test`, head `4f5febfca05d267dc98df94e65a403210ce39d81`, licenses `["Apache-2.0"]`; all counts and event datetimes null

content:

```python
import logging
import re
from datetime import datetime

import scrapy
from scrapy.http import Response

# noinspection PyUnresolvedReferences
import scrapytest.db
from scrapytest.config import config
from scrapytest.types import Article
from scrapytest.utils import merge_dict

log = logging.getLogger(__name__)


class GuardianNewsSpider(scrapy.spiders.CrawlSpider):
    """ Spider that crawls over the Guardian news website"""
    name = "guardian"
    _user_config = {}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if hasattr(self, '_user_config'):
            self._config = merge_dict(self._user_config, config['guardian_spider'])
        else:
            self._config = config['guardian_spider']

    @classmethod
    def update_settings(cls, settings):
        super(GuardianNewsSpider, cls).update_settings(settings)
        # super impose user cmd line args onto the current spider configuration.
        if 'custom_guardian_config' in settings:
            cls._user_config = settings.get('custom_guardian_config')

    def start_requests(self):
        """
        generator for requesting the content from each of the main news collection entry points
        """
        urls = ['http://{host}/{path}'.format(host=self._config['host'], path=path) for path in
                self._config['collection_paths']]
        for url in urls:
            max_depth = self._config['max_depth']
            yield scrapy.Request(url=url, callback=lambda response: self._parse_news_list(response, max_depth))

    def _parse_news_list(self, response: Response, depth=10):
        """
        handle the raw html
        :param depth: maximum depth we should search for articles
        :param response: the top level news response
        """
        log.debug("Parsing news list link: {}".format(response.url))
        for link in self._article_links(response):
            link = response.urljoin(link)
            yield scrapy.Request(url=link, callback=self._parse_article_link)

        # if next link exists and depth not exceeded, visit next link and yield results.
        next_page = response.css(self._config['next_page_selector']).extract_first()
        # we keep iterating through until our maximum depth is reached.
        if next_page is not None and depth > 0:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(url=next_page, callback=lambda list_response: self._parse_news_list(list_response,
                                                                                                     depth - 1))

    def _parse_article_link(self, article: Response):
        """
        parses the article's main page
        :param Response article: top level article page.
        should search for existing article and store if not found.
        """
        import re
        # some author elements have clickable links with the name and picture of author
        author_raw = article.css(self._config['author_selector'])
        log.debug("author_raw: {}".format(author_raw.extract_first()))
        try:
            if author_raw.css('a').extract_first() is not None:
                author = author_raw.css('a::text').extract_first()
            else:
                author = author_raw.css('*::text').extract_first()
            author = re.split(r"-", author)[0].strip()
        except:
            author = "The Guardian"
        log.debug("parsed author name: {}".format(author))
        # author is in format of "name - email"
        date_time_string = article.css(self._config['date_time_selector']).extract_first()
        # remove the ':' from the date string as sftptime does not support this
        sub = re.sub(r':([0-9]{2})$', r'\1', date_time_string)
        date_time = datetime.strptime(sub, self._config['date_time_format'])

        # assemble the article object
        title = article.css(self._config['title_selector']).extract_first().strip()
        data = {
            'title': title,
            'author': author,
            'date_time': date_time,
            'content': '\n'.join(article.css(self._config['content_selector']).extract()).strip()
        }

        # don't save the article if nothing exists in it.
        if not data['content'].strip() == '':
            # persist it if it doesn't exist yet
            log.debug("Searching for existing article with the title '{}' and date_time '{}'".format(title, date_time))
            existing_article = Article.objects(title__exact=title, date_time__exact=date_time).first()
            if existing_article is None:
                log.debug("Article not found for {} - {}, saving new article: {}".format(title, date_time, data))
                new_article = Article(**data)
                new_article.save()
            else:
                log.debug("Article found, not saving")

    @staticmethod
    def _parse_author_tag(author_tag: Response):
        """
        parse the author section for the name
        :param author_tag: the author/div tag to parse
        :return: the name of the author
        """
        text = author_tag.css('.story-header__author-name::text').extract_first()
        return re.split(r"-", text)[0].strip()

    def _article_links(self, news_list_response: Response):
        """
        Generator for iterating through articles
        :param scrapy.http.Response news_list_response: a top level news list page
        :yields: the next article in the news list
        """
        for article_link in news_list_response.css(self._config['article_list_item_link_selector']):
            yield article_link.extract()
```

Quality signals — avg_line_length 40.589928; max_line_length 119; alphanum_fraction 0.63045. qsc_code_*: num_words 690; num_chars 5,642; mean_word_length 4.957971; frac_words_unique 0.268116; frac_chars_top_2grams 0.035077; top_3grams 0.0228; top_4grams 0.023385; frac_chars_dupe_5grams 0.014616, dupe_{6..10}grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.002419; frac_chars_whitespace 0.267281; size_file_byte 5,642; num_lines 138; num_chars_line_max 120; num_chars_line_mean 40.884058; frac_chars_alphabet 0.825109; frac_chars_comments 0.216767; cate_xml_start 0; frac_lines_dupe_lines 0.063291; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.139915; frac_chars_long_word_length 0.025331; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0. qsc_codepython_*: cate_ast 1; frac_lines_func_ratio 0.088608; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.126582; frac_lines_simplefunc 0; score_lines_no_logic 0.265823; frac_lines_print 0. Duplicate (non-suffixed) qsc_* columns: all 0/null as in record 1; effective 1; hits 0.
5f1c1c275c8674c941378ad94c7d52f05e79ddd2
| 14,861
|
py
|
Python
|
userge/plugins/admin/gban.py
|
wildyvpn-network/bot
|
87459495000bd6004b8f62a9cb933c164da9ef29
|
[
"MIT"
] | null | null | null |
userge/plugins/admin/gban.py
|
wildyvpn-network/bot
|
87459495000bd6004b8f62a9cb933c164da9ef29
|
[
"MIT"
] | null | null | null |
userge/plugins/admin/gban.py
|
wildyvpn-network/bot
|
87459495000bd6004b8f62a9cb933c164da9ef29
|
[
"MIT"
] | null | null | null |
""" setup gban """
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved
import json
import asyncio
from typing import Optional
import aiohttp
import spamwatch
from spamwatch.types import Ban
from pyrogram.errors.exceptions.bad_request_400 import (
ChatAdminRequired, UserAdminInvalid, ChannelInvalid)
from userge import userge, Message, Config, get_collection, filters, pool
SAVED_SETTINGS = get_collection("CONFIGS")
GBAN_USER_BASE = get_collection("GBAN_USER")
WHITELIST = get_collection("WHITELIST_USER")
CHANNEL = userge.getCLogger(__name__)
LOG = userge.getLogger(__name__)
async def _init() -> None:
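    # restore the persisted antispam toggle when the plugin loads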
s_o = await SAVED_SETTINGS.find_one({'_id': 'ANTISPAM_ENABLED'})
if s_o:
Config.ANTISPAM_SENTRY = s_o['data']
@userge.on_cmd("antispam", about={
'header': "enable / disable antispam",
'description': "Toggle API Auto Bans"}, allow_channels=False)
async def antispam_(message: Message):
""" enable / disable antispam """
if Config.ANTISPAM_SENTRY:
Config.ANTISPAM_SENTRY = False
await message.edit("`antispam disabled !`", del_in=3)
else:
Config.ANTISPAM_SENTRY = True
await message.edit("`antispam enabled !`", del_in=3)
await SAVED_SETTINGS.update_one(
{'_id': 'ANTISPAM_ENABLED'}, {"$set": {'data': Config.ANTISPAM_SENTRY}}, upsert=True)
@userge.on_cmd("gban", about={
'header': "Globally Ban A User",
'description': "Adds User to your GBan List. "
"Bans a Globally Banned user if they join or message. "
"[NOTE: Works only in groups where you are admin.]",
'examples': "{tr}gban [userid | reply] [reason for gban] (mandatory)"},
allow_channels=False, allow_bots=False)
async def gban_user(message: Message):
""" ban a user globally """
await message.edit("`GBanning...`")
user_id, reason = message.extract_user_and_text
if not user_id:
await message.edit(
"`no valid user_id or message specified,`"
"`don't do .help gban for more info. "
"Coz no one's gonna help ya`(。ŏ_ŏ) ⚠", del_in=0)
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
if not reason:
await message.edit(
f"**#Aborted**\n\n**Gbanning** of [{firstname}](tg://user?id={user_id}) "
"Aborted coz No reason of gban provided by banner", del_in=5)
return
user_id = get_mem['id']
if user_id == (await message.client.get_me()).id:
await message.edit(r"LoL. Why would I GBan myself ¯\(°_o)/¯")
return
if user_id in Config.SUDO_USERS:
await message.edit(
"That user is in my Sudo List, Hence I can't ban him.\n\n"
"**Tip:** Remove them from Sudo List and try again. (¬_¬)", del_in=5)
return
found = await GBAN_USER_BASE.find_one({'user_id': user_id})
if found:
await message.edit(
"**#Already_GBanned**\n\nUser Already Exists in My Gban List.\n"
f"**Reason For GBan:** `{found['reason']}`", del_in=5)
return
await message.edit(r"\\**#GBanned_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n**Reason:** `{reason}`")
# TODO: can we add something like "GBanned by {any_sudo_user_fname}"
if message.client.is_bot:
chats = [message.chat]
else:
chats = await message.client.get_common_chats(user_id)
gbanned_chats = []
for chat in chats:
try:
await chat.kick_member(user_id)
gbanned_chats.append(chat.id)
await CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {chat.title}\n"
f"**Chat ID:** `{chat.id}`\n"
f"**Reason:** `{reason}`\n\n$GBAN #id{user_id}")
except (ChatAdminRequired, UserAdminInvalid, ChannelInvalid):
pass
await GBAN_USER_BASE.insert_one({'firstname': firstname,
'user_id': user_id,
'reason': reason,
'chat_ids': gbanned_chats})
if message.reply_to_message:
await CHANNEL.fwd_msg(message.reply_to_message)
await CHANNEL.log(f'$GBAN #prid{user_id} ⬆️')
LOG.info("G-Banned %s", str(user_id))
@userge.on_cmd("ungban", about={
'header': "Globally Unban an User",
'description': "Removes an user from your Gban List",
'examples': "{tr}ungban [userid | reply]"},
allow_channels=False, allow_bots=False)
async def ungban_user(message: Message):
""" unban a user globally """
await message.edit("`UnGBanning...`")
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await GBAN_USER_BASE.find_one({'user_id': user_id})
if not found:
await message.err("User Not Found in My Gban List")
return
if 'chat_ids' in found:
for chat_id in found['chat_ids']:
try:
await userge.unban_chat_member(chat_id, user_id)
await CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n\n"
f"$UNGBAN #id{user_id}")
except (ChatAdminRequired, UserAdminInvalid, ChannelInvalid):
pass
await message.edit(r"\\**#UnGbanned_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`")
await GBAN_USER_BASE.delete_one({'firstname': firstname, 'user_id': user_id})
LOG.info("UnGbanned %s", str(user_id))
@userge.on_cmd("glist", about={
'header': "Get a List of Gbanned Users",
'description': "Get Up-to-date list of users Gbanned by you.",
'examples': "Lol. Just type {tr}glist"},
allow_channels=False)
async def list_gbanned(message: Message):
""" vies gbanned users """
msg = ''
async for c in GBAN_USER_BASE.find():
msg += ("**User** : " + str(c['firstname']) + "-> with **User ID** -> "
+ str(c['user_id']) + " is **GBanned for** : " + str(c['reason']) + "\n\n")
await message.edit_or_send_as_file(
f"**--Globally Banned Users List--**\n\n{msg}" if msg else "`glist empty!`")
@userge.on_cmd("whitelist", about={
'header': "Whitelist a User",
'description': "Use whitelist to add users to bypass API Bans",
'useage': "{tr}whitelist [userid | reply to user]",
'examples': "{tr}whitelist 5231147869"},
allow_channels=False, allow_bots=False)
async def whitelist(message: Message):
""" add user to whitelist """
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await WHITELIST.find_one({'user_id': user_id})
if found:
await message.err("User Already in My WhiteList")
return
await asyncio.gather(
WHITELIST.insert_one({'firstname': firstname, 'user_id': user_id}),
message.edit(
r"\\**#Whitelisted_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`"),
CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {message.chat.title}\n"
f"**Chat ID:** `{message.chat.id}`\n\n$WHITELISTED #id{user_id}")
)
LOG.info("WhiteListed %s", str(user_id))
@userge.on_cmd("rmwhite", about={
'header': "Removes a User from Whitelist",
'description': "Use it to remove users from WhiteList",
'useage': "{tr}rmwhite [userid | reply to user]",
'examples': "{tr}rmwhite 5231147869"},
allow_channels=False, allow_bots=False)
async def rmwhitelist(message: Message):
""" remove a user from whitelist """
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await WHITELIST.find_one({'user_id': user_id})
if not found:
await message.err("User Not Found in My WhiteList")
return
await asyncio.gather(
WHITELIST.delete_one({'firstname': firstname, 'user_id': user_id}),
message.edit(
r"\\**#Removed_Whitelisted_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`"),
CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {message.chat.title}\n"
f"**Chat ID:** `{message.chat.id}`\n\n$RMWHITELISTED #id{user_id}")
)
LOG.info("WhiteListed %s", str(user_id))
@userge.on_cmd("listwhite", about={
'header': "Get a List of Whitelisted Users",
'description': "Get Up-to-date list of users WhiteListed by you.",
'examples': "Lol. Just type {tr}listwhite"},
allow_channels=False)
async def list_white(message: Message):
""" list whitelist """
msg = ''
async for c in WHITELIST.find():
msg += ("**User** : " + str(c['firstname']) + "-> with **User ID** -> " +
str(c['user_id']) + "\n\n")
await message.edit_or_send_as_file(
f"**--Whitelisted Users List--**\n\n{msg}" if msg else "`whitelist empty!`")
@userge.on_filters(filters.group & filters.new_chat_members, group=1, check_restrict_perm=True)
async def gban_at_entry(message: Message):
""" handle gbans """
chat_id = message.chat.id
for user in message.new_chat_members:
user_id = user.id
first_name = user.first_name
if await WHITELIST.find_one({'user_id': user_id}):
continue
gbanned = await GBAN_USER_BASE.find_one({'user_id': user_id})
if gbanned:
if 'chat_ids' in gbanned:
chat_ids = gbanned['chat_ids']
chat_ids.append(chat_id)
else:
chat_ids = [chat_id]
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{gbanned['reason']}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** {gbanned['reason']}\n**Quick Action:** "
f"Banned in {message.chat.title}"),
GBAN_USER_BASE.update_one(
{'user_id': user_id, 'firstname': first_name},
{"$set": {'chat_ids': chat_ids}}, upsert=True)
)
elif Config.ANTISPAM_SENTRY:
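            # ask the Combot Anti-Spam (CAS) API whether this user is flagged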
async with aiohttp.ClientSession() as ses:
async with ses.get(f'https://api.cas.chat/check?user_id={user_id}') as resp:
res = json.loads(await resp.text())
if res['ok']:
reason = ' | '.join(
res['result']['messages']) if 'result' in res else None
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
"**$SENTRY CAS Federation Ban**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{reason}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
"**$SENRTY #CAS BAN**"
f"\n**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{reason}`\n**Quick Action:**"
f" Banned in {message.chat.title}\n\n$AUTOBAN #id{user_id}")
)
elif Config.SPAM_WATCH_API:
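                # fall back to the SpamWatch federation ban list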
intruder = await _get_spamwatch_data(user_id)
if intruder:
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
"**$SENTRY SpamWatch Federation Ban**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{intruder.reason}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
"**$SENRTY #SPAMWATCH_API BAN**"
f"\n**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{intruder.reason}`\n"
f"**Quick Action:** Banned in {message.chat.title}\n\n"
f"$AUTOBAN #id{user_id}")
)
message.continue_propagation()
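# the SpamWatch client is synchronous, so the lookup runs in a worker thread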
@pool.run_in_thread
def _get_spamwatch_data(user_id: int) -> Optional[Ban]:
return spamwatch.Client(Config.SPAM_WATCH_API).get_ban(user_id)
| 43.200581
| 95
| 0.554942
| 1,872
| 14,861
| 4.235577
| 0.15438
| 0.093833
| 0.052466
| 0.054484
| 0.527431
| 0.507252
| 0.462858
| 0.447471
| 0.404969
| 0.365115
| 0
| 0.003963
| 0.286926
| 14,861
| 343
| 96
| 43.326531
| 0.743418
| 0.024965
| 0
| 0.418605
| 0
| 0.009967
| 0.345096
| 0.098429
| 0
| 0
| 0
| 0.002915
| 0
| 1
| 0.003322
| false
| 0.009967
| 0.026578
| 0.003322
| 0.069767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f289aaa4dfd07380db23d1700f9b70a80d10934
| 5,266
|
py
|
Python
|
oaipmh/__init__.py
|
scieloorg/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 2
|
2019-03-16T04:40:29.000Z
|
2022-03-10T14:50:21.000Z
|
oaipmh/__init__.py
|
DalavanCloud/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 27
|
2017-08-23T17:11:57.000Z
|
2021-06-01T21:57:31.000Z
|
oaipmh/__init__.py
|
DalavanCloud/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 2
|
2017-06-12T16:18:35.000Z
|
2019-03-16T04:40:12.000Z
|
import os
import re
from pyramid.config import Configurator
from pyramid.events import NewRequest
from oaipmh import (
repository,
datastores,
sets,
utils,
articlemeta,
entities,
)
from oaipmh.formatters import (
oai_dc,
oai_dc_openaire,
)
METADATA_FORMATS = [
(entities.MetadataFormat(
metadataPrefix='oai_dc',
schema='http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
metadataNamespace='http://www.openarchives.org/OAI/2.0/oai_dc/'),
oai_dc.make_metadata,
lambda x: x),
(entities.MetadataFormat(
metadataPrefix='oai_dc_openaire',
schema='http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
metadataNamespace='http://www.openarchives.org/OAI/2.0/oai_dc/'),
oai_dc_openaire.make_metadata,
oai_dc_openaire.augment_metadata),
]
STATIC_SETS = [
(sets.Set(setSpec='openaire', setName='OpenAIRE'),
datastores.identityview),
]
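# Each entry below is (ini setting name, environment variable, converter, default).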
DEFAULT_SETTINGS = [
('oaipmh.repo.name', 'OAIPMH_REPO_NAME', str,
'SciELO - Scientific Electronic Library Online'),
('oaipmh.repo.baseurl', 'OAIPMH_REPO_BASEURL', str,
'http://www.scielo.br/oai/scielo-oai.php'),
('oaipmh.repo.protocolversion', 'OAIPMH_REPO_PROTOCOLVERSION', str,
'2.0'),
('oaipmh.repo.adminemail', 'OAIPMH_REPO_ADMINEMAIL', str,
'scielo@scielo.org'),
('oaipmh.repo.earliestdatestamp', 'OAIPMH_REPO_EARLIESTDATESTAMP',
utils.parse_date, '1998-08-01'),
('oaipmh.repo.deletedrecord', 'OAIPMH_REPO_DELETEDRECORD', str,
'no'),
('oaipmh.repo.granularity', 'OAIPMH_REPO_GRANULARITY', str,
'YYYY-MM-DD'),
('oaipmh.repo.granularity_regex', 'OAIPMH_REPO_GRANULARITY_REGEX',
re.compile, r'^(\d{4})-(\d{2})-(\d{2})$'),
('oaipmh.collection', 'OAIPMH_COLLECTION', str,
'scl'),
('oaipmh.listslen', 'OAIPMH_LISTSLEN', int,
100),
('oaipmh.chunkedresumptiontoken.chunksize',
'OAIPMH_CHUNKEDRESUMPTIONTOKEN_CHUNKSIZE', int, 12),
('oaipmh.articlemeta_uri', 'OAIPMH_ARTICLEMETA_URI', str,
'articlemeta.scielo.org:11621'),
]
def parse_settings(settings):
"""Analisa e retorna as configurações da app com base no arquivo .ini e env.
As variáveis de ambiente possuem precedência em relação aos valores
definidos no arquivo .ini.
"""
parsed = {}
cfg = list(DEFAULT_SETTINGS)
for name, envkey, convert, default in cfg:
value = os.environ.get(envkey, settings.get(name, default))
if convert is not None:
value = convert(value)
parsed[name] = value
return parsed
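# For example (hypothetical values): with OAIPMH_LISTSLEN=200 exported in the
# environment, parse_settings({'oaipmh.listslen': '100'}) yields 200 for
# 'oaipmh.listslen', since environment variables win over the .ini file.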
def get_datastore(settings):
client = articlemeta.get_articlemeta_client(settings['oaipmh.collection'],
domain=settings['oaipmh.articlemeta_uri'])
return articlemeta.ArticleMeta(client)
def get_repository_meta(settings):
repometa = repository.RepositoryMeta(
repositoryName=settings['oaipmh.repo.name'],
baseURL=settings['oaipmh.repo.baseurl'],
protocolVersion=settings['oaipmh.repo.protocolversion'],
adminEmail=settings['oaipmh.repo.adminemail'],
earliestDatestamp=settings['oaipmh.repo.earliestdatestamp'],
deletedRecord=settings['oaipmh.repo.deletedrecord'],
granularity=settings['oaipmh.repo.granularity'])
return repometa
def get_granularity_validator(settings):
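    # build a validator that checks datestamps against the configured
    # granularity regex (by default ^(\d{4})-(\d{2})-(\d{2})$)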
def validate(date_time):
return bool(settings['oaipmh.repo.granularity_regex'].fullmatch(
date_time))
return validate
def get_setsregistry(settings):
registry = articlemeta.ArticleMetaSetsRegistry(
datastore=get_datastore(settings))
for metadata, view in STATIC_SETS:
registry.add(metadata, view)
return registry
def get_resultpage_factory(settings):
return repository.ResultPageFactory(ds=get_datastore(settings),
setsreg=get_setsregistry(settings),
listslen=settings['oaipmh.listslen'],
chunk_size=settings['oaipmh.chunkedresumptiontoken.chunksize'],
granularity_validator=get_granularity_validator(settings),
earliest_datestamp=settings['oaipmh.repo.earliestdatestamp'])
def add_oai_repository(event):
settings = event.request.registry.settings
event.request.repository = repository.Repository(
get_repository_meta(settings), get_datastore(settings),
get_granularity_validator(settings),
resultpage_factory=get_resultpage_factory(settings))
for metadata, formatter, augmenter in METADATA_FORMATS:
event.request.repository.add_metadataformat(metadata, formatter,
augmenter)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
settings.update(parse_settings(settings))
config = Configurator(settings=settings)
config.add_subscriber(add_oai_repository, NewRequest)
# URL patterns
config.add_route('root', '/')
config.scan()
return config.make_wsgi_app()
| 33.329114
| 80
| 0.657615
| 542
| 5,266
| 6.219557
| 0.308118
| 0.074162
| 0.053397
| 0.026105
| 0.088401
| 0.056363
| 0.056363
| 0.056363
| 0.056363
| 0.056363
| 0
| 0.007635
| 0.229016
| 5,266
| 157
| 81
| 33.541401
| 0.82266
| 0.045196
| 0
| 0.051282
| 0
| 0
| 0.255898
| 0.145942
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.051282
| 0.017094
| 0.196581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f2a79411bbedc2b8b017ecceddbf86f3d9843cc
| 1,613
|
py
|
Python
|
create_training_data.py
|
nasi-famnit/HOur-flight
|
96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9
|
[
"MIT"
] | 1
|
2016-04-24T10:49:52.000Z
|
2016-04-24T10:49:52.000Z
|
create_training_data.py
|
nasi-famnit/HOur-flight
|
96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9
|
[
"MIT"
] | null | null | null |
create_training_data.py
|
nasi-famnit/HOur-flight
|
96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9
|
[
"MIT"
] | null | null | null |
import flightdata
import weatherparser
import airportdata
import pandas as pd
from datetime import datetime
from pathlib import Path
flights = flightdata.read_csv('data/unpacked/flights/On_Time_On_Time_Performance_2016_1.csv')
fname = 'data/processed/training/training{:04}_v1.csv'
prev_time = datetime.now()
df = pd.DataFrame()
current_csv_name = Path(fname.format(1))
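# Iterate over the flights, writing the enriched rows out in chunks of 1000;
# chunks whose CSV already exists on disk are skipped, so the script can be
# re-run and resume roughly where it left off.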
for idx, flight in flights.iterrows():
idx = idx+1
if idx%100 == 0:
now_time = datetime.now()
delta = now_time - prev_time
print('Processing file', idx, ',', 100.0/delta.total_seconds(), 'per second')
prev_time = now_time
if idx % 1000 == 0:
ff = fname.format(idx//1000)
current_csv_name = Path(fname.format(1+idx//1000))
print('Writing to', ff)
df.to_csv(ff)
else:
if current_csv_name.exists():
continue
ff = flight[['Year', 'Month', 'DayofMonth', 'DayOfWeek', 'UniqueCarrier', 'Origin', 'Dest', 'CRSDepTime', 'DepDelayMinutes', 'DepDel15', 'CRSArrTime', 'ArrTime', 'ArrDelay', 'ArrDelayMinutes', 'ArrDel15', 'CRSElapsedTime', 'ActualElapsedTime', 'Distance', 'WeatherDelay']]
weather_origin = weatherparser.get_weather_conditions(airportdata.from_faa(ff.Origin), ff.CRSDepTime)
weather_dest = weatherparser.get_weather_conditions(airportdata.from_faa(ff.Dest), ff.CRSArrTime)
    if (weather_origin is None) or (weather_dest is None):
continue
line = pd.DataFrame(pd.concat([ff, weather_origin, weather_dest])).T
if idx%1000==1:
df = line
else:
df = df.append(line)
| 37.511628
| 276
| 0.675139
| 205
| 1,613
| 5.146341
| 0.419512
| 0.02654
| 0.03981
| 0.034123
| 0.157346
| 0.157346
| 0.157346
| 0.100474
| 0
| 0
| 0
| 0.031563
| 0.194668
| 1,613
| 42
| 277
| 38.404762
| 0.7806
| 0
| 0
| 0.111111
| 0
| 0
| 0.200248
| 0.064476
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f2e54facc35f3d3aca215fe2e3b9ff2dc7350a5
| 4,374
|
py
|
Python
|
metadrive/config.py
|
wefindx/metadrive
|
576d240065b61b0187afc249819b705c06308d05
|
[
"Apache-2.0"
] | 7
|
2019-02-04T18:31:06.000Z
|
2021-12-22T17:08:55.000Z
|
metadrive/config.py
|
wefindx/metadrive
|
576d240065b61b0187afc249819b705c06308d05
|
[
"Apache-2.0"
] | 11
|
2019-04-30T18:19:33.000Z
|
2019-08-15T19:56:37.000Z
|
metadrive/config.py
|
wefindx/metadrive
|
576d240065b61b0187afc249819b705c06308d05
|
[
"Apache-2.0"
] | 2
|
2019-01-26T03:17:25.000Z
|
2019-04-15T18:35:56.000Z
|
import os
import imp
from pathlib import Path
import configparser
import requests
import gpgrecord
config = configparser.ConfigParser()
INSTALLED = imp.find_module('metadrive')[1]
HOME = str(Path.home())
DEFAULT_LOCATION = os.path.join(HOME,'.metadrive')
CONFIG_LOCATION = os.path.join(DEFAULT_LOCATION, 'config')
CREDENTIALS_DIR = os.path.join(DEFAULT_LOCATION, '-/+')
SESSIONS_DIR = os.path.join(DEFAULT_LOCATION, 'sessions')
DATA_DIR = os.path.join(DEFAULT_LOCATION, 'data')
SITES_DIR = os.path.join(HOME, 'Sites')
KNOWN_DRIVERS = os.path.join(DEFAULT_LOCATION, 'known_drivers')
SUBTOOLS = [
fn.rsplit('.py')[0]
for fn in os.listdir(INSTALLED)
if fn.startswith('_') and fn.endswith('.py') and not fn == '__init__.py'
]
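# SUBTOOLS collects the bundled private modules (files named _*.py), each of
# which gets its own sessions subdirectory below.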
def ENSURE_SESSIONS():
if not os.path.exists(SESSIONS_DIR):
os.makedirs(SESSIONS_DIR)
for subtool in SUBTOOLS:
subtool_profiles_path = os.path.join(SESSIONS_DIR, subtool)
if not os.path.exists(subtool_profiles_path):
if subtool != '__init__':
os.makedirs(subtool_profiles_path)
ENSURE_SESSIONS()
def ENSURE_DATA():
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
ENSURE_DATA()
def ENSURE_SITES():
if not os.path.exists(SITES_DIR):
os.makedirs(SITES_DIR)
ENSURE_SITES()
if not os.path.exists(CONFIG_LOCATION):
username = input("Type your GitHub username: ")
config['GITHUB'] = {'USERNAME': username}
config['PROXIES'] = {'http': '', 'https': ''}
config['DRIVERS'] = {'auto_upgrade': False}
config['SELENIUM'] = {'headless': False}
config['DRIVER_BACKENDS'] = {
'CHROME': '/usr/bin/chromedriver' # e.g., or http://0.0.0.0:4444/wd/hub, etc.
}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
config.read(CONFIG_LOCATION)
GITHUB_USER = config['GITHUB']['USERNAME']
REPO_PATH = os.path.join(DEFAULT_LOCATION, '-')
DRIVERS_PATH = os.path.join(DEFAULT_LOCATION, 'drivers')
CHROME_DRIVER = config['DRIVER_BACKENDS']['CHROME']
SELENIUM = config['SELENIUM']
if str(config['DRIVERS']['auto_upgrade']) == 'False':
AUTO_UPGRADE_DRIVERS = False
elif str(config['DRIVERS']['auto_upgrade']) == 'True':
AUTO_UPGRADE_DRIVERS = True
elif str(config['DRIVERS']['auto_upgrade']) == 'None':
AUTO_UPGRADE_DRIVERS = None
else:
AUTO_UPGRADE_DRIVERS = False
def ENSURE_REPO():
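    # make sure a repository named `-` exists on the user's GitHub, sync it
    # locally, and push the credentials directory skeleton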
while not requests.get('https://github.com/{}/-'.format(GITHUB_USER)).ok:
input("Please, create repository named `-` on your GitHub. Type [ENTER] to continue... ")
if os.path.exists(REPO_PATH):
# git pull #
os.system('cd {}; git pull'.format(REPO_PATH))
else:
# git clone #
os.system('cd {}; git clone {}'.format(
DEFAULT_LOCATION,
'git@github.com:{}/-.git'.format(GITHUB_USER)))
if not os.path.exists(CREDENTIALS_DIR):
os.makedirs(CREDENTIALS_DIR)
os.system("cd {}; git add .; git commit -m 'credentials (+)'; git push origin master".format(
REPO_PATH
))
def ENSURE_GPG():
config.read(CONFIG_LOCATION)
if 'GPG' in config.keys():
return config['GPG']['KEY']
print('Choose your GPG key for encrypting credentials:')
KEY_LIST = gpgrecord.list_recipients()
for i, key in enumerate(KEY_LIST):
print('{id}. {uid} {fingerprint}'.format(
id=i+1,
uid=key['uids'],
fingerprint=key['fingerprint']
))
i = int(input('Type key order in the list: ')) - 1
GPG_KEY = KEY_LIST[i]['fingerprint']
config['GPG'] = {'KEY': GPG_KEY}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
return GPG_KEY
def ENSURE_PROXIES():
config.read(CONFIG_LOCATION)
if 'PROXIES' in config.keys():
        return {key: 'socks5h://' + config['PROXIES'][key]
                for key in config['PROXIES'] if config['PROXIES'][key]}
    SOCKS5 = input('Type in the default socks5 proxy (e.g., 127.0.0.1:9999) (leave empty to default to direct connections) [ENTER]: ')
config['PROXIES'] = {
'http': SOCKS5,
'https': SOCKS5
}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
    return {key: 'socks5h://' + config['PROXIES'][key]
            for key in config['PROXIES'] if config['PROXIES'][key]}
| 29.355705
| 131
| 0.645405
| 566
| 4,374
| 4.833922
| 0.242049
| 0.037281
| 0.03655
| 0.043494
| 0.310307
| 0.225877
| 0.192982
| 0.119883
| 0.119883
| 0.119883
| 0
| 0.008014
| 0.201189
| 4,374
| 148
| 132
| 29.554054
| 0.775043
| 0.013946
| 0
| 0.174312
| 0
| 0.009174
| 0.211333
| 0.010218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055046
| false
| 0
| 0.055046
| 0
| 0.146789
| 0.036697
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f2efcc18abcce7bbf0e01ac810dce1793930f16
| 1,272
|
py
|
Python
|
project/matching/migrations/0001_initial.py
|
Project-EPIC/emergencypetmatcher
|
72c9eec228e33c9592243266e048dc02824d778d
|
[
"MIT"
] | null | null | null |
project/matching/migrations/0001_initial.py
|
Project-EPIC/emergencypetmatcher
|
72c9eec228e33c9592243266e048dc02824d778d
|
[
"MIT"
] | null | null | null |
project/matching/migrations/0001_initial.py
|
Project-EPIC/emergencypetmatcher
|
72c9eec228e33c9592243266e048dc02824d778d
|
[
"MIT"
] | 1
|
2021-06-24T01:50:06.000Z
|
2021-06-24T01:50:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reporting', '0002_remove_petreport_revision_number'),
('socializing', '__first__'),
]
operations = [
migrations.CreateModel(
name='PetMatch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('proposed_date', models.DateTimeField(auto_now_add=True)),
('has_failed', models.BooleanField(default=False)),
('down_votes', models.ManyToManyField(related_name='down_votes_related', to='socializing.UserProfile')),
('found_pet', models.ForeignKey(related_name='found_pet_related', default=None, to='reporting.PetReport')),
('lost_pet', models.ForeignKey(related_name='lost_pet_related', default=None, to='reporting.PetReport')),
('proposed_by', models.ForeignKey(related_name='proposed_by_related', to='socializing.UserProfile')),
('up_votes', models.ManyToManyField(related_name='up_votes_related', to='socializing.UserProfile')),
],
),
]
| 43.862069
| 123
| 0.644654
| 125
| 1,272
| 6.24
| 0.48
| 0.070513
| 0.076923
| 0.119231
| 0.369231
| 0.105128
| 0.105128
| 0
| 0
| 0
| 0
| 0.00502
| 0.216981
| 1,272
| 28
| 124
| 45.428571
| 0.778112
| 0.016509
| 0
| 0
| 0
| 0
| 0.272218
| 0.084868
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f31588bc153ffcf5b70c1b521bc861fcd11b513
| 5,123
|
py
|
Python
|
lithops/localhost/local_handler.py
|
GEizaguirre/lithops
|
296451ea3ebf630a5dca2f17248387e6bb1ee5b6
|
[
"Apache-2.0"
] | null | null | null |
lithops/localhost/local_handler.py
|
GEizaguirre/lithops
|
296451ea3ebf630a5dca2f17248387e6bb1ee5b6
|
[
"Apache-2.0"
] | null | null | null |
lithops/localhost/local_handler.py
|
GEizaguirre/lithops
|
296451ea3ebf630a5dca2f17248387e6bb1ee5b6
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import json
import pkgutil
import logging
import uuid
import time
import multiprocessing
from pathlib import Path
from threading import Thread
from types import SimpleNamespace
from multiprocessing import Process, Queue
from lithops.utils import version_str, is_unix_system
from lithops.worker import function_handler
from lithops.config import STORAGE_DIR, JOBS_DONE_DIR
from lithops import __version__
os.makedirs(STORAGE_DIR, exist_ok=True)
os.makedirs(JOBS_DONE_DIR, exist_ok=True)
log_file = os.path.join(STORAGE_DIR, 'local_handler.log')
logging.basicConfig(filename=log_file, level=logging.INFO)
logger = logging.getLogger('handler')
CPU_COUNT = multiprocessing.cpu_count()
def extract_runtime_meta():
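    # report the preinstalled modules and Python version to the invoker via stdout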
runtime_meta = dict()
mods = list(pkgutil.iter_modules())
runtime_meta["preinstalls"] = [entry for entry in sorted([[mod, is_pkg]for _, mod, is_pkg in mods])]
runtime_meta["python_ver"] = version_str(sys.version_info)
print(json.dumps(runtime_meta))
class ShutdownSentinel():
"""Put an instance of this class on the queue to shut it down"""
pass
class LocalhostExecutor:
"""
A wrap-up around Localhost multiprocessing APIs.
"""
def __init__(self, config, executor_id, job_id, log_level):
logging.basicConfig(filename=log_file, level=log_level)
self.log_active = logger.getEffectiveLevel() != logging.WARNING
self.config = config
self.queue = Queue()
self.use_threads = not is_unix_system()
self.num_workers = self.config['lithops'].get('workers', CPU_COUNT)
self.workers = []
sys.stdout = open(log_file, 'a')
sys.stderr = open(log_file, 'a')
if self.use_threads:
for worker_id in range(self.num_workers):
p = Thread(target=self._process_runner, args=(worker_id,))
self.workers.append(p)
p.start()
else:
for worker_id in range(self.num_workers):
p = Process(target=self._process_runner, args=(worker_id,))
self.workers.append(p)
p.start()
        logger.info('ExecutorID {} | JobID {} - Localhost Executor started - {} workers'
                    .format(executor_id, job_id, self.num_workers))
def _process_runner(self, worker_id):
logger.debug('Localhost worker process {} started'.format(worker_id))
while True:
event = self.queue.get(block=True)
if isinstance(event, ShutdownSentinel):
break
act_id = str(uuid.uuid4()).replace('-', '')[:12]
os.environ['__LITHOPS_ACTIVATION_ID'] = act_id
event['extra_env']['__LITHOPS_LOCAL_EXECUTION'] = 'True'
function_handler(event)
def _invoke(self, job, call_id):
payload = {'config': self.config,
'log_level': logging.getLevelName(logger.getEffectiveLevel()),
'func_key': job.func_key,
'data_key': job.data_key,
'extra_env': job.extra_env,
'execution_timeout': job.execution_timeout,
'data_byte_range': job.data_ranges[int(call_id)],
'executor_id': job.executor_id,
'job_id': job.job_id,
'call_id': call_id,
'host_submit_tstamp': time.time(),
'lithops_version': __version__,
'runtime_name': job.runtime_name,
'runtime_memory': job.runtime_memory}
self.queue.put(payload)
def run(self, job_description):
job = SimpleNamespace(**job_description)
for i in range(job.total_calls):
call_id = "{:05d}".format(i)
self._invoke(job, call_id)
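        # put one sentinel per worker so that every consumer shuts down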
for i in self.workers:
self.queue.put(ShutdownSentinel())
def wait(self):
for worker in self.workers:
worker.join()
if __name__ == "__main__":
logger.info('Starting Localhost job handler')
command = sys.argv[1]
logger.info('Received command: {}'.format(command))
if command == 'preinstalls':
extract_runtime_meta()
elif command == 'run':
job_filename = sys.argv[2]
logger.info('Got {} job file'.format(job_filename))
with open(job_filename, 'rb') as jf:
job = SimpleNamespace(**json.load(jf))
logger.info('ExecutorID {} | JobID {} - Starting execution'
.format(job.executor_id, job.job_id))
        localhost_executor = LocalhostExecutor(job.config, job.executor_id,
                                               job.job_id, job.log_level)
        localhost_executor.run(job.job_description)
        localhost_executor.wait()
sentinel = '{}/{}_{}.done'.format(JOBS_DONE_DIR,
job.executor_id.replace('/', '-'),
job.job_id)
Path(sentinel).touch()
logger.info('ExecutorID {} | JobID {} - Execution Finished'
.format(job.executor_id, job.job_id))
| 33.927152
| 104
| 0.610189
| 598
| 5,123
| 4.978261
| 0.294314
| 0.015116
| 0.030568
| 0.026873
| 0.121599
| 0.121599
| 0.089016
| 0.061807
| 0.061807
| 0.039637
| 0
| 0.001892
| 0.277962
| 5,123
| 150
| 105
| 34.153333
| 0.80292
| 0.020886
| 0
| 0.071429
| 0
| 0
| 0.115916
| 0.00961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0.008929
| 0.142857
| 0
| 0.214286
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0294cc7bc1ae9063a289d36fbf581ccd346caba
| 1,008
|
py
|
Python
|
SC_projects/recursion/dice_rolls_sum.py
|
hsiaohan416/stancode
|
8920f2e99e184d165fa04551f24a2da8b0975219
|
[
"MIT"
] | null | null | null |
SC_projects/recursion/dice_rolls_sum.py
|
hsiaohan416/stancode
|
8920f2e99e184d165fa04551f24a2da8b0975219
|
[
"MIT"
] | null | null | null |
SC_projects/recursion/dice_rolls_sum.py
|
hsiaohan416/stancode
|
8920f2e99e184d165fa04551f24a2da8b0975219
|
[
"MIT"
] | null | null | null |
"""
File: dice_rolls_sum.py
Name: Sharon
-----------------------------
This program finds all the dice rolls permutations
that sum up to a constant TOTAL. Students will find
early stopping a good strategy of decreasing the number
of recursive calls
"""
# This constant controls the sum of dice of our interest
TOTAL = 8
# global variable
run_times = 0
def main():
dice_sum(TOTAL)
print(f'Total run times: {run_times}')
def dice_sum(total):
dice_sum_helper(total, [])
def dice_sum_helper(total, ans):
global run_times
run_times += 1
if sum(ans) == total:
print(ans)
else:
for roll in [1, 2, 3, 4, 5, 6]:
if sum(ans) <= total:
diff = total - sum(ans)
                if diff >= roll:  # '>=' lets a roll land exactly on the total
# choose
ans.append(roll)
# explore
dice_sum_helper(total, ans)
# un-choose
ans.pop()
if __name__ == '__main__':
main()
| 21
| 56
| 0.545635
| 131
| 1,008
| 4.030534
| 0.480916
| 0.075758
| 0.073864
| 0.102273
| 0.079545
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.339286
| 1,008
| 47
| 57
| 21.446809
| 0.779279
| 0.338294
| 0
| 0
| 0
| 0
| 0.054962
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0
| 0
| 0.136364
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a02a39862663da51e8f5219d5dd8ae0de6edd96f
| 909
|
py
|
Python
|
valid_parentheses.py
|
KevinLuo41/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | 19
|
2015-01-19T19:36:09.000Z
|
2020-03-18T03:10:12.000Z
|
valid_parentheses.py
|
CodingVault/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | null | null | null |
valid_parentheses.py
|
CodingVault/LeetCodeInPython
|
051e1aab9bab17b0d63b4ca73473a7a00899a16a
|
[
"Apache-2.0"
] | 12
|
2015-04-25T14:20:38.000Z
|
2020-09-27T04:59:59.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
valid_parentheses.py
Created by Shengwei on 2014-07-24.
"""
# https://oj.leetcode.com/problems/valid-parentheses/
# tags: easy, array, parentheses, stack
"""
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
class Solution:
# @return a boolean
def isValid(self, s):
mappings = {')': '(', ']': '[', '}': '{'}
stack = []
for par in s:
if par in mappings.values():
stack.append(par)
elif stack and stack[-1] == mappings[par]:
stack.pop()
else:
return False
        # note: remember to check if stack is empty
        return not stack
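# A quick sanity check of the examples in the problem statement above:
if __name__ == '__main__':
    solution = Solution()
    assert solution.isValid("()") and solution.isValid("()[]{}")
    assert not solution.isValid("(]") and not solution.isValid("([)]")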
| 26.735294
| 118
| 0.540154
| 109
| 909
| 4.495413
| 0.66055
| 0.065306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015723
| 0.30033
| 909
| 33
| 119
| 27.545455
| 0.754717
| 0.267327
| 0
| 0
| 0
| 0
| 0.01432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0302470addb803e75aa2587a1202d6ec072bcdf
| 3,104
|
py
|
Python
|
doc/terms.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 5
|
2020-08-26T20:12:00.000Z
|
2020-12-11T16:39:22.000Z
|
doc/terms.py
|
RaenonX/Jelly-Bot
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 234
|
2019-12-14T03:45:19.000Z
|
2020-08-26T18:55:19.000Z
|
doc/terms.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 2
|
2019-10-23T15:21:15.000Z
|
2020-05-22T09:35:55.000Z
|
"""Terms used in the bot."""
from collections import OrderedDict
from dataclasses import dataclass
from typing import List
from django.utils.translation import gettext_lazy as _
from JellyBot.systemconfig import Database
@dataclass
class TermExplanation:
"""An entry for a term and its explanation."""
term: str
description: str
example: str
@dataclass
class TermsCollection:
"""A holder containing multiple terms."""
name: str
terms: List[TermExplanation]
terms_collection = OrderedDict()
terms_collection["Core"] = TermsCollection(
_("Core"),
[TermExplanation(_("Operation"),
_("The system has two ways to control: on the website, using the API. "
"Some actions may only available at a single side."),
_("-"))
]
)
terms_collection["Features"] = TermsCollection(
_("Features"),
[TermExplanation(_("Auto-Reply"),
_("When the system receives/sees a word, it will reply back certain word(s) if it is set."),
_("User A setup an Auto-Reply module, which keyword is **A** and reply is **B**. Then, "
"somebody typed **A** wherever Jelly BOT can see, so Jelly BOT will reply **B** back.")),
TermExplanation(_("Execode"),
_("The users provide partial required information for an operation, then the system will yield a "
"code (Execode) to the users for completing it while holding it for %d hrs.<br>"
"Users will need to use the given Execode with the missing information for completing the "
"operation before it expires.") % (Database.ExecodeExpirySeconds // 3600),
_("User B created an Auto-Reply module on the website and choose the issue an Execode option. "
"Then, he submit the Execode in the channel, so the Auto-Reply module is registered.")),
TermExplanation(_("Profile System/Permission"),
_("Users can have multiple profiles in the channel for various features use. Profiles will have "
"some permission or their privilege attached.<br>Some profiles may be granted by votes from "
"channel members or assigned by channel manager.<br>"
"This system is similar to the role system of **Discord**."),
_("ChannelA have profiles called **A** with admin privilege and **B** for normal users.<br>"
"Users who have profile **A** assigned will be able to "
"use features that only admins can use.")),
TermExplanation(_("Channel Management"),
_("Users will be able to adjust the settings specifically designated to the channel. "
"The availability of what can be adjusted will base on the user's profile."),
_("Eligibility of accessing the pinned auto-reply module, "
"changing the admin/mod of a channel...etc.")),
]
)
| 47.030303
| 119
| 0.61018
| 364
| 3,104
| 5.142857
| 0.453297
| 0.024038
| 0.032051
| 0.018162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00185
| 0.303479
| 3,104
| 65
| 120
| 47.753846
| 0.864015
| 0.031894
| 0
| 0.039216
| 0
| 0.058824
| 0.552359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098039
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a032fe94d351a63b7d04128ecd927fbb6c87a879
| 6,144
|
py
|
Python
|
service/azservice/tooling1.py
|
cnective-inc/vscode-azurecli
|
0ac34e2214078270b63cf6716423d40a60423834
|
[
"MIT"
] | 38
|
2019-06-21T00:26:15.000Z
|
2022-03-19T05:23:55.000Z
|
service/azservice/tooling1.py
|
cnective-inc/vscode-azurecli
|
0ac34e2214078270b63cf6716423d40a60423834
|
[
"MIT"
] | 46
|
2017-05-17T09:00:51.000Z
|
2019-04-24T10:18:19.000Z
|
service/azservice/tooling1.py
|
cnective-inc/vscode-azurecli
|
0ac34e2214078270b63cf6716423d40a60423834
|
[
"MIT"
] | 27
|
2019-05-19T18:42:42.000Z
|
2022-01-18T09:14:26.000Z
|
"""tooling integration"""
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import os
import traceback
from importlib import import_module
from sys import stderr
import pkgutil
import yaml
from six.moves import configparser
from azure.cli.core.application import APPLICATION, Configuration
from azure.cli.core.commands import _update_command_definitions, BLACKLISTED_MODS
from azure.cli.core._profile import _SUBSCRIPTION_NAME, Profile
from azure.cli.core._session import ACCOUNT
from azure.cli.core._environment import get_config_dir as cli_config_dir
from azure.cli.core._config import az_config, GLOBAL_CONFIG_PATH, DEFAULTS_SECTION
from azure.cli.core.help_files import helps
from azure.cli.core.util import CLIError
GLOBAL_ARGUMENTS = {
'verbose': {
'options': ['--verbose'],
'help': 'Increase logging verbosity. Use --debug for full debug logs.'
},
'debug': {
'options': ['--debug'],
'help': 'Increase logging verbosity to show all debug logs.'
},
'output': {
'options': ['--output', '-o'],
'help': 'Output format',
'choices': ['json', 'tsv', 'table', 'jsonc']
},
'help': {
'options': ['--help', '-h'],
'help': 'Get more information about a command'
},
'query': {
'options': ['--query'],
'help': 'JMESPath query string. See http://jmespath.org/ for more information and examples.'
}
}
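# These mirror the az CLI's global flags so the service can surface them on
# every command.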
def initialize():
_load_profile()
def _load_profile():
azure_folder = cli_config_dir()
if not os.path.exists(azure_folder):
os.makedirs(azure_folder)
ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
def load_command_table():
APPLICATION.initialize(Configuration())
command_table = APPLICATION.configuration.get_command_table()
_install_modules(command_table)
return command_table
def get_arguments(command):
return command.arguments
def arguments_loaded(command_name):
return True
def load_arguments(cmd_table, batch):
return False
def _install_modules(command_table):
for cmd in command_table:
command_table[cmd].load_arguments()
try:
mods_ns_pkg = import_module('azure.cli.command_modules')
installed_command_modules = [modname for _, modname, _ in
pkgutil.iter_modules(mods_ns_pkg.__path__)
if modname not in BLACKLISTED_MODS]
except ImportError:
        installed_command_modules = []
for mod in installed_command_modules:
try:
mod = import_module('azure.cli.command_modules.' + mod)
mod.load_params(mod)
mod.load_commands()
except Exception: # pylint: disable=broad-except
print("Error loading: {}".format(mod), file=stderr)
traceback.print_exc(file=stderr)
_update_command_definitions(command_table)
HELP_CACHE = {}
def get_help(group_or_command):
if group_or_command not in HELP_CACHE and group_or_command in helps:
HELP_CACHE[group_or_command] = yaml.load(helps[group_or_command])
return HELP_CACHE.get(group_or_command)
PROFILE = Profile()
def get_current_subscription():
_load_profile()
try:
return PROFILE.get_subscription()[_SUBSCRIPTION_NAME]
except CLIError:
return None # Not logged in
def get_configured_defaults():
_reload_config()
try:
options = az_config.config_parser.options(DEFAULTS_SECTION)
defaults = {}
for opt in options:
value = az_config.get(DEFAULTS_SECTION, opt)
if value:
defaults[opt] = value
return defaults
except configparser.NoSectionError:
return {}
def is_required(argument):
required_tooling = hasattr(argument.type, 'required_tooling') and argument.type.required_tooling is True
return required_tooling and argument.name != 'is_linux'
def get_defaults(arguments):
_reload_config()
return {name: _get_default(argument) for name, argument in arguments.items()}
def _get_default(argument):
configured = _find_configured_default(argument)
return configured or argument.type.settings.get('default')
def run_argument_value_completer(command, argument, cli_arguments):
try:
args = _to_argument_object(command, cli_arguments)
_add_defaults(command, args)
return argument.completer('', '', args)
except TypeError:
try:
return argument.completer('')
except TypeError:
try:
return argument.completer()
except TypeError:
return None
def _to_argument_object(command, cli_arguments):
result = lambda: None # noqa: E731
for argument_name, value in cli_arguments.items():
name, _ = _find_argument(command, argument_name)
setattr(result, name, value)
return result
def _find_argument(command, argument_name):
for name, argument in get_arguments(command).items():
if argument_name in argument.options_list:
return name, argument
return None, None
def _add_defaults(command, arguments):
_reload_config()
for name, argument in get_arguments(command).items():
if not hasattr(arguments, name):
default = _find_configured_default(argument)
if default:
setattr(arguments, name, default)
return arguments
def _reload_config():
az_config.config_parser.read(GLOBAL_CONFIG_PATH)
def _find_configured_default(argument):
if not (hasattr(argument.type, 'default_name_tooling') and argument.type.default_name_tooling):
return None
try:
return az_config.get(DEFAULTS_SECTION, argument.type.default_name_tooling, None)
except configparser.NoSectionError:
return None
| 29.681159
| 108
| 0.662598
| 704
| 6,144
| 5.522727
| 0.242898
| 0.020576
| 0.024691
| 0.032922
| 0.150977
| 0.082562
| 0.047068
| 0.047068
| 0.022119
| 0
| 0
| 0.000625
| 0.218262
| 6,144
| 206
| 109
| 29.825243
| 0.808869
| 0.066895
| 0
| 0.16
| 0
| 0
| 0.091592
| 0.008915
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126667
| false
| 0.006667
| 0.126667
| 0.02
| 0.406667
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a03a7becb2df6dd7e3600ceabbb203ca1e648d2d
| 10,131
|
py
|
Python
|
venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | 4
|
2021-02-05T10:57:39.000Z
|
2022-02-25T04:43:23.000Z
|
venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | null | null | null |
# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
bundlerepo,
changegroup,
error,
match,
mdiff,
pycompat,
)
from . import (
constants,
remotefilelog,
shallowutil,
)
NoFiles = 0
LocalFiles = 1
AllFiles = 2
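# Sentinel values used by shouldaddfilegroups() to decide which file groups to
# include in a changegroup.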
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
if not isinstance(rlog, remotefilelog.remotefilelog):
for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
yield c
return
if len(nodelist) == 0:
yield self.close()
return
nodelist = shallowutil.sortnodes(nodelist, rlog.parents)
# add the parent of the first rev
p = rlog.parents(nodelist[0])[0]
nodelist.insert(0, p)
# build deltas
for i in pycompat.xrange(len(nodelist) - 1):
prev, curr = nodelist[i], nodelist[i + 1]
linknode = lookup(curr)
for c in self.nodechunk(rlog, curr, prev, linknode):
yield c
yield self.close()
class shallowcg1packer(changegroup.cgpacker):
def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
if shallowutil.isenabled(self._repo):
fastpathlinkrev = False
return super(shallowcg1packer, self).generate(
commonrevs, clnodes, fastpathlinkrev, source, **kwargs
)
def group(self, nodelist, rlog, lookup, units=None, reorder=None):
return shallowgroup(
shallowcg1packer, self, nodelist, rlog, lookup, units=units
)
def generatefiles(self, changedfiles, *args):
try:
linknodes, commonrevs, source = args
except ValueError:
commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
if shallowutil.isenabled(self._repo):
repo = self._repo
if isinstance(repo, bundlerepo.bundlerepository):
# If the bundle contains filelogs, we can't pull from it, since
# bundlerepo is heavily tied to revlogs. Instead require that
# the user use unbundle instead.
# Force load the filelog data.
bundlerepo.bundlerepository.file(repo, b'foo')
if repo._cgfilespos:
raise error.Abort(
b"cannot pull from full bundles",
hint=b"use `hg unbundle` instead",
)
return []
filestosend = self.shouldaddfilegroups(source)
if filestosend == NoFiles:
changedfiles = list(
[f for f in changedfiles if not repo.shallowmatch(f)]
)
return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
def shouldaddfilegroups(self, source):
repo = self._repo
if not shallowutil.isenabled(repo):
return AllFiles
if source == b"push" or source == b"bundle":
return AllFiles
caps = self._bundlecaps or []
if source == b"serve" or source == b"pull":
if constants.BUNDLE2_CAPABLITY in caps:
return LocalFiles
else:
# Serving to a full repo requires us to serve everything
repo.ui.warn(_(b"pulling from a shallow repo\n"))
return AllFiles
return NoFiles
def prune(self, rlog, missing, commonrevs):
if not isinstance(rlog, remotefilelog.remotefilelog):
return super(shallowcg1packer, self).prune(
rlog, missing, commonrevs
)
repo = self._repo
results = []
for fnode in missing:
fctx = repo.filectx(rlog.filename, fileid=fnode)
if fctx.linkrev() not in commonrevs:
results.append(fnode)
return results
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
if prevnode == nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
# Actually uses remotefilelog.revdiff which works on nodes, not revs
delta = revlog.revdiff(prevnode, node)
p1, p2 = revlog.parents(node)
flags = revlog.flags(node)
meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
meta += prefix
l = len(meta) + len(delta)
yield changegroup.chunkheader(l)
yield meta
yield delta
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
if not shallowutil.isenabled(repo):
return orig(repo, outgoing, version, source, *args, **kwargs)
original = repo.shallowmatch
try:
# if serving, only send files the clients has patterns for
if source == b'serve':
bundlecaps = kwargs.get('bundlecaps')
includepattern = None
excludepattern = None
for cap in bundlecaps or []:
if cap.startswith(b"includepattern="):
raw = cap[len(b"includepattern=") :]
if raw:
includepattern = raw.split(b'\0')
elif cap.startswith(b"excludepattern="):
raw = cap[len(b"excludepattern=") :]
if raw:
excludepattern = raw.split(b'\0')
if includepattern or excludepattern:
repo.shallowmatch = match.match(
repo.root, b'', None, includepattern, excludepattern
)
else:
repo.shallowmatch = match.always()
return orig(repo, outgoing, version, source, *args, **kwargs)
finally:
repo.shallowmatch = original
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
if not shallowutil.isenabled(repo):
return orig(repo, source, revmap, trp, expectedfiles, *args)
newfiles = 0
visited = set()
revisiondatas = {}
queue = []
# Normal Mercurial processes each file one at a time, adding all
# the new revisions for that file at once. In remotefilelog a file
# revision may depend on a different file's revision (in the case
# of a rename/copy), so we must lay all revisions down across all
# files in topological order.
# read all the file chunks but don't add them
progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
while True:
chunkdata = source.filelogheader()
if not chunkdata:
break
f = chunkdata[b"filename"]
repo.ui.debug(b"adding %s revisions\n" % f)
progress.increment()
if not repo.shallowmatch(f):
fl = repo.file(f)
deltas = source.deltaiter()
fl.addgroup(deltas, revmap, trp)
continue
chain = None
while True:
# returns: (node, p1, p2, cs, deltabase, delta, flags) or None
revisiondata = source.deltachunk(chain)
if not revisiondata:
break
chain = revisiondata[0]
revisiondatas[(f, chain)] = revisiondata
queue.append((f, chain))
if f not in visited:
newfiles += 1
visited.add(f)
if chain is None:
raise error.Abort(_(b"received file revlog group is empty"))
processed = set()
def available(f, node, depf, depnode):
if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
return True
# re-add self to queue
queue.insert(0, (f, node))
# add dependency in front
queue.insert(0, (depf, depnode))
return False
return True
skipcount = 0
# Prefetch the non-bundled revisions that we will need
prefetchfiles = []
for f, node in queue:
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
if dependent == nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
repo.fileservice.prefetch(prefetchfiles)
    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
while queue:
f, node = queue.pop(0)
if (f, node) in processed:
continue
skipcount += 1
if skipcount > len(queue) + 1:
raise error.Abort(_(b"circular node dependency"))
fl = repo.file(f)
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
node, p1, p2, linknode, deltabase, delta, flags = revisiondata
if not available(f, node, f, deltabase):
continue
base = fl.rawdata(deltabase)
text = mdiff.patch(base, delta)
if not isinstance(text, bytes):
text = bytes(text)
meta, text = shallowutil.parsemeta(text)
if b'copy' in meta:
copyfrom = meta[b'copy']
copynode = bin(meta[b'copyrev'])
if not available(f, node, copyfrom, copynode):
continue
for p in [p1, p2]:
if p != nullid:
if not available(f, node, f, p):
continue
fl.add(text, meta, trp, linknode, p1, p2)
processed.add((f, node))
skipcount = 0
progress.complete()
return len(revisiondatas), newfiles
| 33.325658
| 80
| 0.583062
| 1,111
| 10,131
| 5.30063
| 0.288929
| 0.011887
| 0.008151
| 0.015622
| 0.163695
| 0.117677
| 0.089659
| 0.065886
| 0.024113
| 0.024113
| 0
| 0.008082
| 0.328299
| 10,131
| 303
| 81
| 33.435644
| 0.857311
| 0.141743
| 0
| 0.210046
| 0
| 0
| 0.033707
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045662
| false
| 0
| 0.022831
| 0.004566
| 0.164384
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a03aa51d99ddd848b1a155bf6b547a0503967011
| 10,507
|
py
|
Python
|
tests/test_main.py
|
FixItDad/vault-pwmgr
|
8c9edec3786eefbf72f0c13c24f3d4e331ab1562
|
[
"MIT"
] | 1
|
2018-01-26T12:45:44.000Z
|
2018-01-26T12:45:44.000Z
|
tests/test_main.py
|
FixItDad/vault-pwmgr
|
8c9edec3786eefbf72f0c13c24f3d4e331ab1562
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
FixItDad/vault-pwmgr
|
8c9edec3786eefbf72f0c13c24f3d4e331ab1562
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Functional tests for the main password manager page
# Currently targeting Firefox
# Depends on the vault configuration provided by the startdev.sh script.
# Depends on pytest-sourceorder to force test case execution order.
# TODO: configure vault data from pretest fixture.
import datetime
import pytest
import time
from pytest_sourceorder import ordered
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import testutils
# Test vault-pwmgr server to point to for tests
PWMGR_URL = "http://127.0.0.1:7080/"
HISTGROUP = testutils.HISTGROUP
# Used to store state information for multi-step tests. Generally tests should
# be independent, but some cases may depend on results returned earlier by the
# program under test. This can be used judiciously rather than duplicating
# test code or having very long test cases that test multiple items.
state = {}
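# Illustrative pattern (matching its use further below): one ordered test does
# state["test_del_item_facepalm_title"] = title, and a later test reads and
# then deletes that key.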
def _login_pw(driver, userid, userpw):
""" Helper routine to log in by password with the supplied credentials. """
loginid = driver.find_element_by_id("loginid")
loginid.clear()
loginid.send_keys(userid)
loginpw = driver.find_element_by_id("loginpw")
loginpw.clear()
loginpw.send_keys(userpw)
loginpw.submit()
@pytest.fixture(scope="module")
def webdriver_module():
# Create a new instance of the browser driver at the module level
driver = webdriver.Firefox()
yield driver
driver.quit()
@pytest.fixture
def driver(webdriver_module):
# Set up the initial webdriver state for functions in this module.
# These functions test post login functionality, so start with a
# fresh login page and enter test user credentials.
webdriver_module.get(PWMGR_URL)
WebDriverWait(webdriver_module, 10).until(EC.title_contains("Vault Password Manager"))
_login_pw(webdriver_module,'user1','user1pw')
WebDriverWait(webdriver_module, 10).until(EC.presence_of_element_located((By.ID,"entrydetails")))
WebDriverWait(webdriver_module, 10).until(EC.presence_of_element_located((By.TAG_NAME,"nav")))
return webdriver_module
def ztest_navigation_visibility(driver):
"""
Requirement: All authorized items should be reachable from the nav tree.
Requirement: Nav tree initially shows only collection names.
Gradually expand tree to reveal all 3 levels: collection, group, item
"""
nav = testutils.NavigationHelper(driver)
# initially only collection names are visible
visible = nav.visiblelist()
assert visible == [('linuxadmin',), ('user1',)]
# open a collection, groups in the collection should be visible
nav.click(["linuxadmin"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1',),
]
# open other collection. All groups visible
nav.click(["user1"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/'),
]
# open a group. Group items are visible
nav.click(["user1","web/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/', 'google'),
('user1','web/', 'netflix'),
]
# Close a group and open another
nav.click(["user1","web/"])
nav.click(["linuxadmin","webservers/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/','LoadBal'),
('linuxadmin','webservers/','extA'),
('linuxadmin','webservers/','extB'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/'),
]
#open the last group
nav.click(["user1","Pauls Stuff/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/','LoadBal'),
('linuxadmin','webservers/','extA'),
('linuxadmin','webservers/','extB'),
('user1','Pauls Stuff/','$+dream'),
('user1','network/'),
('user1','web/'),
]
# close a collection, all groups and items in the collection are hidden
nav.click(["linuxadmin"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin',),
('user1','Pauls Stuff/','$+dream'),
('user1','network/'),
('user1','web/'),
]
@ordered
class TestAddRemove(object):
"""
"""
def test_add_item_from_initial(s,driver):
""" Requirement: Add an item.
Add from initial screen with blank fields.
"""
nav = testutils.NavigationHelper(driver)
form = testutils.ItemHelper(driver)
# initially only collection names are visible
visible = nav.visiblelist()
assert visible == [('linuxadmin',), ('user1',),]
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}
form.fields = {
"collectionid":"user1",
"groupid":"web",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
"password":"bobknows",
"notes":"Forget privacy!",
}
# Should be able to read the values back.
assert form.fields == {
"collectionid":"user1",
"groupid":"web",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
"password":"bobknows",
"notes":"Forget privacy!",
}
form.add_new()
assert form.message == "Added new entry web/Facepalm"
# visible in nav tree?
nav.click(["user1"])
nav.click(["user1","web/"])
assert nav.visible(('user1','web/', 'Facepalm'))
def test_del_item_facepalm(s,driver):
""" Requirements: Items can be deleted. Old items are moved
to an Archive group in the same collection. The item title contains a timestamp.
Form fields are cleared. A delete message is shown.
"""
nav = testutils.NavigationHelper(driver)
nav.click(["user1"])
nav.click(["user1","web/"])
assert nav.visible(('user1','web/', 'Facepalm'))
nav.click(["user1","web/","Facepalm"])
form = testutils.ItemHelper(driver)
assert form.fields == {
"collectionid":"user1",
"groupid":"web",
"notes":"Forget privacy!",
"password":"bobknows",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
}, "Expected item values displayed when selected"
form.delete()
delete_ts = datetime.datetime.utcnow()
WebDriverWait(driver, 5).until(
EC.text_to_be_present_in_element(
(By.ID,"mainmsg"),"Deleted entry web/Facepalm"))
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}, "Requirement: Fields are cleared after delete"
assert form.message == "Deleted entry web/Facepalm", "Requirement: delete message displayed"
time.sleep(1) # Nav tree needs extra time to update
visible = nav.visiblelist()
assert visible == [
(u'linuxadmin',),
(u'user1',HISTGROUP),
(u'user1',u'Pauls Stuff/'),
(u'user1',u'network/'),
(u'user1',u'web/', u'google'),
(u'user1',u'web/', u'netflix'),
], "Archive group is visible in nav tree"
nav.click(["user1", HISTGROUP])
title = nav.findarchived(delete_ts, ('user1','web','Facepalm') )
assert title is not None, "Requirement: deleted entry is in archive group."
# Stash title name for later test step
state["test_del_item_facepalm_title"] = title
def test_del_archived_item_facepalm(s,driver):
""" Requirements: Items can be deleted from the archive group.
"""
nav = testutils.NavigationHelper(driver)
form = testutils.ItemHelper(driver)
nav.click(["user1"])
nav.click(["user1", HISTGROUP])
title = state["test_del_item_facepalm_title"]
del state["test_del_item_facepalm_title"]
nav.click(("user1", HISTGROUP, title))
assert form.fields == {
"collectionid":"user1",
"groupid":HISTGROUP[:-1],
"notes":"Forget privacy!",
"password":"bobknows",
"title":title,
"url":"https://facepalm.com",
"userid":"bob",
}, "Archived entry values are as expected."
form.delete()
WebDriverWait(driver, 5).until(
EC.text_to_be_present_in_element(
(By.ID,"mainmsg"),"Deleted entry %s%s" % (HISTGROUP, title)))
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}, "Requirement: fields cleared after delete (archived entry)"
assert nav.hidden(('user1', HISTGROUP, title)), "Requirement: item removed from archive"
def ztest_delete_item(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_group(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_notes(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_password(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_title(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_url(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_userid(driver):
""" """
assert False, 'not implemented'
def ztest_clear_item_fields(driver):
""" """
assert False, 'not implemented'
def ztest_shared_item_visibility(driver):
""" """
assert False, 'not implemented'
| 31.743202
| 101
| 0.596745
| 1,121
| 10,507
| 5.507583
| 0.242641
| 0.020732
| 0.027373
| 0.039359
| 0.472465
| 0.44493
| 0.408487
| 0.375445
| 0.320862
| 0.237609
| 0
| 0.009158
| 0.262111
| 10,507
| 330
| 102
| 31.839394
| 0.787179
| 0.185781
| 0
| 0.571429
| 0
| 0
| 0.252484
| 0.010056
| 0
| 0
| 0
| 0.00303
| 0.133929
| 1
| 0.071429
| false
| 0.040179
| 0.049107
| 0
| 0.129464
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a03d0e1506063ae02913fe583bcbdc21759f23a9
| 2,031
|
py
|
Python
|
reacticket/extensions/usersettings.py
|
i-am-zaidali/Toxic-Cogs
|
088cb364f9920c20879751da6b7333118ba1bf41
|
[
"MIT"
] | 56
|
2019-03-21T21:03:26.000Z
|
2022-03-14T08:26:55.000Z
|
reacticket/extensions/usersettings.py
|
i-am-zaidali/Toxic-Cogs
|
088cb364f9920c20879751da6b7333118ba1bf41
|
[
"MIT"
] | 38
|
2019-08-20T02:18:27.000Z
|
2022-02-22T11:19:05.000Z
|
reacticket/extensions/usersettings.py
|
i-am-zaidali/Toxic-Cogs
|
088cb364f9920c20879751da6b7333118ba1bf41
|
[
"MIT"
] | 44
|
2019-07-04T06:17:54.000Z
|
2022-03-25T19:18:31.000Z
|
from typing import Optional
from reacticket.extensions.abc import MixinMeta
from reacticket.extensions.mixin import settings
class ReacTicketUserSettingsMixin(MixinMeta):
@settings.group()
async def userpermissions(self, ctx):
"""Control the permissions that users have with their own tickets"""
pass
@userpermissions.command()
async def usercanclose(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can close their own tickets or not."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanclose()
await self.config.guild(ctx.guild).usercanclose.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now close their own tickets.")
else:
await ctx.send("Only administrators can now close tickets.")
@userpermissions.command()
async def usercanmodify(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can add or remove additional users to their ticket."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanmodify()
await self.config.guild(ctx.guild).usercanmodify.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now add/remove other users to their own tickets.")
else:
await ctx.send("Only administrators can now add/remove users to tickets.")
@userpermissions.command()
async def usercanname(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can rename their tickets and associated channels."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanname()
await self.config.guild(ctx.guild).usercanname.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now rename their tickets and associated channels.")
else:
await ctx.send("Only administrators can now rename tickets and associated channels.")
| 42.3125
| 97
| 0.678484
| 276
| 2,031
| 4.884058
| 0.228261
| 0.055638
| 0.077893
| 0.040059
| 0.658012
| 0.603116
| 0.545252
| 0.429525
| 0.399852
| 0.399852
| 0
| 0
| 0.231905
| 2,031
| 47
| 98
| 43.212766
| 0.864103
| 0
| 0
| 0.352941
| 0
| 0
| 0.182232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.029412
| 0.088235
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a041cf9593ba00ff0663499073797bf96dd8b200
| 300
|
py
|
Python
|
agnocomplete/urls.py
|
mike-perdide/django-agnocomplete
|
1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8
|
[
"MIT"
] | null | null | null |
agnocomplete/urls.py
|
mike-perdide/django-agnocomplete
|
1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8
|
[
"MIT"
] | null | null | null |
agnocomplete/urls.py
|
mike-perdide/django-agnocomplete
|
1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8
|
[
"MIT"
] | 1
|
2022-01-03T16:18:00.000Z
|
2022-01-03T16:18:00.000Z
|
"""
Agnostic Autocomplete URLs
"""
from django.conf.urls import url
from .views import AgnocompleteView, CatalogView
urlpatterns = [
url(
r'^(?P<klass>[-_\w]+)/$',
AgnocompleteView.as_view(),
name='agnocomplete'),
url(r'^$', CatalogView.as_view(), name='catalog'),
]
| 21.428571
| 54
| 0.626667
| 32
| 300
| 5.78125
| 0.65625
| 0.043243
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193333
| 300
| 13
| 55
| 23.076923
| 0.764463
| 0.086667
| 0
| 0
| 0
| 0
| 0.157895
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a044f460600112720a002859c5451c1e4332abe6
| 10,037
|
py
|
Python
|
tem/repo.py
|
tem-cli/tem
|
6974734a000604fe201fcba573b05e8fe50eda72
|
[
"MIT"
] | null | null | null |
tem/repo.py
|
tem-cli/tem
|
6974734a000604fe201fcba573b05e8fe50eda72
|
[
"MIT"
] | null | null | null |
tem/repo.py
|
tem-cli/tem
|
6974734a000604fe201fcba573b05e8fe50eda72
|
[
"MIT"
] | null | null | null |
"""Repository operations"""
import os
from tem import config, util
class Repo:
"""A python representation of a repository."""
def __init__(self, *args):
if not args or args[0] is None:
self.path = ""
elif isinstance(args[0], str):
self.path = args[0]
elif isinstance(args[0], Repo):
self.path = args[0].path
def abspath(self):
"""Get the absolute path of the repo, preserving symlinks."""
return util.abspath(self.path)
def realpath(self):
"""Get the real path of the repo."""
return os.path.realpath(self.path)
def name(self):
"""Get the name of the repo at ``path`` from its configuration.
If the repo has not configured a name, the base name of its directory
is used. This works even if the repository does not exist on the
filesystem.
"""
# TODO put this entry in the local config file
cfg = config.Parser(self.path + "/.tem/repo")
name = cfg["general.name"]
if name:
return name
return util.basename(self.path)
def has_template(self, template):
"""Test if the repo contains `template`."""
return os.path.exists(util.abspath(self.path + "/" + template))
@staticmethod
def named(name):
"""
Return the repository with the given name if it exists on the lookup
path; otherwise return an empty Repo.
"""
# TODO decide how to handle ambiguity
for repo in lookup_path:
repo = Repo(repo)
if repo.name() == name:
return repo
return Repo(None)
@staticmethod
def from_id(repo_id):
"""Get a repository with id ``repo_id``.
A repository id is a common term for name and path. To determine if
``repo_id`` is a name or a path, the following strategy is used:
- If it contains a '/', it is resolved as a path
- Otherwise, a repo with the name ``repo_id`` is looked up and if
found, that repo is returned
- If the above fails, ``repo_id`` is resolved as a path. In this case,
the repo need not exist on the filesystem.
"""
if "/" in repo_id:
return Repo(repo_id)
return Repo.named(repo_id)
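# Illustrative resolution (hypothetical values):
#   Repo.from_id("~/repos/dotfiles") -> treated as a path (contains '/')
#   Repo.from_id("dotfiles")         -> looked up by name via Repo.named()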
#: List of lookup paths for tem repositories
lookup_path = [
Repo(line) for line in os.environ.get("REPO_PATH", "").split("\n") if line
]
class RepoSpec:
"""An abstraction for various ways of specifying tem repositories
The following types of specs are supported:
- absolute or relative path to a repository
- name of a repository
- absolute or relative path to a repository that is to be excluded
(when `spec_type` is :attr:`EXCLUDE`)
- all repositories from :data:`path`
You can obtain the list of repositories from a spec by calling
:func:`repos`.
If ``spec_type`` is :attr:`EXCLUDE` then ``pseudopaths`` are
excluded from the final list. If ``spec_type`` is :attr:`FROM_LOOKUP_PATH`
then all the paths from :data:`repo.lookup_path` are included in the spec.
An empty spec is equivalent to a `FROM_LOOKUP_PATH` spec.
Attributes
----------
paths : str, list, optional
Repository paths or other types of specs
spec_type
Values: `INCLUDE`, `EXCLUDE`, `FROM_LOOKUP_PATH` or a bitwise OR of
these
Constants
---------
INCLUDE
Specified repos or specs will be included in the final list of repos.
EXCLUDE
Specified specs will be excluded from the final list of repos.
FROM_LOOKUP_PATH
Repos from :data:`path` will be:
- included if `INCLUDE` is set
- excluded if `EXCLUDE` is set
Methods
-------
"""
INCLUDE = 1
EXCLUDE = 2
FROM_LOOKUP_PATH = 4
@staticmethod
def of_type(spec_type):
"""
Look at :func:`__init__` for the proper ways of specifying a spec type.
"""
def func(specs=None):
return RepoSpec(specs=specs, spec_type=spec_type)
return func
# Holds the paths/subspecs
_data: list
def __init__(self, specs=None, spec_type=None):
"""Initialize repo spec
In the most basic form, ``specs`` is a string or list of strings
representing repository paths or names. Specs can also contain other
specs. ``spec_type`` is the type of spec and can be a single type or a
tuple containing multiple types. If no `spec_type` is specified, the
spec will be of the ``INCLUDE`` type.
"""
if not spec_type and isinstance(specs, int):
# Constructed with only spec_type as its argument
spec_type = specs
specs = None
elif not spec_type:
# Unspecified spec_type should fall back to INCLUDE
spec_type = RepoSpec.INCLUDE
# Error checking
if not spec_type & (
self.INCLUDE | self.EXCLUDE | self.FROM_LOOKUP_PATH
):
raise ValueError("invalid spec type")
if spec_type & RepoSpec.INCLUDE and spec_type & RepoSpec.EXCLUDE:
raise ValueError(
"spec_type cannot contain both INCLUDE and EXCLUDE"
)
if spec_type & RepoSpec.FROM_LOOKUP_PATH and specs is not None:
raise ValueError("cannot specify specs with FROM_LOOKUP_PATH")
self._data = []
self.spec_type = spec_type
if specs is not None:
self.append(specs)
def append(self, specs):
"""Append specs to the list."""
err = ValueError("specs must be a string, spec, or list of specs")
if isinstance(specs, str):
self._data += [s for s in specs.split("\n") if s]
elif isinstance(specs, RepoSpec):
self._data.append(specs)
elif isinstance(specs, list):
# All items in specs must be strings or RepoSpecs
if all(isinstance(spec, (str, RepoSpec)) for spec in specs):
self._data += specs
else:
raise err
else:
raise err
def _abspaths(self, included):
"""Get a list of paths that are included/excluded by this spec."""
if (included and (self.spec_type & RepoSpec.EXCLUDE)) or (
not included and not (self.spec_type & RepoSpec.EXCLUDE)
):
# Only exclude-type specs can exclude paths, and only other-type
# specs can include
return []
if not self._data:
if included:
return lookup_path
return []
if self.spec_type & RepoSpec.FROM_LOOKUP_PATH:
return lookup_path
result = lookup_path.copy()
# If at least one subspec is not EXCLUDE, initialize empty result
for item in self._data:
if isinstance(item, str) or (
not item.spec_type & RepoSpec.EXCLUDE
):
result = []
break
for item in self._data:
if isinstance(item, str):
result.append(resolve(item))
elif isinstance(item, RepoSpec):
if item.spec_type & RepoSpec.EXCLUDE:
result[:] = [
spec
for spec in result
if spec
not in item._abspaths(  # pylint: disable=protected-access,line-too-long
False
)
]
else:
result += item.repos()
else:
raise ValueError(
"Spec list contains invalid types. Please "
"report this as a bug."
)
return list(dict.fromkeys(result)) # Remove duplicates
def repos(self):
"""Return absolute paths of repositores specified by this spec."""
# return self._abspaths(True)
return [Repo(path) for path in self._abspaths(True)]
def is_valid_name(name):
"""Test if ``name`` is a valid repository name."""
return "/" not in name
def resolve(path_or_name):
"""Get the repo identified by ``path_or_name``.
The following strategy is used:
- If the argument is a valid repository name, find a repo in
`repo.lookup_path` with the given name.
- If the argument is a path, or the previous step failed to find a repo,
return a Repo constructed from the input path.
"""
if not path_or_name:
return Repo()
if is_valid_name(path_or_name):
repo = Repo.named(path_or_name)
if repo.path:
return repo
return Repo(path_or_name)
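# Illustrative behaviour (hypothetical values):
#   resolve("dotfiles")         -> the repo named "dotfiles" if it is on
#                                  lookup_path, else Repo("dotfiles")
#   resolve("./repos/dotfiles") -> Repo("./repos/dotfiles"), resolved as a path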
def find_template(template: str, repos=None, at_most=-1):
"""Return the absolute path of a template, looked up in ``repos``.
Parameters
----------
template : str
Path to template relative to the containing repo.
repos : list[Repo]
Repositories to look up. A None value will use :data:`path`.
at_most : int
Return no more than this number of template paths.
Returns
-------
template_paths : list[str]
List of absolute paths to templates under the given repos.
Notes
-----
A template can be a directory tree, e.g. "a/b/c".
"""
if repos is None:
repos = lookup_path
if at_most == 0:
return []
result_paths = []
for repo in repos:
if at_most != -1 and len(result_paths) >= at_most:
break
template_abspath = repo.abspath() + "/" + template
if os.path.exists(template_abspath):
result_paths.append(template_abspath)
return result_paths
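# Minimal usage sketch (hypothetical template name):
#   find_template("licenses/mit", at_most=1)
#   -> at most one absolute path to "licenses/mit" under the lookup path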
def remove_from_path(remove_repos):
"""Remove matching repos from REPO_PATH environment variable."""
remove_repo_paths = [r.realpath() for r in remove_repos]
lookup_path[:] = (
repo
for repo in lookup_path
if repo.realpath() not in remove_repo_paths
)
| 31.170807
| 104
| 0.586629
| 1,300
| 10,037
| 4.429231
| 0.191538
| 0.041681
| 0.021883
| 0.019972
| 0.134595
| 0.05627
| 0.024661
| 0.012504
| 0.012504
| 0
| 0
| 0.001634
| 0.329082
| 10,037
| 321
| 105
| 31.267913
| 0.85343
| 0.411876
| 0
| 0.153333
| 0
| 0
| 0.047327
| 0
| 0
| 0
| 0
| 0.006231
| 0
| 1
| 0.113333
| false
| 0
| 0.013333
| 0.006667
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a047d0ac5a16d636bbec804560bb56282540b1b2
| 13,172
|
py
|
Python
|
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | null | null | null |
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
remit/settings.py
|
naamara/blink
|
326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a
|
[
"Unlicense",
"MIT"
] | null | null | null |
''' settings for Django '''
import os
import django.conf.global_settings as DEFAULT_SETTINGS
LOCALHOST = False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PAYMENTS = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)) + '/'
LIVE = 1
ADMINS = (
('Madra David', 'madra@redcore.co.ug'),
)
APP_EMAILS = {
'contact_us':'mandelashaban593@gmail.com',
'about_us':'mandelashaban593@gmail.com',
'info':'mandelashaban593@gmail.com',
'support':'mandelashaban593@gmail.com',
}
DEBUG_EMAILS = {
'madra@redcore.co.ug' ,
}
APP_NAME = 'Useremit'
DOMAIN_NAME = 'Remit'
APP_TITLE = 'Remit | Send Money to Mobile Money in Uganda or Kenya | Pay utility bills online'
MANAGERS = ADMINS
USE_JUMIO = True
BASE_URL = 'https://useremit.com/'
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)) + '/'
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Or path to database file if using sqlite3.
'NAME': 'anenyuoe4',
# The following settings are not used with sqlite3:
'USER': 'dqebbquaa4iba',
'PASSWORD': 'WMm8mq1ZYAOn',
# Empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': 'LOCALHOST',
'PORT': '', # Set to empty string for default.
'OPTIONS': {'autocommit': True, },
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['www.useremit.com', 'http://useremit.com',
'https://useremit.com']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
#TIME_ZONE = 'Africa/Nairobi'
TIME_ZONE ='UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = BASE_DIR + 'static/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = BASE_URL + 'static/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
#GEOIP_PATH = BASE_URL + 'geoip_data/'
geo_dir = os.path.dirname(__file__)
geo_rel_path = "geoip"
GEOIP_PATH = os.path.join(geo_dir, geo_rel_path)
EMAIL_TEMPLATE_DIR = BASE_DIR + 'templates/email/'
AJAX_TEMPLATE_DIR = BASE_DIR + 'templates/ajax/'
SMS_TEMPLATE_DIR = BASE_DIR + 'templates/sms/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ksx8+lq!5pzx&)xuqp0sc-rdgtd14gmix-eglq(iz%3+7h)f52'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'session_security.middleware.SessionSecurityMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'remit.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'remit.wsgi.application'
TEMPLATE_DIRS = (
BASE_DIR + 'templates',
BASE_DIR + 'remit_admin/templates/',
BASE_DIR + 'remit_admin/templates/admin/',
)
INSTALLED_APPS = (
#background tasks
#'huey.djhuey',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'remit',
'social_widgets',
'accounts',
#'south'
'landingapp',
'coverage',
#'notification',
'nexmo',
'guardian',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#'django_admin_bootstrapped.bootstrap3',
#'django_admin_bootstrapped',
# Uncomment the next line to enable the admin:
'remit_admin',
'session_security',
'gravatar',
'django_cron',
'django.contrib.humanize',
'django_extensions',
#'django_bitcoin',
'btc',
'rest_framework',
'rest_framework.authtoken',
'api',
'seo',
'payments',
'background_task',
'django.contrib.admin',
'ipn',
'standard',
'crispy_forms',
'tinymce',
#'django_twilio',
)
PAYPAL_RECEIVER_EMAIL = "mandelashaban593@gmail.com"
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
#'rest_framework.renderers.BrowsableAPIRenderer',
),
# Use Django's standard `django.contrib.auth` permissions,
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S'
}
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Custom template processors
# YOpay
YOPAY_USERNAME = '100224720137'
YOPAY_PASSWORD = 'jLQF-r1oa-OyIq-0zoQ-544O-7U1F-oGj5-YoyU'
YOPAY_ENDPOINT = 'https://paymentsapi1.yo.co.ug/ybs/task.php'
# Ipay
LIVE = 1
IPAY_CALLBACK_URL = '%stransaction/confirm_payment/' % BASE_URL
IPAY_USER = 'redcore'
IPAY_MERCHANT = 'RedCore'
IPAY_HASH_KEY = '0yiq0zoQ544O'
# uba
UBA_CALLBACK_URL = ''
UBA_MERCHANT_ID = ''
UBA_MERCHANT_KEY = ''
#jumio
JUMIO_URL="https://netverify.com/api/netverify/v2/initiateNetverify/"
JUMIO_TOKEN="fcf1eec3-728d-4f8a-8811-5b8e0e534597"
JUMIO_SECRET="9mnQyVj1ppiyVESYroDHZS23Z9OfQ9GS"
JUMIO_USER_AGENT="MyCompany MyApp/1.0.0"
USE_JUMIO = True
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/jumiopass/"
JUMIO_ERROR_URL="https://simtransfer.com/jumiofail/"
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/idscanned/"
JUMIO_ERROR_URL="https://simtransfer.com/idscanfailed/"
JUMIO_CALLBACK="https://simtransfer.com/jumiodata/"
# Mailgun
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGIN_URL = BASE_URL + 'login/'
SIGNUP_URL = BASE_URL + 'signup/'
LOGOUT_URL = BASE_URL + 'signout/'
AUTHENTICATION_BACKENDS = (
'accounts.backends.EmailVerificationBackend',
'remit.backends.EmailAuthBackend',
'guardian.backends.ObjectPermissionBackend',
)
ACTIVATION_LINK = BASE_URL + 'activate/'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
"""
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = ''
DEFAULT_TO_EMAIL = ''
"""
#EMAIL_PORT = 587
ADMIN_USER='admin_key_user'
ADMIN_USER_KEY='user_004_admin'
# Mailgun settings
DEFAULT_FROM_EMAIL = 'Remit.ug <noreply@remit.ug>'
#EMAIL_USE_TLS = True
#EMAIL_HOST = 'smtp.mailgun.org'
#EMAIL_HOST_USER = 'postmaster@remit.ug'
#EMAIL_HOST_PASSWORD = '25s0akinnuk8'
#EMAIL_PORT = 25
# Mailgun settings
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
#EMAIL_TEMPLATE_DIR = '%stemplates/email/' % (BASE_DIR)
# using sandbox account here, change later
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'remit.ug'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v2/remit.ug/messages'
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'useremit.com'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v3/useremit.com/messages'
CONTACT_NO = '+256783877133'
# Nexmo
NEXMO_USERNAME = '8cede62f'
NEXMO_PASSWORD = 'd4d43a29'
NEXMO_FROM = 'Remit'
#Nexmo App
NEXMO_API_KEY = '8cede62fSecret'
NEXMO_API_SECRET = 'd4d43a29'
NEXMO_DEFAULT_FROM = 'Remit'
#if set to zero we use twilio
USE_NEXMO = 0
USE_TWILIO = True
USE_SUKUMA = False
USE_AFRICA_SMS = True
TWILIO_ACCOUNT_SID='AC2a0de3ac9808d7bfa5c3d75853c073d6'
TWILIO_AUTH_TOKEN='82b2ab8535255c8fd8d96bad96103ae7'
TWILIO_DEFAULT_CALLERID = 'Remit'
# Session security
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# cron jobs
CRON_CLASSES = [
"remit.cron.UpdateRates",
# ...
]
# Pagination
PAGNATION_LIMIT = 10
# Avatar
GRAVATAR_URL = "https://www.gravatar.com/avatar.php?"
# Bitcoin
#BITCOIND_CONNECTION_STRING = "http://ubuntu:bitwa8bfede82llet@localhost:8332"
BITCOIND_CONNECTION_STRING = "http://redcorebrpc:BKGyjwyNXzHumywcau3FubmyaJ8NypJtd1eSdTYCqSkJ@localhost:8332"
# How many bitcoin network confirmations are required until we consider the transaction
# as received
BITCOIN_MINIMUM_CONFIRMATIONS = 3
# Use Django signals to tell the system when new money has arrived to your
# wallets
BITCOIN_TRANSACTION_SIGNALING = True
from decimal import Decimal
MAIN_ADDRESS = '12oaMnJZZJRx59kWyAshzmogHERo8y54Et'
BITCOIN_PAYMENT_BUFFER_SIZE = 1
BITCOIN_ADDRESS_BUFFER_SIZE = 1
PAYMENT_VALID_HOURS = 1
BITCOIN_PRIVKEY_FEE = Decimal("0.0005")
BITCOIN_TRANSACTION_CACHING = 1
#admin who processed transactions
PROCESSED_BY = 1
#background tasks
#HUEY_CONFIG = {
# 'QUEUE': 'huey.backends.redis_backend.RedisBlockingQueue',
# 'QUEUE_NAME': 'test-queue',
# 'QUEUE_CONNECTION': {
# 'host': 'localhost',
# 'port': 6379,
# },
# 'THREADS': 4,
#}
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_FAILURE_VIEW = 'remit.views.csrf_failure_view'
MTN_SDP = '172.25.48.43'
MTN_TEST_BED = 0
MTN_SDP_USERNAME = 'remitug.sp1'
MTN_SDP_PASS = 'Huawei2014'
MTN_SDP_SERVICEID = '2560110001380'
MTN_SDP_URL = 'http://172.25.48.43:8310/'
MTN_VENDOR_CODE = 'REMIT'
REVENUE_SHARE = 2.16
#disable email and sms sending
DISABLE_COMMS = False
#background tasks
MAX_ATTEMPTS = 5
#need this for generating reports from sqlite
IS_SQLITE = False
OTHER_FEES = True
SEND_KYC_SMS = True
# Pesapot
PESAPOT_URL = 'http://pesapot.com/api/'
PESAPOT_TOKEN = ''
PESAPOT_KEY = ''
#paybill
PAYBILL = False
DISABLE_MTN = True
ENABLE_TRADELANCE = True
ENABLE_YO = False
DISABLE_AIRTEL_MONEY = False
DISABLE_MTN_MOBILE_MONEY = False
#force Transaction id
FORCE_TRANSACTION_ID = True
# Localhost settings
# Crispy forms tags settings
CRISPY_TEMPLATE_PACK = 'bootstrap3'
try:
from local_settings import *
except ImportError:
pass
STATIC_ROOT = BASE_DIR + 'static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = BASE_URL + 'static/'
| 24.994307
| 109
| 0.719708
| 1,632
| 13,172
| 5.613358
| 0.35723
| 0.025543
| 0.013099
| 0.009606
| 0.129462
| 0.095623
| 0.051195
| 0.037441
| 0.034276
| 0.034276
| 0
| 0.02739
| 0.162921
| 13,172
| 526
| 110
| 25.041825
| 0.803465
| 0.336319
| 0
| 0.057915
| 0
| 0
| 0.416707
| 0.215032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.019305
| 0.019305
| 0
| 0.019305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a04cf7b68b006d07caae20b361bd4e847b1b78eb
| 13,900
|
py
|
Python
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 8
|
2017-08-15T12:51:34.000Z
|
2020-10-07T09:58:34.000Z
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 5
|
2017-02-04T12:32:16.000Z
|
2020-07-01T14:13:19.000Z
|
tests/gfp.py
|
mcepl/git-packaging-tools
|
de705a9ac2efd1752754e4feb093fe85821f9224
|
[
"MIT"
] | 6
|
2017-02-07T13:31:21.000Z
|
2021-02-10T23:14:03.000Z
|
#!/usr/bin/python3
#
# Copyright (c) 2017-2020, SUSE LLC
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. Redistributions
# in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the SUSE Linux Products GmbH nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Author: Bo Maryniuk <bo@suse.de>
This tool helps to:
1. Format patches from Git in a way that has minimal impact on
future changes
2. Update patches to the current package source
3. Detect content differences, if the filename is still the same
4. Generate include message for .changes logfile
'''
import os
import sys
import re
import argparse
import shutil
ORDERING_FILE = 'patches.orders.txt'
CHANGES_FILE = 'patches.changes.txt'
def remove_order(filename):
'''
Remove order of the patch filename.
Git formats patches: XXXX-filename.patch
This function removes the "XXXX-" part, if any.
'''
ordnum = os.path.basename(filename).split('-')[0]
if ordnum and not re.sub(r'[0-9]', '', ordnum):
filename = os.path.join(os.path.dirname(filename),
filename.split('-', 1)[-1]).lower()
ordnum = int(ordnum)
else:
ordnum = None
return ordnum, filename
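# Illustrative examples (hypothetical filenames):
#   remove_order('0001-fix-build.patch') -> (1, 'fix-build.patch')
#   remove_order('standalone.patch')     -> (None, 'standalone.patch')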
def remove_order_from_subject(src_file, dst_file, use_unique=False):
'''
Rewrite the subject line inside the patch.
Git formats patch subjects as:
Subject: [PATCH X/Y] .........
This function removes the "[PATCH X/Y]" numbering, if any. With
"git format-patch -N" the subject carries no numbers, just "[PATCH]";
such subjects are left unchanged.
'''
if os.path.exists(dst_file) and not use_unique:
raise IOError('the file {0} exists'.format(dst_file))
if os.path.exists(dst_file) and use_unique:
dst_file = unique(dst_file)
dst = open(dst_file, 'w')
for fline in open(src_file).read().split(os.linesep):
fline_tk = re.split(r'\s+\[PATCH \d+/\d+\]\s+', fline)
if len(fline_tk) == 2 and fline_tk[0] == 'Subject:':
fline = ' [PATCH] '.join(fline_tk)
dst.write('{0}\n'.format(fline))
dst.close()
def git_format_patch(tag):
'''
Formats patches from the given tag.
'''
patches = 0
for patch in os.popen(
'git format-patch {0}'.format(tag)).read().split(os.linesep):
if patch.split('.')[-1] == 'patch':
patches += 1
print("Patches fetched: {0}".format(patches))
def get_diff_contents(data):
'''
Get diff contents only.
'''
# Yes, I know about library https://github.com/cscorley/whatthepatch
# But for now we go ultra-primitive to avoid dependencies
data = '--'.join(data.split("--")[:-1])
contents = []
for chunk in re.split(r'@@.*?@@.*?\n', data)[1:]:
contents.append(chunk.split('diff --git')[0])
return contents
def unique(fname):
'''
Make the filename unique by appending or incrementing a numeric suffix.
:param fname: the filename to make unique
:return: a unique variant of ``fname``
'''
fname = fname.split('.')
if '-' not in fname[0]:
fname[0] = '{0}-{1}'.format(fname[0], 1)
else:
chnk = fname[0].split('-')
try:
fname[0] = '{0}-{1}'.format('-'.join(chnk[:-1]), int(chnk[-1]) + 1)
except ValueError:
# Filename is not in "str-int", but "str-str".
fname[0] = '{0}-{1}'.format(fname[0], 1)
return '.'.join(fname)
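# Illustrative examples (hypothetical filenames):
#   unique('fix.patch')    -> 'fix-1.patch'
#   unique('fix-1.patch')  -> 'fix-2.patch'
#   unique('my-fix.patch') -> 'my-fix-1.patch'  (suffix is not an integer)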
def extract_spec_source_patches(specfile):
'''
Extracts source patches from the .spec file to match existing
comments, according to the
https://en.opensuse.org/openSUSE:Packaging_Patches_guidelines
:param: specfile
:return:
'''
patch_sec_start = False
patch_sec_end = False
head_buff = []
patch_section = []
for spec_line in open(specfile).read().split(os.linesep):
if re.match(r'^[Pp]atch[0-9]+:', spec_line) and not patch_sec_start:
patch_sec_start = True
if not spec_line.startswith('#') and \
not re.match(r'^[Pp]atch[0-9]+:', spec_line) and \
patch_sec_start and \
not patch_sec_end:
patch_sec_end = True
if not patch_sec_start and not patch_sec_end:
head_buff.append(spec_line)
if patch_sec_start and not patch_sec_end:
patch_section.append(spec_line)
first_comment = []
for head_line in reversed(head_buff):
if not head_line:
break
if head_line.startswith('#'):
first_comment.append(head_line)
patch_section.insert(0, os.linesep.join(first_comment))
patchset = {}
curr_key = None
for line in reversed(patch_section):
if re.match(r'^[Pp]atch[0-9]+:', line):
curr_key = re.sub(r'^[Pp]atch[0-9]+:', '', line).strip()
patchset[curr_key] = []
continue
if curr_key and line and line.startswith('#'):
patchset[curr_key].append(line)
return patchset
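# Illustrative result (hypothetical spec snippet):
#   # PATCH-FIX-UPSTREAM something.patch -- short rationale
#   Patch1:         something.patch
# would yield {'something.patch': ['# PATCH-FIX-UPSTREAM something.patch -- short rationale']}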
def do_remix_spec(args):
'''
Remix spec file.
:param args:
:return:
'''
if not os.path.exists(args.spec or ''):
raise IOError('Specfile {0} is not accessible or is somewhere else'.format(args.spec))
if not os.path.exists(args.ordering or ''):
args.ordering = './{0}'.format(ORDERING_FILE)
if not os.path.exists(args.ordering):
raise IOError('Ordering file is expected "./{0}" but is not visible'.format(ORDERING_FILE))
patchset = extract_spec_source_patches(args.spec)
for o_line in open(args.ordering).read().split(os.linesep):
if re.match(r'^[Pp]atch[0-9]+:', o_line):
ref, pname = [_f for _f in o_line.split(' ') if _f]
print(os.linesep.join(patchset.get(pname) or ['# Description N/A']))
print(ref.ljust(15), pname)
def do_create_patches(args):
'''
Create and reformat patches for the package.
'''
current_dir = os.path.abspath('.')
if not args.existing:
if os.listdir(current_dir):
print("Error: this directory has to be empty!")
sys.exit(1)
git_format_patch(args.format)
else:
if not [fname for fname in os.listdir(current_dir) if fname.endswith('.patch')]:
print("Error: can't find a single patch in {0} to work with!".format(current_dir))
sys.exit(1)
ord_fh = open(args.ordering or ORDERING_FILE, 'w')
ord_fh.write('#\n#\n# These are pre-generated snippets of patch ordering\n#\n')
ord_patches_p = []
patches = 0
for fname in os.listdir(current_dir):
if fname.split('.')[-1] == 'patch':
# Check if we should skip this patch in case subject starts with SKIP_TAG
with open(fname) as patch_file:
if any(re.match(r'^Subject: \[PATCH.*] {}'.format(re.escape(args.skip_tag)), i) for i in patch_file.readlines()):
print("Skipping {}".format(fname))
os.unlink(fname)
continue
print("Preparing {}".format(fname))
order, nfname = remove_order(fname)
if args.index is not None:
order += args.index
remove_order_from_subject(fname, nfname, use_unique=args.increment)
os.unlink(fname)
ord_fh.write('{patch}{fname}\n'.format(patch='Patch{0}:'.format(order).ljust(15), fname=nfname))
ord_patches_p.append(order)
patches += 1
if ord_patches_p:
ord_fh.write('#\n#\n# Patch processing inclusion:\n')
for order in ord_patches_p:
ord_fh.write('%patch{num} -p1\n'.format(num=order))
else:
ord_fh.write('# Nothing here, folks... :-(\n')
ord_fh.close()
print("\nRe-formatted {0} patch{1}".format(patches, patches > 1 and 'es' or ''))
def do_update_patches(args):
'''
Update patches on the target package source.
'''
print("Updating packages from {0} directory".format(args.update))
added = []
removed = []
changed = []
# Gather current patches
current_patches = {}
for fname in os.listdir(os.path.abspath(".")):
if fname.endswith('.patch'):
current_patches[os.path.basename(fname)] = True
for fname in os.listdir(args.update):
if fname.endswith('.patch'):
fname = os.path.join(args.update, fname)
if os.path.isfile(fname):
current_patches[os.path.basename(fname)] = False
n_fname = os.path.basename(fname)
if not os.path.exists(n_fname):
print("Adding {0} patch".format(fname))
shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
added.append(n_fname)
else:
if get_diff_contents(open(fname).read()) != get_diff_contents(open(n_fname).read()):
if args.changed:
print("Replacing {0} patch".format(n_fname))
os.unlink(n_fname)
shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
changed.append(n_fname)
else:
print("WARNING: Patches {0} and {1} are different!".format(fname, n_fname))
for fname in sorted([patch_name for patch_name, is_dead in list(current_patches.items()) if is_dead]):
print("Removing {0} patch".format(fname))
os.unlink(fname)
removed.append(fname)
# Generate an include for spec changes
with open(CHANGES_FILE, "w") as changes:
for title, data in [('Changed', changed), ('Added', added),
('Removed', removed)]:
if not data:
continue
print("- {}:".format(title), file=changes)
for fname in sorted(data):
print(" * {}".format(fname), file=changes)
print(file=changes)
if not removed and not added and not changed:
print("No files have been changed")
def main():
'''
Main app.
'''
VERSION = '0.2'
parser = argparse.ArgumentParser(description='Git patch formatter for RPM packages')
parser.add_argument('-u', '--update', action='store', const=None,
help='update current patches with the destination path')
parser.add_argument('-f', '--format', action='store', const=None,
help='specify tag or range of commits for patches to be formatted')
parser.add_argument('-o', '--ordering', action='store', const=None,
help='specify ordering spec inclusion file. Default: {0}'.format(ORDERING_FILE))
parser.add_argument('-x', '--index', action='store', const=None,
help='specify start ordering index. Default: 0')
parser.add_argument('-s', '--spec', action='store', const=None,
help='remix spec file and extract sources with their comments to match new patch ordering')
parser.add_argument('-i', '--increment', action='store_const', const=True,
help='use increments for unique names when patch commits repeated')
parser.add_argument('-c', '--changed', action='store_const', const=True,
help='update also changed files with the content')
parser.add_argument('-e', '--existing', action='store_const', const=True,
help='work with already formatted patches from Git')
parser.add_argument('-k', '--skip-tag', action='store', const=None, default='[skip]',
help='skip commits starting with this tag. Default: [skip]')
parser.add_argument('-v', '--version', action='store_const', const=True,
help='show version')
args = parser.parse_args()
try:
if args.index:
try:
args.index = int(args.index)
except ValueError:
raise Exception('Value "{0}" should be a digit'.format(args.index))
if args.version:
print("Version: {0}".format(VERSION))
elif args.spec:
do_remix_spec(args)
elif args.update and not args.format:
do_update_patches(args)
elif (args.format and not args.update) or args.existing:
do_create_patches(args)
else:
parser.print_help()
sys.exit(1)
except Exception as ex:
print("Critical error:", ex, file=sys.stderr)
if __name__ == '__main__':
main()
| 36.197917
| 129
| 0.606475
| 1,842
| 13,900
| 4.478284
| 0.226927
| 0.014547
| 0.020609
| 0.014547
| 0.158565
| 0.126682
| 0.083647
| 0.070796
| 0.059886
| 0.042672
| 0
| 0.008835
| 0.267122
| 13,900
| 383
| 130
| 36.292428
| 0.800923
| 0.215971
| 0
| 0.137168
| 0
| 0
| 0.164488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044248
| false
| 0
| 0.022124
| 0
| 0.084071
| 0.088496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a04efbad847960e56a6c9a8e43d4465164fb4801
| 5,455
|
py
|
Python
|
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
modules/dispatch.py
|
kex5n/Vehicles-Dispatch-Simulator
|
d0cca03fbf56e4b0ceeef8dafc59de105c1d4507
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
import random
from typing import List, Optional
import numpy as np
import torch
from config import Config
from domain import DispatchMode
from models import DQN
from modules.state import FeatureManager
from objects import Area, Vehicle
from objects.area import AreaManager
from objects.vehicle import VehicleManager
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
torch.backends.cudnn.deterministic = True
@dataclass(frozen=True)
class DispatchOrder:
vehicle_id: int
start_node_id: int
end_node_id: int
action: Optional[int]
from_area_id: Optional[int] = None
to_area_id: Optional[int] = None
class DispatchModuleInterface:
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle) -> DispatchOrder:
raise NotImplementedError
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager) -> List[DispatchOrder]:
raise NotImplementedError
class RandomDispatch(DispatchModuleInterface):
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle) -> DispatchOrder:
current_area: Area = area_manager.get_area_by_area_id(vehicle.location_area_id)
candidate_area_id = [current_area.id] + current_area.get_neighbor_ids()
next_area = area_manager.get_area_by_area_id(random.choice(candidate_area_id))
next_node_id = next_area.centroid
start_node_id = vehicle.location_node_id
return DispatchOrder(
vehicle_id=vehicle.id,
start_node_id=start_node_id,
end_node_id=next_node_id,
action=None,
)
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager) -> List[DispatchOrder]:
dispatch_order_list: List[DispatchOrder] = []
for area in area_manager.get_area_list():
for vehicle_id in area.get_idle_vehicle_ids():
vehicle = vehicle_manager.get_vehicle_by_vehicle_id(vehicle_id)
dispatch_order = self.dispatch(
area_manager=area_manager,
vehicle=vehicle,
)
dispatch_order_list.append(dispatch_order)
return dispatch_order_list
class DQNDispatch(DispatchModuleInterface):
def __init__(self, config: Config, is_train=False):
self.model = DQN(k=config.K, num_actions=9)
self.__feature_manager = FeatureManager(k=config.K)
self.is_train = is_train
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle, prediction, episode: int = 0, is_train: bool = False) -> DispatchOrder:
current_area = area_manager.get_area_by_area_id(vehicle.location_area_id)
candidate_area_id = [current_area.id] + current_area.get_neighbor_ids()
supply_array = np.array([area.num_idle_vehicles for area in area_manager.get_area_list()])
state_list = self.__feature_manager.calc_state(
area=current_area,
demand_array=prediction,
supply_array=supply_array
)
state_array = torch.FloatTensor(state_list)
action = self.model.get_action(state_array, episode=episode, candidate_area_ids=candidate_area_id, is_train=is_train)
next_area_id = candidate_area_id[action]
next_node_id = area_manager.get_area_by_area_id(next_area_id).centroid
return DispatchOrder(
vehicle_id=vehicle.id,
start_node_id=vehicle.location_node_id,
end_node_id=next_node_id,
action=action,
from_area_id=current_area.id,
to_area_id=next_area_id
)
def memorize(self, state, action, next_state, reward, from_area_id, to_area_id) -> None:
self.model.memorize(state, action, next_state, reward, from_area_id, to_area_id)
def train(self, area_manager: AreaManager, date_info, episode=None):
return self.model.update_q_function(area_manager=area_manager, date_info=date_info, episode=episode)
def save(self, checkpoint_path: str) -> None:
self.model.save_checkpoint(checkpoint_path)
def load(self, checkpoint_path: str) -> None:
self.model.load_checkpoint(checkpoint_path)
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager, prediction: np.ndarray, episode: int = 0) -> List[DispatchOrder]:
dispatch_order_list: List[DispatchOrder] = []
for area in area_manager.get_area_list():
for vehicle_id in area.get_idle_vehicle_ids():
vehicle = vehicle_manager.get_vehicle_by_vehicle_id(vehicle_id)
dispatch_order = self.dispatch(
area_manager=area_manager,
vehicle=vehicle,
episode=episode,
prediction=prediction,
is_train=self.is_train,
)
dispatch_order_list.append(dispatch_order)
return dispatch_order_list
def load_dispatch_component(dispatch_mode: DispatchMode, config: Config, is_train=False) -> DispatchModuleInterface:
if dispatch_mode == DispatchMode.DQN:
dispatch_module = DQNDispatch(config=config, is_train=is_train)
return dispatch_module
elif dispatch_mode == DispatchMode.RANDOM:
dispatch_module = RandomDispatch()
return dispatch_module
elif dispatch_mode == DispatchMode.NOT_DISPATCH:
return None
else:
raise NotImplementedError
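# Minimal usage sketch (assumes a populated Config plus area/vehicle managers):
#   dispatcher = load_dispatch_component(DispatchMode.RANDOM, config=config)
#   orders = dispatcher(area_manager, vehicle_manager)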
| 40.110294
| 148
| 0.700642
| 667
| 5,455
| 5.374813
| 0.164918
| 0.041841
| 0.029289
| 0.050767
| 0.521339
| 0.488982
| 0.488982
| 0.422873
| 0.391632
| 0.330544
| 0
| 0.004498
| 0.225665
| 5,455
| 135
| 149
| 40.407407
| 0.844223
| 0
| 0
| 0.309735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106195
| false
| 0
| 0.106195
| 0.00885
| 0.371681
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a05379809d542906a1e8b3ecab8d346bf1a2d752
| 2,272
|
py
|
Python
|
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | 5
|
2016-03-18T15:12:04.000Z
|
2019-01-28T20:18:24.000Z
|
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/sts/replayer_integration_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | 1
|
2019-11-02T22:04:48.000Z
|
2019-11-02T22:04:48.000Z
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
sys.path.append(os.path.dirname(__file__) + "/../../..")
simple_cfg = '''
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
simulation_config = SimulationConfig()
control_flow = Replayer(simulation_config, "%s")
'''
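# simple_cfg is rendered with the superlog path (simple_cfg % tmpsuperlog)
# and written out as a runnable config module in write_simple_cfg below.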
class ReplayerTest(unittest.TestCase):
tmpsuperlog = '/tmp/superlog.tmp'
tmpcfg = 'config/replayer_simple_test.py'
tmpcfgpyc = 'config/replayer_simple_test.pyc'
tmpcfgmodule = 'config.replayer_simple_test'
def write_simple_superlog(self):
''' Write a simple superlog to the temp path and close it. '''
superlog = open(self.tmpsuperlog, 'w')
e1 = str('''{"dependent_labels": ["e2"], "start_dpid": 8, "class": "LinkFailure",'''
''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e1", "time": [0,0], "round": 0}''')
superlog.write(e1 + '\n')
e2 = str('''{"dependent_labels": [], "start_dpid": 8, "class": "LinkRecovery",'''
''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e2", "time": [0,0], "round": 0}''')
superlog.write(e2 + '\n')
superlog.close()
def write_simple_cfg(self):
cfg = open(self.tmpcfg, 'w')
cfg.write(simple_cfg % self.tmpsuperlog)
cfg.close()
def basic_test(self):
try:
self.write_simple_superlog()
self.write_simple_cfg()
ret = os.system("./simulator.py -c %s" % self.tmpcfgmodule)
self.assertEqual(0, ret)
finally:
os.unlink(self.tmpsuperlog)
os.unlink(self.tmpcfg)
if os.path.exists(self.tmpcfgpyc):
os.unlink(self.tmpcfgpyc)
if __name__ == '__main__':
unittest.main()
| 33.910448
| 116
| 0.677377
| 308
| 2,272
| 4.840909
| 0.457792
| 0.040241
| 0.040241
| 0.04829
| 0.081824
| 0.081824
| 0.081824
| 0.04829
| 0.04829
| 0.04829
| 0
| 0.019159
| 0.172975
| 2,272
| 66
| 117
| 34.424242
| 0.774348
| 0.275088
| 0
| 0
| 0
| 0
| 0.33052
| 0.098453
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.073171
| false
| 0
| 0.121951
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a055c0b6a8a397cfaf7bde8f028637510c8a76bc
| 3,733
|
py
|
Python
|
responsive_dashboard/views.py
|
rhooper/django-responsive-dashboard
|
039d634cbefb87be610334c01bda1a790cf5cd71
|
[
"BSD-3-Clause"
] | 28
|
2015-07-08T01:03:17.000Z
|
2022-03-11T13:30:49.000Z
|
responsive_dashboard/views.py
|
burke-software/django-responsive-dashboard
|
e08d7a12155d87d78cb3928bcc58f2701d326b69
|
[
"BSD-3-Clause"
] | 4
|
2018-09-03T14:15:42.000Z
|
2021-06-10T17:24:09.000Z
|
responsive_dashboard/views.py
|
rhooper/django-responsive-dashboard
|
039d634cbefb87be610334c01bda1a790cf5cd71
|
[
"BSD-3-Clause"
] | 13
|
2015-01-15T14:33:30.000Z
|
2021-08-23T02:39:38.000Z
|
"""Views."""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from responsive_dashboard.dashboard import dashboards
from responsive_dashboard.models import UserDashboard, UserDashlet
# pylint: disable=no-member
@login_required
def generate_dashboard(request, app_name="", title=""):
"""Generate a dashboard view.
Generates a dashboard view by looking up the dashboard from its name.
responsive_dashboards is a list of all possible dashboards.
"""
dashboard_name = app_name
if title:
dashboard_name = '{0}__{1}'.format(app_name, title)
dashboard = dashboards.get_dashboard(dashboard_name)
if dashboard is None:
raise Http404("Dashboard does not exist")
user_dashboard = UserDashboard.objects.get_or_create(
dashboard_name=dashboard_name,
user=request.user,
)[0]
user_dashlets = user_dashboard.userdashlet_set.all()
dashlet_names = []
addable_dashlet_names = []
for dashlet in dashboard.dashlets:
dashlet.set_request(request)
if (dashlet.is_default() and
not user_dashlets.filter(dashlet_name=dashlet.title)):
user_dashlets.create(dashlet_name=dashlet.title, user_dashboard=user_dashboard)
dashlet_names += [dashlet.title]
if dashlet.allow_multiple or user_dashlets.filter(deleted=False, dashlet_name=dashlet.title).count() == 0:
addable_dashlet_names += [dashlet.title]
    user_dashlets = user_dashlets.filter(
        dashlet_name__in=dashlet_names,
        deleted=False,
    )
for user_dashlet in user_dashlets:
for dashlet in dashboard.dashlets:
if dashlet.title == user_dashlet.dashlet_name:
dashlet.user_dashlet = user_dashlet # Lets us access per user settings in templates
user_dashlet.dashlet = dashlet
break
    include_jquery = bool(getattr(settings, 'RESPONSIVE_DASHBOARD_INCLUDE_JQUERY', False))
return render(request, dashboard.template_name, {
'dashboard': dashboard,
'dashlets': user_dashlets,
'new_dashlet_names': addable_dashlet_names,
'app_name': app_name,
'title': title,
'include_jquery': include_jquery
})
@login_required
def ajax_reposition(request, **kwargs):
""" Save the position field in the user dashlet
django-positions should take care of everythign """
dashlet = UserDashlet.objects.get(
user_dashboard__user=request.user, id=request.POST['dashlet_id'])
dashlet.position = int(request.POST['position'])
dashlet.save()
return HttpResponse('SUCCESS')
@login_required
def ajax_delete(request, **kwargs):
""" Delete user dashlet by marking as deleted. """
dashlet = UserDashlet.objects.get(
user_dashboard__user=request.user, id=request.POST['dashlet_id'])
dashlet.deleted = True
dashlet.save()
return HttpResponse('SUCCESS')
@login_required
def add_dashlet(request, app_name="", title=""):
""" Add a new user dashlet then reload the page """
dashboard_name = app_name
if title:
dashboard_name = '{0}__{1}'.format(app_name, title)
user_dashboard = UserDashboard.objects.get_or_create(
dashboard_name=dashboard_name,
user=request.user,
)[0]
dashlet_name = request.GET['dashlet_name']
if not dashlet_name:
raise Exception('Cannot add a null dashlet')
UserDashlet.objects.create(
user_dashboard=user_dashboard,
dashlet_name=dashlet_name,
)
return redirect(request.META['HTTP_REFERER'])
| 34.88785
| 114
| 0.698902
| 445
| 3,733
| 5.633708
| 0.258427
| 0.043877
| 0.023933
| 0.027523
| 0.338652
| 0.225768
| 0.225768
| 0.225768
| 0.184284
| 0.184284
| 0
| 0.004401
| 0.208679
| 3,733
| 106
| 115
| 35.216981
| 0.844279
| 0.112242
| 0
| 0.35
| 0
| 0
| 0.069547
| 0.010723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.075
| 0
| 0.175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a05679a1770f12c767a08a09a8c1456749cc03d4
| 769
|
py
|
Python
|
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T06:36:25.000Z
|
2021-03-06T06:36:25.000Z
|
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | null | null | null |
app/config.py
|
tomakado/markovscope-api
|
3dd60439d980e3b77429850f1b43cb37ffd02f99
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from dataclasses import dataclass
@dataclass(frozen=True)
class Config:
listen_host: str
listen_port: int
yc_oauth_token: str
yc_folder_id: str
data_path: str
debug_enabled: bool
@staticmethod
def create_from_env() -> 'Config':
return Config(
listen_host=os.getenv('LISTEN_HOST', '0.0.0.0'),
listen_port=int(os.getenv('LISTEN_PORT', 8000)),
yc_oauth_token=os.getenv('YC_OAUTH_TOKEN'),
yc_folder_id=os.getenv('YC_FOLDER_ID'),
data_path=os.getenv('DATA_PATH'),
            debug_enabled=os.getenv('DEBUG_ENABLED') in ['1', 'true', 'yes', 'on'],
)
CONFIG = Config.create_from_env()
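# A minimal usage sketch (hypothetical values; assumes the variables are set
# before this module is imported):
#
#   LISTEN_PORT=9000 DEBUG_ENABLED=yes python -c "from app.config import CONFIG"
#
# then CONFIG.listen_port == 9000 and CONFIG.debug_enabled is True; unset
# variables fall back to the defaults passed to os.getenv above (None where no
# default is given).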
| 25.633333
| 80
| 0.595579
| 99
| 769
| 4.343434
| 0.393939
| 0.111628
| 0.083721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.286086
| 769
| 29
| 81
| 26.517241
| 0.766849
| 0
| 0
| 0
| 0
| 0
| 0.120936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0.041667
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0578bb9313ba5a000fe92b2495ddd4a94b1be7e
| 15,486
|
py
|
Python
|
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | 1
|
2020-12-31T14:54:49.000Z
|
2020-12-31T14:54:49.000Z
|
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | null | null | null |
atlas/atlas.py
|
pythseq/atlas
|
6fd8d9e8ad05d234fc408aef8e0989da199f3b48
|
[
"BSD-3-Clause"
] | null | null | null |
import click
import logging
import multiprocessing
import os
import sys
from atlas import __version__
from atlas.conf import make_config
from atlas.parsers import refseq_parser
from atlas.tables import merge_tables
from atlas.workflows import download, run_workflow
logging.basicConfig(level=logging.INFO, datefmt="%Y-%m-%d %H:%M", format="[%(asctime)s %(levelname)s] %(message)s")
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
@click.version_option(__version__)
@click.pass_context
def cli(obj):
"""ATLAS - a framework for assembly, annotation, and genomic binning of metagenomic and
metatranscriptomic data.
For updates and reporting issues, see: https://github.com/pnnl/atlas
"""
@cli.command("refseq", short_help="enables tree based LCA and LCA star methods")
@click.argument("tsv", type=click.Path(exists=True))
@click.argument("namemap", type=click.Path(exists=True))
@click.argument("treefile", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("-s", "--summary-method", type=click.Choice(["lca", "majority", "best"]), default="lca", show_default=True, help="summary method for annotating ORFs; when using LCA, it's recommended that one limits the number of hits using --top-fraction though function will be assigned per the best hit; 'best' is fastest")
@click.option("-a", "--aggregation-method", type=click.Choice(["lca", "lca-majority", "majority"]), default="lca-majority", show_default=True, help="summary method for aggregating ORF taxonomic assignments to contig level assignment; 'lca' will result in most stringent, least specific assignments")
@click.option("--majority-threshold", type=float, default=0.51, show_default=True, help="constitutes a majority fraction at tree node for 'lca-majority' ORF aggregation method")
@click.option("--min-identity", type=int, default=70, show_default=True, help="minimum allowable percent ID of BLAST hit")
@click.option("--min-bitscore", type=int, default=0, show_default=True, help="minimum allowable bitscore of BLAST hit; 0 disables")
@click.option("--min-length", type=int, default=60, show_default=True, help="minimum allowable BLAST alignment length")
@click.option("--max-evalue", type=float, default=0.000001, show_default=True, help="maximum allowable e-value of BLAST hit")
@click.option("--max-hits", type=int, default=10, show_default=True, help="maximum number of BLAST hits to consider when summarizing ORFs; can drastically alter ORF LCA assignments if too high without further limits")
@click.option("--table-name", default="refseq", help="table name within namemap database; expected columns are 'name', 'function', and 'taxonomy'")
@click.option("--top-fraction", type=float, default=1, show_default=True, help="filters ORF BLAST hits by only keep hits within this fraction of the highest bitscore; this is recommended over --max-hits")
def run_refseq_parser(tsv, namemap, treefile, output, summary_method, aggregation_method, majority_threshold, min_identity, min_bitscore, min_length, max_evalue, max_hits, table_name, top_fraction):
"""Parse TSV (tabular BLAST output [-outfmt 6]), grabbing taxonomy metadata from ANNOTATION to
compute LCAs.
The BLAST hits are assumed to be sorted by query with decreasing bitscores (best alignment first):
\b
sort -k1,1 -k12,12rn tsv > sorted_tsv
Annotation file should include your BLAST subject sequence ID, a function, a taxonomy name,
the taxonomy ID, and the parent taxonomy ID. This file is generated from `prepare-refseq`:
\b
gi|507051347|ref|WP_016122340.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
gi|507052147|ref|WP_016123132.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
gi|507053266|ref|WP_016124222.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
The RefSeq function is always derived from the best BLAST hit.
The output will give contig, ORF ID, the lineage assigned to the contig based on
--aggregation-method, the probability of error (erfc), taxonomy assigned to the ORF, the
best hit's product, the best hit's evalue, and the best hit's bitscore:
\b
contig orf taxonomy erfc orf_taxonomy refseq refseq_evalue refseq_bitscore
k121_52126 k121_52126_1 root 1.0 root hypothetical protein 1.0e-41 176.8
"""
refseq_parser(tsv, namemap, treefile, output, summary_method, aggregation_method, majority_threshold, min_identity, min_bitscore, min_length, max_evalue, max_hits, table_name, top_fraction)
@cli.command("gff2tsv", short_help="writes version of Prokka TSV with contig as new first column")
@click.argument("gff", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("--feature-type", default="CDS", show_default=True, help="feature type in GFF annotation to print")
def run_gff_to_tsv(gff, output, feature_type):
import re
locus_tag_re = re.compile(r"locus_tag=(.*?)(?:;|$)")
ec_re = re.compile(r"eC_number=(.*?)(?:;|$)")
gene_re = re.compile(r"gene=(.*?)(?:;|$)")
product_re = re.compile(r"product=(.*?)(?:;|$)")
# print the header into the output file
print("contig_id", "locus_tag", "ftype", "gene", "EC_number", "product", sep="\t", file=output)
with open(gff) as gff_fh:
for line in gff_fh:
if line.startswith("##FASTA"):
break
if line.startswith("#"):
continue
toks = line.strip().split("\t")
            if toks[2] != feature_type:
continue
try:
locus_tag = locus_tag_re.findall(toks[-1])[0]
except IndexError:
locus_tag = ""
if not locus_tag:
logging.critical("Unable to locate a locus tag in [%s]" % toks[-1])
sys.exit(1)
try:
gene = gene_re.findall(toks[-1])[0]
except IndexError:
gene = ""
try:
ec_number = ec_re.findall(toks[-1])[0]
except IndexError:
ec_number = ""
try:
product = product_re.findall(toks[-1])[0]
except IndexError:
product = ""
print(toks[0], locus_tag, toks[2], gene, ec_number, product, sep="\t", file=output)
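# Illustration (hypothetical attribute column): for a CDS line whose last field is
#   ID=PROKKA_00001;locus_tag=PROKKA_00001;gene=dnaA;product=hypothetical protein
# the regexes above pull out locus_tag, gene, EC_number, and product; absent keys
# fall back to "", except a missing locus_tag, which aborts the run.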
@cli.command("munge-blast", short_help="adds contig ID to prokka annotated ORFs")
@click.argument("tsv", type=click.Path(exists=True))
@click.argument("gff", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("--gene-id", default="ID", show_default=True, help="tag in gff attributes corresponding to ORF ID")
def run_munge_blast(tsv, gff, output, gene_id):
"""Prokka ORFs are reconnected to their origin contigs using the GFF of the Prokka output.
Contig output is re-inserted as column 1, altering blast hits to be tabular + an extra initial
column that will be used to place the ORFs into context.
"""
import re
gff_map = dict()
logging.info("step 1 of 2; parsing %s" % gff)
# gff attrs: ID=Flavobacterium_00802;inference=ab initio prediction:Prodigal:2.60;...
orf_id_re = re.compile(r"%s=(.*?)\;" % gene_id)
with open(gff) as prokka_gff:
for line in prokka_gff:
if line.startswith("##FASTA"):
break
if line.startswith("#"):
continue
toks = line.strip().split("\t")
try:
orf_id = orf_id_re.findall(toks[-1])[0]
except IndexError:
# some, like repeat regions, will not have a locus_tag=, but they also will not
# be in the .faa file that is being locally aligned
logging.warning("Unable to locate ORF ID using '%s' for line '%s'" % (gene_id, " ".join(toks)))
continue
gff_map[orf_id] = toks[0]
logging.info("step 2 of 2; parsing %s" % tsv)
# example blast hit:
# Flavobacterium_00002 gi|500936490|ref|WP_012025625.1| 100.0 187 0 0 1 187 1 187 1.7e-99 369.8
with open(tsv) as blast_hits:
for line in blast_hits:
toks = line.strip().split("\t")
try:
toks.insert(0, gff_map[toks[0]])
except KeyError:
logging.critical("%s was not found in the GFF [%s]" % (toks[0], gff))
logging.critical("processing of %s was halted" % tsv)
sys.exit(1)
print(*toks, sep="\t", file=output)
@cli.command("merge-tables", short_help="merge Prokka TSV, Counts, and Taxonomy")
@click.argument("prokkatsv", type=click.Path(exists=True))
@click.argument("refseqtsv", type=click.Path(exists=True))
@click.argument("output")
@click.option("--counts", type=click.Path(exists=True), help="Feature Counts result TSV")
@click.option("--completeness", type=click.Path(exists=True), help="CheckM completeness TSV")
@click.option("--taxonomy", type=click.Path(exists=True), help="CheckM taxonomy TSV")
@click.option("--fasta", multiple=True, type=click.Path(exists=True), help="Bin fasta file path; can be specified multiple times")
def run_merge_tables(prokkatsv, refseqtsv, output, counts, completeness, taxonomy, fasta):
"""Combines Prokka TSV, RefSeq TSV, and Counts TSV into a single table, merging on locus tag.
"""
merge_tables(prokkatsv, refseqtsv, output, counts, completeness, taxonomy, fasta)
@cli.command("make-config", short_help="prepopulate a configuration file with samples and defaults")
@click.argument("config")
@click.argument("path")
@click.option("--assembler", default="megahit",
type=click.Choice(["megahit", "spades"]),
show_default=True, help="contig assembler")
@click.option("--data-type", default="metagenome",
type=click.Choice(["metagenome", "metatranscriptome"]),
show_default=True, help="sample data type")
@click.option("--database-dir", default="databases", show_default=True,
help="location of formatted databases (from `atlas download`)")
# @click.option("--process", default="assemble",
# type=click.Choice(["annotate", "assemble"]),
# help="additional fields in the configuration file have no effect on the protocol, to limit the options for annotation only set `--process annotate`")
@click.option("--threads", default=multiprocessing.cpu_count(), type=int,
help="number of threads to use per multi-threaded job")
def run_make_config(config, path, data_type, database_dir, threads, assembler):
"""Write the file CONFIG and complete the sample names and paths for all
FASTQ files in PATH.
PATH is traversed recursively and adds any file with '.fastq' or '.fq' in
the file name with the file name minus extension as the sample ID.
"""
make_config(config, path, data_type, database_dir, threads, assembler)
@cli.command("QC", context_settings=dict(ignore_unknown_options=True), short_help="quality control workflow (without assembly)")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_qc(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the ATLAS Quality control protocol, the first step of the workflow.
A skeleton configuration file can be generated with defaults using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
    run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args, workflow="qc")
@cli.command("assemble", context_settings=dict(ignore_unknown_options=True), short_help="assembly workflow")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_assemble(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the complete ATLAS protocol from raw reads through assembly, annotation, quantification,
and genomic binning.
A skeleton configuration file can be generated with defaults using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
    run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args, workflow="complete")
@cli.command("annotate", context_settings=dict(ignore_unknown_options=True), short_help="annotation workflow")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_annotate(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the ATLAS annotation protocol on assembled contigs. If FASTQ files are provided
for a sample, quantification is also performed.
A skeleton configuration file can be generated using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
    run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args, workflow="annotate")
@cli.command("download", context_settings=dict(ignore_unknown_options=True), short_help="download reference files")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="number of simultaneous downloads")
@click.option("-o", "--out-dir", default=os.path.join(os.path.realpath("."), "databases"), show_default=True, help="database download directory")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_download(jobs, out_dir, snakemake_args):
"""Executes a snakemake workflow to download reference database files and validate based on
their MD5 checksum.
"""
download(jobs, out_dir, snakemake_args)
if __name__ == "__main__":
cli()
| 54.336842
| 323
| 0.694692
| 2,154
| 15,486
| 4.896007
| 0.211699
| 0.036507
| 0.039826
| 0.050446
| 0.44671
| 0.427935
| 0.397876
| 0.359662
| 0.346482
| 0.314242
| 0
| 0.017597
| 0.174351
| 15,486
| 284
| 324
| 54.528169
| 0.807211
| 0.228206
| 0
| 0.331325
| 0
| 0.042169
| 0.291407
| 0.003762
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0.006024
| 0.072289
| 0
| 0.13253
| 0.024096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0590b43efec682503f6e281362973bb8f85de85
| 1,101
|
py
|
Python
|
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | null | null | null |
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | 237
|
2020-07-23T05:53:22.000Z
|
2022-03-30T23:02:35.000Z
|
tests/unit/core/types/test_relationships.py
|
jeffsawatzky/python-jsonapi
|
8f181d6764b525f58d06517c65b1f0d24f3c2282
|
[
"MIT"
] | null | null | null |
"""Test cases for the python_jsonapi.core.types.relationships module."""
from python_jsonapi.core.types.relationships import Relationship
from python_jsonapi.core.types.relationships import RelationshipsMixin
def test_relationship_init() -> None:
"""Can init a new relationships."""
sut = Relationship()
assert sut is not None
def test_mixin_init() -> None:
"""Can init a new mixin."""
sut = RelationshipsMixin()
assert sut is not None
relationship = Relationship()
sut = RelationshipsMixin(relationships={"self": relationship})
assert sut is not None
assert sut.relationships is not None
assert sut.relationships["self"] == relationship
def test_mixin_add_relationship() -> None:
"""Can add a new entry."""
sut = RelationshipsMixin()
sut.add_relationship(key="relationship1", relationship=Relationship())
sut.add_relationship(key="relationship2", relationship=Relationship())
assert sut.relationships is not None
assert sut.relationships["relationship1"] is not None
assert sut.relationships["relationship2"] is not None
| 34.40625
| 74
| 0.735695
| 130
| 1,101
| 6.138462
| 0.238462
| 0.090226
| 0.078947
| 0.075188
| 0.473684
| 0.407268
| 0.245614
| 0.132832
| 0.132832
| 0
| 0
| 0.004338
| 0.162579
| 1,101
| 31
| 75
| 35.516129
| 0.861171
| 0.126249
| 0
| 0.35
| 0
| 0
| 0.063762
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a05a419a9ddf5084b706e695f35bb68b2e11e8f7
| 698
|
py
|
Python
|
app/accounts/utilities.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 6
|
2017-12-13T21:53:05.000Z
|
2018-10-04T02:47:05.000Z
|
app/accounts/utilities.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 106
|
2019-08-11T23:00:39.000Z
|
2021-06-10T19:45:54.000Z
|
app/accounts/utilities.py
|
KryptedGaming/kryptedauth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 10
|
2020-01-18T11:28:44.000Z
|
2022-02-21T06:08:39.000Z
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
def username_or_email_resolver(username):
if User.objects.filter(email=username).exists():
return User.objects.get(email=username).username
else:
return username
def send_activation_email(user):
send_mail(
'Verify your Krypted account',
'Welcome to %s. \n Please click the following link to verify your account. \n https://%s/accounts/activate/%s' % (
settings.SITE_TITLE, settings.SITE_DOMAIN, user.info.secret),
settings.DEFAULT_FROM_EMAIL,
[user.email],
fail_silently=False)
| 33.238095
| 123
| 0.684814
| 90
| 698
| 5.177778
| 0.555556
| 0.064378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222063
| 698
| 20
| 124
| 34.9
| 0.858195
| 0
| 0
| 0
| 0
| 0.0625
| 0.199115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a05f5a0fc89824667b995e5851cdb833729517df
| 970
|
py
|
Python
|
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
mypage/paginator.py
|
kirill-ivanov-a/mypage-flask
|
b803dfdf3d38d32879d81b8682d51e387c8f709f
|
[
"MIT"
] | null | null | null |
from paginate_sqlalchemy import SqlalchemyOrmPage
class Paginator(SqlalchemyOrmPage):
def __init__(self, *args, radius=3, **kwargs):
super().__init__(*args, **kwargs)
self.radius = radius
self.page_range = self._make_page_range()
def _make_page_range(self):
if self.page_count < self.radius:
return list(p for p in range(1, self.page_count + 1))
if self.page - self.radius > 2:
page_range = [self.first_page, None]
page_range += list(p for p in range(self.page - self.radius, self.page))
else:
page_range = list(p for p in range(1, self.page))
if self.page + self.radius < self.last_page - 1:
page_range += list(p for p in range(self.page, self.page + self.radius + 1))
page_range += [None, self.last_page]
else:
page_range += list(p for p in range(self.page, self.last_page + 1))
return page_range
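# Worked example (hypothetical numbers): with page_count=20, radius=3, and the
# current page at 10, _make_page_range() yields
#   [1, None, 7, 8, 9, 10, 11, 12, 13, None, 20]
# where each None marks an ellipsis gap for the pagination widget to render.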
| 34.642857
| 88
| 0.609278
| 138
| 970
| 4.072464
| 0.224638
| 0.156584
| 0.128114
| 0.080071
| 0.427046
| 0.330961
| 0.330961
| 0.330961
| 0.330961
| 0.270463
| 0
| 0.011445
| 0.279381
| 970
| 27
| 89
| 35.925926
| 0.792561
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a06437850e2dae1448abd64b704f6b42218ef386
| 968
|
py
|
Python
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 1,428
|
2018-10-03T15:15:17.000Z
|
2019-03-31T18:38:36.000Z
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 1,162
|
2018-10-03T15:05:49.000Z
|
2018-10-18T14:17:52.000Z
|
Python/logging.py
|
saurabhcommand/Hello-world
|
647bad9da901a52d455f05ecc37c6823c22dc77e
|
[
"MIT"
] | 3,909
|
2018-10-03T15:07:19.000Z
|
2019-03-31T18:39:08.000Z
|
import datetime
# Logging helper parameterized by the log file name
class logging_class:
def __init__(self, log_file_name, verbose):
self.log_file_name = log_file_name
self.stream = open(log_file_name, "a")
self.verbose = verbose
# Write a line in the log file
def create_log(self, to_add):
if (to_add != "\n"):
self.stream.write(str(datetime.datetime.now().replace(microsecond=0)))
if (self.verbose is True):
print (str(datetime.datetime.now().replace(microsecond=0)), end = " ")
if (self.verbose is True):
print(" ", end = " ")
print(to_add)
self.stream.write(" ")
self.stream.write(to_add)
self.stream.write("\n")
    # Add log lines; behavior changes depending on whether the input is a str or an iterable
def add_logging(self, to_add):
        if isinstance(to_add, str):
self.create_log(to_add)
else:
for ii in to_add:
self.create_log(ii)
| 31.225806
| 86
| 0.572314
| 131
| 968
| 4.030534
| 0.320611
| 0.075758
| 0.083333
| 0.056818
| 0.32197
| 0.246212
| 0.155303
| 0
| 0
| 0
| 0
| 0.002972
| 0.304752
| 968
| 30
| 87
| 32.266667
| 0.781575
| 0.095041
| 0
| 0.086957
| 0
| 0
| 0.012629
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0
| 0.217391
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a064737d7eb5496d755ad0d39ca50e2c9279c4d9
| 10,541
|
py
|
Python
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | 5
|
2021-07-30T16:01:46.000Z
|
2021-12-14T15:24:29.000Z
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | 2
|
2021-08-13T12:19:13.000Z
|
2021-10-06T08:04:18.000Z
|
tfep/utils/cli/tool.py
|
andrrizzi/tfep
|
a98ec870007a2ceb72cab147d9e0dfffb7dc8849
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Utility classes to wrap command line tools.
The module provides a class :class:`.CLITool` with boilerplate code to wrap
command line tools and make them compatible with :class:`~tfep.utils.cli.Launcher`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import abc
import inspect
import os
# =============================================================================
# CLITOOL
# =============================================================================
class CLITool:
"""Command line tool wrapper.
    The class mainly fulfills three roles:
1. Encapsulates input and outputs of a command and provide a command
specification that can be understood by :class:`tfep.utils.cli.Launcher`.
2. Converts and sanitizes Python types to string command line parameters.
    3. Provides CLI interfaces with readable parameter names, avoiding abbreviations
       that make the code harder to read.
Wrapping a new command line tool requires creating a new class that inherits
from ``CLITool`` and defines its arguments using the options descriptors such
as :class:`.AbsolutePathOption` and :class:`.FlagOption` (see examples below).
The constructor takes as input ordered and keyword arguments. Keyword arguments
must match those defined with the option descriptors when the wrapper is declared.
    Ordered arguments are converted to strings and appended to the command.
The path to the executable (or simply the executable name if it is in the
system path) can be set globally through the class variable ``EXECUTABLE_PATH``,
or it can be specific to the command instance as specified in the constructor.
    To associate a command with a particular subprogram, you can use the
    ``SUBPROGRAM`` class variable. E.g., for the gmx program in the GROMACS suite,
    creating a ``CLITool`` that prepares a ``gmx mdrun ...`` command requires
    setting ``SUBPROGRAM = 'mdrun'``.
Once defined and instantiated, a command can be run either using a
:class:`~tfep.utils.cli.Launcher` class or the standard module ``subprocess``
after building the command with the :func:`.CLITool.to_subprocess` method.
Parameters
----------
executable_path : str, optional
The executable path associated to the instance of the command. If this
is not specified, the ``EXECUTABLE_PATH`` class variable is used instead.
See Also
--------
`tfep.utils.cli.Launcher` : Launch and run commands.
Examples
--------
Suppose we want to create a wrapper for a subset of the command ``grep``
that supports reading the pattern from a file. We can create a wrapper
with the following syntax
>>> class MyGrep(CLITool):
... EXECUTABLE_PATH = 'grep'
... patterns_file_path = KeyValueOption('-f')
... max_count = KeyValueOption('-m')
... print_version = FlagOption('-v')
    You can then create a command instance specifying the options. For example,
    :class:`.FlagOption`s take either ``True`` or ``False``.
>>> my_grep_cmd = MyGrep(print_version=True)
    You can then pass the command to a :class:`~tfep.utils.cli.Launcher` or use
    the :func:`.CLITool.to_subprocess` method to convert the command to a
    sanitized ``list`` that can be executed by the Python standard module
    ``subprocess``.
>>> my_grep_cmd.to_subprocess()
['grep', '-v']
    A more complex example:
>>> my_grep_cmd = MyGrep('input.txt', patterns_file_path='my_patterns.txt', max_count=3)
>>> my_grep_cmd.to_subprocess()
['grep', '-m', '3', '-f', 'my_patterns.txt', 'input.txt']
"""
SUBPROGRAM = None
def __init__(self, *args, executable_path=None, **kwargs):
self.args = args
self._executable_path = executable_path
# Check that keyword arguments match.
options_descriptions = self._get_defined_options()
for k, v in kwargs.items():
if k not in options_descriptions:
raise AttributeError('Undefined CLI option ' + k)
# Set the value.
setattr(self, k, v)
@property
def executable_path(self):
"""The path to the command executable to run."""
if self._executable_path is None:
return self.EXECUTABLE_PATH
return self._executable_path
@executable_path.setter
def executable_path(self, value):
self._executable_path = value
def to_subprocess(self):
"""Convert the command to a list that can be run with the ``subprocess`` module.
Returns
-------
subprocess_cmd : List[str]
The command in subprocess format. For example ``['grep', '-v']``.
"""
subprocess_cmd = [self.executable_path]
# Add subprogram
if self.SUBPROGRAM is not None:
subprocess_cmd.append(self.SUBPROGRAM)
# Append all options.
for option_descriptor in self._get_defined_options().values():
subprocess_cmd.extend(option_descriptor.to_subprocess(self))
# Append all ordered args.
subprocess_cmd.extend([str(x) for x in self.args])
return subprocess_cmd
@classmethod
def _get_defined_options(cls):
"""Return a dict attribute_name -> description object for all CLIOptions defined."""
options_descriptors = {}
for attribute_name, descriptor_object in inspect.getmembers(cls, inspect.isdatadescriptor):
if isinstance(descriptor_object, CLIOption):
options_descriptors[attribute_name] = descriptor_object
return options_descriptors
# =============================================================================
# CLI OPTIONS
# =============================================================================
class CLIOption(abc.ABC):
"""Generic descriptor for command line option.
    This must be inherited by all options so that :class:`.CLITool` can automatically
discover the option. To implement this, it is sufficient to provide an
implementation of the ``to_subprocess()`` method, which takes the object
instance as input and outputs a list with the strings to append to the
command in ``subprocess`` format.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def __init__(self, option_name):
self.option_name = option_name
def __set_name__(self, owner_type, name):
self.public_name = name
self.private_name = '_' + name
def __get__(self, owner_instance, owner_type):
if owner_instance is None:
            # This was called from the owner class. Return the descriptor object.
return self
return getattr(owner_instance, self.private_name, None)
def __set__(self, owner_instance, value):
setattr(owner_instance, self.private_name, value)
@abc.abstractmethod
def to_subprocess(self, owner_instance):
"""Return the strings to append to the command in ``subprocess`` format.
For example, it might return something like ``['-o', 'path_to_my_file.txt']``.
"""
pass
class KeyValueOption(CLIOption):
"""A generic command line key-value option.
This descriptor simply converts the value to string.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def to_subprocess(self, owner_instance):
"""Implements ``CLIOption.to_subprocess()``."""
value = getattr(owner_instance, self.private_name, None)
if value is None:
return []
return [self.option_name, str(value)]
class AbsolutePathOption(KeyValueOption):
"""A file or directory path that is converted to an absolute path when instantiated.
    Relative file paths change with the current working directory. This
option type converts relative paths to absolute paths when the option is
assigned so that it refers to the same file even if the working directory
is changed.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def __set__(self, owner_instance, value):
abs_path = os.path.abspath(value)
setattr(owner_instance, self.private_name, abs_path)
class FlagOption(CLIOption):
"""A generic command line flag option.
    This descriptor accepts only ``True``/``False`` or ``None`` and specifies
    CLI flag parameters (i.e., parameters that do not take a value). If ``None``,
    the flag is not passed to the command. If ``False``, its behavior depends on
    the ``prepend_to_false`` parameter (see below).
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
prepend_to_false : str, optional
If given and the descriptor is ``False``, this string (typically ``'no'``)
is inserted into the flag passed to the command right after the dash
character(s).
"""
def __init__(self, option_name, prepend_to_false=None):
super().__init__(option_name)
self.prepend_to_false = prepend_to_false
def __set__(self, owner_instance, value):
if not isinstance(value, bool) and value is not None:
raise ValueError(self.public_name + ' must be either a boolean or None')
setattr(owner_instance, self.private_name, value)
def to_subprocess(self, owner_instance):
"""Implements ``CLIOption.to_subprocess()``."""
value = getattr(owner_instance, self.private_name, None)
if (value is None or (
(not value and self.prepend_to_false is None))):
return []
if value is True:
return [self.option_name]
# value is False and self.prepend_to_false is not None.
if self.option_name.startswith('--'):
n_dashes = 2
else:
n_dashes = 1
option_name = self.option_name[:n_dashes] + self.prepend_to_false + self.option_name[n_dashes:]
return [option_name]
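# Worked example (hypothetical option): FlagOption('--verify', prepend_to_false='no')
# serializes to ['--verify'] when set to True, ['--noverify'] when set to False,
# and [] when left as None.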
| 36.223368
| 103
| 0.626411
| 1,297
| 10,541
| 4.950655
| 0.231303
| 0.028033
| 0.017443
| 0.022426
| 0.238748
| 0.1791
| 0.129263
| 0.096247
| 0.096247
| 0.096247
| 0
| 0.000859
| 0.226734
| 10,541
| 290
| 104
| 36.348276
| 0.786897
| 0.60573
| 0
| 0.146341
| 0
| 0
| 0.015672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.182927
| false
| 0.012195
| 0.036585
| 0
| 0.426829
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a06853a9eca27d640f292fe2b2ffaac04fbafad7
| 1,128
|
py
|
Python
|
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
invite2app/lib/facebook_auth.py
|
andresgz/invite2app
|
3531db131c4f0646ae01b511971d6642128361e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import facebook
from allauth.socialaccount.models import SocialToken
from django.core.exceptions import ObjectDoesNotExist
class FacebookAuth(object):
"""
    Interface between Django AllAuth and the Facebook SDK
"""
def __init__(self, user_id):
super(FacebookAuth, self).__init__()
        # Only integers are allowed
        if not isinstance(user_id, int):
            raise TypeError("An integer is expected")
self.user_id = user_id
def get_graph(self):
"""
Returns a Graph object to be used on the Facebook SDK.
"""
return facebook.GraphAPI(access_token=self.get_access_token())
def get_access_token(self):
"""
Get a valid token for the user from AllAuth
"""
try:
token = SocialToken.objects.get(
account__user_id=self.user_id).token
except ObjectDoesNotExist:
raise NotValidFacebookAccount("A token has not been found")
return token
class NotValidFacebookAccount(Exception):
"""
NotValidAccount Exception.
"""
pass
| 27.512195
| 71
| 0.638298
| 126
| 1,128
| 5.539683
| 0.539683
| 0.051576
| 0.04298
| 0.051576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001225
| 0.276596
| 1,128
| 40
| 72
| 28.2
| 0.854167
| 0.199468
| 0
| 0
| 0
| 0
| 0.058182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0.05
| 0.15
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0767541c421c26c6de084316db254c59c03c5d0
| 17,875
|
py
|
Python
|
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
web_site/wx/lib.py
|
Fixdq/dj-deep
|
6712a722c7f620b76f21b1ebf0b618f42eb4a58a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2014-5-13
@author: skycrab
"""
import json
import time
import random
import string
import urllib
import hashlib
import threading
import traceback
import xml.etree.ElementTree as ET
import logging
from urllib import request as urllib2
from urllib.parse import quote
from functools import wraps
from .config import WxPayConf, WxPayConf_shop
try:
import pycurl
from cStringIO import StringIO
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
logger = logging.getLogger('control')
def catch(func):
@wraps(func)
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
except Exception as e:
print(traceback.format_exc())
return None
return wrap
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
class Singleton(object):
"""可配置单例模式"""
_instance_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
with cls._instance_lock:
if not hasattr(cls, "_instance"):
impl = cls.configure() if hasattr(cls, "configure") else cls
instance = super(Singleton, cls).__new__(impl, *args, **kwargs)
if not isinstance(instance, cls):
instance.__init__(*args, **kwargs)
cls._instance = instance
return cls._instance
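# A minimal sketch of the configure() hook (hypothetical subclass):
#
#   class ApiClient(Singleton):
#       @classmethod
#       def configure(cls):
#           return ApiClient  # may return an alternative implementation class
#
# ApiClient() is constructed once under the lock; every later call returns the
# same cached instance.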
class class_property(object):
""" A property can decorator class or instance
class Foo(object):
@class_property
def foo(cls):
return 42
print(Foo.foo)
print(Foo().foo)
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
value = self.func(type)
return value
class BaseHttpClient(object):
include_ssl = False
def get(self, url, second=30):
if self.include_ssl:
return self.postXmlSSL(None, url, second, False, post=False)
else:
return self.postXml(None, url, second)
def postXml(self, xml, url, second=30):
if self.include_ssl:
return self.postXmlSSL(xml, url, second, cert=False)
else:
raise NotImplementedError("please implement postXML")
def postXmlSSL(self, xml, url, second=30, cert=True, cert_path=WxPayConf.SSLCERT_PATH,
key_path=WxPayConf.SSLKEY_PATH, post=True):
raise NotImplementedError("please implement postXMLSSL")
class UrllibClient(BaseHttpClient):
"""使用urlib2发送请求"""
def postXml(self, xml, url, second=30):
"""不使用证书"""
data = urllib2.urlopen(url, xml, timeout=second).read()
return data
class CurlClient(BaseHttpClient):
"""使用Curl发送请求"""
include_ssl = True
def __init__(self):
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.SSL_VERIFYHOST, False)
self.curl.setopt(pycurl.SSL_VERIFYPEER, False)
        # Do not include response headers in the output
self.curl.setopt(pycurl.HEADER, False)
def postXmlSSL(self, xml, url, second=30, cert=True, cert_path=WxPayConf.SSLCERT_PATH,
key_path=WxPayConf.SSLKEY_PATH, post=True):
"""使用证书"""
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.TIMEOUT, second)
        # Configure the client certificate
        # cert and key are two separate .pem files
        # The default format is PEM, so the type options could also be omitted
if cert:
self.curl.setopt(pycurl.SSLKEYTYPE, "PEM")
self.curl.setopt(pycurl.SSLKEY, key_path)
self.curl.setopt(pycurl.SSLCERTTYPE, "PEM")
self.curl.setopt(pycurl.SSLCERT, cert_path)
        # Submit via POST
if post:
self.curl.setopt(pycurl.POST, True)
self.curl.setopt(pycurl.POSTFIELDS, xml)
buff = StringIO()
self.curl.setopt(pycurl.WRITEFUNCTION, buff.write)
self.curl.perform()
return buff.getvalue()
class RequestsClient(BaseHttpClient):
include_ssl = True
def postXmlSSL(self, xml, url, second=30, cert=True,
cert_path=WxPayConf.SSLCERT_PATH, key_path=WxPayConf.SSLKEY_PATH, post=True):
if cert:
cert_config = (cert_path, key_path)
else:
cert_config = None
if post:
# res = requests.post(url, data=xml, second=30, cert=cert_config)
res = requests.post(url, data=xml, cert=cert_config)
else:
res = requests.get(url, timeout=second, cert=cert_config)
return res.content
class HttpClient(Singleton, BaseHttpClient):
@classmethod
def configure(cls):
config_client = WxPayConf.HTTP_CLIENT
client_cls = {"urllib": UrllibClient,
"curl": CurlClient,
"requests": RequestsClient}.get(config_client.lower(), None)
if client_cls:
return client_cls
if pycurl is not None:
print("HTTP_CLIENT config error, Use 'CURL'")
return CurlClient
if requests is not None:
print("HTTP_CLIENT config error, Use 'REQUESTS'")
return RequestsClient
else:
print("HTTP_CLIENT config error, Use 'URLLIB'")
return UrllibClient
class WeixinHelper(object):
@classmethod
def checkSignature(cls, signature, timestamp, nonce):
"""微信对接签名校验"""
tmp = [WxPayConf.TOKEN, timestamp, nonce]
tmp.sort()
        code = hashlib.sha1("".join(tmp).encode("utf-8")).hexdigest()
return code == signature
@classmethod
def nonceStr(cls, length):
"""随机数"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
@classmethod
def xmlToArray(cls, xml):
"""将xml转为array"""
return dict((child.tag, child.text) for child in ET.fromstring(xml))
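    # Illustration (hypothetical payload):
    #   xmlToArray("<xml><openid>o123</openid><result_code>SUCCESS</result_code></xml>")
    # returns {"openid": "o123", "result_code": "SUCCESS"}.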
@classmethod
def oauth2(cls, redirect_uri, scope="snsapi_userinfo", state="STATE"):
"""网页授权获取用户信息
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_OAUTH_URL = "https://open.weixin.qq.com/connect/oauth2/authorize?appid={0}&redirect_uri={1}&response_type=code&scope={2}&state={3}#wechat_redirect"
        return _OAUTH_URL.format(WxPayConf.APPID, quote(redirect_uri, safe=''), scope, state)
@classmethod
def proxy(cls, redirect_uri, scope="snsapi_userinfo", state="STATE", device="mobile"):
"""网页授权获取用户信息
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_PROXY_URL = "http://shop.xuemei99.com/open/proxy?redirect_uri={0}&scope={1}&state={2}&device={3}"
        return _PROXY_URL.format(quote(redirect_uri, safe=''), scope, state, device)
@classmethod
def getAccessToken(cls, appid=WxPayConf.APPID, secret=WxPayConf.APPSECRET):
"""获取access_token
需要缓存access_token,由于缓存方式各种各样,不在此提供
http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html
"""
_ACCESS_URL = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
return HttpClient().get(_ACCESS_URL.format(appid, secret))
@classmethod
def getShopAccessToken(cls):
"""获取access_token
需要缓存access_token,由于缓存方式各种各样,不在此提供
http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html
"""
_ACCESS_URL = "http://shop.xuemei99.com/open/access_token?format=json"
return HttpClient().get(_ACCESS_URL)
@classmethod
def getUserInfo(cls, access_token, openid, lang="zh_CN"):
"""获取用户基本信息
http://mp.weixin.qq.com/wiki/14/bb5031008f1494a59c6f71fa0f319c66.html
"""
_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info?access_token={0}&openid={1}&lang={2}"
return HttpClient().get(_USER_URL.format(access_token, openid, lang))
@classmethod
def getUserInfoBatch(cls, data, access_token):
"""批量获取用户基本信息
http://mp.weixin.qq.com/wiki/1/8a5ce6257f1d3b2afb20f83e72b72ce9.html
"""
_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info/batchget?access_token={0}"
return HttpClient().postXml(data, _USER_URL.format(access_token))
@classmethod
def getAccessTokenByCode(cls, code):
"""通过code换取网页授权access_token, 该access_token与getAccessToken()返回是不一样的
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_CODEACCESS_URL = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
url = _CODEACCESS_URL.format(WxPayConf.APPID, WxPayConf.APPSECRET, code)
return HttpClient().get(url)
@classmethod
def getShopAccessTokenByCode(cls, code):
"""通过code换取网页授权access_token, 该access_token与getAccessToken()返回是不一样的
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_CODEACCESS_URL = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
url = _CODEACCESS_URL.format(WxPayConf_shop.APPID, WxPayConf_shop.APPSECRET, code)
return HttpClient().get(url)
@classmethod
def refreshAccessToken(cls, refresh_token):
"""刷新access_token, 使用getAccessTokenByCode()返回的refresh_token刷新access_token,可获得较长时间有效期
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_REFRESHTOKRN_URL = "https://api.weixin.qq.com/sns/oauth2/refresh_token?appid={0}&grant_type=refresh_token&refresh_token={1}"
return HttpClient().get(_REFRESHTOKRN_URL.format(WxPayConf.APPID, refresh_token))
@classmethod
def getSnsapiUserInfo(cls, access_token, openid, lang="zh_CN"):
"""拉取用户信息(通过网页授权)
"""
_SNSUSER_URL = "https://api.weixin.qq.com/sns/userinfo?access_token={0}&openid={1}&lang={2}"
return HttpClient().get(_SNSUSER_URL.format(access_token, openid, lang))
@classmethod
def getAccessTokenValid(cls, access_token, openid):
"""检测access_token是否过期"""
_ACCESSTOKEN_VALID_URL = 'https://api.weixin.qq.com/sns/auth?access_token={0}&openid={1}'
return HttpClient().get(_ACCESSTOKEN_VALID_URL.format(access_token, openid))
@classmethod
def getMaterialList(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/material/batchget_material?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def sendMsgAll(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/mass/sendall?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def send(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def sendTemplateMsg(cls, data, access_token):
"""发送模版消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/template/send?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def getTextMessage(cls, openid, message):
data = {
"touser": openid,
"msgtype":"text",
"text":
{
"content": message
}
}
return data
@classmethod
def sendTextMessage(cls, openid, message, access_token):
"""发送文本消息
"""
        data = cls.getTextMessage(openid, message)
return cls.send(data, access_token)
@classmethod
def getJsapiTicket(cls, access_token):
"""获取jsapi_tocket
"""
_JSAPI_URL = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token={0}&type=jsapi"
return HttpClient().get(_JSAPI_URL.format(access_token))
@classmethod
def jsapiSign(cls, jsapi_ticket, url):
"""jsapi_ticket 签名"""
sign = {
'nonceStr': cls.nonceStr(15),
'jsapi_ticket': jsapi_ticket,
'timestamp': int(time.time()),
'url': url
}
signature = '&'.join(['%s=%s' % (key.lower(), sign[key]) for key in sorted(sign)])
sign["signature"] = hashlib.sha1(signature).hexdigest()
return sign
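    # For reference (hypothetical values), the string fed to sha1 looks like
    #   jsapi_ticket=TICKET&noncestr=abc123&timestamp=1414587457&url=http://example.com
    # i.e. lowercased key=value pairs joined by '&' in sorted-key order, matching
    # the WeChat JS-SDK signature scheme.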
@classmethod
def long2short(cls, long_url, access_token):
"""长链接转短链接
https://mp.weixin.qq.com/wiki/6/856aaeb492026466277ea39233dc23ee.html
"""
_SEND_URL = "https://api.weixin.qq.com/cgi-bin/shorturl?access_token={0}"
data = json.dumps({'long_url': long_url, 'action': 'long2short'}, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def getComponentAccessToken(cls, app_id, app_secret, ticket):
"""开放平台--获取component access token"""
_COMPONENT_URL = "https://api.weixin.qq.com/cgi-bin/component/api_component_token"
data = json.dumps({'component_appid': app_id, 'component_appsecret': app_secret, 'component_verify_ticket': ticket}, ensure_ascii=False)
return HttpClient().postXml(data, _COMPONENT_URL)
@classmethod
def getPreAuthCode(cls, app_id, component_access_token):
"""开放平台--获取预授权码"""
_PRE_AUTH_CODE_URL = "https://api.weixin.qq.com/cgi-bin/component/api_create_preauthcode?component_access_token={0}"
data = json.dumps({'component_appid': app_id}, ensure_ascii=False)
return HttpClient().postXml(data, _PRE_AUTH_CODE_URL.format(component_access_token))
@classmethod
def getQueryAuth(cls, app_id, code, component_access_token):
"""开放平台--使用授权码换取公众号的接口调用凭据和授权信息"""
_QUERY_AUTH_URL = "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token={0}"
data = json.dumps({'component_appid': app_id, 'authorization_code': code}, ensure_ascii=False)
return HttpClient().postXml(data, _QUERY_AUTH_URL.format(component_access_token))
@classmethod
def getAuthorizerToken(cls, component_access_token, component_appid, authorizer_appid, authorizer_refresh_token):
"""开放平台--获取公众号接口调用token"""
_AUTH_TOKEN_URL = "https://api.weixin.qq.com/cgi-bin/component/api_authorizer_token?component_access_token={0}"
data = json.dumps({'component_appid': component_appid, 'authorizer_appid': authorizer_appid, 'authorizer_refresh_token': authorizer_refresh_token}, ensure_ascii=False)
return HttpClient().postXml(data, _AUTH_TOKEN_URL.format(component_access_token))
@classmethod
def getAuthInfo(cls, app_id, auth_app_id, component_access_token):
"""开放平台--获取授权方的公众号帐号基本信息"""
_AUTH_INFO_URL = "https://api.weixin.qq.com/cgi-bin/component/api_get_authorizer_info?component_access_token={0}"
data = json.dumps({'component_appid': app_id, 'authorizer_appid': auth_app_id}, ensure_ascii=False)
return HttpClient().postXml(data, _AUTH_INFO_URL.format(component_access_token))
@classmethod
def openOauth2(cls, app_id, c_app_id, redirect_uri, scope="snsapi_userinfo", state="STATE"):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_OAUTH_URL = "https://open.weixin.qq.com/connect/oauth2/authorize?appid={0}&redirect_uri={1}&response_type=code&scope={2}&state={3}&component_appid={4}#wechat_redirect"
        return _OPEN_OAUTH_URL.format(app_id, quote(redirect_uri, safe=''), scope, state, c_app_id)
@classmethod
def openGetAccessToken(cls, app_id, code, c_app_id, token):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_TOKEN_URL = "https://api.weixin.qq.com/sns/oauth2/component/access_token?appid={0}&code={1}&grant_type=authorization_code&component_appid={2}&component_access_token={3}"
return HttpClient().get(_OPEN_TOKEN_URL.format(app_id, code, c_app_id, token))
@classmethod
def openRefreshAccessToken(cls, app_id, c_app_id, c_access_token, refresh_token):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_REFRESH_URL = "https://api.weixin.qq.com/sns/oauth2/component/refresh_token?appid={0}&grant_type=refresh_token&component_appid={1}&component_access_token={2}&refresh_token={3}"
return HttpClient().get(_OPEN_REFRESH_URL.format(app_id, c_app_id, c_access_token, refresh_token))
| 38.690476
| 190
| 0.658238
| 2,114
| 17,875
| 5.357616
| 0.168874
| 0.051474
| 0.038849
| 0.03152
| 0.511301
| 0.485079
| 0.444552
| 0.386633
| 0.343811
| 0.303549
| 0
| 0.031421
| 0.214825
| 17,875
| 461
| 191
| 38.774403
| 0.775561
| 0.14014
| 0
| 0.287671
| 0
| 0.075342
| 0.195753
| 0.003168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157534
| false
| 0
| 0.061644
| 0
| 0.424658
| 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a076da17c1234915c44f55110c45dfe832f020a4
| 4,723
|
py
|
Python
|
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
argo_dsl/tasks.py
|
zen-xu/argo-dsl
|
76b18073c8dd850b212ccaee2a0c95f718c67db6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from argo_dsl.api.io.argoproj.workflow import v1alpha1
if TYPE_CHECKING:
from .template import Template
class _StepOutputs(str):
_name: str
_kind: str
def __new__(cls, name, kind):
obj = super().__new__(cls, "{{steps.%s.outputs.%s}}" % (name, kind))
obj._name = name
obj._kind = kind
return obj
def __getattribute__(self, item: str) -> Any:
if item.startswith("_"):
return super().__getattribute__(item)
return "{{steps.%s.outputs.%s.%s}}" % (self._name, self._kind, item)
class _Item(str):
def __new__(cls) -> _Item:
return super().__new__(cls, "{{item}}")
Item = _Item()
SERIALIZE_ARGUMENT_FUNCTION = Callable[[Any], str]
SERIALIZE_ARGUMENT_METHOD = Callable[["Template", Any], str]
class TaskStep:
def __init__(
self,
workflow_step: v1alpha1.WorkflowStep,
serialize_argument_func: Union[SERIALIZE_ARGUMENT_FUNCTION, SERIALIZE_ARGUMENT_METHOD] = str,
):
self.workflow_step = workflow_step
self.serialize_argument_func = serialize_argument_func
self._arguments: Optional[Dict[str, Any]] = None
self._batch_arguments: Optional[Union[str, List[Dict[str, Any]]]] = None
self._sequence: Optional[v1alpha1.Sequence] = None
self._when: Optional[str] = None
def call(self, **arguments) -> TaskStep:
self._arguments = arguments
return self
def batch_call(self, batch_arguments: Union[str, List[Dict[str, Any]]]) -> TaskStep:
self._batch_arguments = batch_arguments
return self
def sequence(
self,
count: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
format: Optional[str] = None,
):
self._sequence = v1alpha1.Sequence(count=count, start=start, end=end, format=format)
return self
    def when(self, expression: str) -> TaskStep:
        self._when = expression
        return self  # return self so when() chains like call()/batch_call()/sequence()
@property
def id(self) -> str:
return "{{steps.%s.id}}" % self.workflow_step.name
@property
def ip(self) -> str:
return "{{steps.%s.ip}}" % self.workflow_step.name
@property
def status(self) -> str:
return "{{steps.%s.status}}" % self.workflow_step.name
@property
def exit_code(self) -> str:
return "{{steps.%s.exitCode}}" % self.workflow_step.name
@property
def started_at(self) -> str:
return "{{steps.%s.startedAt}}" % self.workflow_step.name
@property
def finished_at(self) -> str:
return "{{steps.%s.finishedAt}}" % self.workflow_step.name
@property
def outputs_result(self) -> str:
return "{{steps.%s.outputs.result}}" % self.workflow_step.name
@property
def outputs_parameters(self) -> _StepOutputs:
return _StepOutputs(self.workflow_step.name, "parameters")
@property
def outputs_artifacts(self) -> _StepOutputs:
return _StepOutputs(self.workflow_step.name, "artifacts")
class TaskStepMaker:
def __init__(self, template: "Template"):
self.template = template
def __call__(self, name: str) -> TaskStep:
workflow_step = v1alpha1.WorkflowStep(name=name, template=self.template.name)
s = TaskStep(workflow_step, self.template.serialize_argument)
return s
class TaskStepRefer:
def __init__(self, template: str, name: str, cluster_scope: Optional[bool] = None):
self.template_ref = v1alpha1.TemplateRef(template=template, name=name, clusterScope=cluster_scope)
def __call__(self, name: str) -> TaskStep:
workflow_step = v1alpha1.WorkflowStep(name=name, templateRef=self.template_ref)
s = TaskStep(workflow_step)
return s
class TaskSteps:
def __init__(self):
self.steps: List[List[TaskStep]] = []
self._parallel: bool = False
self._inited_parallel_steps: bool = False
@contextmanager
def parallel(self):
try:
self._parallel = True
self._inited_parallel_steps = False
yield None
finally:
self._parallel = False
self._inited_parallel_steps = False
def add(self, step: TaskStep):
if self._parallel:
if not self._inited_parallel_steps:
self.steps.append([])
self._inited_parallel_steps = True
self.steps[-1].append(step)
else:
self.steps.append([step])
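# A minimal sketch of how TaskSteps groups steps; the WorkflowStep objects
# below are hypothetical stand-ins built directly from v1alpha1.
steps = TaskSteps()
a = TaskStep(v1alpha1.WorkflowStep(name="a", template="tmpl"))
b = TaskStep(v1alpha1.WorkflowStep(name="b", template="tmpl"))
c = TaskStep(v1alpha1.WorkflowStep(name="c", template="tmpl"))
steps.add(a)               # sequential: gets its own group
with steps.parallel():     # steps added inside share one group
    steps.add(b)
    steps.add(c)
assert steps.steps == [[a], [b, c]]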
| 28.79878
| 106
| 0.645988
| 548
| 4,723
| 5.306569
| 0.180657
| 0.066025
| 0.060523
| 0.061898
| 0.26238
| 0.190165
| 0.10729
| 0.081155
| 0.045392
| 0.045392
| 0
| 0.004191
| 0.242219
| 4,723
| 163
| 107
| 28.97546
| 0.808326
| 0
| 0
| 0.180328
| 0
| 0
| 0.049757
| 0.030066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.196721
| false
| 0
| 0.090164
| 0.081967
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a07a7419dbe104e7dbe0af27f725918587fdc9f2
| 4,596
|
py
|
Python
|
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | null | null | null |
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | 2
|
2019-04-02T13:18:23.000Z
|
2019-04-11T14:00:06.000Z
|
handlers/class_handler.py
|
Hargre/faltometro-bot
|
271772dc52c9d0454d96ef3c43e3a0da32075743
|
[
"MIT"
] | null | null | null |
import logging
import math
from emoji import emojize
from peewee import DoesNotExist
from telegram import ParseMode
from telegram.ext import CommandHandler
from telegram.ext import ConversationHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import RegexHandler
from constants import limit_status
from handlers.shared import cancel_handler
from handlers.shared import select_class_keyboard
from models.class_model import ClassModel
ASK_NAME, ASK_LIMIT = range(2)
DELETING_CLASS = range(1)
def add_class_entry(bot, update):
update.message.reply_text(
'Qual o nome da matéria?'
)
return ASK_NAME
def add_class_name(bot, update, user_data):
class_name = update.message.text
user_data['class_name'] = class_name
update.message.reply_text(
'Ok! E qual o limite de faltas?'
)
return ASK_LIMIT
def add_skip_limit(bot, update, user_data):
skipped_classes_limit = update.message.text
user_data['skipped_classes_limit'] = skipped_classes_limit
user_data['chat_id'] = update.message.chat_id
__save(user_data)
update.message.reply_text(
'Pronto!'
)
return ConversationHandler.END
def add_class_handler():
handler = ConversationHandler(
entry_points=[CommandHandler('add_materia', add_class_entry)],
states={
ASK_NAME: [
MessageHandler(
Filters.text,
add_class_name,
pass_user_data=True
)
],
ASK_LIMIT: [
RegexHandler(
r'^\d+$',
add_skip_limit,
pass_user_data=True
)
],
},
fallbacks=[cancel_handler()]
)
return handler
def list_classes(bot, update):
classes = ClassModel.select().where(ClassModel.chat_id == update.message.chat_id)
response = ''
for class_model in classes:
line = (
'*%s:*\n``` %s / %s faltas\t\t\t\t%s```\n\n'
% (
class_model.class_name,
class_model.skipped_classes,
class_model.skipped_classes_limit,
__get_status_emoji(class_model.skipped_classes, class_model.skipped_classes_limit)
)
)
response += line
update.message.reply_text(response, parse_mode=ParseMode.MARKDOWN)
def list_classes_handler():
handler = CommandHandler('resumo', list_classes)
return handler
def delete_class_entry(bot, update):
select_class_keyboard(update)
return DELETING_CLASS
def delete_class(bot, update):
class_name = update.message.text
chat_id = update.message.chat_id
try:
missed_class = ClassModel.get((ClassModel.chat_id == chat_id) & (ClassModel.class_name == class_name))
missed_class.delete_instance()
update.message.reply_text(
'Matéria removida!',
parse_mode=ParseMode.MARKDOWN
)
return ConversationHandler.END
except DoesNotExist:
update.message.reply_text(
'Não conheço essa matéria! Tente novamente.'
)
def delete_class_handler():
handler = ConversationHandler(
entry_points=[CommandHandler('tirar_materia', delete_class_entry)],
states={
DELETING_CLASS: [
MessageHandler(
Filters.text,
delete_class,
)
],
},
fallbacks=[cancel_handler()]
)
return handler
def __get_status_emoji(skipped_classes, skipped_classes_limit):
status_ok = emojize(":white_check_mark:", use_aliases=True)
status_warning = emojize(":warning:", use_aliases=True)
status_danger = emojize(":sos:", use_aliases=True)
status_failed = emojize(":x:", use_aliases=True)
skipped_percent = (skipped_classes * 100) / skipped_classes_limit
skipped_percent = math.floor(skipped_percent)
if skipped_percent < limit_status.WARNING:
return status_ok
elif skipped_percent >= limit_status.WARNING and skipped_percent < limit_status.DANGER:
return status_warning
elif skipped_percent >= limit_status.DANGER and skipped_percent <= limit_status.LIMIT:
return status_danger
else:
return status_failed
def __save(user_data):
ClassModel.create(
chat_id = user_data['chat_id'],
class_name = user_data['class_name'],
skipped_classes_limit = int(user_data['skipped_classes_limit'])
)
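# Hypothetical wiring of the handlers above into a python-telegram-bot
# dispatcher (the v11-era API matching the (bot, update) callback signatures
# used here); BOT_TOKEN is a placeholder.
from telegram.ext import Updater

updater = Updater(token="BOT_TOKEN")
updater.dispatcher.add_handler(add_class_handler())
updater.dispatcher.add_handler(list_classes_handler())
updater.dispatcher.add_handler(delete_class_handler())
updater.start_polling()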
| 27.035294
| 110
| 0.647302
| 515
| 4,596
| 5.467961
| 0.225243
| 0.064631
| 0.060724
| 0.046875
| 0.25071
| 0.136009
| 0.082386
| 0.037642
| 0.037642
| 0
| 0
| 0.001489
| 0.269365
| 4,596
| 170
| 111
| 27.035294
| 0.837105
| 0
| 0
| 0.207692
| 0
| 0.007692
| 0.066783
| 0.01414
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084615
| false
| 0.015385
| 0.107692
| 0
| 0.284615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a07d57857f23110458e28cf9b4145b1716e6f940
| 2,502
|
py
|
Python
|
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | 3
|
2021-01-09T20:11:31.000Z
|
2022-03-31T02:05:52.000Z
|
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | null | null | null |
convert.py
|
povle/SP-SR2-converter
|
7a675204e15b340deac2b98634805cdf75e6fd4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import sys
import io
import os.path
import shutil
import requests
from convert_file import convert_file
from gooey import Gooey, GooeyParser
if len(sys.argv) >= 2:
if '--ignore-gooey' not in sys.argv:
sys.argv.append('--ignore-gooey')
@Gooey(program_name='SP to SR2 converter', tabbed_groups=True, optional_cols=1)
def main():
parser = GooeyParser()
basic_options = parser.add_argument_group('Basic Options')
group = basic_options.add_mutually_exclusive_group(required=True)
group.add_argument('--input_file', '-i', type=argparse.FileType('rb'), help='path to the source craft xml', widget='FileChooser')
group.add_argument('--id', help='ID of the craft (https://www.simpleplanes.com/a/??????/)')
basic_options.add_argument('--output_file', '-o', type=argparse.FileType('wb'), help='path to the output file')
advanced_options = parser.add_argument_group('Advanced Options')
advanced_options.add_argument('--scale', '-s', type=float, default=1, help='scale of the converted craft', widget='DecimalField')
group2 = advanced_options.add_mutually_exclusive_group()
group2.add_argument('--only_ids', nargs='*', metavar='part_id', help='convert only parts with given ids')
group2.add_argument('--exclude_ids', nargs='*', metavar='part_id', default=[], help='ignore parts with given ids')
group3 = advanced_options.add_mutually_exclusive_group()
group3.add_argument('--only_types', nargs='*', metavar='SP_type', help='convert only parts with given types')
group3.add_argument('--exclude_types', nargs='*', metavar='SP_type', default=[], help='ignore parts with given types')
args = parser.parse_args()
output_file = args.output_file or None
if args.id:
r = requests.get(f'http://www.simpleplanes.com/Client/DownloadAircraft?a={args.id}')
if r.content == b'0':
raise ValueError('Incorrect craft ID')
input_file = io.BytesIO(r.content)
if output_file is None:
output_file = open(args.id+'_SR.xml', 'wb')
else:
input_file = args.input_file
if output_file is None:
output_name = os.path.split(input_file.name)[1]
output_name = os.path.splitext(output_name)[0]+'_SR.xml'
output_file = open(output_name, 'wb')
with input_file as i, output_file as o:
converted = convert_file(i, args)
shutil.copyfileobj(converted, o)
if __name__ == '__main__':
main()
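# Hypothetical invocation examples for the converter above; the craft ID,
# file names, and part type are placeholders, not values from the project.
#   python convert.py -i MyPlane.xml -o MyPlane_SR.xml
#   python convert.py --id abc123 --scale 0.5
#   python convert.py -i MyPlane.xml --exclude_types Fuselage-Body-1
# With no CLI arguments, Gooey presents the same options as a GUI instead.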
| 43.137931
| 133
| 0.688649
| 347
| 2,502
| 4.760807
| 0.32853
| 0.066586
| 0.033898
| 0.049031
| 0.257869
| 0.150121
| 0
| 0
| 0
| 0
| 0
| 0.006737
| 0.169464
| 2,502
| 57
| 134
| 43.894737
| 0.788258
| 0.008393
| 0
| 0.042553
| 0
| 0
| 0.239516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.170213
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a07db8162c85985e5fa4859871927e9c03a5f877
| 5,289
|
py
|
Python
|
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | 1
|
2016-01-20T14:36:02.000Z
|
2016-01-20T14:36:02.000Z
|
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | null | null | null |
bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py
|
dlab-berkeley/collaboratool-archive
|
fa474e05737f78e628d6b9398c58cf7c966a7bba
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pwd
import random
import traceback
import tempfile
import base64
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible import module_common
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for fetch operations '''
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
source = options.get('src', None)
dest = options.get('dest', None)
flat = options.get('flat', False)
flat = utils.boolean(flat)
fail_on_missing = options.get('fail_on_missing', False)
fail_on_missing = utils.boolean(fail_on_missing)
if source is None or dest is None:
results = dict(failed=True, msg="src and dest are required")
return ReturnData(conn=conn, result=results)
if flat:
if dest.endswith("/"):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = utils.path_dwim(self.runner.basedir, dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), conn.host, source)
dest = dest.replace("//","/")
# calculate md5 sum for the remote file
remote_md5 = self.runner._remote_md5(conn, tmp, source)
# use slurp if sudo and permissions are lacking
remote_data = None
if remote_md5 in ('1', '2') or self.runner.sudo:
slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
if slurpres.is_successful():
if slurpres.result['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres.result['content'])
if remote_data is not None:
remote_md5 = utils.md5s(remote_data)
# these don't fail because you may want to transfer a log file that possibly MAY exist
# but keep going to fetch other log files
if remote_md5 == '0':
result = dict(msg="unable to calculate the md5 sum of the remote file", file=source, changed=False)
return ReturnData(conn=conn, result=result)
if remote_md5 == '1':
if fail_on_missing:
result = dict(failed=True, msg="the remote file does not exist", file=source)
else:
result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
return ReturnData(conn=conn, result=result)
if remote_md5 == '2':
result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
return ReturnData(conn=conn, result=result)
# calculate md5 sum for the local file
local_md5 = utils.md5(dest)
if remote_md5 != local_md5:
# create the containing directories, if needed
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
conn.fetch_file(source, dest)
else:
f = open(dest, 'w')
f.write(remote_data)
f.close()
new_md5 = utils.md5(dest)
if new_md5 != remote_md5:
result = dict(failed=True, md5sum=new_md5, msg="md5 mismatch", file=source, dest=dest)
return ReturnData(conn=conn, result=result)
result = dict(changed=True, md5sum=new_md5, dest=dest)
return ReturnData(conn=conn, result=result)
else:
result = dict(changed=False, md5sum=local_md5, file=source, dest=dest)
return ReturnData(conn=conn, result=result)
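# A standalone sketch of the compare-then-fetch pattern used above, with
# hashlib standing in for ansible's utils.md5 helpers; the conn object and
# its fetch_file method are hypothetical stand-ins.
import hashlib

def _local_md5(path):
    try:
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()
    except IOError:
        return None

def fetch_if_changed(conn, source, dest, remote_md5):
    if remote_md5 == _local_md5(dest):
        return False                       # already in sync, skip the transfer
    conn.fetch_file(source, dest)
    return _local_md5(dest) == remote_md5  # verify the fetched copy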
| 42.653226
| 135
| 0.626962
| 702
| 5,289
| 4.645299
| 0.319088
| 0.024839
| 0.049065
| 0.058878
| 0.215885
| 0.175406
| 0.158234
| 0.120209
| 0.106716
| 0.106716
| 0
| 0.011564
| 0.280582
| 5,289
| 123
| 136
| 43
| 0.845466
| 0.240688
| 0
| 0.126582
| 0
| 0
| 0.089676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.139241
| 0
| 0.278481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a07ebd61f61d120e3815b7fb4a6cf2eeafd36431
| 4,563
|
py
|
Python
|
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
src/plot_by_genome.py
|
MaaT-Pharma/AMBER
|
76aa10e2295265b16337b7bfab769d67d3bea66a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import argparse
import os
import sys
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import inspect  # os and sys are already imported above
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from src import plots
from src.utils import load_data
from src.utils import argparse_parents
def plot_by_genome(data, out_file=None, sort_by='completeness'):
not_sort_by = list(set(['purity','completeness']) - set([sort_by]))[0] # get the metric not sorted by
data = sorted(data, key=lambda x: x[sort_by])
genomes = []
precision = []
recall = []
for genome in data:
genomes.append(genome['mapped_genome'])
precision.append(genome['purity'])
recall.append(genome['completeness'])
sort = {'purity': precision, 'completeness': recall}
fig, ax1 = plt.subplots(figsize=(len(genomes) * 0.15, 5))
ax1.plot(np.arange(len(genomes)), sort[sort_by], color='black')
plt.xticks(np.arange(len(genomes)), genomes, rotation='vertical', fontsize="smaller")
ax1.plot(np.arange(len(genomes)), sort[not_sort_by], '.', color='red')
# transform y labels to percentages
vals = ax1.get_yticks()
ax1.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
plt.legend((sort_by.title(), not_sort_by.title()))
plt.grid(True)
plt.tight_layout()
if out_file is None:
plt.show()
else:
plt.savefig(os.path.normpath(out_file + '.png'), dpi=100, format='png', bbox_inches='tight')
plt.savefig(os.path.normpath(out_file + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_by_genome2(bin_metrics_per_query, binning_labels, output_dir):
colors_list = plots.create_colors_list()
if len(bin_metrics_per_query) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(6, 5))
# force axis to be from 0 to 100%
axs.set_xlim([0.0, 1.0])
axs.set_ylim([0.0, 1.0])
i = 0
for query_metrics in bin_metrics_per_query:
precision = []
recall = []
for metrics in query_metrics:
precision.append(metrics['purity'])
recall.append(metrics['completeness'])
axs.scatter(precision, recall, marker='o', color=colors_list[i], s=[8] * len(precision))
i += 1
# turn on grid
axs.minorticks_on()
axs.grid(which='major', linestyle='-', linewidth='0.5')
axs.grid(which='minor', linestyle=':', linewidth='0.5')
# transform plot_labels to percentages
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
plt.xlabel('Purity per bin')
plt.ylabel('Completeness per genome')
plt.tight_layout()
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.eps'), dpi=100, format='eps', bbox_inches='tight')
lgd = plt.legend(binning_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False)
for handle in lgd.legendHandles:
handle.set_sizes([100.0])
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def main():
    parser = argparse.ArgumentParser(description="Plot purity and completeness per genome. Genomes can be sorted by completeness (default) or purity")
    parser.add_argument('file', nargs='?', type=argparse.FileType('r'), help=argparse_parents.HELP_FILE)
    parser.add_argument('-s', '--sort_by', help='Sort by either purity or completeness (default: completeness)', choices=set(['purity', 'completeness']))
    parser.add_argument('-o', '--out_file', help='Path to store image (default: only show image)')
    args = parser.parse_args()
    if not args.file and sys.stdin.isatty():
        parser.print_help()
        parser.exit(1)
    metrics = load_data.load_tsv_table(sys.stdin if not sys.stdin.isatty() else args.file)
    if args.sort_by is not None:
        plot_by_genome(metrics, args.out_file, args.sort_by)
    else:
        plot_by_genome(metrics, args.out_file)
if __name__ == "__main__":
main()
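# Hypothetical programmatic use of plot_by_genome; the metric records are
# made-up examples carrying exactly the keys the function reads.
metrics = [
    {"mapped_genome": "genomeA", "purity": 0.95, "completeness": 0.80},
    {"mapped_genome": "genomeB", "purity": 0.70, "completeness": 0.99},
]
plot_by_genome(metrics, out_file="per_genome", sort_by="completeness")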
| 38.669492
| 153
| 0.67368
| 653
| 4,563
| 4.546708
| 0.287902
| 0.024251
| 0.021893
| 0.035365
| 0.215898
| 0.215898
| 0.1903
| 0.092287
| 0.092287
| 0.08454
| 0
| 0.020419
| 0.17357
| 4,563
| 117
| 154
| 39
| 0.766905
| 0.23055
| 0
| 0.106667
| 0
| 0
| 0.109997
| 0.027499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.133333
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a080d1b200263a36cd31a3e857bf790cbd1e3259
| 16,548
|
py
|
Python
|
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | null | null | null |
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | 4
|
2019-11-15T10:21:20.000Z
|
2021-04-22T13:37:32.000Z
|
tests/test_references.py
|
isprojects/djangorestframework-inclusions
|
c6669f404a8a80f2c524a8adfb6548b2eef235c7
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from rest_framework.test import APITestCase
from testapp.models import (
A,
B,
C,
Child,
ChildProps,
Container,
Entry,
MainObject,
Parent,
Tag,
)
from .mixins import InclusionsMixin
class ReferenceTests(InclusionsMixin, APITestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tag1 = Tag.objects.create(name="you")
cls.tag2 = Tag.objects.create(name="are")
cls.tag3 = Tag.objects.create(name="it")
cls.parent1 = Parent.objects.create(name="Papa Johns")
cls.parent1.tags.set([cls.tag1, cls.tag2])
cls.parent2 = Parent.objects.create(name="Papa Roach")
cls.parent2.tags.set([cls.tag2])
cls.child1 = Child.objects.create(parent=cls.parent1, name="Children of Bodom")
cls.child2 = Child.objects.create(parent=cls.parent1, name="Children of Men")
cls.child1.tags.set([cls.tag3])
cls.parent1.favourite_child = cls.child2
cls.parent1.save()
cls.childprops = ChildProps.objects.create(child=cls.child2)
cls.container1 = Container.objects.create(name="container 1")
cls.container1.save()
cls.entryA = Entry.objects.create(name="A", container=cls.container1)
cls.entryA.tags.set([cls.tag1])
cls.entryA.save()
cls.entryB = Entry.objects.create(name="B", container=cls.container1)
cls.entryB.tags.set([cls.tag3])
cls.entryB.save()
def test_tag_list(self): # without pagination
expected = {
"data": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
],
"inclusions": {},
}
self.assertResponseData("tag-list", expected)
def test_tag_detail(self):
expected = {"data": {"id": self.tag1.id, "name": "you"}, "inclusions": {}}
self.assertResponseData("tag-detail", expected, pk=self.tag1.pk)
def test_custom_action_no_inclusion_serializer(self):
"""
Assert that custom actions with inclusion renderer don't trigger
inclusion machinery.
"""
expected = [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
]
self.assertResponseData("tag-custom-action", expected)
def test_custom_action_inclusion_serializer(self):
"""
Assert that the inclusion machinery does kick in if inclusion
serializers are involved.
"""
entry_c = C.objects.create()
expected = {"data": {"id": entry_c.id, "b": None}, "inclusions": {}}
self.assertResponseData("c-custom-action", expected, pk=entry_c.pk)
def test_parent_list(self): # with pagination
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.pk,
},
{
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
],
"inclusions": {},
}
self.assertResponseData("parent-list", expected)
def test_parent_list_include_tags(self):
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.pk,
},
{
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
]
},
}
self.assertResponseData("parent-list", expected, params={"include": "tags"})
def test_parent_detail(self):
expected = {
"data": {
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
"inclusions": {},
}
self.assertResponseData("parent-detail", expected, pk=self.parent2.pk)
def test_parent_detail_with_include(self):
expected = {
"data": {
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
"inclusions": {"testapp.Tag": [{"id": self.tag2.id, "name": "are"}]},
}
self.assertResponseData(
"parent-detail", expected, pk=self.parent2.pk, params={"include": "*"}
)
def test_nested_include(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "parent.tags"},
pk=self.child2.pk,
)
def test_include_all_detail(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
],
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.pk}
],
},
}
self.assertResponseData(
"child-detail", expected, params={"include": "*"}, pk=self.child2.pk
)
def test_include_all_list(self):
expected = {
"count": 2,
"next": None,
"previous": None,
"data": [
{
"id": self.child1.id,
"name": "Children of Bodom",
"childprops": None,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [self.tag3.id],
},
{
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
],
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.pk}
],
},
}
self.assertResponseData("child-list", expected, params={"include": "*"})
def test_include_fk_field(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.id}
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "childprops"},
pk=self.child2.pk,
)
def test_flattened_inclusions(self):
expected = {
"data": {
"id": self.child1.id,
"name": "Children of Bodom",
"childprops": None,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [self.tag3.id],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "tags,parent.tags"},
pk=self.child1.pk,
)
def test_nested_include_multiple_from_same_child(self):
self.child2.tags.set([self.tag1])
self.addCleanup(self.child2.tags.clear)
expected = {
"data": {
"id": self.childprops.id,
"child": {
"id": self.child2.id,
"parent": self.parent1.id,
"tags": [self.tag1.id],
},
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{
"id": self.tag2.id,
"name": "are",
}, # included from Parent inclusion
],
"testapp.Parent": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
}
],
},
}
self.assertResponseData(
"childprops-detail",
expected,
params={"include": "child.tags,child.parent"},
pk=self.childprops.pk,
)
def test_many(self):
expected = {
"data": {
"entries": [
{"id": self.entryA.id, "name": "A", "tags": [self.tag1.id]},
{"id": self.entryB.id, "name": "B", "tags": [self.tag3.id]},
],
"id": self.container1.id,
"name": "container 1",
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData(
"container-detail",
expected,
params={"include": "entries.tags"},
pk=self.container1.pk,
)
def test_post(self):
url = reverse("parent-list")
response = self.client.post(
url, {"name": "Papa Post", "tags": [self.tag2.id], "favourite_child": None}
)
json = response.json()
json["data"].pop("id")
self.assertEqual(
json,
{
"data": {
"favourite_child": None,
"name": "Papa Post",
"tags": [self.tag2.id],
},
"inclusions": {},
},
)
def test_post_with_error(self):
url = reverse("parent-list")
response = self.client.post(url, {"wrong": "WRONG"})
json = response.json()
# things should not be wrapped in data
self.assertEqual(
json,
{"name": ["This field is required."], "tags": ["This field is required."]},
)
def test_post_with_non_field_error(self):
url = reverse("parent-list")
response = self.client.post(url, {"name": "Trigger", "tags": [self.tag2.id]})
json = response.json()
# things should not be wrapped in data
self.assertEqual(json, {"invalid": "WRONG"})
def test_list_action(self):
url = reverse("parent-check")
response = self.client.post(url, {"random": "data"})
json = response.json()
self.assertEqual(json, {"arbitrary": "content"})
def test_detail_action(self):
url = reverse("parent-check2", kwargs={"pk": self.parent1.pk})
response = self.client.post(url, {"random": "data"})
json = response.json()
self.assertEqual(json, {"arbitrary": "content"})
def test_read_only_inclusions(self):
"""
NEXT-827 -- Inclusions should work with read-only fields.
"""
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{"id": self.entryA.id, "name": "A", "tags": [self.tag1.id]},
{"id": self.entryB.id, "name": "B", "tags": [self.tag3.id]},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData("entry-list", expected, params={"include": "tags"})
def test_nullable_relation(self):
"""
NEXT-856 -- requesting inclusions of nullable relations shouldn't crash.
"""
a1 = A.objects.create()
b1 = B.objects.create()
c1 = C.objects.create()
b2 = B.objects.create(a=a1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b1)
expected = {
"count": 3,
"previous": None,
"next": None,
"data": [
{"id": c1.id, "b": None},
{"id": c2.pk, "b": {"id": b2.id, "a": a1.id}},
{"id": c3.pk, "b": {"id": b1.id, "a": None}},
],
"inclusions": {"testapp.A": [{"id": a1.id}]},
}
self.assertResponseData("c-list", expected, params={"include": "b.a"})
def test_reverse_relation(self):
"""
NEXT-1052 revealed a problem in inclusions.
Response data in the form of:
[{
'external_notifications': [],
}]
        where an inclusion field is in the ExternalNotification serializer
bugged out.
"""
main_object = MainObject.objects.create()
# no actual related objects exist in database
expected = {
"data": [{"id": main_object.id, "relatedobject_set": []}],
"inclusions": {},
}
self.assertResponseData("mainobject-list", expected, params={"include": "*"})
| 32.383562
| 87
| 0.435098
| 1,484
| 16,548
| 4.791779
| 0.115229
| 0.057376
| 0.035157
| 0.030375
| 0.593166
| 0.552806
| 0.536915
| 0.497117
| 0.482351
| 0.448038
| 0
| 0.016897
| 0.413464
| 16,548
| 510
| 88
| 32.447059
| 0.715743
| 0.041395
| 0
| 0.531323
| 0
| 0
| 0.148616
| 0.001467
| 0
| 0
| 0
| 0
| 0.053364
| 1
| 0.055684
| false
| 0
| 0.009281
| 0
| 0.069606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a08175a3e80e168fe04fe33684d0de9087ed3e33
| 2,652
|
py
|
Python
|
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
markups/restructuredtext.py
|
LukeC8/pymarkups
|
eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a
|
[
"BSD-3-Clause"
] | null | null | null |
# vim: ts=8:sts=8:sw=8:noexpandtab
# This file is part of python-markups module
# License: 3-clause BSD, see LICENSE file
# Copyright: (C) Dmitry Shachnev, 2012-2018
import markups.common as common
from markups.abstract import AbstractMarkup, ConvertedMarkup
class ReStructuredTextMarkup(AbstractMarkup):
"""Markup class for reStructuredText language.
Inherits :class:`~markups.abstract.AbstractMarkup`.
:param settings_overrides: optional dictionary of overrides for the
`Docutils settings`_
:type settings_overrides: dict
.. _`Docutils settings`: http://docutils.sourceforge.net/docs/user/config.html
"""
name = 'reStructuredText'
attributes = {
common.LANGUAGE_HOME_PAGE: 'http://docutils.sourceforge.net/rst.html',
common.MODULE_HOME_PAGE: 'http://docutils.sourceforge.net/',
common.SYNTAX_DOCUMENTATION: 'http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html'
}
file_extensions = ('.rst', '.rest')
default_extension = '.rst'
@staticmethod
def available():
try:
import docutils.core
except ImportError:
return False
return True
def __init__(self, filename=None, settings_overrides=None):
self.overrides = settings_overrides or {}
self.overrides.update({
'math_output': 'MathJax %s?config=TeX-AMS_CHTML' % common.MATHJAX_WEB_URL,
'syntax_highlight': 'short',
'halt_level': 5, # Never convert system messages to exceptions
})
AbstractMarkup.__init__(self, filename)
from docutils.core import publish_parts
self._publish_parts = publish_parts
def convert(self, text):
parts = self._publish_parts(text, source_path=self.filename,
writer_name='html5', settings_overrides=self.overrides)
# Determine head
head = parts['head']
# Determine body
body = parts['html_body']
# Determine title
title = parts['title']
# Determine stylesheet
origstyle = parts['stylesheet']
# Cut off <style> and </style> tags
stylestart = '<style type="text/css">'
stylesheet = ''
if stylestart in origstyle:
stylesheet = origstyle[origstyle.find(stylestart)+25:origstyle.rfind('</style>')]
stylesheet += common.get_pygments_stylesheet('.code')
return ConvertedReStructuredText(head, body, title, stylesheet)
class ConvertedReStructuredText(ConvertedMarkup):
def __init__(self, head, body, title, stylesheet):
ConvertedMarkup.__init__(self, body, title, stylesheet)
self.head = head
def get_javascript(self, webenv=False):
if 'MathJax.js?config=TeX-AMS_CHTML' not in self.head:
return ''
return ('<script type="text/javascript" src="%s?config=TeX-AMS_CHTML"></script>\n' %
common.get_mathjax_url(webenv))
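# A minimal usage sketch, assuming docutils is installed and that
# ConvertedMarkup keeps the body/title/stylesheet it is constructed with.
if ReStructuredTextMarkup.available():
    markup = ReStructuredTextMarkup()
    converted = markup.convert('Title\n=====\n\nSome *emphasis* and ``code``.')
    html = converted.body            # rendered HTML5 fragment
    js = converted.get_javascript()  # MathJax <script> tag, or '' if no math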
| 30.837209
| 99
| 0.735294
| 319
| 2,652
| 5.949843
| 0.435737
| 0.044784
| 0.048472
| 0.054795
| 0.086407
| 0.035827
| 0
| 0
| 0
| 0
| 0
| 0.007048
| 0.144042
| 2,652
| 85
| 100
| 31.2
| 0.829075
| 0.236425
| 0
| 0
| 0
| 0.02
| 0.205691
| 0.058412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.42
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0827c33ad3c6db021a834ac073ebf6c9ba882a7
| 8,025
|
py
|
Python
|
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
Intelligent Systems and Decision Support Systems/pm-test1.py
|
johnpras/Uni_work
|
1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6
|
[
"MIT"
] | null | null | null |
import pandas as pd
from pm4py.objects.log.importer.xes import importer as xes_import
from pm4py.objects.log.util import log as utils
from pm4py.statistics.start_activities.log.get import get_start_activities
from pm4py.statistics.end_activities.log.get import get_end_activities
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.visualization.petrinet import factory as vis_factory
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.algo.discovery.heuristics import factory as heuristics_miner
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation import factory as evaluation_factory
from pm4py.algo.conformance.tokenreplay import factory as token_replay
# Function called for each algorithm and for both logs to report how many
# traces do not conform to the process model
def print_fit_traces(log, net, initial_marking, final_marking):
    replayed_traces = token_replay.apply(log, net, initial_marking, final_marking)
    non_fit_traces = 0
    for trace in replayed_traces:
        if not trace["trace_is_fit"]:
            non_fit_traces += 1
    print("Number of non fit traces : ", non_fit_traces)
# 1. Read the event log
log = xes_import.apply('edited_hh110_labour.xes')
trace_key_list = []
event_key_list = []
event_count = 0  # counter for the total number of events
for trace in log:
    # Collect the keys of each trace; if a key is not already in
    # trace_key_list, append it.
for trace_key in trace.attributes.keys():
if trace_key not in trace_key_list:
trace_key_list.append(trace_key)
for event in trace:
        # Do the same for the event keys
for event_key in event.keys():
if event_key not in event_key_list:
event_key_list.append(event_key)
        event_count += 1  # increment the counter on every event iteration
# 2. Print the structure of a trace and of an event
print("Trace keys : " + str(trace_key_list))
print("Event keys : " + str(event_key_list))
# 3. Print the number of traces
print("Number of traces : " + str(len(log)))
# 4. Print the number of events
print("Number of events : " + str(event_count))
# 5. Print the distinct events that make up the event log
unique_events = utils.get_event_labels(log,'concept:name')
print("Events of log : " + str(unique_events))
# 6. Print the activities that traces start and end with, together
# with their frequency of occurrence
# First, the activities that traces start with
start_activities = get_start_activities(log)
print("Starting activities: " + str(start_activities))
# And correspondingly the activities that traces end with
end_activities = get_end_activities(log)
print("End activities" + str(end_activities))
# 7. Print a table with case id, activity name, transition (start or
# complete), and timestamp
# Create an empty DataFrame
log_df = pd.DataFrame(columns = ["Case ID" , "Activity Name" , "Transition" , "Timestamp"])
for trace_id, trace in enumerate(log):
for event_index, event in enumerate(trace):
        # Build a one-row DataFrame holding the fields we want from the
        # current event, i.e. one row of the table we intend to create
row = pd.DataFrame({
"Case ID" : [trace.attributes["concept:name"]],
"Activity Name" : [event["concept:name"]],
"Transition" : [event["lifecycle:transition"]],
"Timestamp" : [event["time:timestamp"]]
})
        # Append the row to the DataFrame defined outside the loop
log_df = log_df.append(row, ignore_index = True)
print("Printing log table : \n")
print(log_df)
# To print the whole dataframe to the console,
# uncomment the following line
#print(log_df.to_string(index=False))
# For easier reading, export log_df as csv
log_df.to_csv('log_table.csv', index = False)
# 8. Filter the event log, keeping only the traces that end
# with the "End" activity
filtered_log = end_activities_filter.apply(log,["End"])
print("New log : \n " + str(filtered_log))
# As a sanity check, print the size of filtered_log; it should equal
# the frequency of "End"
print("Size of filtered log : " + str(len(filtered_log)))
# For easier reading/verification, export filtered_log as csv
# by uncommenting the next 2 lines
#filt_log_df = pd.DataFrame(filtered_log)
#filt_log_df.to_csv('filtered_log.csv')
# 9. Process models
# Alpha Miner
# For the original log
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_log_evaluation.csv')
# For the filtered log
net, initial_marking, final_marking = alpha_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_filtered_log_evaluation.csv')
# Heuristics Miner
# For the original log
net, initial_marking, final_marking = heuristics_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_log_evaluation.csv')
#alignments = alignment.apply_log(log, net, initial_marking, final_marking)
#pretty_print_alignments(alignments)
# For the filtered log
net, initial_marking, final_marking = heuristics_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_filtered_log_evaluation.csv')
# Inductive Miner
# For the original log
net, initial_marking, final_marking = inductive_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_log_evaluation.csv')
# For the filtered log
net, initial_marking, final_marking = inductive_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_filtered_log_evaluation.csv')
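# A hypothetical follow-up step: collect each miner's evaluation dict into a
# single table for side-by-side comparison. The result keys (and any nested
# dicts) come from pm4py's evaluation factory.
results = {}
for name, miner in [("alpha", alpha_miner),
                    ("heuristics", heuristics_miner),
                    ("inductive", inductive_miner)]:
    net, im, fm = miner.apply(log)
    results[name] = evaluation_factory.apply(log, net, im, fm)
summary = pd.DataFrame(results).T
print(summary)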
| 42.68617
| 102
| 0.761994
| 1,140
| 8,025
| 5.138596
| 0.197368
| 0.046091
| 0.078354
| 0.1014
| 0.46125
| 0.410379
| 0.392625
| 0.386309
| 0.386309
| 0.362581
| 0
| 0.004284
| 0.156511
| 8,025
| 187
| 103
| 42.914439
| 0.861132
| 0.360997
| 0
| 0.319149
| 0
| 0
| 0.082136
| 0.004723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010638
| false
| 0
| 0.138298
| 0
| 0.148936
| 0.265957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0853c6f068e5b0ba0007116f943ea7455d91729
| 46,894
|
py
|
Python
|
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | null | null | null |
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | 2
|
2021-05-07T22:45:42.000Z
|
2021-09-22T04:35:15.000Z
|
src/scml_vis/presenter.py
|
yasserfarouk/scml-vis
|
a8daff36bb29867a67c9a36bcdca9ceef9350e53
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import shutil
import itertools
import random
import sys
import traceback
from pathlib import Path
import altair as alt
import pandas as pd
import plotly as plotly
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from pandas.api.types import is_numeric_dtype
from plotly.validators.scatter.marker import SymbolValidator
from streamlit import cli as stcli
import scml_vis.compiler as compiler
from scml_vis.compiler import VISDATA_FOLDER
from scml_vis.utils import (
add_selector,
add_stats_display,
add_stats_selector,
load_data,
plot_network,
score_distribution,
score_factors,
)
__all__ = ["main"]
MARKERS = SymbolValidator().values[2::3]
MARKERS = [_ for _ in MARKERS if not any(_.startswith(x) for x in ("star", "circle", "square"))]
random.shuffle(MARKERS)
MARKERS = ["circle", "square"] + MARKERS
DB_FOLDER = Path.home() / "negmas" / "runsdb"
DB_NAME = "rundb.csv"
BASE_FOLDERS = [
Path.home() / "negmas" / "logs" / "scml" / "scml2020",
Path.home() / "negmas" / "logs" / "scml" / "scml2020oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021",
Path.home() / "negmas" / "logs" / "tournaments",
Path.home() / "negmas" / "tournaments",
]
def main(folder: Path):
st.set_page_config(layout="wide")
if folder is None:
add_base = st.sidebar.checkbox("Add Default paths", True)
add_cli = st.sidebar.checkbox("Add CLI runs", (DB_FOLDER / DB_NAME).exists())
options = dict(none="none")
if add_cli:
if (DB_FOLDER / DB_NAME).exists():
data = pd.read_csv(DB_FOLDER / DB_NAME, index_col=None, header=None)
data: pd.DataFrame
data = data.iloc[::-1]
data.columns = ["name", "type", "path"]
for _, x in data.iterrows():
options[x["path"]] = f"{x['type'][0]}:{x['name']}"
if add_base:
for base in BASE_FOLDERS:
type_ = base.name == "tournaments"
for child in base.glob("*"):
if not child.is_dir() or not compiler.has_logs(child):
continue
options[child] = f"{'t' if type_ else 'w'}:{child.name}"
folder = st.sidebar.selectbox("Select a run", list(options.keys()), format_func=lambda x: options[x])
if not folder or (isinstance(folder, str) and folder == "none"):
st.text(
"Cannot find any folders with logs.\nTry looking in default paths by checking 'Add Default paths' \nin the side bar or start the app with a folder containing log data using -f"
)
return
folder = Path(folder)
if folder.name != VISDATA_FOLDER:
folder = folder / VISDATA_FOLDER
if folder.exists():
re_compile = st.sidebar.button("Recompile visualization data?")
if re_compile:
st.error("Do you really, really, want to remove all visuallization data and recopmile?")
if st.button("Yes I'm OK with that"):
shutil.rmtree(folder)
if not folder.exists():
try:
do_compile = st.sidebar.button("Compile visualization data?")
if do_compile:
try:
compiler.main(folder.parent, max_worlds=None)
except Exception as e:
st.write(f"*Failed to compile visualization data for {folder}*\n### Exception:\n{str(e)}")
st.write(f"\n### Traceback:\n```\n{traceback.format_exc()}```")
else:
st.text("Either press 'Compile visualization data' to view logs of this folder or choose another one.")
return
except:
st.write(f"Folder {folder} contains no logs to use")
# folder = folder / VISDATA_FOLDER
# if not folder.exists():
# st.write(
# f"## SCML Visualizer\nError: No {VISDATA_FOLDER} folder found with visualization data at {str(folder)}"
# )
# return
if folder.name != VISDATA_FOLDER:
folder = folder / VISDATA_FOLDER
if not folder.exists():
st.write("Cannot find visualiation data")
return
st.write(f"## SCML Visualizer\n{str(folder.parent)}")
st.sidebar.markdown("## Data Selection")
tournaments = load_data(folder, "tournaments")
tournament_expander = st.sidebar.beta_expander("Tournament Selection")
with tournament_expander:
selected_tournaments, _, _ = add_selector(
st,
"",
tournaments["name"].unique(),
key="tournaments",
none=False,
default="one",
)
worlds = None
configs = load_data(folder, "configs")
if configs is None:
worlds = load_data(folder, "worlds")
config_names = worlds.loc[:, "name"].str.split("_").str[0].unique()
configs = pd.DataFrame(data=config_names, columns=["id"])
config_expander = st.sidebar.beta_expander("Config Selection")
with config_expander:
selected_configs, _, _ = add_selector(
st,
"",
configs["id"].unique(),
key="configs",
none=False,
default="all",
)
if worlds is None:
worlds = load_data(folder, "worlds")
if "config" not in worlds.columns:
worlds["config"] = worlds.loc[:, "name"].str.split("_").str[0]
worlds = worlds.loc[worlds.tournament.isin(selected_tournaments) & worlds.config.isin(selected_configs), :]
world_expander = st.sidebar.beta_expander("World Selection")
with world_expander:
selected_worlds, _, _ = add_selector(st, "", worlds.name, key="worlds", none=False, default="all")
worlds = worlds.loc[(worlds.name.isin(selected_worlds)), :]
agents = load_data(folder, "agents")
type_expander = st.sidebar.beta_expander("Type Selection")
with type_expander:
selected_types, _, _ = add_selector(st, "", agents.type.unique(), key="types", none=False, default="all")
agents = agents.loc[(agents.type.isin(selected_types)), :]
agent_expander = st.sidebar.beta_expander("Agent Selection")
with agent_expander:
selected_agents, _, _ = add_selector(st, "", agents.name.unique(), key="agents", none=False, default="all")
products = load_data(folder, "product_stats")
product_expander = st.sidebar.beta_expander("Product Selection")
with product_expander:
selected_products, _, _ = add_selector(
st, "", products["product"].unique(), key="products", none=False, default="all"
)
agents = agents.loc[(agents.type.isin(selected_types)), :]
nsteps = worlds.loc[worlds.name.isin(selected_worlds), "n_steps"].max()
nsteps = int(nsteps)
selected_steps = st.sidebar.slider("Steps", 0, nsteps, (0, nsteps))
selected_times = st.sidebar.slider("Relative Times", 0.0, 1.0, (0.0, 1.0))
st.sidebar.markdown("## Figure Selection")
# ts_figs = st.sidebar.beta_expander("Time Series")
# net_figs = st.sidebar.beta_expander("Networks")
# tbl_figs = st.sidebar.beta_expander("Tables")
# other_figs = st.sidebar.beta_expander("Others")
# if len(selected_worlds) == 1:
# fig_type = st.sidebar.selectbox(label="", options=["Time-series", "Networks", "Tables", "Others"], index=1)
# else:
# fig_type = st.sidebar.selectbox(label="", options=["Time-series", "Tables", "Others"], index=1)
#
# if fig_type == "Time-series":
# runner = display_time_series
# elif fig_type == "Networks":
# runner = display_networks
# elif fig_type == "Tables":
# runner = display_tables
# elif fig_type == "Others":
# runner = display_others
# else:
# st.text("Please choose what type of figures are you interested in")
# return
products_summary = (
products.loc[:, [_ for _ in products.columns if _ not in ("step", "relative_time")]]
.groupby(["tournament", "world", "product"])
.agg(["min", "max", "mean", "std"])
)
products_summary.columns = [f"{a}_{b}" for a, b in products_summary.columns]
products_summary = products_summary.reset_index()
data = dict(t=tournaments, w=worlds, a=agents, p=products_summary)
def filter(x, agent_field_sets):
if x is None:
return x
x = x.loc[(x.world.isin(selected_worlds)), :]
indx = None
for fields in agent_field_sets:
if not fields:
continue
indx = x[fields[0]].isin(selected_agents)
for f in fields[1:]:
indx = (indx) | (x[f].isin(selected_agents))
if indx is None:
return x
return x.loc[indx, :]
data["con"] = load_data(folder, "configs")
data["a"] = load_data(folder, "agents")
data["t"] = load_data(folder, "types")
data["c"] = filter(load_data(folder, "contracts"), [["buyer", "seller"]])
data["n"] = filter(load_data(folder, "negotiations"), [["buyer", "seller"]])
data["o"] = filter(load_data(folder, "offers"), [["sender", "receiver"]])
for runner, section_name in [
(display_networks, "Networks"),
(display_others, "Overview"),
(display_tables, "Tables"),
(display_time_series, "Time Series"),
]:
if section_name != "Time Series":
expander = st.sidebar.beta_expander(section_name, section_name == "Networks")
do_expand = expander.checkbox(f"Show {section_name}", section_name == "Networks")
else:
expander = st.sidebar
do_expand = st.sidebar.checkbox(section_name, True)
if do_expand:
runner(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=expander,
)
# st.sidebar.markdown("""---""")
def filter_by_time(x, cols, selected_steps, selected_times):
indx = None
for k in cols:
step_col, time_col = f"{k}step", f"{k}relative_time"
i = (x[step_col] >= selected_steps[0]) & (x[step_col] <= selected_steps[1])
i &= (x[time_col] >= selected_times[0]) & (x[time_col] <= selected_times[1])
if indx is None:
indx = i
else:
indx |= i
if indx is not None:
return x.loc[indx, :]
return x
def show_a_world(
world,
selected_steps,
selected_times,
data,
parent,
weight_field,
edge_weights,
edge_colors,
node_weight,
condition_field,
x,
src,
gallery,
):
nodes = data["a"].loc[data["a"].world == world, :]
nodes["score*cost"] = nodes["final_score"] * nodes["cost"]
fields = [_ for _ in nodes.columns]
nodes = nodes.to_dict("records")
added = -data["a"].input_product.min()
nlevels = data["a"].input_product.max() + 1 + added
level_max = [0] * (nlevels)
dx, dy = 10, 10
for node in nodes:
l = node["input_product"] + added
node["pos"] = ((l + 1) * dx, level_max[l] * dy)
level_max[l] += 1
nodes = {n["name"]: n for n in nodes}
seller_dict = dict(zip(fields, itertools.repeat(float("nan"))))
buyer_dict = dict(zip(fields, itertools.repeat(float("nan"))))
nodes["SELLER"] = {**seller_dict, **dict(pos=(0, dy * (level_max[0] // 2)), name="Seller", type="System")}
nodes["BUYER"] = {
**buyer_dict,
**dict(pos=((nlevels + 1) * dx, dy * (level_max[-1] // 2)), name="Buyer", type="System"),
}
edges, weights = [], []
weight_field_name = "quantity" if weight_field == "count" else weight_field
time_cols = (
[condition_field + "_step", condition_field + "_relative_time"]
if condition_field != "step"
else ["step", "relative_time"]
)
x = x.loc[x.world == world, [weight_field_name, "seller", "buyer"] + time_cols]
x = filter_by_time(x, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times)
x.drop(time_cols, axis=1, inplace=True)
if weight_field == "unit_price":
x = x.groupby(["seller", "buyer"]).mean().reset_index()
x["unit_price"].fillna(0.0, inplace=True)
elif weight_field == "count":
x = x.groupby(["seller", "buyer"]).count().reset_index()
x.rename(columns=dict(quantity="count"), inplace=True)
else:
x = x.groupby(["seller", "buyer"]).sum().reset_index()
for _, d in x.iterrows():
edges.append((d["seller"], d["buyer"], d[weight_field]))
parent.plotly_chart(
plot_network(
fields, nodes, edges=edges, node_weights=node_weight, edge_colors=edge_colors, edge_weights=edge_weights
)
)
if gallery:
return
col1, col2 = parent.beta_columns(2)
mydata = data[src]
myselected = mydata.loc[(mydata.world == world), :]
myselected = filter_by_time(
myselected, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times
)
seller = col1.selectbox("Seller", [""] + sorted(x["seller"].unique()), key=f"seller-{world}")
buyer = col2.selectbox("Buyer", [""] + sorted(x["buyer"].unique()), key=f"buyer-{world}")
if seller:
myselected = myselected.loc[(myselected.seller == seller), :]
if buyer:
myselected = myselected.loc[(myselected.buyer == buyer), :]
myselected = myselected.reset_index()
options = myselected
if src == "n":
col1, col2 = parent.beta_columns(2)
broken = col1.checkbox("Broken", False, key=f"broken-{world}")
timedout = col2.checkbox("Timedout", False, key=f"timedout-{world}")
if not broken:
options = options.loc[~options.broken, :]
if not timedout:
options = options.loc[~options.timedout, :]
# options = options.loc[(options["seller"]==seller) & (options["buyer"]==buyer) & (options.world == world) & (options[f"{condition_field}_step"]<= selected_steps[1]) & (options[f"{condition_field}_step"]>= selected_steps[0]) , :]
if src == "c":
displayed_cols = (
[
"id",
"delivery_step",
"quantity",
"unit_price",
"total_price",
"n_neg_steps",
"concluded_step",
"signed_step",
"executed_step",
"negotiation",
]
+ (["buyer"] if not buyer else [])
+ (["seller"] if not seller else [])
)
elif src == "n":
displayed_cols = ["id", "delivery_step", "quantity", "unit_price", "timedout", "broken", "step", "rounds"]
else:
return
parent.dataframe(
myselected.loc[:, displayed_cols].sort_values(
["signed_step", "delivery_step"] if src == "c" else ["step", "delivery_step"]
)
)
contract = None
options = filter_by_time(
options, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times
)
if parent.checkbox("Ignore Exogenous", key=f"ignore-exogenous-{world}", value=True):
options = options.loc[(options["buyer"] != "BUYER") & (options["seller"] != "SELLER"), :]
if src == "n":
options = options.loc[:, "id"].values
if len(options) < 1:
return
neg = parent.selectbox(label="Negotiation", options=options, key=f"negotiationselect-{world}")
elif src == "c":
options = options.loc[:, "id"].values
if len(options) < 1:
return
elif len(options) == 1:
contract = options[0]
else:
contract = parent.selectbox(label="Contract", options=options, key=f"contractselect-{world}")
neg = myselected.loc[myselected["id"] == contract, "negotiation"]
if len(neg) > 0:
neg = neg.values[0]
else:
neg = None
else:
return
if contract is not None:
parent.write(data["c"].loc[data["c"]["id"] == contract, :])
if not neg or data["n"] is None or len(data["n"]) == 0:
return
neg_info = data["n"].loc[data["n"]["id"] == neg]
offers = data["o"]
offers = offers.loc[offers.negotiation == neg, :].sort_values(["round", "sender"])
# if len(offers) >= 2:
# offers = offers.loc[offers["sender"].shift(1) != offers["sender"],:]
offers.index = range(len(offers))
parent.write(neg_info)
if len(neg_info) < 1:
return
neg_info = neg_info.to_dict("records")[0]
if not neg_info["broken"] and not neg_info["timedout"]:
agreement = dict(
quantity=neg_info["quantity"],
delivery_step=neg_info["delivery_step"],
unit_price=neg_info["unit_price"],
total_price=neg_info["unit_price"] * neg_info["quantity"],
)
else:
agreement = None
parent.markdown(f"**Agreement**: {agreement}")
trange = (neg_info["min_delivery_step"], neg_info["max_delivery_step"])
c1, c2 = parent.beta_columns(2)
if trange[1] > trange[0]:
is_3d = c2.checkbox("3D Graph", key=f"threed-{world}")
else:
is_3d = False
use_ranges = c1.checkbox("Use issue ranges to set axes", True, key=f"useissueranges-{world}")
qrange = (neg_info["min_quantity"] - 1, neg_info["max_quantity"] + 1)
urange = (neg_info["min_unit_price"] - 1, neg_info["max_unit_price"] + 1)
if is_3d:
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter3d(
x=myoffers["quantity"],
y=myoffers["unit_price"],
z=myoffers["delivery_step"],
name=sender,
mode="lines+markers",
marker=dict(size=10),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter3d(
x=[agreement["quantity"]],
y=[agreement["unit_price"]],
z=[agreement["delivery_step"]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="diamond",
)
)
fig.update_layout(xaxis_title="quantity", yaxis_title="unit_price")
else:
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter(
x=myoffers["quantity"],
y=myoffers["unit_price"],
name=sender,
mode="lines+markers",
marker=dict(size=10),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter(
x=[agreement["quantity"]],
y=[agreement["unit_price"]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="star",
)
)
fig.update_layout(xaxis_title="quantity", yaxis_title="unit_price")
if use_ranges:
fig.update_layout(xaxis_range=qrange, yaxis_range=urange)
col1, col2 = parent.beta_columns(2)
def fig_1d(y):
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter(
x=myoffers["round"],
y=myoffers[y],
name=sender,
mode="lines+markers",
marker=dict(size=15),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter(
x=[offers["round"].max()],
y=[agreement[y]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="star",
)
)
fig.update_layout(xaxis_title="Round", yaxis_title=y)
fig.update_layout(yaxis_range=urange if y == "unit_price" else qrange if y == "quantity" else trange)
return fig
col1.plotly_chart(fig_1d("quantity"))
col1.plotly_chart(fig)
col2.plotly_chart(fig_1d("unit_price"))
if trange[1] > trange[0]:
col2.plotly_chart(fig_1d("delivery_step"))
parent.dataframe(offers)
WORLD_INDEX = 0
def display_networks(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
global WORLD_INDEX
max_worlds = parent.number_input("Max. Worlds", 1, None, 4)
if len(selected_worlds) < 1:
st.write("No worlds selected. Cannot show any networks")
return
if len(selected_worlds) > max_worlds:
st.write(f"More than {max_worlds} world selected ({len(selected_worlds)}). Will show the first {max_worlds}")
cols = st.beta_columns([1, 5, 1, 3])
# prev = cols[0].button("<")
# next = cols[2].button(">")
# if prev:
# WORLD_INDEX = (WORLD_INDEX - max_worlds) % len(selected_worlds)
# if next:
# WORLD_INDEX = (WORLD_INDEX + max_worlds) % len(selected_worlds)
WORLD_INDEX = cols[1].slider("", 0, len(selected_worlds) - 1, WORLD_INDEX)
randomize = cols[3].button("Randomize worlds")
if randomize:
random.shuffle(selected_worlds)
selected_worlds = selected_worlds[WORLD_INDEX : WORLD_INDEX + max_worlds]
what = parent.selectbox("Category", ["Contracts", "Negotiations"])
if what == "Contracts":
src = "c"
elif what == "Negotiations":
src = "n"
else:
src = "o"
x = data[src]
if x is None:
st.markdown(f"**{what}** data is **not** available in the logs.")
return
gallery = parent.checkbox("Gallery Mode", len(selected_worlds) > 1)
node_weight_options = sorted(
[_ for _ in data["a"].columns if is_numeric_dtype(data["a"][_]) and _ not in ("id", "is_default")]
)
default_node_weight = node_weight_options.index("final_score")
if default_node_weight is None:
default_node_weight = 0
with st.beta_expander("Networks Settings"):
cols = st.beta_columns(5 + int(gallery))
weight_field = cols[2].selectbox("Edge Weight", ["total_price", "unit_price", "quantity", "count"])
node_weight = cols[3].selectbox("Node Weight", ["none"] + node_weight_options, default_node_weight + 1)
per_step = cols[0].checkbox("Show one step only")
edge_weights = cols[0].checkbox("Variable Edge Width", True)
edge_colors = cols[0].checkbox("Variable Edge Colors", True)
if per_step:
selected_step = cols[1].number_input("Step", selected_steps[0], selected_steps[1], selected_steps[0])
selected_steps = [selected_step] * 2
x["total_price"] = x.quantity * x.unit_price
options = [_[: -len("_step")] for _ in x.columns if _.endswith("_step")]
if src != "c":
options.append("step")
condition_field = cols[4].selectbox("Condition", options, 0 if src != "n" else options.index("step"))
if gallery:
n_cols = cols[5].number_input("Columns", 1, 5, 2)
cols = st.beta_columns(n_cols)
else:
n_cols, cols = 1, [st]
for i, world in enumerate(selected_worlds):
show_a_world(
world,
selected_steps=selected_steps,
selected_times=selected_times,
data=data,
parent=cols[i % n_cols],
weight_field=weight_field,
edge_weights=edge_weights,
edge_colors=edge_colors,
node_weight=node_weight,
condition_field=condition_field,
x=x,
src=src,
gallery=gallery,
)
def display_tables(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
remove_single = parent.checkbox("Remove fields with a single value", True)
def order_columns(x):
cols = sorted(x.columns)
for c in [
"buyer_type",
"seller_type",
"delivery_step",
"quantity",
"unit_price",
"total_price",
"buyer",
"seller",
"name",
"id",
]:
if c in cols:
cols = [c] + [_ for _ in cols if _ != c]
for c in ["world", "config", "group", "tournament"]:
if c in cols:
cols = [_ for _ in cols if _ != c] + [c]
return x.loc[:, cols]
def remove_singletons(x):
selected = []
for c in x.columns:
if len(x[c].unique()) < 2:
continue
selected.append(c)
return x.loc[:, selected]
def show_table(x, must_choose=False):
x = order_columns(x)
if remove_single:
x = remove_singletons(x)
selected_cols = st.multiselect(label="Columns", options=x.columns)
if selected_cols or must_choose:
st.dataframe(x.loc[:, selected_cols])
else:
st.dataframe(x)
def create_chart(df, chart_type):
if chart_type == "Scatter":
return alt.Chart(df).mark_point()
if chart_type == "Bar":
return alt.Chart(df).mark_bar()
if chart_type == "Box":
return alt.Chart(df).mark_boxplot()
if chart_type == "Line":
return alt.Chart(df).mark_line()
raise ValueError(f"Unknown chart type {chart_type}")
for lbl, k, has_step in (
("Tournaments", "t", False),
("Configs", "con", False),
("Worlds", "w", False),
("Products", "p", False),
("Agents", "a", False),
("Contracts", "c", True),
("Negotiations", "n", True),
("Offers", "o", True),
):
if data[k] is None or not len(data[k]):
continue
if not parent.checkbox(label=lbl, key=f"tbl-{lbl}-c1"):
continue
if has_step:
df = filter_by_time(
data[k], ["signed_", "concluded_"] if k == "c" else [""], selected_steps, selected_times
)
else:
df = data[k]
if lbl == "Agents":
if st.checkbox("Ignore Default Agents", True, key=f"tbl-{lbl}-ignore-default"):
df = df.loc[~df["is_default"], :]
elif lbl == "Contracts":
if st.checkbox("Ignore Exogenous Contracts", True, key=f"tbl-{lbl}-ignore-exogenous"):
df = df.loc[df["n_neg_steps"] < 1, :]
show_table(df)
st.text(f"{len(df)} records found")
cols = st.beta_columns(6)
type_ = cols[0].selectbox("Chart", ["Scatter", "Line", "Bar", "Box"], 0, key=f"select-{lbl}-chart")
x = cols[1].selectbox("x", ["none"] + list(df.columns), key=f"select-{lbl}-x")
y = m = c = s = "none"
if x != "none":
y = cols[2].selectbox("y", ["none"] + list(df.columns), key=f"select-{lbl}-y")
if y != "none":
m = cols[3].selectbox("Mark", ["none"] + list(df.columns), key=f"select-{lbl}-mark")
c = cols[4].selectbox("Color", ["none"] + list(df.columns), key=f"select-{lbl}-color")
s = cols[5].selectbox("Size", ["none"] + list(df.columns), key=f"select-{lbl}-size")
kwargs = dict(x=x, y=y)
if m != "none":
kwargs["shape"] = m
if s != "none":
kwargs["size"] = s
if c != "none":
kwargs["color"] = c
else:
kwargs = dict(x=x, y=alt.X(x, bin=True))
chart = create_chart(df, type_ if y != "none" else "Bar").encode(**kwargs)
st.altair_chart(chart, use_container_width=True)
def display_time_series(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
settings = st.beta_expander("Time Series Settings")
ncols = settings.number_input("N. Columns", 1, 6, 2)
xvar = settings.selectbox("x-variable", ["step", "relative_time"], 1 - int(len(selected_worlds) == 1))
dynamic = settings.checkbox("Dynamic Figures", value=True)
sectioned = settings.checkbox("Figure Sections", True)
ci_level = settings.selectbox(options=[80, 90, 95], label="CI Level", index=2)
world_stats, selected_world_stats, combine_world_stats, overlay_world_stats = add_stats_selector(
folder,
"world_stats",
[[("world", selected_worlds), ("step", selected_steps), ("relative_time", selected_times)]],
xvar=xvar,
label="World Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "name", "tournament", "type", "step", "relative_time")
],
default_selector="one",
)
product_stats, selected_product_stats, combine_product_stats, overlay_product_stats = add_stats_selector(
folder,
"product_stats",
[[("product", selected_products), ("step", selected_steps), ("relative_time", selected_times)]],
xvar=xvar,
label="Product Statistics",
choices=lambda x: [
_
for _ in x.columns
if _ not in ("name", "world", "name", "tournament", "type", "step", "product", "relative_time")
],
default_selector="some",
default_choice=["trading_price"],
combine=False,
overlay=False,
)
default_agent_stats = [
"score",
"productivity",
"inventory_input",
"inventory_output",
"balance",
"assets",
"spot_market_loss",
"spot_market_quantity",
]
type_stats, selected_type_stats, combine_type_stats, overlay_type_stats = add_stats_selector(
folder,
"agent_stats",
[
[
("world", selected_worlds),
("type", selected_types),
("step", selected_steps),
("relative_time", selected_times),
]
],
xvar=xvar,
label="Type Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "tournament", "type", "step", "relative_time")
],
key="type",
default_selector="some" if len(selected_worlds) != 1 else "none",
default_choice=default_agent_stats if len(selected_worlds) != 1 else None,
combine=False,
overlay=False,
)
agent_stats, selected_agent_stats, combine_agent_stats, overlay_agent_stats = add_stats_selector(
folder,
"agent_stats",
[
[
("world", selected_worlds),
("name", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
]
],
xvar=xvar,
label="Agent Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "tournament", "type", "step", "relative_time")
],
default_selector="some" if len(selected_worlds) == 1 else "none",
default_choice=default_agent_stats if len(selected_worlds) == 1 else None,
combine=False,
overlay=False,
)
(
contract_stats_world,
selected_contract_stats_world,
combine_contract_stats_world,
overlay_contract_stats_world,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (World)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="world",
)
(
contract_stats_type,
selected_contract_stats_type,
combine_contract_stats_type,
overlay_contract_stats_type,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="type",
)
(
contract_stats_agent,
selected_contract_stats_agent,
combine_contract_stats_agent,
overlay_contract_stats_agent,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Agents)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="name",
)
def aggregate_contract_stats(stats, ignored_cols):
# drop the columns of the ignored side, then sum the trade columns over the rest
cols = [_ for _ in stats.columns if not any(_.endswith(x) for x in ["price", "quantity", "count"])]
ignored_cols = [_ for _ in cols if _.startswith(ignored_cols)]
cols = [_ for _ in cols if _ not in ignored_cols]
allcols = [_ for _ in stats.columns if _ not in ignored_cols]
stats = stats.loc[:, allcols].groupby(cols).sum()
# unit prices cannot be summed; recompute them from the aggregated totals
for c in stats.columns:
if c.endswith("unit_price"):
base = "_".join(c.split("_")[:-2])
stats[c] = stats[f"{base}_total_price"] / stats[f"{base}_quantity"]
stats[c] = stats[c].fillna(0)
return stats.reset_index()
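# Worked example (hypothetical numbers): summing two contracts with
# (quantity=2, unit_price=10) and (quantity=3, unit_price=20) gives
# total_price=80 and quantity=5, so the recomputed unit_price is 80/5 = 16
# rather than the naive mean of 15, which is why unit prices are rebuilt
# from totals after the groupby-sum.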
(
contract_stats_buyer_type,
selected_contract_stats_buyer_type,
combine_contract_stats_buyer_type,
overlay_contract_stats_buyer_type,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
# [("world", selected_worlds), ("seller", selected_agents)],
],
xvar=xvar,
label="Contract Statistics (Buyer Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="buyer_type",
)
(
contract_stats_seller_type,
selected_contract_stats_seller_type,
combine_contract_stats_seller_type,
overlay_contract_stats_seller_type,
) = add_stats_selector(
folder,
"contract_stats",
[
# [("world", selected_worlds), ("buyer", selected_agents)],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Seller Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="seller_type",
)
(
contract_stats_buyer,
selected_contract_stats_buyer,
combine_contract_stats_buyer,
overlay_contract_stats_buyer,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
# [("world", selected_worlds), ("seller", selected_agents)],
],
xvar=xvar,
label="Contract Statistics (Buyer)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="buyer",
)
(
contract_stats_seller,
selected_contract_stats_seller,
combine_contract_stats_seller,
overlay_contract_stats_seller,
) = add_stats_selector(
folder,
"contract_stats",
[
# [("world", selected_worlds), ("buyer", selected_agents)],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Seller)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="seller",
)
# aggregate each view over the opposite side of the contract
contract_stats_buyer = aggregate_contract_stats(contract_stats_buyer, "seller")
contract_stats_seller = aggregate_contract_stats(contract_stats_seller, "buyer")
contract_stats_buyer_type = aggregate_contract_stats(contract_stats_buyer_type, "seller")
contract_stats_seller_type = aggregate_contract_stats(contract_stats_seller_type, "buyer")
contract_stats_agent["agent"] = contract_stats_agent["seller"] + "->" + contract_stats_agent["buyer"]
contract_stats_agent["agent_type"] = contract_stats_agent["seller_type"] + "->" + contract_stats_agent["buyer_type"]
contract_stats_type["agent"] = contract_stats_type["seller"] + "->" + contract_stats_type["buyer"]
contract_stats_type["agent_type"] = contract_stats_type["seller_type"] + "->" + contract_stats_type["buyer_type"]
(
contract_stats_product,
selected_contract_stats_product,
combine_contract_stats_product,
overlay_contract_stats_product,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Product)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="product",
)
cols, start_col = add_stats_display(
world_stats,
selected_world_stats,
combine_world_stats,
overlay_world_stats,
ncols=ncols,
xvar=xvar,
hue="world",
title="World Figures",
sectioned=sectioned,
cols=None,
start_col=0,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
product_stats,
selected_product_stats,
combine_product_stats,
overlay_product_stats,
ncols=ncols,
xvar=xvar,
hue="product",
title="product Figures",
sectioned=sectioned,
cols=None,
start_col=0,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
type_stats,
selected_type_stats,
combine_type_stats,
overlay_type_stats,
ncols=ncols,
xvar=xvar,
hue="type",
title="Agent Type Figures",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
agent_stats,
selected_agent_stats,
combine_agent_stats,
overlay_agent_stats,
ncols=ncols,
xvar=xvar,
hue="name",
title="Agent Instance Figures",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_world,
selected_contract_stats_world,
combine_contract_stats_world,
overlay_contract_stats_world,
ncols=ncols,
xvar=xvar,
hue="world",
title="Trade Figures (World)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_type,
selected_contract_stats_type,
combine_contract_stats_type,
overlay_contract_stats_type,
ncols=ncols,
xvar=xvar,
hue="agent_type",
title="Trade Figures (Agent Type)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_buyer_type,
selected_contract_stats_buyer_type,
combine_contract_stats_buyer_type,
overlay_contract_stats_buyer_type,
ncols=ncols,
xvar=xvar,
hue="buyer_type",
title="Trade Figures (Buyer Type)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_seller_type,
selected_contract_stats_seller_type,
combine_contract_stats_seller_type,
overlay_contract_stats_seller_type,
ncols=ncols,
xvar=xvar,
hue="seller_type",
cols=cols,
start_col=start_col,
title="Trade Figures (Seller Type)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_agent,
selected_contract_stats_agent,
combine_contract_stats_agent,
overlay_contract_stats_agent,
ncols=ncols,
xvar=xvar,
hue="agent",
cols=cols,
start_col=start_col,
title="Trade Figures (Agent Instance)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_buyer,
selected_contract_stats_buyer,
combine_contract_stats_buyer,
overlay_contract_stats_buyer,
ncols=ncols,
xvar=xvar,
hue="buyer",
cols=cols,
start_col=start_col,
title="Trade Figures (Buyer Instance)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_seller,
selected_contract_stats_seller,
combine_contract_stats_seller,
overlay_contract_stats_seller,
ncols=ncols,
xvar=xvar,
hue="seller",
title="Trade Figures (Seller Instance)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_product,
selected_contract_stats_product,
combine_contract_stats_product,
overlay_contract_stats_product,
ncols=ncols,
xvar=xvar,
hue="product",
title="Trade Figures (Product)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
def display_others(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
# settings = parent.beta_expander("Settings")
# ncols = settings.number_input("N. Columns", min_value=1, max_value=6)
if parent.checkbox("Score Distribution", False):
score_distribution(selected_worlds, selected_agents, selected_types, data, parent=parent)
if parent.checkbox("Final Score Factors", False):
score_factors(selected_worlds, selected_agents, selected_types, data, parent=parent)
if __name__ == "__main__":
import sys
from streamlit import cli as stcli
folder = None
if len(sys.argv) > 1:
folder = Path(sys.argv[1])
if st._is_running_with_streamlit:
main(folder)
else:
sys.argv = ["streamlit", "run"] + sys.argv
sys.exit(stcli.main())
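# Launch sketch (script and log names hypothetical): running
#   python visualizer.py logs/my_tournament
# outside of Streamlit re-executes itself as
#   streamlit run visualizer.py logs/my_tournament
# via stcli.main(), so the dashboard always ends up served by Streamlit.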
a08cb54701ee8d7129f53895ca2daa2a379bad89 | 4,431 | py | Python | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | ["MIT"] | 2 | 2021-01-30T23:14:36.000Z | 2021-02-17T01:41:56.000Z | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | ["MIT"] | null | null | null | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | ["MIT"] | null | null | null |
import numpy as np
from typing import List, Tuple
from math import sqrt
from QFA.Automaton import Automaton
from math import cos, sin, pi
class MO_1QFA(Automaton):
def __init__(self, alphabet: str,
initial_state: np.ndarray,
transition_matrices: List[np.ndarray],
projective_measurement: np.ndarray):
# list of chars
self.alphabet = alphabet
# np column vector, initial dist over states
self.initial_state = initial_state
# list of np matrices - position in list corresponds to position of letter in alphabet,
# perhaps a map could be better
self.transition_matrices = transition_matrices
# np matrix containing ones and zeroes
self.projective_measurement = projective_measurement
def process(self, word: str) -> Tuple[float, float]:
state = self.initial_state
for letter in word:
transition_matrix = self.transition_matrices[self.alphabet.index(letter)]
state = transition_matrix @ state
state = self.projective_measurement @ state
acceptance_probability = np.vdot(state, state).real  # vdot(a, a) = squared norm of a
return acceptance_probability, 0
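# What process() computes, in formula form: for a word w = w_1 ... w_n with
# unitaries U_i and projector P onto the accepting subspace,
#   p_accept(w) = || P @ U_n @ ... @ U_1 @ psi_0 ||^2
# e.g. with psi_0 = (1, 0)^T, U = [[s, s], [s, -s]] for s = sqrt(1/2), and
# P = [[0, 0], [0, 1]] (the first example below), the word 'a' is accepted
# with probability 1/2.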
def example():
print('MO_1QFA examples:')
mo_1qfa_example_1()
mo_1qfa_example_2()
mo_1qfa_example_3()
qfa = mo_1qfa_example_4()
return qfa
def mo_1qfa_example_1():
alphabet = 'a'
a_matrix = np.array([[sqrt(1/2), sqrt(1/2)], [sqrt(1/2), -sqrt(1/2)]])
initial_state = np.array([[1], [0]])
measurement = np.array([[0, 0], [0, 1]])
qfa = MO_1QFA(alphabet, initial_state, [a_matrix], measurement)
print('mo_qfa1')
# it should return 1/2
res = qfa.process('a')
print('a\t', res)
# example from QFA paper - returns 0 as it should
# the paper: https://www.researchgate.net/publication/264906610_Quantum_Finite_Automata
# Qiu, Daowen & Li, Lvzhou & Mateus, Paulo & Gruska, Jozef.
# (2012).
# Quantum Finite Automata. Handbook of Finite State Based Models and Applications.
# 10.1201/b13055-7.
res = qfa.process('aa')
print('aa\t', res)
return qfa
def mo_1qfa_example_2():
# example from wikipedia: (https://en.wikipedia.org/wiki/Quantum_finite_automata#Measure-once_automata)
alphabet = '01'
zero_matrix = np.array([[0, 1], [1, 0]])
one_matrix = np.array([[1, 0], [0, 1]])
projection_matrix = np.array([[1, 0], [0, 0]])
initial_state = np.array([[1], [0]])
qfa2 = MO_1QFA(alphabet, initial_state, [zero_matrix, one_matrix], projection_matrix)
# should behave like a DFA expecting words with an even number of '0's
print('mo_qfa2')
print('111\t', qfa2.process('111'))
print('101\t', qfa2.process('101'))
print('001\t', qfa2.process('001'))
print('\t', qfa2.process(''))
return qfa2
def mo_1qfa_example_3():
alphabet = '01'
zero_matrix = np.array([[0, 1], [1, 0]])
one_matrix = np.array([[1, 0], [0, 1]])
projection_matrix = np.array([[1, 0], [0, 0]])
# same example as the mo_1qfa_example_2, but the initial state is complex
initial_state = np.array([[1/2+1j/2], [1/(2*sqrt(2))+1j/(2*sqrt(2))]])
qfa3 = MO_1QFA(alphabet, initial_state, [zero_matrix, one_matrix], projection_matrix)
# one must remember that the initial state must be a quantum state, so it must comply with the normalisation
# condition
print('mo_qfa3')
print('111\t', qfa3.process('111'))
print('101\t', qfa3.process('101'))
print('001\t', qfa3.process('001'))
print('\t', qfa3.process(''))
return qfa3
def mo_1qfa_example_4():
# This automaton should accept the language L = {a^(3n)}
# words in L should have the acceptance probability 1
alphabet = 'a'
a_matrix = np.array([[cos(2*pi/3), -sin(2*pi/3)],
[sin(2*pi/3), cos(2*pi/3)]])
end_matrix = np.eye(2)
projection_matrix = np.array([[1, 0], [0, 0]])
initial_state = np.array([[1], [0]])
qfa = MO_1QFA(alphabet, initial_state, [a_matrix, end_matrix], projection_matrix)
print("mo_1qfa4")
print("a\t", qfa.process('a'))
print("aa\t", qfa.process('aa'))
print("aaa\t", qfa.process('aaa'))
return qfa
if __name__ == "__main__":
example()
a08ec5f751e5c0ed745a1196c05685644187a34f | 591 | py | Python | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | ["MIT"] | null | null | null | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | ["MIT"] | null | null | null | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Imu
import numpy as np
c_imu, c_angle = 0, 0
def cb_imu(msg):
global c_imu
c_imu += 1
def cb_angle(msg):
global c_angle
c_angle += 1
if c_angle == 400:
print("count of received data of angle = {}".format(c_angle))
print("count of received data of imu = {}".format(c_imu))
if __name__ == "__main__":
rospy.init_node("freq_checker")
ori_sub = rospy.Subscriber("/myo_raw/myo_ori", Vector3, cb_angle)  # orientation topic feeds the angle counter
imu_sub = rospy.Subscriber("/myo_raw/myo_imu", Imu, cb_imu)  # imu topic feeds the imu counter
rospy.spin()
a0963df40f1df1fa608416915de9bf22beecf414 | 1,692 | py | Python | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | ["MIT"] | null | null | null |
import win32com.client
# Disable early binding: full of race conditions writing the cache files,
# and changes the semantics since inheritance isn't handled correctly
import win32com.client.gencache
_savedGetClassForCLSID = win32com.client.gencache.GetClassForCLSID
win32com.client.gencache.GetClassForCLSID = lambda x: None
project = win32com.client.DispatchEx("Mga.MgaProject")
project.Open("MGA=" + r'D:\Projects\META\development\models\DynamicsTeam\MasterInterpreter\MasterInterpreter.mga')
# config_light = win32com.client.DispatchEx("CyPhyMasterInterpreter.ConfigurationSelectionLight")
# # GME id, or guid, or abs path or path to Test bench or SoT or PET
# config_light.ContextId = '{6d24a596-ec4f-4910-895b-d03a507878c3}'
# print config_light.SelectedConfigurationIds
# config_light.SetSelectedConfigurationIds(['id-0065-000000f1'])
# #config_light.KeepTemporaryModels = True
# #config_light.PostToJobManager = True
# master = win32com.client.DispatchEx("CyPhyMasterInterpreter.CyPhyMasterInterpreterAPI")
# master.Initialize(project)
# results = master.RunInTransactionWithConfigLight(config_light)
# It works only this way and does not worth the time to figure out the other way.
# will run ALL configurations.
focusobj = None
try:
project.BeginTransactionInNewTerr()
focusobj = project.GetObjectByID('id-0065-00000635')
finally:
project.AbortTransaction()
selectedobj = win32com.client.DispatchEx("Mga.MgaFCOs")
interpreter = "MGA.Interpreter.CyPhyMasterInterpreter"
launcher = win32com.client.DispatchEx("Mga.MgaLauncher")
launcher.RunComponent(interpreter, project, focusobj, selectedobj, 128)
project.Close()
a097f2e9cca87b9c4ab3fbfbe7eb9b74f83ce331 | 4,051 | py | Python | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageOps
def view_image(img, filename = 'image'):
fig, ax = plt.subplots(figsize=(6, 9))
ax.imshow(img.reshape(96, 96).squeeze())
ax.axis('off')
plt.savefig(filename + '.png')
def convert_to_PIL(img):
img_r = img.reshape(96, 96)
pil_img = Image.new('RGB', (96, 96), 'white')
pixels = pil_img.load()
for i in range(0, 96):
for j in range(0, 96):
if img_r[i, j] > 0:
pixels[j, i] = (255 - int(img_r[i, j] * 255), 255 - int(img_r[i, j] * 255), 255 - int(img_r[i, j] * 255))
return pil_img
def convert_to_np(pil_img):
pil_img = pil_img.convert('RGB')
img = np.zeros((96, 96))
pixels = pil_img.load()
for i in range(0, 96):
for j in range(0, 96):
img[i, j] = 1 - pixels[j, i][0] / 255
return img
def crop_image(image):
cropped_image = image
# get image size
width, height = cropped_image.size
# get image pixels
pixels = cropped_image.load()
image_strokes_rows = []
image_strokes_cols = []
# run through the image
for i in range(0, width):
for j in range(0, height):
# save coordinates of the image
if (pixels[i,j][3] > 0):
image_strokes_cols.append(i)
image_strokes_rows.append(j)
# if image is not empty then crop to contents of the image
if (len(image_strokes_rows)) > 0:
# find the box for image
row_min = np.array(image_strokes_rows).min()
row_max = np.array(image_strokes_rows).max()
col_min = np.array(image_strokes_cols).min()
col_max = np.array(image_strokes_cols).max()
# crop to the bounding box of the strokes
border = (col_min, row_min, width - col_max, height - row_max)
cropped_image = ImageOps.crop(cropped_image, border)
# get cropped image size
width_cropped, height_cropped = cropped_image.size
# create square resulting image to paste cropped image into the center
dst_im = Image.new("RGBA", (max(width_cropped, height_cropped), max(width_cropped, height_cropped)), "white")
offset = ((max(width_cropped, height_cropped) - width_cropped) // 2, (max(width_cropped, height_cropped) - height_cropped) // 2)
# paste to the center of a resulting image
dst_im.paste(cropped_image, offset, cropped_image)
#resize
dst_im.thumbnail((96, 96), Image.ANTIALIAS)
return dst_im
def normalize(arr):
arr = arr.astype('float')
# Do not touch the alpha channel
for i in range(3):
minval = arr[...,i].min()
maxval = arr[...,i].max()
if minval != maxval:
arr[...,i] -= minval
arr[...,i] *= (255.0/(maxval-minval))
return arr
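# Worked example (hypothetical channel): if a channel spans [50, 200], then
# 50 -> (50 - 50) * 255 / 150 = 0 and 200 -> (200 - 50) * 255 / 150 = 255,
# i.e. each RGB channel is stretched to the full [0, 255] range while the
# alpha channel is left alone.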
def normalize_image(image):
arr = np.array(image)
new_img = Image.fromarray(normalize(arr).astype('uint8'),'RGBA')
return new_img
def alpha_composite(front, back):
front = np.asarray(front)
back = np.asarray(back)
result = np.empty(front.shape, dtype='float')
alpha = np.index_exp[:, :, 3:]
rgb = np.index_exp[:, :, :3]
falpha = front[alpha] / 255.0
balpha = back[alpha] / 255.0
result[alpha] = falpha + balpha * (1 - falpha)
old_setting = np.seterr(invalid='ignore')
result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]
np.seterr(**old_setting)
result[alpha] *= 255
result = np.clip(result, 0, 255)  # np.clip returns the clipped array; it does not clip in place here
# astype('uint8') maps np.nan and np.inf to 0
result = result.astype('uint8')
result = Image.fromarray(result, 'RGBA')
return result
def alpha_composite_with_color(image, color=(255, 255, 255)):
back = Image.new('RGBA', size=image.size, color=color + (255,))
return alpha_composite(image, back)
def convert_to_rgb(image):
image_rgb = alpha_composite_with_color(image)
return image_rgb.convert('RGB')  # convert() returns a new image rather than modifying in place
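# Typical pipeline sketch (file name hypothetical):
#   img = Image.open("sketch.png")   # RGBA drawing with transparency
#   img = crop_image(img)            # crop to strokes, pad square, shrink to 96x96
#   img = normalize_image(img)       # stretch each channel to [0, 255]
#   img = convert_to_rgb(img)        # composite onto white, drop alpha
#   arr = convert_to_np(img)         # 96x96 floats in [0, 1] for a model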
a098e971e8b1b7172d8860ca8ed8514362a25eea | 360 | py | Python | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null |
import os
def _read_template(filename):
# shared helper: both templates sit next to this module
with open(os.path.join(os.path.dirname(__file__), filename), 'r') as f:
return f.read()
def ui_tools_js():
return _read_template('template.js')
def ui_tools_html():
return _read_template('template.html')
a09c1cbcccf7a63039a5587fbbf109f0b5dc595c | 608 | py | Python | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null |
import sys
import time
from grove.adc import ADC
class GroveRotaryAngleSensor(ADC):
def __init__(self, channel):
self.channel = channel
self.adc = ADC()
@property
def value(self):
return self.adc.read(self.channel)
Grove = GroveRotaryAngleSensor
def main():
if len(sys.argv) < 2:
print('Usage: {} adc_channel'.format(sys.argv[0]))
sys.exit(1)
sensor = GroveRotaryAngleSensor(int(sys.argv[1]))
while True:
print('Rotary Value: {}'.format(sensor.value))
time.sleep(.2)
if __name__ == '__main__':
main()
a0a9d841059677b45b6f09a062af0ebdbc1dceea | 5,394 | py | Python | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null |
# utility functions
import ast
import urllib
import datetime
import pytz
import pylibmc
# Import settings
from django.conf import settings
# API Models
from apps.api.models import APIKey, Character, APITimer
# Eve_DB Models
from eve_db.models import MapSolarSystem
# API Access Masks
CHARACTER_API_ACCESS_MASKS = {'AccountBalance': 1,
'AssetList': 2,
'CalendarEventAttendees': 4,
'CharacterSheet': 8,
'ContactList': 16,
'ContactNotifications': 32,
'FacWarStats': 64,
'IndustryJobs': 128,
'KillLog': 256,
'MailBodies': 512,
'MailingLists': 1024,
'MailMessages': 2048,
'MarketOrders': 4096,
'Medals': 8192,
'Notifications': 16384,
'NotificationTexts': 32768,
'Research': 65536,
'SkillInTraining': 131072,
'SkillQueue': 262144,
'Standings': 524288,
'UpcomingCalendarEvents': 1048576,
'WalletJournal': 2097152,
'WalletTransactions': 4194304,
'CharacterInfo': 25165824,
'AccountStatus': 33554432,
'Contracts': 67108864,
'Locations': 134217728}
def get_memcache_client():
"""
Returns a ready-to-use memcache client
"""
return pylibmc.Client(settings.MEMCACHE_SERVER,
binary=settings.MEMCACHE_BINARY,
behaviors=settings.MEMCACHE_BEHAVIOUR)
def dictfetchall(cursor):
"""
Returns all rows from a cursor as a dict
"""
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def cast_empty_string_to_int(string):
"""
Casts empty string to 0
"""
# Strip stuff only if it's a string
if isinstance(string, str):
string = string.strip()
return int(string) if string else 0
def cast_empty_string_to_float(string):
"""
Casts empty string to 0
"""
# Strip stuff only if it's a string
if isinstance(string, str):
string = string.strip()
return float(string) if string else 0
def calculate_character_access_mask(sheets):
"""
Returns combined access mask for a list of API sheets.
"""
mask = 0
for sheet in sheets:
mask += CHARACTER_API_ACCESS_MASKS[sheet]
return mask
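# Example (hypothetical key): a key that needs MarketOrders and
# WalletTransactions has mask 4096 + 4194304 = 4198400; a key grants it when
# (mask & key.accessmask) == mask, which is exactly the bitwise check used below.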
def manage_character_api_timers(character):
"""
Adds and removes character APITimers for a given character depending on the character's key permissions.
When we add more functions, we need to add them to the masks dictionary.
"""
key_mask = character.apikey.accessmask
for sheet in CHARACTER_API_ACCESS_MASKS:
mask = CHARACTER_API_ACCESS_MASKS[sheet]
if ((mask & key_mask) == mask):
# If we have permission, create timer if not already present
try:
APITimer.objects.get(character=character, apisheet=sheet)
except APITimer.DoesNotExist:
new_timer = APITimer(character=character,
corporation=None,
apisheet=sheet,
nextupdate=pytz.utc.localize(datetime.datetime.utcnow()))
new_timer.save()
else:
# If we are not permitted to do this, remove existent timers
try:
APITimer.objects.get(character=character, apisheet=sheet).delete()
except APITimer.DoesNotExist:
pass
def validate_characters(user, access_mask):
"""
Returns characters of a user that match a given minimum access mask.
"""
# Get keys
keys = APIKey.objects.filter(user=user)
characters = []
for key in keys:
# Do a simple bitwise operation to determine if we have sufficient rights with this key.
if ((access_mask & key.accessmask) == access_mask):
# Get all chars from that key which have sufficient permissions.
characters += list(Character.objects.filter(apikey=key))
return characters
def find_path(start, finish, security=5, invert=0):
"""
Returns a list of system objects which represent the path.
start: system_id of first system
finish: system_id of last system
security: sec level of system * 10
invert: if true (1), use security as highest seclevel you want to enter, default (0) seclevel is the lowest you want to try to use
"""
# Set params
params = urllib.urlencode({'start': start, 'finish': finish, 'seclevel': security, 'invert': invert})
response = urllib.urlopen('http://localhost:3455/path', params)
path_list = ast.literal_eval(response.read())
path = []
for waypoint in path_list:
path.append(MapSolarSystem.objects.get(id=waypoint))
return path
a0af5afc99a71406be5ffead3cb66d5a5fbdf490 | 2,608 | py | Python | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 2 | 2019-08-21T08:23:45.000Z | 2019-09-25T13:20:28.000Z | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 11 | 2019-08-21T08:46:01.000Z | 2021-09-08T01:18:04.000Z | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 5 | 2019-08-30T08:19:57.000Z | 2019-10-26T03:31:16.000Z |
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
import crafting.IRecipeType
import json
import ResourceLocator
import item.ItemHandler
import traceback
import mod.ModMcpython
class CraftingHandler:
def __init__(self):
self.recipeinfotable = {}
# all shapeless recipes sorted by item count
self.crafting_recipes_shapeless = {}
# all shaped recipes sorted by item count and then by size
self.crafting_recipes_shaped = {}
self.loaded_mod_dirs = []
def __call__(self, obj):
if issubclass(obj, crafting.IRecipeType.IRecipe):
self.recipeinfotable[obj.get_recipe_name()] = obj
else:
raise ValueError()
return obj
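# Usage sketch (recipe class hypothetical): the handler doubles as a class
# decorator, registering recipe types under their recipe name:
# @G.craftinghandler
# class ShapedRecipe(crafting.IRecipeType.IRecipe): ...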
def add_recipe(self, recipe: crafting.IRecipeType.IRecipe):
recipe.register()
def add_recipe_from_data(self, data: dict):
name = data["type"]
if name in self.recipeinfotable:
recipe = self.recipeinfotable[name].from_data(data)
self.add_recipe(recipe)
return recipe
else:
raise ValueError("can't load recipe. recipe class {} not arrival".format(name))
def add_recipe_from_file(self, file: str):
try:
self.add_recipe_from_data(ResourceLocator.read(file, "json"))
except ValueError:
pass
def load(self, modname):
if modname in self.loaded_mod_dirs:
print("ERROR: mod '{}' has tried to load crafting recipes twice or more".format(modname))
return # make sure to load only once!
self.loaded_mod_dirs.append(modname)
for itemname in ResourceLocator.get_all_entries("data/{}/recipes".format(modname)):
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:bake", self.add_recipe_from_file, itemname,
info="loading crafting recipe from {}".format(itemname))
G.craftinghandler = CraftingHandler()
def load_recipe_providers():
from . import (GridRecipes)
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:groups", load_recipe_providers,
info="loading crafting recipe groups")
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipes", G.craftinghandler.load, "minecraft",
info="loading crafting recipes")
a0b0788c0fdd53bb74359f134c5cbbe7dd53cb63 | 1,625 | py | Python | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | null | null | null | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | 1 | 2021-05-26T02:21:42.000Z | 2021-05-26T02:21:42.000Z | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | null | null | null |
# Checks number of concurrent connections from XCaches to MWT2 dCache.
# Creates alarm if more than 200 from any server.
# ====
# It is run every 30 min from a cron job.
import json
from datetime import datetime
import requests
from alerts import alarms
config_path = '/config/config.json'
with open(config_path) as json_data:
config = json.load(json_data,)
print('current time', datetime.now())
res = requests.get(
'http://graphite.mwt2.org/render?target=dcache.xrootd.*&format=json&from=now-2min')
if (res.status_code == 200):
data = res.json()
print(data)
print('received data on {} servers'.format(len(data)))
else:
print('problem in receiving connections!')
data = []  # avoid a NameError in the loop over `data` below when the request fails
ALARM = alarms('Virtual Placement', 'XCache', 'large number of connections')
for server in data:
serverIP = server['target'].replace('dcache.xrootd.', '').replace('_', '.')
connections = server['datapoints'][-1][0]
timestamp = server['datapoints'][-1][1]
timestamp = datetime.fromtimestamp(timestamp)
timestamp = timestamp.strftime("%Y-%m-%d %H:%M:%S")
if not connections:
print('n connections not retrieved... skipping.')
continue
if connections < 200:
print('server {} has {} connections.'.format(serverIP, connections))
else:
source = {
"xcache": serverIP,
"n_connections": connections,
"timestamp": timestamp
}
print(source)
ALARM.addAlarm(
body='too many connections.',
tags=[serverIP],
source=source
)
a0b0d03bf62e28fff9360da39608230424f15bea | 769 | py | Python | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null |
import csv
def area(x1, y1, x2, y2, x3, y3):
return abs((x1 * (y2 - y3) + x2 * (y3 - y1)
+ x3 * (y1 - y2)) / 2.0)
def isInside(lis):
x, y = 0, 0
x1, y1, x2, y2, x3, y3 = lis[1:]
x1 = int(x1)
x2 = int(x2)
x3 = int(x3)
y1 = int(y1)
y2 = int(y2)
y3 = int(y3)
A = area (x1, y1, x2, y2, x3, y3)
A1 = area (x, y, x2, y2, x3, y3)
A2 = area (x1, y1, x, y, x3, y3)
A3 = area (x1, y1, x2, y2, x, y)
# exact float equality is fragile here; compare the areas with a small tolerance
return abs(A - (A1 + A2 + A3)) < 1e-9
filename = "traingles.csv"
with open(filename, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader)
for line in csv_reader:
if line:
print(isInside(line))
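# Worked example (hypothetical row): for the triangle (-1,-1), (3,-1), (-1,3),
# A = 8 and A1 + A2 + A3 = 4 + 2 + 2 = 8, so the origin lies inside:
# isInside(['id', -1, -1, 3, -1, -1, 3]) -> True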
a0b1f6e65ee6e7176da940ac100c95bce2eaea30 | 238 | py | Python | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | vasudev-sharma/course-content | 46fb9be49da52acb5df252dda43f11b6d1fe827f | ["CC-BY-4.0", "BSD-3-Clause"] | 1 | 2021-06-09T09:56:21.000Z | 2021-06-09T09:56:21.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | ["CC-BY-4.0", "BSD-3-Clause"] | 1 | 2021-06-16T05:41:08.000Z | 2021-06-16T05:41:08.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | ["CC-BY-4.0", "BSD-3-Clause"] | null | null | null |
t = np.arange(0, 10, 0.1) # Time from 0 to 10 years in 0.1 steps
with plt.xkcd():
p = np.exp(0.3 * t)
fig = plt.figure(figsize=(6, 4))
plt.plot(t, p)
plt.ylabel('Population (millions)')
plt.xlabel('time (years)')
plt.show()
a0b646cbb8b05a36f6c66a8ee0acf369718630ee | 2,339 | py | Python | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 5,504 | 2017-11-30T21:25:07.000Z | 2022-03-31T17:00:58.000Z | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 247 | 2017-12-07T06:09:56.000Z | 2022-03-23T05:34:47.000Z | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 953 | 2017-12-01T17:05:17.000Z | 2022-03-26T13:15:33.000Z |
import os
import sys
# If installed to a custom prefix directory, binwalk may not be in
# the default module search path(s). Try to resolve the prefix module
# path and make it the first entry in sys.path.
# Ensure that 'src/binwalk' becomes '.' instead of an empty string
_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for _module_path in [
# from repo: src/scripts/ -> src/
_parent_dir,
# from build dir: build/scripts-3.4/ -> build/lib/
os.path.join(_parent_dir, "lib"),
# installed in non-default path: bin/ -> lib/python3.4/site-packages/
os.path.join(_parent_dir,
"lib",
"python%d.%d" % (sys.version_info[0], sys.version_info[1]),
"site-packages")
]:
if os.path.exists(_module_path) and _module_path not in sys.path:
sys.path = [_module_path] + sys.path
import binwalk
import binwalk.modules
def runme():
with binwalk.Modules() as modules:
try:
if len(sys.argv) == 1:
sys.stderr.write(modules.help())
sys.exit(1)
# If no explicit module was enabled in the command line arguments,
# run again with the default signature scan explicitly enabled.
elif not modules.execute():
# Make sure the Signature module is loaded before attempting
# an implicit signature scan; else, the error message received
# by the end user is not very helpful.
if hasattr(binwalk.modules, "Signature"):
modules.execute(*sys.argv[1:], signature=True)
else:
sys.stderr.write("Error: Signature scans not supported; ")
sys.stderr.write("make sure you have python-lzma installed and try again.\n")
sys.exit(2)
        except binwalk.ModuleException:
            sys.exit(3)
def main():
try:
# Special options for profiling the code. For debug use only.
if '--profile' in sys.argv:
import cProfile
sys.argv.pop(sys.argv.index('--profile'))
cProfile.run('runme()')
else:
runme()
except IOError:
pass
except KeyboardInterrupt:
sys.stdout.write("\n")
if __name__ == "__main__":
main()
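# Illustrative walk-through of the prefix resolution above (hypothetical
# install prefix, not part of binwalk): for a launcher script located at
# /opt/binwalk/bin/binwalk, _parent_dir resolves to /opt/binwalk and the
# loop probes, in order,
#     /opt/binwalk
#     /opt/binwalk/lib
#     /opt/binwalk/lib/pythonX.Y/site-packages
# prepending whichever directories exist so that 'import binwalk' finds the
# package bundled with that prefix instead of failing on a custom install.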
| 35.984615
| 97
| 0.595554
| 302
| 2,339
| 4.513245
| 0.437086
| 0.026412
| 0.030814
| 0.02201
| 0.055759
| 0.032282
| 0
| 0
| 0
| 0
| 0
| 0.006732
| 0.301411
| 2,339
| 64
| 98
| 36.546875
| 0.827417
| 0.315092
| 0
| 0.090909
| 0
| 0
| 0.10649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.022727
| 0.113636
| 0
| 0.159091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0b7ca82a2ce39606a44ac65893f26c1b02da5d3
| 3,174
|
py
|
Python
|
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | 14
|
2020-03-09T04:08:03.000Z
|
2021-12-29T14:53:32.000Z
|
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | null | null | null |
server.py
|
AndrewB330/BinanceTerminal
|
3699a295d2b2af810d30ff692bab4e106ec44392
|
[
"MIT"
] | null | null | null |
import time
import pymongo
import schedule
from order import *
from utils import *
# MONGODB
db = pymongo.MongoClient("mongodb://localhost:27017/")["ShaurmaBinanceTerminal"]
order_db = db["orders"]
JOB_INTERVAL = 10.0 # interval of updating
jobs_pool = {}
def worker(symbol):
try:
time_start = time.time()
# get all active orders
active_orders = order_db.find({
'$and': [
{'symbol': symbol},
{'$or': [
{'status': OrderStatus.WAITING},
{'status': OrderStatus.PLACED}
]}
]
})
# update all active orders
for json_order in active_orders:
order = Order(json_order)
order.update()
order_db.update_one({"_id": order._id}, {"$set": order.to_json()})
# adjust updating period
time_elapsed = min(JOB_INTERVAL, time.time() - time_start)
jobs_pool[symbol].interval = JOB_INTERVAL - time_elapsed
except Exception as e:
log.error('Worker %s error: %s', symbol, repr(e))
def jobs_maintainer():
# get all active symbols
cursor = order_db.find({
'$or': [
{'status': OrderStatus.WAITING},
{'status': OrderStatus.PLACED}
]
}).distinct('symbol')
working = set()
# run jobs for not working, but active symbols
for symbol in cursor:
if symbol not in jobs_pool:
log.info('Worker started, symbol: %s', symbol)
jobs_pool[symbol] = schedule.every(JOB_INTERVAL).seconds.do(worker, symbol=symbol)
jobs_pool[symbol].run()
working.add(symbol)
# remove jobs for working, but not active symbols
for k in list(jobs_pool.keys()):
if k not in working:
log.info('Worker stopped, symbol: %s', k)
schedule.cancel_job(jobs_pool[k])
jobs_pool.pop(k)
def initialize_test_db():
order_db.drop()
    orders = [
create_limit('BTCUSDT', Side.BUY, Decimal('7400.00'), Decimal('0.0015')),
create_limit('BTCUSDT', Side.BUY, Decimal('7103.65'), Decimal('0.0020')),
create_limit('BTCUSDT', Side.SELL, Decimal('9500.00'), Decimal('0.0030')),
create_limit('BTCUSDT', Side.SELL, Decimal('9600.00'), Decimal('0.0010')),
create_market_stop('BTCUSDT', Side.SELL, Decimal('6675.50'), Decimal('0.0035')),
create_trailing_market_stop('BTCUSDT', Side.SELL, Decimal('100.00'), Decimal('7600.00'), Decimal('0.0035')),
create_market_stop('XLMUSDT', Side.SELL, Decimal('0.105'), Decimal('50.0')),
create_trailing_market_stop('XLMUSDT', Side.SELL, Decimal('0.01'), Decimal('0.14'), Decimal('0.0015')),
create_trailing_market_stop('XLMUSDT', Side.SELL, Decimal('0.01'), Decimal('0.14'), Decimal('0.0015'))
]
    for order in orders:
        order_db.insert_one(order.to_json())
print('Test DB initialized')
def run_server():
maintainer = schedule.every(5).seconds.do(jobs_maintainer)
maintainer.run()
while True:
schedule.run_pending()
time.sleep(0.1)
if __name__ == '__main__':
# initialize_test_db()
run_server()
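# Minimal sketch of exercising the worker's "active orders" filter directly
# (assumes a running local MongoDB instance and the order schema used above):
#
#     from pymongo import MongoClient
#     db = MongoClient("mongodb://localhost:27017/")["ShaurmaBinanceTerminal"]
#     active = db["orders"].count_documents({
#         "$and": [
#             {"symbol": "BTCUSDT"},
#             {"$or": [{"status": OrderStatus.WAITING},
#                      {"status": OrderStatus.PLACED}]},
#         ]
#     })
#     print(active, "active BTCUSDT orders")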
| 31.74
| 116
| 0.598614
| 392
| 3,174
| 4.67602
| 0.316327
| 0.056738
| 0.057283
| 0.048009
| 0.254774
| 0.254774
| 0.148936
| 0.077469
| 0.077469
| 0.077469
| 0
| 0.046895
| 0.254253
| 3,174
| 99
| 117
| 32.060606
| 0.727503
| 0.074039
| 0
| 0.055556
| 0
| 0
| 0.13281
| 0.016388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.069444
| 0
| 0.125
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0b8186276c361f655fc43a3b80aba5c60bd0210
| 4,979
|
py
|
Python
|
sarenka/backend/api_searcher/searcher_full.py
|
adolabsnet/sarenka
|
2032aa6ddebfc69b0db551b7793080d17282ced2
|
[
"MIT"
] | 380
|
2019-12-05T09:37:47.000Z
|
2022-03-31T09:37:27.000Z
|
sarenka/backend/api_searcher/searcher_full.py
|
watchmen-coder/sarenka
|
d7fc0928e4992de3dbb1546137ca6a158e930ba8
|
[
"MIT"
] | 14
|
2020-09-26T17:49:42.000Z
|
2022-02-04T18:16:16.000Z
|
sarenka/backend/api_searcher/searcher_full.py
|
watchmen-coder/sarenka
|
d7fc0928e4992de3dbb1546137ca6a158e930ba8
|
[
"MIT"
] | 60
|
2021-01-01T16:25:30.000Z
|
2022-03-26T18:48:03.000Z
|
"""
Moduł spiający wszystkie wyszukiwania w jedną klasę - wszystkei dane dla adresu ip/domeny.
Klasa bezpośrednio używana w widoku Django.
"""
from rest_framework.reverse import reverse
from typing import List, Dict
import whois
import socket
from connectors.credential import CredentialsNotFoundError
from api_searcher.search_engines.censys_engine.censys_host_search import CensysHostSearch
from api_searcher.search_engines.shodan_engine.shodan_host_search import ShodanHostSearch
from .dns.dns_searcher import DNSSearcher, DNSSearcherError
class SearcherFull:
"""Klasa zwracajaca wszystkie znalezione dane - zwraca infromacje ze wszystkich serwisów trzeich, informacje o DNS etc."""
def __init__(self, ip_address:str, local_host_address="", user_credentials=None):
self.host = ip_address
self.host_address = local_host_address
self.user_credentials = user_credentials
def get_whois_data(self):
"""Metoda zwraca dane z bazy whois."""
return whois.whois(self.host)
def get_banner(self, port_list)->List[Dict]:
"""Metoda zwraca banery, które pórbuje uzyskac dla otwartych portów zwróconych przez seriwsy trzecie"""
result = []
for port in port_list:
            s = socket.socket()
            # set the timeout before connecting so neither connect() nor
            # recv() can block indefinitely
            s.settimeout(5)
            s.connect((self.host, int(port)))
try:
                # if the service sends no banner, recv() raises socket.timeout
response = s.recv(1024)
if response:
result.append({port: response})
except socket.timeout:
result.append({port: "Unable to grab banner."})
return result
def get_censys_data(self):
"""Metoda zwraca dane wyszukane w serwisie http://censys.io/"""
try:
if not self.user_credentials:
raise CredentialsNotFoundError("UserCredentials object does not exist.")
except CredentialsNotFoundError as ex:
settings_url = self.host_address + reverse("user_credentials")
return {
"censys": {
"error": "Unable to get credentials for service http://censys.io/. "
"Please create account on https://censys.io/ and add valid settings "
f"for SARENKA app on {settings_url}",
"details": str(ex)
}
}
try:
            response = CensysHostSearch(self.user_credentials).get_data(self.host)
response.update({"banners": self.get_banner(response["ports"])})
return response
except Exception as ex:
            # censys does not expose its CensysNotFoundException class for import
return {
"censys": {
"error": f"Unable to get infromation from https://censys.io/ service.",
"details": str(ex)
}
}
def get_shodan_data(self):
"""Metoda zwraca dane wyszukane w serwisie https://www.shodan.io/"""
try:
if not self.user_credentials:
raise CredentialsNotFoundError("UserCredentials object does not exist.")
except CredentialsNotFoundError as ex:
settings_url = self.host_address + reverse("user_credentials")
return {
"shodan": {
"error": "Unable to get credentials for service https://www.shodan.io/. "
"Please create account on https://www.shodan.io/ and add valid settings "
f"for SARENKA app on {settings_url}",
"details": str(ex)
}
}
try:
            response = ShodanHostSearch(self.user_credentials).get_data(self.host)
return response
except Exception as ex:
            # as with censys above, the service's not-found exception is not importable, so catch broadly
return {
"shodan": {
"error": f"Unable to get infromation from https://www.shodan.io/ service.",
"details": str(ex)
}
}
def get_dns_data(self):
"""Metoda zwraca informacje o rekordach DNS hosta."""
try:
data = DNSSearcher(self.host).get_data()
return data
except DNSSearcherError as ex:
return {"error": str(ex)}
except Exception as ex:
return {"error": f"Unable to get DNS record data for host={self.host}.", "details": str(ex)}
@property
def values(self):
"""Zwraca jsona ze wszystkimi danymi - metoda pomocna dla widoków Django."""
response = {
"whois": self.get_whois_data(),
"dns_data": self.get_dns_data(),
}
response.update({"censys": self.get_censys_data()})
response.update({"shodan": self.get_shodan_data()})
return response
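# Standalone sketch of the banner-grabbing pattern used in get_banner()
# (illustrative; host and port are placeholders): the timeout is set before
# connect() and the socket is closed deterministically by the context manager.
#
#     import socket
#
#     def grab_banner(host: str, port: int, timeout: float = 5.0) -> bytes:
#         with socket.socket() as s:
#             s.settimeout(timeout)
#             s.connect((host, port))
#             try:
#                 return s.recv(1024)
#             except socket.timeout:
#                 return b""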
| 40.811475
| 126
| 0.589677
| 530
| 4,979
| 5.422642
| 0.301887
| 0.027836
| 0.033055
| 0.027836
| 0.424843
| 0.391093
| 0.371608
| 0.30341
| 0.248434
| 0.248434
| 0
| 0.001481
| 0.321751
| 4,979
| 122
| 127
| 40.811475
| 0.849571
| 0.163687
| 0
| 0.361702
| 0
| 0
| 0.182127
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0
| 0.085106
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0bb420692799a6a79988f6528e8182e5954185a
| 3,234
|
py
|
Python
|
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | 1
|
2019-12-01T05:48:00.000Z
|
2019-12-01T05:48:00.000Z
|
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | null | null | null |
cifar10/train.py
|
ashawkey/hawtorch
|
a6e28422da9258458b6268f5981c68d60623e12f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import hawtorch
import hawtorch.io as io
from hawtorch import Trainer
from hawtorch.metrics import ClassificationMeter
from hawtorch.utils import backup
import models
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='configs.json')
cli_args = parser.parse_args()
config_file = cli_args.config
args = io.load_json(config_file)
logger = io.logger(args["workspace_path"])
names = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def create_loaders():
# transforms
transform_train = transforms.Compose([
#transforms.RandomCrop(32, padding=4),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# CIFAR10 dataset
logger.info("Start creating datasets...")
train_dataset = datasets.CIFAR10(root=args["data_path"], train=True, transform=transform_train, download=True)
logger.info(f"Created train set! {len(train_dataset)}")
test_dataset = datasets.CIFAR10(root=args["data_path"], train=False, transform=transform_test, download=True)
logger.info(f"Created test set! {len(test_dataset)}")
# Data Loader
train_loader = DataLoader(dataset=train_dataset,
batch_size=args["train_batch_size"],
shuffle=True,
pin_memory=True,
)
test_loader = DataLoader(dataset=test_dataset,
batch_size=args["test_batch_size"],
shuffle=False,
pin_memory=True,
)
return {"train":train_loader,
"test":test_loader}
def create_trainer():
logger.info("Start creating trainer...")
device = args["device"]
model = getattr(models, args["model"])()
objective = getattr(nn, args["objective"])()
optimizer = getattr(optim, args["optimizer"])(model.parameters(), lr=args["lr"], weight_decay=args["weight_decay"])
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args["lr_decay_step"], gamma=args["lr_decay"])
metrics = [ClassificationMeter(10, names=names), ]
loaders = create_loaders()
trainer = Trainer(args, model, optimizer, scheduler, objective, device, loaders, logger,
metrics=metrics,
workspace_path=args["workspace_path"],
eval_set="test",
input_shape=(3,32,32),
report_step_interval=-1,
)
logger.info("Trainer Created!")
return trainer
if __name__ == "__main__":
backup(args["workspace_path"])
trainer = create_trainer()
trainer.train(args["epochs"])
trainer.evaluate()
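# Sketch of the configs.json shape this script expects, inferred from the
# args[...] lookups above; the values shown are illustrative placeholders:
#
#     {
#         "workspace_path": "workspace/cifar10",
#         "data_path": "./data",
#         "device": "cuda",
#         "model": "ResNet18",
#         "objective": "CrossEntropyLoss",
#         "optimizer": "Adam",
#         "lr": 0.001,
#         "weight_decay": 0.0005,
#         "lr_decay_step": [50, 75],
#         "lr_decay": 0.1,
#         "train_batch_size": 128,
#         "test_batch_size": 256,
#         "epochs": 100
#     }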
| 34.042105
| 119
| 0.62987
| 357
| 3,234
| 5.54902
| 0.32493
| 0.02524
| 0.025745
| 0.037355
| 0.141343
| 0.141343
| 0.111055
| 0.111055
| 0.067643
| 0.067643
| 0
| 0.031596
| 0.246444
| 3,234
| 94
| 120
| 34.404255
| 0.781288
| 0.033704
| 0
| 0.114286
| 0
| 0
| 0.123437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.2
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0bbcdf05486aa95d06b89b25ca7866a985c51bb
| 718
|
py
|
Python
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | 2
|
2019-02-26T23:55:06.000Z
|
2019-02-26T23:56:09.000Z
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | 1
|
2019-02-10T07:00:39.000Z
|
2019-02-10T07:00:39.000Z
|
examples/scatterplot.py
|
ajduberstein/slayer
|
e4f2b6e0277ac38fe71ec99eaf3ee4769057b0ea
|
[
"MIT"
] | null | null | null |
"""
Example of how to make a Scatterplot with a time component
"""
import slayer as sly
import pandas as pd
DATA_URL = 'https://raw.githubusercontent.com/ajduberstein/sf_growth/master/public/data/business.csv'
businesses = pd.read_csv(DATA_URL)
FUCHSIA_RGBA = [255, 0, 255, 140]
color_scale = sly.ColorScale(
palette='random',
variable_name='neighborhood',
scale_type='categorical_random')
s = sly.Slayer(sly.Viewport(longitude=-122.43, latitude=37.76, zoom=11)) +\
sly.Timer(tick_rate=0.75) + \
sly.Scatterplot(
businesses,
position=['lng', 'lat'],
color=color_scale,
radius=50,
time_field='start_date')
s.to_html('scatterplot.html', interactive=True)
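# The CSV is expected to provide at least the columns referenced above:
# 'lng', 'lat', 'neighborhood', and 'start_date'. A minimal stand-in frame
# (illustrative only) for experimenting offline:
#
#     businesses = pd.DataFrame({
#         'lng': [-122.41, -122.45],
#         'lat': [37.77, 37.75],
#         'neighborhood': ['SoMa', 'Sunset'],
#         'start_date': ['2010-01-01', '2012-06-15'],
#     })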
| 26.592593
| 101
| 0.693593
| 99
| 718
| 4.888889
| 0.717172
| 0.028926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043624
| 0.169916
| 718
| 26
| 102
| 27.615385
| 0.768456
| 0.08078
| 0
| 0
| 0
| 0.055556
| 0.239264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0bc99badd8c414f8e67c165139e1e1864acd087
| 3,699
|
py
|
Python
|
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
test_dictondisk.py
|
MKuranowski/dictondisk
|
ca25f8fed2f60d8ee63d6c5eaa9e620555581383
|
[
"MIT"
] | null | null | null |
import dictondisk
import random
import pytest
import os
remove_keys = {0, (33, 12.23), "c", "中国"}
vanilla_dict = {
0: 1337, 1: 3.14, 2: 2.71, 3: 1.61,
"a": "Ą", "b": "✈!", "c": "東京", "中国": "共産",
(1, .5): "lorem", (33, 12.23): "ipsum",
-1: ["one", "two", "three"]
}
def test_contains_update():
t = dictondisk.DictOnDisk(vanilla_dict)
for i in vanilla_dict:
assert i in t
def test_del():
t = dictondisk.DictOnDisk(vanilla_dict)
folder_name = t.folder.name
del t
assert not os.path.exists(folder_name)
def test_len():
t = dictondisk.DictOnDisk(vanilla_dict)
assert len(t) == len(vanilla_dict)
def test_getsetitem():
t = dictondisk.DictOnDisk(vanilla_dict)
for k, v in vanilla_dict.items():
assert t[k] == v
t[0] = 7331
t[-1] = ["three", "two", "one"]
assert t[0] == 7331
assert t[-1] == ["three", "two", "one"]
with pytest.raises(KeyError):
u = t["0"]
def test_delitem():
t = dictondisk.DictOnDisk(vanilla_dict)
for i in remove_keys:
del t[i]
for k in vanilla_dict:
if k in remove_keys:
assert k not in t
else:
assert k in t
with pytest.raises(KeyError):
del t["0"]
def test_iter():
t = dictondisk.DictOnDisk(vanilla_dict)
all_keys = set(vanilla_dict.keys())
for k in t:
all_keys.remove(k)
assert len(all_keys) == 0
def test_get():
t = dictondisk.DictOnDisk(vanilla_dict)
assert t.get(0) == 1337
assert t.get((1, .5), "nice") == "lorem"
    assert t.get(52566) is None
assert t.get(-2, "VΣЯY ПIᄃΣ") == "VΣЯY ПIᄃΣ"
def test_copy():
t1 = dictondisk.DictOnDisk(vanilla_dict)
t2 = t1.copy()
assert t1.folder.name != t2.folder.name
for k, v in t1.items(): assert t2[k] == v
for k, v in t2.items(): assert t1[k] == v
def test_fromkeys():
# Check default value
t = dictondisk.DictOnDisk.fromkeys(vanilla_dict)
    for key in vanilla_dict: assert t[key] is None
for key in t: assert key in vanilla_dict
# Check custom value
t = dictondisk.DictOnDisk.fromkeys(vanilla_dict, "🖲️")
for key in vanilla_dict: assert t[key] == "🖲️"
for key in t: assert key in vanilla_dict
def test_pop():
t = dictondisk.DictOnDisk(vanilla_dict)
# Check proper popping of values
v = t.pop((1, .5), None)
assert v == "lorem"
assert (1, .5) not in t
# Check proper returning of default
v = t.pop("654", "🍔")
assert v == "🍔"
    # Check raising of KeyError without a default
with pytest.raises(KeyError):
v = t.pop(32156)
    # Check raising TypeError when more than one default is given
with pytest.raises(TypeError):
v = t.pop(-1, None, "🍿")
assert -1 in t
def test_popitem():
t = dictondisk.DictOnDisk(vanilla_dict)
k, v = t.popitem()
assert k in vanilla_dict
assert v == vanilla_dict[k]
def test_bool():
t = dictondisk.DictOnDisk()
assert bool(t) == False
t.update(vanilla_dict)
assert bool(t) == True
def test_eq():
t = dictondisk.DictOnDisk()
assert t == []
assert t == {}
t.update(vanilla_dict)
assert t == vanilla_dict
assert t != {}
t = dictondisk.DictOnDisk()
t[1] = "1"
t[2] = "2"
assert t == [(1, "1"), (2, "2")]
assert t != [(1, "1")]
def test_setdefault():
t = dictondisk.DictOnDisk(vanilla_dict)
assert t.setdefault(0, "aaaaa") == 1337
assert t.setdefault(89, "darkness") == "darkness"
assert t.setdefault((33, 12.23)) == "ipsum"
assert t.setdefault("🏯") == None
def test_view_keys():
pass
def test_view_values():
pass
def test_view_items():
pass
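# Possible bodies for the three view-test stubs above (a sketch that assumes
# DictOnDisk mirrors dict's keys()/values()/items() interface):
#
#     def test_views():
#         t = dictondisk.DictOnDisk(vanilla_dict)
#         assert set(t.keys()) == set(vanilla_dict.keys())
#         # values may be unhashable (lists), so compare their reprs
#         assert sorted(map(repr, t.values())) == sorted(map(repr, vanilla_dict.values()))
#         assert dict(t.items()) == vanilla_dict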
| 20.324176
| 58
| 0.593674
| 548
| 3,699
| 3.919708
| 0.202555
| 0.143389
| 0.146648
| 0.158752
| 0.307728
| 0.213687
| 0.16946
| 0.091248
| 0.028864
| 0
| 0
| 0.038952
| 0.257367
| 3,699
| 181
| 59
| 20.436464
| 0.739716
| 0.052176
| 0
| 0.2
| 0
| 0
| 0.038297
| 0
| 0
| 0
| 0
| 0
| 0.330435
| 1
| 0.147826
| false
| 0.026087
| 0.034783
| 0
| 0.182609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0bead6599200d03855aef8174ff835ecca2f74f
| 76,496
|
py
|
Python
|
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | 3
|
2022-01-05T09:10:18.000Z
|
2022-03-22T15:09:43.000Z
|
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | null | null | null |
commonroad/scenario/lanelet.py
|
CommonRoad/commonroad-io
|
93824961da9c41eb7768b5cf1acbed9a07446dc2
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import enum
from typing import *
import numpy as np
from shapely.geometry import MultiPolygon as ShapelyMultiPolygon
from shapely.geometry import Point as ShapelyPoint
from shapely.geometry import Polygon as ShapelyPolygon
from shapely.strtree import STRtree
import commonroad.geometry.transform
from commonroad.common.validity import *
from commonroad.geometry.shape import Polygon, ShapeGroup, Circle, Rectangle, Shape
from commonroad.scenario.intersection import Intersection
from commonroad.scenario.obstacle import Obstacle
from commonroad.scenario.traffic_sign import TrafficSign, TrafficLight
from commonroad.visualization.drawable import IDrawable
from commonroad.visualization.param_server import ParamServer
from commonroad.visualization.renderer import IRenderer
__author__ = "Christian Pek, Sebastian Maierhofer"
__copyright__ = "TUM Cyber-Physical Systems Group"
__credits__ = ["BMW CAR@TUM"]
__version__ = "2022.1"
__maintainer__ = "Sebastian Maierhofer"
__email__ = "commonroad@lists.lrz.de"
__status__ = "released"
class LineMarking(enum.Enum):
"""
Enum describing different types of line markings, i.e. dashed or solid lines
"""
DASHED = 'dashed'
SOLID = 'solid'
BROAD_DASHED = 'broad_dashed'
BROAD_SOLID = 'broad_solid'
UNKNOWN = 'unknown'
NO_MARKING = 'no_marking'
class LaneletType(enum.Enum):
"""
Enum describing different types of lanelets
"""
URBAN = 'urban'
COUNTRY = 'country'
HIGHWAY = 'highway'
DRIVE_WAY = 'driveWay'
MAIN_CARRIAGE_WAY = 'mainCarriageWay'
ACCESS_RAMP = 'accessRamp'
EXIT_RAMP = 'exitRamp'
SHOULDER = 'shoulder'
BUS_LANE = 'busLane'
BUS_STOP = 'busStop'
BICYCLE_LANE = 'bicycleLane'
SIDEWALK = 'sidewalk'
CROSSWALK = 'crosswalk'
INTERSTATE = 'interstate'
INTERSECTION = 'intersection'
UNKNOWN = 'unknown'
class RoadUser(enum.Enum):
"""
Enum describing different types of road users
"""
VEHICLE = 'vehicle'
CAR = 'car'
TRUCK = 'truck'
BUS = 'bus'
PRIORITY_VEHICLE = 'priorityVehicle'
MOTORCYCLE = 'motorcycle'
BICYCLE = 'bicycle'
PEDESTRIAN = 'pedestrian'
TRAIN = 'train'
TAXI = 'taxi'
class StopLine:
"""Class which describes the stop line of a lanelet"""
def __init__(self, start: np.ndarray, end: np.ndarray, line_marking: LineMarking, traffic_sign_ref: Set[int] = None,
traffic_light_ref: Set[int] = None):
self._start = start
self._end = end
self._line_marking = line_marking
self._traffic_sign_ref = traffic_sign_ref
self._traffic_light_ref = traffic_light_ref
def __eq__(self, other):
if not isinstance(other, StopLine):
warnings.warn(f"Inequality between StopLine {repr(self)} and different type {type(other)}")
return False
prec = 10
start_string = np.array2string(np.around(self._start.astype(float), prec), precision=prec)
start_other_string = np.array2string(np.around(other.start.astype(float), prec), precision=prec)
end_string = np.array2string(np.around(self._end.astype(float), prec), precision=prec)
end_other_string = np.array2string(np.around(other.end.astype(float), prec), precision=prec)
if start_string == start_other_string and end_string == end_other_string \
and self._line_marking == other.line_marking and self._traffic_sign_ref == other.traffic_sign_ref \
and self._traffic_light_ref == other.traffic_light_ref:
return True
warnings.warn(f"Inequality of StopLine {repr(self)} and the other one {repr(other)}")
return False
def __hash__(self):
start_string = np.array2string(np.around(self._start.astype(float), 10), precision=10)
end_string = np.array2string(np.around(self._end.astype(float), 10), precision=10)
sign_ref = None if self._traffic_sign_ref is None else frozenset(self._traffic_sign_ref)
light_ref = None if self._traffic_light_ref is None else frozenset(self._traffic_light_ref)
return hash((start_string, end_string, self._line_marking, sign_ref,
light_ref))
def __str__(self):
return f'StopLine from {self._start} to {self._end}'
def __repr__(self):
return f"StopLine(start={self._start.tolist()}, end={self._end.tolist()}, line_marking={self._line_marking}, " \
f"traffic_sign_ref={self._traffic_sign_ref}, traffic_light_ref={self._traffic_light_ref})"
@property
def start(self) -> np.ndarray:
return self._start
@start.setter
def start(self, value: np.ndarray):
self._start = value
@property
def end(self) -> np.ndarray:
return self._end
@end.setter
def end(self, value: np.ndarray):
self._end = value
@property
def line_marking(self) -> LineMarking:
return self._line_marking
@line_marking.setter
def line_marking(self, marking: LineMarking):
self._line_marking = marking
@property
def traffic_sign_ref(self) -> Set[int]:
return self._traffic_sign_ref
@traffic_sign_ref.setter
def traffic_sign_ref(self, references: Set[int]):
self._traffic_sign_ref = references
@property
def traffic_light_ref(self) -> Set[int]:
return self._traffic_light_ref
@traffic_light_ref.setter
def traffic_light_ref(self, references: Set[int]):
self._traffic_light_ref = references
def translate_rotate(self, translation: np.ndarray, angle: float):
"""
This method translates and rotates a stop line
:param translation: The translation given as [x_off,y_off] for the x and y translation
:param angle: The rotation angle in radian (counter-clockwise defined)
"""
assert is_real_number_vector(translation, 2), '<Lanelet/translate_rotate>: provided translation ' \
'is not valid! translation = {}'.format(translation)
assert is_valid_orientation(
angle), '<Lanelet/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
# create transformation matrix
t_m = commonroad.geometry.transform.translation_rotation_matrix(translation, angle)
line_vertices = np.array([self._start, self._end])
# transform center vertices
tmp = t_m.dot(np.vstack((line_vertices.transpose(), np.ones((1, line_vertices.shape[0])))))
tmp = tmp[0:2, :].transpose()
self._start, self._end = tmp[0], tmp[1]
class Lanelet:
"""
Class which describes a Lanelet entity according to the CommonRoad specification. Each lanelet is described by a
left and right boundary (polylines). Furthermore, lanelets have relations to other lanelets, e.g. an adjacent left
neighbor or a predecessor.
"""
def __init__(self, left_vertices: np.ndarray, center_vertices: np.ndarray, right_vertices: np.ndarray,
lanelet_id: int, predecessor=None, successor=None, adjacent_left=None,
adjacent_left_same_direction=None, adjacent_right=None, adjacent_right_same_direction=None,
line_marking_left_vertices=LineMarking.NO_MARKING, line_marking_right_vertices=LineMarking.NO_MARKING,
stop_line=None, lanelet_type=None, user_one_way=None, user_bidirectional=None, traffic_signs=None,
traffic_lights=None, ):
"""
Constructor of a Lanelet object
:param left_vertices: The vertices of the left boundary of the Lanelet described as a
polyline [[x0,y0],[x1,y1],...,[xn,yn]]
:param center_vertices: The vertices of the center line of the Lanelet described as a
polyline [[x0,y0],[x1,y1],...,[xn,yn]]
:param right_vertices: The vertices of the right boundary of the Lanelet described as a
polyline [[x0,y0],[x1,y1],...,[xn,yn]]
:param lanelet_id: The unique id (natural number) of the lanelet
:param predecessor: The list of predecessor lanelets (None if not existing)
:param successor: The list of successor lanelets (None if not existing)
:param adjacent_left: The adjacent left lanelet (None if not existing)
:param adjacent_left_same_direction: True if the adjacent left lanelet has the same driving direction,
false otherwise (None if no left adjacent lanelet exists)
:param adjacent_right: The adjacent right lanelet (None if not existing)
:param adjacent_right_same_direction: True if the adjacent right lanelet has the same driving direction,
false otherwise (None if no right adjacent lanelet exists)
:param line_marking_left_vertices: The type of line marking of the left boundary
:param line_marking_right_vertices: The type of line marking of the right boundary
:param stop_line: The stop line of the lanelet
:param lanelet_type: The types of lanelet applicable here
:param user_one_way: type of users that will use the lanelet as one-way
:param user_bidirectional: type of users that will use the lanelet as bidirectional way
:param traffic_signs: Traffic signs to be applied
:param traffic_lights: Traffic lights to follow
"""
# Set required properties
self._left_vertices = None
self._right_vertices = None
self._center_vertices = None
self._lanelet_id = None
self.lanelet_id = lanelet_id
self.left_vertices = left_vertices
self.right_vertices = right_vertices
self.center_vertices = center_vertices
# check if length of each polyline is the same
assert len(left_vertices[0]) == len(center_vertices[0]) == len(
right_vertices[0]), '<Lanelet/init>: Provided polylines do not share the same length! {}/{}/{}'.format(
len(left_vertices[0]), len(center_vertices[0]), len(right_vertices[0]))
# Set lane markings
self._line_marking_left_vertices = line_marking_left_vertices
self._line_marking_right_vertices = line_marking_right_vertices
# Set predecessors and successors
self._predecessor = None
if predecessor is None:
self._predecessor = []
else:
self.predecessor = predecessor
self._successor = None
if successor is None:
self._successor = []
else:
self.successor = successor
# Set adjacent lanelets
self._adj_left = None
self._adj_left_same_direction = None
if adjacent_left is not None:
self.adj_left = adjacent_left
self.adj_left_same_direction = adjacent_left_same_direction
self._adj_right = None
self._adj_right_same_direction = None
if adjacent_right is not None:
self.adj_right = adjacent_right
self.adj_right_same_direction = adjacent_right_same_direction
self._distance = None
self._inner_distance = None
# create empty polygon
self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0))))
self._dynamic_obstacles_on_lanelet = {}
self._static_obstacles_on_lanelet = set()
self._stop_line = None
if stop_line:
self.stop_line = stop_line
self._lanelet_type = None
if lanelet_type is None:
self._lanelet_type = set()
else:
self.lanelet_type = lanelet_type
self._user_one_way = None
if user_one_way is None:
self._user_one_way = set()
else:
self.user_one_way = user_one_way
self._user_bidirectional = None
if user_bidirectional is None:
self._user_bidirectional = set()
else:
self.user_bidirectional = user_bidirectional
# Set Traffic Rules
self._traffic_signs = None
if traffic_signs is None:
self._traffic_signs = set()
else:
self.traffic_signs = traffic_signs
self._traffic_lights = None
if traffic_lights is None:
self._traffic_lights = set()
else:
self.traffic_lights = traffic_lights
def __eq__(self, other):
if not isinstance(other, Lanelet):
warnings.warn(f"Inequality between Lanelet {repr(self)} and different type {type(other)}")
return False
list_elements_eq = self._stop_line == other.stop_line
lanelet_eq = True
polylines = [self._left_vertices, self._right_vertices, self._center_vertices]
polylines_other = [other.left_vertices, other.right_vertices, other.center_vertices]
for i in range(0, len(polylines)):
polyline = polylines[i]
polyline_other = polylines_other[i]
polyline_string = np.array2string(np.around(polyline.astype(float), 10), precision=10)
polyline_other_string = np.array2string(np.around(polyline_other.astype(float), 10), precision=10)
lanelet_eq = lanelet_eq and polyline_string == polyline_other_string
if lanelet_eq and self.lanelet_id == other.lanelet_id \
and self._line_marking_left_vertices == other.line_marking_left_vertices \
and self._line_marking_right_vertices == other.line_marking_right_vertices \
and set(self._predecessor) == set(other.predecessor) and set(self._successor) == set(other.successor) \
and self._adj_left == other.adj_left and self._adj_right == other.adj_right \
and self._adj_left_same_direction == other.adj_left_same_direction \
and self._adj_right_same_direction == other.adj_right_same_direction \
                and self._lanelet_type == other.lanelet_type and self._user_one_way == other.user_one_way \
and self._user_bidirectional == other.user_bidirectional \
and self._traffic_signs == other.traffic_signs and self._traffic_lights == other.traffic_lights:
return list_elements_eq
warnings.warn(f"Inequality of Lanelet {repr(self)} and the other one {repr(other)}")
return False
def __hash__(self):
polylines = [self._left_vertices, self._right_vertices, self._center_vertices]
polyline_strings = []
for polyline in polylines:
polyline_string = np.array2string(np.around(polyline.astype(float), 10), precision=10)
polyline_strings.append(polyline_string)
elements = [self._predecessor, self._successor, self._lanelet_type, self._user_one_way,
self._user_bidirectional, self._traffic_signs, self._traffic_lights]
frozen_elements = [frozenset(e) for e in elements]
return hash((self._lanelet_id, tuple(polyline_strings), self._line_marking_left_vertices,
self._line_marking_right_vertices, self._stop_line, self._adj_left, self._adj_right,
self._adj_left_same_direction, self._adj_right_same_direction, tuple(frozen_elements)))
def __str__(self):
return f"Lanelet with id {self._lanelet_id} has predecessors {set(self._predecessor)}, successors " \
f"{set(self._successor)}, left adjacency {self._adj_left} with " \
f"{'same' if self._adj_left_same_direction else 'opposite'} direction, and " \
f"right adjacency with {'same' if self._adj_right_same_direction else 'opposite'} direction"
def __repr__(self):
return f"Lanelet(left_vertices={self._left_vertices.tolist()}, " \
f"center_vertices={self._center_vertices.tolist()}, " \
f"right_vertices={self._right_vertices.tolist()}, lanelet_id={self._lanelet_id}, " \
f"predecessor={self._predecessor}, successor={self._successor}, adjacent_left={self._adj_left}, " \
f"adjacent_left_same_direction={self._adj_left_same_direction}, adjacent_right={self._adj_right}, " \
f"adjacent_right_same_direction={self._adj_right_same_direction}, " \
f"line_marking_left_vertices={self._line_marking_left_vertices}, " \
f"line_marking_right_vertices={self._line_marking_right_vertices}), " \
f"stop_line={repr(self._stop_line)}, lanelet_type={self._lanelet_type}, " \
f"user_one_way={self._user_one_way}, " \
f"user_bidirectional={self._user_bidirectional}, traffic_signs={self._traffic_signs}, " \
f"traffic_lights={self._traffic_lights}"
@property
def distance(self) -> np.ndarray:
"""
:returns cumulative distance along center vertices
"""
if self._distance is None:
self._distance = self._compute_polyline_cumsum_dist([self.center_vertices])
return self._distance
@distance.setter
def distance(self, _):
warnings.warn('<Lanelet/distance> distance of lanelet is immutable')
@property
def inner_distance(self) -> np.ndarray:
"""
:returns minimum cumulative distance along left and right vertices, i.e., along the inner curve:
"""
if self._inner_distance is None:
self._inner_distance = self._compute_polyline_cumsum_dist([self.left_vertices, self.right_vertices])
return self._inner_distance
@property
def lanelet_id(self) -> int:
return self._lanelet_id
@lanelet_id.setter
def lanelet_id(self, l_id: int):
if self._lanelet_id is None:
assert is_natural_number(l_id), '<Lanelet/lanelet_id>: Provided lanelet_id is not valid! id={}'.format(l_id)
self._lanelet_id = l_id
else:
warnings.warn('<Lanelet/lanelet_id>: lanelet_id of lanelet is immutable')
@property
def left_vertices(self) -> np.ndarray:
return self._left_vertices
@left_vertices.setter
def left_vertices(self, polyline: np.ndarray):
if self._left_vertices is None:
self._left_vertices = polyline
assert is_valid_polyline(polyline), '<Lanelet/left_vertices>: The provided polyline ' \
'is not valid! id = {} polyline = {}'.format(self._lanelet_id, polyline)
else:
warnings.warn('<Lanelet/left_vertices>: left_vertices of lanelet are immutable!')
@property
def right_vertices(self) -> np.ndarray:
return self._right_vertices
@right_vertices.setter
def right_vertices(self, polyline: np.ndarray):
if self._right_vertices is None:
assert is_valid_polyline(polyline), '<Lanelet/right_vertices>: The provided polyline ' \
'is not valid! id = {}, polyline = {}'.format(self._lanelet_id,
polyline)
self._right_vertices = polyline
else:
warnings.warn('<Lanelet/right_vertices>: right_vertices of lanelet are immutable!')
@staticmethod
def _compute_polyline_cumsum_dist(polylines: List[np.ndarray], comparator=np.amin):
d = []
for polyline in polylines:
d.append(np.diff(polyline, axis=0))
segment_distances = np.empty((len(polylines[0]), len(polylines)))
for i, d_tmp in enumerate(d):
segment_distances[:, i] = np.append([0], np.sqrt((np.square(d_tmp)).sum(axis=1)))
return np.cumsum(comparator(segment_distances, axis=1))
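    # Example (illustrative): for a single polyline with vertices
    # (0, 0), (3, 4), (6, 8) the per-segment lengths are [5, 5], so the
    # cumulative distances returned are [0, 5, 10]; with several polylines the
    # comparator (np.amin by default) keeps the shortest segment length at
    # each index before accumulating.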
@property
def center_vertices(self) -> np.ndarray:
return self._center_vertices
@center_vertices.setter
def center_vertices(self, polyline: np.ndarray):
if self._center_vertices is None:
assert is_valid_polyline(
polyline), '<Lanelet/center_vertices>: The provided polyline is not valid! polyline = {}'.format(
polyline)
self._center_vertices = polyline
else:
warnings.warn('<Lanelet/center_vertices>: center_vertices of lanelet are immutable!')
@property
def line_marking_left_vertices(self) -> LineMarking:
return self._line_marking_left_vertices
@line_marking_left_vertices.setter
def line_marking_left_vertices(self, line_marking_left_vertices: LineMarking):
if self._line_marking_left_vertices is None:
assert isinstance(line_marking_left_vertices,
LineMarking), '<Lanelet/line_marking_left_vertices>: Provided lane marking type of ' \
'left boundary is not valid! type = {}'.format(
type(line_marking_left_vertices))
self._line_marking_left_vertices = LineMarking.UNKNOWN
else:
warnings.warn('<Lanelet/line_marking_left_vertices>: line_marking_left_vertices of lanelet is immutable!')
@property
def line_marking_right_vertices(self) -> LineMarking:
return self._line_marking_right_vertices
@line_marking_right_vertices.setter
def line_marking_right_vertices(self, line_marking_right_vertices: LineMarking):
if self._line_marking_right_vertices is None:
assert isinstance(line_marking_right_vertices,
LineMarking), '<Lanelet/line_marking_right_vertices>: Provided lane marking type of ' \
'right boundary is not valid! type = {}'.format(
type(line_marking_right_vertices))
self._line_marking_right_vertices = LineMarking.UNKNOWN
else:
warnings.warn('<Lanelet/line_marking_right_vertices>: line_marking_right_vertices of lanelet is immutable!')
@property
def predecessor(self) -> list:
return self._predecessor
@predecessor.setter
def predecessor(self, predecessor: list):
if self._predecessor is None:
assert (is_list_of_natural_numbers(predecessor) and len(predecessor) >= 0), '<Lanelet/predecessor>: ' \
'Provided list ' \
'of predecessors is not ' \
'valid!' \
'predecessors = {}'.format(
predecessor)
self._predecessor = predecessor
else:
warnings.warn('<Lanelet/predecessor>: predecessor of lanelet is immutable!')
@property
def successor(self) -> list:
return self._successor
@successor.setter
def successor(self, successor: list):
if self._successor is None:
            assert (is_list_of_natural_numbers(successor) and len(successor) >= 0), \
                '<Lanelet/successor>: Provided list of successors is not valid! successors = {}'.format(successor)
self._successor = successor
else:
warnings.warn('<Lanelet/successor>: successor of lanelet is immutable!')
@property
def adj_left(self) -> int:
return self._adj_left
@adj_left.setter
def adj_left(self, l_id: int):
if self._adj_left is None:
assert is_natural_number(l_id), '<Lanelet/adj_left>: provided id is not valid! id={}'.format(l_id)
self._adj_left = l_id
else:
warnings.warn('<Lanelet/adj_left>: adj_left of lanelet is immutable')
@property
def adj_left_same_direction(self) -> bool:
return self._adj_left_same_direction
@adj_left_same_direction.setter
def adj_left_same_direction(self, same: bool):
if self._adj_left_same_direction is None:
assert isinstance(same, bool), '<Lanelet/adj_left_same_direction>: provided direction ' \
'is not of type bool! type = {}'.format(type(same))
self._adj_left_same_direction = same
else:
warnings.warn('<Lanelet/adj_left_same_direction>: adj_left_same_direction of lanelet is immutable')
@property
def adj_right(self) -> int:
return self._adj_right
@adj_right.setter
def adj_right(self, l_id: int):
if self._adj_right is None:
assert is_natural_number(l_id), '<Lanelet/adj_right>: provided id is not valid! id={}'.format(l_id)
self._adj_right = l_id
else:
warnings.warn('<Lanelet/adj_right>: adj_right of lanelet is immutable')
@property
def adj_right_same_direction(self) -> bool:
return self._adj_right_same_direction
@adj_right_same_direction.setter
def adj_right_same_direction(self, same: bool):
if self._adj_right_same_direction is None:
assert isinstance(same, bool), '<Lanelet/adj_right_same_direction>: provided direction ' \
'is not of type bool! type = {}'.format(type(same))
self._adj_right_same_direction = same
else:
warnings.warn('<Lanelet/adj_right_same_direction>: adj_right_same_direction of lanelet is immutable')
@property
def dynamic_obstacles_on_lanelet(self) -> Dict[int, Set[int]]:
return self._dynamic_obstacles_on_lanelet
@dynamic_obstacles_on_lanelet.setter
def dynamic_obstacles_on_lanelet(self, obstacle_ids: Dict[int, Set[int]]):
assert isinstance(obstacle_ids, dict), '<Lanelet/obstacles_on_lanelet>: provided dictionary of ids is not a ' \
'dictionary! type = {}'.format(type(obstacle_ids))
self._dynamic_obstacles_on_lanelet = obstacle_ids
@property
def static_obstacles_on_lanelet(self) -> Union[None, Set[int]]:
return self._static_obstacles_on_lanelet
@static_obstacles_on_lanelet.setter
def static_obstacles_on_lanelet(self, obstacle_ids: Set[int]):
assert isinstance(obstacle_ids, set), '<Lanelet/obstacles_on_lanelet>: provided list of ids is not a ' \
'set! type = {}'.format(type(obstacle_ids))
self._static_obstacles_on_lanelet = obstacle_ids
@property
def stop_line(self) -> StopLine:
return self._stop_line
@stop_line.setter
def stop_line(self, stop_line: StopLine):
if self._stop_line is None:
assert isinstance(stop_line,
StopLine), '<Lanelet/stop_line>: ''Provided type is not valid! type = {}'.format(
type(stop_line))
self._stop_line = stop_line
else:
warnings.warn('<Lanelet/stop_line>: stop_line of lanelet is immutable!', stacklevel=1)
@property
def lanelet_type(self) -> Set[LaneletType]:
return self._lanelet_type
@lanelet_type.setter
def lanelet_type(self, lanelet_type: Set[LaneletType]):
if self._lanelet_type is None or len(self._lanelet_type) == 0:
assert isinstance(lanelet_type, set) and all(isinstance(elem, LaneletType) for elem in
lanelet_type), '<Lanelet/lanelet_type>: ''Provided type is ' \
'not valid! type = {}, ' \
'expected = Set[LaneletType]'.format(
type(lanelet_type))
self._lanelet_type = lanelet_type
else:
warnings.warn('<Lanelet/lanelet_type>: type of lanelet is immutable!')
@property
def user_one_way(self) -> Set[RoadUser]:
return self._user_one_way
@user_one_way.setter
def user_one_way(self, user_one_way: Set[RoadUser]):
if self._user_one_way is None:
assert isinstance(user_one_way, set) and all(
isinstance(elem, RoadUser) for elem in user_one_way), '<Lanelet/user_one_way>: ' \
'Provided type is ' \
'not valid! type = {}'.format(
type(user_one_way))
self._user_one_way = user_one_way
else:
warnings.warn('<Lanelet/user_one_way>: user_one_way of lanelet is immutable!')
@property
def user_bidirectional(self) -> Set[RoadUser]:
return self._user_bidirectional
@user_bidirectional.setter
def user_bidirectional(self, user_bidirectional: Set[RoadUser]):
if self._user_bidirectional is None:
assert isinstance(user_bidirectional, set) and all(
isinstance(elem, RoadUser) for elem in user_bidirectional), '<Lanelet/user_bidirectional>: ' \
'Provided type is not valid! type' \
' = {}'.format(type(user_bidirectional))
self._user_bidirectional = user_bidirectional
else:
warnings.warn('<Lanelet/user_bidirectional>: user_bidirectional of lanelet is immutable!')
@property
def traffic_signs(self) -> Set[int]:
return self._traffic_signs
@traffic_signs.setter
def traffic_signs(self, traffic_sign_ids: Set[int]):
if self._traffic_signs is None:
assert isinstance(traffic_sign_ids, set), '<Lanelet/traffic_signs>: provided list of ids is not a ' \
'set! type = {}'.format(type(traffic_sign_ids))
self._traffic_signs = traffic_sign_ids
else:
warnings.warn('<Lanelet/traffic_signs>: traffic_signs of lanelet is immutable!')
@property
def traffic_lights(self) -> Set[int]:
return self._traffic_lights
@traffic_lights.setter
def traffic_lights(self, traffic_light_ids: Set[int]):
if self._traffic_lights is None:
assert isinstance(traffic_light_ids, set), '<Lanelet/traffic_lights>: provided list of ids is not a ' \
'set! type = {}'.format(type(traffic_light_ids))
self._traffic_lights = traffic_light_ids
else:
warnings.warn('<Lanelet/traffic_lights>: traffic_lights of lanelet is immutable!')
@property
def polygon(self) -> Polygon:
return self._polygon
def add_predecessor(self, lanelet: int):
"""
Adds the ID of a predecessor lanelet to the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet not in self.predecessor:
self.predecessor.append(lanelet)
def remove_predecessor(self, lanelet: int):
"""
Removes the ID of a predecessor lanelet from the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet in self.predecessor:
self.predecessor.remove(lanelet)
def add_successor(self, lanelet: int):
"""
Adds the ID of a successor lanelet to the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet not in self.successor:
self.successor.append(lanelet)
def remove_successor(self, lanelet: int):
"""
Removes the ID of a successor lanelet from the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet in self.successor:
self.successor.remove(lanelet)
def translate_rotate(self, translation: np.ndarray, angle: float):
"""
This method translates and rotates a lanelet
:param translation: The translation given as [x_off,y_off] for the x and y translation
:param angle: The rotation angle in radian (counter-clockwise defined)
"""
assert is_real_number_vector(translation, 2), '<Lanelet/translate_rotate>: provided translation ' \
'is not valid! translation = {}'.format(translation)
assert is_valid_orientation(
angle), '<Lanelet/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
# create transformation matrix
t_m = commonroad.geometry.transform.translation_rotation_matrix(translation, angle)
# transform center vertices
tmp = t_m.dot(np.vstack((self.center_vertices.transpose(), np.ones((1, self.center_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._center_vertices = tmp.transpose()
# transform left vertices
tmp = t_m.dot(np.vstack((self.left_vertices.transpose(), np.ones((1, self.left_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._left_vertices = tmp.transpose()
# transform right vertices
tmp = t_m.dot(np.vstack((self.right_vertices.transpose(), np.ones((1, self.right_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._right_vertices = tmp.transpose()
# transform the stop line
if self._stop_line is not None:
self._stop_line.translate_rotate(translation, angle)
# recreate polygon in case it existed
self._polygon = Polygon(np.concatenate((self.right_vertices, np.flip(self.left_vertices, 0))))
def interpolate_position(self, distance: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]:
"""
Computes the interpolated positions on the center/right/left polyline of the lanelet for a given distance
along the lanelet
:param distance: The distance for the interpolation
:return: The interpolated positions on the center/right/left polyline and the segment id of the polyline where
the interpolation takes place in the form ([x_c,y_c],[x_r,y_r],[x_l,y_l], segment_id)
"""
assert is_real_number(distance) and np.greater_equal(self.distance[-1], distance) and np.greater_equal(distance,
0), \
'<Lanelet/interpolate_position>: provided distance is not valid! distance = {}'.format(
distance)
idx = np.searchsorted(self.distance, distance) - 1
while not self.distance[idx] <= distance:
idx += 1
r = (distance - self.distance[idx]) / (self.distance[idx + 1] - self.distance[idx])
return ((1 - r) * self._center_vertices[idx] + r * self._center_vertices[idx + 1],
(1 - r) * self._right_vertices[idx] + r * self._right_vertices[idx + 1],
(1 - r) * self._left_vertices[idx] + r * self._left_vertices[idx + 1], idx)
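    # Worked example (illustrative numbers): for a distance d with
    # self.distance[idx] <= d < self.distance[idx + 1], the ratio
    # r = (d - distance[idx]) / (distance[idx + 1] - distance[idx]) lies in
    # [0, 1) and blends neighbouring vertices linearly, e.g. r = 0.25 yields
    # the point a quarter of the way along segment idx on each of the three
    # polylines.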
def convert_to_polygon(self) -> Polygon:
"""
Converts the given lanelet to a polygon representation
:return: The polygon of the lanelet
"""
warnings.warn("Use the lanelet property <polygon> instead", DeprecationWarning)
return self._polygon
def contains_points(self, point_list: np.ndarray) -> List[bool]:
"""
Checks if a list of points is enclosed in the lanelet
:param point_list: The list of points in the form [[px1,py1],[px2,py2,],...]
:return: List of Boolean values with True indicating point is enclosed and False otherwise
"""
assert isinstance(point_list,
ValidTypes.ARRAY), '<Lanelet/contains_points>: provided list of points is not a list! type ' \
'= {}'.format(type(point_list))
assert is_valid_polyline(
point_list), 'Lanelet/contains_points>: provided list of points is malformed! points = {}'.format(
point_list)
return [self._polygon.contains_point(p) for p in point_list]
def get_obstacles(self, obstacles: List[Obstacle], time_step: int = 0) -> List[Obstacle]:
"""
Returns the subset of obstacles, which are located in the lanelet, of a given candidate set
:param obstacles: The set of obstacle candidates
:param time_step: The time step for the occupancy to check
:return:
"""
assert isinstance(obstacles, list) and all(
isinstance(o, Obstacle) for o in obstacles), '<Lanelet/get_obstacles>: Provided list of obstacles' \
' is malformed! obstacles = {}'.format(obstacles)
# output list
res = list()
lanelet_shapely_obj = self._polygon.shapely_object
# look at each obstacle
for o in obstacles:
o_shape = o.occupancy_at_time(time_step).shape
# vertices to check
shape_shapely_objects = list()
# distinguish between shape and shape group and extract vertices
if isinstance(o_shape, ShapeGroup):
shape_shapely_objects.extend([sh.shapely_object for sh in o_shape.shapes])
else:
shape_shapely_objects.append(o_shape.shapely_object)
# check if obstacle is in lane
for shapely_obj in shape_shapely_objects:
if lanelet_shapely_obj.intersects(shapely_obj):
res.append(o)
break
return res
@staticmethod
def _merge_static_obstacles_on_lanelet(obstacles_on_lanelet1: Set[int], obstacles_on_lanelet2: Set[int]):
"""
Merges obstacle IDs of static obstacles on two lanelets
:param obstacles_on_lanelet1: Obstacle IDs on the first lanelet
:param obstacles_on_lanelet2: Obstacle IDs on the second lanelet
:return: Merged obstacle IDs of static obstacles on lanelets
"""
for obs_id in obstacles_on_lanelet2:
if obs_id not in obstacles_on_lanelet1:
obstacles_on_lanelet1.add(obs_id)
return obstacles_on_lanelet1
@staticmethod
def _merge_dynamic_obstacles_on_lanelet(obstacles_on_lanelet1: Dict[int, Set[int]],
obstacles_on_lanelet2: Dict[int, Set[int]]):
"""
        Merges obstacle IDs of dynamic obstacles on two lanelets
        :param obstacles_on_lanelet1: Obstacle IDs on the first lanelet
        :param obstacles_on_lanelet2: Obstacle IDs on the second lanelet
        :return: Merged obstacle IDs of dynamic obstacles on lanelets
"""
if len(obstacles_on_lanelet2.items()) > 0:
for time_step, ids in obstacles_on_lanelet2.items():
for obs_id in ids:
if obstacles_on_lanelet1.get(time_step) is not None:
if obs_id not in obstacles_on_lanelet1[time_step]:
obstacles_on_lanelet1[time_step].add(obs_id)
else:
obstacles_on_lanelet1[time_step] = {obs_id}
return obstacles_on_lanelet1
@classmethod
def merge_lanelets(cls, lanelet1: 'Lanelet', lanelet2: 'Lanelet') -> 'Lanelet':
"""
Merges two lanelets which are in predecessor-successor relation
:param lanelet1: The first lanelet
:param lanelet2: The second lanelet
:return: Merged lanelet (predecessor => successor)
"""
assert isinstance(lanelet1, Lanelet), '<Lanelet/merge_lanelets>: lanelet1 is not a valid lanelet object!'
        assert isinstance(lanelet2, Lanelet), '<Lanelet/merge_lanelets>: lanelet2 is not a valid lanelet object!'
# check connection via successor / predecessor
assert lanelet1.lanelet_id in lanelet2.successor or \
lanelet2.lanelet_id in lanelet1.successor or \
lanelet1.lanelet_id in lanelet2.predecessor or \
lanelet2.lanelet_id in lanelet1.predecessor, '<Lanelet/merge_lanelets>: cannot merge two not ' \
'connected lanelets! successors of l1 = {}, successors ' \
'of l2 = {}'.format(lanelet1.successor, lanelet2.successor)
# check pred and successor
if lanelet1.lanelet_id in lanelet2.predecessor or lanelet2.lanelet_id in lanelet1.successor:
pred = lanelet1
suc = lanelet2
else:
pred = lanelet2
suc = lanelet1
# build new merged lanelet (remove first node of successor if both lanes are connected)
# check connectedness
if np.isclose(pred.left_vertices[-1], suc.left_vertices[0]).all():
idx = 1
else:
idx = 0
# create new lanelet
left_vertices = np.concatenate((pred.left_vertices, suc.left_vertices[idx:]))
right_vertices = np.concatenate((pred.right_vertices, suc.right_vertices[idx:]))
center_vertices = np.concatenate((pred.center_vertices, suc.center_vertices[idx:]))
lanelet_id = int(str(pred.lanelet_id) + str(suc.lanelet_id))
predecessor = pred.predecessor
successor = suc.successor
static_obstacles_on_lanelet = cls._merge_static_obstacles_on_lanelet(lanelet1.static_obstacles_on_lanelet,
lanelet2.static_obstacles_on_lanelet)
dynamic_obstacles_on_lanelet = cls._merge_dynamic_obstacles_on_lanelet(lanelet1.dynamic_obstacles_on_lanelet,
lanelet2.dynamic_obstacles_on_lanelet)
new_lanelet = Lanelet(left_vertices, center_vertices, right_vertices, lanelet_id, predecessor=predecessor,
successor=successor)
new_lanelet.static_obstacles_on_lanelet = static_obstacles_on_lanelet
new_lanelet.dynamic_obstacles_on_lanelet = dynamic_obstacles_on_lanelet
return new_lanelet
@classmethod
def all_lanelets_by_merging_successors_from_lanelet(cls, lanelet: 'Lanelet',
network: 'LaneletNetwork', max_length: float = 150.0) \
-> Tuple[List['Lanelet'], List[List[int]]]:
"""
Computes all reachable lanelets starting from a provided lanelet
and merges them to a single lanelet for each route.
:param lanelet: The lanelet to start from
:param network: The network which contains all lanelets
:param max_length: maximal length of merged lanelets can be provided
:return: List of merged lanelets, Lists of lanelet ids of which each merged lanelet consists
"""
assert isinstance(lanelet, Lanelet), '<Lanelet>: provided lanelet is not a valid Lanelet!'
assert isinstance(network, LaneletNetwork), '<Lanelet>: provided lanelet network is not a ' \
'valid lanelet network!'
assert network.find_lanelet_by_id(lanelet.lanelet_id) is not None, '<Lanelet>: lanelet not ' \
'contained in network!'
if lanelet.successor is None or len(lanelet.successor) == 0:
return [lanelet], [[lanelet.lanelet_id]]
merge_jobs = lanelet.find_lanelet_successors_in_range(network, max_length=max_length)
merge_jobs = [[lanelet] + [network.find_lanelet_by_id(p) for p in path] for path in merge_jobs]
# Create merged lanelets from paths
merged_lanelets = list()
merge_jobs_final = []
for path in merge_jobs:
pred = path[0]
merge_jobs_tmp = [pred.lanelet_id]
for lanelet in path[1:]:
merge_jobs_tmp.append(lanelet.lanelet_id)
pred = Lanelet.merge_lanelets(pred, lanelet)
merge_jobs_final.append(merge_jobs_tmp)
merged_lanelets.append(pred)
return merged_lanelets, merge_jobs_final
def find_lanelet_successors_in_range(self, lanelet_network: "LaneletNetwork", max_length=50.0) -> List[List[int]]:
"""
Finds all possible successor paths (id sequences) within max_length.
:param lanelet_network: lanelet network
:param max_length: abort once length of path is reached
        :return: list of paths, each a list of successor lanelet IDs
"""
paths = [[s] for s in self.successor]
paths_final = []
lengths = [lanelet_network.find_lanelet_by_id(s).distance[-1] for s in self.successor]
while paths:
paths_next = []
lengths_next = []
for p, le in zip(paths, lengths):
successors = lanelet_network.find_lanelet_by_id(p[-1]).successor
if not successors:
paths_final.append(p)
else:
for s in successors:
if s in p or s == self.lanelet_id or le >= max_length:
# stop extending this path: loop detected or maximal length already reached
paths_final.append(p)
continue
l_next = le + lanelet_network.find_lanelet_by_id(s).distance[-1]
if l_next < max_length:
paths_next.append(p + [s])
lengths_next.append(l_next)
else:
paths_final.append(p + [s])
paths = paths_next
lengths = lengths_next
return paths_final
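# Hedged usage sketch (assumes a populated LaneletNetwork `network` containing this lanelet):
# for path in lanelet.find_lanelet_successors_in_range(network, max_length=100.0):
#     print([lanelet.lanelet_id] + path)  # full route as a sequence of lanelet ids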
def add_dynamic_obstacle_to_lanelet(self, obstacle_id: int, time_step: int):
"""
Adds a dynamic obstacle ID to lanelet
:param obstacle_id: obstacle ID to add
:param time_step: time step at which the obstacle should be added
"""
if self.dynamic_obstacles_on_lanelet.get(time_step) is None:
self.dynamic_obstacles_on_lanelet[time_step] = set()
self.dynamic_obstacles_on_lanelet[time_step].add(obstacle_id)
def add_static_obstacle_to_lanelet(self, obstacle_id: int):
"""
Adds a static obstacle ID to lanelet
:param obstacle_id: obstacle ID to add
"""
self.static_obstacles_on_lanelet.add(obstacle_id)
def add_traffic_sign_to_lanelet(self, traffic_sign_id: int):
"""
Adds a traffic sign ID to lanelet
:param traffic_sign_id: traffic sign ID to add
"""
self.traffic_signs.add(traffic_sign_id)
def add_traffic_light_to_lanelet(self, traffic_light_id: int):
"""
Adds a traffic light ID to lanelet
:param traffic_light_id: traffic light ID to add
"""
self.traffic_lights.add(traffic_light_id)
def dynamic_obstacle_by_time_step(self, time_step) -> Set[int]:
"""
Returns all dynamic obstacles on lanelet at specific time step
:param time_step: time step of interest
:returns: list of obstacle IDs
"""
if self.dynamic_obstacles_on_lanelet.get(time_step) is not None:
return self.dynamic_obstacles_on_lanelet.get(time_step)
else:
return set()
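# Hedged usage sketch (hypothetical ids, not taken from a real scenario):
# lanelet.add_dynamic_obstacle_to_lanelet(obstacle_id=42, time_step=0)
# assert 42 in lanelet.dynamic_obstacle_by_time_step(0)
# assert lanelet.dynamic_obstacle_by_time_step(999) == set()  # unknown time steps yield an empty set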
class LaneletNetwork(IDrawable):
"""
Class which represents a network of connected lanelets
"""
def __init__(self):
"""
Constructor for LaneletNetwork
"""
self._lanelets: Dict[int, Lanelet] = {}
# lanelet_id, shapely_polygon
self._buffered_polygons: Dict[int, ShapelyPolygon] = {}
self._strtree = None
# id(shapely_polygon), lanelet_id
self._lanelet_id_index_by_id: Dict[int, int] = {}
self._intersections: Dict[int, Intersection] = {}
self._traffic_signs: Dict[int, TrafficSign] = {}
self._traffic_lights: Dict[int, TrafficLight] = {}
# pickling of STRtree is not supported by shapely at the moment
# use this workaround described in this issue:
# https://github.com/Toblerity/Shapely/issues/1033
def __getstate__(self):
state = self.__dict__.copy()
del state["_strtree"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._create_strtree()
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
# reset
self._strtree = None
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
result._create_strtree()
# restore
self._create_strtree()
return result
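# Hedged sketch of why the __getstate__/__setstate__ pair above exists (stdlib pickle assumed):
# import pickle
# clone = pickle.loads(pickle.dumps(network))  # safe although shapely's STRtree is unpicklable,
# # because the tree is dropped on dump and rebuilt on load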
def __eq__(self, other):
if not isinstance(other, LaneletNetwork):
warnings.warn(f"Inequality between LaneletNetwork {repr(self)} and different type {type(other)}")
return False
list_elements_eq = True
lanelet_network_eq = True
elements = [self._lanelets, self._intersections, self._traffic_signs, self._traffic_lights]
elements_other = [other._lanelets, other._intersections, other._traffic_signs, other._traffic_lights]
for e, e_other in zip(elements, elements_other):
lanelet_network_eq = lanelet_network_eq and len(e) == len(e_other)
for k in e.keys():
if k not in e_other:
lanelet_network_eq = False
continue
if e.get(k) != e_other.get(k):
list_elements_eq = False
if not lanelet_network_eq:
warnings.warn(f"Inequality of LaneletNetwork {repr(self)} and the other one {repr(other)}")
return lanelet_network_eq and list_elements_eq
def __hash__(self):
return hash((frozenset(self._lanelets.items()), frozenset(self._intersections.items()),
frozenset(self._traffic_signs.items()), frozenset(self._traffic_lights.items())))
def __str__(self):
return f"LaneletNetwork consists of lanelets {set(self._lanelets.keys())}, " \
f"intersections {set(self._intersections.keys())}, " \
f"traffic signs {set(self._traffic_signs.keys())}, and traffic lights {set(self._traffic_lights.keys())}"
def __repr__(self):
return f"LaneletNetwork(lanelets={repr(self._lanelets)}, intersections={repr(self._intersections)}, " \
f"traffic_signs={repr(self._traffic_signs)}, traffic_lights={repr(self._traffic_lights)})"
def _get_lanelet_id_by_shapely_polygon(self, polygon: ShapelyPolygon) -> int:
return self._lanelet_id_index_by_id[id(polygon)]
@property
def lanelets(self) -> List[Lanelet]:
return list(self._lanelets.values())
@property
def lanelet_polygons(self) -> List[Polygon]:
return [la.polygon for la in self.lanelets]
@lanelets.setter
def lanelets(self, _):
warnings.warn('<LaneletNetwork/lanelets>: lanelets of network are immutable')
@property
def intersections(self) -> List[Intersection]:
return list(self._intersections.values())
@property
def traffic_signs(self) -> List[TrafficSign]:
return list(self._traffic_signs.values())
@property
def traffic_lights(self) -> List[TrafficLight]:
return list(self._traffic_lights.values())
@property
def map_inc_lanelets_to_intersections(self) -> Dict[int, Intersection]:
"""
:returns: dict that maps each lanelet id to the intersection of which it is an incoming lanelet.
"""
return {l_id: intersection for intersection in self.intersections for l_id in
list(intersection.map_incoming_lanelets.keys())}
@classmethod
def create_from_lanelet_list(cls, lanelets: list, cleanup_ids: bool = False):
"""
Creates a LaneletNetwork object from a given list of lanelets
:param lanelets: The list of lanelets
:param cleanup_ids: cleans up unused ids
:return: The LaneletNetwork for the given list of lanelets
"""
assert isinstance(lanelets, list) and all(
isinstance(la, Lanelet) for la in lanelets), '<LaneletNetwork/create_from_lanelet_list>:' \
'Provided list of lanelets is not valid! ' \
'lanelets = {}'.format(lanelets)
# create lanelet network
lanelet_network = cls()
# add each lanelet to the lanelet network
for la in lanelets:
lanelet_network.add_lanelet(copy.deepcopy(la), rtree=False)
if cleanup_ids:
lanelet_network.cleanup_lanelet_references()
lanelet_network._create_strtree()
return lanelet_network
@classmethod
def create_from_lanelet_network(cls, lanelet_network: 'LaneletNetwork', shape_input=None,
exclude_lanelet_types=None):
"""
Creates a lanelet network from a given lanelet network (copy). Providing a shape reduces the lanelets to
those that intersect it, and providing a set of lanelet types excludes lanelets of those types from the
newly created network.
:param lanelet_network: The existing lanelet network
:param shape_input: The lanelets intersecting this shape will be in the new network
:param exclude_lanelet_types: Removes all lanelets with these lanelet_types
:return: The new lanelet network
"""
if exclude_lanelet_types is None:
exclude_lanelet_types = set()
new_lanelet_network = cls()
traffic_sign_ids = set()
traffic_light_ids = set()
lanelets = set()
if shape_input is not None:
for la in lanelet_network.lanelets:
if la.lanelet_type.intersection(exclude_lanelet_types) == set():
lanelet_polygon = la.polygon.shapely_object
if shape_input.shapely_object.intersects(lanelet_polygon):
for sign_id in la.traffic_signs:
traffic_sign_ids.add(sign_id)
for light_id in la.traffic_lights:
traffic_light_ids.add(light_id)
lanelets.add(la)
else:
for la in lanelet_network.lanelets:
if la.lanelet_type.intersection(exclude_lanelet_types) == set():
lanelets.add(la)
for sign_id in la.traffic_signs:
traffic_sign_ids.add(sign_id)
for light_id in la.traffic_lights:
traffic_light_ids.add(light_id)
for sign_id in traffic_sign_ids:
new_lanelet_network.add_traffic_sign(copy.deepcopy(lanelet_network.find_traffic_sign_by_id(sign_id)), set())
for light_id in traffic_light_ids:
new_lanelet_network.add_traffic_light(copy.deepcopy(lanelet_network.find_traffic_light_by_id(light_id)),
set())
for la in lanelets:
new_lanelet_network.add_lanelet(copy.deepcopy(la), rtree=False)
new_lanelet_network._create_strtree()
return new_lanelet_network
def _create_strtree(self):
"""
Creates a spatial index (STRtree) over the lanelet polygons for faster position queries.
Since the STRtree is immutable, it has to be recreated after every lanelet addition, or created
once after all lanelets have been added.
"""
# validate buffered polygons
def assert_shapely_polygon(lanelet_id, polygon):
if not isinstance(polygon, ShapelyPolygon):
warnings.warn(
f"Lanelet with id {lanelet_id}'s polygon is not a <shapely.geometry.Polygon> object! It will "
f"be OMITTED from STRtree, therefore this lanelet will NOT be contained in the results of the "
f"find_lanelet_by_<position/shape>() functions!!")
return False
else:
return True
self._buffered_polygons = {lanelet_id: lanelet_shapely_polygon for lanelet_id, lanelet_shapely_polygon in
self._buffered_polygons.items() if
assert_shapely_polygon(lanelet_id, lanelet_shapely_polygon)}
self._lanelet_id_index_by_id = {id(lanelet_shapely_polygon): lanelet_id for lanelet_id, lanelet_shapely_polygon
in self._buffered_polygons.items()}
self._strtree = STRtree(list(self._buffered_polygons.values()))
def remove_lanelet(self, lanelet_id: int, rtree: bool = True):
"""
Removes a lanelet from a lanelet network and deletes all references.
@param lanelet_id: ID of lanelet which should be removed.
@param rtree: Boolean indicating whether rtree should be initialized
"""
if lanelet_id in self._lanelets.keys():
del self._lanelets[lanelet_id]
del self._buffered_polygons[lanelet_id]
self.cleanup_lanelet_references()
if rtree:
self._create_strtree()
def cleanup_lanelet_references(self):
"""
Deletes lanelet IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
"""
existing_ids = set(self._lanelets.keys())
for la in self.lanelets:
la._predecessor = list(set(la.predecessor).intersection(existing_ids))
la._successor = list(set(la.successor).intersection(existing_ids))
la._adj_left = None if la.adj_left is None or la.adj_left not in existing_ids else la.adj_left
la._adj_left_same_direction = None \
if la.adj_left_same_direction is None or la.adj_left not in existing_ids else la.adj_left_same_direction
la._adj_right = None if la.adj_right is None or la.adj_right not in existing_ids else la.adj_right
la._adj_right_same_direction = None \
if la.adj_right_same_direction is None or la.adj_right not in existing_ids else \
la.adj_right_same_direction
for inter in self.intersections:
for inc in inter.incomings:
inc._incoming_lanelets = set(inc.incoming_lanelets).intersection(existing_ids)
inc._successors_straight = set(inc.successors_straight).intersection(existing_ids)
inc._successors_right = set(inc.successors_right).intersection(existing_ids)
inc._successors_left = set(inc.successors_left).intersection(existing_ids)
inter._crossings = set(inter.crossings).intersection(existing_ids)
def remove_traffic_sign(self, traffic_sign_id: int):
"""
Removes a traffic sign from a lanelet network and deletes all references.
@param traffic_sign_id: ID of traffic sign which should be removed.
"""
if traffic_sign_id in self._traffic_signs.keys():
del self._traffic_signs[traffic_sign_id]
self.cleanup_traffic_sign_references()
def cleanup_traffic_sign_references(self):
"""
Deletes traffic sign IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
"""
existing_ids = set(self._traffic_signs.keys())
for la in self.lanelets:
la._traffic_signs = la.traffic_signs.intersection(existing_ids)
if la.stop_line is not None and la.stop_line.traffic_sign_ref is not None:
la.stop_line._traffic_sign_ref = la.stop_line.traffic_sign_ref.intersection(existing_ids)
def remove_traffic_light(self, traffic_light_id: int):
"""
Removes a traffic light from a lanelet network and deletes all references.
@param traffic_light_id: ID of traffic light which should be removed.
"""
if traffic_light_id in self._traffic_lights.keys():
del self._traffic_lights[traffic_light_id]
self.cleanup_traffic_light_references()
def cleanup_traffic_light_references(self):
"""
Deletes traffic light IDs which do not exist in the lanelet network. Useful when cutting out lanelet networks.
"""
existing_ids = set(self._traffic_lights.keys())
for la in self.lanelets:
la._traffic_lights = la.traffic_lights.intersection(existing_ids)
if la.stop_line is not None and la.stop_line.traffic_light_ref is not None:
la.stop_line._traffic_light_ref = la.stop_line.traffic_light_ref.intersection(existing_ids)
def remove_intersection(self, intersection_id: int):
"""
Removes an intersection from a lanelet network and deletes all references.
@param intersection_id: ID of intersection which should be removed.
"""
if intersection_id in self._intersections.keys():
del self._intersections[intersection_id]
def find_lanelet_by_id(self, lanelet_id: int) -> Lanelet:
"""
Finds a lanelet for a given lanelet_id
:param lanelet_id: The id of the lanelet to find
:return: The lanelet object if the id exists and None otherwise
"""
assert is_natural_number(
lanelet_id), '<LaneletNetwork/find_lanelet_by_id>: provided id is not valid! id = {}'.format(lanelet_id)
return self._lanelets[lanelet_id] if lanelet_id in self._lanelets else None
def find_traffic_sign_by_id(self, traffic_sign_id: int) -> TrafficSign:
"""
Finds a traffic sign for a given traffic_sign_id
:param traffic_sign_id: The id of the traffic sign to find
:return: The traffic sign object if the id exists and None otherwise
"""
assert is_natural_number(
traffic_sign_id), '<LaneletNetwork/find_traffic_sign_by_id>: provided id is not valid! ' \
'id = {}'.format(traffic_sign_id)
return self._traffic_signs[traffic_sign_id] if traffic_sign_id in self._traffic_signs else None
def find_traffic_light_by_id(self, traffic_light_id: int) -> TrafficLight:
"""
Finds a traffic light for a given traffic_light_id
:param traffic_light_id: The id of the traffic light to find
:return: The traffic light object if the id exists and None otherwise
"""
assert is_natural_number(
traffic_light_id), '<LaneletNetwork/find_traffic_light_by_id>: provided id is not valid! ' \
'id = {}'.format(traffic_light_id)
return self._traffic_lights[traffic_light_id] if traffic_light_id in self._traffic_lights else None
def find_intersection_by_id(self, intersection_id: int) -> Intersection:
"""
Finds an intersection for a given intersection_id
:param intersection_id: The id of the intersection to find
:return: The intersection object if the id exists and None otherwise
"""
assert is_natural_number(intersection_id), '<LaneletNetwork/find_intersection_by_id>: ' \
'provided id is not valid! id = {}'.format(intersection_id)
return self._intersections[intersection_id] if intersection_id in self._intersections else None
def add_lanelet(self, lanelet: Lanelet, rtree: bool = True):
"""
Adds a lanelet to the LaneletNetwork
:param lanelet: The lanelet to add
:param rtree: Boolean indicating whether rtree should be initialized
:return: True if the lanelet has successfully been added to the network, false otherwise
"""
assert isinstance(lanelet, Lanelet), '<LaneletNetwork/add_lanelet>: provided lanelet is not of ' \
'type lanelet! type = {}'.format(type(lanelet))
# check if lanelet already exists in network and warn user
if lanelet.lanelet_id in self._lanelets.keys():
warnings.warn('Lanelet already exists in network! No changes are made.')
return False
else:
self._lanelets[lanelet.lanelet_id] = lanelet
self._buffered_polygons[lanelet.lanelet_id] = lanelet.polygon.shapely_object
if rtree:
self._create_strtree()
return True
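# Hedged usage sketch for bulk insertion (hypothetical `network` and `many_lanelets`;
# mirrors the pattern used by create_from_lanelet_list above):
# for la in many_lanelets:
#     network.add_lanelet(la, rtree=False)  # skip the per-add STRtree rebuild
# network._create_strtree()  # rebuild the spatial index once at the end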
def add_traffic_sign(self, traffic_sign: TrafficSign, lanelet_ids: Set[int]):
"""
Adds a traffic sign to the LaneletNetwork
:param traffic_sign: The traffic sign to add
:param lanelet_ids: Lanelets the traffic sign should be referenced from
:return: True if the traffic sign has successfully been added to the network, false otherwise
"""
assert isinstance(traffic_sign, TrafficSign), '<LaneletNetwork/add_traffic_sign>: provided traffic sign is ' \
'not of type traffic_sign! type = {}'.format(type(traffic_sign))
# check if traffic sign already exists in network and warn user
if traffic_sign.traffic_sign_id in self._traffic_signs.keys():
warnings.warn('Traffic sign with ID {} already exists in network! '
'No changes are made.'.format(traffic_sign.traffic_sign_id))
return False
else:
self._traffic_signs[traffic_sign.traffic_sign_id] = traffic_sign
for lanelet_id in lanelet_ids:
lanelet = self.find_lanelet_by_id(lanelet_id)
if lanelet is not None:
lanelet.add_traffic_sign_to_lanelet(traffic_sign.traffic_sign_id)
else:
warnings.warn('Traffic sign cannot be referenced to lanelet because the lanelet does not exist.')
return True
def add_traffic_light(self, traffic_light: TrafficLight, lanelet_ids: Set[int]):
"""
Adds a traffic light to the LaneletNetwork
:param traffic_light: The traffic light to add
:param lanelet_ids: Lanelets the traffic light should be referenced from
:return: True if the traffic light has successfully been added to the network, false otherwise
"""
assert isinstance(traffic_light, TrafficLight), '<LaneletNetwork/add_traffic_light>: provided traffic light ' \
'is not of type traffic_light! ' \
'type = {}'.format(type(traffic_light))
# check if traffic light already exists in network and warn user
if traffic_light.traffic_light_id in self._traffic_lights.keys():
warnings.warn('Traffic light already exists in network! No changes are made.')
return False
else:
self._traffic_lights[traffic_light.traffic_light_id] = traffic_light
for lanelet_id in lanelet_ids:
lanelet = self.find_lanelet_by_id(lanelet_id)
if lanelet is not None:
lanelet.add_traffic_light_to_lanelet(traffic_light.traffic_light_id)
else:
warnings.warn('Traffic light cannot be referenced to lanelet because the lanelet does not exist.')
return True
def add_intersection(self, intersection: Intersection):
"""
Adds an intersection to the LaneletNetwork
:param intersection: The intersection to add
:return: True if the intersection has successfully been added to the network, false otherwise
"""
assert isinstance(intersection, Intersection), '<LaneletNetwork/add_intersection>: provided intersection is ' \
'not of type Intersection! type = {}'.format(type(intersection))
# check if intersection already exists in network and warn user
if intersection.intersection_id in self._intersections.keys():
warnings.warn('Intersection already exists in network! No changes are made.')
return False
else:
self._intersections[intersection.intersection_id] = intersection
return True
def add_lanelets_from_network(self, lanelet_network: 'LaneletNetwork'):
"""
Adds lanelets from a given network object to the current network
:param lanelet_network: The lanelet network
:return: True if all lanelets have been added to the network, false otherwise
"""
flag = True
# add lanelets to the network
for la in lanelet_network.lanelets:
flag = flag and self.add_lanelet(la, rtree=False)
self._create_strtree()
return flag
def translate_rotate(self, translation: np.ndarray, angle: float):
"""
Translates and rotates the complete lanelet network
:param translation: The translation given as [x_off,y_off] for the x and y translation
:param angle: The rotation angle in radian (counter-clockwise defined)
"""
assert is_real_number_vector(translation,
2), '<LaneletNetwork/translate_rotate>: provided translation is not valid! ' \
'translation = {}'.format(translation)
assert is_valid_orientation(
angle), '<LaneletNetwork/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
# rotate each lanelet
for lanelet in self._lanelets.values():
lanelet.translate_rotate(translation, angle)
for traffic_sign in self._traffic_signs.values():
traffic_sign.translate_rotate(translation, angle)
for traffic_light in self._traffic_lights.values():
traffic_light.translate_rotate(translation, angle)
def find_lanelet_by_position(self, point_list: List[np.ndarray]) -> List[List[int]]:
"""
Finds the lanelet ids for a list of given positions
:param point_list: The list of positions to check
:return: A list of lists of lanelet ids, one per position. If a position could not be matched to a lanelet, its list is empty
"""
assert isinstance(point_list, ValidTypes.LISTS), \
'<LaneletNetwork/find_lanelet_by_position>: provided list of points is not a list! ' \
'type = {}'.format(type(point_list))
return [[self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in
self._strtree.query(point) if lanelet_shapely_polygon.intersects(point)
or lanelet_shapely_polygon.buffer(1e-15).intersects(point)] for point in
[ShapelyPoint(point) for point in point_list]]
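# Hedged usage sketch (hypothetical coordinates):
# hits = network.find_lanelet_by_position([np.array([0.0, 0.0]), np.array([5.0, 1.5])])
# hits[0]  # (possibly empty) list of lanelet ids containing the first point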
def find_lanelet_by_shape(self, shape: Shape) -> List[int]:
"""
Finds the lanelet ids of the lanelets intersected by a given shape
:param shape: The shape to check
:return: A list of lanelet ids. If the shape could not be matched to a lanelet, an empty list is returned
"""
assert isinstance(shape, (Circle, Polygon, Rectangle)), '<Lanelet/find_lanelet_by_shape>: ' \
'provided shape is not a shape! ' \
'type = {}'.format(type(shape))
return [self._get_lanelet_id_by_shapely_polygon(lanelet_shapely_polygon) for lanelet_shapely_polygon in
self._strtree.query(shape.shapely_object) if lanelet_shapely_polygon.intersects(shape.shapely_object)]
def filter_obstacles_in_network(self, obstacles: List[Obstacle]) -> List[Obstacle]:
"""
Returns the list of obstacles which are located in the lanelet network
:param obstacles: The list of obstacles to check
:return: The list of obstacles which are located in the lanelet network
"""
res = list()
obstacle_to_lanelet_map = self.map_obstacles_to_lanelets(obstacles)
for k in obstacle_to_lanelet_map.keys():
obs = obstacle_to_lanelet_map[k]
for o in obs:
if o not in res:
res.append(o)
return res
def map_obstacles_to_lanelets(self, obstacles: List[Obstacle]) -> Dict[int, List[Obstacle]]:
"""
Maps a given list of obstacles to the lanelets of the lanelet network
:param obstacles: The list of CR obstacles
:return: A dictionary with the lanelet id as key and the list of obstacles on the lanelet as a List[Obstacles]
"""
mapping = {}
for la in self.lanelets:
# map obstacles to current lanelet
mapped_objs = la.get_obstacles(obstacles)
# check if mapping is not empty
if len(mapped_objs) > 0:
mapping[la.lanelet_id] = mapped_objs
return mapping
def lanelets_in_proximity(self, point: np.ndarray, radius: float) -> List[Lanelet]:
"""
Finds all lanelets which intersect a given circle, defined by the center point and radius
:param point: The center of the circle
:param radius: The radius of the circle
:return: The list of lanelets which intersect the given circle
"""
assert is_real_number_vector(point, length=2), '<LaneletNetwork/lanelets_in_proximity>: provided point is ' \
'not valid! point = {}'.format(point)
assert is_positive(
radius), '<LaneletNetwork/lanelets_in_proximity>: provided radius is not valid! radius = {}'.format(
radius)
# get list of lanelet ids
ids = self._lanelets.keys()
# output list
lanes = dict()
rad_sqr = radius ** 2
# distance dict for sorting
distance_list = list()
# go through list of lanelets
for i in ids:
# if current lanelet has not already been added to lanes list
if i not in lanes:
lanelet = self.find_lanelet_by_id(i)
# compute squared distances (the sqrt is skipped to save computation)
distance = (lanelet.center_vertices - point) ** 2.
distance = distance[:, 0] + distance[:, 1]
# check if at least one distance is smaller than the radius
if any(np.greater_equal(rad_sqr, distance)):
lanes[i] = self.find_lanelet_by_id(i)
distance_list.append(np.min(distance))
# check if adjacent lanelets can be added as well
index_min_dist = np.argmin(distance - rad_sqr)
# check right side of lanelet
if lanelet.adj_right is not None:
p = (lanelet.right_vertices[index_min_dist, :] - point) ** 2
p = p[0] + p[1]
if np.greater(rad_sqr, p) and lanelet.adj_right not in lanes:
lanes[lanelet.adj_right] = self.find_lanelet_by_id(lanelet.adj_right)
distance_list.append(p)
# check left side of lanelet
if lanelet.adj_left is not None:
p = (lanelet.left_vertices[index_min_dist, :] - point) ** 2
p = p[0] + p[1]
if np.greater(rad_sqr, p) and lanelet.adj_left not in lanes:
lanes[lanelet.adj_left] = self.find_lanelet_by_id(lanelet.adj_left)
distance_list.append(p)
# sort list according to distance
indices = np.argsort(distance_list)
lanelets = list(lanes.values())
# return sorted list
return [lanelets[i] for i in indices]
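# Hedged worked example for the squared-distance trick used above: comparing squared
# distances is equivalent to comparing distances for non-negative radii, so the sqrt
# can be skipped; e.g. for point (3, 4) and the origin, 3**2 + 4**2 = 25 <= radius**2
# exactly when radius >= 5.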
def draw(self, renderer: IRenderer, draw_params: Union[ParamServer, dict, None] = None,
call_stack: Optional[Tuple[str, ...]] = tuple()):
renderer.draw_lanelet_network(self, draw_params, call_stack)
| 45.103774
| 120
| 0.629314
| 9,080
| 76,496
| 5.05022
| 0.065969
| 0.02015
| 0.013739
| 0.010533
| 0.508614
| 0.381368
| 0.307746
| 0.256324
| 0.214367
| 0.178559
| 0
| 0.00354
| 0.290891
| 76,496
| 1,695
| 121
| 45.130383
| 0.841826
| 0.167434
| 0
| 0.189623
| 0
| 0.00283
| 0.137656
| 0.053087
| 0
| 0
| 0
| 0
| 0.049057
| 1
| 0.120755
| false
| 0
| 0.016038
| 0.036792
| 0.256604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0bf221732ca55e79444af87da162c6c9266b8fc
| 363
|
py
|
Python
|
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | 2
|
2020-10-12T06:50:03.000Z
|
2021-06-08T17:19:43.000Z
|
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | null | null | null |
djangoProject/djangoProject/myApp/urls.py
|
EldiiarDzhunusov/Code
|
6b0708e4007233d3efdc74c09d09ee5bc377a45d
|
[
"MIT"
] | 1
|
2020-12-22T16:44:50.000Z
|
2020-12-22T16:44:50.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path("test/", views.index, name="index"),
path('completed/', views.show_completed, name="completed"),
path('<int:action_id>/', views.show_action, name='action'),
path('update/', views.update_status, name="update_status"),
path('new/', views.new_action, name="new_action"),
]
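# Hedged usage sketch (assumes this URLconf is included at the project root and
# Django settings are configured):
# from django.urls import reverse
# reverse("action", args=[7])   # -> "/7/"
# reverse("update_status")      # -> "/update/"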
| 33
| 63
| 0.674931
| 47
| 363
| 5.06383
| 0.382979
| 0.07563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137741
| 363
| 11
| 64
| 33
| 0.760383
| 0
| 0
| 0
| 0
| 0
| 0.23416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0c050b20614c1dbb61208ccd768082e1160610d
| 14,673
|
py
|
Python
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | 4
|
2020-03-16T14:22:33.000Z
|
2021-09-08T17:56:47.000Z
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | 26
|
2020-05-28T20:58:42.000Z
|
2020-10-21T01:27:17.000Z
|
tests/test_data_frame.py
|
gordonwatts/dataframe_expressions
|
cf135415f739377e9c2accb82606957417c7e0e6
|
[
"MIT"
] | null | null | null |
import ast
from typing import List, Optional, cast
import pytest
from dataframe_expressions import (
Column, DataFrame, ast_Callable, ast_Column, ast_DataFrame, define_alias)
from .utils_for_testing import reset_var_counter # NOQA
# numpy math functions (??)
# Advanced math operators
# (https://docs.python.org/3/reference/datamodel.html?highlight=__add__#emulating-numeric-types)
# the operator "in" (contains)? to see if one jet is in another collection?
# the operator len
# Make sure if d1 and d2 are two DataFrames of different sizes or sources, then d1[d2.x] fails
# Filter functions - so pass a filter that gets called with whatever you are filtering on, and
# returns.
# https://stackoverflow.com/questions/847936/how-can-i-find-the-number-of-arguments-of-a-python-function
# Aliases allow some recursion, but with total flexibility. If there is a cycle and you want
# things done a second time, they won't be. Perhaps when we have an actual problem we can
# resolve this.
def find_df(a: Optional[ast.AST]) -> List[ast_DataFrame]:
result: List[ast_DataFrame] = []
class find_it(ast.NodeVisitor):
def visit_ast_DataFrame(self, a: ast_DataFrame):
result.append(a)
if a is None:
return []
find_it().visit(a)
return result
def test_empty_ctor():
DataFrame()
def test_dataframe_attribute():
d = DataFrame()
ref = d.x
assert isinstance(ref, DataFrame)
assert isinstance(ref.child_expr, ast.AST)
assert ast.dump(ref.child_expr) == "Attribute(value=ast_DataFrame(), attr='x', ctx=Load())"
@pytest.mark.parametrize("comp_op, ast_type", [
(lambda a, b: a < b, ast.Lt),
(lambda a, b: a <= b, ast.LtE),
(lambda a, b: a > b, ast.Gt),
(lambda a, b: a >= b, ast.GtE),
(lambda a, b: a == b, ast.Eq),
(lambda a, b: a != b, ast.NotEq),
(lambda b, a: a < b, ast.Gt),
(lambda b, a: a <= b, ast.GtE),
(lambda b, a: a > b, ast.Lt),
(lambda b, a: a >= b, ast.LtE),
(lambda b, a: a == b, ast.Eq),
(lambda b, a: a != b, ast.NotEq),
])
def test_mask_operator_with_const(comp_op, ast_type):
d = DataFrame()
ref = comp_op(d.x, 10)
assert isinstance(ref, Column)
assert ref.type == type(bool)
assert isinstance(ref.child_expr, ast.Compare)
assert len(ref.child_expr.ops) == 1
assert len(ref.child_expr.comparators) == 1
left = ref.child_expr.left
right = ref.child_expr.comparators[0]
assert isinstance(left, ast_DataFrame)
assert isinstance(right, ast.Num)
assert isinstance(ref.child_expr.ops[0], ast_type)
def test_mask_operator_2nd_dataframe():
d = DataFrame()
ref = d.x < d.y
assert isinstance(ref, Column)
assert ref.type == type(bool)
assert ast.dump(ref.child_expr) == \
"Compare(left=ast_DataFrame(), ops=[Lt()], comparators=[ast_DataFrame()])"
assert isinstance(ref.child_expr, ast.Compare)
df = ref.child_expr.left # type: ast.AST
assert isinstance(df, ast_DataFrame)
parents = find_df(df.dataframe.child_expr)
assert len(parents) == 1
assert parents[0].dataframe is d
def test_mask_operator_and():
d = DataFrame()
ref1 = d.x != 10
ref2 = d.x != 8
ref3 = ref1 & ref2
assert ast.dump(ref3.child_expr) == \
"BoolOp(op=And(), values=[ast_Column(), ast_Column()])"
def test_mask_operator_and_attributes():
d = DataFrame()
ref1 = d.x
ref2 = d.x
ref3 = ref1 & ref2
assert ast.dump(ref3.child_expr) == \
"BoolOp(op=And(), values=[ast_DataFrame(), ast_DataFrame()])"
def test_mask_operator_or_attributes():
d = DataFrame()
ref1 = d.x
ref2 = d.x
ref3 = ref1 | ref2
assert ast.dump(ref3.child_expr) == \
"BoolOp(op=Or(), values=[Name(id='p', ctx=Load()), ast_DataFrame()])"
def test_mask_operator_and_attribute():
d = DataFrame()
ref1 = d.x
ref2 = d.x > 10
ref3 = ref1 & ref2
assert ast.dump(ref3.child_expr) == \
"BoolOp(op=And(), values=[ast_DataFrame(), ast_Column()])"
def test_mask_operator_invert_attributes():
d = DataFrame()
ref1 = d.x
ref3 = ~ref1
assert ref3.child_expr is not None
assert ast.dump(ref3.child_expr) == \
"UnaryOp(op=Invert(), operand=ast_DataFrame())"
def test_mask_operator_or():
d = DataFrame()
ref1 = d.x != 10
ref2 = d.x != 8
ref3 = ref1 | ref2
assert ast.dump(ref3.child_expr) == \
"BoolOp(op=Or(), values=[ast_Column(), ast_Column()])"
def test_mask_operator_not():
d = DataFrame()
ref1 = d.x != 10
ref3 = ~ref1
assert ast.dump(ref3.child_expr) == \
"UnaryOp(op=Invert(), operand=ast_Column())"
def test_invert_dataframe():
d = DataFrame()
ref1 = ~d
assert ref1.child_expr is not None
assert ast.dump(ref1.child_expr) == \
"UnaryOp(op=Invert(), operand=ast_DataFrame())"
assert ref1.filter is None
def test_masking_df():
d = DataFrame()
d1 = d[d.x > 10]
assert isinstance(d1, DataFrame)
assert isinstance(d1.filter, Column)
assert ast.dump(d1.filter.child_expr) == \
"Compare(left=ast_DataFrame(), ops=[Gt()], comparators=[Num(n=10)])"
def test_slicing_df():
d = DataFrame()
d1 = d[10]
assert isinstance(d1, DataFrame)
assert isinstance(d1.child_expr, ast.Subscript)
assert isinstance(d1.child_expr.slice, ast.Index)
assert isinstance(d1.child_expr.value, ast_DataFrame)
assert d1.child_expr.slice.value == 10
@pytest.mark.parametrize("bin_op, ast_op, reverse", [
(lambda a, b: a + b, ast.Add, False),
(lambda a, b: a - b, ast.Sub, False),
(lambda a, b: a * b, ast.Mult, False),
(lambda a, b: a / b, ast.Div, False),
(lambda a, b: b + a, ast.Add, True),
(lambda a, b: b - a, ast.Sub, True),
(lambda a, b: b * a, ast.Mult, True),
(lambda a, b: b / a, ast.Div, True),
])
def test_binary_operators(bin_op, ast_op, reverse):
d = DataFrame()
d1 = bin_op(d.x, 1000)
assert d1.filter is None
assert d1.child_expr is not None
assert isinstance(d1.child_expr, ast.BinOp)
left = d1.child_expr.left
right = d1.child_expr.right
if reverse:
left, right = right, left
assert ast.dump(left) == 'ast_DataFrame()'
assert ast.dump(right) == 'Num(n=1000)'
assert isinstance(d1.child_expr.op, ast_op)
def test_np_sin():
import numpy as np
d = DataFrame()
d1 = cast(DataFrame, np.sin(d.x)) # type: ignore
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Name(id='sin', ctx=Load()), args=[ast_DataFrame()], keywords=[])"
def test_python_abs():
d = DataFrame()
d1 = abs(d.x)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Name(id='abs', ctx=Load()), args=[ast_DataFrame()], keywords=[])"
def test_np_sin_kwargs():
import numpy as np
d = DataFrame()
d1 = cast(DataFrame, np.sin(d.x, bogus=22.0)) # type: ignore
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Name(id='sin', ctx=Load()), args=[ast_DataFrame()], " \
"keywords=[keyword(arg='bogus', value=Num(n=22.0))])"
def test_np_arctan2_with_args():
import numpy as np
d = DataFrame()
d1 = cast(DataFrame, np.arctan2(d.x, 100.0)) # type: ignore
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Name(id='arctan2', ctx=Load()), args=[ast_DataFrame(), " \
"Num(n=100.0)], keywords=[])"
def test_np_func_with_division():
import numpy as np
d = DataFrame()
f1 = np.log10(1.0/(d-1.0)) # type: ignore
from dataframe_expressions import dumps
assert '\n'.join(dumps(f1)) == '''df_1 = DataFrame()
df_2 = df_1 - 1.0
df_3 = 1.0 / df_2
df_4 = log10(df_3)'''
def test_np_func_where():
import numpy as np
d = DataFrame()
f1 = np.where(d.x > 0, d.x, d.y)
from dataframe_expressions import dumps
assert '\n'.join(dumps(cast(DataFrame, f1))) == '''df_1 = DataFrame()
df_2 = df_1.x
df_3 = df_2 > 0
df_4 = df_1.y
df_5 = np_where(df_3,df_2,df_4)'''
def test_np_func_histogram():
import numpy as np
d = DataFrame()
f1 = np.histogram(d.x, bins=50, range=(-0.5, 10.0))
from dataframe_expressions import dumps
assert '\n'.join(dumps(cast(DataFrame, f1))) == '''df_1 = DataFrame()
df_2 = df_1.x
df_3 = np_histogram(df_2,bins=50,range=(-0.5,10.0))'''
def test_fluent_function_no_args():
d = DataFrame()
d1 = d.count()
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), args=[], " \
"keywords=[])"
def test_fluent_function_pos_arg():
d = DataFrame()
d1 = d.count(22.0)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), " \
"args=[Num(n=22.0)], keywords=[])"
def test_fluent_function_kwarg():
d = DataFrame()
d1 = d.count(dude=22.0)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), args=[], " \
"keywords=[keyword(arg='dude', value=Num(n=22.0))])"
def test_test_fluent_function_df_arg():
d = DataFrame()
d1 = d.count(d)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), " \
"args=[ast_DataFrame()], keywords=[])"
def test_test_fluent_function_dfattr_arg():
d = DataFrame()
d1 = d.count(d.jets)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), " \
"args=[ast_DataFrame()], keywords=[])"
def test_test_fluent_function_dfattrattr_arg():
d = DataFrame()
d1 = d.jets.count(d.jets)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), " \
"args=[ast_DataFrame()], keywords=[])"
def test_test_fluent_function_dfattr1_arg():
d = DataFrame()
d1 = d.jets.count(d)
assert d1.filter is None
assert d1.child_expr is not None
assert ast.dump(d1.child_expr) == \
"Call(func=Attribute(value=ast_DataFrame(), attr='count', ctx=Load()), " \
"args=[ast_DataFrame()], keywords=[])"
def test_resolve_simple_alias():
define_alias("jets", "pts", lambda j: j.pt / 1000.0)
df = DataFrame()
df1 = df.jets.pts
assert df1.filter is None
assert df1.child_expr is not None
assert '1000' in ast.dump(df1.child_expr)
def test_resolve_hidden_alias():
define_alias("jets", "pt", lambda j: j.pt / 1000.0)
df = DataFrame()
df1 = df.jets.pt
assert df1.filter is None
assert df1.child_expr is not None
assert '1000' in ast.dump(df1.child_expr)
def test_resolve_dependent():
define_alias("jets", "pts", lambda j: j.pt / 1000.0)
define_alias("jets", "pt", lambda j: j.pt / 2000.0)
df = DataFrame()
df1 = df.jets.pts
assert df1.filter is None
assert df1.child_expr is not None
assert '1000' in ast.dump(df1.child_expr)
assert isinstance(df1.child_expr, ast.BinOp)
assert df1.child_expr.left is not None
assert isinstance(df1.child_expr.left, ast_DataFrame)
df2 = cast(ast_DataFrame, df1.child_expr.left)
assert df2.dataframe.child_expr is not None
assert '2000' in ast.dump(df2.dataframe.child_expr)
def check_for_compare(e: ast.AST, check: str):
assert isinstance(e, ast.Compare)
left = e.left # type: ast.AST
assert isinstance(left, ast_DataFrame)
assert left.dataframe.child_expr is not None
t = ast.dump(left.dataframe.child_expr)
assert check in t
def test_resolve_in_filter():
define_alias("jets", "pt", lambda j: j.pt / 2000.0)
df = DataFrame()
df1 = df.jets.pt[df.jets.pt > 50.0]
assert df1.filter is not None
assert isinstance(df1.filter, Column)
check_for_compare(df1.filter.child_expr, '2000')
def test_resolve_in_filter_twice():
define_alias("jets", "pt", lambda j: j.pt / 2000.0)
df = DataFrame()
df1 = df.jets.pt[(df.jets.pt > 50.0) & (df.jets.pt < 60.0)]
assert df1.filter is not None
assert isinstance(df1.filter.child_expr, ast.BoolOp)
bool_op = df1.filter.child_expr
assert len(bool_op.values) == 2
op_1 = bool_op.values[0] # type: ast.AST
op_2 = bool_op.values[1] # type: ast.AST
assert isinstance(op_1, ast_Column)
assert isinstance(op_2, ast_Column)
check_for_compare(op_1.column.child_expr, '2000')
check_for_compare(op_2.column.child_expr, '2000')
def test_lambda_argument():
df = DataFrame()
df1 = df.apply(lambda e: e)
assert df1.child_expr is not None
assert isinstance(df1.child_expr, ast.Call)
assert len(df1.child_expr.args) == 1
arg1 = df1.child_expr.args[0]
assert isinstance(arg1, ast_Callable)
def test_lambda_in_filter():
df = DataFrame()
df1 = df[df.apply(lambda e: e == 1)]
assert isinstance(df1.child_expr, ast_DataFrame)
assert df1.filter is not None
assert isinstance(df1.filter, Column)
assert isinstance(df1.filter.child_expr, ast.Call)
def test_shallow_copy():
df = DataFrame()
import copy
df1 = copy.copy(df)
assert df1 is not df
assert df1.child_expr is None
assert df1.filter is None
def test_shallow_copy_1():
df = DataFrame()
df1 = df.x
import copy
df2 = copy.copy(df1)
assert df2 is not df1
assert df2.child_expr is not None
assert df2.filter is None
def test_deep_copy():
df = DataFrame()
import copy
df1 = copy.deepcopy(df)
assert df1 is not df
assert df1.child_expr is None
assert df1.filter is None
def test_deep_copy_1():
df = DataFrame()
df1 = df.x
import copy
df2 = copy.deepcopy(df1)
assert df2 is not df1
assert df2.child_expr is not None
assert df2.filter is None
assert isinstance(df2.child_expr, ast.Attribute)
assert isinstance(df2.child_expr.value, ast_DataFrame)
df2_parent = cast(ast_DataFrame, df2.child_expr.value)
assert df2_parent is not df
| 28.827112
| 122
| 0.647584
| 2,260
| 14,673
| 4.05177
| 0.116814
| 0.084526
| 0.037239
| 0.040952
| 0.677842
| 0.601507
| 0.535001
| 0.470897
| 0.414328
| 0.392923
| 0
| 0.033267
| 0.213317
| 14,673
| 508
| 123
| 28.883858
| 0.760028
| 0.059701
| 0
| 0.474801
| 0
| 0
| 0.152389
| 0.073109
| 0
| 0
| 0
| 0
| 0.33687
| 1
| 0.114058
| false
| 0
| 0.047745
| 0
| 0.169761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0c41cacd5163331beb9572314dcb4bf4d9b8235
| 12,660
|
py
|
Python
|
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
main.py
|
ESSAKHI10/SharpZone
|
1d145cb22c5a8f6777d2f6e05a9a16f8e528c92c
|
[
"MIT"
] | null | null | null |
# import encode
import eel
import cv2
import io
import numpy as np
import base64
import os
import time
import face_recognition
import pickle
import imutils
import datetime
from multiprocessing.pool import ThreadPool
import random
import shutil
from database import *
from camera import VideoCamera
from SceneChangeDetect import sceneChangeDetect
import login
import encode_student_data
import warnings
warnings.filterwarnings('ignore')
eel.init('web')
# ------ Global Variable ----
camera_status = 1
capture_status = False
student_id = ''
fullnamee = ''
def recogFace(data, encoding):
return face_recognition.compare_faces(data["encodings"], encoding, tolerance=0.5)
def recogEncodings(rgb, boxes):
return face_recognition.face_encodings(rgb, boxes)
def recogLoc(rgb):
return face_recognition.face_locations(rgb, model="hog")
def gen1(url, student_class):
# change camera status for loading
eel.camera_status(3)
pool1 = ThreadPool(processes=1)
pool2 = ThreadPool(processes=2)
pool3 = ThreadPool(processes=3)
pool4 = ThreadPool(processes=4)
conn = create_connection()
cursor = conn.cursor()
sql = "SELECT student_id ,fullname FROM student_data WHERE class = ? "
val = [student_class]
cursor.execute(sql, val)
student_data = cursor.fetchall()
print('student list:')
print(student_data)
# Load the known face and encodings
# print("[INFO] loading encodings ..")
data = pickle.loads(open("encodings.pickle", "rb").read())
Attendees_Names = {}
encodings = []
boxes = []
frame = 0
Scene = sceneChangeDetect()
video = cv2.VideoCapture(url)
time.sleep(1.0)
global camera_status
camera_status = 1
# change the camera status
eel.camera_status(1)
while camera_status == 1:
frame += 1
if (frame == 100):
frame = 0
# print(camera_status)
success, img = video.read()
# if camera can't read frame (camera error)
if not success:
eel.camera_status(2)
break
if Scene.detectChange(img):
# Convert BGR to RGB
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rgb = imutils.resize(img, width=900)
r = img.shape[1] / float(rgb.shape[1])
# detect boxes
if (frame % 2 == 0):
boxes = pool1.apply_async(recogLoc, (rgb,)).get()
encodings = pool3.apply_async(
recogEncodings, (rgb, boxes,)).get()
names = []
# square over the facial encodings
for encoding in encodings:
# attempt to match each face, then initialise a dictionary
# matches = face_recognition.compare_faces(data["encodings"], encoding, tolerance=0.5)
matches = pool2.apply_async(recogFace, (data, encoding,)).get()
name = "Unknown_"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces, then initialize a
# dictionary to count the total number of times each face matched
matchedIds = [i for (i, b) in enumerate(matches) if b]
print('matches id ')
print(matchedIds)
counts = {}
counts.clear()
# loop over the recognized faces
for i in matchedIds:
name = data["names"][i]
print(name)
counts[name] = counts.get(name, 0) + 1
print('this is counts')
print(counts)
# determine the recognized faces with largest number
name = max(counts, key=counts.get)
print('best match:')
print(name)
if (name not in Attendees_Names):
Attendees_Names[name] = 1
for y in student_data:
nom = name.split('_')
if nom[0] in y:
print(y[1])
x = datetime.datetime.now()
date = str(x.day) + "-" + \
str(x.month) + "-" + str(x.year)
pool4.apply_async(
submit_live_attendance, (nom[0], student_class, date,))
eel.updateAttendance(y[1])()
names.append(name)
# loop over recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
print('names')
print(names)
top = int(top * r)
right = int(right * r)
bottom = int(bottom * r)
left = int(left * r)
# draw the predicted face name on the image
cv2.rectangle(img, (left, top), (right, bottom),
(0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
nom = name.split('_')
cv2.putText(img, nom[1], (left, y), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 255, 0), 2)
ret, jpeg = cv2.imencode('.jpg', img)
img = jpeg.tobytes()
yield img
# camera is stopped by user
if success:
eel.camera_status(0)
@eel.expose
def start_video_py(cam_type, student_class):
switch = {
'1': 0,
'2': 1,
}
y = gen1(switch[cam_type], student_class)
print('frame generator created:', y)
for each in y:
blob = base64.b64encode(each)
blob = blob.decode("utf-8")
eel.updateImageSrc(blob)()
@eel.expose
def stop_video_py():
global camera_status
camera_status = 0
@eel.expose
def capture_photo_py(url):
print('smile')
y = gen(url)
for each in y:
blob = base64.b64encode(each)
blob = blob.decode("utf-8")
eel.updateStudentImageSrc(blob)()
def gen(url):
video = cv2.VideoCapture(url)
global camera_status
global capture_status
camera_status = 1
while camera_status == 1:
success, img = video.read()
if not success:
print("camera not connected")
break
if capture_status:
save_path = 'dataset/' + student_id + '_' + fullnamee
filename = save_path + "/photo" + \
str(random.randint(0, 999)) + ".jpg"
if not os.path.exists(save_path):
os.makedirs(save_path)
cv2.imwrite(filename, img)
send_capture_photo(img)
capture_status = False
ret, jpeg = cv2.imencode('.jpg', img)
img = jpeg.tobytes()
yield img
def submit_live_attendance(stu_id, student_class, date):
attendance_class = {
"C2": "INSERT INTO C2(student_id,attendance_date) VALUES(?, ?);",
"C1": "INSERT INTO C1(student_id,attendance_date) VALUES(?, ?);",
}
# adding data to database
conn = create_connection()
cursor = conn.cursor()
sql = attendance_class[student_class]
val = [stu_id, date]
cursor.execute(sql, val)
conn.commit()
conn.close()
@eel.expose
def save_photo(studentId, full):
global student_id
global fullnamee
global capture_status
student_id = studentId
fullnamee = full
capture_status = True
def send_capture_photo(img):
ret, jpeg = cv2.imencode('.jpg', img)
img = jpeg.tobytes()
blob = base64.b64encode(img)
blob = blob.decode("utf-8")
eel.showCapturePhoto(blob)
# adding new student data
@eel.expose
def submit_student_data(stu_id, fullname, student_class, session):
try:
encode_student_data.encode_student_data(stu_id)
# adding data to database
conn = create_connection()
cursor = conn.cursor()
sql = "INSERT INTO student_data(student_id,fullname,class,session) VALUES(?, ?, ?, ?);"
val = [stu_id, fullname, student_class, session]
cursor.execute(sql, val)
conn.commit()
eel.student_data_saved()
conn.close()
except Exception:
# delete face data from file (use stu_id, the id just submitted)
delete_student_data_file(stu_id)
eel.failed_data_submit()
@eel.expose
def fetch_class_data(search_class):
conn = create_connection()
cursor = conn.cursor()
val = [search_class]
sql = "SELECT * FROM student_data WHERE class = ?"
result = cursor.execute(sql, val)
for x in result:
eel.setTableData(x[0], x[1], x[2], x[3])
conn.close()
def delete_student_data_file(student_id):
# delete face data from file
# load the face data
with open('encodings.pickle', 'rb') as f:
face_data = pickle.load(f)
index = []
encodings = face_data['encodings']
names = face_data['names']
# count face data length
for i, item in enumerate(names):
if student_id in item:
index.append(i)
# rebuild both lists without the matched indices (the original removed names
# by value, which fails because stored names are "<id>_<fullname>")
names = [n for i, n in enumerate(names) if i not in index]
encodings = [e for i, e in enumerate(encodings) if i not in index]
# saved modified face data
face_data['names'] = names
face_data['encodings'] = encodings
f = open("encodings.pickle", "wb")
f.write(pickle.dumps(face_data))
f.close()
@eel.expose
def deleteStudent(student_id):
try:
# delete student image folder
try:
path = 'dataset/' + student_id
shutil.rmtree(path)
except Exception as e:
print(e)
# delete student data from database
conn = create_connection()
cursor = conn.cursor()
val = [student_id]
sql = "DELETE FROM student_data where student_id = ?"
cursor.execute(sql, val)
conn.commit()
conn.close()
# print("delete success database")
# delete face data from file
delete_student_data_file(student_id)
eel.deleteStatus(student_id)
except Exception as e:
print(e)
eel.deleteStatus("")
@eel.expose
def fetchAttendance(attendanceClass, attendanceDate):
student_class = {
'C1': "SELECT DISTINCT(d.student_id),d.fullname,d.class,ac.attendance_date FROM C1 ac,student_data d WHERE ac.student_id=d.student_id AND attendance_date = ?;",
'C2': "SELECT DISTINCT(d.student_id),d.fullname,d.class,ac.attendance_date FROM C2 ac,student_data d WHERE ac.student_id=d.student_id AND attendance_date = ?;",
}
conn = create_connection()
cursor = conn.cursor()
val = [attendanceDate]
sql = student_class[attendanceClass]
cursor.execute(sql, val)
result = cursor.fetchall()
print(len(result))
if len(result) > 0:
for x in result:
eel.attendanceTable(x[0], x[1], x[2], x[3])
else:
eel.attendanceTable("no result found", "", "", "")
conn.close()
@eel.expose
def fetch_graph_data(graphClass):
student_class = {
'C1': "SELECT DISTINCT(attendance_date) FROM C1 ORDER BY attendance_date ASC LIMIT 06 ",
'C2': "SELECT DISTINCT(attendance_date) FROM C2 ORDER BY attendance_date ASC LIMIT 06 ",
}
attendance_class = {
'C1': "SELECT COUNT(DISTINCT(student_id)) FROM C1 WHERE attendance_date = ? ;",
'C2': "SELECT COUNT(DISTINCT(student_id)) FROM C2 WHERE attendance_date = ? ;",
}
conn = create_connection()
cursor = conn.cursor()
sql = student_class[graphClass]
result = cursor.execute(sql)
date_arr = []
data_arr = []
for x in result:
date_arr.append(x[0])
# print(date_arr)
sql = attendance_class[graphClass]
for x in date_arr:
val = [x]
result = cursor.execute(sql, val)
for x in result:
data_arr.append(x[0])
# print(data_arr)
cursor.close()
eel.updateGraph(date_arr, data_arr)
eel.start('template/pages/samples/login.html', size=(1307, 713))
#eel.start('template/index.html', size=(1307, 713))
# eel.start('dashboard.html', size=(1307, 713))
| 29.648712
| 169
| 0.562243
| 1,454
| 12,660
| 4.770289
| 0.21458
| 0.033737
| 0.015571
| 0.02624
| 0.298443
| 0.241494
| 0.190744
| 0.153979
| 0.128316
| 0.112168
| 0
| 0.019573
| 0.334123
| 12,660
| 426
| 170
| 29.71831
| 0.803203
| 0.098262
| 0
| 0.326797
| 0
| 0.006536
| 0.118904
| 0.04347
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.065359
| 0.009804
| 0.130719
| 0.065359
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39f81d8b6eef50e0aea91f95d8884d5a0a59d256
| 3,713
|
py
|
Python
|
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
models/job.py
|
k-wojcik/kylin_client_tool
|
0fe827d1c8a86e3da61c85c48f78ce03c9260f3c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'Huang, Hua'
from models.object import JsonSerializableObj
class CubeJobStatus:
NEW = 'NEW'
PENDING = 'PENDING'
RUNNING = 'RUNNING'
ERROR = 'ERROR'
FINISHED = 'FINISHED'
DISCARDED = 'DISCARDED'
class JobInstance(JsonSerializableObj):
def __init__(self):
JsonSerializableObj.__init__(self)
self.uuid = None
self.last_modified = None
self.name = None
self.type = None
self.duration = None
self.related_cube = None
self.related_segment = None
self.exec_start_time = None
self.exec_end_time = None
self.mr_waiting = None
self.steps = None
self.submitter = None
@staticmethod
def from_json(json_dict):
if not json_dict or not isinstance(json_dict, dict): return None
ji = JobInstance()
ji.uuid = json_dict.get('uuid')
ji.last_modified = json_dict.get('last_modified')
ji.name = json_dict.get('name')
ji.type = json_dict.get('type')
ji.duration = json_dict.get('duration')
ji.related_cube = json_dict.get('related_cube')
ji.related_segment = json_dict.get('related_segment')
ji.exec_start_time = json_dict.get('exec_start_time')
ji.exec_end_time = json_dict.get('exec_end_time')
ji.mr_waiting = json_dict.get('mr_waiting')
# deserialize json for steps
if json_dict.get('steps') and isinstance(json_dict.get('steps'), list):
step_list = json_dict.get('steps')
ji.steps = [JobStep.from_json(step) for step in step_list]
ji.submitter = json_dict.get('submitter')
return ji
def get_status(self):
if not self.steps:
return CubeJobStatus.ERROR
for job_step in self.steps:
if job_step.step_status == CubeJobStatus.ERROR:
return CubeJobStatus.ERROR
if job_step.step_status == CubeJobStatus.DISCARDED:
return CubeJobStatus.DISCARDED
# check the last step
job_step = self.steps[-1]
if job_step.step_status != CubeJobStatus.FINISHED:
return CubeJobStatus.RUNNING
return CubeJobStatus.FINISHED
def get_current_step(self):
if not self.steps:
return 0
step_id = 1
for job_step in self.steps:
if job_step.step_status != CubeJobStatus.FINISHED:
return step_id
step_id += 1
return len(self.steps)
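# Hedged sketch (added for illustration; JobStep is defined below and resolves at call time):
def _job_status_example():
    step = JobStep()
    step.step_status = CubeJobStatus.RUNNING
    ji = JobInstance()
    ji.steps = [step]
    return ji.get_status()  # -> 'RUNNING': no ERROR/DISCARDED step, last step not FINISHED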
class JobStep(JsonSerializableObj):
def __init__(self):
JsonSerializableObj.__init__(self)
self.name = None
self.sequence_id = None
self.exec_cmd = None
self.interrupt_cmd = None
self.exec_start_time = None
self.exec_end_time = None
self.exec_wait_time = None
self.step_status = None
self.cmd_type = None
self.info = None
self.run_async = None
@staticmethod
def from_json(json_dict):
if not json_dict or type(json_dict) != dict: return None
js = JobStep()
js.name = json_dict.get('name')
js.sequence_id = json_dict.get('sequence_id')
js.exec_cmd = json_dict.get('exec_cmd')
js.interrupt_cmd = json_dict.get('interrupt_cmd')
js.exec_start_time = json_dict.get('exec_start_time')
js.exec_end_time = json_dict.get('exec_end_time')
js.exec_wait_time = json_dict.get('exec_wait_time')
js.step_status = json_dict.get('step_status')
js.cmd_type = json_dict.get('cmd_type')
js.info = json_dict.get('info')
js.run_async = json_dict.get('run_async')
return js
| 30.434426
| 75
| 0.622408
| 481
| 3,713
| 4.528067
| 0.153846
| 0.113866
| 0.126263
| 0.041322
| 0.379706
| 0.353535
| 0.331497
| 0.308999
| 0.252984
| 0.188705
| 0
| 0.001881
| 0.284137
| 3,713
| 121
| 76
| 30.68595
| 0.817532
| 0.018314
| 0
| 0.255319
| 0
| 0
| 0.077177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.010638
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39f8e72f0d8a4ab5e6ca1adccd579c3125c23d90
| 1,643
|
py
|
Python
|
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | 6
|
2020-09-09T15:31:02.000Z
|
2022-02-16T04:57:55.000Z
|
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | null | null | null |
tools.py
|
yflyzhang/cascade_virality
|
9d856a3fbe45330a9434ba4bad9d5f248e2f1dd5
|
[
"MIT"
] | null | null | null |
import numpy as np
import networkx as nx
# For illustration purpose only [easy to understand the process]
# -----------------------------
def pure_cascade_virality(G):
'''G is a directed graph (tree)'''
if not nx.is_weakly_connected(G):
# return None
return
nodes = [k for (k,v) in G.out_degree() if v>0] # non-leaf nodes
virality = 0
for source in nodes:
path_lens = nx.single_source_shortest_path_length(G, source) # shortest path length
path_lens = {k: v for k, v in path_lens.items() if v > 0} # filter 0
virality += np.array(list(path_lens.values())).mean() # mean length from source to other nodes
return virality
# Works in a recursive manner [more efficient]
# -----------------------------
def recursive_path_length(G, V, seed):
'''G is a directed graph (tree)'''
V[seed] = []
for i in G.successors(seed):
V[seed].append(1)
V[seed] += [j+1 for j in recursive_path_length(G, V, i)]
return V[seed]
def recursive_cascade_virality(G, source=None):
'''G is a directed graph (tree)'''
if not nx.is_weakly_connected(G):
# return None
return
if not source:
# if root is not given, find it by yourself
source = [k for (k,v) in G.in_degree() if v==0][0]
V_dic = {}
recursive_path_length(G, V_dic, source)
# return V_dic # return original paths
virality = 0
for (k, v) in V_dic.items():
# print(k, v)
if len(v)>0:
virality += np.mean(v)
return virality # return cascade virality
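# Usage sketch (added for illustration): both implementations should agree on
# the same cascade. The 4-node tree below is hypothetical; root 0 has mean
# depth 4/3 over {1, 2, 3} and node 1 has mean depth 1 over {3}, so the
# cascade virality is 4/3 + 1 = 7/3.
if __name__ == '__main__':
    _G = nx.DiGraph([(0, 1), (0, 2), (1, 3)])
    assert abs(pure_cascade_virality(_G) - recursive_cascade_virality(_G)) < 1e-9
    print(recursive_cascade_virality(_G))  # ~2.333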
| 23.471429
| 105
| 0.57395
| 241
| 1,643
| 3.796681
| 0.315353
| 0.013115
| 0.021858
| 0.030601
| 0.246995
| 0.178142
| 0.135519
| 0.135519
| 0.135519
| 0.135519
| 0
| 0.008518
| 0.285453
| 1,643
| 69
| 106
| 23.811594
| 0.770869
| 0.288497
| 0
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39f9818d1295e4cbcfc1bb13178c81b8bc72f7ba
| 1,492
|
py
|
Python
|
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | 1
|
2020-11-05T10:24:15.000Z
|
2020-11-05T10:24:15.000Z
|
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | null | null | null |
pyjira/actions.py
|
FulcrumIT/pyjira
|
8ed0d22136808ba95ace253e66dd4ad7bb6b387a
|
[
"MIT"
] | 2
|
2017-05-15T20:06:25.000Z
|
2020-11-17T09:46:34.000Z
|
import pyjira.api as _api
import json as _json
def get_issues(id, limit=50):
"""Return 50 issues for a project.
Parameters:
- id: id of a project.
- limit: max number of results to be returned.
"""
return _api.rest("/search?jql=project=" + str(id) + "&maxResults=" + str(limit))
def get_issue(id):
"""Get issue and its details.
Parameters:
- id: id of an issue.
"""
return _api.rest("/issue/" + str(id))
def get_all_fields():
"""Get all existing fields."""
return _api.rest("/field")
def get_field(id):
"""Get field and its details.
Parameters:
- id: id of a field.
"""
fields = _json.loads(get_all_fields())
for f in fields:
if (f["id"] == str(id) or
f["id"].replace("customfield_", "") == str(id)):
return _json.dumps(f)
def get_issue_fields(id, field_names_enabled=True):
"""Get all fields listed for an issue.
Parameters:
- id: id of an issue.
- field_names_enabled: if False, returns result with "customfield_" names.
True by default.
"""
issue = _json.loads(get_issue(id))
result = {}
for key, value in issue["fields"].items():
if ("customfield_" in key and
value and field_names_enabled):
field = _json.loads(get_field(key))
field_name = field["name"]
result[field_name] = value
elif value:
result[key] = value
return _json.dumps(result)
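# Usage sketch (added; "PROJ-123" is a hypothetical issue key, and _api must be
# configured to reach a live JIRA instance, so this is left commented out):
# fields = _json.loads(get_issue_fields("PROJ-123"))
# print(fields.get("summary"))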
| 24.866667
| 84
| 0.589812
| 202
| 1,492
| 4.19802
| 0.29703
| 0.035377
| 0.066038
| 0.075472
| 0.125
| 0.103774
| 0.068396
| 0
| 0
| 0
| 0
| 0.003711
| 0.27748
| 1,492
| 59
| 85
| 25.288136
| 0.782931
| 0.283512
| 0
| 0
| 0
| 0
| 0.08478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39fb765d92c4d3395b8dec3f9fb952f0fa19dddd
| 963
|
py
|
Python
|
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | null | null | null |
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | 1
|
2021-09-11T22:53:48.000Z
|
2021-09-11T22:53:48.000Z
|
gongish/cli/serve.py
|
meyt/gongish
|
0e3cd478677c19c9331a2b563ce792d16f2860b3
|
[
"MIT"
] | null | null | null |
import importlib
from argparse import ArgumentParser
from wsgiref.simple_server import make_server
p = ArgumentParser(
prog="gongish serve", description="Serve a WSGI application"
)
p.add_argument(
"module",
nargs="?",
help="Module and application name (e.g: myapp:app)",
type=str,
)
p.add_argument(
"-b",
"--bind",
type=str,
help="Bind address (default: localhost:8080)",
default="localhost:8080",
)
def main(args):
args = p.parse_args(args)
module_name, module_attr = args.module.split(":")
module = importlib.import_module(module_name)
app = getattr(module, module_attr)
bind = args.bind
if bind.startswith(":"):
host = "localhost"
port = bind[1:]
elif ":" in bind:
host, port = bind.split(":")
else:
host = bind
port = "8080"
httpd = make_server(host, int(port), app)
print(f"Serving http://{host}:{port}")
httpd.serve_forever()
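# Usage sketch (added; `myapp` with a WSGI callable `app` is hypothetical).
# The equivalent CLI would be `gongish serve myapp:app -b :8000`. Commented out
# because serve_forever() blocks:
# main(["myapp:app", "--bind", ":8000"])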
| 22.395349
| 64
| 0.626168
| 120
| 963
| 4.925
| 0.483333
| 0.033841
| 0.040609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01752
| 0.229491
| 963
| 42
| 65
| 22.928571
| 0.778976
| 0
| 0
| 0.111111
| 0
| 0
| 0.200415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.111111
| 0
| 0.138889
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39fcb7feb468394c971a4be4fe1ebd1c774cf3a6
| 1,376
|
py
|
Python
|
examples/plot/lines.py
|
beidongjiedeguang/manim-express
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 12
|
2021-06-14T07:28:29.000Z
|
2022-02-25T02:49:49.000Z
|
examples/plot/lines.py
|
beidongjiedeguang/manim-kunyuan
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 1
|
2022-02-01T12:30:14.000Z
|
2022-02-01T12:30:14.000Z
|
examples/plot/lines.py
|
beidongjiedeguang/manim-express
|
e9c89b74da3692db3ea9b568727e78d5cbcef503
|
[
"MIT"
] | 2
|
2021-05-13T13:24:15.000Z
|
2021-05-18T02:56:22.000Z
|
from examples.example_imports import *
from manim_express.eager import PlotObj
scene = EagerModeScene(screen_size=Size.bigger)
graph = Line().scale(0.2)
# t0 = time.time()
#
# delta_t = 0.5
# for a in np.linspace(3, 12, 3):
# graph2 = ParametricCurve(lambda t: [t,
# 0.8 * np.abs(t) ** (6 / 7) + 0.9 * np.sqrt(abs(a - t ** 2)) * np.sin(
# a * t + 0.2),
# 0],
# t_range=(-math.sqrt(a), math.sqrt(a))).scale(0.5)
# scene.play(Transform(graph, graph2), run_time=3)
ps = np.random.rand(10, 3)
print(ps.shape)
print(ps[:, 0].max())
theta = np.linspace(0, 2 * PI, 100)
x = np.cos(theta)
y = np.sin(theta)
p = PlotObj(x, y)
scene.play(ShowCreation(p))
s = PlotObj(theta, x).set_color(RED)
scene.play(ShowCreation(s))
grid = p.get_grid(3, 3)
scene.add(grid)
scene.play(grid.animate.shift(LEFT))
scene.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN, RED))
scene.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
# scene.play(grid.animate.apply_complex_function(np.exp), run_time=5)
scene.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
scene.hold_on()
| 27.52
| 111
| 0.5625
| 211
| 1,376
| 3.56872
| 0.421801
| 0.095618
| 0.086321
| 0.132802
| 0.15405
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045635
| 0.267442
| 1,376
| 49
| 112
| 28.081633
| 0.701389
| 0.375727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39fede8d13d0249be971c45d4492a0a209527ae6
| 785
|
py
|
Python
|
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
get_observation.py
|
RadixSeven/FhirGaP
|
1fb8ff8b86089cdec9b1f796e06aeb0e20db14a0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Note that this is python3 only
import argparse
import requests
parser = argparse.ArgumentParser(
"Get an observation from a FHIR server with authentication")
parser.add_argument(
"id", help="The observation id to retrieve")
parser.add_argument(
"auth", default="Admin",
help="The authorization string to use. \"Bearer \" will be added to "
"the front.")
parser.add_argument(
"--url", default="http://35.245.174.218:8080/hapi-fhir-jpaserver/fhir/",
help="The base url of the server")
args = parser.parse_args()
headers = {
'Content-Type': "application/fhir+json; charset=utf-8",
'Authorization': "Bearer " + args.auth,
}
response = requests.get(args.url + "/Observation/" + args.id, headers=headers)
print(response.json())
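# Example invocation (added; the observation id and token are hypothetical):
#   python get_observation.py 42 my-token --url http://fhir.example.org/fhir
# which sends GET <url>/Observation/42 with header "Authorization: Bearer my-token".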
| 30.192308
| 78
| 0.699363
| 107
| 785
| 5.093458
| 0.607477
| 0.049541
| 0.093578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025602
| 0.15414
| 785
| 25
| 79
| 31.4
| 0.795181
| 0.064968
| 0
| 0.15
| 0
| 0
| 0.443228
| 0.030096
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2600aabf67e890934f21d69c79a38729246c8b46
| 3,989
|
py
|
Python
|
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
model_training_scripts/image_processing.py
|
jc2554/iVending
|
2a6b04143a56e202eba99b0a509945cf31aa956d
|
[
"MIT"
] | null | null | null |
"""
script to post-process training images by using OpenCV face detection
and normalization
MIT License
Copyright (c) 2019 JinJie Chen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import cv2
import numpy as np
import os
"""
process all images in the user_id subdirectory and save the processed images in
the image_data/user_id directory
"""
def process_images(user_id):
images = []
labels = []
labels_dic = {}
# list of people (subdirectory folder names)
people = [person for person in os.listdir("raw_image_data/")] if user_id == -1 else [str(user_id)]
count = 0
for i, person in enumerate(people):
labels_dic[i] = person
image_names = [image for image in os.listdir("raw_image_data/" + person)]
if not os.path.exists('image_data/'+person):
os.makedirs('image_data/'+person)
for j, image_name in enumerate(image_names):
image = cv2.imread("raw_image_data/" + person + '/' + image_name, 1)
images.append(image)
labels.append(person)
            # face detection using the OpenCV Cascade Classifier
scale_factor = 1.2
min_neighbors = 5
min_size = (5, 5)
biggest_only = True
faces_coord = classifier.detectMultiScale(image,
scaleFactor=scale_factor,
minNeighbors=min_neighbors,
minSize=min_size,
flags=cv2.CASCADE_SCALE_IMAGE)
            if len(faces_coord) > 0:  # skip images where no face was detected
                faces = normalize_faces(image, faces_coord)
cv2.imwrite('image_data/'+person+'/%s.jpeg' % (j), faces[0])
count += 1
print("Number of face image Generated: ", count)
return (images, np.array(labels), labels_dic)
"""
Normalize image by
cropping out the face from the image using the bounding box, then
resizing the image with interpolation using OpenCV
"""
def normalize_faces(image, faces_coord, size=(160, 160)):
faces = []
# cut image by the bounding box
for (x, y, w, h) in faces_coord:
w_rm = int(0.3 * w / 2)
faces.append(image[y: y + h, x + w_rm: x + w - w_rm])
images_norm = []
#resize image
for face in faces:
        if face.shape[:2] < size:  # compare the face crop (not the whole image) to the target size
image_norm = cv2.resize(face, size, interpolation=cv2.INTER_AREA)
else:
image_norm = cv2.resize(face, size, interpolation=cv2.INTER_CUBIC)
images_norm.append(image_norm)
return images_norm
parser = argparse.ArgumentParser()
parser.add_argument(
'--user', help='user id, -1 for all')
args = parser.parse_args()
print(args)
classifier = cv2.CascadeClassifier("../src/models/haarcascade_frontalface_default.xml")
images, labels, labels_dic = process_images(args.user)
print("num images: ", len(images))
print("labels_dic: ", labels_dic)
| 36.263636
| 102
| 0.656806
| 534
| 3,989
| 4.799625
| 0.400749
| 0.034335
| 0.029263
| 0.008584
| 0.077253
| 0.054623
| 0.036676
| 0.036676
| 0.036676
| 0
| 0
| 0.011186
| 0.260466
| 3,989
| 109
| 103
| 36.59633
| 0.857627
| 0.324141
| 0
| 0
| 0
| 0
| 0.088934
| 0.020082
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.142857
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260184f873fdef40a6660e5cdbc4d152fa8c734a
| 1,816
|
py
|
Python
|
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
flask_map.py
|
zenranda/proj5-map
|
13dc8866483f45ac806342c1b3aa2eec1354a0dc
|
[
"Artistic-2.0"
] | null | null | null |
import flask
from flask import render_template
from flask import request
from flask import url_for
import json
import logging
###
# Globals
###
app = flask.Flask(__name__)
import CONFIG
###
# Pages
###
@app.route("/")
@app.route("/index")
@app.route("/map")
def index():
app.logger.debug("Main page entry")
if 'map' not in flask.session:
app.logger.debug("Sending map file")
app.logger.debug("Sending keys...")
with open('SECRETS.py') as key: #sends access token to the page
ent = "" #in theory, sensitive information
for line in key:
while ent == "":
ent = line
flask.session['confidental'] = ent
app.logger.debug("Sending loc data...")
with open('POI.txt') as points:
data = [] #reads the list of points
for line in points:
item = []
line = line.strip()
k = line.split("|")
item.append(k[0]) #puts each part of the point (name, lat, long) into a list
item.append(k[1])
item.append(k[2])
data.append(item) #adds the list with the data to another list
flask.session['points'] = data #sends that list to jinja
return flask.render_template('map.html')
@app.errorhandler(404)
def page_not_found(error):
app.logger.debug("Page not found")
flask.session['linkback'] = flask.url_for("index")
return flask.render_template('page_not_found.html'), 404
#############
#
# Set up to run from cgi-bin script, from
# gunicorn, or stand-alone.
#
app.secret_key = CONFIG.secret_key
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
| 25.222222
| 91
| 0.614537
| 251
| 1,816
| 4.354582
| 0.410359
| 0.049405
| 0.064044
| 0.05764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009489
| 0.245595
| 1,816
| 71
| 92
| 25.577465
| 0.788321
| 0.161344
| 0
| 0
| 0
| 0
| 0.147574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.152174
| 0
| 0.23913
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2602aa16755c35ffe309d7e1deefd2b15d53fedd
| 3,717
|
py
|
Python
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 1
|
2021-12-13T11:52:39.000Z
|
2021-12-13T11:52:39.000Z
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2
|
2021-12-18T17:18:52.000Z
|
2022-01-26T09:23:23.000Z
|
tcvx21/grillix_post/observables/parallel_gradient_m.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2
|
2021-12-13T12:56:09.000Z
|
2022-01-25T20:30:28.000Z
|
import xarray as xr
import numpy as np
from pathlib import Path
from tcvx21.grillix_post.components import FieldlineTracer
from tcvx21.grillix_post.lineouts import Lineout
xr.set_options(keep_attrs=True)
def initialise_lineout_for_parallel_gradient(
lineout, grid, equi, norm, npol, stored_trace: Path = None
):
"""
Traces to find the forward and reverse lineouts for a given lineout
Expensive! Needs to be done once per lineout that you want to take gradients with
"""
fieldline_tracer = FieldlineTracer(equi)
try:
print(f"Attempting to read stored trace from {stored_trace}")
ds = xr.open_dataset(stored_trace)
assert np.allclose(ds["lineout_x"], lineout.r_points)
assert np.allclose(ds["lineout_y"], lineout.z_points)
except (FileNotFoundError, ValueError):
forward_trace, reverse_trace = fieldline_tracer.find_neighbouring_points(
lineout.r_points, lineout.z_points, n_toroidal_planes=int(npol)
)
ds = xr.Dataset(
data_vars=dict(
forward_x=("points", forward_trace[:, 0]),
forward_y=("points", forward_trace[:, 1]),
forward_l=("points", forward_trace[:, 2]),
reverse_x=("points", reverse_trace[:, 0]),
reverse_y=("points", reverse_trace[:, 1]),
reverse_l=("points", reverse_trace[:, 2]),
lineout_x=("points", lineout.r_points),
lineout_y=("points", lineout.z_points),
)
)
if stored_trace is not None:
if stored_trace.exists():
stored_trace.unlink()
ds.to_netcdf(stored_trace)
lineout.forward_lineout = Lineout(ds["forward_x"], ds["forward_y"])
lineout.forward_lineout.setup_interpolation_matrix(grid, use_source_points=True)
lineout.reverse_lineout = Lineout(ds["reverse_x"], ds["reverse_y"])
lineout.reverse_lineout.setup_interpolation_matrix(grid, use_source_points=True)
lineout.forward_distance = xr.DataArray(
ds["forward_l"], dims="interp_points"
).assign_attrs(norm=norm.R0)
lineout.reverse_distance = xr.DataArray(
ds["reverse_l"], dims="interp_points"
).assign_attrs(norm=norm.R0)
def compute_parallel_gradient(lineout, field):
"""
Computes the parallel gradient via centred differences
Note that you should multiply this by the penalisation direction function to get the direction 'towards the
wall'. This isn't quite the same as projecting onto the wall normal, but for computing the parallel
heat flux this is actually more helpful
"""
assert hasattr(lineout, "forward_lineout") and hasattr(
lineout, "reverse_lineout"
), f"Have to call initialise_lineout_for_parallel_gradient on lineout before trying to compute_parallel_gradient"
parallel_gradients = [
compute_gradient_on_plane(lineout, field, plane)
for plane in range(field.sizes["phi"])
]
return xr.concat(parallel_gradients, dim="phi")
def compute_gradient_on_plane(lineout, field, plane):
"""Computes the parallel gradient on a single plane"""
forward_value = lineout.forward_lineout.interpolate(
field.isel(phi=np.mod(plane + 1, field.sizes["phi"]))
)
reverse_value = lineout.forward_lineout.interpolate(
field.isel(phi=np.mod(plane - 1, field.sizes["phi"]))
)
two_plane_distance = lineout.forward_distance - lineout.reverse_distance
centred_difference = forward_value - reverse_value
return (
(centred_difference / two_plane_distance)
.assign_coords(phi=plane)
.assign_attrs(norm=field.norm / two_plane_distance.norm)
)
| 37.17
| 117
| 0.684154
| 469
| 3,717
| 5.191898
| 0.332623
| 0.03614
| 0.043121
| 0.017248
| 0.245585
| 0.173306
| 0.173306
| 0.141273
| 0.141273
| 0.110062
| 0
| 0.004811
| 0.217111
| 3,717
| 99
| 118
| 37.545455
| 0.831959
| 0.135324
| 0
| 0.029412
| 0
| 0
| 0.109424
| 0.020557
| 0
| 0
| 0
| 0
| 0.044118
| 1
| 0.044118
| false
| 0
| 0.073529
| 0
| 0.147059
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26043d5d77004fcf11c43eb7691efa8015b6c1e6
| 1,039
|
py
|
Python
|
rhymes.py
|
hayderkharrufa/arabic_poetry_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 72
|
2020-05-29T19:58:22.000Z
|
2022-03-11T18:53:56.000Z
|
rhymes.py
|
mahfoudhich/arabic_poem_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 1
|
2020-06-12T11:03:45.000Z
|
2020-08-05T17:52:27.000Z
|
rhymes.py
|
mahfoudhich/arabic_poem_generator
|
82f2ed726ec6270c3ee1d2f7c7aa1783df973708
|
[
"MIT"
] | 34
|
2020-06-04T14:38:39.000Z
|
2022-03-16T20:50:56.000Z
|
# coding: utf-8
# author: Haydara https://www.youtube.com/haydara
import pickle
with open('vocabs.pkl', 'rb') as pickle_load:
voc_list = pickle.load(pickle_load)
allowed_chars = ['ذ', 'ض', 'ص', 'ث', 'ق', 'ف', 'غ', 'ع', 'ه', 'خ', 'ح', 'ج', 'د',
'ش', 'س', 'ي', 'ب', 'ل', 'ا', 'أ', 'ت', 'ن', 'م', 'ك', 'ط', 'ئ', 'ء', 'ؤ', 'ر', 'ى',
'ة', 'و', 'ز', 'ظ', 'ّ', ' ']
max_word_length = 9
def rhymes_with(word):
if word not in ['الله', 'والله', 'بالله', 'لله', 'تالله']:
word = word.replace('ّ', '')
ending = word[-2:]
rhymes = []
for w in voc_list:
if len(w) < max_word_length and w.endswith(ending):
rhymes.append(w)
return rhymes
def rhymes_with_last_n_chars(word, n):
if word not in ['الله', 'والله', 'بالله', 'لله', 'تالله', 'فالله']:
word = word.replace('ّ', '')
ending = word[-n:]
rhymes = []
for w in voc_list:
if len(w) < max_word_length and w.endswith(ending):
rhymes.append(w)
return rhymes
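# Usage sketch (added; results depend on the contents of vocabs.pkl, so this is
# left commented out - the query word is hypothetical):
# print(rhymes_with('كتاب')[:10])              # words ending in the same 2 letters
# print(rhymes_with_last_n_chars('كتاب', 3))   # stricter 3-letter rhyme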
| 29.685714
| 102
| 0.505294
| 154
| 1,039
| 3.318182
| 0.545455
| 0.058708
| 0.076321
| 0.043053
| 0.547945
| 0.547945
| 0.446184
| 0.446184
| 0.446184
| 0.317025
| 0
| 0.003932
| 0.26564
| 1,039
| 34
| 103
| 30.558824
| 0.661861
| 0.059673
| 0
| 0.48
| 0
| 0
| 0.101643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2607fe4913aa92b0a573f52ce885f77ac1e7a144
| 17,667
|
py
|
Python
|
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 3
|
2021-03-28T14:46:57.000Z
|
2022-01-03T17:25:19.000Z
|
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | null | null | null |
ots_eval/outlier_detection/doots.py
|
YellowOfTheEgg/ots-eval
|
8ec08e60330d41f8f7ffd571dd6301cdedaefd99
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T10:56:14.000Z
|
2022-01-11T10:56:14.000Z
|
import pandas
import numpy as np
from .reference_histogram_outlier import HistOutlier
from typing import Union, Tuple
class DOOTS(object):
def __init__(self, data: pandas.DataFrame, weighting: bool = False, jaccard: bool = False):
"""
Params:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id' containing objects,
                                 timestamps, cluster assignments, features, etc.
Note: The first three columns can have custom names as long as they represent the object
identifier, the timestamp and the cluster identifier in the right order
Optional:
weighting (boolean) - indicating whether the weighting function should be applied
jaccard (boolean) - indicating whether the jaccard index should be used instead of the asymmetric proportion
"""
self._data = data
self._weighting = weighting
self._jaccard = jaccard
self._outlier_result = None
self._outlier_rating = None
self._column_names = data.columns.values
self._object_column_name = self._column_names[0]
self._time_column_name = self._column_names[1]
self._cluster_column_name = self._column_names[2]
self._cluster_compositions = self.obtain_cluster_compositions()
def get_outliers(self, tau: float) -> Tuple[pandas.DataFrame, pandas.DataFrame]:
"""
Parameters:
tau (float) - threshold for outlier detection
Returns:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id', 'outlier'
outlier_result (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time',
'cluster_end_time', 'rating', 'distance' and 'outlier'
"""
self.calc_outlier_degree()
return self.mark_outliers(tau)
def calc_outlier_degree(self) -> pandas.DataFrame:
"""
Returns:
outlier_rating (DataFrame) - pandas DataFrame with the columns 'object_id', 'start_time', 'end_time',
'cluster_end_time', 'rating' and 'distance' containing the subsequences'
distances to the reference sequence per time period
"""
rating = self.calc_outlier_rating()
outlier = HistOutlier()
self._outlier_rating = outlier.calc_outlier_degree(rating, self._data)
return self._outlier_rating
def mark_outliers(self, tau: float) -> Tuple[pandas.DataFrame, pandas.DataFrame]:
"""
Parameters:
tau (float) - threshold for outlier detection
Returns:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'time', 'cluster_id', 'outlier'
outlier_result (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time',
'cluster_end_time', 'rating', 'distance' and 'outlier'
"""
print('TAU: ', str(tau))
self._outlier_result = self._outlier_rating[(self._outlier_rating['distance'] >= tau) |
(self._outlier_rating['distance'] == -1)]
# mark outliers in the clusters
self._data = self._data.assign(outlier=1)
self._data = self._data.astype({self._object_column_name: str})
self._outlier_result = self._outlier_result.astype({self._object_column_name: str})
time_points = self._data[self._time_column_name].unique().tolist()
time_points.sort()
# mark outliers detected by distance with -1
for index, row in self._outlier_result.iterrows():
for time_point in time_points[
time_points.index(int(row['start_time'])):time_points.index(int(row['end_time'])) + 1]:
self._data.loc[(self._data[self._time_column_name] == time_point) &
(self._data[self._object_column_name] == row[self._object_column_name]), 'outlier'] = -1
conseq_outliers = self._outlier_result[self._outlier_result['distance'] == -1]
# mark conseq cluster outliers with -2, mark conseq outliers which also are outliers by distance with -3
for index, row in conseq_outliers.iterrows():
for time_point in time_points[
time_points.index(int(row['start_time'])):time_points.index(int(row['end_time'])) + 1]:
if self._data.loc[(self._data[self._time_column_name] == time_point) &
(self._data[self._object_column_name] == row[self._object_column_name]),
'outlier'].item() in [-1, -2, -3]:
self._data.loc[(self._data[self._time_column_name] == time_point) &
(self._data[self._object_column_name] == row[self._object_column_name]),
'outlier'] = -3
else:
self._data.loc[(self._data[self._time_column_name] == int(time_point)) &
(self._data[self._object_column_name] == row[self._object_column_name]),
'outlier'] = -2
return self._data, self._outlier_result
def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict:
"""
Optional:
id (int, str, list) - int, str, list or None representing the data points that should be rated. If id is
None, all objects are rated
start_time (int) - time that should be considered as beginning
end_time (int) - int representing the timestamp which should be rated up to
Returns:
ratings (dict) - dict {<object_id>: <rating>} with ratings of objects
"""
ids_to_rate = self.get_ids_to_rate(id, self._object_column_name)
if end_time is None:
end_time = np.max(self._data[self._time_column_name].unique())
ratings = self.calc_object_rating(ids_to_rate, end_time, start_time)
return ratings
def calc_object_rating(self, ids_to_rate: list, end_time: int, start_time: int = None) -> dict:
"""
Params:
ids_to_rate (list) - list of data points that should be rated
end_time (int) - representing the timestamp which should be rated up to
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings (dict) - dict {<object_id>: <rating>} with ratings of objects
"""
ratings = {}
gr_clusters = self._data.groupby(self._object_column_name)
# iterate over object ids
for id in ids_to_rate:
cur_group = gr_clusters.get_group(id)
cur_group = cur_group[cur_group[self._time_column_name] <= end_time]
if start_time is not None:
cur_group = cur_group[cur_group[self._time_column_name] >= start_time]
if len(cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name]) == 0:
# print('Object does not exist for timestamp ', str(end_time))
continue
# id of the cluster of the last considered timestamp
last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[0]
# if object is an outlier for the considered timestamp, it gets worst rating of 0.0
if int(last_cluster) < 0:
ratings[id] = 0.0
continue
cluster_ids = cur_group[self._cluster_column_name].unique()
object_ratings = []
num_clusters = 0
has_outlier = False
for cluster in cluster_ids:
if cluster == last_cluster:
continue
# Add the proportion of clusters before last timestamp, that merged in last cluster
else:
# outliers get worst rating of 0.0
if int(cluster) < 0:
object_ratings.append(0.0)
has_outlier = True
else:
object_ratings.append(self._cluster_compositions[last_cluster][cluster])
num_clusters += 1
if not has_outlier and len(object_ratings) == 0:
# print(str(id) + " has no data before t=" + str(end_time))
continue
if self._weighting:
try:
weighting_denominator = 0
for i in range(1, num_clusters + 1):
weighting_denominator += i
if num_clusters > 0:
object_rating = 0
for i in range(num_clusters):
object_rating += object_ratings[i] * ((i + 1) / weighting_denominator)
else:
continue
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
else:
try:
object_rating = np.sum(object_ratings)
object_rating /= num_clusters
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
ratings[id] = round(object_rating, 3)
return ratings
def calc_outlier_rating(self) -> pandas.DataFrame:
"""
Returns:
data (DataFrame) - pandas DataFrame with columns 'object_id', 'start_time', 'end_time', 'cluster_end_time', 'rating'
containing the outlier rating for all subsequences
"""
ratings = []
timestamps = self._data[self._time_column_name].unique()
timestamps.sort()
for i in range(0, len(timestamps) - 1):
for j in range(i + 1, len(timestamps)):
time_ratings = self.rate_object(start_time=timestamps[i], end_time=timestamps[j])
for object in time_ratings:
cluster = self._data[(self._data[self._object_column_name] == object) &
(self._data[self._time_column_name] == timestamps[j])
][self._cluster_column_name].item()
ratings.append([object, timestamps[i], timestamps[j], cluster, time_ratings[object]])
outlier_rating = pandas.DataFrame(ratings, columns=[self._object_column_name, 'start_time', 'end_time',
'cluster_end_time', 'rating'])
return outlier_rating
######## HELPER FUNCTIONS ########
def get_feature_list(self, objects: list, time: int) -> np.ndarray:
"""
Params:
objects (list) - list of objects_ids that belong to considered cluster
time (int) - time of cluster that is considered
Returns:
feature_list (array) - array of shape (num_objects, num_features) containing the features of objects in the considered cluster
"""
feature_list = []
for obj in objects:
features = self._data[
(self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)]
features = \
features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name],
axis=1).iloc[0].tolist()
if len(features) <= 0:
print("No features found for object ", str(obj))
continue
feature_list.append(features)
return np.array(feature_list)
def get_num_timestamps(self, start_time: int, end_time: int) -> int:
"""
Params:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Returns:
num_timestamps (int) - number of timestamps between start_time and end_time
"""
timestamp_list = self._data[self._time_column_name].unique()
if start_time is not None:
timestamp_list = [i for i in timestamp_list if i >= start_time]
if end_time is not None:
timestamp_list = [i for i in timestamp_list if i <= end_time]
num_timestamps = len(timestamp_list)
return num_timestamps
def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list:
"""
Params:
id (int, str, list) - int, str, list or None representing the data points that should be rated. If id is
None, all objects are rated
id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract
Optional:
start_time (int) - which timestamp to start at
            end_time (int) - which timestamp to stop at
Returns:
ids_to_rate (list) - list of ids that should be rated
"""
if id is None:
data = self._data.copy()
if start_time is not None:
data = data[data[self._time_column_name] >= start_time]
if end_time is not None:
data = data[data[self._time_column_name] <= end_time]
ids_to_rate = data[id_name].unique().tolist()
elif isinstance(id, int) or isinstance(id, str):
ids_to_rate = [id]
elif isinstance(id, list):
ids_to_rate = id[:]
else:
raise Exception('id has to be int, str, list or None')
return ids_to_rate
def obtain_cluster_compositions(self) -> dict:
"""
Returns:
cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster
compositions
Example:
{5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that
100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5
"""
cluster_compositions = {}
g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name])
if not self._jaccard:
cluster_members = self._data.groupby(self._cluster_column_name).count()
# iterate over all clusters - 'group' contains the time and cluster_id
# and 'objects' is the corresponding dataframe
for group, objects in g_clusters:
# Ignore outliers
if int(group[1]) < 0:
continue
objects = objects[self._object_column_name].values.tolist()
# temporal intersection
# select considered clusters with later timestamps than the current one to check which clusters the
            # current one merged into, and count how many objects of the current cluster are in the considered clusters
# example of a series from the dataframe: [cluster_id, count] with [2, 10]
# meaning: 10 objects of the current cluster merged into the cluster with the id 2
temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) &
(self._data[self._time_column_name] > group[0])]
).groupby(self._cluster_column_name).count()
# iterate over all clusters which the current cluster has merged into
# 'cluster' contains the cluster_id
# and 'con_objects' is the corresponding number of objects of the temporal intersection
for cluster, num_objects in temp_intersection.iterrows():
# Ignore outliers
if int(cluster) < 0:
continue
# for all considered clusters save the proportion of the current cluster that merged into the considered
# one
# example: {3: {2: 0.3}, 4: {2: 0.1}}
# meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4
if cluster not in cluster_compositions:
cluster_compositions[cluster] = {}
if self._jaccard:
# cardinality of the union of both considered clusters
card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) |
(self._data[self._cluster_column_name] == group[1])]
[self._object_column_name].unique())
# jaccard distance
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(card_union), 3)
else:
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(cluster_members.loc[group[1]].values[1]), 3)
if group[1] not in cluster_compositions:
cluster_compositions[group[1]] = {}
return cluster_compositions
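# Usage sketch (added; the tiny DataFrame below is hypothetical but follows the
# object_id / time / cluster_id column order the constructor expects):
# import pandas as pd
# df = pd.DataFrame({'object_id': ['a', 'a', 'b', 'b', 'c', 'c'],
#                    'time':       [1,   2,   1,   2,   1,   2],
#                    'cluster_id': [1,   2,   1,   2,   1,   3]})
# detector = DOOTS(df)
# data, outlier_result = detector.get_outliers(tau=0.5)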
| 49.487395
| 138
| 0.572819
| 2,026
| 17,667
| 4.752221
| 0.116486
| 0.055048
| 0.031159
| 0.043623
| 0.418467
| 0.387723
| 0.324678
| 0.302971
| 0.279809
| 0.274823
| 0
| 0.008741
| 0.339503
| 17,667
| 356
| 139
| 49.626404
| 0.816351
| 0.314768
| 0
| 0.215789
| 0
| 0
| 0.017331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057895
| false
| 0
| 0.021053
| 0
| 0.136842
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260902dd3e508f3dd93dbd19435e62ca56223adf
| 3,405
|
py
|
Python
|
SWIG_fast_functions/test.py
|
twni2016/OrganSegRSTN_PyTorch
|
bf571320e718c8f138e04d48645e3b4dfe75801d
|
[
"MIT"
] | 100
|
2018-08-01T04:42:36.000Z
|
2022-03-23T07:01:21.000Z
|
SWIG_fast_functions/test.py
|
bharat3012/OrganSegRSTN_PyTorch
|
aff23489b1f3006761e3270178adfcccb63d0de9
|
[
"MIT"
] | 12
|
2018-08-07T10:35:47.000Z
|
2022-02-21T09:09:42.000Z
|
SWIG_fast_functions/test.py
|
bharat3012/OrganSegRSTN_PyTorch
|
aff23489b1f3006761e3270178adfcccb63d0de9
|
[
"MIT"
] | 35
|
2018-08-06T21:27:36.000Z
|
2021-11-03T10:20:16.000Z
|
import numpy as np
import fast_functions as ff
import time
def DSC_computation(label, pred):
pred_sum = pred.sum()
label_sum = label.sum()
inter_sum = (pred & label).sum()
return 2 * float(inter_sum) / (pred_sum + label_sum), inter_sum, pred_sum, label_sum
def post_processing(F, S, threshold, top2):
F_sum = F.sum()
if F_sum == 0:
return F
    if F_sum >= np.prod(F.shape) / 2:  # np.product was removed in NumPy 2.0
return F
height = F.shape[0]
width = F.shape[1]
depth = F.shape[2]
ll = np.array(np.nonzero(S))
    marked = np.zeros(F.shape, dtype=bool)  # np.bool/np.int were removed in NumPy >= 1.24
    queue = np.zeros((F_sum, 3), dtype=int)
    volume = np.zeros(F_sum, dtype=int)
head = 0
tail = 0
bestHead = 0
bestTail = 0
bestHead2 = 0
bestTail2 = 0
for l in range(ll.shape[1]):
if not marked[ll[0, l], ll[1, l], ll[2, l]]:
temp = head
marked[ll[0, l], ll[1, l], ll[2, l]] = True
queue[tail, :] = [ll[0, l], ll[1, l], ll[2, l]]
tail = tail + 1
while (head < tail):
t1 = queue[head, 0]
t2 = queue[head, 1]
t3 = queue[head, 2]
if t1 > 0 and F[t1 - 1, t2, t3] and not marked[t1 - 1, t2, t3]:
marked[t1 - 1, t2, t3] = True
queue[tail, :] = [t1 - 1, t2, t3]
tail = tail + 1
if t1 < height - 1 and F[t1 + 1, t2, t3] and not marked[t1 + 1, t2, t3]:
marked[t1 + 1, t2, t3] = True
queue[tail, :] = [t1 + 1, t2, t3]
tail = tail + 1
if t2 > 0 and F[t1, t2 - 1, t3] and not marked[t1, t2 - 1, t3]:
marked[t1, t2 - 1, t3] = True
queue[tail, :] = [t1, t2 - 1, t3]
tail = tail + 1
if t2 < width - 1 and F[t1, t2 + 1, t3] and not marked[t1, t2 + 1, t3]:
marked[t1, t2 + 1, t3] = True
queue[tail, :] = [t1, t2 + 1, t3]
tail = tail + 1
if t3 > 0 and F[t1, t2, t3 - 1] and not marked[t1, t2, t3 - 1]:
marked[t1, t2, t3 - 1] = True
queue[tail, :] = [t1, t2, t3 - 1]
tail = tail + 1
if t3 < depth - 1 and F[t1, t2, t3 + 1] and not marked[t1, t2, t3 + 1]:
marked[t1, t2, t3 + 1] = True
queue[tail, :] = [t1, t2, t3 + 1]
tail = tail + 1
head = head + 1
if tail - temp > bestTail - bestHead:
bestHead2 = bestHead
bestTail2 = bestTail
bestHead = temp
bestTail = tail
elif tail - temp > bestTail2 - bestHead2:
bestHead2 = temp
bestTail2 = tail
volume[temp: tail] = tail - temp
volume = volume[0: tail]
if top2:
target_voxel = np.where(volume >= (bestTail2 - bestHead2) * threshold)
else:
target_voxel = np.where(volume >= (bestTail - bestHead) * threshold)
    F0 = np.zeros(F.shape, dtype=bool)
F0[tuple(map(tuple, np.transpose(queue[target_voxel, :])))] = True
return F0
print('python')
G = np.zeros((512,512,240),dtype=np.uint8)
G[128:384,128:384,60:180]=1
volume_data = np.load('1.npz')
F = volume_data['volume'].astype(np.uint8)
start_time = time.time()
F = post_processing(F, F, 1.0, False)
print(time.time() - start_time)
start_time = time.time()
for l in range(10):
DSC = DSC_computation(F,G)
print(DSC)
print(time.time() - start_time)
print('SWIG')
volume_data = np.load('1.npz')
G = np.zeros((512,512,240),dtype=np.uint8)
G[128:384,128:384,60:180]=1
F = volume_data['volume'].astype(np.uint8)
start_time = time.time()
ff.post_processing(F, F, 1.0, False)
print(time.time() - start_time)
start_time = time.time()
for l in range(10):
P = np.zeros(3, dtype = np.uint32)
ff.DSC_computation(F,G,P)
print(P, float(P[2]) * 2 / (P[0] + P[1]))
print(time.time() - start_time)
| 29.608696
| 85
| 0.595888
| 605
| 3,405
| 3.294215
| 0.14876
| 0.032112
| 0.02007
| 0.028098
| 0.554942
| 0.496237
| 0.439538
| 0.415454
| 0.415454
| 0.409433
| 0
| 0.089905
| 0.229075
| 3,405
| 114
| 86
| 29.868421
| 0.669333
| 0
| 0
| 0.254717
| 0
| 0
| 0.009401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.028302
| 0
| 0.084906
| 0.075472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260b9f9c6262684a4bbfdcc0510786d9313421e4
| 358
|
py
|
Python
|
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
tests/main.py
|
zodiuxus/opensimplex
|
d8c761d91834a809e51987d25439549c50f0effb
|
[
"MIT"
] | null | null | null |
from opensimplex import OpenSimplex
import torch, time
def opensimplex_test(device: str):
generator = torch.Generator(device=device)
start = time.time()
os = OpenSimplex(generator=generator)
end = time.time()
return os.noise2(10,10), device, end-start
print(opensimplex_test('cuda'))
print('')
print(opensimplex_test('cpu'))
| 27.538462
| 47
| 0.701117
| 44
| 358
| 5.636364
| 0.431818
| 0.181452
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016892
| 0.173184
| 358
| 13
| 48
| 27.538462
| 0.820946
| 0
| 0
| 0
| 0
| 0
| 0.020173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260d3744fb17af1e21703f4ae4917654e0d07e54
| 10,731
|
py
|
Python
|
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
part1/ELMo/modules/elmo.py
|
peter850706/Contextual-embeddings-for-sequence-classification
|
e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import namedtuple
from ELMo.modules.char_embedding import CharEmbedding
class ELMo(nn.Module):
"""Implement the Embeddings from Language Models (ELMo) as described in "Deep contextualized word representations" (https://arxiv.org/pdf/1802.05365.pdf)
Args:
hidden_size (int): The number of features in the hidden state h of the language models.
dim_projection (int):
char_embedding_kwargs (dict): The parameters for the CharEmbedding class (refer to modules/char_embedding.py).
"""
def __init__(self, hidden_size=2048, dim_projection=512, **char_embedding_kwargs):
super(ELMo, self).__init__()
self.char_embedding = CharEmbedding(**char_embedding_kwargs)
"""
self.output_layer = nn.Sequential(nn.Linear(dim_projection, dim_projection),
nn.ReLU(inplace=True))
"""
# forward language model
self.forward_lm = nn.Sequential()
self.forward_lm.add_module('lstm0', nn.LSTM(input_size=char_embedding_kwargs['projection_size'],
hidden_size=hidden_size,
num_layers=1,
dropout=0,
bidirectional=False))
self.forward_lm.add_module('linear0', nn.Linear(hidden_size, dim_projection))
self.forward_lm.add_module('lstm1', nn.LSTM(input_size=dim_projection,
hidden_size=hidden_size,
num_layers=1,
dropout=0,
bidirectional=False))
self.forward_lm.add_module('linear1', nn.Linear(hidden_size, dim_projection))
for name, param in self.forward_lm.named_parameters():
if 'lstm' in name and param.requires_grad:
# orthogonal initialization
if 'weight' in name:
nn.init.orthogonal_(param)
# bias = [b_ig | b_fg | b_gg | b_og], set b_fg (forget gate) to 1 and other gates to 0
elif 'bias' in name:
n = param.size(0)
param.data.fill_(0)
param.data[n // 4 : n // 2].fill_(1)
# backward language model
self.backward_lm = nn.Sequential()
self.backward_lm.add_module('lstm0', nn.LSTM(input_size=char_embedding_kwargs['projection_size'],
hidden_size=hidden_size,
num_layers=1,
dropout=0,
bidirectional=False))
self.backward_lm.add_module('linear0', nn.Linear(hidden_size, dim_projection))
self.backward_lm.add_module('lstm1', nn.LSTM(input_size=dim_projection,
hidden_size=hidden_size,
num_layers=1,
dropout=0,
bidirectional=False))
self.backward_lm.add_module('linear1', nn.Linear(hidden_size, dim_projection))
for name, param in self.backward_lm.named_parameters():
if 'lstm' in name and param.requires_grad:
# orthogonal initialization
if 'weight' in name:
nn.init.orthogonal_(param)
# bias = [b_ig | b_fg | b_gg | b_og], set b_fg (forget gate) to 1 and other gates to 0
elif 'bias' in name:
n = param.size(0)
param.data.fill_(0)
param.data[n // 4 : n // 2].fill_(1)
def packed_forward(self, rnn, padded_input, lengths):
"""
Args:
rnn:
padded_input (tensor) (padded_len, batch, features): The padded input.
lengths (LongTensor) (batch, ): The original length of the padded_input
Return:
padded_output (tensor) (padded_len, batch, features):
"""
lengths, sorted_indexes = torch.sort(lengths, descending=True) # sorted by descending order
padded_input = padded_input.index_select(dim=1, index=sorted_indexes)
packed_input = pack_padded_sequence(input=padded_input, lengths=lengths)
packed_output, _ = rnn(packed_input)
padded_output, _ = pad_packed_sequence(sequence=packed_output, padding_value=0)
unsorted_indexes = torch.argsort(sorted_indexes) # recover the original order
return padded_output.index_select(dim=1, index=unsorted_indexes)
def forward(self, forward_input, backward_input, word_lens):
"""
Args:
forward_input (tensor) (batch, padded_len):
word_lens (LongTensor) (batch, ): The original length of the input sentences.
Returns:
logits (dict):
forward (tensor) (batch, padded_len, dim_projection):
backward (tensor) (batch, padded_len, dim_projection):
"""
forward_char_embedding_features = self.char_embedding(forward_input).transpose(1, 0) # (padded_len, batch, projection_size)
backward_char_embedding_features = self.char_embedding(backward_input).transpose(1, 0) # (padded_len, batch, projection_size)
forward_lm_layer0_features = self.forward_lm.linear0(self.packed_forward(self.forward_lm.lstm0, forward_char_embedding_features, word_lens)) # (padded_len, batch, projection_size)
backward_lm_layer0_features = self.backward_lm.linear0(self.packed_forward(self.backward_lm.lstm0, backward_char_embedding_features, word_lens)) # (padded_len, batch, projection_size)
forward_lm_layer1_features = self.forward_lm.linear1(self.packed_forward(self.forward_lm.lstm1, forward_lm_layer0_features, word_lens)) # (padded_len, batch, projection_size)
backward_lm_layer1_features = self.backward_lm.linear1(self.packed_forward(self.backward_lm.lstm1, backward_lm_layer0_features, word_lens)) # (padded_len, batch, projection_size)
"""
# residual connection
forward_lm_layer1_features = forward_lm_layer0_features + forward_lm_layer1_features
backward_lm_layer1_features = backward_lm_layer0_features + backward_lm_layer1_features
#
forward_logits = self.output_layer(forward_lm_layer1_features)
backward_logits = self.output_layer(backward_lm_layer1_features)
"""
        # residual connection between layer0 and layer1
forward_logits = (forward_lm_layer0_features + forward_lm_layer1_features).transpose(1, 0)
backward_logits = (backward_lm_layer0_features + backward_lm_layer1_features).transpose(1, 0)
return {'forward': forward_logits, 'backward': backward_logits}
def concat_features(self, forward_features, backward_features, word_lens):
padded_len, batch, _ = backward_features.size()
indexes = list(range(padded_len))
for i in range(batch):
reversed_indexes = indexes[:word_lens[i]][::-1] + indexes[word_lens[i]:]
backward_features[:, i, :] = backward_features[:, i, :].index_select(dim=0,
index=torch.tensor(reversed_indexes,
dtype=torch.long,
device=word_lens.device))
return torch.cat([forward_features, backward_features], dim=-1)
def extract_features(self, forward_input, backward_input, word_lens):
"""
Args:
forward_input (tensor) (batch, padded_len):
word_lens (LongTensor) (batch, ): The original length of the input sentences.
Returns:
logits (dict):
forward (tensor) (batch, padded_len, dim_projection):
backward (tensor) (batch, padded_len, dim_projection):
"""
forward_char_embedding_features = self.char_embedding(forward_input).transpose(1, 0) # (padded_len, batch, projection_size)
backward_char_embedding_features = self.char_embedding(backward_input).transpose(1, 0) # (padded_len, batch, projection_size)
forward_lm_layer0_features = self.forward_lm.linear0(self.packed_forward(self.forward_lm.lstm0, forward_char_embedding_features, word_lens)) # (padded_len, batch, projection_size)
backward_lm_layer0_features = self.backward_lm.linear0(self.packed_forward(self.backward_lm.lstm0, backward_char_embedding_features, word_lens)) # (padded_len, batch, projection_size)
forward_lm_layer1_features = self.forward_lm.linear1(self.packed_forward(self.forward_lm.lstm1, forward_lm_layer0_features, word_lens)) # (padded_len, batch, projection_size)
backward_lm_layer1_features = self.backward_lm.linear1(self.packed_forward(self.backward_lm.lstm1, backward_lm_layer0_features, word_lens)) # (padded_len, batch, projection_size)
# concatenate forward and backward features
char_embedding_features = self.concat_features(forward_char_embedding_features,
backward_char_embedding_features,
word_lens).transpose(1, 0) # (batch, padded_len, 2 * projection_size)
lm_layer0_features = self.concat_features(forward_lm_layer0_features,
backward_lm_layer0_features,
word_lens).transpose(1, 0) # (batch, padded_len, 2 * projection_size)
lm_layer1_features = self.concat_features(forward_lm_layer1_features,
backward_lm_layer1_features,
word_lens).transpose(1, 0) # (batch, padded_len, 2 * projection_size)
Features = namedtuple('ELMoFeatures', ['char_embedding', 'lm_layer0', 'lm_layer1'])
return Features(*[char_embedding_features, lm_layer0_features, lm_layer1_features])
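# Shape sketch for packed_forward (added; sizes are hypothetical). The method
# sorts the batch by length, packs it so the LSTM skips padding, then restores
# the original batch order:
# lstm = nn.LSTM(input_size=8, hidden_size=16)
# padded = torch.randn(5, 3, 8)        # (padded_len, batch, features)
# lengths = torch.tensor([5, 2, 4])    # original (unsorted) lengths
# out = ELMo.packed_forward(None, lstm, padded, lengths)  # (5, 3, 16), original order
# (passing None for self works here only because packed_forward never uses it)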
| 63.875
| 191
| 0.594353
| 1,150
| 10,731
| 5.213913
| 0.137391
| 0.042028
| 0.042695
| 0.048032
| 0.661775
| 0.625083
| 0.601401
| 0.59523
| 0.569046
| 0.552035
| 0
| 0.017232
| 0.324015
| 10,731
| 168
| 192
| 63.875
| 0.809347
| 0.203895
| 0
| 0.463918
| 0
| 0
| 0.021423
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051546
| false
| 0
| 0.051546
| 0
| 0.154639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260d56541b9590ff3dcf8aa4ac7f649e63e42413
| 3,106
|
py
|
Python
|
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | null | null | null |
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | 21
|
2021-01-08T18:03:32.000Z
|
2021-02-02T16:17:34.000Z
|
src/app/externalOutages/createRealTimeOutage.py
|
nagasudhirpulla/wrldc_codebook
|
8fbc795074e16e2012b29ae875b99aa721a7f021
|
[
"MIT"
] | null | null | null |
import datetime as dt
import cx_Oracle
from src.app.externalOutages.getReasonId import getReasonId
def createRealTimeOutage(pwcDbConnStr: str, elemTypeId: int, elementId: int, outageDt: dt.datetime, outageTypeId: int,
reason: str, elementName: str, sdReqId: int, outageTagId: int) -> int:
"""create a new row in real time outages pwc table and return the id of newly created row
Args:
pwcDbConnStr (str): [description]
elemTypeId (int): [description]
elementId (int): [description]
outageDt (dt.datetime): [description]
outageTypeId (int): [description]
reason (str): [description]
elementName (str): [description]
sdReqId (int): [description]
outageTagId (int): [description]
Returns:
int: id of newly created row
"""
newRtoId = -1
    if outageDt is None:
return -1
if reason == None or reason == "":
reason = "NA"
reasId = getReasonId(pwcDbConnStr, reason, outageTypeId)
if reasId == -1:
return -1
outageDate: dt.datetime = dt.datetime(
outageDt.year, outageDt.month, outageDt.day)
outageTime: str = dt.datetime.strftime(outageDt, "%H:%M")
newRtoIdFetchSql = """
SELECT MAX(rto.ID)+1 FROM REPORTING_WEB_UI_UAT.real_time_outage rto
"""
rtoInsertSql = """
insert into reporting_web_ui_uat.real_time_outage rto(ID, ENTITY_ID, ELEMENT_ID, OUTAGE_DATE,
OUTAGE_TIME, RELAY_INDICATION_SENDING_ID, RELAY_INDICATION_RECIEVING_ID, CREATED_DATE,
SHUT_DOWN_TYPE, REASON_ID, CREATED_BY, MODIFIED_BY, REGION_ID, ELEMENTNAME,
SHUTDOWNREQUEST_ID, LOAD_AFFECTED, IS_LOAD_OR_GEN_AFFECTED, SHUTDOWN_TAG_ID, IS_DELETED) values
(:id, :elemTypeId, :elementId, :outageDate, :outageTime, 0, 0, CURRENT_TIMESTAMP, :outageTypeId,
:reasonId, 123, 123, 4, :elementName, :sdReqId, 0, 0, :outageTagId, NULL)
"""
dbConn = None
dbCur = None
try:
# get connection with raw data table
dbConn = cx_Oracle.connect(pwcDbConnStr)
# get cursor for raw data table
dbCur = dbConn.cursor()
# execute the new rto id fetch sql
dbCur.execute(newRtoIdFetchSql)
dbRows = dbCur.fetchall()
newRtoId = dbRows[0][0]
sqlData = {"id": newRtoId, "elemTypeId": elemTypeId, "elementId": elementId,
"outageDate": outageDate, "outageTime": outageTime,
"outageTypeId": outageTypeId, "reasonId": reasId,
"elementName": elementName, "sdReqId": sdReqId,
"outageTagId": outageTagId}
# execute the new row insertion sql
dbCur.execute(rtoInsertSql, sqlData)
# commit the changes
dbConn.commit()
except Exception as e:
newRtoId = -1
print('Error while creating new real time outage entry in pwc table')
print(e)
finally:
# closing database cursor and connection
if dbCur is not None:
dbCur.close()
if dbConn is not None:
dbConn.close()
return newRtoId
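# Usage sketch (added; the Oracle DSN and every id below are hypothetical):
# new_id = createRealTimeOutage('user/pass@host:1521/service',
#                               elemTypeId=2, elementId=101,
#                               outageDt=dt.datetime(2021, 1, 5, 9, 30),
#                               outageTypeId=1, reason='line fault',
#                               elementName='400KV-LINE-1', sdReqId=0,
#                               outageTagId=1)
# createRealTimeOutage returns -1 on any failure (missing date, unknown reason,
# or a database error).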
| 36.541176
| 118
| 0.640052
| 345
| 3,106
| 5.649275
| 0.385507
| 0.025654
| 0.02155
| 0.016419
| 0.054387
| 0.03489
| 0.03489
| 0.03489
| 0
| 0
| 0
| 0.008319
| 0.264649
| 3,106
| 84
| 119
| 36.97619
| 0.845009
| 0.210882
| 0
| 0.115385
| 0
| 0.038462
| 0.331378
| 0.065354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.057692
| 0
| 0.134615
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
260e0a514e1da67dfebf1f15683649ad98d25110
| 918
|
py
|
Python
|
src/backend/marsha/core/utils/jitsi_utils.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
src/backend/marsha/core/utils/jitsi_utils.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
src/backend/marsha/core/utils/jitsi_utils.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
"""Utils for jitsi"""
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
import jwt
def create_payload(room, moderator=True):
"""Create the payload so that it contains each information jitsi requires"""
token_payload = {
"exp": timezone.now()
+ timedelta(seconds=settings.JITSI_JWT_TOKEN_EXPIRATION_SECONDS),
"iat": timezone.now(),
"moderator": moderator,
"aud": "jitsi",
"iss": settings.JITSI_JWT_APP_ID,
"sub": settings.JITSI_DOMAIN,
"room": room,
}
return token_payload
def generate_token(room, moderator):
"""Generate the access token that will give access to the room"""
token_payload = create_payload(room=room, moderator=moderator)
token = jwt.encode(
token_payload,
settings.JITSI_JWT_APP_SECRET,
algorithm="HS256",
)
return token
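# Usage sketch (added; outside a Django project the settings must be configured
# first - the values below are hypothetical):
# from django.conf import settings
# settings.configure(JITSI_JWT_TOKEN_EXPIRATION_SECONDS=300,
#                    JITSI_JWT_APP_ID='my-app', JITSI_DOMAIN='meet.example.org',
#                    JITSI_JWT_APP_SECRET='secret')
# token = generate_token(room='my-room', moderator=True)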
| 25.5
| 80
| 0.668845
| 109
| 918
| 5.46789
| 0.440367
| 0.080537
| 0.080537
| 0.063758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004249
| 0.230937
| 918
| 35
| 81
| 26.228571
| 0.839943
| 0.159041
| 0
| 0
| 0
| 0
| 0.050265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261a000d9348f195b1440a4cf608cb7c86cce74a
| 7,359
|
py
|
Python
|
tests/test_mappings.py
|
cpenv/cpenv
|
07e1a6b5d1b20af4adff0c5a6987c7cdc784cc39
|
[
"MIT"
] | 15
|
2017-02-14T04:16:59.000Z
|
2021-10-05T15:20:02.000Z
|
tests/test_mappings.py
|
cpenv/cpenv
|
07e1a6b5d1b20af4adff0c5a6987c7cdc784cc39
|
[
"MIT"
] | 32
|
2015-11-04T15:53:50.000Z
|
2021-12-12T03:28:23.000Z
|
tests/test_mappings.py
|
cpenv/cpenv
|
07e1a6b5d1b20af4adff0c5a6987c7cdc784cc39
|
[
"MIT"
] | 2
|
2017-02-24T16:30:39.000Z
|
2021-09-24T05:26:05.000Z
|
# -*- coding: utf-8 -*-
# Local imports
from cpenv import mappings
from cpenv.compat import platform
def test_platform_values():
'''join_dicts with platform values'''
tests = {
'implicit_set': {
'osx': 'osx',
'linux': 'linux',
'win': 'win',
},
'implicit_prepend': {
'osx': ['osx0', 'osx1'],
'linux': ['linux0', 'linux1'],
'win': ['win0', 'win1'],
},
'explicit_set': {
'set': {
'osx': 'osx',
'linux': 'linux',
'win': 'win',
}
},
'explicit_ops': {
'osx': [{'append': 'osx0'}, {'prepend': 'osx1'}],
'linux': [{'append': 'linux0'}, {'prepend': 'linux1'}],
'win': [{'append': 'win0'}, {'prepend': 'win1'}],
}
}
expected = {
'implicit_set': tests['implicit_set'],
'implicit_prepend': tests['implicit_prepend'],
'explicit_set': tests['explicit_set']['set'],
'explicit_ops': {
'osx': ['osx1', 'osx0'],
'linux': ['linux1', 'linux0'],
'win': ['win1', 'win0'],
}
}
results = mappings.join_dicts(tests)
p = platform
assert results['implicit_set'] == expected['implicit_set'][p]
assert results['implicit_prepend'] == expected['implicit_prepend'][p]
assert results['explicit_set'] == expected['explicit_set'][p]
assert results['explicit_ops'] == expected['explicit_ops'][p]
def test_join_case_insensitivity():
'''join_dicts is case insensitive'''
a = {'Var': 'a'} # Original mixed case
b = {'VAR': 'b'} # UPPER - set
c = {'var': ['0', '1']} # lower - prepend
# Ensure Var is properly set and case of key is changed
result = mappings.join_dicts(a, b)
assert result['VAR'] == 'b'
# Ensure Var is properly set, prepended to and case of key is changed
result = mappings.join_dicts(a, b, c)
assert result['var'] == ['0', '1', 'b']
def test_implicit_set_values():
'''join_dicts implicitly sets values'''
a = {'var': ['x', 'y']}
b = {'var': 'z'}
c = {'var': 'a'}
result = mappings.join_dicts(a, b)
assert result['var'] == b['var']
result = mappings.join_dicts(a, b, c)
assert result['var'] == c['var']
def test_implicit_prepend_values():
'''join_dicts implicitly prepends values'''
a = {'var': 'z'}
b = {'var': ['x', 'y']}
c = {'var': ['0', '1']}
result = mappings.join_dicts(a, b)
assert result['var'] == ['x', 'y', 'z']
result = mappings.join_dicts(a, b, c)
assert result['var'] == ['0', '1', 'x', 'y', 'z']
def test_explicit_set():
'''join_dicts with explicitly set items'''
a = {
'A': '0',
'B': ['1', '2']
}
# Explicit set with a str, a list, and a non-existent key
b = {
'A': {'set': '1'},
'B': {'set': ['2', '3']},
'C': {'set': '4'},
}
result = mappings.join_dicts(a, b)
assert result == {'A': '1', 'B': ['2', '3'], 'C': '4'}
# Explicit set in list of ops
c = {
'A': [
{'set': '10'},
{'append': '20'},
]
}
result = mappings.join_dicts(a, b, c)
assert result == {'A': ['10', '20'], 'B': ['2', '3'], 'C': '4'}
def test_explicit_unset():
'''join_dicts with explicitly unset keys'''
a = {'A': '0'}
b = {'A': {'unset': '1'}}
result = mappings.join_dicts(a, b)
assert result == {}
def test_explicit_append():
'''join_dicts with explicitly appended values'''
a = {'A': '0'}
# Append one value
b = {'A': {'append': '1'}}
result = mappings.join_dicts(a, b)
assert result == {'A': ['0', '1']}
# Append list of values
c = {'A': {'append': ['2', '3']}}
result = mappings.join_dicts(a, b, c)
assert result == {'A': ['0', '1', '2', '3']}
# Multiple append operations
d = {'A': [{'append': '4'}, {'append': '5'}]}
result = mappings.join_dicts(a, b, c, d)
assert result == {'A': ['0', '1', '2', '3', '4', '5']}
# Append to a non-existent var
e = {'B': {'append': '6'}}
result = mappings.join_dicts(a, b, c, d, e)
assert result == {'A': ['0', '1', '2', '3', '4', '5'], 'B': ['6']}
# Append duplicates are ignored
f = {'A': {'append': ['0', '5', '6']}}
result = mappings.join_dicts(a, b, c, d, e, f)
assert result == {'A': ['0', '1', '2', '3', '4', '5', '6'], 'B': ['6']}
def test_explicit_prepend():
'''join_dicts with explicitly prepended values'''
a = {'A': '0'}
# Prepend one value
b = {'A': {'prepend': '1'}}
result = mappings.join_dicts(a, b)
assert result == {'A': ['1', '0']}
# Prepend list of values
c = {'A': {'prepend': ['2', '3']}}
result = mappings.join_dicts(a, b, c)
assert result == {'A': ['2', '3', '1', '0']}
# Multiple prepend operations
d = {'A': [{'prepend': '4'}, {'prepend': '5'}]}
result = mappings.join_dicts(a, b, c, d)
assert result == {'A': ['5', '4', '2', '3', '1', '0']}
# Prepend to a non-existent var
e = {'B': {'prepend': '6'}}
result = mappings.join_dicts(a, b, c, d, e)
assert result == {'A': ['5', '4', '2', '3', '1', '0'], 'B': ['6']}
# Prepend duplicates are ignored
f = {'A': {'prepend': ['0', '5', '6']}}
result = mappings.join_dicts(a, b, c, d, e, f)
assert result == {'A': ['6', '5', '4', '2', '3', '1', '0'], 'B': ['6']}
def test_explicit_remove():
'''join_dicts with explicitly removed values'''
a = {'A': ['0', '1', '2', '3', '4']}
# Remove one value
b = {'A': {'remove': '1'}}
result = mappings.join_dicts(a, b)
assert result == {'A': ['0', '2', '3', '4']}
# Remove list of values
c = {'A': {'remove': ['2', '4']}}
result = mappings.join_dicts(a, b, c)
assert result == {'A': ['0', '3']}
# Multiple remove operations
d = {'A': [{'remove': '0'}, {'remove': '3'}], 'B': {'remove': '6'}}
result = mappings.join_dicts(a, b, c, d)
assert result == {}
def test_explicit_complex_operation():
'''join_dicts with multiple explicit operations'''
a = {
'A': ['0', '1', '2'],
'B': '100',
'C': ['0'],
'D': '200',
}
b = {
'A': [
{'remove': ['1', '2']},
{'append': 'B'},
{'prepend': ['A', 'C']},
],
'B': [
{'set': ['A', 'B']},
{'prepend': 'C'},
{'remove': 'B'},
],
'C': [
{'set': ['A', 'B', 'C']},
{'prepend': 'Z'}
],
'D': {'remove': '200'},
}
expected = {
'A': ['A', 'C', '0', 'B'],
'B': ['C', 'A'],
'C': ['Z', 'A', 'B', 'C'],
}
result = mappings.join_dicts(a, b)
assert result == expected
def test_env_to_dict():
'''env_to_dict converts environment mapping to dict'''
env = {
'PATH': 'X:Y:Z',
'VAR': 'VALUE',
}
result = mappings.env_to_dict(env, pathsep=':')
assert result == {'PATH': ['X', 'Y', 'Z'], 'VAR': 'VALUE'}
def test_dict_to_env():
'''dict_to_env converts dict to environment mapping'''
data = {
'PATH': ['X', 'Y', 'Z'],
'VAR': 'VALUE',
}
result = mappings.dict_to_env(data, pathsep=':')
assert result == {'PATH': 'X:Y:Z', 'VAR': 'VALUE'}
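# Hedged illustration (not a test from the original file): the round trip the
# tests above imply -- join per-source dicts with an explicit prepend op,
# then serialize the result back into an environment mapping.
def example_roundtrip():
    joined = mappings.join_dicts(
        {'PATH': ['/usr/bin']},
        {'PATH': {'prepend': '/opt/bin'}},
    )
    env = mappings.dict_to_env(joined, pathsep=':')
    assert env == {'PATH': '/opt/bin:/usr/bin'}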
| 27.055147
| 75
| 0.467455
| 909
| 7,359
| 3.684268
| 0.117712
| 0.091371
| 0.121827
| 0.157958
| 0.434757
| 0.37235
| 0.359809
| 0.343386
| 0.312033
| 0.258883
| 0
| 0.030176
| 0.28849
| 7,359
| 271
| 76
| 27.154982
| 0.609435
| 0.145944
| 0
| 0.28022
| 0
| 0
| 0.159117
| 0
| 0
| 0
| 0
| 0
| 0.159341
| 1
| 0.065934
| false
| 0
| 0.010989
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261cb72cdb5369f93d2ed990d5292c026f4a45f8
| 3,313
|
py
|
Python
|
logial_puzzle/logical_puzzle.py
|
Adeon18/logical_puzzle
|
9a5d210bed51a779ceb5b15f720fdecf1860ff76
|
[
"MIT"
] | null | null | null |
logial_puzzle/logical_puzzle.py
|
Adeon18/logical_puzzle
|
9a5d210bed51a779ceb5b15f720fdecf1860ff76
|
[
"MIT"
] | null | null | null |
logial_puzzle/logical_puzzle.py
|
Adeon18/logical_puzzle
|
9a5d210bed51a779ceb5b15f720fdecf1860ff76
|
[
"MIT"
] | null | null | null |
'''
https://github.com/Adeon18/logical_puzzle
'''
# There are no proper commits in this repository because I
# created one repo for two tasks and then had to move
import math
def check_rows(board: list) -> bool:
'''
Check the rows of the board for correctness.
>>> check_rows(["**** ****", "***1 ****", "** 3****", "* 4 1****",\
" 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
True
'''
for row in board:
used_elems = []
for elem in row:
if elem == ' ' or elem == '*':
continue
# Check for repeated digits
if int(elem) in range(1, 10) and int(elem) not in used_elems:
used_elems.append(int(elem))
elif int(elem) in range(1, 10) and int(elem) in used_elems:
return False
return True
def check_colls(board: list) -> bool:
'''
Check the columns of the board for correctness.
>>> check_colls(["**** ****", "***1 ****", "** 3****", "* 4 1****",\
" 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
False
'''
new_lst = []
# Transpose the board and reuse the row check
for i, row in enumerate(board):
new_elem = ''
for j, _ in enumerate(row):
new_elem += board[j][i]
new_lst.append(new_elem)
return check_rows(new_lst)
def get_color_comb(board: list, horizontal_coord: int,\
vertical_coord: int) -> str:
'''
Get the data for one color combination. Return the elements that share one color.
'''
# There's definitely a better way to do this.
# Originally I wanted to do a diagonal search, but it did not work even
# though I tried many times, so I just take the coordinate and
# move down and then to the right.
line = ''
for vertical in range(vertical_coord, vertical_coord+5):
if board[vertical][horizontal_coord].isdigit():
line += board[vertical][horizontal_coord]
for horizontal in range(horizontal_coord + 1, horizontal_coord+5):
if board[vertical_coord+4][horizontal].isdigit():
line += board[vertical_coord+4][horizontal]
return line
def check_color(board: list) -> bool:
'''
Check for all colors, return False if any combination is wrong.
>>> check_color(["**** ****", "***1 ****", "** 3****", "* 4 1****",\
" 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
True
'''
dimension = 9
for i in range(0, dimension-4):
hor_coord = i
# Get the vert coord
vert_coord = math.floor(dimension/2) - i
# Get the combination of numbers that share one color
combination = get_color_comb(board, hor_coord, vert_coord)
# Check for repetition immediately
if len(combination) != len(set(combination)):
return False
return True
def validate_board(board: list) -> bool:
'''
The main function for checking the board.
>>> validate_board(["**** ****", "***1 ****", "** 3****", "* 4 1****",\
" 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
False
'''
if check_rows(board) and check_colls(board) and check_color(board):
return True
return False
if __name__ == '__main__':
import doctest
print(doctest.testmod())
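def _example_check_rows():
    # Hedged usage sketch (not part of the original file): check_rows skips
    # blanks and asterisks, and rejects a repeated digit within a row.
    assert check_rows(["1 2 3"]) is True
    assert check_rows(["1 2 1"]) is False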
| 29.318584
| 78
| 0.543616
| 437
| 3,313
| 4.006865
| 0.306636
| 0.027413
| 0.029697
| 0.009138
| 0.186179
| 0.073101
| 0.073101
| 0.073101
| 0.073101
| 0.042262
| 0
| 0.032216
| 0.297314
| 3,313
| 112
| 79
| 29.580357
| 0.719931
| 0.408391
| 0
| 0.130435
| 0
| 0
| 0.005473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.043478
| 0
| 0.326087
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261cd8b29762935b9416a0a01f195708019addab
| 1,528
|
py
|
Python
|
tests/test_signing.py
|
apache/incubator-milagro-mfa-server
|
b33dfe864ff0bcb8a26a46745b9c596d72d22ccf
|
[
"Apache-2.0"
] | 21
|
2016-09-18T19:13:58.000Z
|
2021-11-10T18:35:30.000Z
|
tests/test_signing.py
|
apache/incubator-milagro-mfa-server
|
b33dfe864ff0bcb8a26a46745b9c596d72d22ccf
|
[
"Apache-2.0"
] | 3
|
2016-09-21T14:58:41.000Z
|
2019-05-29T23:35:32.000Z
|
tests/test_signing.py
|
apache/incubator-milagro-mfa-server
|
b33dfe864ff0bcb8a26a46745b9c596d72d22ccf
|
[
"Apache-2.0"
] | 15
|
2016-05-24T11:15:47.000Z
|
2021-11-10T18:35:22.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mpin_utils.common import signMessage, verifySignature
def test_signing_valid():
message = 'Hello world!'
key = 'super secret'
expected_signature = 'f577954ea54f8e8cc1b7d5d238dde635a783a3a37a4ba44877e9f63269cd4b53'
signature = signMessage(message, key)
assert signature == expected_signature
valid, reason, code = verifySignature(message, signature, key)
assert valid
assert reason == 'Valid signature'
assert code == 200
def test_signing_invalid():
message = 'Hello world!'
key = 'super secret'
signature = 'invalid signature'
valid, reason, code = verifySignature(message, signature, key)
assert not valid
assert reason == 'Invalid signature'
assert code == 401
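# Hedged sketch (an assumption, not taken from the source): the 64-hex-digit
# signature above is consistent with HMAC-SHA256. If signMessage is an
# HMAC-SHA256 over the message, the expected value could be recomputed with
# the standard library like this.
import hashlib
import hmac
def _hmac_sha256_hex(message, key):
    return hmac.new(key.encode(), message.encode(), hashlib.sha256).hexdigest()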
| 32.510638
| 91
| 0.744764
| 194
| 1,528
| 5.829897
| 0.505155
| 0.05305
| 0.022989
| 0.028294
| 0.167993
| 0.167993
| 0.113174
| 0.113174
| 0.113174
| 0
| 0
| 0.040258
| 0.187173
| 1,528
| 46
| 92
| 33.217391
| 0.87037
| 0.492147
| 0
| 0.315789
| 0
| 0
| 0.212121
| 0.084321
| 0
| 0
| 0
| 0
| 0.368421
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261cf1e1d0b31aa16744bdc1d7356972182ab39f
| 587
|
py
|
Python
|
src/symplectic/test/test_jsonformat.py
|
pysymplectic/symplectic
|
bdb46157757eb6e1e12fd3694fdbd0bbf18a70db
|
[
"MIT"
] | null | null | null |
src/symplectic/test/test_jsonformat.py
|
pysymplectic/symplectic
|
bdb46157757eb6e1e12fd3694fdbd0bbf18a70db
|
[
"MIT"
] | 1
|
2017-11-15T22:38:40.000Z
|
2018-01-24T02:28:29.000Z
|
src/symplectic/test/test_jsonformat.py
|
pysymplectic/symplectic
|
bdb46157757eb6e1e12fd3694fdbd0bbf18a70db
|
[
"MIT"
] | null | null | null |
import tempfile
import unittest
from symplectic import jsonformat
class JSONFormatTest(unittest.TestCase):
def test_parse(self):
with tempfile.NamedTemporaryFile() as fp:
fp.write("""
{
"title": "things",
"slug": "stuff",
"date": "2017-09-14 22:21",
"author": "Foo Bar",
"contents": "stuff sure are things"
}
""".encode('utf-8'))
fp.flush()
res, = jsonformat.posts_from_json_files([fp.name])
self.assertEqual(res.title, 'things')
| 25.521739
| 62
| 0.531516
| 59
| 587
| 5.220339
| 0.745763
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.335605
| 587
| 22
| 63
| 26.681818
| 0.75641
| 0
| 0
| 0
| 0
| 0
| 0.396934
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261ded6e9abcc1091124bde1d09dfb13cef1f119
| 2,285
|
py
|
Python
|
yatube/posts/tests/test_forms.py
|
ShumilovAlexandr/hw03_forms
|
e75fd9a4db1fa7091205877f86d48613febf1484
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_forms.py
|
ShumilovAlexandr/hw03_forms
|
e75fd9a4db1fa7091205877f86d48613febf1484
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_forms.py
|
ShumilovAlexandr/hw03_forms
|
e75fd9a4db1fa7091205877f86d48613febf1484
|
[
"MIT"
] | null | null | null |
import shutil
import tempfile
from django import forms
from django.test import Client, TestCase
from django.urls import reverse
from posts.forms import PostForm
from posts.models import Group, Post, User
from django.conf import settings
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
class StaticURLTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='auth')
cls.group = Group.objects.create(
title='Test title',
slug='test-slug',
description='Test description'
)
cls.post = Post.objects.create(
pk='1',
text='Текстовый текст',
author=cls.user,
group=cls.group,
)
cls.form = PostForm()
cls.guest_client = Client()
cls.authorized_client = Client()
# Log the user in
cls.authorized_client.force_login(cls.user)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def test_create_post(self):
posts_count = Post.objects.count()
form_data = {
'text': 'Текстовый текст',
'group': self.group.id,
'author': self.user
}
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
self.assertRedirects(response, reverse('posts:profile', args=[self.user.username]))
self.assertEqual(Post.objects.count(), posts_count+1)
self.assertTrue(
Post.objects.filter(
text='Текстовый текст',
group=self.group,
author=self.user
).exists()
)
def test_title_label(self):
text_label = self.form.fields['text'].label
self.assertEqual(text_label, 'Введите текст')
def test_group_label(self):
group_label = self.form.fields['group'].label
self.assertEqual(group_label, 'Выберите группу')
def test_title_help_text(self):
title_help_text = self.form.fields['text'].help_text
self.assertEqual(title_help_text, 'Напишите Ваш комментарий')
| 27.865854
| 91
| 0.615755
| 254
| 2,285
| 5.401575
| 0.311024
| 0.039359
| 0.039359
| 0.033528
| 0.077259
| 0.046647
| 0
| 0
| 0
| 0
| 0
| 0.001214
| 0.279212
| 2,285
| 81
| 92
| 28.209877
| 0.831815
| 0.010066
| 0
| 0.095238
| 0
| 0
| 0.086245
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.095238
| false
| 0
| 0.126984
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261e064420fd7dcd06ca3011998456030259d91a
| 8,460
|
py
|
Python
|
python_module/test/unit/functional/test_functional.py
|
WestCityInstitute/MegEngine
|
f91881ffdc051ab49314b1bd12c4a07a862dc9c6
|
[
"Apache-2.0"
] | 2
|
2020-03-26T08:26:29.000Z
|
2020-06-01T14:41:38.000Z
|
python_module/test/unit/functional/test_functional.py
|
ted51/MegEngine
|
f91881ffdc051ab49314b1bd12c4a07a862dc9c6
|
[
"Apache-2.0"
] | null | null | null |
python_module/test/unit/functional/test_functional.py
|
ted51/MegEngine
|
f91881ffdc051ab49314b1bd12c4a07a862dc9c6
|
[
"Apache-2.0"
] | 1
|
2020-11-09T06:29:51.000Z
|
2020-11-09T06:29:51.000Z
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
from helpers import opr_test
import megengine.functional as F
from megengine import Buffer, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [{"input": [maskv0, xv0, yv0]}, {"input": [maskv1, xv1, yv1]}]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
# the original comparison discarded its result; assert it instead
assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
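# Hedged sketch in the cases/ref_fn pattern used above (not a test from the
# original file); F.exp as the op under test is an assumption.
def test_exp_sketch():
    data = np.random.random((4, 5)).astype(np.float32)
    cases = [{"input": data}]
    opr_test(cases, F.exp, ref_fn=np.exp)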
| 32.045455
| 88
| 0.61513
| 1,264
| 8,460
| 4
| 0.140823
| 0.055182
| 0.052611
| 0.035997
| 0.496835
| 0.433742
| 0.389438
| 0.379945
| 0.379945
| 0.375198
| 0
| 0.077893
| 0.20331
| 8,460
| 263
| 89
| 32.1673
| 0.672255
| 0.042553
| 0
| 0.273684
| 0
| 0
| 0.039674
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 1
| 0.115789
| false
| 0
| 0.026316
| 0.021053
| 0.163158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
261ea5255801b00d98a0de6fa447d7ccb3d9504f
| 2,964
|
py
|
Python
|
workflows/pipe-common/pipeline/common/container.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 126
|
2019-03-22T19:40:38.000Z
|
2022-02-16T13:01:44.000Z
|
workflows/pipe-common/pipeline/common/container.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 1,189
|
2019-03-25T10:39:27.000Z
|
2022-03-31T12:50:33.000Z
|
workflows/pipe-common/pipeline/common/container.py
|
msleprosy/cloud-pipeline
|
bccc2b196fad982380efc37a1c3785098bec6c85
|
[
"Apache-2.0"
] | 62
|
2019-03-22T22:09:49.000Z
|
2022-03-08T12:05:56.000Z
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class EnvironmentParametersParser:
def __init__(self, skip_params):
self.skip_params = skip_params
self.list_delimiter = ','
self.param_type_suffix = '_PARAM_TYPE'
self.pattern_prefix = 'p_'
self.exclude_suffix = '_exclude'
self.original_suffix = '_ORIGINAL'
self.preprocessed_types = {'common', 'input'}
def collect_params_from_env(self):
all_params = {}
file_patterns = {}
exclude_patterns = {}
param_types = {}
for name, param in os.environ.iteritems():
if name in self.skip_params:
continue
if name + self.param_type_suffix in os.environ:
if name.startswith(self.pattern_prefix):
if name.endswith(self.exclude_suffix):
exclude_patterns[
name[len(self.pattern_prefix):len(self.exclude_suffix) - 1]] = self.parse_param(param)
else:
file_patterns[name[len(self.pattern_prefix):]] = self.parse_param(param)
else:
param_type = os.environ[name + self.param_type_suffix]
param_types[name] = param_type
if param_type in self.preprocessed_types:
all_params[name] = os.environ[name + self.original_suffix]
else:
all_params[name] = param
return all_params, file_patterns, exclude_patterns, param_types
def parse_param(self, param):
return param.split(self.list_delimiter)
@classmethod
def get_env_value(cls, env_name, param_name=None, default_value=None):
if param_name is not None and param_name in os.environ:
return os.environ[param_name]
elif env_name in os.environ:
return os.environ[env_name]
elif default_value is None:
raise RuntimeError('Required parameter {} is not set'.format(env_name))
else:
return default_value
@classmethod
def has_flag(cls, env_name):
if env_name not in os.environ:
return False
if not os.environ[env_name]:
return False
if os.environ[env_name].lower() == 'true':
return True
else:
return False
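# Hedged usage sketch (not part of the original module); 'MY_ENV' is a
# hypothetical variable name.
if __name__ == '__main__':
    os.environ['MY_ENV'] = '42'
    assert EnvironmentParametersParser.get_env_value('MY_ENV', default_value='0') == '42'
    assert EnvironmentParametersParser.get_env_value('MISSING_ENV', default_value='0') == '0'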
| 39
| 114
| 0.619096
| 366
| 2,964
| 4.81694
| 0.34153
| 0.056154
| 0.031197
| 0.032331
| 0.191151
| 0.122518
| 0.086217
| 0.052184
| 0
| 0
| 0
| 0.006268
| 0.30027
| 2,964
| 75
| 115
| 39.52
| 0.84378
| 0.196694
| 0
| 0.181818
| 0
| 0
| 0.032953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.018182
| 0.018182
| 0.290909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2620addb6c0a2614912637726159387427b444d3
| 2,516
|
py
|
Python
|
python-skylark/skylark/tests/nla/test_nla.py
|
xdata-skylark/libskylark
|
89c3736136a24d519c14fc0738c21f37f1e10360
|
[
"Apache-2.0"
] | 86
|
2015-01-20T03:12:46.000Z
|
2022-01-10T04:05:21.000Z
|
python-skylark/skylark/tests/nla/test_nla.py
|
xdata-skylark/libskylark
|
89c3736136a24d519c14fc0738c21f37f1e10360
|
[
"Apache-2.0"
] | 48
|
2015-05-12T09:31:23.000Z
|
2018-12-05T14:45:46.000Z
|
python-skylark/skylark/tests/nla/test_nla.py
|
xdata-skylark/libskylark
|
89c3736136a24d519c14fc0738c21f37f1e10360
|
[
"Apache-2.0"
] | 25
|
2015-01-18T23:02:11.000Z
|
2021-06-12T07:30:35.000Z
|
import unittest
import numpy as np
import El
import skylark.nla as sl_nla
from .. import utils
class NLATestCase(unittest.TestCase):
"""Tests nla functions."""
def test_approximate_svd(self):
"""Compute the SVD of **A** such that **SVD(A) = U S V^T**."""
n = 100
# Generate random matrix
A = El.DistMatrix()
El.Uniform(A, n, n)
A = A.Matrix()
# Dimension to apply along.
k = n
U = El.Matrix()
S = El.Matrix()
V = El.Matrix()
sl_nla.approximate_svd(A, U, S, V, k = k)
# Check result
RESULT = El.Matrix()
El.Zeros(RESULT, n, n)
El.DiagonalScale( El.RIGHT, El.NORMAL, S, U );
El.Gemm( El.NORMAL, El.ADJOINT, 1, U, V, 1, RESULT );
self.assertTrue(utils.equal(A, RESULT))
def test_approximate_symmetric_svd(self):
"""Compute the SVD of symmetric **A** such that **SVD(A) = V S V^T**"""
n = 100
A = El.DistMatrix()
El.Uniform(A, n, n)
A = A.Matrix()
# Make A symmetric
for i in xrange(0, A.Height()):
for j in xrange(0, i+1):
A.Set(j,i, A.Get(i,j))
# Using the symmetric SVD
SA = El.Matrix()
VA = El.Matrix()
sl_nla.approximate_symmetric_svd(A, SA, VA, k = n)
# Check result
VAT = El.Matrix()
El.Copy(VA, VAT)
RESULT = El.Matrix()
El.Zeros(RESULT, n, n)
El.DiagonalScale( El.RIGHT, El.NORMAL, SA, VAT );
El.Gemm( El.NORMAL, El.ADJOINT, 1, VAT, VA, 1, RESULT );
self.assertTrue(utils.equal(A, RESULT))
def test_faster_least_squares_NORMAL(self):
"""Solution to argmin_X ||A * X - B||_F"""
m = 500
n = 100
# Generate problem
A, B, X, X_opt= (El.Matrix(), El.Matrix(), El.Matrix(), El.Matrix())
El.Gaussian(A, m, n)
El.Gaussian(X_opt, n, 1)
El.Zeros(B, m, 1)
El.Gemm( El.NORMAL, El.NORMAL, 1, A, X_opt, 0, B);
# Solve it using faster least squares
sl_nla.faster_least_squares(A, B, X)
# Check the norm of our solution
El.Gemm( El.NORMAL, El.NORMAL, 1, A, X, -1, B);
self.assertAlmostEqual(El.Norm(B), 0)
# Checking the solution
self.assertTrue(utils.equal(X_opt, X))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(NLATestCase)
unittest.TextTestRunner(verbosity=1).run(suite)
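# Hedged usage sketch (not part of the original file): a lower-rank call in
# the same style as test_approximate_svd above, keeping only k=10 singular
# triplets instead of a full decomposition.
def _example_low_rank_svd():
    A = El.DistMatrix()
    El.Uniform(A, 100, 100)
    A = A.Matrix()
    U, S, V = El.Matrix(), El.Matrix(), El.Matrix()
    sl_nla.approximate_svd(A, U, S, V, k=10)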
| 25.938144
| 79
| 0.543323
| 360
| 2,516
| 3.708333
| 0.275
| 0.07191
| 0.052434
| 0.041948
| 0.408989
| 0.337079
| 0.30412
| 0.242697
| 0.242697
| 0.205243
| 0
| 0.015716
| 0.31717
| 2,516
| 97
| 80
| 25.938144
| 0.76135
| 0.158585
| 0
| 0.277778
| 0
| 0
| 0.003831
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.055556
| false
| 0
| 0.092593
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2620ea952fc9ad7c4d6a79e6e5fedbfdc07b3d8b
| 563
|
py
|
Python
|
tut 9 blurring.py
|
arpit456jain/open-cv-tuts
|
2ef213b9522a145fa51342d8a1385222cbe265c3
|
[
"MIT"
] | null | null | null |
tut 9 blurring.py
|
arpit456jain/open-cv-tuts
|
2ef213b9522a145fa51342d8a1385222cbe265c3
|
[
"MIT"
] | null | null | null |
tut 9 blurring.py
|
arpit456jain/open-cv-tuts
|
2ef213b9522a145fa51342d8a1385222cbe265c3
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread("images/watter.jpeg")
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
kernel = np.ones((5,5),np.float32)/25
dst1 = cv2.filter2D(img,-1,kernel)
blur = cv2.blur(img,(5,5))
g_blur = cv2.GaussianBlur(img,(5,5),0)
median_blur = cv2.medianBlur(img,5)
titles = ['image','smooth 1','blur','gaussian blur','median blur']
images = [img,dst1,blur,g_blur,median_blur]
for i in range(5):
plt.subplot(2,3,i+1)
plt.title(titles[i])
plt.imshow(images[i],cmap = 'gray')
plt.show()
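# Hedged note (not in the original script): the 5x5 averaging kernel above is
# normalized to sum to 1, which is why cv2.filter2D(img, -1, kernel) matches
# cv2.blur(img, (5, 5)).
assert abs(float(kernel.sum()) - 1.0) < 1e-6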
| 21.653846
| 66
| 0.687389
| 99
| 563
| 3.858586
| 0.474747
| 0.04712
| 0.026178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0.129663
| 563
| 25
| 67
| 22.52
| 0.718367
| 0
| 0
| 0
| 0
| 0
| 0.1121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2621c3a515fc4c1d4a5faef8759aaadc1e7eaeca
| 8,587
|
py
|
Python
|
backend/main.py
|
jinjf553/hook_up_rent
|
401ce94f3140d602bf0a95302ee47b7ef213b911
|
[
"MIT"
] | null | null | null |
backend/main.py
|
jinjf553/hook_up_rent
|
401ce94f3140d602bf0a95302ee47b7ef213b911
|
[
"MIT"
] | null | null | null |
backend/main.py
|
jinjf553/hook_up_rent
|
401ce94f3140d602bf0a95302ee47b7ef213b911
|
[
"MIT"
] | null | null | null |
import time
from datetime import timedelta
from random import choice
from typing import List, Optional
from fastapi import Depends, FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
from passlib.context import CryptContext
from pydantic import BaseModel
from sqlalchemy.orm import Session
from config import ACCESS_TOKEN_EXPIRE_MINUTES, HEADERS, STATIC_DIR
from orm import DBSession, DBUser
from utils import create_access_token, get_current_user
app = FastAPI()
'''Run with: uvicorn main:app --reload --host 0.0.0.0 --port 8000'''
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
class User(BaseModel):
# id: int
username: str
password: str
status: Optional[int] = 0
description: Optional[str] = ''
body: Optional[dict] = {'token': ''}
class Config:
orm_mode = True
class Houses(BaseModel):
title: str
description: str
price: str
size: str
oriented: str
roomType: str
floor: str
community: str
houseImg: str
supporting: str
class Config:
orm_mode = True
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/user/registered", response_model=User)
async def user_register(user: User, db: Session = Depends(DBSession)):
# Hash the password (bcrypt)
password = CryptContext(schemes=["bcrypt"],
deprecated="auto").hash(user.password)
db_user = DBUser(username=user.username, password=password)
DBUser.add(db, db_user)
db_user.status, db_user.description = 200, '注册陈功!'
return db_user
@app.post("/user/login", response_model=User)
async def user_login(user: User, db: Session = Depends(DBSession)):
db_user = DBUser.get_by_username(db, user.username)
# Context for verifying the bcrypt password hash
verify = CryptContext(schemes=["bcrypt"], deprecated="auto")
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
if not db_user:
db_user = User(username='', password='')
db_user.status = 300
db_user.description = '用户不存在!'
return db_user
elif not verify.verify(user.password, db_user.password):
db_user.status = 300
db_user.description = '你的账号或密码错误!'
return db_user
else:
db_user.status, db_user.description = 200, '账号登录成功!'
db_user.body['token'] = access_token
print(db_user.status)
return db_user
@app.get("/user", response_model=User)
async def read_users_me(username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
print('login_username: ', username, time.strftime('%M%S'))
db_user: User = DBUser.get_by_username(db, username)
if not db_user or not username:
db_user = User(username='', password='', status=400, description='登录信息失效,请重新登录!')
return db_user
db_user.description, db_user.status = '成功', 200
if 'token' in db_user.body:
db_user.body.pop('token')
db_user.body.update({'avatar': choice(HEADERS),
'nickname': f'好客_{str(db_user.id).rjust(6, "0")}',
'gender': choice(['男', '女']),
'phone': '小米', 'id': db_user.id})
return db_user
@app.get("/houses/condition")
async def get_houses_condition(id: str, db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功', 'body': {
'area': {'label': '区域', 'value': 'area', 'children': [{'label': '不限', 'value': 'null'}, {'label': '朝阳', 'value': 'AREA|zhaoyang'}]},
'characteristic': [{'label': '集中供暖', 'value': 'CHAR|jizhonggongnuan'}],
'floor': [{'label': '高楼层', 'value': 'FLOOR|1'}],
'rentType': [{'label': '不限', 'value': 'null'}],
'oriented': [{'label': '东', 'value': 'ORIEN|1'}],
'price': [{'label': '不限', 'value': 'null'}],
'roomType': [{'label': '一室', 'value': 'ROOM|1'}],
'subway': {'label': '地铁', 'value': 'subway', 'children': [{'label': '不限', 'value': 'null'}]}
}}
if id == 'AREA|1111':
return response_json
else:
response_json['body']['area']['children'] = [{'label': '不限', 'value': 'null'}, {'label': '宝山', 'value': 'AREA|baoshan'}]
return response_json
@app.get("/houses")
async def get_houses(cityId, area, mode, price, more, start, end, db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功', 'body': {
'list': [{'houseCode': '11', 'title': '线上', 'desc': 'subtitle', 'houseImg': 'static/images/轮播1.jpg', 'tags': ['近地铁'], 'price': 2000}]
}}
if area == 'AREA|zhaoyang':
return response_json
else:
response_json['body']['list'][0]['title'] = '线下'
return response_json
@app.get("/houses/params")
async def get_houses_params():
response_json = {'status': 200, 'description': '请求成功', 'body': {
'floor': [{'value': '1', 'label': '高楼层'}, {'value': '2', 'label': '中楼层'}, {'value': '3', 'label': '低楼层'}],
'roomType': [{'value': '1', 'label': '一室'}, {'value': '2', 'label': '二室'}, {'value': '3', 'label': '三室'}, {'value': '4', 'label': '四室'}],
'oriented': [{'value': '1', 'label': '东'}, {'value': '2', 'label': '南'}, {'value': '3', 'label': '西'}, {'value': '4', 'label': '北'}]}}
return response_json
@app.post("/house/image")
async def post_houses_image(file: List[UploadFile] = File(...), username: User = Depends(get_current_user)):
response_json = {'status': 200, 'description': '请求成功', 'body': []}
for x in file:
with open(f'{STATIC_DIR}/{x.filename}', 'wb') as f:
f.write(await x.read())
response_json['body'].append(x.filename)
return response_json
@app.get("/houses/{roomId}")
async def get_houses_room(roomId: int, db: Session = Depends(DBSession)):
response_json = {'status': 200,
'description': '请求成功',
'body': {'houseCode': '1111',
'title': '整租 中山路 历史最低点',
'community': '中山花园',
'description':
'近地铁,附近有商场!254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做',
'size': 100,
'floor': '高楼层',
'price': 3000,
'oriented': ['南'],
'roomType': '三室',
'supporting': ['衣柜', '洗衣机'],
'tags': ['近地铁', '集中供暖', '新上', '随时看房'],
'houseImg': [
'static/images/轮播1.jpg',
'static/images/轮播2.jpg',
'static/images/轮播3.jpg'
]}}
return response_json
@app.get("/user/houses")
async def get_user_houses(username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
print('username: ', username, time.strftime('%M%S'), type(username))
response_json = {'status': 200, 'description': '请求成功', 'body': [
{'houseCode': '1111',
'title': '整租 中山路 历史最低点',
'desc':
'近地铁,附近有商场!254对数据集跑一下第二版仿真工程。',
'price': 3000,
'tags': ['近地铁', '集中供暖', '新上', '随时看房'],
'houseImg': 'static/images/轮播1.jpg'}
]}
if not username:
response_json = {'status': 400, 'description': 'token已过期', 'body': []}
print(username)
return response_json
@app.post("/user/houses")
async def post_user_houses(house: Houses, username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功'}
if not username:
response_json = {'status': 400, 'description': 'token已过期'}
# print(house)
return response_json
@app.get("/area/community")
async def get_area_community(name: str, id: str):
response_json = {'status': 200, 'description': '请求成功', 'body': [
{'community': '123', 'communityName': name}
]}
return response_json
@app.get("/items/{item_id}")
def read_item(item_id: int, q: str = None):
return {"item_id": item_id, "q": q}
| 37.995575
| 448
| 0.584721
| 965
| 8,587
| 5.078756
| 0.241451
| 0.0404
| 0.036727
| 0.040808
| 0.449704
| 0.35809
| 0.310549
| 0.227301
| 0.210977
| 0.189349
| 0
| 0.0249
| 0.242343
| 8,587
| 225
| 449
| 38.164444
| 0.728405
| 0.003494
| 0
| 0.235955
| 0
| 0.005618
| 0.237422
| 0.046777
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011236
| false
| 0.044944
| 0.067416
| 0.011236
| 0.286517
| 0.022472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
262261b72d2641412ed943023fa74d4339a36852
| 7,665
|
py
|
Python
|
mmdet/models/losses/iou_losses.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 65
|
2021-12-30T03:30:52.000Z
|
2022-03-25T01:44:32.000Z
|
mmdet/models/losses/iou_losses.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 1
|
2021-12-31T01:51:35.000Z
|
2022-01-01T14:42:37.000Z
|
mmdet/models/losses/iou_losses.py
|
jie311/miemiedetection
|
b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6
|
[
"Apache-2.0"
] | 7
|
2021-12-31T09:25:06.000Z
|
2022-03-10T01:25:09.000Z
|
#! /usr/bin/env python
# coding=utf-8
# ================================================================
#
# Author : miemie2013
# Created date: 2020-10-15 14:50:03
# Description : pytorch_ppyolo
#
# ================================================================
import torch
import torch.nn as nn
import torch as T
import torch.nn.functional as F
import numpy as np
from mmdet.models.bbox_utils import bbox_iou
class IouLoss(nn.Module):
"""
iou loss, see https://arxiv.org/abs/1908.03851
loss = 1.0 - iou * iou
Args:
loss_weight (float): iou loss weight, default is 2.5
max_height (int): max height of input to support random shape input
max_width (int): max width of input to support random shape input
ciou_term (bool): whether to add ciou_term
loss_square (bool): whether to square the iou term
"""
def __init__(self,
loss_weight=2.5,
giou=False,
diou=False,
ciou=False,
loss_square=True):
super(IouLoss, self).__init__()
self.loss_weight = loss_weight
self.giou = giou
self.diou = diou
self.ciou = ciou
self.loss_square = loss_square
def forward(self, pbox, gbox):
iou = bbox_iou(
pbox, gbox, giou=self.giou, diou=self.diou, ciou=self.ciou)
if self.loss_square:
loss_iou = 1 - iou * iou
else:
loss_iou = 1 - iou
loss_iou = loss_iou * self.loss_weight
return loss_iou
class IouAwareLoss(IouLoss):
"""
iou aware loss, see https://arxiv.org/abs/1912.05992
Args:
loss_weight (float): iou aware loss weight, default is 1.0
max_height (int): max height of input to support random shape input
max_width (int): max width of input to support random shape input
"""
def __init__(self, loss_weight=1.0, giou=False, diou=False, ciou=False):
super(IouAwareLoss, self).__init__(
loss_weight=loss_weight, giou=giou, diou=diou, ciou=ciou)
def forward(self, ioup, pbox, gbox):
iou = bbox_iou(
pbox, gbox, giou=self.giou, diou=self.diou, ciou=self.ciou)
# iou.requires_grad = False
iou = iou.detach()
loss_iou_aware = F.binary_cross_entropy_with_logits(
ioup, iou, reduction='none')
loss_iou_aware = loss_iou_aware * self.loss_weight
return loss_iou_aware
class MyIOUloss(nn.Module):
def __init__(self, reduction="none", loss_type="iou"):
super(MyIOUloss, self).__init__()
self.reduction = reduction
self.loss_type = loss_type
def forward(self, pred, target):
'''
Boxes are given in (cx, cy, w, h) format.
'''
assert pred.shape[0] == target.shape[0]
boxes1 = pred
boxes2 = target
# Convert to top-left / bottom-right corner format
boxes1_x0y0x1y1 = torch.cat([boxes1[:, :2] - boxes1[:, 2:] * 0.5,
boxes1[:, :2] + boxes1[:, 2:] * 0.5], dim=-1)
boxes2_x0y0x1y1 = torch.cat([boxes2[:, :2] - boxes2[:, 2:] * 0.5,
boxes2[:, :2] + boxes2[:, 2:] * 0.5], dim=-1)
# Areas of the two boxes
boxes1_area = (boxes1_x0y0x1y1[:, 2] - boxes1_x0y0x1y1[:, 0]) * (boxes1_x0y0x1y1[:, 3] - boxes1_x0y0x1y1[:, 1])
boxes2_area = (boxes2_x0y0x1y1[:, 2] - boxes2_x0y0x1y1[:, 0]) * (boxes2_x0y0x1y1[:, 3] - boxes2_x0y0x1y1[:, 1])
# Top-left / bottom-right corners of the intersection box
left_up = torch.maximum(boxes1_x0y0x1y1[:, :2], boxes2_x0y0x1y1[:, :2])
right_down = torch.minimum(boxes1_x0y0x1y1[:, 2:], boxes2_x0y0x1y1[:, 2:])
# Intersection area inter_area, then the IoU
inter_section = F.relu(right_down - left_up)
inter_area = inter_section[:, 0] * inter_section[:, 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = inter_area / (union_area + 1e-16)
if self.loss_type == "iou":
loss = 1 - iou ** 2
elif self.loss_type == "giou":
# Top-left / bottom-right corners of the enclosing box
enclose_left_up = torch.minimum(boxes1_x0y0x1y1[:, :2], boxes2_x0y0x1y1[:, :2])
enclose_right_down = torch.maximum(boxes1_x0y0x1y1[:, 2:], boxes2_x0y0x1y1[:, 2:])
# Area of the enclosing box
enclose_wh = enclose_right_down - enclose_left_up
enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1]
giou = iou - (enclose_area - union_area) / enclose_area
# Clamp giou to the interval [-1.0, 1.0]
giou = torch.clamp(giou, -1.0, 1.0)
loss = 1 - giou
if self.reduction == "mean":
loss = loss.mean()
elif self.reduction == "sum":
loss = loss.sum()
return loss
class GIoULoss(object):
"""
Generalized Intersection over Union, see https://arxiv.org/abs/1902.09630
Args:
loss_weight (float): giou loss weight, default as 1
eps (float): epsilon to avoid divide by zero, default as 1e-10
reduction (string): Options are "none", "mean" and "sum". default as none
"""
def __init__(self, loss_weight=1., eps=1e-10, reduction='none'):
self.loss_weight = loss_weight
self.eps = eps
assert reduction in ('none', 'mean', 'sum')
self.reduction = reduction
def bbox_overlap(self, box1, box2, eps=1e-10):
"""calculate the iou of box1 and box2
Args:
box1 (Tensor): box1 with the shape (..., 4)
box2 (Tensor): box1 with the shape (..., 4)
eps (float): epsilon to avoid divide by zero
Return:
iou (Tensor): iou of box1 and box2
overlap (Tensor): overlap of box1 and box2
union (Tensor): union of box1 and box2
"""
x1, y1, x2, y2 = box1
x1g, y1g, x2g, y2g = box2
xkis1 = torch.maximum(x1, x1g)
ykis1 = torch.maximum(y1, y1g)
xkis2 = torch.minimum(x2, x2g)
ykis2 = torch.minimum(y2, y2g)
w_inter = F.relu(xkis2 - xkis1)
h_inter = F.relu(ykis2 - ykis1)
overlap = w_inter * h_inter
area1 = (x2 - x1) * (y2 - y1)
area2 = (x2g - x1g) * (y2g - y1g)
union = area1 + area2 - overlap + eps
iou = overlap / union
return iou, overlap, union
def __call__(self, pbox, gbox, iou_weight=1., loc_reweight=None):
# x1, y1, x2, y2 = paddle.split(pbox, num_or_sections=4, axis=-1)
# x1g, y1g, x2g, y2g = paddle.split(gbox, num_or_sections=4, axis=-1)
# torch.split differs slightly from paddle: torch's second argument is the size of each chunk, while paddle's is the number of chunks.
x1, y1, x2, y2 = torch.split(pbox, split_size_or_sections=1, dim=-1)
x1g, y1g, x2g, y2g = torch.split(gbox, split_size_or_sections=1, dim=-1)
box1 = [x1, y1, x2, y2]
box2 = [x1g, y1g, x2g, y2g]
iou, overlap, union = self.bbox_overlap(box1, box2, self.eps)
xc1 = torch.minimum(x1, x1g)
yc1 = torch.minimum(y1, y1g)
xc2 = torch.maximum(x2, x2g)
yc2 = torch.maximum(y2, y2g)
area_c = (xc2 - xc1) * (yc2 - yc1) + self.eps
miou = iou - ((area_c - union) / area_c)
if loc_reweight is not None:
loc_reweight = torch.reshape(loc_reweight, shape=(-1, 1))
loc_thresh = 0.9
giou = 1 - (1 - loc_thresh
) * miou - loc_thresh * miou * loc_reweight
else:
giou = 1 - miou
if self.reduction == 'none':
loss = giou
elif self.reduction == 'sum':
loss = torch.sum(giou * iou_weight)
else:
loss = torch.mean(giou * iou_weight)
return loss * self.loss_weight
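# Hedged usage sketch (not part of the original module): the IoU of a box
# with itself is 1, so MyIOUloss should be ~0 up to floating-point error.
def _example_identity_iou():
    pred = torch.tensor([[10.0, 10.0, 4.0, 4.0]])  # cx, cy, w, h
    loss_fn = MyIOUloss(reduction="mean", loss_type="iou")
    assert float(loss_fn(pred, pred)) < 1e-6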
| 35.486111
| 119
| 0.562818
| 983
| 7,665
| 4.219736
| 0.208545
| 0.043394
| 0.027001
| 0.027724
| 0.292671
| 0.245419
| 0.148505
| 0.136933
| 0.080039
| 0.080039
| 0
| 0.063822
| 0.302935
| 7,665
| 215
| 120
| 35.651163
| 0.712521
| 0.241879
| 0
| 0.089431
| 0
| 0
| 0.008421
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 1
| 0.073171
| false
| 0
| 0.04878
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
262e2a6baf3b88e437fd423ee773c563521113f5
| 337
|
py
|
Python
|
src/movie.py
|
cannibalcheeseburger/RaMu-Discord-bot
|
6644cec0d249e6a5061ed16035102f97f9fc7ba7
|
[
"MIT"
] | 2
|
2020-05-28T12:50:33.000Z
|
2020-05-29T09:06:01.000Z
|
src/movie.py
|
cannibalcheeseburger/RaMu-Discord-bot
|
6644cec0d249e6a5061ed16035102f97f9fc7ba7
|
[
"MIT"
] | 2
|
2021-03-31T19:55:09.000Z
|
2021-12-13T20:42:08.000Z
|
src/movie.py
|
cannibalcheeseburger/RaMu-Discord-bot
|
6644cec0d249e6a5061ed16035102f97f9fc7ba7
|
[
"MIT"
] | null | null | null |
import imdb
def movie(name):
ia = imdb.IMDb()
movie_obj = ia.search_movie(name)
el = ia.get_movie(movie_obj[0].movieID)
title = str(el.get('title'))
year = str(el.get('year'))
plot = str(el.get('plot')[0])
ty = False
if str(el.get('kind')) == 'tv series':
ty = True
return (title, year, plot, ty)
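# Hedged usage sketch (not part of the original module): requires network
# access and the IMDbPY package.
if __name__ == '__main__':
    title, year, plot, is_series = movie('Inception')
    print(title, year, is_series)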
| 25.923077
| 43
| 0.581602
| 54
| 337
| 3.555556
| 0.444444
| 0.104167
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007722
| 0.231454
| 337
| 13
| 44
| 25.923077
| 0.733591
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|