# === testsuite/tests/performance/conftest.py | mkudlej/3scale-tests | Apache-2.0 ===
"""
Conftest for performance tests
"""
import asyncio
import os
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
import pytest
from hyperfoil import HyperfoilClient
from testsuite.perf_utils import HyperfoilUtils
from testsuite import rawobj
from testsuite.utils import randomize, blame
@pytest.fixture(scope='session')
def hyperfoil_client(testconfig):
"""Hyperfoil client"""
client = HyperfoilClient(testconfig['hyperfoil']['url'])
return client
@pytest.fixture(scope='session')
def root_path():
"""Root path for performance tests"""
return Path(os.path.abspath(__file__)).parent
@pytest.fixture(scope='module')
def number_of_products():
"""Number of created services (products)"""
return 1
@pytest.fixture(scope='module')
def number_of_backends():
"""Number of created backends for single service (product)"""
return 1
@pytest.fixture(scope='module')
def number_of_apps():
"""Number of created application for single service (product)"""
return 1
@pytest.fixture(scope='module')
def hyperfoil_utils(hyperfoil_client, template, request):
"""Init of hyperfoil utils"""
utils = HyperfoilUtils(hyperfoil_client, template)
request.addfinalizer(utils.finalizer)
return utils
@pytest.fixture(scope='module')
def shared_template(testconfig):
"""Shared template for hyperfoil test"""
shared_template = testconfig.get('hyperfoil', {}).get('shared_template', {})
return shared_template.to_dict()
@pytest.fixture(scope='module')
def applications(services, custom_application, lifecycle_hooks, number_of_apps):
"""Create multiple application for each service"""
def _create_apps(svc):
plan = svc.app_plans.list()[0]
return custom_application(rawobj.Application(randomize("App"), plan), hooks=lifecycle_hooks)
loop = asyncio.get_event_loop()
apps = []
futures = []
with ThreadPoolExecutor() as pool:
for svc in services:
futures += [
loop.run_in_executor(pool, _create_apps, svc)
for _ in range(number_of_apps)]
apps = loop.run_until_complete(asyncio.gather(*futures))
return apps
# pylint: disable=too-many-arguments
@pytest.fixture(scope='module')
def services(request, custom_backend, custom_service, custom_app_plan, number_of_products,
number_of_backends, service_proxy_settings, service_settings, private_base_url, lifecycle_hooks):
"""Create multiple services with multiple backends"""
def _create_services():
backends_mapping = {}
for j in range(number_of_backends):
backends_mapping[f"/{j}"] = custom_backend(endpoint=private_base_url("httpbin"))
service_settings.update({"name": blame(request, randomize("perf"))})
svc = custom_service(service_settings, service_proxy_settings, backends_mapping, hooks=lifecycle_hooks)
custom_app_plan(rawobj.ApplicationPlan(randomize("AppPlan")), svc)
return svc
loop = asyncio.get_event_loop()
services = []
with ThreadPoolExecutor() as pool:
futures = [
loop.run_in_executor(pool, _create_services)
for _ in range(number_of_products)]
services = loop.run_until_complete(asyncio.gather(*futures))
return services
@pytest.fixture(scope='module')
def promoted_services(services, production_gateway):
"""Promotes service and reloads production gateway"""
for svc in services:
version = svc.proxy.list().configs.latest()['version']
svc.proxy.list().promote(version=version)
production_gateway.reload()
return services
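
# Editor's sketch (not part of the repository): the count fixtures above are
# module-scoped, so an individual test module can scale the load simply by
# redefining them, using standard pytest fixture overriding, e.g.:
#
#     import pytest
#
#     @pytest.fixture(scope='module')
#     def number_of_apps():
#         """Create three applications per service instead of one"""
#         return 3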

# === lexic/keyword_filer.py | virantha/lexic | Apache-2.0 ===
import logging, os, sys, shutil, hashlib, time
import functools
import yaml
from pathlib import Path
from PyPDF2 import PdfFileReader
from dateparser.search import search_dates
from datetime import datetime
import pytz
from textblob import TextBlob
logger = logging.getLogger(__name__)
class KeywordFiler:
def __init__(self, config, pdf_filename):
#self._load_yaml_and_validate(keyword_filename)
self.pdf_filename = pdf_filename
self.reader = PdfFileReader(pdf_filename)
self.root_path = config['root']
self.default_path = config['default']
self.folders_to_keywords = config['yaml']['folders']
logger.debug(f'keywords file: {self.folders_to_keywords}')
self.keywords_to_folders = self.reverse_keyword_dict(self.folders_to_keywords)
def _load_yaml_and_validate(self, keyword_filename):
with open(keyword_filename) as f:
            self.yaml_config = yaml.safe_load(f)
        file_desc = f'YAML keyword file {keyword_filename}'
assert 'root' in self.yaml_config, f'{file_desc} must contain a root filing folder'
assert 'default' in self.yaml_config, f'{file_desc} must contain a default folder'
assert 'folders' in self.yaml_config, f'{file_desc} must contain a folders section'
self.root_path = self.yaml_config['root']
self.folders_to_keywords = self.yaml_config['folders']
logger.debug(f'keywords file: {self.folders_to_keywords}')
self.keywords_to_folders = self.reverse_keyword_dict(self.folders_to_keywords)
def iter_page_text(self):
num_pages = self.reader.getNumPages()
logger.debug(f'Found {num_pages} pages to scan in {self.pdf_filename}')
for page_num in range(num_pages):
text = self.reader.getPage(page_num).extractText()
text = text.encode('ascii', 'ignore')
text = text.decode('utf-8')
text = text.replace('\n', ' ')
yield text
def reverse_keyword_dict(self, folder_dict):
keywords_to_folders = {}
for folder, keyword_list in folder_dict.items():
for keyword in keyword_list:
assert keyword not in keywords_to_folders
keywords_to_folders[keyword] = folder
return keywords_to_folders
def find_matching_folder(self):
# Iterate through each page and search for each
keywords = list(self.keywords_to_folders.keys())
default_folder = self.default_path
folder = None
for page in self.iter_page_text():
for keyword in keywords:
if keyword in page.lower():
folder = self.keywords_to_folders[keyword]
logger.debug(f'Found matching keyword: {keyword} -> folder:{folder}')
return folder
# No match for folder so we need to set it to the default
return default_folder
def find_noun_phrases(self):
first_page = next(self.iter_page_text())
tb = TextBlob(first_page)
return tb.noun_phrases
def find_closest_date(self):
# Go through every page and search for dates using dateparser
# At the end, select the date closest to (and older) than the current date
# If no dates found (with full day month year), just use today's date
dates = []
for page_num, page in enumerate(self.iter_page_text()):
page_dates = search_dates(page, settings={'RETURN_AS_TIMEZONE_AWARE':False})
logger.debug(f'Found dates on page {page_num}')
logger.debug(page_dates)
if page_dates is not None:
for text, dt in page_dates:
if dt.year == 1900:
pass
else:
dates.append(pytz.utc.localize(dt))
#dates.append(dt.replace(tzinfo=None)))
# Sort date list
dates = sorted(dates)
logger.debug(dates)
# Now, iterate through list until we're at or above today's date
now = pytz.utc.localize(datetime.now())
if len(dates) == 0:
newest_date = now
else:
newest_date = dates[0]
for dt in dates:
if dt > now:
break
newest_date = dt
# Make sure the timezone is the local timezone
local_timezone = datetime.now().astimezone().tzinfo
logger.debug(f'Local timezone is {local_timezone}')
newest_date = newest_date.replace(tzinfo=local_timezone)
logger.info(f'Using date {newest_date} as creation date for document')
        return newest_date
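
# Editor's usage sketch (hypothetical config values; the real `config` dict is
# supplied by the application and must contain 'root', 'default' and
# config['yaml']['folders'], as read by __init__ above):
#
#     config = {'root': '/home/me/filing',
#               'default': 'unsorted',
#               'yaml': {'folders': {'taxes': ['irs', 'w-2'],
#                                    'travel': ['boarding pass', 'itinerary']}}}
#     filer = KeywordFiler(config, 'scan.pdf')
#     folder = filer.find_matching_folder()   # keyword match, or 'unsorted'
#     doc_date = filer.find_closest_date()    # best guess at the creation date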

# === mediascope_api/crossweb/tasks.py | MEDIASCOPE-JSC/mediascope-api-lib | BSD-3-Clause ===
import json
import pandas as pd
import numpy as np
import time
import datetime as dt
from pandas import DataFrame
from ..core import net
from . import catalogs
from ..core import errors
from ..core import utils
from . import checks
from pyparsing import (
Word,
delimitedList,
Group,
alphas,
alphanums,
Forward,
oneOf,
quotedString,
infixNotation,
opAssoc,
restOfLine,
CaselessKeyword,
ParserElement,
pyparsing_common as ppc
)
class CrossWebTask:
task_urls = {
'audience': '/task/media',
'total': '/task/media-total',
'ad': '/task/advertisiment'
}
def __new__(cls, settings_filename=None, cache_path=None, *args, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super(CrossWebTask, cls).__new__(cls, *args)
return cls.instance
def __init__(self, settings_filename=None, cache_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.rnet = net.MediascopeApiNetwork(settings_filename, cache_path)
self.sql_parser = self._prepare_sql_parser()
self.usetypes = self.get_usetype()
self.cats = catalogs.CrossWebCats()
self.units = self.cats.get_media_unit()
self.task_info = dict()
self.task_info['task'] = dict()
self.media_attribs = self.cats.media_attribs[['sliceUnit', 'entityTitle', 'optionValue', 'optionName']].copy()
self.media_attribs['optionValue'] = self.media_attribs['optionValue'].astype('int32')
self.checks_module = checks.CrossWebChecker(self.cats)
@staticmethod
def _prepare_sql_parser():
"""
Подготовка SQL-like парсера, для разбора условий в фильтрах
Returns
-------
simple_sql : obj
Объект класса отвечающего за парсинг
"""
# define SQL tokens
select_stmt = Forward()
AND, OR, IN, NIN = map(
CaselessKeyword, "and or in nin".split()
)
ident = Word(alphas, alphanums + "_$").setName("identifier")
column_name = delimitedList(ident, ".", combine=True).setName("column name")
column_name.addParseAction()
binop = oneOf("= != > < <= >=", caseless=True)
real_num = ppc.real()
int_num = ppc.signed_integer()
column_rval = (
real_num | int_num | quotedString | column_name
) # need to add support for alg expressions
where_condition = Group(
(column_name + binop + column_rval)
| (column_name + IN + Group("(" + delimitedList(column_rval) + ")"))
| (column_name + IN + Group("(" + select_stmt + ")"))
)
where_expression = infixNotation(
where_condition,
[(AND, 2, opAssoc.LEFT), (OR, 2, opAssoc.LEFT), ],
)
# define the grammar
select_stmt <<= where_expression
simple_sql = select_stmt
# define Oracle comment format, and ignore them
oracle_sql_comment = "--" + restOfLine
simple_sql.ignore(oracle_sql_comment)
return simple_sql
def _get_point(self, left_obj, logic_oper, right_obj):
"""
Формирует объект - point понятный для API
Parameters
----------
left_obj : str
Левая часть выражения
logic_oper : str
Логический оператор
right_obj : obj
Правая часть выражения
Returns
-------
point : dict
Объект - point понятный для API
"""
        # adjust demo attributes: translate names into identifiers
        # if left_obj in self.demo_dict:
        #    left_obj = self.demo_dict[left_obj]['v']
        # check the logic
point = {}
if logic_oper == 'in':
            # expect a list of attributes on the right-hand side; iterate over it
if type(right_obj) == list:
point = {"unit": left_obj, "relation": "IN", "value": []}
for robj in right_obj:
if type(robj) == str and (robj == '(' or robj == ')'):
                    # skip brackets; the objects are already in a separate list
continue
                # build the condition in JSON format
point['value'].append(robj)
elif logic_oper == '!=':
point = {"unit": left_obj, "relation": "NEQ", "value": right_obj}
elif logic_oper == '>':
point = {"unit": left_obj, "relation": "GT", "value": right_obj}
elif logic_oper == '<':
point = {"unit": left_obj, "relation": "LT", "value": right_obj}
elif logic_oper == '>=':
point = {"unit": left_obj, "relation": "GTE", "value": right_obj}
elif logic_oper == '<=':
point = {"unit": left_obj, "relation": "LTE", "value": right_obj}
else:
point = {"unit": left_obj, "relation": "EQ", "value": right_obj}
return point
def _find_points(self, obj):
"""
        Find point-type objects in the source object and convert them to the API format
"""
if type(obj) == list:
if len(obj) == 3 and type(obj[0]) == str and obj[1] in ['=', '!=', 'in', 'nin', ">", "<", ">=", "<="]:
return self._get_point(obj[0], obj[1], obj[2])
i = 0
while i < len(obj):
obj_item = obj[i]
if type(obj_item) == list:
obj[i] = self._find_points(obj_item)
i += 1
return obj
def _parse_expr(self, obj):
"""
Преобразует выражение для фильтрации из набора вложенных списков в формат API
Parameters
----------
obj : dict | list
Объект с условиями фильтрации в виде набора вложенных списков, полученный после работы SQL парсера
Returns
-------
jdat : dict
Условия фильтрации в формате API
"""
if type(obj) == list:
jdat = {}
for obj_item in obj:
if type(obj_item) == list:
ret_data = self._parse_expr(obj_item)
if jdat.get('children') is None:
jdat['children'] = [ret_data]
else:
jdat['children'].append(ret_data)
elif type(obj_item) == dict: # and 'point' in obj_item.keys():
if obj_item.get('elements') is None:
if jdat.get('elements') is None:
jdat['elements'] = []
jdat['elements'].append(obj_item)
else:
if jdat.get('children') is None:
jdat['children'] = []
jdat['children'].append(obj_item)
elif type(obj_item) == str and obj_item in ['or', 'and']:
jdat["operand"] = obj_item.upper()
return jdat
elif type(obj) == dict:
jdat = {'elements': []}
jdat['elements'].append(obj)
jdat["operand"] = 'OR'
return jdat
def _sql_to_json(self, sql_text):
"""
Преобразует условие фильтрации записанное в SQL натации, в формат API
Parameters
----------
sql_text : str
Текст условия в SQL формате
Returns
-------
obj : dict
Условия фильтрации в формате API
"""
sql_obj = self.sql_parser.parseString(sql_text)
#sql_obj.pprint()
s = sql_obj.asList()[0]
prep_points = self._find_points(s)
return self._parse_expr(prep_points)
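    # Editor's note (illustrative, inferred from _get_point/_parse_expr above,
    # not verified against the Mediascope API): a condition such as
    #     "sex = 1 and occupation in (1, 2)"
    # is first parsed by the pyparsing grammar into nested lists and then
    # converted into roughly
    #     {"operand": "AND",
    #      "elements": [{"unit": "sex", "relation": "EQ", "value": 1},
    #                   {"unit": "occupation", "relation": "IN", "value": [1, 2]}]}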
@staticmethod
def _get_sql_from_list(obj_name, obj_data, oper):
result_text = ''
if obj_data is not None:
if type(obj_data) == list:
if len(obj_data) > 1:
result_text = f"{obj_name} {oper} ({','.join(str(x) for x in obj_data)})"
elif len(obj_data) == 1:
result_text = f"{obj_name} = { obj_data[0]}"
elif type(obj_data) == str:
result_text = f"{obj_name} = { obj_data}"
return result_text
def get_usetype(self):
"""
Получить списки доступных для использования в заданиях:
- статистик
- срезов
- фильтров
Returns
-------
info : dict
Словарь с доступными списками
"""
data = self.rnet.send_request_lo('get', '/dictionary/common/use-type', use_cache=True)
res = {}
if data is None or type(data) != dict:
return None
if 'data' not in data:
return None
res['id'] = []
res['name'] = []
for item in data['data']:
# print(item)
# print(type(item))
res['id'].append(item['id'])
res['name'].append(item['name'])
return pd.DataFrame(res)
def _check_units_in_task(self, statistics, slices, filters):
if type(statistics) == list:
# self.units
return None
def _add_filter(self, tsk, filter_obj, filter_name):
if filter_obj is not None:
if type(filter_obj) == dict:
                tsk['filter'][filter_name] = filter_obj
elif type(filter_obj) == str:
tsk['filter'][filter_name] = self._sql_to_json(filter_obj)
return tsk
def _add_range_filter(self, tsk, date_filter):
        # Add the date-range filter
if date_filter is not None and type(date_filter) == list and len(date_filter) > 0:
date_ranges = {
"operand": "OR",
"children": []
}
for dr in date_filter:
date_ranges['children'].append({
"operand": "AND",
"elements": [
{
"unit": "researchDate",
"relation": "GTE",
"value": dr[0]
},
{
"unit": "researchDate",
"relation": "LTE",
"value": dr[1]
}
]
})
tsk['filter']['dateFilter'] = date_ranges
@staticmethod
def _add_usetype_filter(tsk, usetype_filter):
        # Add the usetype filter
if usetype_filter is not None and type(usetype_filter) == list and len(usetype_filter) > 0:
usetype = {"operand": "OR", "elements": [{
"unit": "useTypeId",
"relation": "IN",
"value": usetype_filter
}]}
tsk['filter']['useTypeFilter'] = usetype
@staticmethod
def _add_scales(tsk, scales):
        # Add scales
if scales is not None:
scales_json = {}
for scale, val in scales.items():
scales_json[scale] = []
for v in val:
scales_json[scale].append({"from": v[0], "to": v[1]})
tsk['scales'] = scales_json
@staticmethod
def _add_slices(tsk, slices):
        # Add slices
if slices is not None:
tsk['slices'] = slices
def build_task(self, task_type, task_name='', date_filter=None, usetype_filter=None, geo_filter=None,
demo_filter=None, mart_filter=None, slices=None, statistics=None, scales=None):
"""
Формирует текст задания для расчета статистик
Parameters
----------
task_type : str
Тип задания, возможные варианты:
- media
task_name : str
Название задания, если не задано - формируется как: пользователь + типа задания + дата/время
date_filter : list
Список периодов, период задается списком пар - (начало, конец):
Пример:
date_filter = [
('2021-07-05', '2021-07-18'),
('2021-09-06', '2021-09-26'),
('2021-10-18', '2021-10-31')
]
usetype_filter: list|None
Список Типов пользования Интернетом
Пример:
usetype_filter = [1, 2, 3]
geo_filter: list|None
Условия фильтрации по географии
Возможные варианты можно получить через метод `find_property` модуля catalogs:
>>> cats.find_property('CityPop', expand=True)
>>> cats.find_property('CityPop100', expand=True)
>>> cats.find_property('FederalOkrug', expand=True)
demo_filter: str|None
Условия фильтрации по демографическим атрибутам
Пример:
demo_filter = "sex = 1 AND occupation = 1"
Список допустимых атрибутов можно получить через метод `get_media_unit` модуля catalogs:
>>> cats.get_media_unit()['filters']['demo']
mart_filter: str|None
Условия фильтрации по медиа-объектам
Пример:
mart_filter = "crossMediaResourceId = 1150 OR crossMediaResourceId = 1093"
Список допустимых атрибутов можно получить через метод `get_media_unit` модуля catalogs:
>>> cats.get_media_unit()['filters']['mart']
slices: list
Порядок разбивки результата расчета, задается в виде списка
Пример:
slices = ["useTypeName", "researchWeek", "crossMediaResourceId"]
Список допустимых атрибутов можно получить через метод `get_media_unit` модуля catalogs:
>>> cats.get_media_unit()['slices']
statistics : list
Список статистик, которые необходимо расчитать.
Пример:
statistics = ['reach', 'reachPer', 'dr']
Список допустимых названий атрибутов можно получить через метод `get_media_unit` модуля catalogs:
>>> cats.get_media_unit()['statistics']
scales : dict|None
Шкалы для статистик "drfd" и "reachN".
Пример:
scales = {
'drfd':[(1, 5), (10, 20)],
'reachN':[(2, 10), (20, 255)]
}
Returns
-------
text : json
Задание в формате CrossWeb API
"""
if not self.checks_module.check_task(task_type, date_filter, usetype_filter, geo_filter,
demo_filter, mart_filter, slices, statistics, scales):
return
        # Assemble the JSON
tsk = {
"statistics": statistics,
"filter": {}
}
        # Add filters
self._add_range_filter(tsk, date_filter)
self._add_usetype_filter(tsk, usetype_filter)
self._add_filter(tsk, geo_filter, 'geoFilter')
self._add_filter(tsk, demo_filter, 'demo_filter')
self._add_filter(tsk, mart_filter, 'martFilter')
self._add_slices(tsk, slices)
self._add_scales(tsk, scales)
        # Save task info for later export to Excel
tinfo = {
'task_name': task_name,
'date_filter': date_filter,
'usetype_filter': usetype_filter,
'geo_filter': geo_filter,
'demo_filter': demo_filter,
'mart_filter': mart_filter,
'slices': slices,
'statistics': statistics,
'scales': scales
}
self.save_report_info(tinfo)
        # Return the JSON
return json.dumps(tsk)
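    # Editor's sketch (values taken from the docstring examples above):
    # reach-frequency statistics need matching scales, e.g.
    #     task.build_task('media', statistics=['drfd', 'reachN'],
    #                     date_filter=[('2021-07-05', '2021-07-18')],
    #                     scales={'drfd': [(1, 5), (10, 20)],
    #                             'reachN': [(2, 10), (20, 255)]})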
def send_task(self, task_type, data):
"""
Отправить задание на расчет
Parameters
----------
task_type: str
Тип задания
- audience - задание на расчет аудитории по media
- total - задание на расчет аудитори по total-media
- ad - задание на расчет аудитории по рекламе
data : str
Текст задания в JSON формате
Returns
-------
text : json
Ответ сервера, содержит taskid, который необходим для получения результата
"""
if data is None:
return
if task_type not in self.task_urls.keys():
return
try:
return self.rnet.send_request('post', self.task_urls[task_type], data)
except errors.HTTP400Error as e:
print(f"Ошибка: {e}")
def send_audience_task(self, data):
"""
Отправить задание на расчет аудиторных статистик по медиа
Parameters
----------
data : str
Текст задания в JSON формате
Returns
-------
text : json
Ответ сервера, содержит taskid, который необходим для получения результата
"""
return self.send_task('audience', data)
def send_total_audience_task(self, data):
"""
Отправить задание на расчет аудиторных статистик по тотал-медиа
Parameters
----------
data : str
Текст задания в JSON формате
Returns
-------
text : json
Ответ сервера, содержит taskid, который необходим для получения результата
"""
return self.send_task('total', data)
def send_advertisement_task(self, data):
"""
Отправить задание на расчет аудиторных статистик по рекламе
Parameters
----------
data : str
Текст задания в JSON формате
Returns
-------
text : json
Ответ сервера, содержит taskid, который необходим для получения результата
"""
return self.send_task('ad', data)
def wait_task(self, tsk):
"""
Ожидает окончание расчета задания или заданий.
Parameters
----------
tsk : dict|list
Задание в формате
{
'taskId': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'userName': 'user.name',
'message': 'Задача поступила в обработку'
}
или список заданий
[
{
'taskId': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'userName': 'user.name',
'message': 'Задача поступила в обработку'
},
...
]
Returns
-------
tsk : dict|list
Возвращает задание или список заданий
"""
if type(tsk) == dict:
if tsk.get('taskId') is not None:
tid = tsk.get('taskId', None)
tstate = ''
tstate_obj = None
cnt = 0
while cnt < 5:
try:
time.sleep(3)
tstate_obj = self.rnet.send_request('get', '/task/state/{}'.format(tid))
except errors.HTTP404Error:
cnt += 1
print(cnt)
except Exception:
                    raise Exception('Error while getting the task status')
else:
break
if tstate_obj is not None:
tstate = tstate_obj.get('taskStatus', '')
            print(f'Computing task (id: {tsk["taskId"]}) [', end='')
s = dt.datetime.now()
# DONE, FAILED, IN_PROGRESS, CANCELLED, IN_QUEUE
while tstate == 'IN_QUEUE' or tstate == 'IN_PROGRESS':
print('=', end=' ')
time.sleep(3)
tstate_obj = self.rnet.send_request('get', '/task/state/{}'.format(tsk['taskId']))
if tstate_obj is not None:
tstate = tstate_obj.get('taskStatus', '')
time.sleep(1)
e = dt.datetime.now()
print(f"] время расчета: {str(e - s)}")
if tstate == 'DONE':
return tsk
elif type(tsk) == list:
tasks = list()
            # collect all task identifiers
for t in tsk:
cur_task = t.get('task')
if cur_task is None or cur_task.get('taskId') is None:
continue
tasks.append(t)
            # Check the state of the tasks
            print(f'Computing tasks ({len(tasks)}) [ ', end='')
s = dt.datetime.now()
errs = dict()
while True:
time.sleep(3)
                # request the state
done_count = 0
for t in tasks:
tid = t['task']['taskId']
tstate = ''
tstate_obj = self.rnet.send_request('get', '/task/state/{}'.format(tid))
if tstate_obj is not None:
tstate = tstate_obj.get('taskStatus', '')
if tstate == 'IN_PROGRESS' or tstate == 'PENDING' or tstate == 'IN_QUEUE' or tstate == 'IDLE':
continue
elif tstate == 'DONE':
done_count += 1
else:
errs[tid] = t
errs[tid]['state'] = tstate
break
print('=', end=' ')
if done_count == len(tsk):
break
if len(errs) > 0:
print(f"Одна или несколько задач завершились с ошибкой")
for tid, tstate in errs.items():
print(f"Задача: {tid} состояние: {tstate}")
return None
e = dt.datetime.now()
print(f"] время расчета: {str(e - s)}")
return tsk
def get_status(self, tsk):
"""
Получить статус расчета задания.
Parameters
----------
tsk : dict
Задание в формате
{
'taskId': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'userName': 'user.name',
'message': 'Задача поступила в обработку'
}
Returns
-------
tsk : dict
Возвращает задание и его состояние:
{
'taskId': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'userName': 'user.name',
'taskStatus': 'DONE',
'additionalParameters': {}
}
"""
if tsk.get('taskId') is not None:
tid = tsk.get('taskId', None)
tstate_obj = self.rnet.send_request('get', '/task/state/{}'.format(tid))
return tstate_obj
def get_result(self, tsk):
"""
Получить результат выполнения задания по его ID
Parameters
----------
tsk : dict
Задание
Returns
-------
text : json
Результат выполнения задания в JSON формате
"""
if tsk is None or tsk.get('taskId') is None:
return None
return self.rnet.send_request('get', '/task/result/{}'.format(tsk['taskId']))
def result2table(self, data, project_name=None):
"""
Получить результат выполнения задания по его ID
Parameters
----------
data : dict
Результат выполнения задания в JSON формате
project_name : str
Название проекта
Returns
-------
result : DataFrame
DataFrame с результатом выполнения задания
"""
res = {}
if data is None or type(data) != dict:
return None
if 'taskId' not in data or 'resultBody' not in data:
return None
if type(data['resultBody']) == list and len(data['resultBody']) == 0:
msg = data.get('message', None)
if msg is not None:
print(msg)
slices = set()
statistics = set()
for item in data['resultBody']:
stat = item['statistics']
sls = item['slice']
for k in sls.keys():
if k not in slices:
slices.add(k)
res[k] = []
for k in stat.keys():
if k not in statistics:
statistics.add(k)
res['stat.' + k] = []
for item in data['resultBody']:
stat = item['statistics']
sls = item['slice']
for k in slices:
if k in sls:
v = str(sls[k])
else:
v = '-'
res[k].append(v)
for k in statistics:
if k in stat:
v = stat[k]
else:
v = None
res['stat.' + k].append(v)
df = pd.DataFrame(res)
self._get_text_names(df)
df.replace(to_replace=[None], value=np.nan, inplace=True)
if project_name is not None:
df.insert(0, 'prj_name', project_name)
#df['date'] = pd.to_datetime(df['date'])
return df
@staticmethod
def get_excel_filename(task_name, export_path='../excel', add_dates=True):
"""
Получить имя excel файла
Parameters
----------
task_name : str
Название задания
export_path : str
Путь к папке с excel файлами
add_dates : bool
Флаг - добавлять в имя файла дату или нет, по умолчанию = True
Returns
-------
filename : str
Путь и имя excel файла
"""
return utils.get_excel_filename(task_name, export_path, add_dates)
def save_report_info(self, tinfo):
"""
Сохраняет общую информацию о заданиях. Использует при сохранении отчета в Excel
Parameters
----------
tinfo : dict
Параметры задания в виде словаря
"""
self.task_info['task'] = tinfo
def get_report_info(self):
"""
Возвращает информацию о расчитываемом отчете в виде DataFrame, которая была предварительно сохранена
с помощью метода save_audience_info
Returns
-------
result: DataFrame
Информация о расчитываемом отчете
"""
data = list()
for tk, tv in self.task_info['task'].items():
data.append(f"{tk}: {tv}")
return pd.DataFrame(data)
def _get_text_names(self, df, with_id=False):
df = self._get_text_name_for(df, 'demo', with_id)
df = self._get_text_name_for(df, 'geo', with_id)
df = self._get_text_name_for_mart(df)
return df
def _get_text_name_for(self, df: pd.DataFrame, entity_name: str, with_id=True):
if type(df) != pd.DataFrame:
return
id_name = ''
if with_id:
id_name = 'Name'
geo_attributes = self.cats.get_slices(entity_name)
for col in df.columns:
if col not in geo_attributes:
continue
# get cat
_attrs = self.media_attribs[self.media_attribs['sliceUnit'] == col].copy()[['optionValue', 'optionName']]
df[col] = df[col].astype('int32', errors='ignore')
df[col + id_name] = df.merge(_attrs, how='left', left_on=col, right_on='optionValue')['optionName']
return df
def _get_text_name_for_mart(self, df: pd.DataFrame):
if type(df) != pd.DataFrame:
return
matr_attributes = self.cats.get_slices('mart')
pos = 0
for col in df.columns:
pos += 1
if col not in matr_attributes:
continue
_attrs = pd.DataFrame()
if col == 'crossMediaProductId':
_attrs = self.cats.products
elif col == 'crossMediaHoldingId':
_attrs = self.cats.holdings
elif col == 'crossMediaResourceId':
_attrs = self.cats.resources
elif col == 'crossMediaThemeId':
_attrs = self.cats.themes
else:
break
df[col] = df[col].astype('int64')
df.insert(pos, col[:-2] + 'Name', df.merge(_attrs, how='left', left_on=col, right_on='id')['name'])
pos += 1
return df
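
# Editor's usage sketch (method names are real, assembled from the docstrings
# above; argument values are illustrative):
#
#     task = CrossWebTask()
#     task_json = task.build_task('media',
#                                 date_filter=[('2021-09-06', '2021-09-26')],
#                                 usetype_filter=[1, 2, 3],
#                                 slices=['researchWeek'],
#                                 statistics=['reach'])
#     tsk = task.send_audience_task(task_json)
#     task.wait_task(tsk)
#     df = task.result2table(task.get_result(tsk))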

# === src/scripts.py | luoweihua7/TimerFlow | MIT ===
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import time
from workflow import Workflow, ICON_WARNING, ICON_INFO, ICON_ERROR
def parseTime(time_str):
    # parameter renamed so it no longer shadows the imported `time` module
    if time_str.isdigit():
        return int(time_str)
    table = {
        's': 1,
        'sec': 1,
        'secs': 1,
        'second': 1,
        'seconds': 1,
        'm': 60,
        'min': 60,
        'mins': 60,
        'minute': 60,
        'minutes': 60,
        'h': 60*60,
        'hour': 60*60,
        'hours': 60*60
    }
    for expr in table:
        if time_str.endswith(expr):
            firstPart = time_str[:-(len(expr))]
            if firstPart.isdigit():
                return int(firstPart) * table[expr]
    return 3*60
# def showNotification(wf, message):
# command = "/usr/bin/osascript -e 'display notification \"%s\" sound name \"\"'" % (message)
# os.system(command)
def playAlertSound():
os.system("afplay /System/Library/PrivateFrameworks/ScreenReader.framework/Versions/A/Resources/Sounds/EnterInvisibleArea.aiff")
class ScriptRunner(object):
def __init__(self):
self.wf = None
def run(self, wf):
self.wf = wf
timeStr = "3"
message = "Time's up!"
if wf.args:
timeStr = wf.args[0].strip()
if len(wf.args) > 1:
self.wf.logger.debug("wf.args")
self.wf.logger.debug(wf.args)
message = " ".join(wf.args[1:])
time.sleep(parseTime(timeStr))
print(message.encode('utf8'))
playAlertSound()
if __name__ == '__main__':
wf = Workflow()
app = ScriptRunner()
    wf.run(app.run)
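
# Editor's note (illustrative): parseTime accepts a bare number of seconds or a
# number followed by a unit suffix from its table, and falls back to 3*60
# seconds when it cannot parse the input:
#
#     parseTime("90")     # -> 90
#     parseTime("2min")   # -> 120
#     parseTime("1hour")  # -> 3600
#     parseTime("later")  # -> 180 (fallback)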

# === folk_rnn_site/folk_rnn_site/routing.py | boblsturm/folk-rnn-webapp | MIT ===
from channels.routing import ProtocolTypeRouter, ChannelNameRouter
from composer import consumers
application = ProtocolTypeRouter({
# Empty for now (http->django views is added by default)
'websocket': consumers.ComposerConsumer,
'channel': ChannelNameRouter({
'folk_rnn': consumers.FolkRNNConsumer,
})
})

# === qtcalendar/test.py | asmateus/PyQtCalendar | MIT ===
'''
Test module, so that you know how this works. Look at it, read the comments and
work out your way to your own implementation.
Note: imports should be at the beginning of the file, I will put them through out it
just so you know explicitly what is used for what, and what you can avoid.
'''
from example.holder.minimal import MinimalHolder
from entities import Calendar
ui = MinimalHolder()
'''
Lets create a calendar, suppose we want a calendar with sunday as the lead day
and with Colombian holidays. We do not want to modify anything else so, leave
the datatree as the default one.
Normally you need to supply the holiday list, lucky for you, a module in examples
already does the work for you.
'''
from example.connector import HolidayDownloader
from models import Calendar__Model
holidays = HolidayDownloader().getHolidayDates()
lead_day = Calendar__Model.TYPE_SUNDAY_LEADING
# Create the calendar instance
cal = Calendar(holidays=holidays, leading_day=lead_day)
'''
The previous code should give you a working calendar with no events (already
displayed). Now let's add an event on a date
of our choice. For that we will have to assign a minimal description for our
event, the description contains:
* place: the name of the place
* people: amount of people going
* init-date: datetime
* end-date: datetime
* fulfillment: how ready is the event
'''
import datetime as dt
date_selected = dt.date.today()
next_day = date_selected + dt.timedelta(3)
# Suppose our event starts at 17:00 and ends at 3:00 of the next day
description = {
'place': 'Disney',
'people': 202,
'init-date': dt.datetime.combine(date_selected, dt.time(17, 0, 0)),
'end-date': dt.datetime.combine(next_day, dt.time(3, 0, 0)),
'fulfillment': 1.0
}
date_selected_2 = dt.date(2017, 12, 1)
next_day_2 = date_selected_2 + dt.timedelta(1)
description_2 = {
'place': 'Universal',
'people': 202,
'init-date': dt.datetime.combine(date_selected_2, dt.time(17, 0, 0)),
'end-date': dt.datetime.combine(next_day_2, dt.time(3, 0, 0)),
'fulfillment': 0.5
}
cal.createEvent(description)
cal.createEvent(description_2)
# Add the calendar widget to your application holder
ui.getMainLayout().addWidget(cal.getView())
# Start UI
ui()
| 31.351351 | 88 | 0.718966 | 359 | 2,320 | 4.573816 | 0.415042 | 0.043849 | 0.034105 | 0.051157 | 0.130329 | 0.130329 | 0.105968 | 0.105968 | 0.105968 | 0.049939 | 0 | 0.02504 | 0.190948 | 2,320 | 73 | 89 | 31.780822 | 0.84976 | 0.190948 | 0 | 0.064516 | 0 | 0 | 0.084854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16129 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb856102948ef011c587812ca203707127f840e0 | 4,100 | py | Python | otter/otter.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | otter/otter.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | otter/otter.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
#import uuid
import os
from .html import *
from configparser import ConfigParser
from jinja2 import Template, Environment, FileSystemLoader
from pkg_resources import resource_string, resource_stream, resource_filename
default_config = resource_string(__name__, 'otter.conf')
class Otter():
"""
Otter is a pythonic report writing system designed to produce HTML
    reports for long-running or complex jobs where an IPython
notebook would be an impractical way of presenting information.
"""
def __init__(self, filename, config_file=None, **kwargs):
"""
An Otter report is created by this class.
Parameters
----------
filename : str
The path to the location of the report, for example `/home/me/www/report.html`.
config_file: str
The location of the config file which should be used to generate the report.
"""
# Attempt to load in default meta data from a config file
# At the moment just the current directory, but should
# extend to look in home directory and environment variable location too
config = ConfigParser()
#if not config_file:
try:
config.read(default_config)
except TypeError: # Looks like Python 3
            config.read_string(default_config.decode("utf-8"))
if config_file:
with open(config_file) as cf:
config.read_string(cf.read())
self.meta = {}
if config.has_section("meta"):
for option in config['meta']:
self.meta[option] = config.get('meta', option)
for option in kwargs.items():
self.meta[option[0]] = option[1]
try:
theme = config.get("theme", "location")
except:
print("Cannot find theme in the config file. Using the default theme.")
try:
theme = resource_filename(__name__, "themes/default/")
except:
print("No theme files found.")
self.env = Environment(loader=FileSystemLoader(theme))
self.reportfolder = filename+"_files"
self.foldername = os.path.basename(filename)+"_files/"
if not os.path.exists(self.reportfolder):
os.makedirs(self.reportfolder)
#self.reportfile= open(filename,"w")
self.reportfile = filename
self.meta.update(kwargs)
self.items = []
# Make an otter report work as a context manager
def __enter__(self):
"""
Execute this code when the context manager is created.
Right now, Otter doesn't actually need anything to be done at the
creation of a context, but that should really change at some point in
the future.
"""
        return self
def __exit__(self, type, value, traceback):
"""
When the context ends, the report needs to be rendered.
"""
self.show()
def __add__(self, item):
return self.add(item)
def add(self, item):
if HTMLElement in type(item).mro():
self.items.append(item)
else:
item_ = HTMLElement()
item_ + item
self.items.append(item_)
return self
def show(self):
html = ''
for item in self.items:
html += repr(item)
output_html = self.env.get_template('body.html').render(meta=self.meta, body=html)
self._write(output_html)
def _write(self, text):
with open(self.reportfile, "w") as f:
f.write(text)
def _mkdir_recursive(self, path):
"""
Recursively create the directories required for the report.
Based off code from http://stackoverflow.com/questions/6004073/how-can-i-create-directories-recursively
by `Mars'.
"""
sub_path = os.path.dirname(path)
if not os.path.exists(sub_path):
self._mkdir_recursive(sub_path)
if not os.path.exists(path):
os.mkdir(path)
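
# Editor's usage sketch (hypothetical path and metadata; `add`/`+` accept
# HTMLElement instances or anything an HTMLElement can wrap):
#
#     report = Otter('/home/me/www/report.html', title='Nightly run')
#     with report:
#         report + "Job finished without errors."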

# === tests/integration_tests.py | knaw-huc/pagexml | MIT ===
import glob
import traceback
from icecream import ic
from pagexml.model.physical_document_model import pretty_print_textregion
from pagexml.parser import parse_pagexml_file
def main():
pagexml_basedir = "../golden-agents/pagexml/"
testdirs = glob.glob(pagexml_basedir + '[0-9AN]*')
# testdirs = glob.glob(pagexml_basedir + '10025*')
assert len(testdirs) > 0
for d in testdirs:
files = glob.glob(d + '/*.xml')
assert len(files) > 0
for fname in files:
ic(fname)
print(fname)
try:
scan_doc = parse_pagexml_file(fname)
pretty_print_textregion(scan_doc, print_stats=True)
except Exception:
print(traceback.format_exc())
if __name__ == '__main__':
main()

# === tests/model/abstract/test_updateable_model.py | dda-dev/ornitho-client-python | MIT ===
from unittest import TestCase, mock
import ornitho
from ornitho.model.abstract import UpdateableModel
ornitho.consumer_key = "ORNITHO_CONSUMER_KEY"
ornitho.consumer_secret = "ORNITHO_CONSUMER_SECRET"
ornitho.user_email = "ORNITHO_USER_EMAIL"
ornitho.user_pw = "ORNITHO_USER_PW"
ornitho.api_base = "ORNITHO_API_BASE"
class TestUpdateableModel(TestCase):
class MyModel(UpdateableModel):
ENDPOINT = "my_model"
# noinspection PyUnusedLocal
@staticmethod
def fake_request(**kwargs):
return "SUCCESS"
@mock.patch.object(MyModel, "request", fake_request)
def test_update(self):
my_model = self.MyModel()
response = my_model.update()
self.assertEqual("SUCCESS", response)

# === run_scripts/tools.py | CarmenSheppard/PneumoCaT2 | Unlicense ===
""" Python 3.7+
Tools for PneumoKITy - used in more than one of the other run scripts
Carmen Sheppard 2019-2022
"""
import pandas as pd
import numpy as np
import subprocess
import os
import sys
from exceptions import CtvdbError, CtvdbFileError
from Database_tools.sqlalchemydeclarative import Genes, Variants, Serotype, SerotypeVariants, VariantGroup
def check_db_path(database):
"""
Checks path for CTVdb for integrity
:param database:
:return: None
"""
if os.path.isfile(os.path.join(database, "references.msh")) and \
os.path.isfile(os.path.join(database, "CTV.db")):
sys.stdout.write(f"Reference CTV.db database at {database} "
f"selected.\n")
else:
sys.stderr.write("ERROR: Check ctvdb path. Relevant folders, "
"'references.msh' file and "
"'CTV.db' must be present at "
"the database path\n")
sys.exit(1)
def check_version(software):
"""
    Get version of software and return as string. Check for software error
:param software: string - path to software
:return: string of software version
"""
try:
# get version
output = subprocess.run([software, "-v"], stdout=subprocess.PIPE,
check=True)
version = ""
for line in output.stdout.decode('utf-8').splitlines():
if line != "":
version = line
break
else:
continue
except IOError:
sys.stderr.write(f"ERROR: Check path to software: {software}\n")
sys.exit(1)
except subprocess.CalledProcessError:
sys.stderr.write("ERROR: Check existence of correct "
f"program file at {software}\n")
sys.exit(1)
return version
def create_dataframe(input_file, header = "Serotype"):
"""
    Parse the input mash TSV file, add headers to columns, check TSV datatypes
    and add a header for the first column
:param input_file: tsv file output from MASH run
:param header: headers to use for output serotype column - default "Serotype"
    :return: formatted dataframe
"""
try:
df = pd.read_csv(input_file, sep='\t', header=None)
# remove empty columns
df = df.dropna(axis='columns', how='all')
sys.stdout.write("Analysing mash screen output.\n")
# rename columns to friendly headers
df.rename(
{0: 'identity', 1: 'shared-hashes', 2: 'median-multiplicity',
3: 'p-value', 4: header}, axis=1, inplace=True)
# reorder columns
df = df[[header, 'identity', 'shared-hashes',
'median-multiplicity', 'p-value']]
return df
except IOError:
sys.stderr.write('ERROR: error occurred with reading Mash screen '
'file\n')
sys.exit(1)
except pd.errors.EmptyDataError:
sys.stderr.write('ERROR: error occurred with reading Mash screen '
'file\n')
def run_mash_screen(analysis, ref_sketch, run_type="stage1"):
"""
Run MASH screen for any sketch file against any ref (stage 1 & 2)
:param analysis: analysis object
:param ref_sketch: String of path to reference sketch file
    :param run_type: type of screen file for output (defaults to "stage1")
:return: string of tsv outfile path and name (saved to tmp).
"""
# check that ref file exists:
if not os.path.isfile(ref_sketch) or os.path.getsize(ref_sketch) == 0:
raise CtvdbFileError(f" Check ctvdb folder for presence of {analysis.folder} subfolder "
f"and correct reference sketch file.\n")
elif run_type != "stage1":
sys.stdout.write(f"Running stage 2 screen reference: {ref_sketch}\n")
else:
sys.stdout.write(f"Running stage 1 screen\n")
pass
if analysis.fastq_files:
argument = [analysis.mash, "screen", ref_sketch, "-p",
analysis.threads, analysis.fastq_files[0],
analysis.fastq_files[1]]
else:
argument = [analysis.mash, "screen", ref_sketch, "-p",
analysis.threads, analysis.assembly]
try:
data = subprocess.run(argument, capture_output=True, check=True, timeout=3600)
result = data.stdout.decode('utf-8')
except subprocess.CalledProcessError:
sys.stderr.write("Error with MASH subprocess - please check input file integrity")
sys.exit(1)
# TODO write mash output to log file once logging implemented in PneumoKITy
#sys.stderr.write(data.stderr.decode('utf-8'))
outfile = os.path.join(analysis.output_dir, f"{analysis.sampleid}_tmp",
f"{analysis.sampleid}_{run_type}_screen.tsv")
with open(outfile, "w") as f:
f.write(result)
return outfile
def filter_kmerhits(df, minpercent):
"""
Function to calculate % hits for each query and reduce dataframe to
those above the min kmer cut off.
:param df: pandas dataframe of MASH output
:param minpercent: int representing % hits of total kmers for serotype
:return: pandas.dataframe of rows representing kmer hits above % cutoff
and dataframe of all calculated kmer percents for reference
"""
# split hash values, calculate percentage + add to dataframe
hashes = df["shared-hashes"].str.split("/", n=1, expand=True)
df["hit_hashes"] = pd.to_numeric(hashes[0])
df["total_hashes"] = pd.to_numeric(hashes[1])
df["percent"] = df["hit_hashes"] / df["total_hashes"] * 100
filtered_kmerhits = df[df["percent"] >= minpercent]
return filtered_kmerhits, df
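
# Editor's note (illustrative): the hash-split arithmetic above turns MASH's
# "shared-hashes" strings into percentages, e.g. "970/1000" gives
# percent = 970 / 1000 * 100 = 97.0, which passes a minpercent cut-off of 90
# but not one of 98.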
def apply_filters(df, minpercent, minmulti, top_hits = True):
"""
Apply specified filters to dataframe and get top 5 hits
:param minpercent:
:param df: pandas dataframe
:param minmulti: minimum multiplicity value
:param top_hits: create 5 hits output or not
:return: filtered dataframe
"""
# filter for kmer hits above percentage
filtered, original = filter_kmerhits(df, minpercent)
# filter for median-multiplicity if necessary (reads)
if minmulti != 1:
filtered = filtered[filtered["median-multiplicity"] >= minmulti]
if not top_hits:
# return data only if top hits = False
return filtered, original
# get top 5 hits as dict with percent (rounded to 2 dp)
top_hits_df = original.nlargest(5, 'percent').round(2)
top_hits_dict = top_hits_df.to_dict('index')
top_hits = {}
for i in top_hits_dict:
top_hits[top_hits_dict[i]['Serotype']] = top_hits_dict[i]['percent']
return filtered, original, top_hits
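
# Editor's note (serotype names are hypothetical): with top_hits left as True
# the third return value maps each of the five best hits to its rounded
# percent, e.g. {'19F': 97.25, '19A': 64.51, '6B': 12.02, ...}.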
def create_csv(df, outpath, filename, index=False):
"""
create csv of pandas dataframe and save
:param df: pandas dataframe
:param outpath:
:param filename:
:param index: optional add index from df, defaults to False
:return: None
"""
try:
if not os.path.exists(outpath):
os.mkdir(outpath)
df.to_csv(os.path.join(outpath, filename), header=True,
index=index,
float_format=np.float32)
except IOError:
sys.stderr.write(" Error: Could not save csv. Please check output "
"path\n")
sys.exit(1)
def get_variant_ids(hit_variants, var_type, groupid, session, position=None):
"""
Returns variant id's by comparing to database
:param groupid:
:param hit_variants: dict of hit variants
:param var_type: type of variant to search (eg allele)
:param session: database session
:param position: protein position of variant default to None
:return: list of variant ids for hits (db objects)
"""
hit_var = []
# for each target/hit find the associated variant ID in database
for target in hit_variants:
if hit_variants[target] != 0:
# return variants associated with var type and variant result and position and SEROGROUP
gene_var = session.query(Variants.id).join(Genes).join(VariantGroup).filter(Genes.gene_name == target,
VariantGroup.grp_id == groupid, Variants.var_type == var_type,
Variants.variant == hit_variants[target],
Variants.position == position).all()
if gene_var:
hit_var.append(gene_var[0][0])
else:
raise CtvdbError(f"Variant {hit_variants[target]} not found")
else:
hit_var.append(0)
return hit_var
def find_phenotype(analysis, session):
    """
    Find the phenotype associated with the variant ids from stage 2 analysis and set the final result
    :param analysis: analysis object carrying stage 2 variant ids and stage 1/2 results
    :param session: active DB session
    """
# get variant ids associated with Serotype and group, unique combinations only
serorecords = session.query(Serotype.predicted_pheno, SerotypeVariants.variant_id).\
outerjoin(SerotypeVariants).filter(Serotype.group_id == analysis.grp_id).distinct().all()
# create dict of expected vars
expected_vars = {}
for item in serorecords:
if item[0] not in expected_vars: # set up
expected_vars[item[0]] = [item[1]]
else: # append to existing
expected_vars[item[0]].append(item[1])
detected_vars = []
# create list of var ids from analysis
# catch variants not found.
try:
for i in analysis.stage2_varids:
if i[0] != 0: # ignore undetected (0)
detected_vars.append(i[0])
#interpret results
for serotype in expected_vars:
a = set(expected_vars[serotype])
b = set(detected_vars)
if a == b:
analysis.predicted_serotype = serotype
break
if a != b and not analysis.predicted_serotype:
analysis.predicted_serotype = f"Serotype within {analysis.folder} unexpected variant pattern"
else:
analysis.predicted_serotype = analysis.stage1_result
except IndexError:
analysis.predicted_serotype = f"{analysis.stage1_result}: {analysis.stage2_result}"
sys.stdout.write(f"{analysis.predicted_serotype}\n")
def collate_results(collate_dir, results):
"""
If selected this will add results to a csv file at a specified collation directory location.
:param collate_dir: directory for collated csv file
:param results: results dataframe created from analysis object
:return: None
"""
collate_file = os.path.join(collate_dir, "Collated_result_data.csv")
    # append to the collated file; append mode creates it if missing, and
    # the header is written only while the file is still empty (see below)
try:
with open(collate_file, 'a') as f:
results.to_csv(f, header=f.tell() == 0, index=False)
except IOError:
sys.stderr.write(" Error: Could not save data to collated csv. Please check output "
"path\n")
sys.exit(1)
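# Minimal sketch of the append-with-header trick used above: f.tell() is
# 0 only while the file is empty, so the header row is written exactly
# once across repeated appends (parameter names are illustrative).
def _example_append_csv(df, path):
    with open(path, 'a') as f:
        df.to_csv(f, header=f.tell() == 0, index=False)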
def handle_results(analysis):
from run_scripts.initialise_run import Category
quality, results = analysis.create_objdf()
# write csv
create_csv(quality, analysis.output_dir, f"{analysis.sampleid}_quality_system_data.csv")
create_csv(results, analysis.output_dir, f"{analysis.sampleid}_result_data.csv")
# if copy option is taken collate results at directory path specified
if analysis.csv_collate:
collate_results(analysis.csv_collate, results)
sys.stdout.write(f"Results collated at {analysis.csv_collate}/Collated_result_data.csv \n")
if analysis.runtype == 'mix' and analysis.category == Category.mix:
# if mixed serotype run - handle mixed serotypes (no variants)
mixstring, mix_df, analysis.mix_mm = analysis.handle_mixed(False)
analysis.stage2_output = "Analysed in PneumoKITy stage 1 only"
elif analysis.runtype == 'mix' and analysis.category == Category.mixed_variants:
# if mixed serotype run - handle mixed serotypes (with variants)
        mixstring, mix_df, analysis.mix_mm = analysis.handle_mixed(True)
analysis.stage2_output = "Analysed in stage 2 - with limited subtype determination"
    elif analysis.runtype == 'pure':
        # pure run: no mixed-serotype analysis; define mixstring so the
        # later write_report(mixstring) call cannot raise a NameError
        mixstring = "Mixed serotype analysis not applicable (pure culture run)"
        mix_df = None
        analysis.write_report()
else:
mixstring = "Mixed serotypes not found"
mix_df = None
if mix_df is not None:
create_csv(mix_df, analysis.output_dir, f"{analysis.sampleid}_mixed_serotypes.csv")
#creates output files and write to stdout for results.
analysis.write_report(mixstring)
sys.stdout.write(f"CSV files written to {analysis.output_dir}.\n")
sys.stdout.write(f"Analysis RAG status: {analysis.rag_status} \n")
sys.stdout.write(f"Predicted serotype is {analysis.predicted_serotype}\n")
sys.stdout.write(f"{analysis.workflow} run complete.\n")
def cleanup(analysis):
"""
Removes files in tmp folder and tmp folder if empty (to avoid clashes with other processes
if run in parallel and same output folder specified.)
"""
save_path = os.path.join(analysis.output_dir, f"{analysis.sampleid}_tmp")
    files = os.listdir(save_path)
try:
# remove files
for file in files:
if analysis.sampleid in file:
os.remove(os.path.join(save_path, file))
# remove directory if empty
if not os.listdir(save_path):
os.rmdir(save_path)
sys.stdout.write("tmp directory removed\n")
    except OSError as e:
        sys.stderr.write(f"Error: {save_path}: {e.strerror}\n")
| 36.540107 | 114 | 0.643934 | 1,752 | 13,666 | 4.928082 | 0.207192 | 0.01054 | 0.019458 | 0.017373 | 0.173732 | 0.134468 | 0.107482 | 0.069029 | 0.069029 | 0.058837 | 0 | 0.007625 | 0.261013 | 13,666 | 373 | 115 | 36.63807 | 0.847312 | 0.267525 | 0 | 0.20297 | 0 | 0 | 0.196954 | 0.046933 | 0 | 0 | 0 | 0.002681 | 0 | 1 | 0.059406 | false | 0.004951 | 0.039604 | 0 | 0.133663 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb89097f4e7f9df09efc0b48562a71656d49ff33 | 792 | py | Python | oldcode/map_to_ecoli_model.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 3 | 2018-03-29T12:14:05.000Z | 2021-03-22T09:04:22.000Z | oldcode/map_to_ecoli_model.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 9 | 2016-05-30T16:43:21.000Z | 2017-03-17T13:15:02.000Z | oldcode/map_to_ecoli_model.py | eladnoor/small-molecule-regulation | 83127f20859093a06ee493128d672ac7428cec83 | [
"MIT"
] | 1 | 2021-03-22T09:04:26.000Z | 2021-03-22T09:04:26.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 23:42:54 2016
@author: eladn
"""
import settings
import os
import pandas as pd
ki = pd.DataFrame.from_csv(os.path.join(settings.CACHE_DIR, 'ecoli_ki_bigg.csv'))
activators = pd.DataFrame.from_csv(os.path.join(settings.CACHE_DIR, 'ecoli_activating_bigg.csv'))
model_reactions = settings.get_reaction_table_from_xls()
bigg2ec = model_reactions.loc[:, ['Reaction Abbreviation', 'EC Number']]
bigg2ec.rename(columns={'Reaction Abbreviation': 'bigg.reaction'}, inplace=True)
bigg2ec = bigg2ec[~pd.isnull(bigg2ec['EC Number'])]
# change all reaction IDs to lower-case (apparently the standards have changed
# since the model was published, and cases are different now).
bigg2ec['bigg.reaction'] = bigg2ec['bigg.reaction'].apply(unicode.lower) | 34.434783 | 97 | 0.758838 | 115 | 792 | 5.095652 | 0.591304 | 0.061433 | 0.051195 | 0.061433 | 0.167235 | 0.167235 | 0.167235 | 0.167235 | 0.167235 | 0.167235 | 0 | 0.02809 | 0.10101 | 792 | 23 | 98 | 34.434783 | 0.794944 | 0.267677 | 0 | 0 | 0 | 0 | 0.246935 | 0.043783 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8aab409b3c074b8404fd95df18640ee80de28e | 15,420 | py | Python | eelbrain/_result_plots.py | jpkulasingham/Eelbrain | 1061ce0b781a8e55ec187723b58491a5cde32e08 | [
"BSD-3-Clause"
] | 1 | 2021-07-26T21:22:35.000Z | 2021-07-26T21:22:35.000Z | eelbrain/_result_plots.py | jpkulasingham/Eelbrain | 1061ce0b781a8e55ec187723b58491a5cde32e08 | [
"BSD-3-Clause"
] | 1 | 2019-03-22T15:35:17.000Z | 2019-03-22T15:35:17.000Z | eelbrain/_result_plots.py | jpkulasingham/Eelbrain | 1061ce0b781a8e55ec187723b58491a5cde32e08 | [
"BSD-3-Clause"
] | null | null | null | # Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from math import floor, log10
from os import makedirs
from os.path import basename, dirname, exists, expanduser, isdir, join
import matplotlib as mpl
import numpy as np
from . import fmtxt, plot, testnd
from .plot._base import POINT
from ._data_obj import combine
# usage: with mpl.rc_context(RC):
FONT = 'Helvetica'
RC = {
'figure.dpi': 300,
'savefig.dpi': 300,
'savefig.transparent': True,
# Font
'font.family': 'sans-serif',
'font.sans-serif': FONT,
'font.size': 9,
# make sure equations use same font
'mathtext.fontset': 'custom',
'font.cursive': FONT,
'font.serif': FONT,
# subplot
'figure.subplot.top': 0.95,
# legend
'legend.fontsize': 6,
'legend.frameon': False,
}
for key in mpl.rcParams:
if 'width' in key:
RC[key] = mpl.rcParams[key] * 0.5
class PlotDestDir:
"""Generate paths for saving plots in figure-specific subdirectories
Parameters
----------
root : str
Directory in which to save files.
pix_fmt : str
Pixel graphics format (default ``png``).
vec_fmt : str
Vector graphics format (default ``pdf``).
name : str
Name for the info report (default is ``basename(root)``).
"""
def __init__(self, root, pix_fmt='png', vec_fmt='pdf', name=None):
root = expanduser(root)
if not exists(root):
makedirs(root)
else:
assert isdir(root)
assert pix_fmt.isalnum()
assert vec_fmt.isalnum()
if name is None:
name = basename(root)
if not name:
name = basename(dirname(root))
self.root = root
self._pix_fmt = pix_fmt
self._vec_fmt = vec_fmt
self.pix = join(root, '%s.' + pix_fmt)
self.vec = join(root, '%s.' + vec_fmt)
self.mov = join(root, '%s.mov')
self.txt = join(root, '%s.txt')
self.name = name
self.report = fmtxt.Report(name)
self._active_section = [self.report]
def with_ext(self, ext):
"""Generate path template ``%s.{ext}``"""
assert ext.isalnum()
return join(self.root, '%s.' + ext)
def subdir(self, dirname, name=None):
"""PlotDestDir object for a sub-directory"""
return PlotDestDir(join(self.root, dirname), self._pix_fmt, self._vec_fmt, name)
# MARK: report
def section(self, heading, level=1):
        if level <= 0:
            raise ValueError("level=%r; must be >= 1, section 0 is the document" % (level,))
elif level > len(self._active_section):
raise RuntimeError("Can't add section with level %i before adding "
"section with level %i" % (level, level - 1))
while len(self._active_section) > level:
self._active_section.pop(-1)
section = self._active_section[-1].add_section(heading)
self._active_section.append(section)
def info(self, content):
"""Add ``info_string`` to the info list"""
section = self._active_section[-1]
section.append(content)
def save_info(self, format='html'):
"""Save info to ``info.txt``"""
dst = join(self.root, self.name)
try:
getattr(self.report, 'save_' + format)(dst)
except AttributeError:
raise ValueError("format=%r; Invalid format" % (format,))
def cname(cid):
if isinstance(cid, tuple):
return '-'.join(map(str, cid))
else:
return str(cid)
class ClusterPlotter:
"""Make plots for spatio-temporal clusters
returned by :meth:`MneExperiment.load_result_plotter`
Parameters
----------
ds : Dataset
Dataset with the data on which the test is based.
res : NDTest
Test result object with spatio-temporal cluster test result.
colors : dict
Colors for plotting data in a ``{cell: color}`` dictionary.
dst : str
Directory in which to place results.
vec_fmt : str
Format for vector graphics (default 'pdf').
pix_fmt : str
Format for pixel graphics (default 'png').
labels : dict
Labels for data in a ``{cell: label}`` dictionary (the default is to
use cell names).
h : scalar
Plot height in inches (default 1.2).
rc : dict
Matplotlib rc-parameters dictionary (the default is optimized for the
default plot size ``h=1.2``).
Notes
-----
After loading a :class:`ClusterPlotter`, its ``rc``, ``colors``, ``labels``
and ``h`` attributes can be updated to create different plot layouts without
reloading the data.
"""
def __init__(self, ds, res, colors, dst, vec_fmt='pdf', pix_fmt='png',
labels=None, h=1.2, rc=None):
self.rc = RC.copy()
if rc is not None:
self.rc.update(rc)
self.ds = ds
self.res = res
self.colors = colors
self.labels = labels
self.h = h
self._dst = PlotDestDir(dst, pix_fmt, vec_fmt)
self._is_anova = isinstance(self.res, testnd.anova)
def _ids(self, ids):
if isinstance(ids, (float, int)):
return self._ids_for_p(ids)
elif isinstance(ids, dict):
if not self._is_anova:
raise TypeError("ids can not be dict for results other than ANOVA")
out = []
for effect, cids in ids.items():
if isinstance(cids, float):
out.extend(self._ids_for_p(cids, effect))
else:
out.extend((effect, cid) for cid in cids)
return out
else:
return ids
def _ids_for_p(self, p, effect=None):
"Find cluster IDs for clusters with p-value <= p"
if effect is None:
clusters = self.res.find_clusters(p)
else:
clusters = self.res.find_clusters(p, effect=effect)
clusters[:, 'effect'] = effect
if self._is_anova:
return list(zip(clusters['effect'], clusters['id']))
else:
return clusters['id']
def _get_clusters(self, ids):
return [self._get_cluster(cid) for cid in ids]
def _get_cluster(self, cid):
if self._is_anova:
effect, cid = cid
return self.res.cluster(cid, effect)
else:
return self.res.cluster(cid)
def plot_color_list(self, name, cells, w=None, colors=None):
if colors is None:
colors = self.colors
with mpl.rc_context(self.rc):
p = plot.ColorList(colors, cells, self.labels, w=w, show=False)
p.save(self._dst.vec % "colorlist %s" % name, transparent=True)
p.close()
def plot_color_grid(self, name, row_cells, column_cells):
with mpl.rc_context(self.rc):
p = plot.ColorGrid(row_cells, column_cells, self.colors, labels=self.labels)
p.save(self._dst.vec % "colorgrid %s" % name, transparent=True)
p.close()
def plot_clusters_spatial(self, ids, views, w=600, h=480, prefix=''):
"""Plot spatial extent of the clusters
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
        views : str | list of str | dict
            Can be a str or list of str to use the same views for all
            clusters. A dict can have labels or cluster IDs as keys.
w, h : int
Size in pixels. The default (600 x 480) corresponds to 2 x 1.6 in
at 300 dpi.
prefix : str
Prefix to use for the image files (optional, can be used to
distinguish different groups of images sharing the same color-bars).
Notes
-----
The horizontal colorbar is 1.5 in wide, the vertical colorbar is 1.6 in
high.
"""
ids = self._ids(ids)
clusters = self._get_clusters(ids)
clusters_spatial = [c.sum('time') for c in clusters]
if isinstance(views, str):
views = (views,)
# vmax
vmin = min(c.min() for c in clusters_spatial)
vmax = max(c.max() for c in clusters_spatial)
abs_vmax = max(vmax, abs(vmin))
# anatomical extent
brain_colorbar_done = False
for cid, cluster in zip(ids, clusters_spatial):
name = cname(cid)
if prefix:
name = prefix + ' ' + name
for hemi in ('lh', 'rh'):
if not cluster.sub(source=hemi).any():
continue
brain = plot.brain.cluster(cluster, abs_vmax, views='lat',
background=(1, 1, 1), colorbar=False,
parallel=True, hemi=hemi, w=w, h=h)
for view in views:
brain.show_view(view)
                    brain.save_image(self._dst.pix % ' '.join((name, hemi, view)),
                                     'rgba', True)
if not brain_colorbar_done:
with mpl.rc_context(self.rc):
label = "Sum of %s-values" % cluster.info['meas']
clipmin = 0 if vmin == 0 else None
clipmax = 0 if vmax == 0 else None
if prefix:
cbar_name = '%s cbar %%s' % prefix
else:
cbar_name = 'cbar %s'
h_cmap = 0.7 + POINT * mpl.rcParams['font.size']
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=h_cmap, w=1.5, show=False)
p.save(self._dst.vec % cbar_name % 'h', transparent=True)
p.close()
w_cmap = 0.8 + 0.1 * abs(floor(log10(vmax)))
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=1.6, w=w_cmap,
orientation='vertical', show=False)
p.save(self._dst.vec % cbar_name % 'v', transparent=True)
p.close()
brain_colorbar_done = True
brain.close()
    def _get_data(self, model, sub, subagg):
        """Assemble the data used for plotting values in a cluster
        Parameters
        ----------
        subagg : str
            Index in ds: within index, collapse across other predictors.
        """
ds = self.ds
modelname = model
if sub:
ds = ds.sub(sub)
modelname += '[%s]' % sub
if subagg:
idx_subagg = ds.eval(subagg)
ds_full = ds.sub(np.invert(idx_subagg))
ds_agg = ds.sub(idx_subagg).aggregate("subject", drop_bad=True)
ds = combine((ds_full, ds_agg), incomplete='fill in')
ds['condition'] = ds.eval(model).as_factor()
model = 'condition'
modelname += '(agg %s)' % subagg
return ds, model, modelname
def plot_values(self, ids, model, ymax, ymin, dpi=300, sub=None,
subagg=None, cells=None, pairwise=False, colors=None,
prefix=None, w=None, filter=None, legend=False):
"""Plot values in cluster
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
model : str
Model defining cells which to plot separately.
ymax : scalar
Top of the y-axis.
ymin : scalar
Bottom of the y axis.
dpi : int
Figure DPI.
sub : str
Only use a subset of the data.
subagg : str
Index in ds: within index, collapse across other predictors.
cells : sequence of cells in model
Modify visible cells and their order. Only applies to the barplot.
Does not affect filename.
pairwise : bool
Add pairwise tests to barplots.
colors : dict
Substitute colors (default are the colors provided at
initialization).
prefix : str
Prefix to use for the image files (optional, can be used to
distinguish different groups of images sharing the same color-bars).
w : scalar
UTS-stat plot width (default is ``2 * h``).
filter : Filter
Filter signal for display purposes (optional).
legend : bool
Plot a color legend.
"""
if w is None:
w = self.h * 2
ds, model, modelname = self._get_data(model, sub, subagg)
ids = self._ids(ids)
if colors is None:
colors = self.colors
src = ds['srcm']
n_cells = len(ds.eval(model).cells)
w_bar = (n_cells * 2 + 4) * (self.h / 12)
with mpl.rc_context(self.rc):
for cid in ids:
name = cname(cid)
if prefix:
name = prefix + ' ' + name
cluster = self._get_cluster(cid)
y_mean = src.mean(cluster != 0)
y_tc = src.mean(cluster.any('time'))
# barplot
p = plot.Barplot(
y_mean, model, 'subject', None, cells, pairwise, ds=ds,
trend=False, corr=None, title=None, frame=False,
yaxis=False, ylabel=False, colors=colors, bottom=ymin,
top=ymax, w=w_bar, h=self.h, xlabel=None, xticks=None,
tight=False, test_markers=False, show=False)
p.save(self._dst.vec % ' '.join((name, modelname, 'barplot')),
dpi=dpi, transparent=True)
p.close()
# time-course
if filter is not None:
y_tc = filter.filtfilt(y_tc)
p = plot.UTSStat(
y_tc, model, match='subject', ds=ds, error='sem',
colors=colors, title=None, axtitle=None, frame=False,
bottom=ymin, top=ymax, legend=None, ylabel=None,
xlabel=None, w=w, h=self.h, tight=False, show=False)
dt = y_tc.time.tstep / 2.
mark_start = cluster.info['tstart'] - dt
mark_stop = cluster.info['tstop'] - dt
p.add_vspan(mark_start, mark_stop, color='k', alpha=0.1, zorder=-2)
p.save(self._dst.vec % ' '.join((name, modelname, 'timecourse')),
dpi=dpi, transparent=True)
p.close()
# legend (only once)
if legend:
p.save_legend(self._dst.vec % (modelname + ' legend'),
transparent=True)
legend = False
| 36.714286 | 88 | 0.535798 | 1,894 | 15,420 | 4.270855 | 0.210137 | 0.007417 | 0.014711 | 0.008901 | 0.209173 | 0.175176 | 0.156138 | 0.144641 | 0.113487 | 0.105575 | 0 | 0.009095 | 0.358236 | 15,420 | 419 | 89 | 36.801909 | 0.808306 | 0.250065 | 0 | 0.15102 | 0 | 0 | 0.063476 | 0 | 0 | 0 | 0 | 0 | 0.016327 | 1 | 0.069388 | false | 0 | 0.032653 | 0.004082 | 0.163265 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8bf63f0e720b704b1783731498fe587100f20f | 287 | py | Python | Data-Science-Libraries/dataFrame.py | dhingratul/Data-Science | f9127f4b5271016f591162c16935d5661dce8121 | [
"MIT"
] | null | null | null | Data-Science-Libraries/dataFrame.py | dhingratul/Data-Science | f9127f4b5271016f591162c16935d5661dce8121 | [
"MIT"
] | null | null | null | Data-Science-Libraries/dataFrame.py | dhingratul/Data-Science | f9127f4b5271016f591162c16935d5661dce8121 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 9 11:49:04 2017
@author: dhingratul
"""
import pandas as pd
d = {'name': pd.Series(['AD', "BD", "CD", "DE"], index=[1, 2, 3, 4]),
'age': pd.Series([12, 32, 44], index=[1, 3, 4])}
df = pd.DataFrame(d)
print(df)
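# Note (illustrative): the 'age' Series has no value at index 2, so
# pandas fills that cell with NaN when aligning the two Series into one
# DataFrame.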
| 22.076923 | 69 | 0.557491 | 51 | 287 | 3.137255 | 0.803922 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110169 | 0.1777 | 287 | 12 | 70 | 23.916667 | 0.567797 | 0.348432 | 0 | 0 | 0 | 0 | 0.08427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8c140ba885750b4d36737e18b252d2a1c7a7fa | 3,023 | py | Python | pex/enum.py | ShellAddicted/pex | f1060b784fc9c4337a514ed21357ea9e8c2e4f41 | [
"Apache-2.0"
] | 2,160 | 2015-01-06T17:57:39.000Z | 2022-03-30T19:59:01.000Z | pex/enum.py | sthagen/pex | 9bd4c178c93556faad3c8a1e75989c9288d09416 | [
"Apache-2.0"
] | 1,242 | 2015-01-22T14:56:46.000Z | 2022-03-31T18:02:38.000Z | pex/enum.py | Satertek/pex | 64de1c4cf031118ef446ac98a8c164c91c23bb9b | [
"Apache-2.0"
] | 248 | 2015-01-15T13:34:50.000Z | 2022-03-26T01:24:18.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import weakref
from collections import defaultdict
from functools import total_ordering
from _weakref import ReferenceType
from pex.common import qualified_name
from pex.typing import TYPE_CHECKING, Generic, cast
if TYPE_CHECKING:
from typing import Any, DefaultDict, List, Optional, Tuple, Type, TypeVar
_V = TypeVar("_V", bound="Enum.Value")
class Enum(Generic["_V"]):
@total_ordering
class Value(object):
_values_by_type = defaultdict(
list
) # type: DefaultDict[Type[Enum.Value], List[ReferenceType[Enum.Value]]]
@classmethod
def _iter_values(cls):
for ref in cls._values_by_type[cls]:
value = ref()
if value:
yield value
def __init__(self, value):
# type: (str) -> None
values = Enum.Value._values_by_type[type(self)]
self.value = value
self.ordinal = len(values)
values.append(weakref.ref(self))
def __str__(self):
# type: () -> str
return str(self.value)
def __repr__(self):
# type: () -> str
return repr(self.value)
def __eq__(self, other):
# type: (Any) -> bool
return self is other
@classmethod
def _create_type_error(cls, other):
# type: (Any) -> TypeError
return TypeError(
"Can only compare values of type {value_type} amongst themselves; given "
"{other!r} of type {other_type}.".format(
value_type=qualified_name(cls),
other=other,
other_type=qualified_name(other),
)
)
def __lt__(self, other):
# type: (Any) -> bool
if type(self) != type(other):
raise self._create_type_error(other)
return self.ordinal < cast(Enum.Value, other).ordinal
def __le__(self, other):
# type: (Any) -> bool
if type(self) != type(other):
raise self._create_type_error(other)
return self is other or self < other
_values = None # type: Optional[Tuple[_V, ...]]
@classmethod
def values(cls):
# type: (Type[Enum[_V]]) -> Tuple[_V, ...]
if cls._values is None:
cls._values = tuple(cls.Value._iter_values())
return cls._values
@classmethod
def for_value(
cls, # type: Type[Enum[_V]]
value, # type: str
):
# type: (...) -> _V
for v in cls.values():
if v.value == value:
return v
raise ValueError(
"{!r} of type {} must be one of {}".format(
value, type(value), ", ".join(map(repr, cls.values()))
)
)
| 29.930693 | 89 | 0.546477 | 336 | 3,023 | 4.690476 | 0.27381 | 0.034264 | 0.030457 | 0.030457 | 0.13198 | 0.098985 | 0.098985 | 0.098985 | 0.098985 | 0.098985 | 0 | 0.003036 | 0.346345 | 3,023 | 100 | 90 | 30.23 | 0.794534 | 0.149851 | 0 | 0.114286 | 0 | 0 | 0.059123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.114286 | 0.057143 | 0.414286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8d50777bfab9b46ee29f2a7ac7b267c7d4a088 | 18,316 | py | Python | models/convdraw.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | 2 | 2019-11-06T14:03:52.000Z | 2019-12-25T22:35:19.000Z | models/convdraw.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | null | null | null | models/convdraw.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | null | null | null | '''
copy and modified from https://github.com/l3robot/pytorch-ConvDraw/blob/master/src/models.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from models.reparam import NormalDistributionConv2d
from utils import pack_hiddens, unpack_hiddens
class ConvLSTMCell(nn.Module):
'''
Generate a convolutional LSTM cell
copied and modified from https://github.com/Atcold/pytorch-CortexNet/blob/master/model/ConvLSTMCell.py
'''
def __init__(self, input_size, hidden_size, kernel_size=5, stride=1, padding=2, train_init_state=False, height=None, width=None):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.train_init_state = train_init_state
self.height = height
self.width = width
# lstm gates
self.gates = nn.Conv2d(input_size + hidden_size, 4 * hidden_size, kernel_size=kernel_size, stride=stride, padding=padding)
# initial states
if self.train_init_state:
assert self.height and self.width
self.init_hidden = Parameter(torch.zeros(1, self.hidden_size, self.height, self.width))
self.init_cell = Parameter(torch.zeros(1, self.hidden_size, self.height, self.width))
def init_state(self, batch_size, spatial_size):
state_size = [batch_size, self.hidden_size] + list(spatial_size)
if self.train_init_state:
return (self.init_hidden.expand(state_size),
self.init_cell.expand(state_size))
else:
weight = next(self.parameters())
return (weight.new_zeros(state_size),
weight.new_zeros(state_size))
def forward(self, input, prev_state):
        ''' forward ConvLSTM cell one time step
Input:
input: batch_size x input_size x height x width
prev_state: (hidden, cell) of each ConvLSTM
Output:
output: hidden (of new_state), batch_size x hidden_size x hidden_height x hidden_width
new_state: (hidden, cell) of each ConvLSTM
'''
# get batch and spatial sizes
batch_size = input.data.size(0)
spatial_size = input.data.size()[2:]
# generate empty prev_state, if None is provided
if prev_state is None:
prev_state = self.init_state(batch_size, spatial_size)
prev_hidden, prev_cell = prev_state
# data size is [batch, channel, height, width]
stacked_inputs = torch.cat((input, prev_hidden), 1)
outputs = self.gates(stacked_inputs)
# chunk across channel dimension
in_gate, remember_gate, out_gate, cell_gate = outputs.chunk(4, 1)
# apply sigmoid non linearity
in_gate = torch.sigmoid(in_gate)
remember_gate = torch.sigmoid(remember_gate)
out_gate = torch.sigmoid(out_gate)
# apply tanh non linearity
cell_gate = torch.tanh(cell_gate)
# compute current cell and hidden state
cell = (remember_gate * prev_cell) + (in_gate * cell_gate)
hidden = out_gate * torch.tanh(cell)
# pack output
new_state = (hidden, cell)
output = hidden
return output, new_state
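# Shape-checking sketch (illustrative; the sizes are assumptions, not
# from the original file): with kernel 5 / padding 2 the cell preserves
# the spatial size, so the hidden state is batch x hidden_size x H x W.
def _example_convlstm_cell():
    cell = ConvLSTMCell(input_size=3, hidden_size=8)
    x = torch.zeros(2, 3, 16, 16)    # batch x channels x H x W
    out, (h, c) = cell(x, None)      # state is auto-initialised to zeros
    assert out.shape == (2, 8, 16, 16)
    return out, (h, c)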
class ConvLSTM(nn.Module):
def __init__(self, input_size, hidden_size, kernel_size=5, stride=1, padding=2, train_init_state=False, height=None, width=None, num_layers=1):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.train_init_state = train_init_state
self.height = height
self.width = width
self.num_layers = num_layers
if self.num_layers != 1:
raise NotImplementedError
# define rnn
self.rnn = ConvLSTMCell(input_size, hidden_size, kernel_size, stride, padding, train_init_state, height, width)
# initial states
if self.train_init_state:
assert self.height and self.width
self.init_hidden = Parameter(torch.zeros(1, 1, self.hidden_size, self.height, self.width))
self.init_cell = Parameter(torch.zeros(1, 1, self.hidden_size, self.height, self.width))
def init_state(self, batch_size, spatial_size):
state_size = [batch_size, self.hidden_size] + list(spatial_size)
if self.train_init_state:
return (self.init_hidden.expand(state_size),
self.init_cell.expand(state_size))
else:
weight = next(self.parameters())
return (weight.new_zeros(state_size),
weight.new_zeros(state_size))
def forward(self, input, hiddens):
''' forward stacked rnn with sequence input
        Input:
            input: tensor, seq_len x batch_size x input_size x height x width
            hiddens: (h_t, c_t) tuple for an LSTM, a single tensor h_t otherwise,
                where h_t (and c_t) is a tensor num_layers x batch_size x hidden_size x height x width
        Output:
            output: tensor, seq_len x batch_size x hidden_size x height x width
            hiddens: packed states in the same format as the input hiddens
'''
# init output
output = []
# unpack hiddens
hiddens = unpack_hiddens(hiddens, 'LSTM')
# forward per time step
for i, input_t in enumerate(input.chunk(input.size(0), dim=0)):
# forward one time step
output_t, hiddens = self.rnn(input_t, hiddens)
# append output_t to output
output += [output_t.unsqueeze(0)]
# concat output
output = torch.cat(output, dim=0)
# pack hiddens
hiddens = pack_hiddens(hiddens, 'LSTM')
return output, hiddens
def flatten_parameters(self):
pass
class ConvDrawEncoderCell(nn.Module):
def __init__(self,
input_size,
hidden_size, # i.e. nc_lstm
nz,
kernel_size=5,
stride=1,
padding=2,
):
super().__init__()
self.rnn = ConvLSTMCell(input_size, hidden_size, kernel_size=kernel_size, stride=stride, padding=padding)
self.reparam = NormalDistributionConv2d(hidden_size, nz, kernel_size=kernel_size, stride=stride, padding=padding)
def init_state(self, batch_size, spatial_size):
return self.rnn.init_state(batch_size, spatial_size)
def sample(self, mean, logvar):
return self.reparam.sample_gaussian(mean, logvar)
def forward(self, input, prev_state):
        ''' forward encoder cell one time step
Input:
input: batch_size x input_size x height x width
prev_state: (hidden, cell) of each ConvLSTM
Output:
mean: batch_size x hidden_size x hidden_height x hidden_width
logvar: batch_size x hidden_size x hidden_height x hidden_width
hidden: hidden (of new_state), batch_size x hidden_size x hidden_height x hidden_width
new_state: (hidden, cell) of each ConvLSTM
'''
# forward rnn
hidden, new_state = self.rnn(input, prev_state)
# forward mean, logvar
mean, logvar = self.reparam(hidden)
return mean, logvar, hidden, new_state
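# Shape sketch for one encoder cell (illustrative; the sizes are
# assumptions, and sampling relies on the repo's NormalDistributionConv2d
# providing forward -> (mean, logvar) and sample_gaussian as imported above):
def _example_convdraw_encoder_cell():
    enc = ConvDrawEncoderCell(input_size=3, hidden_size=8, nz=4)
    x = torch.zeros(2, 3, 16, 16)
    mean, logvar, hidden, state = enc(x, None)   # mean/logvar: 2 x 4 x 16 x 16
    z = enc.sample(mean, logvar)                 # reparameterised latent
    return z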
class StackedConvDrawEncoderCell(nn.Module):
def __init__(self,
input_size,
#context_size, # i.e. nc_context
hidden_size, # i.e. nc_lstm
nz,
kernel_size=5,
padding=2,
num_layers=1,
dropout=0,
):
super().__init__()
self.input_size = input_size
#self.context_size = context_size
self.hidden_size = hidden_size
self.nz = nz
self.kernel_size = kernel_size
self.padding = padding
self.num_layers = num_layers
self.dropout = dropout
rnns = []
#rnns.append(ConvDrawEncoderCell(input_size+hidden_size+context_size, hidden_size, nz))
rnns.append(
ConvDrawEncoderCell(input_size+hidden_size,
hidden_size,
nz,
kernel_size=self.kernel_size,
padding=self.padding,
))
for i in range(1, num_layers):
#rnns.append(ConvDrawEncoderCell(nz+hidden_size+context_size, hidden_size, nz))
rnns.append(
ConvDrawEncoderCell(nz+hidden_size,
hidden_size,
nz,
kernel_size=self.kernel_size,
padding=self.padding,
))
self.rnns = nn.ModuleList(rnns)
def init_state(self, batch_size, spatial_size):
states = []
for i in range(len(self.rnns)):
state = self.rnns[i].init_state(batch_size, spatial_size)
states += [state]
return states
def sample(self, means, logvars):
zs = []
for i, (mean, logvar) in enumerate(zip(means, logvars)):
z = self.rnns[i].sample(mean, logvar)
zs += [z]
return zs
#def forward(self, input, context, prev_states, dec_hiddens):
def forward(self, input, prev_states, dec_hiddens):
        ''' forward stacked rnn one time step
Input:
input: batch_size x input_size
prev_states: a list with length = number of layers (of stacked RNNs)
each element in the list: (h_t, c_t) (or h_t) of each RNN
Output:
output_t: batch_size x output_size
new_states: a list with length = number of layers (of stacked RNNs)
each element in the list: (h_t, c_t) (or h_t) of each RNN
'''
# init new_states
new_means = []
new_logvars = []
new_hiddens = []
new_states = []
# init input (first layer)
hidden_p = dec_hiddens[0]
#input_q = torch.cat([input, hidden_p, context], dim=1)
input_q = torch.cat([input, hidden_p], dim=1)
# forward rnn (first layer)
mean_q, logvar_q, hidden_q, state = self.rnns[0](input_q, prev_states[0])
new_means += [mean_q]
new_logvars += [logvar_q]
new_hiddens += [hidden_q]
new_states += [state]
# remaining layers
for j in range(1, self.num_layers):
# init input
hidden_p = dec_hiddens[j]
#input_q = torch.cat([mean_q, hidden_p, context], dim=1)
input_q = torch.cat([mean_q, hidden_p], dim=1)
# apply dropout
''' see https://discuss.pytorch.org/t/lstm-dropout-clarification-of-last-layer/5588 '''
if self.dropout > 0:
input_q = F.dropout(input_q, p=self.dropout, training=self.training, inplace=False)
# forward rnn
mean_q, logvar_q, hidden_q, state = self.rnns[j](input_q, prev_states[j])
new_means += [mean_q]
new_logvars += [logvar_q]
new_hiddens += [hidden_q]
new_states += [state]
return new_means, new_logvars, new_hiddens, new_states
class ConvDrawDecoderCell(nn.Module):
def __init__(self,
lstm_input_size,
reparam_input_size,
hidden_size, # i.e. nc_lstm
nz,
kernel_size=5,
stride=1,
padding=2,
dropout=0,
):
super().__init__()
self.rnn_p = ConvLSTMCell(lstm_input_size, hidden_size, kernel_size=kernel_size, stride=stride, padding=padding)
self.reparam_p = NormalDistributionConv2d(reparam_input_size, nz, kernel_size=kernel_size, stride=stride, padding=padding)
self.dropout = dropout
def init_state(self, batch_size, spatial_size):
return self.rnn_p.init_state(batch_size, spatial_size)
def sample(self, mean, logvar):
return self.reparam_p.sample_gaussian(mean, logvar)
def forward(self, prev_state_p, input=None, z=None, higher_hiddens=[]):
        ''' forward decoder cell one time step
        Input:
            prev_state_p: (hidden, cell) of the prior ConvLSTM
            input: optional conditioning tensor concatenated to the cell input
            z: optional latent sample; drawn from the prior if None
            higher_hiddens: list of hidden maps from higher layers
Output:
mean: batch_size x hidden_size x hidden_height x hidden_width
logvar: batch_size x hidden_size x hidden_height x hidden_width
hidden: hidden (of new_state), batch_size x hidden_size x hidden_height x hidden_width
new_state: (hidden, cell) of each ConvLSTM
'''
# unpack hidden_p
hidden_p = prev_state_p[0]
# compute prior
input_reparam_p = torch.cat([hidden_p]+higher_hiddens, dim=1)
mean_p, logvar_p = self.reparam_p(input_reparam_p)
# sample z
if z is None:
z = self.reparam_p.sample_gaussian(mean_p, logvar_p)
# init input
input_p = torch.cat(
[z]
+ higher_hiddens
+ ([input] if input is not None else []),
dim=1)
# apply dropout
''' see https://discuss.pytorch.org/t/lstm-dropout-clarification-of-last-layer/5588 '''
if self.dropout > 0:
input_p = F.dropout(input_p, p=self.dropout, training=self.training, inplace=False)
# update prior rnn
hidden_p, new_state_p = self.rnn_p(input_p, prev_state_p)
return z, mean_p, logvar_p, hidden_p, new_state_p
class StackedConvDrawDecoderCell(nn.Module):
def __init__(self,
#input_size,
context_size, # i.e. nc_context
hidden_size, # i.e. nc_lstm
nz,
kernel_size=5,
padding=2,
num_layers=1,
dropout=0,
):
super().__init__()
#self.input_size = input_size
self.context_size = context_size
self.hidden_size = hidden_size
self.nz = nz
self.kernel_size = kernel_size
self.padding = padding
self.num_layers = num_layers
self.dropout = dropout
rnns = []
rnns.append(
#ConvDrawDecoderCell(input_size + nz + context_size + hidden_size*(num_layers-1),
ConvDrawDecoderCell(nz + context_size + hidden_size*(num_layers-1),
hidden_size*num_layers,
hidden_size,
nz,
kernel_size=self.kernel_size,
padding=self.padding,
dropout=self.dropout,
))
for i in range(1, num_layers):
rnns.append(
ConvDrawDecoderCell(nz + context_size + hidden_size*(num_layers-(i+1)),
hidden_size*(num_layers-i),
hidden_size,
nz,
kernel_size=self.kernel_size,
padding=self.padding,
dropout=0 if i == (num_layers-1) else self.dropout,
))
self.rnns = nn.ModuleList(rnns)
def init_state(self, batch_size, spatial_size):
states = []
for i in range(len(self.rnns)):
state = self.rnns[i].init_state(batch_size, spatial_size)
states += [state]
return states
def sample(self, means, logvars):
zs = []
for i, (mean, logvar) in enumerate(zip(means, logvars)):
z = self.rnns[i].sample(mean, logvar)
zs += [z]
return zs
def forward(self, context, prev_states, latents_q=None):
        ''' forward stacked rnn one time step
        Input:
            context: conditioning tensor, batch_size x context_size x height x width
prev_states: a list with length = number of layers (of stacked RNNs)
each element in the list: (h_t, c_t) (or h_t) of each RNN
Output:
output_t: batch_size x output_size
new_states: a list with length = number of layers (of stacked RNNs)
each element in the list: (h_t, c_t) (or h_t) of each RNN
'''
# init new_states
new_latents = []
new_means = []
new_logvars = []
new_hiddens = []
new_states = []
# remaining layers
for j in range(self.num_layers-1, 0, -1):
# init input
z = latents_q[j] if latents_q is not None else None
# forward rnn
z_p, mean_p, logvar_p, hidden_p, state = self.rnns[j](
prev_states[j], input=context, z=z, higher_hiddens=new_hiddens)
new_latents += [z_p]
new_means += [mean_p]
new_logvars += [logvar_p]
new_hiddens += [hidden_p]
new_states += [state]
# init input (last layer)
z = latents_q[0] if latents_q is not None else None
# forward rnn (last layer)
z_p, mean_p, logvar_p, hidden_p, state = self.rnns[0](
prev_states[0], input=context, z=z, higher_hiddens=new_hiddens)
new_latents += [z_p]
new_means += [mean_p]
new_logvars += [logvar_p]
new_hiddens += [hidden_p]
new_states += [state]
# reverse list order
new_latents = new_latents[::-1]
new_means = new_means[::-1]
new_logvars = new_logvars[::-1]
new_hiddens = new_hiddens[::-1]
new_states = new_states[::-1]
return new_latents, new_means, new_logvars, new_hiddens, new_states
| 38.237996 | 147 | 0.569666 | 2,244 | 18,316 | 4.403743 | 0.086007 | 0.050597 | 0.029751 | 0.022263 | 0.71524 | 0.698239 | 0.674357 | 0.651791 | 0.618802 | 0.589658 | 0 | 0.006911 | 0.344344 | 18,316 | 478 | 148 | 38.317992 | 0.815805 | 0.224503 | 0 | 0.608997 | 0 | 0 | 0.000594 | 0 | 0 | 0 | 0 | 0 | 0.00692 | 1 | 0.079585 | false | 0.00346 | 0.020761 | 0.013841 | 0.183391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8e4c2323971b0435537f98e9177fea4ee6f997 | 2,445 | py | Python | glance/api/middleware/cache_manage.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | 2 | 2015-09-30T09:43:37.000Z | 2017-06-26T14:36:21.000Z | glance/api/middleware/cache_manage.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | null | null | null | glance/api/middleware/cache_manage.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image Cache Management API
"""
import logging
from glance.api import cached_images
from glance.common import wsgi
logger = logging.getLogger(__name__)
class CacheManageFilter(wsgi.Middleware):
def __init__(self, app, conf, **local_conf):
map = app.map
resource = cached_images.create_resource(conf)
map.connect("/cached_images",
controller=resource,
action="get_cached_images",
conditions=dict(method=["GET"]))
map.connect("/cached_images/{image_id}",
controller=resource,
action="delete_cached_image",
conditions=dict(method=["DELETE"]))
map.connect("/cached_images",
controller=resource,
action="delete_cached_images",
conditions=dict(method=["DELETE"]))
map.connect("/queued_images/{image_id}",
controller=resource,
action="queue_image",
conditions=dict(method=["PUT"]))
map.connect("/queued_images",
controller=resource,
action="get_queued_images",
conditions=dict(method=["GET"]))
map.connect("/queued_images/{image_id}",
controller=resource,
action="delete_queued_image",
conditions=dict(method=["DELETE"]))
map.connect("/queued_images",
controller=resource,
action="delete_queued_images",
conditions=dict(method=["DELETE"]))
logger.info(_("Initialized image cache management middleware"))
super(CacheManageFilter, self).__init__(app)
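# Illustrative paste-deploy wiring (the section and pipeline names below
# are assumptions and may differ in a given glance-api-paste.ini):
#
# [filter:cachemanage]
# paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
#
# [pipeline:glance-api-caching]
# pipeline = versionnegotiation cachemanage apiv1app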
| 33.958333 | 78 | 0.601636 | 256 | 2,445 | 5.589844 | 0.4375 | 0.0587 | 0.1174 | 0.083857 | 0.410203 | 0.345912 | 0.345912 | 0.125786 | 0.074074 | 0 | 0 | 0.006414 | 0.298569 | 2,445 | 71 | 79 | 34.43662 | 0.827988 | 0.274029 | 0 | 0.5 | 0 | 0 | 0.18939 | 0.042784 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.078947 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8e818439696d1d98dff84f7c6d7caa5f340d62 | 1,225 | py | Python | client/GUI/client.py | Dzemoro/Project_OD | f83f1fa67482a909610f4f33476df56f9046a5c6 | [
"MIT"
] | null | null | null | client/GUI/client.py | Dzemoro/Project_OD | f83f1fa67482a909610f4f33476df56f9046a5c6 | [
"MIT"
] | 1 | 2022-01-25T23:04:33.000Z | 2022-01-25T23:04:34.000Z | client/GUI/client.py | Dzemoro/Project_OD | f83f1fa67482a909610f4f33476df56f9046a5c6 | [
"MIT"
] | 1 | 2022-02-19T13:19:32.000Z | 2022-02-19T13:19:32.000Z | from sys import path
from typing import Dict
if '' not in path:
path.append('')
import socket
import ssl
class Client:
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.context.load_cert_chain(certfile="C:\\Users\\mciec\\Desktop\\Studia\\OD\\Project_OD\\client\\GUI\\cert.pem")
self.context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
self.context.set_ciphers('AES256+ECDH:AES256+EDH')
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.conn = self.context.wrap_socket(self.client, server_hostname=str(self.ip))
self.conn.connect((str(ip), int(port)))
# if __name__ == "__main__":
#     # conn.bind((HOST, PORT))
# conn.connect(('127.0.0.1',60000))
# run = True
# while run:
# msg = input()
# conn.send(msg.encode())
# if msg == 'QUIT':
# receive = conn.recv(1024)
# print(receive.decode())
# run = False
# else:
# receive = conn.recv(1024)
# print(receive.decode()) | 31.410256 | 121 | 0.600816 | 160 | 1,225 | 4.425 | 0.5125 | 0.077684 | 0.019774 | 0.033898 | 0.10452 | 0.10452 | 0.10452 | 0 | 0 | 0 | 0 | 0.032751 | 0.252245 | 1,225 | 39 | 122 | 31.410256 | 0.740175 | 0.316735 | 0 | 0 | 0 | 0.055556 | 0.114495 | 0.114495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb8f80930b7235adae3d663af8ae2dc847f773c6 | 1,976 | py | Python | tests/remoteexecution/simple.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | [
"Apache-2.0"
] | null | null | null | tests/remoteexecution/simple.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | [
"Apache-2.0"
] | null | null | null | tests/remoteexecution/simple.py | doraskayo/buildstream | 1c72d4342ae7df360808de22c5e49f55dbb6bec6 | [
"Apache-2.0"
] | null | null | null | # Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
from buildstream.testing.integration import assert_contains
pytestmark = pytest.mark.remoteexecution
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
# Test building an executable with remote-execution:
@pytest.mark.datafiles(DATA_DIR)
def test_remote_autotools_build(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
element_name = "autotools/amhello.bst"
services = cli.ensure_services()
assert set(services) == set(["action-cache", "execution", "storage"])
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
result.assert_success()
assert_contains(
checkout,
[
"/usr",
"/usr/lib",
"/usr/bin",
"/usr/share",
"/usr/bin/hello",
"/usr/share/doc",
"/usr/share/doc/amhello",
"/usr/share/doc/amhello/README",
],
)
# Test running an executable built with remote-execution:
@pytest.mark.datafiles(DATA_DIR)
def test_remote_autotools_run(cli, datafiles):
project = str(datafiles)
element_name = "autotools/amhello.bst"
services = cli.ensure_services()
assert set(services) == set(["action-cache", "execution", "storage"])
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
result = cli.run(project=project, args=["shell", element_name, "/usr/bin/hello"])
result.assert_success()
assert result.output == "Hello World!\nThis is amhello 1.0.\n"
| 30.4 | 107 | 0.678138 | 238 | 1,976 | 5.504202 | 0.340336 | 0.050382 | 0.036641 | 0.058015 | 0.450382 | 0.403053 | 0.403053 | 0.403053 | 0.403053 | 0.403053 | 0 | 0.001246 | 0.187753 | 1,976 | 64 | 108 | 30.875 | 0.814953 | 0.126012 | 0 | 0.404762 | 0 | 0 | 0.182452 | 0.054038 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb906a795f9fcef27e639d9c312ec14bca9253d4 | 1,024 | py | Python | setup.py | bciobo/checkout-server | f6e178b6fc3a67a69608eb312a568d5b782755c6 | [
"MIT"
] | null | null | null | setup.py | bciobo/checkout-server | f6e178b6fc3a67a69608eb312a568d5b782755c6 | [
"MIT"
] | 1 | 2019-03-15T12:00:42.000Z | 2019-03-15T12:00:42.000Z | setup.py | bciobo/checkout-server | f6e178b6fc3a67a69608eb312a568d5b782755c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
checkout-server.setup
~~~~~~~~~~~~
"""
import io
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst')) as f:
readme = f.read()
REQUIRES = [
'Flask==1.0.2',
'stripe==2.11.0',
'Flask-Cors==3.0.6'
]
setup(
name='checkout-server',
version='1.0.0',
url='https://github.com/bciobo/checkout-server',
license='MIT',
maintainer='bciobo',
maintainer_email='bogdan.ciobotaru1@gmail.com',
description='Backend logic for handling Stripe orders for Doodance clients.',
long_description=readme,
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
zip_safe=False,
install_requires=REQUIRES,
extras_require={
'dev': ['pytest', 'coverage', 'flake8'],
},
python_requires='>=3',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
)
| 22.755556 | 81 | 0.628906 | 123 | 1,024 | 5.121951 | 0.650407 | 0.066667 | 0.079365 | 0.08254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024155 | 0.191406 | 1,024 | 44 | 82 | 23.272727 | 0.736715 | 0.055664 | 0 | 0 | 0 | 0 | 0.336117 | 0.028184 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb9108196ef395834c790ca266eee524b8734553 | 2,172 | py | Python | tests/test_requests.py | aokilipa/Maintenance-Tracker | 432f0ef9bc23b295d17e6b4556b631bfec5ea1b6 | [
"MIT"
] | null | null | null | tests/test_requests.py | aokilipa/Maintenance-Tracker | 432f0ef9bc23b295d17e6b4556b631bfec5ea1b6 | [
"MIT"
] | 6 | 2018-05-25T14:08:04.000Z | 2018-06-17T21:41:26.000Z | tests/test_requests.py | antokish/Maintenance-Tracker | 432f0ef9bc23b295d17e6b4556b631bfec5ea1b6 | [
"MIT"
] | null | null | null | """
APIs endpoint test
test:
-> gets all the requests for logged in user
-> gets a request for a logged in user
-> creates a request
-> Modify a request
"""
import unittest
import json
import os
import pytest
from flask_restful import Api
from resources.requests import dtrequest, RequestResource, Request
from run import create_app
from app import api_bp
@pytest.mark.unittest
class ApiTest(unittest.TestCase):
""" API endpoints test case"""
def setUp(self):
#Declare test variables and initialize app
self.app = create_app('testing')
self.client = self.app.test_client
self.req = { "id": 5, "requestor":"Test Doe", "email": "john@gmail.com",
"type": "maintenance", "status":"Approved", "desc": "Description goes here"}
def tearDown(self):
pass
def test_api_can_get_all_requests(self):
"""Test api Get all the requests for a logged in user"""
response = self.client().get('/api/v1/user/request')
        self.assertEqual(response.status_code, 200)
def test_api_can_get_request_by_id(self):
"""Test api can get a request for a logged in user"""
res = self.client().get('/api/v1/user/request/1')
        self.assertEqual(res.status_code, 200)
    def test_api_request_can_be_modified(self):
        """Test api can modify a request"""
rv = self.client().post('/api/v1/user/request/',
data = json.dumps(dict({"requestor":"sue doe"})))
self.assertEquals(rv.status_code, 200)
        res = self.client().put('/api/v1/user/request/1',
                                data=json.dumps(dict({"requestor": "Susan Sue"})))
        self.assertEqual(res.status_code, 200)
self.assertIn('Susan Sue', str(res.data))
def test_api_can_create_request(self):
"""Test api can create a request"""
res = self.client().post('/api/v1/user/request/', data = json.dumps(dict(self.req)))
        self.assertEqual(res.status_code, 201)
self.assertIn('Test Doe', str(res.data))
#Make tests executable
if __name__ == "__main__":
unittest.main() | 29.753425 | 92 | 0.627532 | 290 | 2,172 | 4.568966 | 0.327586 | 0.042264 | 0.045283 | 0.060377 | 0.337358 | 0.232453 | 0.150943 | 0.070943 | 0.070943 | 0.070943 | 0 | 0.014042 | 0.245856 | 2,172 | 73 | 93 | 29.753425 | 0.794872 | 0.188306 | 0 | 0.054054 | 0 | 0 | 0.152425 | 0.049654 | 0 | 0 | 0 | 0 | 0.189189 | 1 | 0.162162 | false | 0.027027 | 0.216216 | 0 | 0.405405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb913c2f7858485cf899215b7ed0080d770967f0 | 4,369 | py | Python | tofu/tests/tests01_geom/tests03_core_data/WEST_PFC_DivLowGC_Notes.py | Louwrensth/tofu | df2841d24eaf223ae07d862ffaa33fdb2fc079d3 | [
"MIT"
] | 56 | 2017-07-09T10:29:45.000Z | 2022-03-31T02:44:50.000Z | tofu/tests/tests01_geom/tests03_core_data/WEST_PFC_DivLowGC_Notes.py | Louwrensth/tofu | df2841d24eaf223ae07d862ffaa33fdb2fc079d3 | [
"MIT"
] | 522 | 2017-07-02T21:06:07.000Z | 2022-03-02T08:07:57.000Z | tofu/tests/tests01_geom/test_data/WEST_PFC_DivLowGC_Notes.py | Didou09/tofu | 4a4e1f058bab8e7556ed9d518f90807cec605476 | [
"MIT"
] | 9 | 2017-07-02T20:38:53.000Z | 2021-12-04T00:12:30.000Z | #!/usr/bin/env python
# Built-in
import os
import argparse
# Common
import numpy as np
_save = True
_here = os.path.abspath(os.path.dirname(__file__))
_Exp, _Cls, _name = os.path.split(__file__)[1].split('_')[:3]
assert not any([any([ss in s for ss in ['Notes','.']])
for s in [_Exp, _Cls, _name]])
def get_notes():
""" By convention : D is a length of the element, d is a gap """
notes = {'DPhi':{}, 'dPhi':{}}
# Toroidal width (mm, inner outer)
notes['DPhi']['In'] = 25.508
notes['DPhi']['Out'] = 32.908
# Inter tiles distance (mm, uniform)
notes['dl'] = 1.500
# Poloidal/Radial total length (mm)
notes['DL'] = 573.995
# Number of tiles radially
notes['nb'] = 2
notes['nbPhi'] = 38*12
# Radial length of a tile (mm)
notes['Dl'] = np.array([316.495,256.000])
# Vertical height of tiles (mm, uniform)
notes['DZ'] = 25.538
# Toroidal space between needles (mm, inner outer)
notes['dPhi']['In'] = 0.666
notes['dPhi']['Out'] = 0.666
# (X,Z,Y) polygon of one needle (mm) !!!!!! (X,Z,Y)
# 1 mm should be added towards Z>0 in the direction normal to the divertor's upper surface
notes['sampleXZY'] = [[-440.221, -606.218, 1854.847],
[-440.163, -581.217, 1854.860],
[-440.748, -579.362, 1857.546],
[-506.714, -694.150, 2133.992],
[-506.951, -694.466, 2134.837],
[-510.087, -699.924, 2147.977],
[-510.336, -702.527, 2149.054],
[-508.309, -724.780, 2140.295],
[-508.995, -721.633, 2143.101],
[-510.684, -703.089, 2150.401],
[-511.270, -701.984, 2152.937],
[-514.021, -706.772, 2164.465],
[-514.742, -708.491, 2167.329],
[-565.491, -796.886, 2380.007],
[-565.707, -799.489, 2381.092],
[-563.726, -821.241, 2372.530]]
notes['sampleXZY'] = np.array(notes['sampleXZY'])
for kk in notes.keys():
if type(notes[kk]) is dict:
notes[kk]['In'] = notes[kk]['In']*1.e-3
notes[kk]['Out'] = notes[kk]['Out']*1.e-3
elif not 'nb' in kk:
notes[kk] = notes[kk]*1.e-3
return notes
def _get_inter(D0,u0,D1,u1):
k = -np.cross(D0-D1,u1)/np.cross(u0,u1)
return D0 + k*u0
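# Quick check of _get_inter (illustrative, not part of the original
# script): the line through (0,0) along x and the line through (1,-1)
# along y intersect at (1,0).
def _example_get_inter():
    P = _get_inter(np.r_[0., 0.], np.r_[1., 0.],
                   np.r_[1., -1.], np.r_[0., 1.])
    assert np.allclose(P, [1., 0.])
    return P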
def make_Poly(save=_save, path=_here):
notes = get_notes()
R = np.hypot(notes['sampleXZY'][:,0],notes['sampleXZY'][:,2])
Z = notes['sampleXZY'][:,1]
d = np.sqrt(np.diff(R)**2+np.diff(Z)**2)
indup = np.argmax(d)
e1 = np.array([R[indup+1]-R[indup], Z[indup+1]-Z[indup]])
e1 = e1/np.linalg.norm(e1)
e2 = np.r_[-e1[1], e1[0]]
P0 = (np.array([R[0],Z[0]])-0.01*e2)[:,np.newaxis]
PEnd = (np.array([R[-1],Z[-1]])-0.01*e2)[:,np.newaxis]
Poly = np.array([R[1:-1],Z[1:-1]])
Poly = np.concatenate((P0,Poly,PEnd),axis=1)
Poly0 = Poly[:,[0,2,-3,-1]]
# Making Poly1
D0 = Poly0[:,1]
u0 = Poly0[:,-2]-D0
k0 = -np.sum((D0-Poly[:,5])*u0)/np.linalg.norm(u0)**2
k1 = -np.sum((D0-Poly[:,10])*u0)/np.linalg.norm(u0)**2
P0 = (D0 + k0*u0)[:,np.newaxis]
P1 = (D0 + k1*u0)[:,np.newaxis]
D0, D1 = Poly[:,5], Poly[:,10]
u0, u1 = Poly[:,6]-D0, Poly[:,9]-D1
PI = _get_inter(D0,u0,D1,u1)[:,np.newaxis]
Poly1 = np.concatenate((Poly0[:,:2], P0,PI,P1, Poly0[:,2:]),axis=1)
if save:
cstr = '%s_%s_%s'%(_Exp,_Cls,_name)
pathfilext = os.path.join(path, cstr+'_V0.txt')
np.savetxt(pathfilext, Poly0.T)
pathfilext = os.path.join(path, cstr+'_V1.txt')
np.savetxt(pathfilext, Poly1.T)
pathfilext = os.path.join(path, cstr+'_V2.txt')
np.savetxt(pathfilext, Poly.T)
return Poly0, Poly1, Poly, notes
if __name__ == '__main__':
# Parse input arguments
msg = 'Launch creation of polygons txt from bash'
parser = argparse.ArgumentParser(description = msg)
parser.add_argument('-save', type=bool, help='save ?', default=_save)
parser.add_argument('-path', type=str, help='saving path ?', default=_here)
args = parser.parse_args()
# Call wrapper function
make_Poly(save=args.save, path=args.path)
| 33.351145 | 94 | 0.527581 | 652 | 4,369 | 3.460123 | 0.407975 | 0.02172 | 0.014184 | 0.026596 | 0.100177 | 0.087766 | 0.025709 | 0 | 0 | 0 | 0 | 0.14901 | 0.271916 | 4,369 | 130 | 95 | 33.607692 | 0.560201 | 0.122454 | 0 | 0 | 0 | 0 | 0.060058 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 1 | 0.034483 | false | 0 | 0.034483 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb918c3993fe81ff747cdb83573597f481f77efa | 5,734 | py | Python | output/figuresAndTables/hsLsEvolution.py | AndresYague/Snuppat | 8a7f73fbc260bab67b5d38ed1efc628980f5047c | [
"MIT"
] | null | null | null | output/figuresAndTables/hsLsEvolution.py | AndresYague/Snuppat | 8a7f73fbc260bab67b5d38ed1efc628980f5047c | [
"MIT"
] | null | null | null | output/figuresAndTables/hsLsEvolution.py | AndresYague/Snuppat | 8a7f73fbc260bab67b5d38ed1efc628980f5047c | [
"MIT"
] | null | null | null | import sys, math, numpy, struct
import matplotlib.pyplot as plt
class readBinaryModels(object):
'''Class for reading binary models'''
def __init__(self, fil):
'''Initialize'''
super(readBinaryModels, self).__init__()
self.fread = open(fil, "rb")
self.head = None
self.model = None
def close(self):
'''Close file'''
self.fread.close()
def __readHeader(self):
'''Return header'''
head = []
byte = self.fread.read(4)
if len(byte) == 0:
return None
head.append(*struct.unpack('i', byte))
head.append(*struct.unpack('d', self.fread.read(8)))
head.append(*struct.unpack('d', self.fread.read(8)))
head.append(*struct.unpack('i', self.fread.read(4)))
head.append(*struct.unpack('i', self.fread.read(4)))
return head
def nextModel(self):
'''Calculate next model, unpacked'''
# Read header
self.head = self.__readHeader()
if self.head is None:
return False
self.model = []
for ii in range(self.head[3]):
s = []
for jj in range(self.head[4]):
s.append(*struct.unpack('d', self.fread.read(8)))
self.model.append(s)
return True
def readOnlyHeader(self):
'''Look only for the header and skip the rest'''
# Read header
self.head = self.__readHeader()
if self.head is None:
return False
# Skip file
        for ii in range(self.head[3]):
            for jj in range(self.head[4]):
self.fread.read(8)
return True
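# A minimal usage sketch for the reader above (file name hypothetical; the
# header semantics are set by whatever wrote the file, but head[3] and head[4]
# give the line and column counts of each model):
#
#     models = readBinaryModels("snuppat_output.bin")
#     while models.nextModel():
#         print("{} lines x {} columns".format(models.head[3], models.head[4]))
#     models.close()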
def main():
'''Get evolution of hs/ls vs s in [X/Fe]'''
# Check arguments
if len(sys.argv) < 2:
print("Usage python {} <model>".format(sys.argv[0]))
return 1
archivo = sys.argv[1]
hsLsElems = ["sr", "y", "zr", "ba", "la", "ce"]
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Now go model by model, calculating everything for every element
modelObj = readBinaryModels(archivo)
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
evolXFe = []; jj = -1
while True:
isNewModel = modelObj.nextModel()
if not isNewModel:
break
header = modelObj.head
model = modelObj.model
        # Report some progress
        jj += 1
        print(jj)
# Find the surface for this model
for ii in range(1, len(model)):
mass = (model[ii - 1][0] + model[ii][0])*0.5
# If found surface, extract information
if mass >= 0.85:
prevLine = model[ii - 1]
newLine = model[ii]
# Take all abundances
dens = [(x + y)*0.5 for (x, y) in zip(prevLine[4:], newLine[4:])]
xFeVals = {}
# Add the values for each element
for ii in range(len(atomicNum)):
key = atomicNum[ii]
xFeVals[key] = xFeVals.get(key, 0) + dens[ii]*atomicMass[ii]
# Now calculate values of interest
selectedFe = []
feVal = xFeVals[namesZ["fe"]]
sunFeVal = solarValues[namesZ["fe"]]
for elem in hsLsElems:
                    try:
                        val = xFeVals[namesZ[elem]]/feVal + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                        # fall back to the same floor value used above
                        val = 1e-100
sunVal = solarValues.get(namesZ[elem], 1e-100)/sunFeVal
val = math.log10(val) - math.log10(sunVal)
selectedFe.append(val)
break
evolXFe.append(selectedFe)
# Calculate hs/Fe, ls/Fe and s/Fe
sFe = []; hsLs = []
for arr in evolXFe:
lsFe = sum(arr[0:3])/3.
hsFe = sum(arr[3:])/3.
sFe.append(sum(arr)/6.)
hsLs.append(hsFe - lsFe)
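    # By construction of hsLsElems, arr[0:3] holds [Sr/Fe, Y/Fe, Zr/Fe] (the
    # light s-process peak) and arr[3:] holds [Ba/Fe, La/Fe, Ce/Fe] (the heavy
    # peak), so the two three-element means above are [ls/Fe] and [hs/Fe].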
# Now plot values
plt.plot(sFe, hsLs, lw = 2)
plt.xlabel("[s/Fe]")
plt.ylabel("[hs/ls]")
print("# file: {}".format(archivo))
print("# sFe hsLs")
for ii in range(len(sFe)):
print("{} {}".format(sFe[ii], hsLs[ii]))
plt.show()
return 0
if __name__ == "__main__":
main()
| 29.864583 | 81 | 0.48256 | 642 | 5,734 | 4.275701 | 0.305296 | 0.029508 | 0.033151 | 0.040073 | 0.166849 | 0.135155 | 0.135155 | 0.135155 | 0.123133 | 0.104189 | 0 | 0.018933 | 0.392047 | 5,734 | 191 | 82 | 30.020942 | 0.768503 | 0.141088 | 0 | 0.153846 | 0 | 0 | 0.038193 | 0.013963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.017094 | 0 | 0.145299 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb92297d94954cbc5a93edd5fd8580576c2a92fb | 4,148 | py | Python | getdata.py | trapwalker/habrstat | 6c7be89bd522eaae4dd4e60b40cf443a1e47e241 | [
"MIT"
] | null | null | null | getdata.py | trapwalker/habrstat | 6c7be89bd522eaae4dd4e60b40cf443a1e47e241 | [
"MIT"
] | null | null | null | getdata.py | trapwalker/habrstat | 6c7be89bd522eaae4dd4e60b40cf443a1e47e241 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import requests
from html.parser import HTMLParser
import re
import logging
import datetime
import json
import sys
log = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig()
class Node:
tag: str
attrs: dict
def __init__(self, *av, **kw):
tag = kw.pop('tag', None)
classes2 = []
attrs = {}
if av:
tag, *av = av
tag, *classes2 = tag.split('.')
if av:
attrs.update(av[0])
assert not av[1:]
attrs.update(kw)
classes = set(filter(None, attrs.pop('class', '').split(' ')))
classes.update(classes2)
if classes:
attrs['class'] = ' '.join(classes)
self.tag = None if tag == '*' else tag
self.attrs = attrs
def __repr__(self):
args = []
if self.tag:
args.append(f'tag={self.tag!r}')
if self.attrs:
args.append(f'attrs={self.attrs}')
return f'{type(self).__name__}({", ".join(args)})'
@property
def classes(self):
return set(self.attrs.get('class', '').split())
def __eq__(self, other: 'Node'):
if self.tag is not None and other.tag is not None and self.tag != other.tag:
return False
        self_classes = self.classes
        other_classes = other.classes
        if other_classes - self_classes:
return False
return True
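# Node equality is one-directional pattern matching: the right-hand Node acts
# as a pattern whose classes must be a subset of the left-hand node's classes.
# A quick illustration (hypothetical tags/classes):
#
#     Node('div', {'class': 'a b'}) == Node('div.a')   # True: {'a'} is a subset
#     Node('div.a') == Node('div.a.b')                 # False: 'b' missing on the left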
class ExtHTMLParser(HTMLParser):
def __init__(self, **kw):
super().__init__(**kw)
self.path = []
def handle_starttag(self, tag, attrs):
self.path.append(Node(tag, dict(attrs)))
# Overridable -- handle end tag
def handle_endtag(self, tag):
self.path.pop(-1)
@property
def last_node(self):
return self.path and self.path[-1] or None
class HabrStatParser(ExtHTMLParser):
def __init__(self, user=None, **kw):
super().__init__(**kw)
no_metrics = set(self.__dict__.keys())
self.time = datetime.datetime.utcnow()
self.user = user
self.votes = None
self.karma = None
self.rating = None
self.subscribers = None
self.subscribes = None
self._metrics = set(self.__dict__.keys()) - no_metrics
def log_line(self):
stat = {k: v for k, v in self.__dict__.items() if k in self._metrics}
return json.dumps(stat, ensure_ascii=False, default=str)
def handle_starttag(self, tag, attrs):
super().handle_starttag(tag, attrs)
attrs = dict(attrs)
if self.votes is None and self.path[-2:] == [
Node('div.media-obj__body.media-obj__body_user-info'),
Node('a.user-info__stats-item.stacked-counter'),
]:
            title = attrs.get('title', '')
            # the title on habr is Russian, e.g. "123 голоса" ("123 votes")
            votes = re.match(r'(?P<votes>\d+)\s+голо.*', title)
            if votes is not None:
                self.votes = int(votes.group('votes'))
@staticmethod
def cast_float(s: str):
try:
return float(s.replace(',', '.'))
        except (ValueError, AttributeError):
log.error(f'Cast error: {s!r} to float')
def handle_data(self, data):
if self.path[-3:] == [
Node('div.media-obj__body.media-obj__body_user-info'),
Node('a.user-info__stats-item.stacked-counter'),
Node('div.stacked-counter__value'),
]:
if self.karma is None:
self.karma = self.cast_float(data)
elif self.path[-2] == Node('a.stacked-counter_rating'):
self.rating = self.cast_float(data)
elif self.path[-2] == Node('a.stacked-counter_subscribers'):
if self.subscribers is None:
self.subscribers = int(data)
else:
self.subscribes = int(data)
def get_habr_stat(user, site='https://habr.com', lang='ru'):
url = f'{site}/{lang}/users/{user}/comments/'
with requests.get(url) as r:
data = r.text
hp = HabrStatParser(user=user)
hp.feed(data)
print(hp.log_line())
if __name__ == '__main__':
user = sys.argv[1]
get_habr_stat(user)
| 28.027027 | 84 | 0.557859 | 519 | 4,148 | 4.254335 | 0.271676 | 0.032609 | 0.021739 | 0.017663 | 0.172101 | 0.138587 | 0.112319 | 0.112319 | 0.112319 | 0.112319 | 0 | 0.004843 | 0.303038 | 4,148 | 147 | 85 | 28.217687 | 0.758907 | 0.011572 | 0 | 0.156522 | 0 | 0 | 0.115422 | 0.080771 | 0 | 0 | 0 | 0 | 0.008696 | 1 | 0.121739 | false | 0 | 0.06087 | 0.017391 | 0.295652 | 0.008696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb922cf62cdd16bff320630dd909e8f69c6ba1b4 | 9,164 | py | Python | api/app/utils/mongo_utils.py | crtarsorg/istinomer-factchecker | eb0639f526fc2b75bf4628dcff9080a030f34f38 | [
"CC0-1.0"
] | 2 | 2016-07-20T17:49:22.000Z | 2016-07-21T12:31:26.000Z | api/app/utils/mongo_utils.py | crtarsorg/istinomer-factchecker | eb0639f526fc2b75bf4628dcff9080a030f34f38 | [
"CC0-1.0"
] | null | null | null | api/app/utils/mongo_utils.py | crtarsorg/istinomer-factchecker | eb0639f526fc2b75bf4628dcff9080a030f34f38 | [
"CC0-1.0"
] | 9 | 2016-07-08T08:56:29.000Z | 2019-06-26T11:59:30.000Z | import pymongo
from bson import ObjectId
from datetime import datetime
class MongoUtils():
def __init__(self, mongo):
self.mongo = mongo
self.collection_name = 'entries'
def insert(self, doc):
self.mongo.db[self.collection_name].insert(doc)
def edit_entry_doc(self, query=None):
query_param = {
"_id": ObjectId(query['doc_id'])
}
update_fields = {
'grade': query['grade'],
'classification': query['classification'],
'category': query['category'],
'article': {
'author': query['author_of_article']
},
'quote': {
'politician': query['politician'],
'author': query['quote_author'],
'affiliation': query['quote_author_affiliation']
},
'new_update': True
}
if 'date_of_article_pub' in query:
if query['date_of_article_pub'] != "":
update_fields['article']['date'] = self.convert_str_to_date(
query['date_of_article_pub'])
if 'date_of_statement' in query:
if query['date_of_statement'] != "":
update_fields['quote']['date'] = self.convert_str_to_date(
query['date_of_statement'])
if 'promise_due_date' in query:
update_fields['promise'] = {
'due': self.convert_str_to_date(query['promise_due_date'])
}
# Call the function to update fields based on query params
self._update(query_param, update_fields)
def flag_entry_as_inappropriate(self, query):
query_param = {
"_id": ObjectId(query['doc_id'])
}
update_fields = {
'inappropriate': query['inappropriate'],
'new_update': True
}
# Call the function to update fields based on query params
self._update(query_param, update_fields)
def remove_inappropriate_flag_entry(self, query):
query_param = {
"_id": ObjectId(query['doc_id'])
}
update_fields = {
'inappropriate': '',
'new_update': True
}
# Call the function to update fields based on query params
self._update(query_param, update_fields)
def soft_delete_entry(self, query):
query_param = {
"_id": ObjectId(query['doc_id'])
}
update_fields = {
'delete': True,
'new_update': True
}
# Call the function to update fields based on query params
self._update(query_param, update_fields)
def _update(self, query_param, update_fields):
self.mongo.db[self.collection_name].update(
query_param, {"$set": update_fields})
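    # A minimal wiring sketch (assumes flask-pymongo, which exposes the same
    # `mongo.db` handle this class uses; the doc_id is hypothetical):
    #
    #     mongo = PyMongo(app)
    #     utils = MongoUtils(mongo)
    #     utils.soft_delete_entry({'doc_id': '5f0c1c2e9d1e4b0a6c8d1234'})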
def find(self, skip, limit, query={"delete": {'$exists': False}}):
docs = self.mongo.db[self.collection_name].find(query).skip(skip).limit(limit).sort("timestamp", pymongo.DESCENDING)
return docs
def total_facts(self, query={"delete": {'$exists': False}}):
docs_total = self.mongo.db[self.collection_name].find(query).count()
return docs_total
def get(self, query=None, chrome_user_id=None):
        if query is None:
            query = {}
        query_params = {}
project = {
"domain": True,
"url": True,
"text": True,
'timestamp': {'$dateToString': {'format': "%d/%m/%Y %H:%M:%S", "date": "$timestamp"}},
'quote.author': True,
'quote.affiliation': True,
'classifications': True,
'grade': True
}
if 'classifications' in query:
query_params["classification"] = {"$in": query['classifications']}
project['classification'] = True
if "Promise" in query["classifications"]:
if 'promise' in query:
                    if query['promise']['dueFrom'] and query['promise']['dueFrom'] != '':
                        query_params.setdefault('promise.due', {})
                        query_params['promise.due']['$gte'] = \
                            self.convert_str_to_date(query['promise']['dueFrom'])
                        project['promise'] = {
                            "due": {'$dateToString': {'format': "%d/%m/%Y", "date": "$promise.due"}}
                        }
                    if query['promise']['dueTo'] and query['promise']['dueTo'] != '':
                        query_params.setdefault('promise.due', {})
                        project['promise'] = {
                            "due": {'$dateToString': {'format': "%d/%m/%Y", "date": "$promise.due"}}
                        }
                        query_params['promise.due']['$lte'] = \
                            self.convert_str_to_date(query['promise']['dueTo'])
if 'grades' in query:
if query['grades']:
                query_params["grade"] = {"$in": query['grades']}
project['grade'] = True
if 'categories' in query:
if query['categories']:
                query_params["category"] = {"$in": query['categories']}
project['category'] = True
if 'article' in query:
if 'authors' in query['article']:
if query['article']['authors']:
query_params['article.author'] = {
"$in": query['article']['authors']}
project['article.author'] = True
if 'date' in query['article']:
if query['article']['date']:
query_params['article.date'] = {}
if query['article']['date']['from'] and query['article']['date']['from'] != '':
query_params['article.date']['$gte'] = self.convert_str_to_date(
query['article']['date']['from'])
if query['article']['date']['to'] and query['article']['date']['to'] != '':
query_params['article.date']['$lte'] = self.convert_str_to_date(
query['article']['date']['to'])
project['article.date'] = {'$dateToString': {
'format': "%d/%m/%Y", "date": "$article.date"}}
# Build the quote query params
if 'quote' in query:
if 'politician' in query['quote']:
if query['quote']['politician']:
query_params['quote.politician'] = query['quote']['politician']
project['quote.politician'] = True
if 'author' in query['quote']:
if query['quote']['author']:
query_params['quote.author'] = query['quote']['author']
project['quote.author'] = True
if 'date' in query['quote']:
if query['quote']['date']:
query_params['quote.date'] = {}
if query['quote']['date']['from'] and query['quote']['date']['from'] != '':
query_params['quote.date']['$gte'] = self.convert_str_to_date(
query['quote']['date']['from'])
if query['quote']['date']['to'] and query['quote']['date']['to'] != '':
query_params['quote.date']['$lte'] = self.convert_str_to_date(
query['quote']['date']['to'])
project['quote.date'] = {'$dateToString': {
'format': "%d/%m/%Y", "date": "$quote.date"}}
# Make sure we only get for given chrome user, if chrome user id is specified:
if chrome_user_id:
query_params['chromeUserId'] = chrome_user_id
project['chromeUserId'] = True
project['inappropriate'] = True
project['new_update'] = True
else:
# Let's make sure we don't return entries that have been flagged as inappropriate:
query_params['inappropriate'] = {'$exists': False}
query_params['classification'] = {'$ne': "Backlog"}
pipeline = [
{"$match": query_params},
# {"$sort": SON([('timestamp', -1)])}
{"$sort": {'timestamp': -1}}
]
if project:
project_stage = {
"$project": project
}
pipeline.append(project_stage)
# Execute query
docs = list(self.mongo.db[self.collection_name].aggregate(
pipeline, cursor={}))
return docs
def find_entry_based_on_url(self, current_url):
query = {"url": current_url,
"delete": {'$exists': False},
"inappropriate": {'$exists': False}}
docs = self.mongo.db[self.collection_name].find(query)
return docs
@staticmethod
def convert_str_to_date(date_str):
return datetime.strptime(date_str, "%d/%m/%Y")
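    # For orientation, a filter built from
    # {'quote': {'date': {'from': '01/01/2015', 'to': '31/12/2015'}}}
    # contributes (dates illustrative):
    #
    #     query_params['quote.date'] = {'$gte': datetime(2015, 1, 1),
    #                                   '$lte': datetime(2015, 12, 31)}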
| 34.19403 | 124 | 0.49498 | 883 | 9,164 | 4.958097 | 0.147225 | 0.070352 | 0.02741 | 0.036546 | 0.382366 | 0.36455 | 0.298995 | 0.278438 | 0.231384 | 0.205802 | 0 | 0.000339 | 0.355849 | 9,164 | 267 | 125 | 34.322097 | 0.741318 | 0.050633 | 0 | 0.175532 | 0 | 0 | 0.208631 | 0.002762 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.015957 | 0.005319 | 0.111702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb949b1776c61b84da7c1dd20c3b8b777539d8e3 | 1,992 | py | Python | command_line/depends_on.py | TiankunZhou/dxtbx | 9a45d44ccc78dae7b4a33bd938df67d1bac56867 | [
"BSD-3-Clause"
] | null | null | null | command_line/depends_on.py | TiankunZhou/dxtbx | 9a45d44ccc78dae7b4a33bd938df67d1bac56867 | [
"BSD-3-Clause"
] | null | null | null | command_line/depends_on.py | TiankunZhou/dxtbx | 9a45d44ccc78dae7b4a33bd938df67d1bac56867 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import h5py
import dxtbx.util
sample = None
def depends_on(in_name):
f = h5py.File(in_name, "r")
depends = {}
global sample
def finder(thing, path):
if hasattr(thing, "attrs"):
if "depends_on" in thing.attrs:
depends[path] = thing.attrs["depends_on"]
if thing.attrs.get("NX_class", None) == "NXsample":
global sample
sample = path
if hasattr(thing, "keys"):
for k in thing:
try:
finder(thing[k], path=f"{path}/{k}")
except (IOError, TypeError, ValueError, KeyError):
pass
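    # Note: h5py's Group.visititems() walks the same hierarchy; a rough
    # equivalent of the recursive scan above (minus the per-item error
    # handling) would be:
    #
    #     def visitor(name, obj):
    #         if "depends_on" in getattr(obj, "attrs", {}):
    #             depends["/" + name] = obj.attrs["depends_on"]
    #     f.visititems(visitor)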
# clean up hierarchy to just have sample stuff
finder(f, path="")
delete = []
for d in sorted(depends):
if "/entry/sample/transformations" not in d:
delete.append(d)
for d in delete:
del depends[d]
# invert for printing
inverted = {}
for d in depends:
t = depends[d]
if t in inverted:
print(t, inverted[t], d)
inverted[t] = d
print("Dependency hierarchy in file:")
at = "."
depth = 0
while at in inverted:
print(f"{' ' * depth}+ {at}")
at = inverted[at]
depth += 2
print(f"{' ' * depth}+ {at}")
print("")
print(f"Sample at {sample} depends on:")
if "depends_on" in f[sample]:
print(f[sample]["depends_on"][()])
elif hasattr(f[sample], "attrs") and "depends_on" in f[sample].attrs:
print(f[sample].attrs["depends_on"])
else:
print(f"{sample} -> depends_on not found")
f.close()
def run(args=None):
dxtbx.util.encode_output_as_utf8()
parser = argparse.ArgumentParser(
description="Print depends_on hierarchy for Nexus files"
)
parser.add_argument("filename", help="The nexus file")
opts = parser.parse_args(args)
depends_on(opts.filename)
if __name__ == "__main__":
run()
| 23.162791 | 73 | 0.553715 | 248 | 1,992 | 4.342742 | 0.346774 | 0.091922 | 0.040854 | 0.033426 | 0.072423 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003668 | 0.315763 | 1,992 | 85 | 74 | 23.435294 | 0.7865 | 0.032129 | 0 | 0.065574 | 0 | 0 | 0.172468 | 0.015065 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0.016393 | 0.04918 | 0 | 0.098361 | 0.147541 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb956495b31e3047caf2a8364d46ed8b02d587ea | 2,714 | py | Python | sim_utils/audit.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | sim_utils/audit.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | sim_utils/audit.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | class Audit(object):
def __init__(self):
"""
Constructor method for audit.
Attributes
==========
global_audit (dictionary):
Audit of high level metrics
unit_audit (dictionary):
Audit at unit level
"""
# Initialise global audits
self.global_audit_index_count = 0
self.global_audit = []
self.audit_unit_occupancy = []
self.audit_unit_occupancy_percent = []
self.audit_unit_occupancy_displaced_preferred = []
self.audit_unit_occupancy_displaced_destination = []
self.audit_unit_occupancy_waiting_preferred = []
def perform_global_audit(self, _model):
"""
Perform audit of high level model parameters/metrics
"""
while True:
if _model.env.now >= _model.params.sim_warmup:
# Global tracker audit
self.global_audit_index_count += 1
item = dict()
item['index'] = self.global_audit_index_count
item['time'] = _model.env.now
item['total_patients'] = _model.tracker['total_patients']
item['total_patients_asu'] = _model.tracker['total_patients_asu']
item['total_patients_waited'] = _model.tracker['total_patients_waited']
item['total_patients_displaced'] = _model.tracker['total_patients_displaced']
item['current_patients'] = _model.tracker['current_patients']
item['asu_patients_all'] = _model.tracker['current_asu_patients_all']
item['asu_patients_allocated'] = _model.tracker['current_asu_patients_allocated']
item['asu_patients_unallocated'] = _model.tracker['current_asu_patients_unallocated']
item['asu_patients_displaced'] = _model.tracker['current_asu_patients_displaced']
self.global_audit.append(item)
# Occupancy, displaced and waiting patients
self.audit_unit_occupancy.append(_model.unit_occupancy)
self.audit_unit_occupancy_percent.append(
(_model.unit_occupancy/_model.data.units_capacity)*100)
self.audit_unit_occupancy_displaced_preferred.append(
_model.unit_occupancy_displaced_preferred )
self.audit_unit_occupancy_displaced_destination.append(
_model.unit_occupancy_displaced_destination)
self.audit_unit_occupancy_waiting_preferred .append(
_model.unit_occupancy_waiting_preferred)
# Wait for next audit
yield _model.env.timeout(1)
| 43.079365 | 101 | 0.619381 | 270 | 2,714 | 5.774074 | 0.233333 | 0.12508 | 0.083387 | 0.141116 | 0.403464 | 0.248877 | 0.217447 | 0.163566 | 0.163566 | 0.163566 | 0 | 0.003145 | 0.296979 | 2,714 | 62 | 102 | 43.774194 | 0.813941 | 0.118644 | 0 | 0 | 0 | 0 | 0.172414 | 0.119598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb986dac601f5d708dbb738f760a5c3ba23b5df6 | 30,637 | py | Python | abutils/tenx/batch_cellranger.py | bnemoz/abutils | d5dfab90c885a5d948cc1cd8070100f0cdab1c7e | [
"MIT"
] | 4 | 2019-02-27T21:41:13.000Z | 2022-03-19T19:07:28.000Z | abutils/tenx/batch_cellranger.py | bnemoz/abutils | d5dfab90c885a5d948cc1cd8070100f0cdab1c7e | [
"MIT"
] | 1 | 2018-10-11T22:01:19.000Z | 2018-10-11T22:01:19.000Z | abutils/tenx/batch_cellranger.py | bnemoz/abutils | d5dfab90c885a5d948cc1cd8070100f0cdab1c7e | [
"MIT"
] | 5 | 2018-10-11T21:18:00.000Z | 2022-01-28T18:45:42.000Z | #!/usr/bin/env python
# filename: batch_cellranger.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from argparse import ArgumentParser
import csv
import os
import shutil
import subprocess as sp
import sys
import time
import urllib
import yaml
from natsort import natsorted
from sample_sheet import SampleSheet
from ..utils import log
from ..utils.pipeline import list_files, make_dir
from ..version import __version__
def parse_arguments(print_help=False):
parser = ArgumentParser(prog='batch_cellranger', description="Batch CellRanger processing of one or more 10x Genomics samples.")
parser.add_argument('-p', '--project-directory', dest='project_dir', required=True,
help="The project directory, where run data will be downloaded \
and output files will be written. Required.")
parser.add_argument('-c', '--config-file', dest='config_file', required=True,
help="The config file, in YML format. Required.")
    parser.add_argument('-d', '--debug', dest="debug", action='store_true', default=False,
                        help="If set, logs verbose information, including the \
                        stdout and stderr of each external command (cellranger, \
                        wget, tar). Useful for debugging. Default is False.")
parser.add_argument('-v', '--version', action='version', \
version='%(prog)s {version}'.format(version=__version__))
if print_help:
parser.print_help()
else:
args = parser.parse_args()
args.project_dir = os.path.abspath(args.project_dir)
return args
class Args():
def __init__(self, project_dir=None, config_file=None, debug=False):
super(Args, self).__init__()
self.project_dir = project_dir
self.config_file = config_file
self.debug = debug
class Config():
'''
``Config`` provides the following attributes:
- ``config_file``: path to the configuration file, in YAML format.
- ``runs``: a list of ``Run`` objects
- ``samples``: a list of `Sample` objects. Samples are parsed from `Run`s, so
samples may exist in this list that will not be processed by any
of the cellranger operations.
- ``ops``: a dictionary with cellranger operations (count, vdj, aggr or features)
as keys and a list of subjects as values. Maps the operation with
the samples on which the operation will be performed.
- ``reference``: dictionary mapping sample names to the VDJ reference. Must include
a ``default`` reference, which will be used for all subjects not
specifically named in the dictionary.
- ``transcriptome``: same as ``reference``, but mapping samples to a reference
transcriptome for ``cellranger count`` operations.
- ``feature_reference``: same as ``reference``, but mapping samples to a
feature reference.
- ``uiport``: port for the cellranger UI. Default is 72647.
- ``cellranger``: path to the cellranger binary. Default is "cellranger", which
assumes that the cellranger binary is on your PATH.
'''
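    # A minimal config.yaml sketch matching the fields parsed below (all paths
    # and sample names are hypothetical):
    #
    #     runs:
    #       run1:
    #         url: https://example.com/run1.tar.gz
    #         samplesheet: /path/to/SampleSheet.csv
    #     vdj: [sample1]
    #     count:
    #       group1:
    #         sample1: Gene Expression
    #     aggr:
    #       group1: [sample1]
    #     vdj_reference:
    #       default: /path/to/vdj_reference
    #     transcriptome:
    #       default: /path/to/transcriptome
    #     feature_reference:
    #       default: /path/to/feature_ref.csv
    #     uiport: 72647
    #     cellranger: cellranger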
def __init__(self, config_file):
self.config_file = os.path.abspath(config_file)
self.reference = None
self.transcriptome = None
self.feature_reference = None
self.uiport = None
self.cellranger = None
self._runs = None
self._samples = None
self._ops = None
self._parse_config_file()
def __repr__(self):
rlist = ['BATCH CELLRANGER CONFIGURATION']
rlist.append('------------------------------')
rlist.append('config file: {}'.format(self.config_file))
rlist.append('VDJ reference:')
rlist.append(' - default: {}'.format(self.reference['default']))
for k, v in self.reference.items():
if k == 'default':
continue
rlist.append(' - {}: {}'.format(k, v))
rlist.append('transcriptome:')
rlist.append(' - default: {}'.format(self.transcriptome['default']))
for k, v in self.transcriptome.items():
if k == 'default':
continue
rlist.append(' - {}: {}'.format(k, v))
rlist.append('feature reference:')
rlist.append(' - default: {}'.format(self.feature_reference['default']))
for k, v in self.feature_reference.items():
if k == 'default':
continue
rlist.append(' - {}: {}'.format(k, v))
rlist.append('UI port: {}'.format(self.uiport))
rlist.append('cellranger binary: {}'.format(self.cellranger))
rlist.append('runs: {}'.format([r.name for r in self.runs]))
rlist.append('samples: {}'.format([s.name for s in self.samples]))
rlist.append('operations:')
rlist.append(' - vdj: {}'.format(self.ops.get('vdj', [])))
rlist.append(' - count: {}'.format(self.ops.get('count', [])))
# rlist.append(' - features: {}'.format(self.ops.get('features', [])))
rlist.append(' - aggr:')
for k, v in self.ops.get('aggr', {}).items():
rlist.append(' - {}: {}'.format(k, v))
return '\n'.join(rlist)
@property
def runs(self):
if self._runs is None:
return []
return self._runs
@runs.setter
def runs(self, runs):
self._runs = runs
@property
def samples(self):
if self._samples is None:
return []
return self._samples
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def ops(self):
if self._ops is None:
            return {}
return self._ops
@ops.setter
def ops(self, ops):
self._ops = ops
def _parse_config_file(self):
with open(self.config_file) as f:
config = yaml.safe_load(f)
# parse runs
self.runs = [Run(name, cfg) for name, cfg in config['runs'].items()]
# collect samples from runs
samples = []
for run in self.runs:
if run.samples is not None:
samples += run.samples
self.samples = list(set(samples))
# # assign runs to each sample:
# for run in self.runs:
# for s in samples:
# if s.name in [s.name for s in run.samples]:
# s.add_run(run.name)
# parse ops
self.ops = {}
self.ops['vdj'] = config.get('vdj', [])
self.ops['count'] = config.get('count', {})
self.ops['aggr'] = config.get('aggr', {})
# assign ops to each sample
for op, samples in self.ops.items():
if op in ['count']:
                samples = [k for subject_dict in samples.values() for k in subject_dict.keys()]
for s in self.samples:
if s.name in samples:
s.add_op(op)
# references
self.reference = config.get('vdj_reference', {})
self.transcriptome = config.get('transcriptome', {})
self.feature_reference = config.get('feature_reference', {})
# assign references/transcriptomes to each sample:
for s in self.samples:
s.reference = config['vdj_reference'].get(s.name, config['vdj_reference']['default'])
s.transcriptome = config['transcriptome'].get(s.name, config['transcriptome']['default'])
s.feature_reference = config['feature_reference'].get(s.name, config['feature_reference']['default'])
# general config options
self.uiport = config.get('uiport', 72647)
self.cellranger = config.get('cellranger', 'cellranger')
class Run():
'''
    Object for aggregating sequencing run information throughout the 10x processing
'''
def __init__(self, name, config):
self.name = name
self.config = config
self.url = config.get('url', None)
self.path = os.path.abspath(config['path']) if 'path' in config else None
self.is_compressed = config.get('is_compressed', True)
self.samplesheet = os.path.abspath(config['samplesheet']) if 'samplesheet' in config else None
self.simple_csv = os.path.abspath(config['simple_csv']) if 'simple_csv' in config else None
self.copy_to_project = config.get('copy_to_project', False)
self._fastq_path = None
self._samples = None
def __repr__(self):
rstring = 'RUN: {}'.format(self.name)
rlist = [rstring]
rlist.append('-' * len(rstring))
if self.url is not None:
rlist.append('url: {}'.format(self.url))
if self.path is not None:
rlist.append('path: {}'.format(self.path))
rlist.append('compressed: {}'.format(self.is_compressed))
if self.samplesheet is not None:
rlist.append('samplesheet: {}'.format(self.samplesheet))
if self.simple_csv is not None:
rlist.append('simple csv: {}'.format(self.simple_csv))
rlist.append('fastq path: {}'.format(self.fastq_path))
rlist.append('samples: {}'.format(self.samples))
return '\n'.join(rlist)
@property
def sample_names(self):
if self.samples is not None:
return [s.name for s in self.samples]
return []
@property
def fastq_path(self):
return self._fastq_path
@fastq_path.setter
def fastq_path(self, path):
self._fastq_path = path
@property
def samples(self):
if self._samples is None:
self._samples = self._parse_samples()
return self._samples
@samples.setter
def samples(self, samples):
self._samples = samples
def print_splash(self):
l = len(self.name)
logger.info('')
logger.info('-' * (l + 4))
logger.info(' ' + self.name)
logger.info('-' * (l + 4))
def get(self, raw_dir, log_dir=None, debug=None):
destination = os.path.join(os.path.abspath(raw_dir), self.name)
if all([self.path is not None, self.copy_to_project, not self.is_compressed]):
self.path = self._copy(destination, log_dir=log_dir, debug=debug)
if self.url is not None:
self.path = self._download(self.url, destination, log_dir=log_dir, debug=debug)
if self.is_compressed:
self.path = self._decompress(self.path, destination, log_dir=log_dir, debug=debug)
def mkfastq(self, fastq_dir, cellranger='cellranger', uiport=None, log_dir=None, debug=None):
logger.info('Running mkfastq....')
fastq_dir = os.path.abspath(fastq_dir)
make_dir(fastq_dir)
mkfastq_cmd = "cd '{}' && {} mkfastq".format(fastq_dir, cellranger)
mkfastq_cmd += ' --id={}'.format(self.name)
mkfastq_cmd += " --run='{}'".format(self.path)
if self.samplesheet is not None:
mkfastq_cmd += " --samplesheet='{}'".format(self.samplesheet)
else:
mkfastq_cmd += " --csv='{}'".format(self.simple_csv)
        if uiport is not None:
            mkfastq_cmd += " --uiport={}".format(uiport)
        p = sp.Popen(mkfastq_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
        if uiport is not None:
time.sleep(5)
uifile = os.path.join(fastq_dir, '{}/_uiport'.format(self.name))
with open(uifile) as f:
uistring = f.read().strip()
external_ip = urllib.request.urlopen('https://api.ipify.org').read().decode('utf8')
uistring = 'http://' + external_ip + ':' + uistring.split(':')[-1]
logger.info(' - UI is at {}'.format(uistring))
o, e = p.communicate()
if debug:
logger.info('\nMKFASTQ')
logger.info(mkfastq_cmd)
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'mkfastq')
make_dir(log_subdir)
write_log(self.name, log_subdir, stdout=o, stderr=e)
# logger.info('done')
## NEED TO DOUBLE-CHECK WHAT THE FASTQ PATH ACTUALLY IS
## is it just --output-dir? or do they go into an --id subfolder?
return os.path.join(fastq_dir, '{}/outs/fastq_path'.format(self.name))
def _copy(self, destination, log_dir=None, debug=False):
shutil.copytree(self.path, destination)
return destination
def _download(self, url, destination, log_dir=None, debug=False):
logger.info('Downloading run data....')
destination = os.path.abspath(destination)
make_dir(destination)
        # -P sets the directory wget downloads into
        wget_cmd = "wget -P '{}' {}".format(destination, url)
p = sp.Popen(wget_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if debug:
logger.info('\nDOWNLOAD')
logger.info(wget_cmd)
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'download')
make_dir(log_subdir)
write_log(self.name, log_subdir, stdout=o, stderr=e)
fname = os.path.basename(url)
# logger.info('done')
return os.path.join(destination, fname)
def _decompress(self, source, destination, log_dir=None, debug=False):
logger.info('Decompressing run data....')
source = os.path.abspath(source)
destination = os.path.abspath(destination)
make_dir(destination)
tar_cmd = "tar xzvf '{}' -C '{}'".format(source, destination)
p = sp.Popen(tar_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if debug:
logger.info('\nDECOMPRESS')
logger.info(tar_cmd)
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'decompress')
make_dir(log_subdir)
write_log(self.name, log_subdir, stdout=o, stderr=e)
run_dir = destination
for (root, subdirs, files) in os.walk(destination):
if 'RTAComplete.txt' in files:
run_dir = os.path.join(destination, root)
break
# logger.info('done')
return run_dir
def _parse_samples(self):
if self.samplesheet is not None:
return self._parse_samplesheet()
if self.simple_csv is not None:
return self._parse_simple_csv()
def _parse_samplesheet(self):
ss = SampleSheet(self.samplesheet)
samples = []
for s in ss.samples:
samples.append(Sample(s.Sample_ID, name=s.Sample_Name, index=s.index))
return samples
def _parse_simple_csv(self):
samples = []
with open(self.simple_csv) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
samples.append(Sample(r['Sample'], index=r['Index']))
return samples
class Sample():
'''
Object for aggregating information about a single sample
'''
def __init__(self, id, ops=None, name=None, reference=None, transcriptome=None, op_type=None, feature_reference=None, index=None):
self.id = id
self.name = name if name is not None else id
self.index = index
self.reference = reference
self.transcriptome = transcriptome
self.op_type = op_type
self.feature_reference = feature_reference
self.vdj_path = None
self.count_path = None
self.feature_path = None
self.aggr_path = None
self._ops = ops
self._fastqs = None
self._runs = None
def __lt__(self, other):
return all([self.name < other.name])
def __hash__(self):
return hash(self.id)
@property
def runs(self):
if self._runs is None:
return []
return self._runs
@property
def fastqs(self):
if self._fastqs is None:
return []
return self._fastqs
@property
def ops(self):
if self._ops is None:
return []
return self._ops
def add_run(self, run):
if self._runs is None:
self._runs = [run, ]
else:
self._runs.append(run)
def add_fastq_path(self, fastq):
if self._fastqs is None:
self._fastqs = [fastq, ]
else:
self._fastqs.append(fastq)
def add_op(self, op):
if self._ops is None:
self._ops = [op, ]
else:
self._ops.append(op)
#==================
# OPERATIONS
#==================
def cellranger_vdj(sample, vdj_dir, cellranger='cellranger', uiport=None, log_dir=None, debug=False):
    '''
    Runs ``cellranger vdj`` for a single sample and returns the output path.
    '''
vdj_dir = os.path.abspath(vdj_dir)
vdj_cmd = "cd '{}'".format(vdj_dir)
vdj_cmd += " && {} vdj --id {} --sample {} --reference '{}'".format(cellranger,
sample.name,
sample.id,
sample.reference)
    # cellranger takes a single --fastqs flag with a comma-separated list of paths
    vdj_cmd += " --fastqs '{}'".format(','.join(sample.fastqs))
    if uiport is not None:
        vdj_cmd += ' --uiport {}'.format(uiport)
    p = sp.Popen(vdj_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
    if uiport is not None:
        time.sleep(3)
        uifile = os.path.join(vdj_dir, '{}/_uiport'.format(sample.name))
        with open(uifile) as f:
            uistring = f.read().strip()
        external_ip = urllib.request.urlopen('https://api.ipify.org').read().decode('utf8')
        uistring = 'http://' + external_ip + ':' + uistring.split(':')[-1]
        logger.info('CellRanger UI is at {}'.format(uistring))
o, e = p.communicate()
if debug:
logger.info('\nCELLRANGER VDJ')
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'vdj')
make_dir(log_subdir)
write_log(sample.name, log_subdir, stdout=o, stderr=e)
return os.path.join(vdj_dir, sample.name)
def cellranger_count(group, samples, feature_ref, count_dir,
cellranger='cellranger', uiport=None, log_dir=None, debug=False):
    '''
    Runs ``cellranger count`` with feature barcoding libraries for a group of
    samples and returns the output path.
    '''
count_dir = os.path.abspath(count_dir)
lib_csv = _make_feature_library_csv(samples, group, count_dir)
count_cmd = "cd '{}'".format(count_dir)
    # samples in a group are assumed to share a single transcriptome
    count_cmd += " && {} count --id {} --libraries '{}' --feature-ref '{}' --transcriptome '{}'".format(
        cellranger,
        group,
        lib_csv,
        feature_ref,
        samples[0].transcriptome)
    # fastq paths are supplied through the libraries CSV, so no --fastqs flag is added
    if uiport is not None:
        count_cmd += " --uiport '{}'".format(uiport)
    p = sp.Popen(count_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
    if uiport is not None:
        time.sleep(3)
        uifile = os.path.join(count_dir, '{}/_uiport'.format(group))
        with open(uifile) as f:
            uistring = f.read().strip()
        external_ip = urllib.request.urlopen('https://api.ipify.org').read().decode('utf8')
        uistring = 'http://' + external_ip + ':' + uistring.split(':')[-1]
        logger.info('CellRanger UI is at {}'.format(uistring))
o, e = p.communicate()
if debug:
logger.info('\nCELLRANGER COUNT')
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'count')
make_dir(log_subdir)
        write_log(group, log_subdir, stdout=o, stderr=e)
    return os.path.join(count_dir, group)
# def cellranger_feature_barcoding(sample, feature_dir, cellranger='cellranger', uiport=None, log_dir=None, debug=False):
# feature_dir = os.path.abspath(feature_dir)
# lib_csv = _make_feature_library_csv(sample, feature_dir)
# feature_cmd = "cd '{}'".format(feature_dir)
# feature_cmd += " && {} count --id {} --libraries '{}' --feature-ref '{}' --sample {}'.format(cellranger,
# sample.name,
# lib_csv,
# sample.feature_reference,
# sample.name)
# p = sp.Popen(feature_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
# time.sleep(3)
# uifile = os.path.join(feature_dir, '{}/_uiport'.format(self.name))
# with open(uifile) as f:
# uistring = f.read().strip()
# external_ip = urllib.request.urlopen('https://api.ipify.org').read().decode('utf8')
# uistring = 'http://' + external_ip + ':' + uistring.split(':')[-1]
# logger.info('CellRanger UI is at {}'.format(uistring))
# o, e = p.communicate()
# if debug:
# logger.info('\nCELLRANGER FEATURES')
# logger.info(o)
# logger.info(e)
# if log_dir is not None:
# log_subdir = os.path.join(log_dir, 'features')
# make_dir(log_subdir)
# write_log(sample.name, log_subdir, stdout=o, stderr=e)
# return os.path.join(feature_dir, sample.name)
def _make_feature_library_csv(samples, group, feature_dir):
    lib_str = 'fastqs,sample,library_type\n'
    for sample in samples:
        for fastq in sample.fastqs:
            lib_str += '{},{},{}\n'.format(fastq, sample.name, sample.op_type)
    lib_path = os.path.join(feature_dir, '{}_feature-library.csv'.format(group))
with open(lib_path, 'w') as f:
f.write(lib_str)
return lib_path
def cellranger_aggr(samples, group, aggr_dir, normalize='mapped', cellranger='cellranger', uiport=None, log_dir=None, debug=False):
aggr_dir = os.path.abspath(aggr_dir)
aggr_csv = _make_aggr_csv(samples, aggr_dir)
aggr_cmd = "cd '{}'".format(aggr_dir)
    aggr_cmd += " && {} aggr --id {} --csv '{}' --normalize {}".format(cellranger,
group,
aggr_csv,
normalize)
## Eventually want to replace grabbing stdout/stderr with p.communicate(), so we can grab the standard output
## in real time, parse out the url for the UI and print to screen so the user can follow along with the UI
p = sp.Popen(aggr_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if debug:
logger.info('\nCELLRANGER AGGR')
logger.info(o)
logger.info(e)
if log_dir is not None:
log_subdir = os.path.join(log_dir, 'aggr')
make_dir(log_subdir)
write_log(group, log_subdir, stdout=o, stderr=e)
return os.path.join(aggr_dir, group)
def _make_aggr_csv(samples, aggr_dir):
aggr_dir = os.path.join(aggr_dir)
aggr_csv = os.path.join(aggr_dir, 'aggr.csv')
lines = ['library_id,molecule_h5', ]
for sample in samples:
h5_path = os.path.join(sample.count_path, 'outs/molecule_info.h5')
lines.append('{},{}'.format(sample.id, h5_path))
with open(aggr_csv, 'w') as f:
f.write('\n'.join(lines))
return aggr_csv
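# For reference, the CSV written above follows cellranger aggr's expected
# layout, e.g. (paths hypothetical):
#
#     library_id,molecule_h5
#     sample1,/path/to/count/sample1/outs/molecule_info.h5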
def build_directory_structure(cfg, project_dir):
    dirs = {}
    project_dir = os.path.abspath(project_dir)
make_dir(project_dir)
shutil.copy(cfg.config_file, os.path.join(project_dir, 'config.yaml'))
dirs['raw'] = os.path.join(project_dir, 'raw_data')
dirs['fastq'] = os.path.join(project_dir, 'fastqs')
dirs['vdj'] = os.path.join(project_dir, 'vdj')
dirs['count'] = os.path.join(project_dir, 'count')
# dirs['features'] = os.path.join(project_dir, 'features')
dirs['aggr'] = os.path.join(project_dir, 'aggr')
for op in cfg.ops.keys():
make_dir(dirs[op])
dirs['log'] = os.path.join(project_dir, 'logs')
make_dir(dirs['log'])
return dirs
def write_log(prefix, dir, stdout=None, stderr=None):
if stdout is not None:
stdout_file = os.path.join(dir, '{}.stdout'.format(prefix))
with open(stdout_file, 'w') as f:
f.write(stdout)
if stderr is not None:
stderr_file = os.path.join(dir, '{}.stderr'.format(prefix))
with open(stderr_file, 'w') as f:
f.write(stderr)
def print_plan(cfg):
    '''
    prints the plan (runs, samples, ops, references, etc)
    '''
    # Config.__repr__ already renders the full plan
    print(cfg)


def print_op_splash(op, samples):
    logger.info('')
    logger.info('Running cellranger {} for: {}'.format(
        op, [s.name for s in samples if op in s.ops]))


def print_aggr_splash(aggr):
    logger.info('')
    logger.info('Running cellranger aggr for groups: {}'.format(list(aggr.keys())))
def main(args):
# parse the config file
cfg = Config(args.config_file)
print_plan(cfg)
# build directory structure
    dirs = build_directory_structure(cfg, args.project_dir)
# setup logging
run_log = os.path.join(dirs['log'], 'run.log')
log.setup_logging(run_log, print_log_location=False, debug=args.debug)
global logger
logger = log.get_logger()
# mkfastq
for run in cfg.runs:
run.print_splash()
run.get(dirs['raw'], log_dir=dirs['log'], debug=args.debug)
fastq_path = run.mkfastq(dirs['fastq'],
                                 cellranger=cfg.cellranger,
log_dir=dirs['log'],
debug=args.debug)
for sample in cfg.samples:
            if sample.name in run.sample_names:
sample.add_fastq_path(fastq_path)
# # operations (except aggr)
# opmap = {'vdj': cellranger_vdj,
# 'count': cellranger_count,
# 'features': cellranger_feature_barcoding}
# for op in ['vdj', 'count', 'features']:
# print_op_splash(op)
# opfunction = opmap[op]
# for sample in cfg.samples:
# if op not in sample.ops:
# continue
# opfunction(sample,
# dirs[op],
# cellranger=cfg.cellranger,
# uiport=cfg.uiport,
# log_dir=dirs['log'],
# debug=args.debug)
# vdj
print_op_splash('vdj', cfg.samples)
for sample in cfg.samples:
if 'vdj' not in sample.ops:
continue
path = cellranger_vdj(sample,
dirs['vdj'],
cellranger=cfg.cellranger,
uiport=cfg.uiport,
log_dir=dirs['log'],
debug=args.debug)
sample.vdj_path = path
# count
print_op_splash('count', cfg.samples)
    for group, sample_dict in cfg.ops['count'].items():
        samples = [s for s in cfg.samples if s.name in sample_dict]
        if not samples:
            continue
        for s in samples:
            s.op_type = sample_dict[s.name]
        # all samples in a group are assumed to share one feature reference
        path = cellranger_count(group,
                                samples,
                                samples[0].feature_reference,
                                dirs['count'],
                                cellranger=cfg.cellranger,
                                uiport=cfg.uiport,
                                log_dir=dirs['log'],
                                debug=args.debug)
        for s in samples:
            s.count_path = path
# # features
# print_op_splash('features', cfg.samples)
# for sample in cfg.samples:
# if 'features' not in sample.ops:
# continue
# path = cellranger_feature_barcoding(sample,
# dirs['features'],
# cellranger=cfg.cellranger,
# uiport=cfg.uiport,
# log_dir=dirs['log'],
# debug=args.debug)
# sample.feature_path = path
# aggr
print_aggr_splash(cfg.ops['aggr'])
for group, sample_names in cfg.ops['aggr'].items():
samples = [s for s in cfg.samples if s.name in sample_names]
path = cellranger_aggr(samples,
group,
dirs['aggr'],
normalize='mapped',
cellranger=cfg.cellranger,
uiport=cfg.uiport,
log_dir=dirs['log'],
debug=args.debug)
for s in samples:
s.aggr_path = path
# compress
if __name__ == "__main__":
args = parse_arguments()
main(args)
| 37.776819 | 134 | 0.562686 | 3,643 | 30,637 | 4.595937 | 0.113368 | 0.017918 | 0.020307 | 0.008063 | 0.390253 | 0.32312 | 0.292122 | 0.258257 | 0.230305 | 0.214239 | 0 | 0.001715 | 0.314685 | 30,637 | 810 | 135 | 37.823457 | 0.795723 | 0.206809 | 0 | 0.35316 | 0 | 0 | 0.08784 | 0.005121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087361 | false | 0.005576 | 0.026022 | 0.005576 | 0.185874 | 0.024164 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb9aed708b569527ddaa5ddee19f3a3c9d03c3e1 | 1,310 | py | Python | setup.py | arthurpessa/ordpy | fbef9a57fa98b76549d532ec68cc6b61d0c9a064 | [
"MIT"
] | 32 | 2021-02-14T15:32:35.000Z | 2022-03-16T07:54:54.000Z | setup.py | arthurpessa/ordpy | fbef9a57fa98b76549d532ec68cc6b61d0c9a064 | [
"MIT"
] | 1 | 2021-03-05T01:49:28.000Z | 2022-02-02T11:43:49.000Z | setup.py | arthurpessa/ordpy | fbef9a57fa98b76549d532ec68cc6b61d0c9a064 | [
"MIT"
] | 4 | 2021-05-19T17:12:13.000Z | 2022-03-07T22:59:01.000Z | #old version
# from setuptools import setup
# setup(name='ordpy',
# version='1.0.0',
# description='A Python package for data analysis with permutation entropy and ordinal networks methods.',
# url='https://github.com/arthurpessa/ordpy',
# author='Arthur A. B. Pessa and Haroldo V. Ribeiro',
# author_email='arthur_pessa@hotmail.com, hvr@dfi.uem.br',
# license='MIT',
# packages=['ordpy'],
# install_requires=['numpy'],
# python_requires=">=3.6",
# zip_safe=False
# )
import setuptools
with open("README.rst", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="ordpy",
version="1.0.6",
author="Arthur A. B. Pessa and Haroldo V. Ribeiro",
author_email="arthur_pessa@hotmail.com, hvr@dfi.uem.br",
description="A Python package for data analysis with permutation entropy and ordinal networks methods.",
long_description=long_description,
long_description_content_type="text/x-rst; charset=UTF-8",
url="https://github.com/arthurpessa/ordpy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 33.589744 | 112 | 0.659542 | 165 | 1,310 | 5.145455 | 0.478788 | 0.070671 | 0.03298 | 0.04947 | 0.525324 | 0.525324 | 0.393404 | 0.393404 | 0.393404 | 0.393404 | 0 | 0.012322 | 0.194656 | 1,310 | 38 | 113 | 34.473684 | 0.792417 | 0.381679 | 0 | 0 | 0 | 0 | 0.463568 | 0.031407 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb9b6b4642ecf196ccda35a84628b7e3563814f2 | 1,211 | py | Python | inference/inference_bicubic.py | samueljackson92/BasicSR | 43f2166600e1960061da318ae6c1aabb7fa41584 | [
"Apache-2.0",
"MIT"
] | null | null | null | inference/inference_bicubic.py | samueljackson92/BasicSR | 43f2166600e1960061da318ae6c1aabb7fa41584 | [
"Apache-2.0",
"MIT"
] | null | null | null | inference/inference_bicubic.py | samueljackson92/BasicSR | 43f2166600e1960061da318ae6c1aabb7fa41584 | [
"Apache-2.0",
"MIT"
] | null | null | null | import argparse
import cv2
import glob
import numpy as np
import os
import torch
from PIL import Image
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='datasets/Set14/LRbicx3', help='input test image folder')
parser.add_argument('--output', type=str, default='results/EDSR', help='output folder')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
os.makedirs(args.output, exist_ok=True)
for idx, path in enumerate(sorted(glob.glob(os.path.join(args.input, '*')))):
imgname = os.path.splitext(os.path.basename(path))[0]
print('Testing', idx, imgname)
# read image
img = cv2.imread(path, cv2.IMREAD_COLOR)
shape = np.array(img.shape[:2])
# inference
try:
            img = Image.fromarray(img)
            # PIL's resize expects (width, height), while img.shape is (height, width)
            output = img.resize((shape[1] * 3, shape[0] * 3), Image.BICUBIC)
            output = np.array(output)
except Exception as error:
print('Error', error, imgname)
else:
# save image
cv2.imwrite(os.path.join(args.output, f'{imgname}_bicubic.png'), output)
if __name__ == '__main__':
main()
| 31.051282 | 110 | 0.630884 | 157 | 1,211 | 4.770701 | 0.496815 | 0.032043 | 0.045394 | 0.037383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010776 | 0.233691 | 1,211 | 38 | 111 | 31.868421 | 0.796336 | 0.025599 | 0 | 0 | 0 | 0 | 0.113946 | 0.036565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.241379 | 0 | 0.275862 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cb9d953b09022a20a59241ac49df71dc6b8a1cb9 | 6,047 | py | Python | tests/unit/test_say.py | timgates42/pyttsx | 1a84ee33971951b1ea18f2708061a5d19ef94018 | [
"FSFAP"
] | 160 | 2016-10-04T22:45:36.000Z | 2022-02-10T06:41:56.000Z | tests/unit/test_say.py | simz089s/pyttsx | 4ad1e84fdefee4eed290fdc966573cb57d0b0079 | [
"FSFAP"
] | 27 | 2016-10-04T02:45:18.000Z | 2022-03-09T15:15:54.000Z | tests/unit/test_say.py | simz089s/pyttsx | 4ad1e84fdefee4eed290fdc966573cb57d0b0079 | [
"FSFAP"
] | 58 | 2016-10-06T16:53:43.000Z | 2021-10-21T22:17:35.000Z | '''
Tests say.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import unittest
import test_setup
import pyttsx
import itertools
class TestSay(unittest.TestCase):
utters = ['This is the first utterance',
'The second is an utterance as well']
names = ['utter1', 'utter2']
def setUp(self):
self.correct = []
for utter, name in zip(self.utters, self.names):
events = [{'type' : 'started-utterance'}]
last = 0
for word in utter.split(' '):
event = {'type' : 'started-word'}
event['length'] = len(word)
event['location'] = last
events.append(event)
last += len(word) + 1
events.append({'type' : 'finished-utterance', 'completed' : True})
for event in events:
event['name'] = name
self.correct.append(events)
self.events = []
self.engine = pyttsx.init(debug=False)
self.engine.connect('started-utterance', self._onUtterStart)
self.engine.connect('started-word', self._onUtterWord)
self.engine.connect('finished-utterance', self._onUtterEnd)
self.engine.connect('error', self._onUtterError)
def tearDown(self):
del self.engine
def _onUtterStart(self, **kwargs):
event = {'type' : 'started-utterance'}
event.update(kwargs)
self.events.append(event)
def _onUtterWord(self, **kwargs):
event = {'type' : 'started-word'}
event.update(kwargs)
self.events.append(event)
def _onUtterEnd(self, **kwargs):
event = {'type' : 'finished-utterance'}
event.update(kwargs)
self.events.append(event)
def _onUtterError(self, **kwargs):
event = {'type' : 'error'}
event.update(kwargs)
self.events.append(event)
def testSay(self):
self.engine.say(self.utters[0], self.names[0])
self.engine.runAndWait()
# number of events check
self.assert_(len(self.events) == len(self.correct[0]))
# event data check
for cevent, tevent in zip(self.correct[0], self.events):
self.assert_(cevent == tevent)
def testMultipleSay(self):
self.engine.say(self.utters[0], self.names[0])
self.engine.say(self.utters[1], self.names[1])
self.engine.runAndWait()
# number of events check
self.assert_(len(self.events) == len(self.correct[0]) + len(self.correct[1]))
# event data check
correct = itertools.chain(*self.correct)
for cevent, tevent in zip(correct, self.events):
self.assert_(cevent == tevent)
def testSayTypes(self):
self.engine.say(1.0)
self.engine.say(None)
self.engine.say(object())
self.engine.runAndWait()
# event data check
        errors = list(filter(lambda e: e['type'] == 'error', self.events))
self.assert_(len(errors) == 0)
def testStop(self):
tok = None
def _onWord(**kwargs):
self.engine.stop()
self.engine.disconnect(tok)
tok = self.engine.connect('started-word', _onWord)
self.engine.say(self.utters[0], self.names[0])
self.engine.runAndWait()
# make sure it stopped short
self.assert_(len(self.events) < len(self.correct[0]))
end = self.events[-1]
self.assert_(not end['completed'])
def testStopBeforeSay(self):
self.engine.stop()
self.testSay()
def testMultipleStopBeforeSay(self):
self.engine.stop()
self.engine.stop()
self.testSay()
def testStartEndLoop(self):
def _onEnd(**kwargs):
self.engine.endLoop()
self.engine.connect('finished-utterance', _onEnd)
self.engine.say(self.utters[0], self.names[0])
self.engine.startLoop()
# number of events check
self.assert_(len(self.events) == len(self.correct[0]))
# event data check
for cevent, tevent in zip(self.correct[0], self.events):
self.assert_(cevent == tevent)
def testExternalLoop(self):
def _onEnd(**kwargs):
self.engine.endLoop()
# kill the engine built by setUp
del self.engine
self.engine = pyttsx.init('dummy')
self.engine.connect('started-utterance', self._onUtterStart)
self.engine.connect('started-word', self._onUtterWord)
self.engine.connect('finished-utterance', self._onUtterEnd)
self.engine.connect('error', self._onUtterError)
self.engine.connect('finished-utterance', _onEnd)
self.engine.say(self.utters[0], self.names[0])
self.engine.startLoop(False)
self.engine.iterate()
# number of events check
self.assert_(len(self.events) == len(self.correct[0]))
# event data check
for cevent, tevent in zip(self.correct[0], self.events):
self.assert_(cevent == tevent)
def testMultipleRuns(self):
self.testSay()
self.events = []
self.testSay()
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestSay)
#suite = unittest.TestLoader().loadTestsFromName('testExternalLoop', TestSay)
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite()) | 35.570588 | 85 | 0.623615 | 719 | 6,047 | 5.191933 | 0.244784 | 0.101795 | 0.050094 | 0.027324 | 0.471471 | 0.421109 | 0.406108 | 0.37798 | 0.356014 | 0.319046 | 0 | 0.008424 | 0.25401 | 6,047 | 170 | 86 | 35.570588 | 0.819109 | 0.174797 | 0 | 0.458333 | 0 | 0 | 0.082663 | 0 | 0 | 0 | 0 | 0 | 0.091667 | 1 | 0.158333 | false | 0 | 0.033333 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba19d25e6463031fc8122479ec443ef023670e5 | 3,821 | py | Python | graystruct/rabbitmq.py | kondalrao/graystruct | dc068b69c35fa625664982d6b044547c4a82e134 | [
"BSD-3-Clause"
] | 3 | 2015-08-14T08:12:24.000Z | 2018-06-15T09:52:34.000Z | graystruct/rabbitmq.py | kondalrao/graystruct | dc068b69c35fa625664982d6b044547c4a82e134 | [
"BSD-3-Clause"
] | 2 | 2015-03-01T21:42:26.000Z | 2015-03-01T21:56:16.000Z | graystruct/rabbitmq.py | kondalrao/graystruct | dc068b69c35fa625664982d6b044547c4a82e134 | [
"BSD-3-Clause"
] | 2 | 2019-05-09T21:38:19.000Z | 2019-11-24T20:47:41.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Simon Jagoe and Enthought Ltd
# Portions copyright (c) 2011, Sever Băneşiu
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import
import amqp
from logging import Filter
from logging.handlers import SocketHandler
try:
from urllib.parse import urlparse, unquote
except ImportError:
from urlparse import urlparse
from urllib import unquote
from .handler import _CompressHandler
_ifnone = lambda v, x: x if v is None else v
class GELFRabbitHandler(_CompressHandler, SocketHandler):
"""RabbitMQ / Graylog Extended Log Format handler.
This is copied from ``graypy.rabbitmq`` and modified to use py-amqp
(AMQP 0.9.1). Additionally removes GELF-related options, which are
handled by the :class:`graystruct.encoder.GELFEncoder` class.
    NOTE: this handler ignores all messages logged by amqp.
:param url: RabbitMQ URL (ex: amqp://guest:guest@localhost:5672/).
:param exchange: RabbitMQ exchange. Default 'logging.gelf'.
A queue binding must be defined on the server to prevent
log messages from being dropped.
:param exchange_type: RabbitMQ exchange type (default 'fanout').
"""
def __init__(self, url, exchange='logging.gelf', exchange_type='fanout',
virtual_host='/'):
self.url = url
parsed = urlparse(url)
if parsed.scheme != 'amqp':
raise ValueError('invalid URL scheme (expected "amqp"): %s' % url)
host = parsed.hostname or 'localhost'
port = _ifnone(parsed.port, 5672)
        path_vhost = unquote(parsed.path[1:])
        virtual_host = path_vhost if path_vhost else virtual_host
self.cn_args = {
'host': '%s:%s' % (host, port),
'userid': _ifnone(parsed.username, 'guest'),
'password': _ifnone(parsed.password, 'guest'),
'virtual_host': virtual_host,
'insist': False,
}
self.exchange = exchange
self.exchange_type = exchange_type
self.virtual_host = virtual_host
SocketHandler.__init__(self, host, port)
self.addFilter(ExcludeFilter('amqp'))
def makeSocket(self, timeout=1):
return RabbitSocket(
self.cn_args, timeout, self.exchange, self.exchange_type)
class RabbitSocket(object):
def __init__(self, cn_args, timeout, exchange, exchange_type):
self.cn_args = cn_args
self.timeout = timeout
self.exchange = exchange
self.exchange_type = exchange_type
self.connection = amqp.Connection(
connection_timeout=timeout, **self.cn_args)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange=self.exchange,
type=self.exchange_type,
durable=True,
auto_delete=False,
)
def sendall(self, data):
msg = amqp.Message(data, delivery_mode=2)
self.channel.basic_publish(
msg, exchange=self.exchange)
def close(self):
try:
self.connection.close()
except Exception:
pass
class ExcludeFilter(Filter):
def __init__(self, name):
"""Initialize filter.
Initialize with the name of the logger which, together with its
children, will have its events excluded (filtered out).
"""
if not name:
raise ValueError('ExcludeFilter requires a non-empty name')
self.name = name
self.nlen = len(name)
def filter(self, record):
return not (record.name.startswith(self.name) and (
len(record.name) == self.nlen or record.name[self.nlen] == "."))
| 32.939655 | 78 | 0.645381 | 456 | 3,821 | 5.289474 | 0.403509 | 0.054726 | 0.02073 | 0.039801 | 0.043118 | 0.043118 | 0.043118 | 0.043118 | 0.043118 | 0 | 0 | 0.008821 | 0.258309 | 3,821 | 115 | 79 | 33.226087 | 0.842272 | 0.261712 | 0 | 0.085714 | 0 | 0 | 0.061016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.028571 | 0.128571 | 0.028571 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba3ff6d4bc8fb6d7f77078e33418942901121e5 | 3,676 | py | Python | client.py | MrIgumnov96/API-Client | d42e303386ddea0d3e40739993cb491450fe9bb4 | [
"Unlicense"
] | null | null | null | client.py | MrIgumnov96/API-Client | d42e303386ddea0d3e40739993cb491450fe9bb4 | [
"Unlicense"
] | null | null | null | client.py | MrIgumnov96/API-Client | d42e303386ddea0d3e40739993cb491450fe9bb4 | [
"Unlicense"
] | null | null | null | import requests
import json
class Client:
    # Definition of the constructor
def __init__( self, server_address, password ):
self.__server_address = server_address
self.__password = password
# Function returns True if it was possible to
# retrieve the response from the server.
def can_connect( self ):
try:
req = requests.get( self.__server_address )
if req.status_code == 200:
print( req.text )
return True
return False
        except:
            print( "An exception occurred when attempting to send a request to the server. The server may not be running." )
            return False
def print_records( self, records_raw_list ):
records_lists = json.loads( records_raw_list )
for records in records_lists:
print( records )
def print_page_options( self, records_raw_list, links_list ):
print( "Page Options" )
idx = 0
for link in links_list[1:]:
for key, value in link.items():
idx = idx + 1
print( "Enter digit {} to proceed to the {} page.".format( idx, key ) )
print( "Enter digit 5 to list records." )
print( "Enter digit 6 to return to Main Menu." )
inpt = self.get_numeric_input()
if inpt >= 1 and inpt <= 4:
link_list = links_list[ inpt ].values()
for link in link_list:
if str( link ) == 'None':
self.main_menu()
else:
self.go_to_page( link )
if inpt == 5:
self.print_records( records_raw_list )
self.print_page_options( records_raw_list, links_list )
if inpt == 6:
self.main_menu()
def go_to_page( self, page_address ):
req = requests.get(
page_address,
headers = { 'api_key': self.__password } )
if req.status_code == 200:
dict = req.json()
print ( "We are at page: {}".format( page_address ) )
self.print_page_options( dict[ 'records' ], dict[ 'links' ] )
else:
print( "It was not possible to open the page, the following error occured: {}".format( req.status_code ) )
def get_campaign_statistics( self ):
self.go_to_page( self.__server_address + '/campaign_statistics' )
def get_campaigns( self ):
self.go_to_page( self.__server_address + '/campaigns' )
def get_creatives( self ):
self.go_to_page( self.__server_address + '/creatives' )
def main_menu( self ):
print( "Main Menu" )
print( "Enter one of the following options:" )
print( "1 to get to campaign statistics" )
print( "2 to get to campaigns" )
print( "3 to get to creatives" )
print( "4 to exit from program" )
inpt = self.get_numeric_input()
if inpt == 1:
self.get_campaign_statistics()
elif inpt == 2:
self.get_campaigns()
elif inpt == 3:
self.get_creatives()
elif inpt == 4:
print( "Good bye" )
exit()
else:
print('Option does not exist.')
self.main_menu()
def get_numeric_input( self ):
inpt = input()
if inpt.isnumeric():
return int( inpt )
print( "Input is not numeric. Please enter numeric value." )
return self.get_numeric_input()
client = Client( "http://127.0.0.1:5000/", 'uHL6FHwsIXgk8ke3uAdNNg' )
if client.can_connect():
client.main_menu() | 31.418803 | 126 | 0.553591 | 439 | 3,676 | 4.425968 | 0.273349 | 0.046835 | 0.052496 | 0.024704 | 0.124035 | 0.081832 | 0.081832 | 0.081832 | 0 | 0 | 0 | 0.015145 | 0.353373 | 3,676 | 117 | 127 | 31.418803 | 0.802272 | 0.030196 | 0 | 0.116279 | 0 | 0.011628 | 0.178271 | 0.006176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0.034884 | 0.023256 | 0 | 0.197674 | 0.267442 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba55df444b4255ce389d46b8a6a51447cd89a73 | 2,262 | py | Python | generator/utils.py | UBT-AI2/rtlode | 866df92d0a2211fd3ca5dd064160007036a2f8c4 | [
"MIT"
] | null | null | null | generator/utils.py | UBT-AI2/rtlode | 866df92d0a2211fd3ca5dd064160007036a2f8c4 | [
"MIT"
] | null | null | null | generator/utils.py | UBT-AI2/rtlode | 866df92d0a2211fd3ca5dd064160007036a2f8c4 | [
"MIT"
] | null | null | null | from myhdl import Signal, SignalType, block, always_seq, modbv, intbv, always_comb
@block
def reinterpret_as_signed(data_in, data_out):
@always_comb
def _assign():
data_out.next = data_in.signed()
return _assign
def clone_signal(sig, reset_value=0):
"""
Clone a single signal.
:param sig: signal to be cloned
:param reset_value: reset value of new signal
:return: new signal
"""
if sig._type == bool:
return Signal(bool(reset_value))
if isinstance(sig.val, modbv):
value = modbv(reset_value, min=sig.min, max=sig.max)
elif isinstance(sig.val, intbv):
value = intbv(reset_value, min=sig.min, max=sig.max)
else:
        raise NotImplementedError()
return Signal(value)
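# A small sketch of clone_signal in use (assumes the myhdl types imported above):
#   sig = Signal(intbv(0, min=0, max=16))
#   shadow = clone_signal(sig)      # new Signal, same intbv bounds, reset value 0
#   shadow5 = clone_signal(sig, 5)  # same bounds, reset value 5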
def clone_signal_structure(sig_data, value=0):
"""
Clone a signal structure.
:param sig_data: signal structure to be cloned
:param value: reset value of new signals
:return: new signal structure
"""
if isinstance(sig_data, SignalType):
return clone_signal(sig_data, reset_value=value)
elif isinstance(sig_data, list):
return [clone_signal_structure(sub_sig, value) for sub_sig in sig_data]
else:
raise Exception('Can not clone signal data structure.')
"""
No easy way to implement these assigns smarter because of limitations of the myhdl conversion.
Each signal can only be driven by one instance.
"""
@block
def assign(clk, condition, in_val, out_val):
@always_seq(clk.posedge, reset=None)
def _assign():
if condition:
out_val.next = in_val
return _assign
@block
def assign_2(clk, condition_1, in_val_1, condition_2, in_val_2, out_val):
@always_seq(clk.posedge, reset=None)
def _assign():
if condition_1:
out_val.next = in_val_1
if condition_2:
out_val.next = in_val_2
return _assign
@block
def assign_3(clk, condition_1, in_val_1, condition_2, in_val_2, condition_3, in_val_3, out_val):
@always_seq(clk.posedge, reset=None)
def _assign():
if condition_1:
out_val.next = in_val_1
if condition_2:
out_val.next = in_val_2
if condition_3:
out_val.next = in_val_3
return _assign
| 26 | 96 | 0.666667 | 331 | 2,262 | 4.302115 | 0.232628 | 0.042135 | 0.042135 | 0.050562 | 0.351124 | 0.265449 | 0.265449 | 0.265449 | 0.226124 | 0.226124 | 0 | 0.014052 | 0.244916 | 2,262 | 86 | 97 | 26.302326 | 0.819672 | 0.117595 | 0 | 0.490196 | 0 | 0 | 0.020101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.196078 | false | 0 | 0.019608 | 0 | 0.372549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba71b2febff9a086e948915b681dc2593253f36 | 6,402 | py | Python | bika/lims/content/pricelist.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/content/pricelist.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | bika/lims/content/pricelist.py | hocinebendou/bika.gsoc | 85bc0c587de7f52073ae0e89bddbc77bf875f295 | [
"MIT"
] | null | null | null | from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.widgets.datetimewidget import DateTimeWidget
from bika.lims.config import PRICELIST_TYPES, PROJECTNAME
from bika.lims.content.bikaschema import BikaFolderSchema
from bika.lims.interfaces import IPricelist
from DateTime import DateTime
from persistent.mapping import PersistentMapping
from plone.app.folder import folder
from Products.Archetypes.public import *
from Products.CMFCore import permissions
from zope.interface import implements
from Products.CMFCore.utils import getToolByName
schema = BikaFolderSchema.copy() + Schema((
StringField('Type',
required=1,
vocabulary=PRICELIST_TYPES,
widget=SelectionWidget(
format='select',
label=_("Pricelist for"),
),
),
BooleanField('BulkDiscount',
default=False,
widget=SelectionWidget(
label=_("Bulk discount applies"),
),
),
FixedPointField('BulkPrice',
widget=DecimalWidget(
label=_("Discount %"),
description=_("Enter discount percentage value"),
),
),
BooleanField('Descriptions',
default=False,
widget=BooleanWidget(
label=_("Include descriptions"),
description=_("Select if the descriptions should be included"),
),
),
TextField('Remarks',
searchable=True,
default_content_type='text/plain',
allowed_content_types=('text/plain', ),
default_output_type="text/plain",
widget=TextAreaWidget(
macro="bika_widgets/remarks",
label=_("Remarks"),
append_only=True,
),
),
),
)
Field = schema['title']
Field.required = 1
Field.widget.visible = True
Field = schema['effectiveDate']
Field.schemata = 'default'
Field.required = 0  # "If no date is selected the item will be published immediately."
Field.widget.visible = True
Field = schema['expirationDate']
Field.schemata = 'default'
Field.required = 0 # "If no date is chosen, it will never expire."
Field.widget.visible = True
def apply_discount(price=None, discount=None):
return float(price) - (float(price) * float(discount)) / 100
def get_vat_amount(price, vat_perc):
return float(price) * float(vat_perc) / 100
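# Worked examples (plain numbers; discount and VAT are percentages):
#   apply_discount(price=1000, discount=10) -> 900.0  (10% off 1000)
#   get_vat_amount(900, 15)                 -> 135.0  (15% VAT on 900)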
class PricelistLineItem(PersistentMapping):
pass
class Pricelist(folder.ATFolder):
implements(IPricelist)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
security.declarePublic('current_date')
def current_date(self):
""" return current date """
return DateTime()
security.declareProtected(permissions.ModifyPortalContent,
'processForm')
registerType(Pricelist, PROJECTNAME)
def ObjectModifiedEventHandler(instance, event):
""" Various types need automation on edit.
"""
if not hasattr(instance, 'portal_type'):
return
if instance.portal_type == 'Pricelist':
""" Create price list line items
"""
# Remove existing line items
instance.pricelist_lineitems = []
for p in instance.portal_catalog(portal_type=instance.getType(),
inactive_state="active"):
obj = p.getObject()
itemDescription = None
itemAccredited = False
if instance.getType() == "LabProduct":
print_detail = ""
if obj.getVolume():
print_detail = print_detail + str(obj.getVolume())
if obj.getUnit():
print_detail = print_detail + str(obj.getUnit())
if obj.getVolume() or obj.getUnit():
print_detail = " (" + print_detail + ")"
itemTitle = obj.Title() + print_detail
else:
itemTitle = obj.Title()
cat = None
if obj.getPrice():
price = float(obj.getPrice())
totalprice = float(obj.getTotalPrice())
vat = totalprice - price
else:
price = 0
totalprice = 0
vat = 0
elif instance.getType() == "AnalysisService":
#
if str(obj.getUnit()):
print_detail = " (" + str(obj.getUnit()) + ")"
itemTitle = obj.Title() + print_detail
else:
itemTitle = obj.Title()
itemAccredited = obj.getAccredited()
#
cat = obj.getCategoryTitle()
if instance.getBulkDiscount():
price = float(obj.getBulkPrice())
vat = get_vat_amount(price, obj.getVAT())
totalprice = price + vat
else:
if instance.getBulkPrice():
discount = instance.getBulkPrice()
price = float(obj.getPrice())
price = apply_discount(price, discount)
vat = get_vat_amount(price, obj.getVAT())
totalprice = price + vat
elif obj.getPrice():
price = float(obj.getPrice())
vat = get_vat_amount(price, obj.getVAT())
totalprice = price + vat
else:
totalprice = 0
price = 0
vat = 0
if instance.getDescriptions():
itemDescription = obj.Description()
li = PricelistLineItem()
li['title'] = itemTitle
li['ItemDescription'] = itemDescription
li['CategoryTitle'] = cat
li['Accredited'] = itemAccredited
li['Subtotal'] = "%0.2f" % price
li['VATAmount'] = "%0.2f" % vat
li['Total'] = "%0.2f" % totalprice
instance.pricelist_lineitems.append(li)
| 33.518325 | 75 | 0.565292 | 571 | 6,402 | 6.23993 | 0.339755 | 0.030873 | 0.023576 | 0.019085 | 0.186921 | 0.158013 | 0.094022 | 0.094022 | 0.094022 | 0.066517 | 0 | 0.005202 | 0.339425 | 6,402 | 190 | 76 | 33.694737 | 0.837314 | 0.031553 | 0 | 0.307692 | 0 | 0 | 0.073803 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032051 | false | 0.00641 | 0.102564 | 0.012821 | 0.198718 | 0.044872 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba7b5859345badfa4b065b8dfe990acbfb3a7a8 | 2,607 | py | Python | models/modules/discrimators.py | qianbenb/Shift-Net_pytorch | c765939bed64b9604e9ea7ce2c14b2b2c69046d4 | [
"MIT"
] | 1 | 2019-04-24T10:01:29.000Z | 2019-04-24T10:01:29.000Z | models/modules/discrimators.py | qianbenb/Shift-Net_pytorch | c765939bed64b9604e9ea7ce2c14b2b2c69046d4 | [
"MIT"
] | null | null | null | models/modules/discrimators.py | qianbenb/Shift-Net_pytorch | c765939bed64b9604e9ea7ce2c14b2b2c69046d4 | [
"MIT"
] | null | null | null | import functools
import torch.nn as nn
from .denset_net import *
from .modules import *
################################### This is for D ###################################
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_spectral_norm=True):
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), use_spectral_norm),
nn.LeakyReLU(0.2)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias), use_spectral_norm),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias), use_spectral_norm),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2)
]
sequence += [spectral_norm(nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw), use_spectral_norm)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
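# A minimal instantiation sketch (illustrative shapes; assumes a 3-channel
# input and a plain `import torch` alongside the imports above):
#   netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#   score_map = netD(torch.randn(1, 3, 256, 256))  # ~30x30 map, one logit per patch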
# Defines a densenet-inspired discriminator (should improve its ability to learn stronger representations)
class DenseNetDiscrimator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_spectral_norm=True):
super(DenseNetDiscrimator, self).__init__()
self.model = densenet121(pretrained=True, use_spectral_norm=use_spectral_norm)
self.use_sigmoid = use_sigmoid
if self.use_sigmoid:
self.sigmoid = nn.Sigmoid()
def forward(self, input):
if self.use_sigmoid:
return self.sigmoid(self.model(input))
else:
return self.model(input) | 38.910448 | 123 | 0.608746 | 329 | 2,607 | 4.56231 | 0.264438 | 0.05996 | 0.079947 | 0.058628 | 0.469021 | 0.43038 | 0.417722 | 0.376416 | 0.351765 | 0.351765 | 0 | 0.019402 | 0.268508 | 2,607 | 67 | 124 | 38.910448 | 0.767698 | 0.070963 | 0 | 0.388889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0.018519 | 0.240741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cba7c376556dad4deb7f20152d0a22279c4279fd | 5,081 | py | Python | ppcls/optimizer/optimizer.py | flyseaworld/PaddleClas | 414273652fe73217a2c835d1edc61509a95479ec | [
"Apache-2.0"
] | 7 | 2021-11-01T08:44:06.000Z | 2022-01-10T09:42:34.000Z | ppcls/optimizer/optimizer.py | livingbody/PaddleClas | 955adcf7a4b618d789b97d978a05ec9cd8dc151e | [
"Apache-2.0"
] | null | null | null | ppcls/optimizer/optimizer.py | livingbody/PaddleClas | 955adcf7a4b618d789b97d978a05ec9cd8dc151e | [
"Apache-2.0"
] | 1 | 2021-11-16T16:31:05.000Z | 2021-11-16T16:31:05.000Z | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import paddle
import paddle.regularizer as regularizer
__all__ = ['OptimizerBuilder']
class L1Decay(object):
"""
L1 Weight Decay Regularization, which encourages the weights to be sparse.
Args:
factor(float): regularization coeff. Default:0.0.
"""
def __init__(self, factor=0.0):
super(L1Decay, self).__init__()
self.factor = factor
def __call__(self):
reg = regularizer.L1Decay(self.factor)
return reg
class L2Decay(object):
"""
    L2 Weight Decay Regularization, which encourages the weights to be small.
Args:
factor(float): regularization coeff. Default:0.0.
"""
def __init__(self, factor=0.0):
super(L2Decay, self).__init__()
self.factor = factor
def __call__(self):
reg = regularizer.L2Decay(self.factor)
return reg
class Momentum(object):
"""
Simple Momentum optimizer with velocity state.
Args:
learning_rate (float|Variable) - The learning rate used to update parameters.
Can be a float value or a Variable with one float value as data element.
momentum (float) - Momentum factor.
regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
"""
def __init__(self,
learning_rate,
momentum,
parameter_list=None,
regularization=None,
**args):
super(Momentum, self).__init__()
self.learning_rate = learning_rate
self.momentum = momentum
self.parameter_list = parameter_list
self.regularization = regularization
def __call__(self):
opt = paddle.optimizer.Momentum(
learning_rate=self.learning_rate,
momentum=self.momentum,
parameters=self.parameter_list,
weight_decay=self.regularization)
return opt
class RMSProp(object):
"""
Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method.
Args:
learning_rate (float|Variable) - The learning rate used to update parameters.
Can be a float value or a Variable with one float value as data element.
momentum (float) - Momentum factor.
rho (float) - rho value in equation.
epsilon (float) - avoid division by zero, default is 1e-6.
regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
"""
def __init__(self,
learning_rate,
momentum,
rho=0.95,
epsilon=1e-6,
parameter_list=None,
regularization=None,
**args):
super(RMSProp, self).__init__()
self.learning_rate = learning_rate
self.momentum = momentum
self.rho = rho
self.epsilon = epsilon
self.parameter_list = parameter_list
self.regularization = regularization
def __call__(self):
opt = paddle.optimizer.RMSProp(
learning_rate=self.learning_rate,
momentum=self.momentum,
rho=self.rho,
epsilon=self.epsilon,
parameters=self.parameter_list,
weight_decay=self.regularization)
return opt
class OptimizerBuilder(object):
"""
Build optimizer
Args:
function(str): optimizer name of learning rate
params(dict): parameters used for init the class
regularizer (dict): parameters used for create regularization
"""
def __init__(self,
function='Momentum',
params={'momentum': 0.9},
regularizer=None):
self.function = function
self.params = params
# create regularizer
if regularizer is not None:
mod = sys.modules[__name__]
reg_func = regularizer['function'] + 'Decay'
del regularizer['function']
reg = getattr(mod, reg_func)(**regularizer)()
self.params['regularization'] = reg
def __call__(self, learning_rate, parameter_list=None):
mod = sys.modules[__name__]
opt = getattr(mod, self.function)
return opt(learning_rate=learning_rate,
parameter_list=parameter_list,
**self.params)()
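# A minimal wiring sketch (hypothetical hyper-parameters and `model` object):
#   builder = OptimizerBuilder(function='Momentum', params={'momentum': 0.9},
#                              regularizer={'function': 'L2', 'factor': 1e-4})
#   opt = builder(learning_rate=0.1, parameter_list=model.parameters())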
| 31.171779 | 93 | 0.632553 | 560 | 5,081 | 5.539286 | 0.278571 | 0.073501 | 0.036106 | 0.02579 | 0.515152 | 0.476467 | 0.476467 | 0.448098 | 0.41715 | 0.41715 | 0 | 0.009131 | 0.288723 | 5,081 | 162 | 94 | 31.364198 | 0.849198 | 0.374533 | 0 | 0.511905 | 0 | 0 | 0.022267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.071429 | 0 | 0.309524 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbab21d705fb413179c8b17f32f63f23548d4f64 | 1,186 | py | Python | codes/utils/profiler.py | shagunsodhani/consistent-dynamics | cc1527f2468cdcebea9a57387254278eb5547fe3 | [
"MIT"
] | 8 | 2019-05-06T13:30:57.000Z | 2020-05-25T20:32:47.000Z | codes/utils/profiler.py | shagunsodhani/consistent-dynamics | cc1527f2468cdcebea9a57387254278eb5547fe3 | [
"MIT"
] | null | null | null | codes/utils/profiler.py | shagunsodhani/consistent-dynamics | cc1527f2468cdcebea9a57387254278eb5547fe3 | [
"MIT"
] | 2 | 2019-05-06T15:11:42.000Z | 2020-03-06T12:36:16.000Z | import torch
import subprocess
import os
def get_cuda_memory_allocated():
    # in MiB (2**20 bytes)
return torch.cuda.memory_allocated() / (2 ** 20)
def get_cuda_memory_cached():
    # in MiB (2**20 bytes)
return torch.cuda.memory_cached()/(2**20)
def get_gpu_memory_map():
"""Get the current gpu usage. Taken from https://gist.github.com/vardaan123/53a49a789b27bf829bb3799c60e26705
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
])
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.strip().decode('utf-8').split('\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
print(gpu_memory_map)
return gpu_memory_map
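# e.g. on a two-GPU host the parsed map might look like {0: 1234, 1: 567}
# (illustrative values; keys are device indices, values are MiB in use).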
def get_cpu_memory_map():
    '''Get the memory usage of the current process'''
    import psutil
    pid = os.getpid()
    py = psutil.Process(pid)
    memory_use = py.memory_info()[0] / 2. ** 20  # resident set size in MiB (bytes / 2**20)
print('memory use:', memory_use)
return memory_use
| 28.238095 | 112 | 0.649241 | 165 | 1,186 | 4.49697 | 0.478788 | 0.097035 | 0.06469 | 0.043127 | 0.067385 | 0.067385 | 0 | 0 | 0 | 0 | 0 | 0.040305 | 0.22597 | 1,186 | 41 | 113 | 28.926829 | 0.767974 | 0.279089 | 0 | 0 | 0 | 0 | 0.098039 | 0.063725 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0.083333 | 0.5 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbab4b584f5c983e28927fbac2c843e6d55981b3 | 489 | py | Python | sorting_algos/selection_sort.py | michaelgraemeshort/more_py_algos | 5220c76a20bb4713bef66b4ce19194e917eb3e7e | [
"MIT"
] | null | null | null | sorting_algos/selection_sort.py | michaelgraemeshort/more_py_algos | 5220c76a20bb4713bef66b4ce19194e917eb3e7e | [
"MIT"
] | null | null | null | sorting_algos/selection_sort.py | michaelgraemeshort/more_py_algos | 5220c76a20bb4713bef66b4ce19194e917eb3e7e | [
"MIT"
] | null | null | null | # iterate through array, swapping each element as far back as necessary
# no, that's insertion sort
# selection sort iterates through the array and finds the smallest element each time
from random import randint
test_list = [randint(0, 10) for i in range(10)]
def selection_sort(l):
    for i in range(len(l) - 1):
        # find the index of the smallest remaining element
        min_idx = i
        for j in range(i + 1, len(l)):
            if l[j] < l[min_idx]:
                min_idx = j
        # swap the smallest remaining element into position i
        l[i], l[min_idx] = l[min_idx], l[i]
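# e.g. selection_sort([3, 1, 2]) sorts the list in place to [1, 2, 3];
# the algorithm is O(n^2) in comparisons but performs only O(n) swaps.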
print(test_list)
selection_sort(test_list)
print(test_list)
| 23.285714 | 84 | 0.652352 | 85 | 489 | 3.682353 | 0.482353 | 0.102236 | 0.028754 | 0.070288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018817 | 0.239264 | 489 | 20 | 85 | 24.45 | 0.822581 | 0.364008 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbac35b44ad826703053525f855c179eac5f6cbb | 8,141 | py | Python | app/products/views.py | oeilgauche/vicuna | 3c7c6a1cefa24dc724056c478a4b25cba3092310 | [
"MIT"
] | null | null | null | app/products/views.py | oeilgauche/vicuna | 3c7c6a1cefa24dc724056c478a4b25cba3092310 | [
"MIT"
] | null | null | null | app/products/views.py | oeilgauche/vicuna | 3c7c6a1cefa24dc724056c478a4b25cba3092310 | [
"MIT"
] | null | null | null | # Import flask dependencies
from flask import (Blueprint, request, render_template,
flash, g, session, redirect, url_for)
# Import the database object from the main app module
from app import db
# Define the blueprint: 'products', set its url prefix: app.url/backend/products
products = Blueprint('products', __name__, url_prefix='/backend/products')
# Import helpers
from ..helpers.helpers import read_setting
# Import the forms
from .forms import AddProduct, AddCategory
# Import the models
from .models import Product, Category, StockHistory
from ..suppliers.models import Supplier
from ..settings.models import VAT
# Import helpers
from ..helpers.helpers import (to_int, to_dec, to_dec_string, add_vat,
calc_margin)
# Import Babel
from app import babel
from config import LANGUAGES
# Set the route and accepted methods
@products.route('/')
def products_list():
products = Product.query.order_by(Product.reference)
for product in products:
product.stock = to_dec_string(product.stock)
product.selling_price = to_dec_string(product.selling_price)
return render_template('products/list.html',
title='Products',
products=products)
@products.route('/add', methods=['GET', 'POST'])
def product_add():
form = AddProduct()
vat = [(v.id, to_dec(v.amount)) for v in VAT.query.order_by(VAT.name)]
suppliers = [(s.id, s.name) for s in Supplier.query.order_by(Supplier.name)]
categories = [(c.id, c.name) for c in Category.query.order_by(Category.code)]
units = [(k, v) for k, v in read_setting("units").iteritems()]
conditioning_units = [(k, v) for k, v in read_setting("conditioning").iteritems()]
form.vat.choices = vat
form.suppliers.choices = suppliers
form.categories.choices = categories
form.unit.choices = units
form.conditioning_unit.choices = conditioning_units
if form.validate_on_submit():
# Storing the buying price as an integer
buying_price_int = to_int(form.buying_price.data)
stock_int = to_int(form.stock.data)
selling_price_no_tax_int = to_int(form.selling_price_no_tax.data)
selling_price = int(add_vat(to_int(form.selling_price_no_tax.data), form.vat.data))
product = Product(name=form.name.data, reference=form.reference.data,
unit=form.unit.data, packing=to_int(form.packing.data),
conditioning=to_int(form.conditioning.data),
conditioning_unit=form.conditioning_unit.data,
supplier_reference=form.supplier_reference.data,
ean=form.ean.data, description=form.description.data,
buying_price=buying_price_int,
selling_price_no_tax=selling_price_no_tax_int,
selling_price=selling_price,
stock=stock_int,
vat_id=form.vat.data)
db.session.add(product)
db.session.commit()
# Add stock to History
stock_history = StockHistory(amount=stock_int, product=product)
db.session.add(stock_history)
db.session.commit()
for s in form.suppliers.data:
supp = Supplier.query.filter_by(id=s).first()
supp.products.append(product)
db.session.add(supp)
db.session.commit()
c = form.categories.data
cat = Category.query.filter_by(id=c).first()
cat.products.append(product)
db.session.add(cat)
db.session.commit()
flash('Product %s added!' % form.name.data, 'success')
return redirect(url_for('products.products_list'))
return render_template('products/add.html',
title='Add a Product',
action='add',
form=form)
@products.route('/edit/<int:id>', methods=['GET', 'POST'])
def product_edit(id):
# Initialize form
product = Product.query.get_or_404(id)
form = AddProduct()
# Create lists
vat_query = VAT.query.order_by(VAT.name)
for v in vat_query:
v.amount = to_dec(v.amount)
form.vat.choices = [(v.id, v.amount) for v in vat_query]
form.suppliers.choices = [(s.id, s.name) for s in Supplier.query.order_by(Supplier.name)]
form.categories.choices = [(c.id, c.name) for c in Category.query.order_by(Category.code)]
if form.validate_on_submit():
product.name = form.name.data
product.reference = form.reference.data
product.unit = form.unit.data
product.packing = to_int(form.packing.data)
product.conditioning = to_int(form.conditioning.data)
product.conditioning_unit = form.conditioning_unit.data
product.supplier_reference = form.supplier_reference.data
product.ean = form.ean.data
product.description = form.description.data
product.buying_price = to_int(form.buying_price.data)
product.selling_price_no_tax = to_int(form.selling_price_no_tax.data)
product.selling_price = int(add_vat(to_int(form.selling_price_no_tax.data), form.vat.data))
product.vat_id=form.vat.data
product.stock = to_int(form.stock.data)
product.supplier = []
#product.category = []
db.session.add(product)
db.session.commit()
stock_history = StockHistory(amount=to_int(form.stock.data), product=product)
db.session.add(stock_history)
db.session.commit()
for s in form.suppliers.data:
supp = Supplier.query.filter_by(id=s).first()
supp.products.append(product)
db.session.add(supp)
db.session.commit()
c = form.categories.data
cat = Category.query.filter_by(id=c).first()
cat.products.append(product)
db.session.add(cat)
db.session.commit()
flash('Product %s (Reference %s) modified!' % (product.name, product.reference), 'success')
return redirect(url_for('products.products_list'))
#Populate the fields
form.suppliers.data = [s.id for s in product.supplier]
form.categories.data = [0, product.category.id]
form.name.data = product.name
form.reference.data = product.reference
form.unit.data = product.unit
form.packing.data = to_dec(product.packing)
form.conditioning.data = to_dec(product.conditioning)
form.conditioning_unit.data = product.conditioning_unit
form.supplier_reference.data = product.supplier_reference
form.ean.data = product.ean
form.description.data = product.description
form.buying_price.data = to_dec(product.buying_price)
form.selling_price_no_tax.data = to_dec(product.selling_price_no_tax)
form.vat.data = product.vat_id
form.stock.data = to_dec(product.stock)
return render_template('products/add.html',
title='Edit product',
action='edit',
form=form)
@products.route('/delete/<int:id>', methods=['GET', 'POST'])
def product_delete(id):
product = Product.query.get_or_404(id)
db.session.delete(product)
db.session.commit()
flash('Product ' + product.name + ' deleted!', 'success')
return redirect(url_for('products.products_list'))
@products.route('/view/<int:id>', methods=['GET', 'POST'])
def product_view(id):
product = Product.query.get_or_404(id)
product.packing = to_dec(product.packing)
product.conditioning = to_dec(product.conditioning)
product.buying_price = to_dec(product.buying_price)
product.stock = to_dec(product.stock)
product.vat.amount = to_dec_string(product.vat.amount)
product.selling_price_no_tax = to_dec_string(product.selling_price_no_tax)
product.selling_price = to_dec_string(product.selling_price)
margin = calc_margin(product.buying_price, product.selling_price_no_tax)
return render_template('products/view.html', title='Product',
product=product, margin=margin)
@products.route('/categories', methods=['GET', 'POST'])
def categories():
form = AddCategory()
categories = Category.query.order_by(Category.code)
if form.validate_on_submit():
category = Category(name=form.name.data, code=form.code.data)
db.session.add(category)
db.session.commit()
flash('Category %s added!' % form.name.data, 'success')
return redirect(url_for('products.categories'))
return render_template('products/categories.html',
title='Categories',
categories=categories,
form=form)
@products.route('/categories/delete/<int:id>', methods=['GET', 'POST'])
def category_delete(id):
category = Category.query.get_or_404(id)
db.session.delete(category)
db.session.commit()
flash('Category %s deleted!' % category.name, 'success')
return redirect(url_for('products.categories')) | 36.022124 | 93 | 0.739713 | 1,163 | 8,141 | 5.012898 | 0.114359 | 0.045283 | 0.020069 | 0.037907 | 0.518696 | 0.444254 | 0.342024 | 0.255575 | 0.208748 | 0.183362 | 0 | 0.00184 | 0.131925 | 8,141 | 226 | 94 | 36.022124 | 0.823122 | 0.052082 | 0 | 0.289773 | 0 | 0 | 0.073757 | 0.015193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039773 | false | 0.005682 | 0.0625 | 0 | 0.159091 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbac4e19eb4842d3b99851b114712a0b8d6ea9a0 | 1,594 | py | Python | tests/statistical_clear_sky/algorithm/serialization/test_serialization_mixin.py | elsirdavid/StatisticalClearSky | bc3aa9de56a9347c10e2afe23af486d32d476273 | [
"BSD-2-Clause"
] | 16 | 2019-05-09T14:17:22.000Z | 2022-02-23T18:41:13.000Z | tests/statistical_clear_sky/algorithm/serialization/test_serialization_mixin.py | elsirdavid/StatisticalClearSky | bc3aa9de56a9347c10e2afe23af486d32d476273 | [
"BSD-2-Clause"
] | 7 | 2019-07-09T18:32:29.000Z | 2021-07-01T22:28:32.000Z | tests/statistical_clear_sky/algorithm/serialization/test_serialization_mixin.py | elsirdavid/StatisticalClearSky | bc3aa9de56a9347c10e2afe23af486d32d476273 | [
"BSD-2-Clause"
] | 4 | 2019-12-20T19:15:09.000Z | 2021-04-29T17:40:40.000Z | import unittest
import numpy as np
import tempfile
import shutil
import os
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
class TestSerializationMixin(unittest.TestCase):
def setUp(self):
self._temp_directory = tempfile.mkdtemp()
self._filepath = os.path.join(self._temp_directory, 'state_data.json')
def tearDown(self):
shutil.rmtree(self._temp_directory)
def test_serialization(self):
power_signals_d = np.array([[3.65099996e-01, 0.00000000e+00,
0.00000000e+00, 2.59570003e+00],
[6.21100008e-01, 0.00000000e+00,
0.00000000e+00, 2.67740011e+00],
[8.12500000e-01, 0.00000000e+00,
0.00000000e+00, 2.72729993e+00],
[9.00399983e-01, 0.00000000e+00,
0.00000000e+00, 2.77419996e+00]])
rank_k = 4
original_iterative_fitting = IterativeFitting(power_signals_d,
rank_k=rank_k)
original_iterative_fitting.save_instance(self._filepath)
deserialized_iterative_fitting = IterativeFitting.load_instance(
self._filepath)
np.testing.assert_array_equal(deserialized_iterative_fitting.
_power_signals_d,
original_iterative_fitting.
_power_signals_d)
| 38.878049 | 78 | 0.552698 | 155 | 1,594 | 5.419355 | 0.432258 | 0.095238 | 0.114286 | 0.066667 | 0.197619 | 0.128571 | 0.128571 | 0.128571 | 0 | 0 | 0 | 0.177177 | 0.373275 | 1,594 | 40 | 79 | 39.85 | 0.663664 | 0 | 0 | 0 | 0 | 0 | 0.00941 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbacc5db9f6b221debb39a238e5cea2b26e592ba | 2,941 | py | Python | face_anti-spoof_challenge@CVPR_zpeng/read_data.py | avuku06/G | 8b505691f305783d66a1976ad51748073d2e76f6 | [
"Apache-2.0"
] | 1 | 2021-08-10T09:44:52.000Z | 2021-08-10T09:44:52.000Z | face_anti-spoof_challenge@CVPR_zpeng/read_data.py | y435449/Face-Knowing | 8b505691f305783d66a1976ad51748073d2e76f6 | [
"Apache-2.0"
] | null | null | null | face_anti-spoof_challenge@CVPR_zpeng/read_data.py | y435449/Face-Knowing | 8b505691f305783d66a1976ad51748073d2e76f6 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import numpy as np
import os
from torch.utils.data import Dataset
import math
import cv2
import torchvision
import torch
# CASIA-SURF training dataset and our private dataset
depth_dir_train_file = os.getcwd() +'/data/2depth_train.txt'
label_dir_train_file = os.getcwd() + '/data/2label_train.txt'
# CASIA-SURF Val data
depth_dir_val_file = os.getcwd() +'/data/depth_val.txt'
label_dir_val_file = os.getcwd() +'/data/label_val.txt' #val-label 100%
# # CASIA-SURF Test data
depth_dir_test_file = os.getcwd() +'/data/depth_test.txt'
label_dir_test_file = os.getcwd() +'/data/label_test.txt'
class CASIA(Dataset):
def __init__(self, transform=None, phase_train=True, data_dir=None,phase_test=False):
self.phase_train = phase_train
self.phase_test = phase_test
self.transform = transform
try:
with open(depth_dir_train_file, 'r') as f:
self.depth_dir_train = f.read().splitlines()
with open(label_dir_train_file, 'r') as f:
self.label_dir_train = f.read().splitlines()
with open(depth_dir_val_file, 'r') as f:
self.depth_dir_val = f.read().splitlines()
with open(label_dir_val_file, 'r') as f:
self.label_dir_val = f.read().splitlines()
if self.phase_test:
with open(depth_dir_test_file, 'r') as f:
self.depth_dir_test = f.read().splitlines()
with open(label_dir_test_file, 'r') as f:
self.label_dir_test = f.read().splitlines()
except:
print('can not open files, may be filelist is not exist')
exit()
def __len__(self):
if self.phase_train:
return len(self.depth_dir_train)
else:
if self.phase_test:
return len(self.depth_dir_test)
else:
return len(self.depth_dir_val)
def __getitem__(self, idx):
if self.phase_train:
depth_dir = self.depth_dir_train
label_dir = self.label_dir_train
label = int(label_dir[idx])
label = np.array(label)
else:
if self.phase_test:
depth_dir = self.depth_dir_test
label_dir = self.label_dir_test
label = int(label_dir[idx])
# label = np.random.randint(0,2,1)
label = np.array(label)
else:
depth_dir = self.depth_dir_val
label_dir = self.label_dir_val
label = int(label_dir[idx])
label = np.array(label)
depth = Image.open(depth_dir[idx])
depth = depth.convert('RGB')
if self.transform:
depth = self.transform(depth)
if self.phase_train:
return depth,label
else:
return depth,label,depth_dir[idx]
| 34.197674 | 89 | 0.588575 | 393 | 2,941 | 4.129771 | 0.19084 | 0.098583 | 0.066543 | 0.05915 | 0.529267 | 0.317314 | 0.232286 | 0.044362 | 0.044362 | 0 | 0 | 0.004458 | 0.313499 | 2,941 | 85 | 90 | 34.6 | 0.799406 | 0.053723 | 0 | 0.246377 | 0 | 0 | 0.064505 | 0.015856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.115942 | 0 | 0.246377 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbad92a8fa4b7c057a8126395fcbd2a64cd1ffdb | 3,026 | py | Python | wxStocks_modules/wxStocks_testing.py | scoofy/wxStocks | b9e2839dd7d906e3e983c5a5bc21e7fa9253dfc6 | [
"Unlicense"
] | 211 | 2015-01-22T15:16:10.000Z | 2022-02-06T09:39:00.000Z | wxStocks_modules/wxStocks_testing.py | scoofy/wxStocks | b9e2839dd7d906e3e983c5a5bc21e7fa9253dfc6 | [
"Unlicense"
] | 5 | 2015-03-18T23:48:22.000Z | 2020-06-27T23:34:10.000Z | wxStocks_modules/wxStocks_testing.py | scoofy/wxStocks | b9e2839dd7d906e3e983c5a5bc21e7fa9253dfc6 | [
"Unlicense"
] | 66 | 2015-04-16T20:36:06.000Z | 2020-10-06T17:40:59.000Z | import logging, inspect, numpy, sys
import wxStocks_modules.wxStocks_formulas as formula
def run_test(
sample_stock,
sample_annual_data,
sample_analyst_estimates,
stock_list,
quit=False
):
annual_data_attribute_list = return_dictionary_of_object_attributes_and_values(sample_annual_data)
analyst_estimates_attribute_list = return_dictionary_of_object_attributes_and_values(sample_analyst_estimates)
data_lists = [annual_data_attribute_list, analyst_estimates_attribute_list]
for attribute_list in data_lists:
if attribute_list:
for attribute in attribute_list:
setattr(sample_stock, attribute, attribute_list[attribute])
else:
print(sample_stock.symbol, "needs to be updated")
print("\n\n\n\n\n\n")
print("-" * 3300)
print("Testing area")
print("\n\n\n\n\n\n")
print("\n\n\n")
for equation in formula.stock_only_needed:
print("\n\n\n")
print("trying: %s %s" % (sample_stock.symbol, equation.__name__))
try:
print(equation(sample_stock))
continue
except Exception as exception:
print("")
print(type(exception), exception, line_number())
print("function", equation.__name__, ":", "failed for", sample_stock.ticker)
for equation in formula.stock_plus_stock_list_needed:
print("\n\n\n")
print("trying: %s %s" % (sample_stock.symbol, equation.__name__))
try:
print(equation(sample_stock, stock_list))
continue
except Exception as exception:
print("")
print(type(exception), exception, line_number())
print("function", equation.__name__, ":", "failed for", sample_stock.ticker)
for equation in formula.annual_data_needed:
print("\n\n\n")
print("trying: %s %s" % (sample_stock.symbol, equation.__name__))
try:
print(equation(sample_stock, sample_annual_data))
continue
except Exception as exception:
print("")
print(type(exception), exception, line_number())
print("function", equation.__name__, ":", "failed for", sample_stock.ticker)
print("\n\n\n")
print_this_dict = return_dictionary_of_object_attributes_and_values(sample_stock)
#for attribute in print_this_dict:
# print("%s:" % attribute, print_this_dict[attribute])
print("\n\n\n")
if quit:
sys.exit()
####################### Utility functions #################################################
def return_dictionary_of_object_attributes_and_values(obj):
attribute_list = []
if obj:
for key in obj.__dict__:
if key[:1] != "__":
attribute_list.append(key)
obj_attribute_value_dict = {}
for attribute in attribute_list:
obj_attribute_value_dict[attribute] = getattr(obj, attribute)
#for attribute in obj_attribute_value_dict:
# print(attribute, ":", obj_attribute_value_dict[attribute])
return obj_attribute_value_dict
def line_number():
"""Returns the current line number in our program."""
return "File: %s\nLine %d:" % (inspect.getframeinfo(inspect.currentframe()).filename.split("/")[-1], inspect.currentframe().f_back.f_lineno)
############################################################################################ | 31.852632 | 141 | 0.705221 | 392 | 3,026 | 5.091837 | 0.227041 | 0.022044 | 0.021042 | 0.032064 | 0.56012 | 0.45992 | 0.45992 | 0.438377 | 0.397796 | 0.397796 | 0 | 0.002257 | 0.121613 | 3,026 | 95 | 142 | 31.852632 | 0.748683 | 0.083939 | 0 | 0.442857 | 0 | 0 | 0.080385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.028571 | 0 | 0.1 | 0.385714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbaf43c0a07eae0d08b5eefb8ad9791e2da57522 | 1,213 | py | Python | src/utils/bandwidth_selection.py | liyuan9988/IVOPEwithACME | d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558 | [
"MIT"
] | 1 | 2020-09-05T01:25:39.000Z | 2020-09-05T01:25:39.000Z | src/utils/bandwidth_selection.py | liyuan9988/IVOPEwithACME | d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558 | [
"MIT"
] | null | null | null | src/utils/bandwidth_selection.py | liyuan9988/IVOPEwithACME | d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558 | [
"MIT"
] | null | null | null | # pylint: disable=bad-indentation,missing-function-docstring
import functools
from acme.tf import networks
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import cdist
def get_bsuite_median(environment_spec, dataset):
data = next(iter(dataset)).data
obs, action = data[:2]
action_network = functools.partial(
tf.one_hot, depth=environment_spec.actions.num_values)
net = networks.CriticMultiplexer(action_network=action_network)
inputs = net(obs, action)
arr = inputs.numpy()
dists = cdist(arr, arr, "sqeuclidean")
return 1.0 / np.median(dists)
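# 1 / median(squared pairwise distance) is the classic "median heuristic" for
# choosing an RBF kernel bandwidth; get_dm_control_median below applies the
# same rule without the one-hot action network.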
def get_dm_control_median(dataset):
data = next(iter(dataset)).data
obs, action = data[:2]
net = networks.CriticMultiplexer()
inputs = net(obs, action)
arr = inputs.numpy()
dists = cdist(arr, arr, "sqeuclidean")
return 1.0 / np.median(dists)
def get_median(task_id, environment_spec, dataset):
if task_id.startswith("dm_control"):
return get_dm_control_median(dataset)
elif task_id.startswith("bsuite"):
return get_bsuite_median(environment_spec, dataset)
else:
raise ValueError(f"task id {task_id} not known")
| 28.209302 | 67 | 0.713932 | 164 | 1,213 | 5.128049 | 0.402439 | 0.035672 | 0.078478 | 0.061831 | 0.436385 | 0.388823 | 0.309156 | 0.309156 | 0.309156 | 0.309156 | 0 | 0.006048 | 0.182193 | 1,213 | 42 | 68 | 28.880952 | 0.841734 | 0.047815 | 0 | 0.387097 | 0 | 0 | 0.056375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbb24c4b6fcf99de3b261cbc2512af6cc60538d7 | 17,562 | py | Python | test/test_DataSource.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 20 | 2020-11-03T10:20:32.000Z | 2022-03-01T13:28:39.000Z | test/test_DataSource.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 70 | 2020-11-05T08:06:57.000Z | 2022-03-31T11:20:59.000Z | test/test_DataSource.py | suchak1/hyperdrive | 8bc78af179de8d2b26968683d3248840f7470d4c | [
"MIT"
] | 5 | 2021-04-07T05:26:40.000Z | 2022-02-25T15:26:02.000Z | import os
import sys
import pytest
from time import sleep, time
from random import choice
import pandas as pd
sys.path.append('src')
from DataSource import MarketData, IEXCloud, Polygon, \
StockTwits, LaborStats, Glassnode # noqa autopep8
import Constants as C # noqa autopep8
from Workflow import Flow # noqa autopep8
md = MarketData()
iex = IEXCloud()
poly = Polygon()
twit = StockTwits()
twit.token = ''
bls = LaborStats()
glass = Glassnode()
flow = Flow()
def use_dev_bucket(data_src_obj):
data_src_obj.writer.store.bucket_name = os.environ['S3_DEV_BUCKET']
data_src_obj.reader.store.bucket_name = os.environ['S3_DEV_BUCKET']
return data_src_obj
if not C.CI:
iex.token = os.environ['IEXCLOUD_SANDBOX']
iex = use_dev_bucket(iex)
md = use_dev_bucket(md)
poly = use_dev_bucket(poly)
twit = use_dev_bucket(twit)
bls = use_dev_bucket(bls)
glass = use_dev_bucket(glass)
# or simply make DevStore class that has s3 dev bucket name
iex.base = 'https://sandbox.iexapis.com'
exp_symbols = ['AAPL', 'FB', 'DIS']
retries = 10
class TestMarketData:
def test_init(self):
assert type(md).__name__ == 'MarketData'
assert hasattr(md, 'writer')
assert hasattr(md, 'reader')
assert hasattr(md, 'finder')
assert hasattr(md, 'provider')
def test_try_again(self):
assert md.try_again(lambda: 0) == 0
with pytest.raises(ZeroDivisionError):
md.try_again(lambda: 0 / 0)
def test_get_symbols(self):
symbols = set(md.get_symbols())
for symbol in exp_symbols:
assert symbol in symbols
def test_get_dividends(self):
df = md.get_dividends(symbol='AAPL')
assert {C.EX, C.PAY, C.DEC, C.DIV}.issubset(df.columns)
assert len(df) > 15
assert len(df[df[C.EX] < '2015-12-25']) > 0
assert len(df[df[C.EX] > '2020-01-01']) > 0
def test_standardize_dividends(self):
columns = ['exDate', 'paymentDate', 'declaredDate', 'amount']
new_cols = [C.EX, C.PAY, C.DEC, C.DIV]
sel_idx = 2
selected = columns[sel_idx:]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_dividends('AAPL', df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_dividends('AAPL', df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx < sel_idx else not col_in_df
def test_save_dividends(self):
symbol = 'O'
div_path = md.finder.get_dividends_path(symbol)
temp_path = f'{div_path}_TEMP'
if os.path.exists(div_path):
os.rename(div_path, temp_path)
for _ in range(retries):
iex.save_dividends(
symbol=symbol, timeframe='5y', retries=1, delay=0)
if not md.reader.check_file_exists(div_path):
delay = choice(range(5, 10))
sleep(delay)
else:
break
assert md.reader.check_file_exists(div_path)
assert md.reader.store.modified_delta(div_path).total_seconds() < 60
df = md.reader.load_csv(div_path)
assert {C.EX, C.PAY, C.DEC, C.DIV}.issubset(df.columns)
assert len(df) > 0
if os.path.exists(temp_path):
os.rename(temp_path, div_path)
def test_get_splits(self):
df = md.get_splits('NFLX')
assert {C.EX, C.DEC, C.RATIO}.issubset(df.columns)
assert len(df) > 0
def test_standardize_splits(self):
columns = ['exDate', 'paymentDate', 'declaredDate', 'ratio']
new_cols = [C.EX, C.PAY, C.DEC, C.RATIO]
sel_idx = 2
selected = columns[sel_idx:]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_splits('NFLX', df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_splits('NFLX', df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx < sel_idx else not col_in_df
def test_save_splits(self):
symbol = 'AAPL'
splt_path = md.finder.get_splits_path(symbol)
temp_path = f'{splt_path}_TEMP'
if os.path.exists(splt_path):
os.rename(splt_path, temp_path)
for _ in range(retries):
iex.save_splits(symbol=symbol, timeframe='5y', retries=1, delay=0)
if not md.reader.check_file_exists(splt_path):
delay = choice(range(5, 10))
sleep(delay)
else:
break
assert md.reader.check_file_exists(splt_path)
assert md.reader.store.modified_delta(splt_path).total_seconds() < 60
df = md.reader.load_csv(splt_path)
assert {C.EX, C.DEC, C.RATIO}.issubset(df.columns)
assert len(df) > 0
if os.path.exists(temp_path):
os.rename(temp_path, splt_path)
def test_get_social_sentiment(self):
df = md.get_social_sentiment('TSLA')
assert len(df) > 0
assert {C.TIME, C.POS, C.NEG}.issubset(df.columns)
def test_get_social_volume(self):
df = md.get_social_volume('TSLA')
assert len(df) > 0
assert {C.TIME, C.VOL, C.DELTA}.issubset(df.columns)
def test_save_social_sentiment(self):
symbol = 'ADBE'
sent_path = md.finder.get_sentiment_path(symbol)
temp_path = f'{sent_path}_TEMP'
if os.path.exists(sent_path):
os.rename(sent_path, temp_path)
twit.save_social_sentiment(
symbol=symbol, timeframe='1d', retries=1, delay=0)
assert md.reader.check_file_exists(sent_path)
assert md.reader.store.modified_delta(sent_path).total_seconds() < 60
df = md.reader.load_csv(sent_path)
assert {C.TIME, C.POS, C.NEG, C.VOL, C.DELTA}.issubset(df.columns)
assert len(df) > 0
if os.path.exists(temp_path):
os.rename(temp_path, sent_path)
def test_standardize_sentiment(self):
columns = ['timestamp', 'bullish', 'bearish']
new_cols = [C.TIME, C.POS, C.NEG]
sel_idx = 2
selected = columns[sel_idx:]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_sentiment('AAPL', df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_sentiment('AAPL', df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx < sel_idx else not col_in_df
def test_standardize_volume(self):
columns = ['timestamp', 'volume_score', 'volume_change']
new_cols = [C.TIME, C.VOL, C.DELTA]
sel_idx = 2
selected = columns[sel_idx:]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_volume('AAPL', df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_volume('AAPL', df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx < sel_idx else not col_in_df
def test_standardize_ohlc(self):
columns = ['date', 'open', 'high', 'low', 'close', 'volume']
new_cols = [C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE, C.VOL]
sel_idx = 2
selected = columns[:sel_idx]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_ohlc('NFLX', df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_ohlc('NFLX', df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx >= sel_idx else not col_in_df
def test_save_ohlc(self):
symbol = 'NFLX'
ohlc_path = md.finder.get_ohlc_path(symbol)
temp_path = f'{ohlc_path}_TEMP'
if os.path.exists(ohlc_path):
os.rename(ohlc_path, temp_path)
for _ in range(retries):
iex.save_ohlc(symbol=symbol, timeframe='1m', retries=1, delay=0)
if not md.reader.check_file_exists(ohlc_path):
delay = choice(range(5, 10))
sleep(delay)
else:
break
assert md.reader.check_file_exists(ohlc_path)
assert md.reader.store.modified_delta(ohlc_path).total_seconds() < 60
df = md.reader.load_csv(ohlc_path)
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 0
if os.path.exists(temp_path):
os.rename(temp_path, ohlc_path)
def test_save_intraday(self):
symbol = 'NFLX'
timeframe = '4d'
dates = md.traveller.dates_in_range(timeframe)
intra_paths = [md.finder.get_intraday_path(
symbol, date) for date in dates]
filenames = set(iex.save_intraday(symbol=symbol, timeframe=timeframe))
intersection = filenames.intersection(intra_paths)
assert intersection
for path in intersection:
df = md.reader.load_csv(path)
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 0
os.remove(path)
def test_get_ohlc(self):
df = md.get_ohlc('NFLX', '2m')
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 0
def test_get_intraday(self):
df = pd.concat(md.get_intraday(symbol='NFLX', timeframe='2m'))
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 0
def test_get_unemployment_rate(self):
df = md.get_unemployment_rate()
assert {C.TIME, C.UN_RATE}.issubset(df.columns)
assert len(df) > 100
def test_standardize_unemployment(self):
columns = ['time', 'value']
new_cols = [C.TIME, C.UN_RATE]
sel_idx = 1
selected = columns[:sel_idx]
df = pd.DataFrame({column: [0] for column in columns})
standardized = md.standardize_unemployment(df)
for column in new_cols:
assert column in standardized
df.drop(columns=selected, inplace=True)
standardized = md.standardize_unemployment(df)
for curr_idx, column in enumerate(new_cols):
col_in_df = column in standardized
assert col_in_df if curr_idx >= sel_idx else not col_in_df
def test_save_unemployment_rate(self):
assert 'unemployment.csv' in md.save_unemployment_rate(timeframe='2y')
def test_save_s2f_ratio(self):
assert 's2f.csv' in md.save_s2f_ratio()
def test_save_diff_ribbon(self):
assert 'diff_ribbon.csv' in md.save_diff_ribbon()
def test_save_sopr(self):
assert 'sopr.csv' in md.save_sopr()
class TestIEXCloud:
def test_init(self):
assert type(iex).__name__ == 'IEXCloud'
assert hasattr(iex, 'base')
assert hasattr(iex, 'version')
assert hasattr(iex, 'token')
assert hasattr(iex, 'provider')
def test_get_dividends(self):
df = []
for i in range(retries):
if not len(df):
df = iex.get_dividends(symbol='AAPL', timeframe='5y')
if not i:
delay = choice(range(5, 10))
sleep(delay)
else:
break
assert len(df) > 0
assert {C.EX, C.PAY, C.DEC, C.DIV}.issubset(df.columns)
def test_get_splits(self):
df1, df2 = [], []
for i in range(retries):
            if not (len(df1) or len(df2)):
df1 = iex.get_splits(symbol='AAPL', timeframe='5y')
df2 = iex.get_splits(symbol='NFLX', timeframe='5y')
if not i:
delay = choice(range(5, 10))
sleep(delay)
else:
break
assert len(df1) or len(df2)
assert {C.EX, C.DEC, C.RATIO}.issubset(
df1.columns) or {C.EX, C.DEC, C.RATIO}.issubset(df2.columns)
def test_get_ohlc(self):
df = iex.get_ohlc(symbol='AAPL', timeframe='1m')
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 10
def test_get_intraday(self):
df = pd.concat(iex.get_intraday(symbol='AAPL', timeframe='1w'))
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 1000
class TestPolygon:
def test_init(self):
assert type(poly).__name__ == 'Polygon'
assert hasattr(poly, 'client')
assert hasattr(poly, 'provider')
def test_get_dividends(self):
if not flow.is_any_workflow_running():
df = poly.get_dividends(symbol='AAPL', timeframe='5y')
assert {C.EX, C.PAY, C.DEC, C.DIV}.issubset(df.columns)
assert len(df) > 0
else:
print(
'Skipping Polygon.io dividends test because update in progress'
)
def test_get_splits(self):
if not flow.is_any_workflow_running():
df = poly.get_splits(symbol='AAPL')
assert {C.EX, C.DEC, C.RATIO}.issubset(df.columns)
assert len(df) > 0
else:
print('Skipping Polygon.io splits test because update in progress')
def test_get_ohlc(self):
if not flow.is_any_workflow_running():
df = poly.get_ohlc(symbol='AAPL', timeframe='1m')
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL, C.AVG}.issubset(df.columns)
assert len(df) > 10
else:
print('Skipping Polygon.io OHLC test because update in progress')
def test_get_intraday(self):
if not flow.is_any_workflow_running():
df = pd.concat(poly.get_intraday(symbol='AAPL', timeframe='1w'))
assert {C.TIME, C.OPEN, C.HIGH, C.LOW,
C.CLOSE, C.VOL}.issubset(df.columns)
assert len(df) > 1000
else:
print(
'Skipping Polygon.io intraday test because update in progress')
def test_log_api_call_time(self):
if hasattr(poly, 'last_api_call_time'):
delattr(poly, 'last_api_call_time')
poly.log_api_call_time()
assert hasattr(poly, 'last_api_call_time')
def test_obey_free_limit(self):
if hasattr(poly, 'last_api_call_time'):
delattr(poly, 'last_api_call_time')
then = time()
poly.log_api_call_time()
poly.obey_free_limit()
now = time()
assert now - then > C.POLY_FREE_DELAY
class TestStockTwits:
def test_init(self):
assert type(twit).__name__ == 'StockTwits'
assert hasattr(twit, 'provider')
assert hasattr(twit, 'token')
def test_get_social_volume(self):
df = twit.get_social_volume(symbol='TSLA')
assert len(df) > 30
assert {C.TIME, C.VOL, C.DELTA}.issubset(df.columns)
def test_get_social_sentiment(self):
df = twit.get_social_sentiment(symbol='TSLA')
assert len(df) > 30
assert {C.TIME, C.POS, C.NEG}.issubset(df.columns)
class TestLaborStats:
def test_init(self):
assert type(bls).__name__ == 'LaborStats'
        assert hasattr(bls, 'base')
        assert hasattr(bls, 'version')
        assert hasattr(bls, 'token')
        assert hasattr(bls, 'provider')
def test_get_unemployment_rate(self):
df = bls.get_unemployment_rate(timeframe='2y')
assert {C.TIME, C.UN_RATE}.issubset(df.columns)
assert len(df) > 12
class TestGlassnode:
def test_init(self):
assert type(glass).__name__ == 'Glassnode'
assert hasattr(glass, 'base')
assert hasattr(glass, 'version')
assert hasattr(glass, 'token')
assert hasattr(glass, 'provider')
def test_get_s2f_ratio(self):
df = glass.get_s2f_ratio(timeframe='max')
assert len(df) > 3000
assert {C.TIME, C.HALVING, C.RATIO}.issubset(df.columns)
def test_get_diff_ribbon(self):
df = glass.get_diff_ribbon(timeframe='max')
assert len(df) > 3000
assert set([C.TIME] + C.MAs).issubset(df.columns)
def test_get_sopr(self):
df = glass.get_sopr(timeframe='max')
assert len(df) > 3000
assert {C.TIME, C.SOPR}.issubset(df.columns)
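# Note: the retry loops above (test_save_splits, test_save_ohlc,
# TestIEXCloud.test_get_dividends, ...) repeat the same fetch/sleep pattern.
# A possible shared helper (hypothetical sketch, not used by these tests):
#
# def fetch_with_retries(fetch, succeeded, retries, min_delay=5, max_delay=10):
#     for _ in range(retries):
#         fetch()
#         if succeeded():
#             return True
#         sleep(choice(range(min_delay, max_delay)))
#     return succeeded()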
| 35.840816 | 80 | 0.588885 | 2,316 | 17,562 | 4.275475 | 0.100604 | 0.032519 | 0.029994 | 0.039487 | 0.68774 | 0.644314 | 0.573722 | 0.51939 | 0.487376 | 0.45102 | 0 | 0.012455 | 0.300535 | 17,562 | 489 | 81 | 35.91411 | 0.793634 | 0.005637 | 0 | 0.487562 | 0 | 0 | 0.061465 | 0 | 0 | 0 | 0 | 0 | 0.271144 | 1 | 0.116915 | false | 0 | 0.022388 | 0 | 0.156716 | 0.00995 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbb3136287cb05c4c04eb4cc1d4fa218279084f8 | 8,679 | py | Python | eqdes/dbd_tools.py | eng-tools/eqdes | f77809d3e79815b261a3385e33b81b596d514101 | [
"MIT"
] | null | null | null | eqdes/dbd_tools.py | eng-tools/eqdes | f77809d3e79815b261a3385e33b81b596d514101 | [
"MIT"
] | null | null | null | eqdes/dbd_tools.py | eng-tools/eqdes | f77809d3e79815b261a3385e33b81b596d514101 | [
"MIT"
] | 1 | 2020-11-07T04:46:23.000Z | 2020-11-07T04:46:23.000Z | """
References:
Sullivan, Salawdeh S., Pecker A. (2010). Soil-foundation-structure interaction considerations
for performance-based design of RC wall structures on shallow foundations.
"""
import numpy as np
from eqdes.extensions.exceptions import DesignError
def displacement_profile_frame(theta_c, heights, hm_factor, foundation=False,
fd_height=0.0, theta_f=0.0, verbose=0):
heights = np.array(heights)
if foundation:
displaced_shape = np.zeros(len(heights))
heights_ss = heights[1:] - fd_height
displaced_shape[1:] += cal_displaced_shape(theta_c, heights_ss, btype="frame")
displaced_shape += theta_f * heights
else:
displaced_shape = cal_displaced_shape(theta_c, heights, btype="frame")
displacements = displaced_shape * hm_factor
if verbose:
print('theta_c: ', theta_c)
print('hm_factor: ', hm_factor)
print('displacements: ', displacements)
return displacements
def cal_higher_mode_factor(n_storeys, btype="frame"):
"""
Calculates the higher mode factor according to DDBD12 CL.
    :param n_storeys: number of storeys
    :param btype: building type (e.g. frame or wall)
:return:
"""
# Higher mode factor for frame structures
if btype == "frame":
if n_storeys < 6:
factor = 1.0
elif n_storeys <= 16:
factor = 1.0 - 0.15 * float((n_storeys - 6.0)) / (16.0 - 6.0)
else:
factor = 0.85
else:
if n_storeys < 10:
factor = 1.0
elif n_storeys <= 16:
factor = (1.0 - 0.06 * float((n_storeys - 10.0)) / (16.0 - 10.0))
else:
factor = 0.85
return factor
def cal_displaced_shape(theta_c, heights, btype="frame"):
heights = np.array(heights)
max_height = max(heights)
if btype == "frame":
return theta_c * heights * (4 * max_height - heights) / (4 * max_height - heights[0])
def equivalent_sdof(masses, displacements, heights):
mass_x_disp = masses * displacements
mass_x_disp2 = masses * displacements ** 2
mass_x_disp_x_height = masses * displacements * heights
delta_d = np.sum(mass_x_disp2, axis=0) / np.sum(mass_x_disp, axis=0)
mass_eff = np.sum(mass_x_disp, axis=0) / delta_d
height_eff = np.sum(mass_x_disp_x_height, axis=0) / np.sum(mass_x_disp, axis=0)
return delta_d, mass_eff, height_eff
def equivalent_sdof_as_series(masses, displacements, heights):
mass_x_disp = masses[:, np.newaxis] * displacements
mass_x_disp2 = masses[:, np.newaxis] * displacements ** 2
mass_x_disp_x_height = masses[:, np.newaxis] * displacements * heights[:, np.newaxis]
delta_d = np.sum(mass_x_disp2, axis=0) / np.sum(mass_x_disp, axis=0)
mass_eff = np.sum(mass_x_disp, axis=0) / delta_d
height_eff = np.sum(mass_x_disp_x_height, axis=0) / np.sum(mass_x_disp, axis=0)
return delta_d, mass_eff, height_eff
def yield_displacement(theta_y, height_eff): # TODO: check where this comes from?
return theta_y * height_eff
def yield_displacement_wall(phi_y, heights, max_height):
"""
The yield displacement of a concrete wall. Eq. 6 (Sullivan et al. 2010).
    :param phi_y: yield curvature
    :param heights: height(s) at which the displacement is evaluated
    :param max_height: total wall height
    :return:
"""
return phi_y * heights ** 2 / 2 + phi_y * heights ** 3 / max_height
def conc_frame_yield_drift(fye, youngs_steel, av_bay_length, av_beam_depth):
"""
Yield drift of a concrete frame from DDBD (Priestley et al. (2007)
    :param fye: Effective yield strength of reinforcing steel
    :param youngs_steel: Young's modulus of reinforcing steel
    :param av_bay_length: Average bay length
    :param av_beam_depth: Average beam depth
:return:
"""
return 0.5 * fye / youngs_steel * (av_bay_length / av_beam_depth)
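# Illustrative check (input values are assumptions, not from the reference):
# fye = 400 MPa, Es = 200 GPa, 6.0 m bays and 0.6 m deep beams give
# 0.5 * (400e6 / 200e9) * (6.0 / 0.6) = 0.01 rad of yield drift.
# >>> conc_frame_yield_drift(400.0e6, 200.0e9, 6.0, 0.6)
# 0.01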
def ductility(delta_current, delta_y):
"""
Computes the ductility for a given displacement and yield.
:param delta_current: current displacement
:param delta_y: yield displacement
:return:
"""
return delta_current / delta_y
def reduction_factor(xi, near_field=False):
    if near_field:
# Damping reduction factor
return (0.07 / (0.02 + xi)) ** 0.25
else:
# Damping reduction factor
return (0.07 / (0.02 + xi)) ** 0.5
def damping_from_reduction_factor(eta):
"""
Inversion of reduction factor equation
    :param eta: damping reduction factor
:return:
"""
return (0.07 / eta ** 2) - 0.02
def equivalent_viscous_damping(mu, mtype="concrete", btype="frame"):
"""
Calculate the equivalent viscous damping based on the ductility and structural type.
:param mu: Displacement ductility
:param mtype: material type
:param btype: building type (e.g. frame or wall)
:return:
"""
pie = 3.141
if mu < 1:
return 0.05
if mtype == "concrete":
if btype == "frame":
# Equivalent viscous damping for concrete frame
return 0.05 + 0.565 * (mu - 1) / (mu * pie)
if btype == "wall":
# Equivalent viscous damping for concrete wall (Sullivan et al., 2010)
return 0.05 + 0.444 * (mu - 1) / (mu * pie)
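# Illustrative values (inputs are assumptions): a concrete frame at a
# displacement ductility of 4 gives 0.05 + 0.565 * 3 / (4 * 3.141) ~= 0.185
# (~18.5% equivalent viscous damping), while mu <= 1 returns the elastic 5%.
# >>> round(equivalent_viscous_damping(4.0, mtype="concrete", btype="frame"), 3)
# 0.185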
def effective_period(delta_d, eta, corner_disp, corner_period):
corner_disp_eff = corner_disp * eta
if delta_d > corner_disp_eff:
return 0.0
else:
return corner_period * delta_d / corner_disp_eff
def effective_period_from_stiffness(mass_eff, k_eff):
"""
Calculates the effective period based on the mass and stiffness
:param mass_eff: effective mass
:param k_eff: effective stiffness
:return:
"""
return 2 * 3.141 * np.sqrt(mass_eff / k_eff)
def effective_stiffness_from_base_shear(v_base, disp):
"""
Calculates the effective stiffness based on the base shear and displacement.
Typically used in displacement based assessment
:return:
"""
return v_base / disp
def displacement_from_effective_period(eta, corner_disp, t_eff, corner_period):
"""
Displacement of SDOF using displacement-based assessment. # Eq. 11 Millen et al. (2016)
:param eta: Displacement reduction factor
:param corner_disp: Corner spectral displacement
    :param t_eff: Effective period
    :param corner_period: Corner period
:return:
"""
if t_eff > corner_period:
return eta * corner_disp
return eta * corner_disp * t_eff / corner_period
def effective_stiffness(mass_eff, t_eff):
"""
    Calculates the effective stiffness based on the mass and effective period
:param mass_eff: effective mass
:param t_eff: effective period
:return:
"""
return (4 * 3.141 ** 2 * mass_eff) / t_eff ** 2
def design_base_shear(k_eff, delta_d):
return k_eff * delta_d
def bilinear_load_factor(ductility_current, ductility_max, r):
"""
    Computes the load factor on a bi-linear capacity curve
    :param ductility_current: Current ductility
    :param ductility_max: Maximum ductility
    :param r: post-yield bi-linear stiffness ratio
:return: factor to reduce maximum load
"""
hardening_load = r * (ductility_max - 1)
if ductility_current > ductility_max:
raise DesignError("Current ductility: {0}, exceeds maximum ductility {1}".format(ductility_current,
ductility_max))
elif ductility_current > 1.0:
return 1.0 - hardening_load + r * (ductility_current - 1)
else: # Elastic behaviour
return (1.0 - hardening_load) * ductility_current
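# Quick sanity check (assumed numbers): with r = 0.05 and ductility_max = 6,
# hardening_load = 0.25, so bilinear_load_factor(6, 6, 0.05) returns
# 1.0 - 0.25 + 0.05 * 5 = 1.0, and bilinear_load_factor(1, 6, 0.05) returns
# (1.0 - 0.25) * 1 = 0.75 of the maximum load.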
def calculate_storey_forces(masses, displacements, v_base, btype):
if btype == 'frame':
k = 0.9
else:
k = 1.0
mass_x_disp = np.array(masses) * np.array(displacements)
storey_forces = k * v_base * mass_x_disp / sum(mass_x_disp) # Newtons per storey
storey_forces[-1] += (1 - k) * v_base
return storey_forces
def p_delta_moment(mass_eff, delta_d):
return mass_eff * 9.8 * delta_d
def p_delta_base_shear(mass_eff, delta_d, h_eff, v_base):
moment_pd = p_delta_moment(mass_eff, delta_d)
    pd_factor = moment_pd / (v_base * h_eff)  # stability index: M_PD / (V * H)
if pd_factor > 0.1:
c_p_delta = 0.5
return c_p_delta * moment_pd / h_eff
else:
return 0
def yield_curvature(epsilon_y, length, btype="wall"):
if btype == "wall":
return 2.0 * epsilon_y / length
def add_foundation(ss_heights, ss_masses, fd_height, fd_mass):
# add foundation to heights
heights = list(ss_heights)
heights.insert(0, 0)
heights = np.array(heights) + fd_height
# add foundation to masses
storey_masses = list(ss_masses)
storey_masses.insert(0, fd_mass)
storey_masses = np.array(storey_masses)
return heights, storey_masses
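if __name__ == '__main__':
    # Minimal end-to-end sketch of the direct DBD workflow implemented above.
    # All numbers are illustrative assumptions, not design values.
    storey_heights = np.array([3.0, 6.0, 9.0, 12.0])  # m
    storey_masses = np.array([40.0e3, 40.0e3, 40.0e3, 30.0e3])  # kg
    theta_c = 0.02  # critical storey drift
    hm_factor = cal_higher_mode_factor(len(storey_heights), btype="frame")
    disps = displacement_profile_frame(theta_c, storey_heights, hm_factor)
    delta_d, mass_eff, height_eff = equivalent_sdof(storey_masses, disps, storey_heights)
    theta_y = conc_frame_yield_drift(400.0e6, 200.0e9, 6.0, 0.6)
    mu = ductility(delta_d, yield_displacement(theta_y, height_eff))
    xi = equivalent_viscous_damping(mu, mtype="concrete", btype="frame")
    eta = reduction_factor(xi)
    t_eff = effective_period(delta_d, eta, corner_disp=1.0, corner_period=4.0)
    v_base = design_base_shear(effective_stiffness(mass_eff, t_eff), delta_d)
    print('delta_d:', delta_d, 'T_eff:', t_eff, 'V_base:', v_base)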
| 30.886121 | 107 | 0.657564 | 1,213 | 8,679 | 4.473207 | 0.173124 | 0.017508 | 0.02488 | 0.01843 | 0.262993 | 0.195356 | 0.178769 | 0.143752 | 0.115739 | 0.10247 | 0 | 0.029165 | 0.24542 | 8,679 | 280 | 108 | 30.996429 | 0.799359 | 0.260168 | 0 | 0.220588 | 0 | 0 | 0.026506 | 0 | 0 | 0 | 0 | 0.003571 | 0 | 1 | 0.176471 | false | 0 | 0.014706 | 0.022059 | 0.419118 | 0.022059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbb343e9b2be1df7cf6c1f7dd29d44a9cc9e8f11 | 458 | py | Python | adaptive_filters/filters/lms.py | IsaacCorley/Adaptive-Filters | 8bcb63d0ff51a216ca9c12ca77d525a05ba1616f | [
"MIT"
] | 1 | 2022-02-01T11:25:28.000Z | 2022-02-01T11:25:28.000Z | adaptive_filters/filters/lms.py | IsaacCorley/Adaptive-Filters | 8bcb63d0ff51a216ca9c12ca77d525a05ba1616f | [
"MIT"
] | null | null | null | adaptive_filters/filters/lms.py | IsaacCorley/Adaptive-Filters | 8bcb63d0ff51a216ca9c12ca77d525a05ba1616f | [
"MIT"
] | 1 | 2022-02-01T11:25:28.000Z | 2022-02-01T11:25:28.000Z | from .base import AdaptiveFilter
class LMSFilter(AdaptiveFilter):
    """Least mean squares (LMS) filter: w(k+1) = w(k) + mu * e(k) * x(k)."""
def __init__(self, *args, **kwargs):
super(LMSFilter, self).__init__(*args, **kwargs)
def step(self, xi, yi, wi):
# Predict
yi_hat = self.predict(xi)
# Compute error
ei = yi - yi_hat
# Compute gradient dw = mu * e(k) * x(k)
dw = self.mu * ei * xi
# Update weights
w = wi + dw
return yi_hat, ei, w | 20.818182 | 56 | 0.545852 | 59 | 458 | 4.050847 | 0.525424 | 0.062762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.338428 | 458 | 22 | 57 | 20.818182 | 0.788779 | 0.163755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbb77d5b2fe4f504aa0a72cdea81d5a871e075f9 | 5,524 | py | Python | mesonwrap/wrapcreator_test.py | mesonbuild/wrapweb | c616627d7448fcadd601e838bedf1d717474cb1c | [
"Apache-2.0"
] | 22 | 2015-05-30T12:40:58.000Z | 2020-01-02T10:46:11.000Z | mesonwrap/wrapcreator_test.py | mesonbuild/wrapweb | c616627d7448fcadd601e838bedf1d717474cb1c | [
"Apache-2.0"
] | 102 | 2015-05-30T17:12:02.000Z | 2020-01-25T11:20:35.000Z | mesonwrap/wrapcreator_test.py | mesonbuild/wrapweb | c616627d7448fcadd601e838bedf1d717474cb1c | [
"Apache-2.0"
] | 8 | 2018-02-27T14:59:24.000Z | 2020-01-24T18:13:16.000Z | import hashlib
import io
import os
import os.path
import unittest
import zipfile
import git
from mesonwrap import gitutils
from mesonwrap import ini
from mesonwrap import tempfile
from mesonwrap import wrapcreator
class WrapCreatorTest(unittest.TestCase):
def setUp(self):
self._workdir = tempfile.TemporaryDirectory()
self.workdir = self._workdir.name
def tearDown(self):
self._workdir.cleanup()
def mkfile(self, filename, contents=b''):
with open(os.path.join(self.workdir, filename), 'wb') as f:
f.write(contents)
def mkdir(self, dirname):
os.makedirs(os.path.join(self.workdir, dirname))
def test_make_zip(self):
self.mkfile('.gitignore')
self.mkfile('upstream.wrap', b'hello world')
self.mkdir('.git')
self.mkfile('.git/hello')
self.mkfile('meson.wrap', b'meson project')
self.mkdir('hello')
self.mkfile('hello/world', b'some contents')
with io.BytesIO() as zipf:
wrapcreator._make_zip(zipf, self.workdir, 'myprefix')
with zipfile.ZipFile(zipf, 'r') as zip:
self.assertListEqual(zip.namelist(), [
'myprefix/meson.wrap',
'myprefix/hello/world',
])
self.assertEqual(zip.read('myprefix/meson.wrap'),
b'meson project')
self.assertEqual(zip.read('myprefix/hello/world'),
b'some contents')
def test_check_wrapfile_empty(self):
with self.assertRaises(RuntimeError):
wrapcreator._check_wrapfile(ini.WrapFile())
def test_check_wrapfile_okay(self):
up = ini.WrapFile()
up.directory = 'hello'
up.source_url = 'https://example.com/file.tgz'
up.source_filename = 'file.tgz'
up.source_hash = 'hash-hash-hash'
try:
wrapcreator._check_wrapfile(up)
except RuntimeError as e:
self.fail(f'Unexpected RuntimeError {e!r}')
def test_make_wrap(self):
repo = gitutils.GitProject(git.Repo.init(self.workdir))
repo.commit('initial commit')
repo.create_version('1.2.3')
with repo.open('upstream.wrap', 'w') as f:
ini.WrapFile(
directory='hello',
source_url='https://example.com/file.tgz',
source_filename='file.tgz',
source_hash='hash-hash-hash').write(f)
with repo.open('meson.wrap', 'w') as f:
f.write('hello world')
repo.commit('my commit')
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.2.3')
up = ini.WrapFile.from_string(wrap.wrapfile_content)
self.assertEqual(up.directory, 'hello')
self.assertEqual(up.source_url, 'https://example.com/file.tgz')
self.assertEqual(up.source_filename, 'file.tgz')
self.assertEqual(up.source_hash, 'hash-hash-hash')
self.assertEqual(up.patch_url, 'https://wrapdb.mesonbuild.com/v1/'
'projects/project/1.2.3/1/get_zip')
self.assertEqual(up.patch_filename, 'project-1.2.3-1-wrap.zip')
with io.BytesIO(wrap.zip) as zipf:
with zipfile.ZipFile(zipf, 'r') as zip:
self.assertListEqual(zip.namelist(), ['hello/meson.wrap'])
self.assertEqual(zip.read('hello/meson.wrap'), b'hello world')
self.assertEqual(up.patch_hash, hashlib.sha256(wrap.zip).hexdigest())
self.assertEqual(wrap.wrapfile_name, 'project-1.2.3-1-wrap.wrap')
self.assertEqual(wrap.zip_name, 'project-1.2.3-1-wrap.zip')
self.assertEqual(wrap.commit_sha, repo.head_hexsha)
def test_make_wrap_bad_wrapfile(self):
repo = gitutils.GitProject(git.Repo.init(self.workdir))
repo.commit('initial commit')
repo.create_version('1.2.3')
with repo.open('upstream.wrap', 'w') as f:
f.write('[wrap-file]\n')
f.write('hello = world\n')
repo.commit('my commit')
with self.assertRaisesRegex(
RuntimeError, 'Missing .* in upstream.wrap'):
_ = wrapcreator.make_wrap('project', repo.git_dir, '1.2.3')
def test_merged_revisions(self):
repo = gitutils.GitProject(git.Repo.init(self.workdir))
repo.commit('initial commit')
repo.create_version('1.0.0')
with repo.open('upstream.wrap', 'w') as f:
ini.WrapFile(
directory='hello',
source_url='https://example.com/file.tgz',
source_filename='file.tgz',
source_hash='hash-hash-hash').write(f)
repo.commit('commit 1')
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.0.0')
self.assertEqual(wrap.revision, 1)
comm2 = repo.commit('commit 2')
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.0.0')
self.assertEqual(wrap.revision, 2)
repo.commit('commit 3')
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.0.0')
self.assertEqual(wrap.revision, 3)
repo.merge_commit('commit 4', parent=comm2)
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.0.0')
self.assertEqual(wrap.revision, 4)
repo.merge_commit('commit 5', parent=comm2)
wrap = wrapcreator.make_wrap('project', repo.git_dir, '1.0.0')
self.assertEqual(wrap.revision, 5)
if __name__ == '__main__':
unittest.main()
| 38.361111 | 78 | 0.603548 | 686 | 5,524 | 4.750729 | 0.176385 | 0.082848 | 0.029457 | 0.049402 | 0.507211 | 0.438478 | 0.387849 | 0.369745 | 0.349494 | 0.349494 | 0 | 0.015377 | 0.258327 | 5,524 | 143 | 79 | 38.629371 | 0.780083 | 0 | 0 | 0.245902 | 0 | 0 | 0.175959 | 0.019008 | 0 | 0 | 0 | 0 | 0.180328 | 1 | 0.081967 | false | 0 | 0.090164 | 0 | 0.180328 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbb91268662b9930ca5897f3a73fe712152fdc33 | 6,130 | py | Python | python_data_scien/algorithmic_thinking1/application2.py | Arthur-Lanc/coursera | 58cab28c723e2f60ddfdaa37acde6dc97c107222 | [
"MIT"
] | null | null | null | python_data_scien/algorithmic_thinking1/application2.py | Arthur-Lanc/coursera | 58cab28c723e2f60ddfdaa37acde6dc97c107222 | [
"MIT"
] | null | null | null | python_data_scien/algorithmic_thinking1/application2.py | Arthur-Lanc/coursera | 58cab28c723e2f60ddfdaa37acde6dc97c107222 | [
"MIT"
] | null | null | null | import random
import math
import alg_upa_trial
import alg_application2_provided
import project2
import matplotlib.pyplot as plt
import time
def u_er(n,p):
v = set([i for i in range(n)])
e = set([])
for i in range(n):
for j in range(n):
if i < j:
a = random.random()
if a < p:
e.add(frozenset({i,j}))
g = (v,e)
return g
def u_graph_to_dictionary(g):
v = g[0]
e = g[1]
result_dict = {}
for i in v:
result_dict[i] = set([])
for item in e:
temp_list = list(item)
i = temp_list[0]
j = temp_list[1]
result_dict[i].add(j)
result_dict[j].add(i)
return result_dict
def dictionary_to_u_graph(result_dict):
v = set(result_dict.keys())
e = set([])
for i,value in result_dict.items():
for j in value:
e.add(frozenset({i,j}))
return (v,e)
def make_complete_ugraph(n):
'''
make_complete_ugraph
'''
v = set([i for i in range(n)])
e = set([])
for i in range(n):
for j in range(n):
if i < j:
e.add(frozenset({i,j}))
g = (v,e)
return g
def upa(n,m):
g = make_complete_ugraph(m)
v = g[0]
e = g[1]
upa_trial_obj = alg_upa_trial.UPATrial(m)
for i in range(m,n):
v.add(i)
v_t_set = upa_trial_obj.run_trial(m)
for j in v_t_set:
e.add(frozenset({i,j}))
return (v,e)
def random_order(graph_tuple):
v = graph_tuple[0]
v_list = list(v)
random.shuffle(v_list)
return v_list
def legend_example(p,m,node,comput_res_list,er_res_list,upa_res_list):
"""
    Plot the computer-network, ER and UPA resilience curves with legends
"""
xvals = [i for i in range(0,node+1)]
plt.plot(xvals, comput_res_list, '-b', label='computer network graph')
plt.plot(xvals, er_res_list, '-r', label='er graph(p:%s)' % str(p))
plt.plot(xvals, upa_res_list, '-y', label='upa graph(m:%s)' % str(m))
plt.legend(loc='upper right')
plt.xlabel('removed node num')
plt.ylabel('largest cc size')
plt.title('compute_resilience')
plt.grid(True)
plt.show()
def fasttargetedorder(graph_dict):
graph_dict_copy = alg_application2_provided.copy_graph(graph_dict)
n = len(graph_dict_copy.keys())
    degreesets = [set() for _ in range(n)]
for i in range(n):
d = len(graph_dict_copy[i])
degreesets[d].add(i)
l = []
for k in range(n-1,-1,-1):
while len(degreesets[k]) != 0:
u = degreesets[k].pop()
for v in graph_dict_copy[u]:
d = len(graph_dict_copy[v])
degreesets[d].remove(v)
degreesets[d-1].add(v)
l.append(u)
alg_application2_provided.delete_node(graph_dict_copy, u)
return l
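# Quick sanity check for fasttargetedorder (illustrative; matches the style of
# the commented examples at the bottom of this file). Nodes must be labelled
# 0..n-1 and the dictionary maps each node to the set of its neighbours.
# For a star graph the hub comes out first (remaining order is arbitrary):
# demo_graph = {0: set([1, 2, 3]), 1: set([0]), 2: set([0]), 3: set([0])}
# print fasttargetedorder(demo_graph)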
def question3_plot():
upa_graph_dict_list = []
m = 5
for n in range(10, 1000, 10):
upa_graph_tuple = upa(n,m)
upa_graph_dict = u_graph_to_dictionary(upa_graph_tuple)
upa_graph_dict_list.append(upa_graph_dict)
xvals = [n for n in range(10, 1000, 10)]
targeted_order_yvals = []
for n in range(len(xvals)):
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        alg_application2_provided.targeted_order(upa_graph_dict_list[n])
        elapsed = time.perf_counter() - start
targeted_order_yvals.append(elapsed)
fasttargetedorder_yvals = []
for n in range(len(xvals)):
        start = time.perf_counter()
        fasttargetedorder(upa_graph_dict_list[n])
        elapsed = time.perf_counter() - start
fasttargetedorder_yvals.append(elapsed)
plt.plot(xvals, targeted_order_yvals, '-b', label='targeted_order')
plt.plot(xvals, fasttargetedorder_yvals, '-r', label='fasttargetedorder')
plt.legend(loc='upper right')
plt.xlabel('node num')
plt.ylabel('running times')
plt.title('desktop Python')
plt.grid(True)
plt.show()
node = 1239
edge = 3047
p = float(3047)/float(766941)
m = int((float(edge)/float(node)))
comput_graph_dict = alg_application2_provided.load_graph(alg_application2_provided.NETWORK_URL)
#comput_graph_tuple = dictionary_to_u_graph(comput_graph_dict)
#comput_res_list = project2.compute_resilience(comput_graph_dict,random_order(comput_graph_tuple))
comput_res_list2 = project2.compute_resilience(comput_graph_dict,alg_application2_provided.targeted_order(comput_graph_dict))
er_graph_tuple = u_er(node,p)
er_graph_dict = u_graph_to_dictionary(er_graph_tuple)
#er_res_list = project2.compute_resilience(er_graph_dict,random_order(er_graph_tuple))
er_res_list2 = project2.compute_resilience(er_graph_dict,alg_application2_provided.targeted_order(er_graph_dict))
upa_graph_tuple = upa(node,m)
upa_graph_dict = u_graph_to_dictionary(upa_graph_tuple)
#upa_res_list = project2.compute_resilience(upa_graph_dict,random_order(upa_graph_tuple))
upa_res_list2 = project2.compute_resilience(upa_graph_dict,alg_application2_provided.targeted_order(upa_graph_dict))
#Question 1 (5 pts)
#legend_example(p,m,node,comput_res_list,er_res_list,upa_res_list)
#Question 2 (1 pt)
#all three graphs are resilient under random attacks as the first 20% of their nodes are removed.
#question 3
# targeted_order : O(n**2)
# fast_targeted_order : O(n)
#question3_plot()
#question 4
legend_example(p,m,node,comput_res_list2,er_res_list2,upa_res_list2)
#Question 5 (1 pt)
#er graph is resilient under targeted attacks as the first 20% of their nodes are removed
#Question 6
#cost,may be it is very expensive to model a random network.
#####################################
# graph = u_er(4,0.5)
# r = u_graph_to_dictionary(graph)
# print r
# graph = make_complete_ugraph(node)
# # r = u_graph_to_dictionary(graph)
# # print graph
# # print r
# print len(graph[0])
# print len(graph[1])
# graph = upa(5,3)
# r = u_graph_to_dictionary(graph)
# print r
# print dictionary_to_u_graph(r) | 31.116751 | 126 | 0.63752 | 931 | 6,130 | 3.943072 | 0.16971 | 0.066195 | 0.021792 | 0.023972 | 0.422501 | 0.370199 | 0.28657 | 0.216835 | 0.177608 | 0.14274 | 0 | 0.020189 | 0.240457 | 6,130 | 197 | 127 | 31.116751 | 0.768256 | 0.185318 | 0 | 0.296296 | 0 | 0 | 0.042137 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.051852 | 0 | 0.17037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbba851f8286eead43fccdae35d91fa261e1a21e | 3,682 | py | Python | scripts/dirac-daemon.py | alexanderrichards/LZProduction | de5b70ec195a9cd573da9d59807ff959358a5e37 | [
"MIT"
] | 2 | 2017-04-28T10:16:06.000Z | 2018-06-25T06:54:58.000Z | scripts/dirac-daemon.py | alexanderrichards/LZProduction | de5b70ec195a9cd573da9d59807ff959358a5e37 | [
"MIT"
] | 106 | 2016-10-07T16:29:33.000Z | 2021-03-22T09:13:24.000Z | scripts/dirac-daemon.py | alexanderrichards/LZProduction | de5b70ec195a9cd573da9d59807ff959358a5e37 | [
"MIT"
] | 1 | 2017-04-27T15:34:12.000Z | 2017-04-27T15:34:12.000Z | #!/usr/bin/env python
# pylint: disable=invalid-name
"""Dirac daemon run script."""
import os
import sys
import importlib
import argparse
import logging
from logging.handlers import TimedRotatingFileHandler
from DIRAC.Core.Base import Script
if __name__ == '__main__':
app_name = os.path.splitext(os.path.basename(__file__))[0]
lzprod_root = os.path.dirname(
os.path.dirname(
os.path.expanduser(
os.path.expandvars(
os.path.realpath(
os.path.abspath(__file__))))))
parser = argparse.ArgumentParser(description='Run the DIRAC environment daemon.')
parser.add_argument('-s', '--host', default='localhost',
help="The dirac environment API host [default: %(default)s]")
parser.add_argument('-p', '--port', default=18861, type=int,
help="The dirac environment API port [default: %(default)s]")
parser.add_argument('-f', '--pid-file', default=os.path.join(lzprod_root, app_name + '.pid'),
help="The pid file used by the daemon [default: %(default)s]")
parser.add_argument('-l', '--log-dir', default=os.path.join(lzprod_root, 'log'),
help="Path to the log directory. Will be created if doesn't exist "
"[default: %(default)s]")
parser.add_argument('-v', '--verbose', action='count',
help="Increase the logged verbosite, can be used twice")
parser.add_argument('--debug-mode', action='store_true', default=False,
help="Run the daemon in a debug interactive monitoring mode. "
"(debugging only)")
args = parser.parse_args()
# DIRAC will parse our command line args unless we remove them
sys.argv = sys.argv[:1]
Script.parseCommandLine(ignoreErrors=True)
# Dynamic imports to module level
###########################################################################
# Add the python src path to the sys.path for future imports
sys.path.append(lzprod_root)
DiracDaemon = importlib.import_module('lzproduction.rpc.DiracRPCServer').DiracDaemon
# Logging setup
###########################################################################
# check and create logging dir
if not os.path.isdir(args.log_dir):
if os.path.exists(args.log_dir):
raise Exception("%s path already exists and is not a directory so cant make log dir"
% args.log_dir)
os.mkdir(args.log_dir)
# setup the handler
fhandler = TimedRotatingFileHandler(os.path.join(args.log_dir, 'dirac-daemon.log'),
when='midnight', backupCount=5)
if args.debug_mode:
fhandler = logging.StreamHandler()
fhandler.setFormatter(logging.Formatter("[%(asctime)s] %(name)15s : %(levelname)8s : %(message)s"))
# setup the root logger
root_logger = logging.getLogger()
root_logger.handlers = [fhandler]
root_logger.setLevel({None: logging.INFO,
1: logging.INFO,
2: logging.DEBUG}.get(args.verbose, logging.DEBUG))
# setup the main app logger
logger = logging.getLogger(app_name)
logger.debug("Script called with args: %s", args)
# Daemon setup
###########################################################################
DiracDaemon(address=(args.host, args.port),
app=app_name,
pid=args.pid_file,
logger=logger,
keep_fds=[fhandler.stream.fileno()],
foreground=args.debug_mode).start()
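    # Example invocation (illustrative values; flags defined by the parser above):
    #   python dirac-daemon.py --host 0.0.0.0 --port 18861 -vv --debug-mode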
| 44.361446 | 103 | 0.57333 | 414 | 3,682 | 4.990338 | 0.391304 | 0.037754 | 0.049371 | 0.040658 | 0.128751 | 0.088093 | 0 | 0 | 0 | 0 | 0 | 0.004741 | 0.255296 | 3,682 | 82 | 104 | 44.902439 | 0.748724 | 0.094514 | 0 | 0 | 0 | 0 | 0.225671 | 0.010023 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbbca67e3ed02ae623e7c96bac6d9e3edff4d752 | 769 | py | Python | avatar_sgg/sentence_embedding/util.py | rafiberlin/clp-sose21-pm-vision | 55c786182ed4568cdeda4bb3676fa02b9580d68d | [
"MIT"
] | null | null | null | avatar_sgg/sentence_embedding/util.py | rafiberlin/clp-sose21-pm-vision | 55c786182ed4568cdeda4bb3676fa02b9580d68d | [
"MIT"
] | null | null | null | avatar_sgg/sentence_embedding/util.py | rafiberlin/clp-sose21-pm-vision | 55c786182ed4568cdeda4bb3676fa02b9580d68d | [
"MIT"
] | null | null | null | import torch
def vectorize_captions(ade20k_split, vectorizer, caption_key="caption"):
    """
    :param ade20k_split: dictionary mapping each image to an entry whose `caption_key` field holds its captions
    :param vectorizer: As in Vectorizer in distilbert_vectorizer.py or a SentenceTransformer model used in SentenceBert
    :param caption_key: key under which the captions are stored for each image
    :return: tensor of caption embeddings stacked along dimension 0
    """
stacked_vectors = None
for image in ade20k_split:
        vectors = vectorizer.encode(ade20k_split[image][caption_key], convert_to_tensor=True)
# adds a dimension at position 0; dimension 0 is used to "list the entries"
if len(vectors.shape) < 3:
vectors = vectors.unsqueeze(0)
if stacked_vectors is None:
stacked_vectors = vectors
else:
stacked_vectors = torch.cat((stacked_vectors, vectors), dim=0)
return stacked_vectors | 36.619048 | 119 | 0.682705 | 95 | 769 | 5.357895 | 0.515789 | 0.165029 | 0.078585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022298 | 0.241873 | 769 | 21 | 120 | 36.619048 | 0.850772 | 0.286086 | 0 | 0 | 0 | 0 | 0.013333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbbd58f8381a2258e5b20a406e574412ed80f67d | 3,147 | py | Python | examples/data_descriptors/one_class_classification.py | oulenz/fuzzy-rough-learn | 4565c1809f5ac426442ecf392c8ec7aeb438ada0 | [
"MIT"
] | 7 | 2019-12-17T15:11:05.000Z | 2022-01-19T17:05:22.000Z | examples/data_descriptors/one_class_classification.py | oulenz/fuzzy-rough-learn | 4565c1809f5ac426442ecf392c8ec7aeb438ada0 | [
"MIT"
] | 8 | 2020-05-26T21:56:02.000Z | 2022-01-15T18:50:26.000Z | examples/data_descriptors/one_class_classification.py | oulenz/fuzzy-rough-learn | 4565c1809f5ac426442ecf392c8ec7aeb438ada0 | [
"MIT"
] | 2 | 2020-11-06T12:27:47.000Z | 2021-04-29T15:25:06.000Z | """
========================
One class classification
========================
Data descriptors generalise knowledge about a target class of data to the whole attribute space.
This can be used to predict whether new data instances belong to the target class or not,
or to identify which new instances should be subjected to further inspection.
This type of binary classification is known as *one-class classification*,
*semi-supervised outlier detection*, *semi-supervised anomaly detection*, or *novelty detection*.
In principle, there is no good or bad way to generalise the target class, this can only be evaluated empirically.
In practice, we want a good balance between variance and bias.
The following graphs illustrate the behaviour of the data descriptors in fuzzy-rough-learn,
with their default hyperparameter values as established in [1]_.
Note that the predicted scores have been converted to quantiles
to obtain clear contour lines that illustrate how the predicted scores taper off.
References
----------
.. [1] `Lenz OU, Peralta D, Cornelis C (2021).
Average Localised Proximity: A new data descriptor with good default one-class classification performance.
Pattern Recognition, vol 118, no 107991.
doi: 10.1016/j.patcog.2021.107991
<https://www.sciencedirect.com/science/article/abs/pii/S0031320321001783>`_
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from frlearn.data_descriptors import ALP, CD, IF, MD, NND, LNND, LOF, SVM
# Sample attribute space, to use as test data
xx, yy = np.meshgrid(np.linspace(-6, 6, 300), np.linspace(-6, 6, 300))
# Generate training data
rng = np.random.default_rng(0)
X = rng.standard_normal((100, 2))
X_train = np.r_[1 * X + 2, 0.75*X, 0.5 * X - 2]
# Initialise data descriptors to include
data_descriptors = [
('ALP', ALP()),
('CD', CD()),
('IF', IF()),
('LNND', LNND()),
('LOF', LOF()),
('MD', MD()),
('NND', NND()),
('SVM', SVM()),
]
# Calculate number of rows
cols = 3
rows = (len(data_descriptors) + (cols - 1)) // cols
# Create plot layout with square subplots
fig, axs = plt.subplots(rows, cols, figsize=(3*cols, 3*rows), subplot_kw=dict(box_aspect=1), )
# Iterate over data descriptors
for i, (name, clf) in enumerate(data_descriptors):
ax = axs[i // cols][i % cols]
# Create model and query for scores
model = clf(X_train)
Z = model(np.c_[xx.ravel(), yy.ravel()])
# Transform scores into their respective centile
centiles = np.quantile(Z, np.linspace(0, 1, 101))
Z = np.searchsorted(centiles, Z)/100
Z = Z.reshape(xx.shape)
# Plot contours
ax.contourf(xx, yy, Z, levels=np.linspace(0, 1, 12), cmap=plt.cm.PuBu)
# Plot training data
c = ax.scatter(X_train[:, 0], X_train[:, 1], c='white', s=10, edgecolors='k')
# Set axis limits and delete ticks and legends
plt.xlim((-6, 6))
plt.ylim((-6, 6))
c.axes.get_xaxis().set_visible(False)
c.axes.get_yaxis().set_visible(False)
ax.set_title(name)
# Delete spare subfigures
for i in range((-len(data_descriptors)) % cols):
fig.delaxes(axs[-1, -(i + 1)])
fig.tight_layout()
plt.show()
| 33.126316 | 113 | 0.68605 | 478 | 3,147 | 4.460251 | 0.502092 | 0.063321 | 0.030957 | 0.011257 | 0.014071 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037293 | 0.173499 | 3,147 | 94 | 114 | 33.478723 | 0.782391 | 0.546235 | 0 | 0 | 0 | 0 | 0.019872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbbf240e012256bcc4d85263d1b311e8333b59e3 | 4,287 | py | Python | apps/database/models.py | gureuso/flask-blog | be1df7660e5837833eab273006ee2be7e3f1eda0 | [
"Apache-2.0"
] | null | null | null | apps/database/models.py | gureuso/flask-blog | be1df7660e5837833eab273006ee2be7e3f1eda0 | [
"Apache-2.0"
] | null | null | null | apps/database/models.py | gureuso/flask-blog | be1df7660e5837833eab273006ee2be7e3f1eda0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
import flask_login
from apps.database.session import db, login_manager
from config import JsonConfig
def get_model(model):
if JsonConfig.get_data('TESTING'):
return model.test_model
return model
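# Example: with TESTING set in JsonConfig, get_model(PostModel) resolves to
# TestPostModel (the `test_posts` table); otherwise it returns PostModel
# itself, as in the module-level aliases below (e.g. Post = get_model(PostModel)).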
class TestMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
message = db.Column(db.String(120))
class TestTestModel(TestMixin, db.Model):
__tablename__ = 'test_tests'
class TestModel(TestMixin, db.Model):
__tablename__ = 'tests'
test_model = TestTestModel
Test = get_model(TestModel)
class CommentMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
content = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.now())
updated_at = db.Column(db.DateTime, onupdate=datetime.now(), default=datetime.now())
class TestCommentModel(CommentMixin, db.Model):
__tablename__ = 'test_comments'
user_id = db.Column(db.Integer(), db.ForeignKey('test_users.id'))
post_id = db.Column(db.Integer(), db.ForeignKey('test_posts.id'))
parent_id = db.Column(db.Integer(), db.ForeignKey('test_comments.id'), nullable=True)
class CommentModel(CommentMixin, db.Model):
__tablename__ = 'comments'
user_id = db.Column(db.Integer(), db.ForeignKey('users.id'))
post_id = db.Column(db.Integer(), db.ForeignKey('posts.id'))
parent_id = db.Column(db.Integer(), nullable=True)
test_model = TestCommentModel
Comment = get_model(CommentModel)
class ViewMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
ip_address = db.Column(db.String(15))
created_at = db.Column(db.DateTime, default=datetime.now())
class TestViewModel(ViewMixin, db.Model):
__tablename__ = 'test_views'
user_id = db.Column(db.Integer(), db.ForeignKey('test_users.id'))
post_id = db.Column(db.Integer(), db.ForeignKey('test_posts.id'))
class ViewModel(ViewMixin, db.Model):
__tablename__ = 'views'
user_id = db.Column(db.Integer(), db.ForeignKey('users.id'))
post_id = db.Column(db.Integer(), db.ForeignKey('posts.id'))
test_model = TestViewModel
View = get_model(ViewModel)
class TagMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(255))
class TestTagModel(TagMixin, db.Model):
__tablename__ = 'test_tags'
post_id = db.Column(db.Integer(), db.ForeignKey('test_posts.id'))
class TagModel(TagMixin, db.Model):
__tablename__ = 'tags'
post_id = db.Column(db.Integer(), db.ForeignKey('posts.id'))
test_model = TestTagModel
Tag = get_model(TagModel)
class PostMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(255))
content = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.now())
updated_at = db.Column(db.DateTime, onupdate=datetime.now(), default=datetime.now())
class TestPostModel(PostMixin, db.Model):
__tablename__ = 'test_posts'
user_id = db.Column(db.Integer(), db.ForeignKey('test_users.id'))
tags = db.relationship('TestTagModel', backref='post')
class PostModel(PostMixin, db.Model):
__tablename__ = 'posts'
user_id = db.Column(db.Integer(), db.ForeignKey('users.id'))
tags = db.relationship('TagModel', backref='post')
test_model = TestPostModel
Post = get_model(PostModel)
class UserMixin:
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(120), unique=True)
nickname = db.Column(db.String(20), unique=True)
password = db.Column(db.String(255))
class TestUserModel(UserMixin, flask_login.UserMixin, db.Model):
__tablename__ = 'test_users'
posts = db.relationship('TestPostModel', backref='user')
comments = db.relationship('TestCommentModel', backref='user')
class UserModel(UserMixin, flask_login.UserMixin, db.Model):
__tablename__ = 'users'
posts = db.relationship('PostModel', backref='user')
comments = db.relationship('CommentModel', backref='user')
test_model = TestUserModel
User = get_model(UserModel)
@login_manager.user_loader
def member_loader(user_id):
return User.query.filter(User.id == user_id).first()
| 25.670659 | 89 | 0.710753 | 556 | 4,287 | 5.280576 | 0.165468 | 0.092643 | 0.115804 | 0.081744 | 0.540191 | 0.492507 | 0.482629 | 0.452657 | 0.430177 | 0.399864 | 0 | 0.005488 | 0.149988 | 4,287 | 166 | 90 | 25.825301 | 0.80022 | 0.004899 | 0 | 0.290323 | 0 | 0 | 0.079034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021505 | false | 0.010753 | 0.043011 | 0.010753 | 0.913978 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbc00ca62701282387cec06893d5523a7ee5a007 | 426 | py | Python | autograde/static/__init__.py | Feelx234/autograde | d41d8c280cbb83fc5fefb8fa1dea8342a5ba3731 | [
"MIT"
] | null | null | null | autograde/static/__init__.py | Feelx234/autograde | d41d8c280cbb83fc5fefb8fa1dea8342a5ba3731 | [
"MIT"
] | null | null | null | autograde/static/__init__.py | Feelx234/autograde | d41d8c280cbb83fc5fefb8fa1dea8342a5ba3731 | [
"MIT"
] | null | null | null | from pathlib import Path
def _load(*args, mode='rt'):
with Path(__file__).parent.joinpath(*args).open(mode=mode if mode.startswith('r') else f'r{mode}') as f:
return f.read().strip() + '\n' if mode.endswith('t') else f.read()
# Globals and constants variables.
INJECT_BEFORE = _load('inject_before.py')
INJECT_AFTER = _load('inject_after.py')
CSS = _load('basic.css')
FAVICON = _load('favicon.ico', mode='rb')
| 30.428571 | 108 | 0.685446 | 66 | 426 | 4.227273 | 0.590909 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133803 | 426 | 13 | 109 | 32.769231 | 0.756098 | 0.075117 | 0 | 0 | 0 | 0 | 0.168367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbc313f5bd1097c51c675f2d595398428edaedbf | 3,183 | py | Python | build.py | tapika/test_travis_ci | b06c206d277882cd104f5224375daa9f63b66372 | [
"MIT"
] | null | null | null | build.py | tapika/test_travis_ci | b06c206d277882cd104f5224375daa9f63b66372 | [
"MIT"
] | null | null | null | build.py | tapika/test_travis_ci | b06c206d277882cd104f5224375daa9f63b66372 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os, sys, platform
import argparse, threading
import builder
from builder import execcmd
isWindows = platform.system().lower().find("windows") != -1
parser = argparse.ArgumentParser()
parser.add_argument('-buildtype', help='Select build configuration, one of: Debug/Release')
args = parser.parse_args()
buildType = args.buildtype
if buildType == None: buildType="Release"
builtByBuilder=os.environ.get('TRAVIS')
print("build.py, running on python " + platform.python_version() )
sys.stdout.flush()
if isWindows:
execcmd("where ninja")
else:
execcmd("which ninja")
scriptDir=os.path.dirname(os.path.realpath(__file__))
projDir = os.path.join(scriptDir, "..", "src")
builder.gitClone("-b cling-patches http://root.cern.ch/git/llvm.git", projDir)
toolsDir = os.path.join(projDir, "tools")
os.chdir(toolsDir)
builder.gitClone("http://root.cern.ch/git/cling.git", "cling")
os.chdir(toolsDir)
builder.gitClone("-b cling-patches http://root.cern.ch/git/clang.git", "clang")
if isWindows:
cacheDir = "x64-" + buildType
else:
cacheDir = "WSL-" + buildType
cachePath = os.path.join(scriptDir, "..", "out", cacheDir)
if not os.path.exists(cachePath):
os.makedirs(cachePath)
os.chdir(cachePath)
cmd = "cmake -G Ninja -DCMAKE_BUILD_TYPE={}".format(buildType)
#cmd = "cmake -DCMAKE_BUILD_TYPE={}".format(buildType)
# -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON
if isWindows:
cmd = cmd + ' -DCMAKE_INSTALL_PREFIX:PATH="{}"'.format(os.path.join(scriptDir, "out", "install", cacheDir))
#ninjaPath = os.path.join(os.environ["VSINSTALLDIR"],"Common7\\IDE\\CommonExtensions\\Microsoft\\CMake\\Ninja\\ninja.exe" )
#cmd = cmd + ' -DCMAKE_MAKE_PROGRAM="{}"'.format(ninjaPath)
    # cmake is strict about case-sensitive paths; VS uses an uppercase 'X' in "HostX64".
cl_path = os.popen('where cl.exe').read().rstrip().replace("Hostx64", "HostX64")
cmd = cmd + ' -DCMAKE_CXX_COMPILER:FILEPATH="{}"'.format(cl_path)
cmd = cmd + ' -DCMAKE_C_COMPILER:FILEPATH="{}"'.format(cl_path)
cmd = cmd + ' "{}"'.format(projDir)
execcmd(cmd)
buildCpus = 7
if builtByBuilder:
buildCpus = 2
if buildType == "Release":
buildTimeoutMin = 35
else:
# Debug builds are slower generally in windows & linux
buildTimeoutMin = 25
if not builtByBuilder:
buildTimeoutMin = 2*60
os.chdir(cachePath)
#
# No output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself.
# Check the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received
#
pingTime = 5 * 60
stopTimer = False
def pingTravis(doPrint = True):
if stopTimer:
return
if doPrint:
print("- Build still in progress...")
sys.stdout.flush()
threading.Timer(pingTime, pingTravis).start()
pingTravis(False)
cmd='ninja -j {} cling libcling'.format(buildCpus)
if not execcmd(cmd, True, buildTimeoutMin*60):
print ("\nNote: Cancelled build, timeout\n")
sys.stdout.flush()
stopTimer = True
| 28.936364 | 168 | 0.689287 | 410 | 3,183 | 5.292683 | 0.456098 | 0.02212 | 0.023041 | 0.026267 | 0.149309 | 0.072811 | 0.072811 | 0.041475 | 0.041475 | 0.041475 | 0 | 0.009388 | 0.163368 | 3,183 | 109 | 169 | 29.201835 | 0.805483 | 0.223688 | 0 | 0.19697 | 0 | 0 | 0.238335 | 0.050021 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.060606 | 0 | 0.090909 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbc4d4b81091756370bfef8c3353cce0ddef9102 | 23,719 | py | Python | envs/envs_assistive/env_viewer.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | 1 | 2021-11-22T07:45:28.000Z | 2021-11-22T07:45:28.000Z | envs/envs_assistive/env_viewer.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | envs/envs_assistive/env_viewer.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | import gym, sys, argparse
import numpy as np
np.set_printoptions(precision=5)
import transforms3d as transforms3d
import matplotlib.pyplot as plt
import seaborn as sns
import mujoco_py
from mujoco_py.generated import const
import pybullet as p
from envs.gym_kuka_mujoco.controllers import iMOGVIC
from envs.gym_kuka_mujoco.utils.transform_utils import *
from envs.envs_assistive.feeding_envs import *
from envs.envs_assistive.drinking_envs import *
from envs.envs_assistive.scratch_itch_envs import *
from code.pytorch.LAMPO.core.rl_bench_box import *
import os, sys, multiprocessing, gym, ray, shutil, argparse, importlib, glob
import time
# from .learn import make_env
# import assistive_gym
import imageio
import commentjson
from code.pytorch.LAMPO.core.rl_bench_box import Mujoco_model, Mujoco_RL_model, AssistiveDRL
from envs.robosuite.robosuite.controllers import *
if sys.version_info < (3, 0):
print('Please use Python 3')
exit()
def render_frame(viewer, pos, euler):
viewer.add_marker(pos=pos,
label='',
type=const.GEOM_SPHERE,
size=[.01, .01, .01])
# mat = quat2mat(quat)
mat = transforms3d.euler.euler2mat(euler[0], euler[1], euler[2], 'sxyz')
cylinder_half_height = 0.02
pos_cylinder = pos + mat.dot([0.0, 0.0, cylinder_half_height])
viewer.add_marker(pos=pos_cylinder,
label='',
type=const.GEOM_CYLINDER,
size=[.005, .005, cylinder_half_height],
mat=mat)
def render_point(viewer, pos):
viewer.add_marker(pos=pos,
label='',
type=const.GEOM_SPHERE,
size=[.01, .01, .01])
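# Usage sketch (assumes an active mujoco_py.MjViewer; the pose values mirror
# the targets used further below):
# render_frame(viewer, np.array([0.1, 0., 1.2]), np.array([-3.128104, 0.00437383, -2.08817412]))
# render_point(viewer, np.array([0.1, 0., 1.2]))
# viewer.render()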
def vis_impedance_random_sawyer_setpoint(initial_angles=None):
options = dict()
num_waypoints = 3
options['model_path'] = 'a_sawyer_test.xml'
options['rot_scale'] = .3
options['stiffness'] = np.array([1., 1., 1., 3., 3., 3.])
options['controlled_joints'] = ["robot0_right_j0", "robot0_right_j1",
"robot0_right_j2", "robot0_right_j3",
"robot0_right_j4", "robot0_right_j5",
"robot0_right_j6"]
options['num_waypoints'] = 3
options['null_space_damping'] = 1.0
import os
from envs.gym_kuka_mujoco import kuka_asset_dir
model_path = os.path.join(kuka_asset_dir(), 'a_sawyer_test.xml')
model = mujoco_py.load_model_from_path(model_path)
sim = mujoco_py.MjSim(model)
controller = iMOGVIC(sim, **options)
frame_skip = 50
high = np.array([.1, .1, .1, 2, 2, 2])
low = -np.array([.1, .1, .1, 2, 2, 2])
viewer = mujoco_py.MjViewer(sim)
# set parameters :::
scale = np.array([8.0, 0.0, 0.0])
scale_list = scale.repeat([6, 6, 6], axis=0).reshape(num_waypoints, 6)
stiffness_list = np.array([[20., 4., 4., 4., 4., 4.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.]])
control_scale = np.ones_like(stiffness_list) * 100
stiffness_list = control_scale * stiffness_list
print("stiffness_list :::", stiffness_list)
damping_list = scale_list * np.sqrt(stiffness_list)
print("damping_list :::", damping_list)
weight_list = np.array([1.0, 0.05, 0.01])
controller.set_params_direct(stiffness_list, damping_list, weight_list)
# Set a different random state and run the controller.
# qpos = np.random.uniform(-1., 1., size=7)
# qpos = np.array([0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628])
qpos = initial_angles
controller.update_initial_joints(qpos)
qvel = np.zeros(7)
sim_state = sim.get_state()
sim_state.qpos[:] = qpos
sim_state.qvel[:] = qvel
sim.set_state(sim_state)
sim.forward()
controller.update_state()
print("current ee_pose :::", controller.ee_pose)
target_pos, target_mat = controller.get_pose_site("target_ee_site")
print("target ee_pose :::", target_pos)
while True:
viewer.render()
# set way_points :::
initial_state = np.array([-0.60349109, 0.09318907, 0.27348721,
1.9265446606661554, -0.40240192959667226, -1.541555812071902])
optimal_state = np.array([-0.80349109, 0.09318907, 0.07348721,
1.9265446606661554, -0.40240192959667226, -1.541555812071902])
state_scale = initial_state - optimal_state
pos_set_list = np.array([[0.1, 0., 1.2],
[0.1, 0., 1.2],
[0.1, 0., 1.2]])
quat_set_list = np.array([[-3.128104, 0.00437383, -2.08817412],
[-3.128104, 0.00437383, -2.08817412],
[-3.128104, 0.00437383, -2.08817412]])
way_points_list = np.concatenate((pos_set_list, quat_set_list), axis=1)
print("way_point_list :::", way_points_list)
controller.set_way_points(way_points_list)
print("reference list :::", controller.reference_list)
optimal_pose = pos_set_list[0, :]
velocity_list = []
position_list = []
stiffness_matrix = []
damping_matrix = []
energy_list = []
# for i in range(1):
# # qpos = np.random.uniform(-1., 1., size=7)
# qpos = np.array([-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628])
# # qpos = np.array([-0.5538, -0.8208, 0.4155, 0.8409, -0.4955, 0.6482, 1.9628])
# qvel = np.zeros(7)
# state = np.concatenate([qpos, qvel])
#
# sim_state = sim.get_state()
# sim_state.qpos[:] = qpos
# sim_state.qvel[:] = qvel
# sim.set_state(sim_state)
# sim.forward()
# for j in range(1000):
# controller.update_state()
# torque, V, pose_err, vel_err, stiffness_eqv, damping_eqv = controller.update_vic_torque()
# energy_list.append(V)
# position_list.append(pose_err)
# velocity_list.append(vel_err)
# stiffness_matrix.append(stiffness_eqv)
# damping_matrix.append(damping_eqv)
# # torque = controller.get_euler_torque(way_points_list)
# # torque = controller.update_torque(way_points_list)
# print("final state", np.linalg.norm(controller.state, ord=2))
# sim.data.ctrl[:] = torque[:7]
# sim.step()
# render_frame(viewer, pos_set_list[0, :], quat_set_list[0, :])
# viewer.render()
def make_env(env_name, coop=False, seed=1001):
if not coop:
env = gym.make('assistive_gym:'+env_name)
else:
# module = importlib.import_module('assistive_gym.envs')
env_class = globals()[env_name.split('-')[0] + 'Env']
print(env_class)
env = env_class()
env.seed(seed)
return env
def sample_action(env, coop):
if coop:
return {'robot': env.action_space_robot.sample(), 'human': env.action_space_human.sample()}
return env.action_space.sample()
def viewer(env_name):
coop = 'Human' in env_name
# env = make_env(env_name, coop=True) if coop else gym.make(env_name)
env = FeedingSawyerHumanEnv()
# env = DrinkingSawyerHumanEnv()
options = dict()
options['model_path'] = 'a_sawyer_test.xml'
options['rot_scale'] = .3
options['stiffness'] = np.array([1., 1., 1., 3., 3., 3.])
options['controlled_joints'] = ["robot0_right_j0",
"robot0_right_j1",
"robot0_right_j2",
"robot0_right_j3",
"robot0_right_j4",
"robot0_right_j5",
"robot0_right_j6"]
options['num_waypoints'] = 3
# options['frame_skip'] = 10
options['null_space_damping'] = 1.0
param_dir = '/home/zhimin/code/5_thu/rl-robotic-assembly-control/code/pytorch/LAMPO/params/'
param_file = 'IMOGICAssitive.json'
param_file = os.path.join(param_dir, param_file)
with open(param_file) as f:
set_params = commentjson.load(f)
mujoco_model = Mujoco_model(set_params["controller_options"], render=True)
while True:
done = False
env.render()
# observation, spoon_pos, spoon_orient = env.reset()
observation = env.reset()
# print("Initial observation :", observation)
print('+' * 100)
        spoon_pos_initial, spoon_orient_initial = env.get_tool_pose()  # initial tool pose, used for delta_pos and the final error print
print("robot_joint_angles :", observation['robot_joint_angles'])
# print("spoon pos :", spoon_pos, "spoon orient :", spoon_orient)
# print("spoon orient :", spoon_orient)
# set way points : target pose
pos, ori = env.robot.get_ee_pose()
start_euler = transforms3d.euler.quat2euler(ori, 'sxyz')
print("EE pos :", pos, "ee euler :", start_euler)
# target_pose = env.get_context()
# print("target pos :", env.target_pos)
# # print("target ori :", env.target_orient)
# print("target euler :", transforms3d.euler.quat2euler(env.target_orient, 'sxyz'))
# # p.getQuaternionFromEuler()
target_euler = transforms3d.euler.quat2euler(env.target_orient, 'sxyz')
print("target pos :", env.target_pos, "target euler :", target_euler)
        delta_pos = env.target_pos - spoon_pos_initial
# delta_pos = np.array([0., 0.3, 0.1])
# print("des_pos", pos + delta_pos)
# print('+' * 100)
print("delta_pos :", delta_pos)
ee_pose = mujoco_model.reset(observation['robot_joint_angles'])
print("initial_pose :", np.array(ee_pose))
print('+' * 100)
mujoco_model.set_waypoints(ee_pose[:3] + delta_pos, ee_pose[3:])
# mujoco_model.set_waypoints(ee_pose[:3] + np.array([0., -0.1, 0.2]), transforms3d.euler.quat2euler(ee_pose[3:], 'sxyz'))
# set impedance params
mujoco_model.set_impedance_params(params=None)
# time.sleep(10)
joint_list = []
joint_last = observation['robot_joint_angles']
time_steps = 0
while not done:
# action = sample_action(env, coop)
joint = mujoco_model.step(np.zeros(7))
# print("robot joints :", joint[0])
human_action = np.zeros(env.action_human_len)
action = {'robot': joint[0].copy() - joint_last, 'human': human_action} # env.action_space_human.sample()
joint_list.append(joint[0].copy())
# print("sample_action :", action)
observation, reward, done, info = env.step(action)
# print('robot joints pybullet:', observation['robot_joint_angles'])
if coop:
done = done['__all__']
# print('Robot reward:', reward['robot'], 'Human reward:', reward['human'])
# time.sleep(0.1)
joint_last = observation['robot_joint_angles']
time_steps += 1
print('+' * 100)
print("target pos :", env.target_pos)
print("target euler :", transforms3d.euler.quat2euler(env.target_orient, 'sxyz'))
spoon_pos, spoon_orient = env.get_tool_pose()
print("spoon pos :", spoon_pos)
print("spoon orient :", spoon_orient)
# set way points : target pose
pos, ori = env.robot.get_ee_pose()
start_euler = transforms3d.euler.quat2euler(ori, 'sxyz')
print("EE pos :", pos, "ee euler :", start_euler)
print("time_steps :", time_steps)
print("Mujoco ee pose :", mujoco_model.get_ee_pose())
print("Error :", mujoco_model.get_ee_pose() - ee_pose)
print("Pybullet error :", spoon_pos - spoon_pos_inital)


def mujoco_eval(env_name):
    coop = 'Human' in env_name
    env = make_env(env_name, coop=True) if coop else gym.make(env_name)
    env.render()
    # env.reset()
    observation = env.reset()
    print("obs :", observation['robot_joint_angles'])

    param_dir = '/home/zhimin/code/5_thu/rl-robotic-assembly-control/code/pytorch/LAMPO/params/'
    param_file = 'VICESAssitiveItch.json'
    param_file = os.path.join(param_dir, param_file)
    with open(param_file) as f:
        params = commentjson.load(f)
    mujoco_model = Mujoco_RL_model(params["controller_options"], render=True)

    # qpos = np.array([0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 1.6482, 1.9628])
    # qpos = np.array([1.73155, 1.91932, 1.47255, -2.29171, 0.42262, 1.13446, 1.75369])
    qpos = np.array([[1.73155, 1.91932, 1.47255, 3.99147, 0.42262, 1.13446, 1.75369]])
    ee_pose = mujoco_model.reset(qpos)
    # ee_pose = mujoco_model.reset(np.array([2.95, 4.07, -0.06, 1.44171, -6.2, 3.7, -0.35369]))
    # print("ee_pose :", ee_pose)
    # print("goal_pose :", mujoco_model.controller.goal_pos, mujoco_model.controller.goal_ori)
    # ee_pose = mujoco_model.reset(np.zeros(7))
    # ee_euler = np.array(transforms3d.euler.euler2mat(ee_pose[3], ee_pose[4], ee_pose[5], 'sxyz'))
    #
    # while True:
    #     mujoco_model.viewer_render()
    #     action = np.array([-0.0, 0.1, 0.0, 0.0, 0.0, 0.0])
    #     joint = mujoco_model.step(action)
    #     # joint = mujoco_model.step(action, set_pos=ee_pose[:3], set_ori=ee_euler)
    #     print("ee_pose :", mujoco_model.get_ee_pose())
    # while True:
    #     action = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    #     target_euler = transforms3d.euler.mat2euler(mujoco_model.controller.goal_ori, 'sxyz')
    #     print("target pos :", mujoco_model.controller.goal_pos, "target euler :", target_euler)
    #     joint = mujoco_model.step(action)
    #     print("ee_pose :", mujoco_model.get_ee_pose())
    #     mujoco_model.viewer_render()
    # done = False
    # while not done:
    #     # action = sample_action(env, coop)
    #     action = np.array([0., 0.00, 0.0, 0.0, 0.0, 0.])
    #     print("goal_pos :", mujoco_model.controller.goal_pos)
    #     print("ee_pos :", mujoco_model.get_ee_pose())
    #     joint = mujoco_model.step(action, set_pos=ee_pose[:3], set_ori=ee_euler)

    while True:
        done = False
        env.render()
        print('+' * 100)
        observation = env.reset()
        if observation['robot_joint_angles'] is not None:
            print("Done !!!")
        # observation, spoon_pos, spoon_orient = env.reset()
        # print("Initial observation :", observation)
        # spoon_pos_inital, spoon_orient_initial = env.get_tool_pose()
        # print("robot_joint_angles :", observation['robot_joint_angles'])
        # print("spoon pos :", spoon_pos, "spoon orient :", spoon_orient)
        # print("spoon orient :", spoon_orient)
        human_action = np.zeros(env.action_human_len)
        # action = {'robot': np.array([0.0, 0.0, 0., 0.0, 0.0, 0.0, 0.0]), 'human': human_action}  # env.action_space_human.sample()
        #
        # # print("sample_action :", action)
        # observation, reward, done, info = env.step(action)
        print("robot_joint_angles :", observation['robot_joint_angles'])
        done = False

        # set way points : target pose
        pos, ori = env.robot.get_ee_pose()
        start_euler = transforms3d.euler.quat2euler(ori, 'sxyz')
        start_ori = transforms3d.euler.euler2mat(start_euler[0], start_euler[1], start_euler[2], 'sxyz')
        print("EE pos :", pos, "ee euler :", start_euler)
        # target_pose = env.get_context()
        # print("target pos :", env.target_pos)
        # print("target ori :", env.target_orient)
        # print("target euler :", transforms3d.euler.quat2euler(env.target_orient, 'sxyz'))
        # p.getQuaternionFromEuler()
        # target_euler = transforms3d.euler.quat2euler(env.target_orient, 'sxyz')
        # print("target pos :", env.target_pos, "target euler :", target_euler)
        # delta_pos = env.target_pos - spoon_pos
        # delta_pos = np.array([0., 0.3, 0.1])
        # print("des_pos", pos + delta_pos)
        # print('+' * 100)
        # print("delta_pos :", delta_pos)

        initial_angle = observation['robot_joint_angles']
        # initial_angle[6] = 1.534
        ee_pose = mujoco_model.reset(initial_angle)
        print("initial_pose :", np.array(ee_pose))
        ee_euler = np.array(transforms3d.euler.euler2mat(ee_pose[3], ee_pose[4], ee_pose[5], 'sxyz'))
        print('+' * 100)
        # time.sleep(10)

        joint_list = []
        joint_last = observation['robot_joint_angles']
        time_steps = 0
        # action = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        # joint_last = mujoco_model.step(action, set_pos=ee_pose[:3], set_ori=start_ori)
        # time.sleep(2)
        while not done:
            # action = sample_action(env, coop)
            action = np.array([0.0, -0.1, 0.0, 0.0, 0.0, 0.])
            joint = mujoco_model.step(action)
            print("Robot Joints :", joint)
            human_action = np.zeros(env.action_human_len)
            action = {'robot': joint.copy() - joint_last, 'human': human_action}  # env.action_space_human.sample()
            joint_list.append(joint.copy())
            # print("sample_action :", action)
            observation, reward, done, info = env.step(action)
            # print('robot joints pybullet:', observation['robot_joint_angles'])
            if coop:
                done = done['__all__']
                # print('Robot reward:', reward['robot'], 'Human reward:', reward['human'])
            # time.sleep(0.1)
            joint_last = observation['robot_joint_angles']
            time_steps += 1

        # print('+' * 100)
        # print("target pos :", env.target_pos)
        # print("target euler :", transforms3d.euler.quat2euler(env.target_orient, 'sxyz'))
        # spoon_pos, spoon_orient = env.get_tool_pose()
        # print("spoon pos :", spoon_pos)
        # print("spoon orient :", spoon_orient)
        # set way points : target pose
        pos, ori = env.robot.get_ee_pose()
        start_euler = transforms3d.euler.quat2euler(ori, 'sxyz')
        print("EE pos :", pos, "ee euler :", start_euler)
        print("time_steps :", time_steps)
        print("Mujoco ee pose :", mujoco_model.get_ee_pose())
        print("Error :", mujoco_model.get_ee_pose() - ee_pose)
        # print("Pybullet error :", spoon_pos - spoon_pos_inital)


def viewer_mujoco(env_name, params):
    coop = 'Human' in env_name
    env = make_env(env_name, coop=True) if coop else gym.make(env_name)
    env = AssistiveDRL(env, params, logdir='')

    # Grab name of this rollout combo
    video_name = "{}-{}-{}".format("env_test", "jaco", "controller_osc").replace("_", "-")
    # Calculate appropriate fps
    fps = 10
    # Define video writer
    video_writer = imageio.get_writer("{}.mp4".format(video_name), fps=fps)

    while True:
        done = False
        env.render()
        observation = env.reset()
        action = sample_action(env, coop)
        print(observation)
        env.setup_camera(camera_width=1920 // 2, camera_height=1080 // 2)
        if coop:
            print('Robot observation size:', np.shape(observation['robot']), 'Human observation size:',
                  np.shape(observation['human']), 'Robot action size:', np.shape(action['robot']),
                  'Human action size:', np.shape(action['human']))
        else:
            print('Observation size:', np.shape(observation), 'Action size:', np.shape(action))
        while not done:
            observation, reward, done, info = env.step(sample_action(env, coop))
            img, _ = env.get_camera_image_depth()
            video_writer.append_data(img)
            env.render()
            if coop:
                done = done['__all__']


def viewer_pybullet(env_name, params):
    coop = 'Human' in env_name
    env = make_env(env_name, coop=True) if coop else gym.make(env_name)
    # task = globals()[params["alg_options"]["task_class"]](args, env, '')
    env = AssistiveDRL(env, params, logdir='')
    # task.reset()
    # while True:
    #     task._env.render()
    #     task.send_movement(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    env.reset()
    while True:
        # env.view_render()
        env.step(np.array([0.0, 0.0, 1, 0.0, 0.0, 0.0]))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Assistive Gym Environment Viewer')
    parser.add_argument('--env',
                        # default="ScratchItchJacoHuman-v1",
                        default="DrinkingSawyerHuman-v1",
                        # default="FeedingSawyerHuman-v1",
                        help='Environment to test (default: DrinkingSawyerHuman-v1)')
    parser.add_argument('--video_record',
                        type=bool,
                        default=False,
                        help='whether to record a video of the rollout')
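    # NOTE: argparse's type=bool is a known pitfall -- bool('False') is True, so any
    # non-empty string passed for --video_record enables it. action='store_true'
    # (or a str2bool helper) would be the usual fix.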
    parser.add_argument('--video_path',
                        type=str,
                        default='IMOGICAssitive.json',
                        help='the parameter file to use')
    args = parser.parse_args()

    param_dir = '/home/zhimin/code/5_thu/rl-robotic-assembly-control/code/pytorch/LAMPO/params/'
    # param_file = 'IMOGICAssitiveJaco.json'
    param_file = 'VICESAssitive.json'
    param_file = os.path.join(param_dir, param_file)
    with open(param_file) as f:
        params = commentjson.load(f)

    # mujoco_eval(args.env)
    # viewer_mujoco(args.env, params)
    viewer_pybullet(args.env, params)
    # env = DrinkingSawyerHumanEnv()
    # done = False
    # env.render()
    # observation, spoon_pos, spoon_orient = env.reset()
    #
    # # time.sleep(10)
    # joint_list = []
    # # joint_last = observation['robot_joint_angles']
    # time_steps = 0
    # coop = True
    # while not done:
    #     action = sample_action(env, coop)
    #
    #     # print("sample_action :", action)
    #     observation, reward, done, info = env.step(action)
    #     # print('robot joints pybullet:', observation['robot_joint_angles'])
    #     if coop:
    #         done = done['__all__']
    #
    #     # print('Robot reward:', reward['robot'], 'Human reward:', reward['human'])
    #
    #     # time.sleep(0.1)
    #
    #     # joint_last = observation['robot_joint_angles']
    #     time_steps += 1
    # viewer(args.env)
    # mujoco_eval(args.env)
    # viewer_mujoco(args.env)
    # print(p.getQuaternionFromEuler([0, 1.57, 0]))
    # controller_name = "OSC_POSE"
    # controller_path = os.path.join('/home/zhimin/code/5_thu/rl-robotic-assembly-control/envs/robosuite/robosuite/',
    #                                'controllers/config/{}.json'.format(controller_name.lower()))
    # controller_config = load_controller_config(custom_fpath=controller_path)
    # print("controller_config :", controller_config)
    # controller_config['sim'] = self.sim
    # controller_config["eef_name"] = "ee_site"
    # controller_config["joint_indexes"] = {
    #     "joints": self.joint_indexes,
    #     "qpos": self._ref_joint_pos_indexes,
    #     "qvel": self._ref_joint_vel_indexes
    # }
    # controller_config["impedance_mode"] = "variable"
    # controller_config["kp_limits"] = [0, 300]
    # controller_config["damping_limits"] = [0, 10]
| 37.530063 | 132 | 0.589232 | 2,975 | 23,719 | 4.472941 | 0.12437 | 0.016984 | 0.021192 | 0.024047 | 0.61539 | 0.556775 | 0.504096 | 0.483129 | 0.460059 | 0.444503 | 0 | 0.053647 | 0.26991 | 23,719 | 631 | 133 | 37.58954 | 0.714789 | 0.336144 | 0 | 0.401338 | 0 | 0.010033 | 0.123526 | 0.017914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0301 | false | 0 | 0.073579 | 0 | 0.113712 | 0.137124 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbc892e0574321e2580bbc1b2acaab52826905f5 | 19,853 | py | Python | src/halomod/concentration.py | sjforeman/halomod | 587db6bc71a77ea60a541b306fc3601eeb424bc9 | [
"MIT"
] | null | null | null | src/halomod/concentration.py | sjforeman/halomod | 587db6bc71a77ea60a541b306fc3601eeb424bc9 | [
"MIT"
] | null | null | null | src/halomod/concentration.py | sjforeman/halomod | 587db6bc71a77ea60a541b306fc3601eeb424bc9 | [
"MIT"
] | null | null | null | """
Module defining concentration-mass relations.

This module defines a base :class:`CMRelation` component class, and a number of specific
concentration-mass relations. In addition, it defines a factory function :func:`make_colossus_cm`
which helps with integration with the ``colossus`` cosmology code. With this function,
the user is able to easily create a ``halomod``-compatible ``Component`` model that
transparently uses ``colossus`` in the background to do the actual computation of the
concentration-mass relation. This means it is easy to use any of the updated models
from ``colossus`` in a native way.

Examples
--------
A simple example of using a native concentration-mass relation::

    >>> from halomod.concentration import Duffy08
    >>> duffy = Duffy08()
    >>> m = np.logspace(10, 15, 100)
    >>> plt.plot(m, duffy.cm(m, z=0))

You can also specify a different concentration-mass relation for the tracer
if you're working with :class:`~halomod.halo_model.TracerHaloModel`::

    >>> from halomod import HaloModel
    >>> hm = HaloModel(halo_concentration_model='Ludlow16',
    >>>                tracer_concentration_model='Duffy08')

Constructing and using a colossus-based relation::

    >>> from halomod.concentration import make_colossus_cm
    >>> diemer = make_colossus_cm(model='diemer15', statistic='median')()
    >>> plt.plot(m, diemer.cm(m, z=1))

Note the extra function call on the second line here -- :func:`make_colossus_cm` returns
a *class*, not an instance. Under the hood, any parameters passed to the function other
than ``model`` are set as "defaults", and can be modified like standard model params.
For instance, using such a model in a broader :class:`~HaloModel` framework::

    >>> diemer19_cls = make_colossus_cm(model='diemer19', ps_args={})
    >>> hm = HaloModel(
    >>>     halo_concentration_model=diemer19_cls,
    >>>     halo_concentration_params={'ps_args': {'model': 'eisenstein98'}}
    >>> )
    >>> hm.update(halo_concentration_params={"ps_args": {"model": 'sugiyama95'}})

Note that while ``statistic`` is a valid argument to the `diemer19` model in COLOSSUS,
we have constructed it without access to that argument (and so it receives its default
value of "median"). This means we *cannot* update it via the ``HaloModel`` interface.
"""
import warnings
from typing import Optional
import numpy as np
from hmf import Component
from scipy import special as sp
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from astropy.cosmology import Planck15
from colossus.halo import concentration
from hmf.cosmology.cosmo import astropy_to_colossus
from hmf.density_field.filters import Filter
from hmf.cosmology.growth_factor import GrowthFactor
from .profiles import Profile, NFW
from hmf.cosmology.cosmo import Cosmology
from hmf.halos.mass_definitions import (
MassDefinition,
SOMean,
SOVirial,
SOCritical,
from_colossus_name,
)
from hmf._internals import pluggable

@pluggable
class CMRelation(Component):
    r"""
    Base-class for Concentration-Mass relations
    """

    _pdocs = r"""
    Parameters
    ----------
    filter0 : :class:`hmf.filters.Filter` instance
        An instance of a filter function, with the power specified at z=0.
        Required for ``Bullock01``.
    growth : :class:`hmf.growth_factor.GrowthFactor` instance
        Specifies the growth function for the cosmology.
        Required for ``Bullock01``.
    delta_c : float, optional
        Critical density for collapse.
        Used in ``Bullock01``.
    mstar : float, optional
        The nonlinear mass at the desired redshift.
        If not provided, will be calculated if required.
    \*\*model_parameters : unpacked-dictionary
        These parameters are model-specific. For any model, list the available
        parameters (and their defaults) using ``<model>._defaults``.
    """
    __doc__ += _pdocs

    _defaults = {}
    native_mdefs = tuple()

    def __init__(
        self,
        cosmo: Cosmology = Cosmology(),
        filter0: Optional[Filter] = None,
        growth: Optional[GrowthFactor] = None,
        delta_c: float = 1.686,
        profile: Optional[Profile] = None,
        mdef: Optional[MassDefinition] = None,
        **model_parameters,
    ):
        # Save instance variables
        self.filter = filter0
        self.growth = GrowthFactor(cosmo=cosmo.cosmo) if growth is None else growth
        self.delta_c = delta_c
        self.mdef = self.native_mdefs[0] if mdef is None else mdef
        self.profile = NFW(self, mdef=self.mdef) if profile is None else profile
        self.cosmo = cosmo
        self.mean_density0 = cosmo.mean_density0

        # TODO: actually implement conversion of mass definitions.
        if self.mdef not in self.native_mdefs:
            warnings.warn(
                f"Requested mass definition '{mdef}' is not in native definitions for "
                f"the '{self.__class__.__name__}' CMRelation. No mass conversion will be "
                f"performed, so results will be wrong. Using '{self.mdef}'."
            )

        super(CMRelation, self).__init__(**model_parameters)

    def mass_nonlinear(self, z):
        """
        Return the nonlinear mass at z.

        Parameters
        ----------
        z : float
            Redshift. Must not be an array.
        """

        def model(lnr):
            return (
                self.filter.sigma(np.exp(lnr)) * self.growth.growth_factor(z)
                - self.delta_c
            ) ** 2

        res = minimize(model, [1.0])
        if res.success:
            r = np.exp(res.x[0])
            return self.filter.radius_to_mass(r, self.mean_density0)  # TODO *(1+z)**3 ????
        else:
            warnings.warn("Minimization failed :(")
            return 0

    def cm(self, m, z=0):
        """
        Return concentration parameter for mass m at z.

        Parameters
        ----------
        z : float
            Redshift. Must not be an array.
        m : float
            Halo Mass.
        """
        pass
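
# Subclasses are expected to override ``cm``. A minimal sketch of a custom relation
# (illustrative only; not part of halomod itself):
#
#     class ConstantCM(CMRelation):
#         native_mdefs = (SOCritical(),)
#
#         def cm(self, m, z=0):
#             return 10.0 * np.ones_like(m)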

def make_colossus_cm(model="diemer15", **defaults):
    r"""
    A factory function which helps with integration with the ``colossus`` cosmology code.
    See :mod:`~halomod.concentration` for an example of how to use it.
    Notice that it returns a *class* :class:`CustomColossusCM`, not an instance.
    """

    class CustomColossusCM(CMRelation):
        _model_name = model
        _defaults = defaults
        native_mdefs = tuple(
            from_colossus_name(d) for d in concentration.models[model].mdefs
        )

        def __init__(self, *args, **kwargs):
            super(CustomColossusCM, self).__init__(*args, **kwargs)
            # TODO: may want a more accurate way of passing sigma8 and ns here.
            astropy_to_colossus(self.cosmo.cosmo, sigma8=0.8, ns=1)

        def cm(self, m, z=0):
            return concentration.concentration(
                M=m,
                mdef=self.mdef.colossus_name,
                z=z,
                model=self._model_name,
                range_return=False,
                range_warning=True,
                **self.params,
            )

    CustomColossusCM.__name__ = model.capitalize()
    CustomColossusCM.__qualname__ = model.capitalize()
    return CustomColossusCM
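
# Usage sketch (mirrors the module docstring; assumes colossus ships a "diemer19" model):
#     Diemer19 = make_colossus_cm(model="diemer19")
#     c = Diemer19().cm(1e12, z=0.5)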

class Bullock01(CMRelation):
    r"""
    Concentration-Mass relation of Bullock et al. (2001) [1]_.

    See documentation for :class:`Bias` for information on input parameters. This
    model has two model parameters.

    Notes
    -----
    The form of the concentration is

    .. math:: c_{\rm vir} = K a/a_c = K (1+z_c)/(1+z)

    The detailed description of the model can be found in [1]_.

    Other Parameters
    ----------------
    F, K : float
        Default values are ``F=0.01`` and ``K=3.4``.

    References
    ----------
    .. [1] Bullock, J.S. et al., "Profiles of dark haloes:
           evolution, scatter and environment",
           https://ui.adsabs.harvard.edu/abs/1996MNRAS.282..347M.
    """

    _defaults = {"F": 0.01, "K": 3.4}
    native_mdefs = (SOCritical(),)

    def zc(self, m, z=0):
        r = self.filter.mass_to_radius(self.params["F"] * m, self.mean_density0)
        nu = self.filter.nu(r, self.delta_c)
        g = self.growth.growth_factor_fn(inverse=True)
        zc = g(np.sqrt(nu))
        zc[zc < z] = z  # hack?
        return zc

    def cm(self, m, z=0):
        return self.params["K"] * (self.zc(m, z) + 1.0) / (z + 1.0)

class Bullock01Power(CMRelation):
    r"""
    Extended Concentration-Mass relation of Bullock et al. (2001) [1]_.

    See documentation for :class:`Bias` for information on input parameters. This
    model has three model parameters.

    Notes
    -----
    The form of the concentration is

    .. math:: c_{\rm vir} = \frac{a}{(1+z)^c}\Big(\frac{m}{m_s}\Big)^b

    where a, b, c and ms are model parameters.

    Other Parameters
    ----------------
    a, b, c : float
        Default values are ``a=9.0``, ``b=-0.13`` and ``c=1.0``.
    ms : float
        Default value is ``None``, in which case it is set to the non-linear mass at z.

    References
    ----------
    .. [1] Bullock, J.S. et al., "Profiles of dark haloes:
           evolution, scatter and environment",
           https://ui.adsabs.harvard.edu/abs/1996MNRAS.282..347M.
    """

    _defaults = {"a": 9.0, "b": -0.13, "c": 1.0, "ms": None}
    native_mdefs = (SOCritical(),)

    def _cm(self, m, ms, a, b, c, z=0):
        return a / (1 + z) ** c * (m / ms) ** b

    def cm(self, m, z=0):
        ms = self.params["ms"] or self.mass_nonlinear(z)
        return self._cm(m, ms, self.params["a"], self.params["b"], self.params["c"], z)

class Maccio07(CMRelation):
    """
    Concentration-Mass relation based on Maccio et al. (2007) [1]_.

    Default values are taken from Padmanabhan et al. (2017) [2]_.

    References
    ----------
    .. [1] Maccio, A. V. et al., "Concentration, spin and shape of dark matter haloes:
           scatter and the dependence on mass and environment",
           https://ui.adsabs.harvard.edu/abs/2007MNRAS.378...55M.
    .. [2] Padmanabhan, H. et al., "A halo model for cosmological neutral hydrogen:
           abundances and clustering",
           https://ui.adsabs.harvard.edu/abs/2017MNRAS.469.2323P/abstract.
    """

    _defaults = {"c_0": 28.65, "gamma": 1.45}
    native_mdefs = (SOMean(),)

    def cm(self, m, z=0):
        # z=0 default added for consistency with the base-class signature.
        return (
            self.params["c_0"]
            * (m * 10 ** (-11)) ** (-0.109)
            * 4
            / (1 + z) ** self.params["gamma"]
        )

class Duffy08(Bullock01Power):
    r"""
    Concentration-mass relation from Duffy et al. (2008) [1]_.

    It has the same formula as :class:`Bullock01Power`,
    but with parameter values refitted.

    See documentation for :class:`Bias` for information on input parameters. This
    model has five model parameters.

    Notes
    -----
    .. note:: Only "NFW" parameters are implemented by default here. Of course, you can
              always pass your own parameters from Table 1 of [1]_.

    Other Parameters
    ----------------
    a, b, c : float
        Defaults are the "NFW" parameters in [1]_.
    ms : float
        Default value is ``2e12``.
    sample : str
        Either "relaxed" (default) or "full". Specifies which set of parameters to take
        as default parameters, from Table 1 of [1]_.

    References
    ----------
    .. [1] Duffy, A. R. et al., "Dark matter halo concentrations in the
           Wilkinson Microwave Anisotropy Probe year 5 cosmology",
           https://ui.adsabs.harvard.edu/abs/2008MNRAS.390L..64D.
    """

    _defaults = {"a": None, "b": None, "c": None, "ms": 2e12, "sample": "relaxed"}
    native_mdefs = (SOCritical(), SOMean(), SOVirial())

    def cm(self, m, z=0):
        # All the parameters defined in Table 1 of Duffy 2008
        set_params = {
            "200c": {
                "full": {"a": 5.71, "b": -0.084, "c": 0.47},
                "relaxed": {"a": 6.71, "b": -0.091, "c": 0.44},
            },
            "vir": {
                "full": {"a": 7.85, "b": -0.081, "c": 0.71},
                "relaxed": {"a": 9.23, "b": -0.09, "c": 0.69},
            },
            "200m": {
                "full": {"a": 10.14, "b": -0.081, "c": 1.01},
                "relaxed": {"a": 11.93, "b": -0.09, "c": 0.99},
            },
        }
        parameter_set = set_params.get(self.mdef.colossus_name, set_params["200c"]).get(
            self.params["sample"]
        )
        a = self.params["a"] or parameter_set["a"]
        b = self.params["b"] or parameter_set["b"]
        c = self.params["c"] or parameter_set["c"]
        return self._cm(m, self.params["ms"], a, b, c, z)

class Zehavi11(Bullock01Power):
    r"""
    Concentration-mass relation from Zehavi et al. (2011) [1]_.

    It has the same formula as :class:`Bullock01Power`,
    but with parameter values refitted.

    See documentation for :class:`Bias` for information on input parameters. This
    model has four model parameters.

    Other Parameters
    ----------------
    a, b, c, ms : float
        Defaults are ``(11.0, -0.13, 1.0, 2.26e12)``.

    References
    ----------
    .. [1] Zehavi, I. et al., "Galaxy Clustering in the Completed SDSS Redshift Survey:
           The Dependence on Color and Luminosity",
           https://ui.adsabs.harvard.edu/abs/2011ApJ...736...59Z.
    """

    _defaults = {"a": 11.0, "b": -0.13, "c": 1.0, "ms": 2.26e12}

class Ludlow16(CMRelation):
    r"""
    Analytic Concentration-Mass relation of Ludlow et al. (2016) [1]_.

    See documentation for :class:`Bias` for information on input parameters. This
    model has two model parameters.

    Notes
    -----
    .. note:: The form of the concentration is described by eq. (6) and eq. (7) in [1]_.

    Other Parameters
    ----------------
    f, C : float
        Default values are ``f=0.02`` and ``C=650``.

    References
    ----------
    .. [1] Ludlow, A. D. et al., "The mass-concentration-redshift relation
           of cold and warm dark matter haloes",
           https://ui.adsabs.harvard.edu/abs/2016MNRAS.460.1214L.
    """

    # Note: only defined for NFW for now.
    _defaults = {
        "f": 0.02,  # Fraction of mass assembled at "formation"
        "C": 650,  # Constant scaling
    }
    native_mdefs = (SOCritical(),)

    def delta_halo(self, z=0):
        return self.mdef.halo_overdensity_crit(z, self.cosmo.cosmo)

    def _eq6_zf(self, c, C, z):
        cosmo = self.cosmo.cosmo
        M2 = self.profile._h(1) / self.profile._h(c)
        rho_2 = self.delta_halo(z) * c ** 3 * M2
        rhoc = rho_2 / C
        in_brackets = (
            rhoc * (cosmo.Om0 * (1 + z) ** 3 + cosmo.Ode0) - cosmo.Ode0
        ) / cosmo.Om0
        c = c[in_brackets > 0]
        in_brackets = in_brackets[in_brackets > 0]
        return c, in_brackets ** 0.33333 - 1.0

    def _eq7(self, f, C, m, z):
        cvec = np.logspace(0, 2, 400)

        # Calculate zf for all values in cvec
        cvec, zf = self._eq6_zf(cvec, C, z)

        # Mask out those that are unphysical
        mask = (np.isnan(zf) | np.isinf(zf)) | (zf < 0)
        zf = zf[~mask]
        cvec = cvec[~mask]

        lhs = self.profile._h(1) / self.profile._h(cvec)

        rf = self.filter.mass_to_radius(f * m, self.mean_density0)
        r = self.filter.mass_to_radius(m, self.mean_density0)
        sigf = self.filter.sigma(rf) ** 2
        sigr = self.filter.sigma(r) ** 2
        gf = self.growth.growth_factor_fn()

        num = self.delta_c * (1.0 / gf(zf) - 1.0 / gf(z))
        den = np.sqrt(2 * (sigf - sigr))
        rhs = sp.erfc(np.outer(num, 1.0 / den))

        # indx_mass = 0
        # print('f, rf: ', rf[indx_mass], r[indx_mass])
        # print('sigf: ', sigf[indx_mass])
        # print('sigr: ', sigr[indx_mass])
        # print('lhs: ', lhs)
        # print("rhs: ", rhs[:, indx_mass])
        # print("num: ", num)
        # print("den: ", den[indx_mass])

        if np.isscalar(m):
            rhs = rhs[:, 0]
            spl = interp1d(lhs - rhs, cvec)
            return spl(0.0)
        else:
            out = np.zeros_like(m)
            for i in range(len(m)):
                arg = lhs - rhs[:, i]
                if np.sum(arg <= 0) == 0:
                    out[i] = cvec.min()
                elif np.sum(arg >= 0) == 0:
                    out[i] = cvec.max()
                else:
                    spl = interp1d(arg, cvec)
                    out[i] = spl(0.0)
            return out

    def cm(self, m, z=0):
        return self._eq7(self.params["f"], self.params["C"], m, z)

class Ludlow16Empirical(CMRelation):
    r"""
    Empirical Concentration-Mass relation of Ludlow et al. (2016) [1]_
    for a Planck-like cosmology.

    See documentation for :class:`Bias` for information on input parameters. This
    model has eight model parameters.

    Notes
    -----
    .. note:: The form of the concentration is described by eqs. (C1)-(C6) in [1]_.

    Other Parameters
    ----------------
    c0_0, c0_z, beta_0, beta_z, gamma1_0, gamma1_z, gamma2_0, gamma2_z : float
        Default values are ``(3.395, -0.215, 0.307, 0.54, 0.628, -0.047, 0.317, -0.893)``.

    References
    ----------
    .. [1] Ludlow, A. D. et al., "The mass-concentration-redshift relation
           of cold and warm dark matter haloes",
           https://ui.adsabs.harvard.edu/abs/2016MNRAS.460.1214L.
    """

    _defaults = {
        "c0_0": 3.395,
        "c0_z": -0.215,
        "beta_0": 0.307,
        "beta_z": 0.54,
        "gamma1_0": 0.628,
        "gamma1_z": -0.047,
        "gamma2_0": 0.317,
        "gamma2_z": -0.893,
    }
    native_mdefs = (SOCritical(),)

    def _c0(self, z):
        return self.params["c0_0"] * (1 + z) ** self.params["c0_z"]

    def _beta(self, z):
        return self.params["beta_0"] * (1 + z) ** self.params["beta_z"]

    def _gamma1(self, z):
        return self.params["gamma1_0"] * (1 + z) ** self.params["gamma1_z"]

    def _gamma2(self, z):
        return self.params["gamma2_0"] * (1 + z) ** self.params["gamma2_z"]

    def _nu_0(self, z):
        a = 1.0 / (1 + z)
        return (
            4.135 - 0.564 / a - 0.21 / a ** 2 + 0.0557 / a ** 3 - 0.00348 / a ** 4
        ) / self.growth.growth_factor(z)

    def cm(self, m, z=0):
        warnings.warn(
            "Only use the Ludlow16Empirical c(m,z) relation with a Planck-like cosmology"
        )
        # May be better to use real nu, but we'll do what they do in the paper
        # r = self.filter.mass_to_radius(m, self.mean_density0)
        # nu = self.filter.nu(r, self.delta_c) / self.growth.growth_factor(z)
        xi = 1e10 / m
        sig = (
            self.growth.growth_factor(z)
            * 22.26
            * xi ** 0.292
            / (1 + 1.53 * xi ** 0.275 + 3.36 * xi ** 0.198)
        )
        nu = self.delta_c / sig
        return (
            self._c0(z)
            * (nu / self._nu_0(z)) ** (-self._gamma1(z))
            * (1 + (nu / self._nu_0(z)) ** (1.0 / self._beta(z)))
            ** (-self._beta(z) * (self._gamma2(z) - self._gamma1(z)))
        )

class Ludlow2016(Ludlow16):
    """This class is deprecated -- use :class:`Ludlow16` instead."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "This class is deprecated -- use Ludlow16 instead.",
            category=DeprecationWarning,
        )
        super().__init__(*args, **kwargs)


class Ludlow2016Empirical(Ludlow16Empirical):
    """This class is deprecated -- use :class:`Ludlow16Empirical` instead."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "This class is deprecated -- use Ludlow16Empirical instead.",
            category=DeprecationWarning,
        )
        super().__init__(*args, **kwargs)
| 32.599343 | 97 | 0.578754 | 2,624 | 19,853 | 4.278582 | 0.204649 | 0.021377 | 0.022268 | 0.008016 | 0.323506 | 0.258395 | 0.226329 | 0.202993 | 0.185 | 0.168166 | 0 | 0.045732 | 0.281872 | 19,853 | 608 | 98 | 32.652961 | 0.741741 | 0.417771 | 0 | 0.108392 | 0 | 0 | 0.148554 | 0.012775 | 0 | 0 | 0 | 0.004934 | 0 | 1 | 0.087413 | false | 0.003497 | 0.055944 | 0.038462 | 0.325175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbca6237b21b15659ab2ba528ae1618f2e079171 | 3,315 | py | Python | elasticapm/transport/http.py | piquadrat/apm-agent-python | e071c7afdd65cf5381c7ab36fc10431d0085e50b | [
"BSD-3-Clause"
] | null | null | null | elasticapm/transport/http.py | piquadrat/apm-agent-python | e071c7afdd65cf5381c7ab36fc10431d0085e50b | [
"BSD-3-Clause"
] | null | null | null | elasticapm/transport/http.py | piquadrat/apm-agent-python | e071c7afdd65cf5381c7ab36fc10431d0085e50b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import os
import ssl

import certifi
import urllib3
from urllib3.exceptions import MaxRetryError, TimeoutError

from elasticapm.transport.base import TransportException
from elasticapm.transport.http_base import (AsyncHTTPTransportBase,
                                            HTTPTransportBase)
from elasticapm.utils import compat

logger = logging.getLogger('elasticapm.transport.http')


class Transport(HTTPTransportBase):
    scheme = ['http', 'https']

    def __init__(self, parsed_url, **kwargs):
        super(Transport, self).__init__(parsed_url, **kwargs)
        pool_kwargs = {
            'cert_reqs': 'CERT_REQUIRED',
            'ca_certs': certifi.where(),
            'block': True,
        }
        if not self._verify_server_cert:
            pool_kwargs['cert_reqs'] = ssl.CERT_NONE
            pool_kwargs['assert_hostname'] = False
        proxy_url = os.environ.get('HTTPS_PROXY', os.environ.get('HTTP_PROXY'))
        if proxy_url:
            self.http = urllib3.ProxyManager(proxy_url, **pool_kwargs)
        else:
            self.http = urllib3.PoolManager(**pool_kwargs)

    def send(self, data, headers, timeout=None):
        response = None
        # ensure headers are byte strings
        headers = {k.encode('ascii') if isinstance(k, compat.text_type) else k:
                   v.encode('ascii') if isinstance(v, compat.text_type) else v
                   for k, v in headers.items()}
        if compat.PY2 and isinstance(self._url, compat.text_type):
            url = self._url.encode('utf-8')
        else:
            url = self._url
        try:
            try:
                response = self.http.urlopen(
                    'POST', url, body=data, headers=headers, timeout=timeout, preload_content=False
                )
                logger.info('Sent request, url=%s size=%.2fkb status=%s', url, len(data) / 1024.0, response.status)
            except Exception as e:
                print_trace = True
                if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):
                    message = (
                        "Connection to APM Server timed out "
                        "(url: %s, timeout: %s seconds)" % (self._url, timeout)
                    )
                    print_trace = False
                else:
                    message = 'Unable to reach APM Server: %s (url: %s)' % (
                        e, self._url
                    )
                raise TransportException(message, data, print_trace=print_trace)
            body = response.read()
            if response.status >= 400:
                if response.status == 429:  # rate-limited
                    message = 'Temporarily rate limited: '
                    print_trace = False
                else:
                    message = 'HTTP %s: ' % response.status
                    print_trace = True
                message += body.decode('utf8')
                raise TransportException(message, data, print_trace=print_trace)
            return response.getheader('Location')
        finally:
            if response:
                response.close()


class AsyncTransport(AsyncHTTPTransportBase, Transport):
    scheme = ['http', 'https']
    async_mode = True
    sync_transport = Transport
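
# Rough usage sketch (the agent normally constructs transports itself; the URL and
# payload here are illustrative only):
#     from urllib.parse import urlparse
#     transport = Transport(urlparse("http://localhost:8200"))
#     transport.send(b"[]", {"Content-Type": "application/json"}, timeout=5)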
| 37.670455 | 115 | 0.559276 | 338 | 3,315 | 5.340237 | 0.378698 | 0.044321 | 0.023269 | 0.019945 | 0.088643 | 0.059834 | 0.059834 | 0.059834 | 0 | 0 | 0 | 0.009183 | 0.342986 | 3,315 | 87 | 116 | 38.103448 | 0.819559 | 0.01991 | 0 | 0.189189 | 0 | 0 | 0.103544 | 0.007704 | 0 | 0 | 0 | 0 | 0.013514 | 1 | 0.027027 | false | 0 | 0.121622 | 0 | 0.243243 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbcbc98d532e6ba532d16fd12c2c84584d15e7ca | 691 | py | Python | tests/integration/prometheus-tester/src/metrics.py | sed-i/prometheus-operator | f53f16d98680ca1aa54f1771e2cb5bc2406d3438 | [
"Apache-2.0"
] | 7 | 2020-09-24T13:44:38.000Z | 2021-08-06T13:45:53.000Z | tests/integration/prometheus-tester/src/metrics.py | sed-i/prometheus-operator | f53f16d98680ca1aa54f1771e2cb5bc2406d3438 | [
"Apache-2.0"
] | 90 | 2020-09-25T18:47:09.000Z | 2021-12-07T10:59:10.000Z | tests/integration/prometheus-tester/src/metrics.py | sed-i/prometheus-operator | f53f16d98680ca1aa54f1771e2cb5bc2406d3438 | [
"Apache-2.0"
] | 15 | 2020-09-24T10:02:20.000Z | 2021-11-30T14:31:40.000Z | import random
import time

from prometheus_client import Summary, start_http_server

# Metric that tracks time spent and number of requests made.
REQUEST_TIME = Summary("request_processing_seconds", "Time spent processing request")


@REQUEST_TIME.time()
def process_request(t):
    """A fake function that takes a configurable amount of time to run.

    Args:
        t: integer specifying amount of time that should be
            spent in processing this request
    """
    time.sleep(t)


def main(port=8000):
    """Expose a metrics endpoint to prometheus."""
    start_http_server(port)
    while True:
        process_request(random.random())
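
# Once running, Prometheus (or a plain `curl http://localhost:8000/metrics`) can scrape
# the endpoint; the Summary shows up as request_processing_seconds_count / _sum.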
if __name__ == "__main__":
    main()
| 23.033333 | 85 | 0.710564 | 93 | 691 | 5.075269 | 0.548387 | 0.069915 | 0.063559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007313 | 0.208394 | 691 | 29 | 86 | 23.827586 | 0.855576 | 0.387844 | 0 | 0 | 0 | 0 | 0.159494 | 0.065823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbcd45aef039ca2b43e93b58ad3683f4c087555e | 3,484 | py | Python | vseq/utils/device.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | 7 | 2021-03-25T12:33:53.000Z | 2022-03-23T13:10:31.000Z | vseq/utils/device.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | null | null | null | vseq/utils/device.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | null | null | null | import os
import subprocess
import re
from io import StringIO
from typing import Optional, Union, List

import torch
import pandas as pd


def get_visible_devices_global_ids():
    """Return the global indices of the visible devices"""
    if "CUDA_VISIBLE_DEVICES" not in os.environ:
        return list(range(torch.cuda.device_count()))
    visible_devices = os.environ["CUDA_VISIBLE_DEVICES"]
    # Split on ';' or ',' with optional trailing whitespace. The original pattern
    # ('; |, ') missed the common "0,1" spelling without a space after the comma.
    visible_devices = re.split(r'[;,]\s*', visible_devices)
    visible_devices = [int(idx) for idx in visible_devices]
    return visible_devices


def get_gpu_memory_usage() -> pd.DataFrame:
    """Return the free and used memory per GPU device on the node"""
    gpu_stats = subprocess.check_output(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
    gpu_df = pd.read_csv(StringIO(gpu_stats.decode("utf-8")), names=["memory.used", "memory.free"], skiprows=1)
    gpu_df.rename(columns={"memory.used": "used", "memory.free": "free"}, inplace=True)
    gpu_df["free"] = gpu_df["free"].map(lambda x: int(x.rstrip(" [MiB]")))
    gpu_df["used"] = gpu_df["used"].map(lambda x: int(x.rstrip(" [MiB]")))
    print("GPU usage:\n{}".format(gpu_df))
    return gpu_df


def get_free_gpus(n_gpus: int = 1, require_unused: bool = True) -> Union[torch.device, List[torch.device]]:
    """Return one or more available/visible (and unused) devices giving preference to those with most free memory"""
    gpu_df = get_gpu_memory_usage()
    visible_devices = get_visible_devices_global_ids()
    invisible_devices = set(range(torch.cuda.device_count())) - set(visible_devices)
    if invisible_devices:
        gpu_df = gpu_df.drop(index=invisible_devices)
    if require_unused:
        gpu_df = gpu_df[gpu_df.used < 10]
    gpu_df = gpu_df.sort_values(by="free")
    device_ids = gpu_df.iloc[:n_gpus].index.to_list()
    devices = [torch.device(idx) for idx in device_ids]
    return devices[0] if len(devices) == 1 else devices
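
# Example (assumes at least one visible, idle GPU; `model` is a placeholder nn.Module):
#     device = get_free_gpus(n_gpus=1)
#     model.to(device)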


def get_device(idx: Optional[int] = None):
    """Return the device to run on (cpu or cuda).

    If `CUDA_VISIBLE_DEVICES` is not set we assume that no devices are wanted and return the CPU.
    This is contrary to standard `torch.cuda.is_available()` behaviour.
    If idx is specified, return the GPU corresponding to that index in the local scope.
    """
    if not torch.cuda.is_available() or "CUDA_VISIBLE_DEVICES" not in os.environ:
        return torch.device("cpu")
    if idx is None:
        return torch.device("cuda:0")
    local_device_indices = list(range(torch.cuda.device_count()))
    return torch.device(f"cuda:{local_device_indices[idx]}")


def test_gpu_functionality():
    """Returns `True` if a GPU is available and functionality is OK, otherwise raises an error"""
    # Set GPU as the device if available, else CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)
    print()
    if device.type == "cuda":
        print(torch.cuda.get_device_name(0))
        print("Memory Usage:")
        print("Allocated:", round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1), "GB")
        print("Cached:   ", round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1), "GB")
        print("CUDA version:", torch.version.cuda)
        torch.zeros(1).cuda()
        return True
    else:
        # Deliberately provoke an error: .cuda() raises on CPU-only machines.
        torch.zeros(1).cuda()


if __name__ == "__main__":
    test_gpu_functionality()
    free_gpu_id = get_free_gpus()
    print(free_gpu_id)
| 35.55102 | 116 | 0.681401 | 515 | 3,484 | 4.421359 | 0.27767 | 0.03733 | 0.031621 | 0.017567 | 0.125165 | 0.091348 | 0.053579 | 0.033377 | 0 | 0 | 0 | 0.008837 | 0.188002 | 3,484 | 97 | 117 | 35.917526 | 0.796041 | 0.188002 | 0 | 0.033898 | 0 | 0 | 0.124955 | 0.024057 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.118644 | 0 | 0.338983 | 0.152542 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbd6467f210a99be3dd4f59e8ea620fd1f8341f7 | 2,132 | py | Python | infra_macros/fbcode_macros/tests/lib/copy_rule_test.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | infra_macros/fbcode_macros/tests/lib/copy_rule_test.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | infra_macros/fbcode_macros/tests/lib/copy_rule_test.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

from __future__ import absolute_import, division, print_function, unicode_literals

import tests.utils
from tests.utils import dedent


class CopyRuleTest(tests.utils.TestCase):
    includes = [("@fbcode_macros//build_defs/lib:copy_rule.bzl", "copy_rule")]

    @tests.utils.with_project()
    def test_copy_rule_creates_genrules(self, root):
        root.addFile(
            "BUCK",
            dedent(
                """
                load("@fbcode_macros//build_defs/lib:copy_rule.bzl", "copy_rule")
                copy_rule(
                    "$(location :foo)",
                    "simple",
                )
                copy_rule(
                    "$(location :foo)",
                    "simple_with_out",
                    out="some_out",
                )
                copy_rule(
                    "$(location :foo)",
                    "propagates_versions",
                    propagate_versions=True,
                )
                """
            ),
        )
        expected = dedent(
            r"""
            cxx_genrule(
              name = "propagates_versions",
              cmd = "mkdir -p `dirname $OUT` && cp $(location :foo) $OUT",
              labels = [
                "is_fully_translated",
              ],
              out = "propagates_versions",
            )
            genrule(
              name = "simple",
              cmd = "mkdir -p `dirname $OUT` && cp $(location :foo) $OUT",
              labels = [
                "is_fully_translated",
              ],
              out = "simple",
            )
            genrule(
              name = "simple_with_out",
              cmd = "mkdir -p `dirname $OUT` && cp $(location :foo) $OUT",
              labels = [
                "is_fully_translated",
              ],
              out = "some_out",
            )
            """
        )
        result = root.runAudit(["BUCK"])
        self.validateAudit({"BUCK": expected}, result)
| 28.052632 | 82 | 0.501876 | 205 | 2,132 | 5.02439 | 0.44878 | 0.062136 | 0.046602 | 0.05534 | 0.309709 | 0.261165 | 0.261165 | 0.261165 | 0.261165 | 0.261165 | 0 | 0.003051 | 0.385084 | 2,132 | 75 | 83 | 28.426667 | 0.782609 | 0.130394 | 0 | 0 | 0 | 0 | 0.107794 | 0.072968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbdb826d201e1bc50b91f8ea7c248711f0ff594c | 3,525 | py | Python | app.py | pg3io/onsevoitquand | 33e6fd71a93e7f42cc82b080ba5dd821fb19043a | [
"Apache-2.0"
] | 8 | 2020-03-24T17:01:45.000Z | 2020-12-05T08:15:34.000Z | app.py | pg3io/onsevoitquand | 33e6fd71a93e7f42cc82b080ba5dd821fb19043a | [
"Apache-2.0"
] | 3 | 2020-04-06T06:39:05.000Z | 2020-04-14T19:54:02.000Z | app.py | pg3io/onsevoitquand | 33e6fd71a93e7f42cc82b080ba5dd821fb19043a | [
"Apache-2.0"
] | 2 | 2020-03-30T08:08:01.000Z | 2020-04-08T13:09:28.000Z | import os
import datetime
import sass
import re
import yaml
from flask import Flask, render_template, jsonify
from flask import send_from_directory
from cal_setup import get_calendar_service
from html.parser import HTMLParser
from flask_fontawesome import FontAwesome

app = Flask(__name__)
fa = FontAwesome(app)

filesource = "./config.yaml"
sass_map = {"static/scss/style.scss": "static/style.css"}
labelList = ["webinar", "url", "tags", "city"]
events = list()
file = list()


class HTMLFilter(HTMLParser):
    text = ""

    def handle_data(self, data):
        self.text += data


# generate the css
def compile_sass_to_css(sass_map):
    for source, dest in sass_map.items():
        with open(dest, "w") as outfile:
            outfile.write(sass.compile(filename=source))


# read the yaml config file
def read_yaml():
    with open(filesource, 'r') as stream:
        try:
            file = yaml.load(stream, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            print(exc)
    return file


# re-split labels after the html-to-text conversion
def split_label(event, label):
    event['desc'] = event['desc'].replace(label + ":", " " + label + ":")
    return event


# search for a label in the description (see the illustrative example below)
def search_label(event, label, vTags):
    parse = (searh for searh in event['desc'].split() if re.match(r"^" + label + ":.*", searh))
    for i in parse:
        if label == "tags":
            fullTags = i.replace(label + ":", "").split(",")
            event[label] = list(d for d in fullTags if d in vTags)
        else:
            event[label] = i.replace(label + ":", "").split(",")
        event['desc'] = event['desc'].replace(i, "")
    return event
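
# Illustrative example: a description containing "tags:python,flask city:Paris"
# yields event['tags'] == ['python', 'flask'] (filtered against vTags) and
# event['city'] == ['Paris'], with the matched tokens stripped from event['desc'].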

# get upcoming events from Google Calendar
def get_gcalendar(idCal):
    service = get_calendar_service()
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    events_result = service.events().list(calendarId=idCal, timeMin=now, maxResults=100,
                                          singleEvents=True, orderBy='startTime').execute()
    events = events_result.get('items', [])
    return events


# format the fields for the timeline
def parse_data(events, vTags):
    for event in events:
        if 'start' in event:
            start = event['start'].get('dateTime', event['start'].get('date'))
            event['datestart'] = datetime.datetime.fromisoformat(start).strftime("%d/%m/%y %H:%M")
        if 'end' in event:
            end = event['end'].get('dateTime', event['end'].get('date'))
            event['dateend'] = datetime.datetime.fromisoformat(end).strftime("%d/%m/%y %H:%M")
        if 'description' in event:
            # description: html to plain text
            desc = HTMLFilter()
            desc.feed(event['description'])
            event['desc'] = desc.text
            # split, search and clean labels
            for i in labelList:
                event = split_label(event, i)
            for i in labelList:
                event = search_label(event, i, vTags)
    return events


# routes
@app.route('/')
def accueil():
    compile_sass_to_css(sass_map)
    file = read_yaml()
    events = get_gcalendar(file['idCalendar'])
    events = parse_data(events, file['valideTags'])
    return render_template('index.html', events=events, file=file)


@app.errorhandler(404)
def page_not_found(e):
    compile_sass_to_css(sass_map)
    file = read_yaml()
    return render_template('404.html', file=file), 404


@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.png', mimetype='image/png')


@app.route('/ping')
def ping():
    return "pong"
if __name__ == '__main__':
app.run(debug=True) | 31.473214 | 137 | 0.642553 | 462 | 3,525 | 4.781385 | 0.324675 | 0.024445 | 0.017655 | 0.021729 | 0.114984 | 0.055681 | 0.045269 | 0.031689 | 0.031689 | 0 | 0 | 0.004315 | 0.211064 | 3,525 | 112 | 138 | 31.473214 | 0.790004 | 0.055887 | 0 | 0.113636 | 0 | 0 | 0.100331 | 0.006629 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.113636 | 0.022727 | 0.363636 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbdc3dd2f5faa5c9d61f81de33cd34f27bc55fd3 | 1,988 | py | Python | test.py | svenpruefer/astrodynamics | a5441ea6cf47f99fabffe2ee6538c4c21a777f48 | [
"MIT"
] | 1 | 2019-01-02T00:56:57.000Z | 2019-01-02T00:56:57.000Z | test.py | svenpruefer/astrodynamics | a5441ea6cf47f99fabffe2ee6538c4c21a777f48 | [
"MIT"
] | null | null | null | test.py | svenpruefer/astrodynamics | a5441ea6cf47f99fabffe2ee6538c4c21a777f48 | [
"MIT"
] | null | null | null | ##########################################
# Import necessary classes and libraries #
##########################################
from celestial_object import *
from kepler import *
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
############################
# Define Figure parameters #
############################
fig = plt.figure()
ax = fig.gca(projection='3d')
#####################
# Define Parameters #
#####################
mu = 1.0 # Gravitational Parameter, equal to product of gravitational constant and central mass in restricted 2-body approximation
###################
# Define Objects #
###################
position_1 = np.array([3.0 / 4 * np.sqrt(3), 3.0 /4, -0.1],float)
velocity_1 = np.array([-1.0/(2*np.sqrt(2)), np.sqrt(3) / (1.6 * np.sqrt(2)), 1.0 / np.sqrt(2)],float)
mass_1 = 2
#test_body_1 = celestial_body.from_position_velocity(1,mu,position_1,velocity_1)
position_2 = np.array([2.5 / 4 * np.sqrt(3), 2.9 /4, 0.3],float)
velocity_2 = np.array([0.7/(2*np.sqrt(2)), -np.sqrt(3) / (2.4 * np.sqrt(2)), -1.0 / np.sqrt(2)],float)
mass_2 = 1
mu = 1 * (mass_1 + mass_2)
#test_body_2 = celestial_body.from_position_velocity(1,mu,position_2,velocity_2)
#body_1, body_2 = kepler_problem(1,position_1,velocity_1,1,position_2,velocity_2)
total_body = kepler_problem(mass_1,position_1,velocity_1,mass_2,position_2,velocity_2)
#############
# Plotting #
#############
#position1 = body_1.export_orbit(100)
#position2 = body_2.export_orbit(100)
position1 = mu / mass_1 * total_body.export_orbit(100)
position2 = - mu / mass_2 * total_body.export_orbit(100)
#lines = np.vstack( (position1[20,:],position2[20,:]) )
#print(position1)
#print(lines)
ax.scatter(np.array([0]),np.array([0]),np.array([0]))
ax.plot(position1[:,0],position1[:,1],position1[:,2])
ax.plot(position2[:,0],position2[:,1],position2[:,2])
#ax.plot(lines[:,0],lines[:,1],lines[:,2])
ax.set_zlim(-5.0,5.0)
plt.show()
| 29.235294 | 130 | 0.620724 | 304 | 1,988 | 3.894737 | 0.253289 | 0.050676 | 0.035473 | 0.045608 | 0.233108 | 0.162162 | 0.141892 | 0.116554 | 0.04223 | 0.04223 | 0 | 0.071709 | 0.102113 | 1,988 | 67 | 131 | 29.671642 | 0.591597 | 0.333501 | 0 | 0 | 0 | 0 | 0.00189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbdef67cff10af3011ac731abe8c8b528ad7d20c | 710 | py | Python | src/pairing_ap.py | HypoChloremic/KlinMikroTools | 5831d8a6e13295ca2a5e586ad3d65effef23cc41 | [
"Apache-2.0"
] | null | null | null | src/pairing_ap.py | HypoChloremic/KlinMikroTools | 5831d8a6e13295ca2a5e586ad3d65effef23cc41 | [
"Apache-2.0"
] | null | null | null | src/pairing_ap.py | HypoChloremic/KlinMikroTools | 5831d8a6e13295ca2a5e586ad3d65effef23cc41 | [
"Apache-2.0"
] | null | null | null | ## Pairing AP numbers
from analysis_script import KlinMikroTools

# The original module used a bare `run` object without defining it; instantiating the
# imported class here is an assumption about how analysis_script is meant to be used.
run = KlinMikroTools()

xl, sheetnames = run.load_excel("2015_2018_all.xls")
keys, frysdata = run.load_data_excel(xl, "15_18_frys_all", nativeList=True)
xl, sheetnames = run.load_excel("2015_2018_all.xls")
keys, allData = run.load_data_excel(xl, "Main", nativeList=True)

allAp = run.fix_AP(allData, 0, 2)
allAp = [i[0] for i in allAp]
AP = [i[3] for i in frysdata]

newData = []
for ind, i in enumerate(AP):
    for ind2, k in enumerate(allAp):
        try:
            if k in i:
                newData.append(frysdata[ind])
                for z in allData[ind2]:
                    newData[-1].append(z)
        except TypeError:
            pass

st = run.gen_csv(newData)
run.save_csv(st)
| 23.666667 | 78 | 0.669014 | 115 | 710 | 3.982609 | 0.469565 | 0.061135 | 0.065502 | 0.082969 | 0.262009 | 0.183406 | 0.183406 | 0.183406 | 0.183406 | 0.183406 | 0 | 0.047872 | 0.205634 | 710 | 29 | 79 | 24.482759 | 0.764184 | 0.025352 | 0 | 0.1 | 0 | 0 | 0.078907 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.05 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbdffc5b6fce9d6ee7b13743afc5931ce0223e87 | 558 | py | Python | tests/test_rules.py | tripcher/flake8-obey-import-goat | 78f0d18369c2de369ff9c6a190386a808816db30 | [
"MIT"
] | 5 | 2022-01-07T12:22:41.000Z | 2022-02-09T08:34:23.000Z | tests/test_rules.py | tripcher/flake8-obey-import-goat | 78f0d18369c2de369ff9c6a190386a808816db30 | [
"MIT"
] | null | null | null | tests/test_rules.py | tripcher/flake8-obey-import-goat | 78f0d18369c2de369ff9c6a190386a808816db30 | [
"MIT"
] | 1 | 2022-03-18T06:52:16.000Z | 2022-03-18T06:52:16.000Z | import pytest
from flake8_obey_import_goat.rules import collect_rules_for


@pytest.mark.parametrize(
    'filename, expected',
    [
        ('a/foo.py', [('a', '')]),
        ('a/bar.py', [('a', ''), ('b', '')]),
        ('b/fuz/baz.py', [('f', '')]),
        ('b/bar/fuz/a/baz.py', [('f', '')]),
        ('a/fuz/bar.py', [('a', ''), ('b', ''), ('f', '')]),
        ('b/baz.py', []),
        ('b/bar/baz.py', []),
    ],
)
def test_collect_rules_for_main_cases(filename, expected, all_rules):
    assert collect_rules_for(filename, all_rules) == expected
| 27.9 | 69 | 0.5 | 71 | 558 | 3.732394 | 0.380282 | 0.075472 | 0.169811 | 0.05283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002288 | 0.216846 | 558 | 19 | 70 | 29.368421 | 0.604119 | 0 | 0 | 0 | 0 | 0 | 0.18638 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbe1693b29bac2c5c576a5649d15f853c2df5c5c | 2,262 | py | Python | 2019-2020/Zima/Python/Lista06/crawler.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | 2019-2020/Zima/Python/Lista06/crawler.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | 2019-2020/Zima/Python/Lista06/crawler.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | import re
import urllib.request
from bs4 import BeautifulSoup as Soup
from bs4 import Comment
from nltk import tokenize


def is_url(url):
    if isinstance(url, str):
        http = re.compile('http')
        return http.match(url)  # string in string / find
    else:
        return False


def tag_visible(element):
    if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
        return False
    if isinstance(element, Comment):
        return False
    return True


def text_from_html(body):
    soup = Soup(body, 'html.parser')
    texts = soup.findAll(text=True)
    visible_texts = filter(tag_visible, texts)
    return u" ".join(t.strip() for t in visible_texts)


def extract_text(text):
    # regex = re.compile("[^.]* Python [^.]*(\.|\!|\?)")
    return [sentence for sentence in text if 'Python' in sentence]


def find_sentences_with_python(site):
    url = urllib.request.urlopen(site).read().decode('utf-8')
    html = Soup(url, 'html.parser')
    text = html.body.find_all(text=re.compile(r"([^.]*?Python[^.]*\.)"))
    # text = text_from_html(url)
    return extract_text(text)
    # return txt


def find_first_not_visited(links_list, visited_sites):
    for link in links_list:
        if link not in visited_sites:  # a set would make this lookup O(1)
            return link


def extract_links_from_a_tags(src_a_tags_list, dst_links_list):
    for a_tag in src_a_tags_list:
        url = a_tag.get('href')
        if is_url(url):
            dst_links_list.append(url)


def crawl(start_page, distance, action):
    visited_sites = []  # a set would be faster
    yield (start_page, action(start_page))
    visited_sites.append(start_page)  # visited_sites.add
    start_site = urllib.request.urlopen(start_page).read()
    html = Soup(start_site, 'html.parser')
    a_tags_list = html.find_all('a')
    links_list = []
    extract_links_from_a_tags(a_tags_list, links_list)
    n_of_visited = 1
    while n_of_visited < distance:
        link_to_visit = find_first_not_visited(links_list, visited_sites)
        yield (link_to_visit, action(link_to_visit))
        visited_sites.append(link_to_visit)
        n_of_visited += 1
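
# Note: crawl() only gathers links from the start page, so `distance` effectively means
# "number of pages visited from that single link list", not a link-graph depth.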
it = crawl('https://www.python.org', 3, find_sentences_with_python)
for i in it:
    print(i)
| 28.632911 | 89 | 0.667993 | 329 | 2,262 | 4.340426 | 0.316109 | 0.044118 | 0.02521 | 0.032213 | 0.085434 | 0.056022 | 0.056022 | 0.056022 | 0 | 0 | 0 | 0.003363 | 0.211317 | 2,262 | 78 | 90 | 29 | 0.797085 | 0.066755 | 0 | 0.053571 | 0 | 0 | 0.062351 | 0.009995 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.089286 | 0.017857 | 0.392857 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbe259ba0aaeb87d257608e784291afdb643ac7f | 623 | py | Python | Python/vijuneru_ango_make.py | PotatoTimeKun/programming | 7a825ae037b4a39752b10563466657d879d17a5c | [
"MIT"
] | 1 | 2022-03-08T04:20:28.000Z | 2022-03-08T04:20:28.000Z | Python/vijuneru_ango_make.py | PotatoTimeKun/programming | 7a825ae037b4a39752b10563466657d879d17a5c | [
"MIT"
] | null | null | null | Python/vijuneru_ango_make.py | PotatoTimeKun/programming | 7a825ae037b4a39752b10563466657d879d17a5c | [
"MIT"
] | null | null | null | """Contains a function that builds Vigenère ciphers."""
def make_vij(key: str, sent: str) -> str:
    """
    Takes a key as the first argument and plaintext as the second,
    and returns the Vigenère-enciphered text.
    """
    x, y = 0, 0
    ang = ""
    key = key.lower()
    sent = sent.lower()
    while y < len(sent):
        if ord(sent[y]) >= ord('a') and ord(sent[y]) <= ord('z'):
            ang += chr(ord('A') + (ord(sent[y]) + ord(key[x]) - ord('a') * 2) % 26)
            x += 1
        else:
            ang += sent[y]
        y += 1
        x %= len(key)
    return ang
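
# Classic check (Wikipedia's Vigenère example):
#     make_vij("lemon", "attackatdawn") == "LXFOPVEFRNHR"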
if __name__ == "__main__":
    print("Vigenère cipher generator\nkey=", end='')
    key = input()
    print("text=", end='')
    sen = input()
    print("->" + make_vij(key, sen))
| 24.92 | 72 | 0.484751 | 84 | 623 | 3.47619 | 0.428571 | 0.068493 | 0.082192 | 0.113014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015766 | 0.287319 | 623 | 24 | 73 | 25.958333 | 0.641892 | 0.088283 | 0 | 0 | 0 | 0 | 0.066922 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0 | 0 | 0.1 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbe5f75fed7475ec2230053617d777b756b4af21 | 2,154 | py | Python | grobid_superconductors/linking/data_model.py | lfoppiano/grobid-superconductors-tools | e5fdf2f15d7fe319f17717cfde98e92d7eab43a8 | [
"Apache-2.0"
] | 1 | 2022-01-11T08:28:43.000Z | 2022-01-11T08:28:43.000Z | grobid_superconductors/linking/data_model.py | lfoppiano/grobid-superconductors-tools | e5fdf2f15d7fe319f17717cfde98e92d7eab43a8 | [
"Apache-2.0"
] | 2 | 2021-11-16T11:23:07.000Z | 2022-01-14T00:42:45.000Z | grobid_superconductors/linking/data_model.py | lfoppiano/grobid-superconductors-tools | e5fdf2f15d7fe319f17717cfde98e92d7eab43a8 | [
"Apache-2.0"
] | null | null | null | def span_to_dict(span):
converted_span = {
'text': span.text,
'formattedText': span._.formattedText,
'type': span.ent_type_,
'offset_start': span.idx,
'offset_end': span.idx + len(span.text),
'token_start': span.i,
'token_end': span.i + len(span),
'id': span._.id,
'boundingBoxes': span._.bounding_boxes,
'links': span._.links,
'linkable': span._.linkable
}
return converted_span
def token_to_dict(token):
converted_token = {
'text': token.text,
'offset': token.idx,
'formattedText': token._.formattedText,
'linkable': token._.linkable
}
# converted_token['style']
# converted_token['font'] = span.ent_type_
# converted_token['fontSize'] = span.i
return converted_token
def to_dict_link(target_id, target_text, target_type, type=None):
link = {
"targetId": target_id,
"targetText": target_text,
"targetType": target_type,
"type": type
}
return link
def to_dict_token(text="", offset=-1):
token = {
"text": text,
"formattedText": "",
"font": "",
"style": "",
"offset": offset,
"fontSize": "",
"linkable": False
}
return token
def to_dict_span(text, type, id=None, offset_start=-1, offset_end=-1, token_start=-1, token_end=-1):
converted_span = {
"id": id,
"text": str(text),
"formattedText": "",
"type": type,
"offset_start": offset_start,
"offset_end": offset_end,
"token_start": token_start,
"token_end": token_end,
"boundingBoxes": [],
"links": [],
"source": '',
"linkable": False
}
if id is None:
id = compute_id(converted_span)
converted_span['id'] = id
return converted_span
def compute_id(span):
output = [span['text'], span['type'], span['offset_start'], span['offset_end'], span['token_start'],
span['token_end'],
span['source']]
output = [str(o) for o in output]
return hash("".join(output))
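# Note (not in the original): Python's built-in hash() is salted per process
# for strings (PYTHONHASHSEED), so the ids computed above are not stable
# across runs. If reproducible ids were ever needed, one option would be:
#
#     import hashlib
#     int(hashlib.sha1("".join(output).encode("utf-8")).hexdigest(), 16)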
# File: ElectrospraySimulator/GUI_scripts/MainMenu.py (repo: DavidPoves/Liquid-meniscus-in-the-ionic-regime-simulator, license: MIT)
import os
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
import numpy as np
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import ElectrospraySimulator.GUI_scripts.PredefinedFuns as PreFuns
from ElectrospraySimulator.Tools.CreateMesh import str_2_num, write_mesh
from ElectrospraySimulator.Geometry_scripts.GMSH_Interface import GMSHInterface
from ElectrospraySimulator.GUI_scripts.ToolTip_creator import CreateToolTip
class MainMenu(tk.Frame):
def __init__(self, master=None):
"""
Initialize the MainMenu class. This will launch the GUI.
Args:
master: No user input required. Default is None.
"""
tk.Frame.__init__(self, master)
# Create the options buttons.
load_button = tk.Button(master, text='Load Geometry/Mesh', command=lambda: self.load_geometry_mesh(master))
load_button.grid(row=1, column=0, padx=10, pady=10)
load_button.configure(foreground='BLACK', activeforeground='BLACK')
CreateToolTip(load_button, 'Load a compatible file.')
create_button = tk.Button(master, text='Create a new geometry',
command=lambda: self.create_geometry(master))
create_button.grid(row=1, column=2, padx=10, pady=10)
create_button.configure(foreground='BLACK', activeforeground='BLACK')
CreateToolTip(create_button, 'Create a new geometry from scratch.')
self.geom_data = None
self.msh_filename = ''
def load_geometry_mesh(self, master):
"""
In case the user decides to load a file, this method will load the selected file. It will launch a dialog where
the user may choose the file. Accepted extensions are .geo, .msh and .xml.
Args:
master: Master window of the GUI.
Returns:
"""
ftypes = [('Dolfin Mesh File', '*.xml'), ('GMSH Geometry File', '*geo'), ('GMSH Mesh File', '*.msh'),
('All files', '*')]
filename = filedialog.askopenfilename(initialdir=os.getcwd(), filetypes=ftypes)
assert isinstance(filename, str), 'Select a proper file.'
if filename == '':
raise NameError('No file was selected. Stopping execution')
if filename.split('.')[-1] == 'geo':
self.msh_filename = write_mesh(filename)
elif filename.split('.')[-1] == 'msh' or filename.split('.')[-1] == 'xml':
self.msh_filename = filename
label = tk.Label(master, text='File was properly loaded. You can now close this window.', justify='center')
label.grid(row=2, column=1)
def create_geometry(self, master):
self.geom_data = GeometryGeneration(master, self)
class GeometryGeneration(tk.Frame):
def __init__(self, master1, main):
"""
Initialize the GeometryGeneration class, which will contain all the methods and attributes required to generate
a geometry. When initialized, a GUI will pop up, where the user will be able to personalize all the geometry
options to generate the desired .geo file.
Args:
master1: master window.
main: main menu object
"""
master2 = tk.Tk()
master2.title('Create a new Geometry')
tk.Frame.__init__(self, master2)
self.master2 = master2
self.finish = False
self.msh_filename = ''
# Create the Labels of the inputs.
tk.Label(master2, text='Select an option for the interface expression').grid(row=0, column=0)
tk.Label(master2, text='Expression z=f(r)').grid(row=1, column=0)
tk.Label(master2, text='Expression for r').grid(row=2, column=0)
tk.Label(master2, text='Expression for z').grid(row=3, column=0)
tk.Label(master2, text='Number of points:').grid(row=4, column=0)
tk.Label(master2, text='Initial independent parameter coordinate:').grid(row=5, column=0)
tk.Label(master2, text='Final independent parameter coordinate:').grid(row=6, column=0)
# Create the string variables that will store the inputs of the user.
self.user_choice = tk.StringVar(master2)
self.user_choice.set('Select an option...')
self.default_choice = self.user_choice.get()
self.z_of_r = tk.StringVar(master2)
self.r_fun = tk.StringVar(master2)
self.z_fun = tk.StringVar(master2)
self.number_points = tk.StringVar(master2)
self.number_points.set('200')
self.initial_ind_coord = tk.StringVar(master2)
self.initial_ind_coord.set('0')
self.final_ind_coord = tk.StringVar(master2)
self.final_ind_coord.set('1')
self.degrees_var = tk.BooleanVar(master2)
self.degrees = False
self.angle_unit = 'radians'
self.base_data = None
# Create the option menu.
self.options_dict = {'z = f(r)': 'z = f(r)', 'Expressions for r and z': 'Expressions for r and z',
'Predefined function': 'Predefined function'}
self.options_list = list(self.options_dict.values())
option_menu = tk.OptionMenu(master2, self.user_choice, *self.options_dict, command=self.option_fun)
option_menu.grid(row=0, column=1, padx=10, pady=10)
option_menu.configure(foreground='BLACK', activeforeground='BLACK')
# Create an option to introduce the number of points.
tk.Entry(master=self.master2, textvariable=self.number_points, justify='center').grid(row=4, column=1,
padx=10, pady=10)
# Create a button to close the create geometry menu.
close_but = tk.Button(self.master, text='Save and close.', command=lambda: self.close_fun(master1, main))
close_but.grid(row=7, column=2, padx=10, pady=10)
close_but.configure(foreground='BLACK', activeforeground='BLACK')
def option_fun(self, value):
self.user_choice.set(value)
# Call the function controlling the input boxes.
self.control_boxes()
def control_boxes(self):
tk.Entry(master=self.master2, textvariable=self.initial_ind_coord, state=tk.DISABLED, justify='center').grid(
row=5, column=1, padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.final_ind_coord, justify='center').grid(row=6, column=1,
padx=10, pady=10)
degrees_check = tk.Checkbutton(master=self.master2, variable=self.degrees_var, text='Degrees?',
command=self.check_angle_units)
degrees_check.grid(row=7, column=1, padx=10, pady=10)
CreateToolTip(degrees_check, 'If any angle is introduced, check this option to set\n'
'degrees as the unit to be used. Otherwise, radians\n'
'will be used.\n'
'Note: Ignore this option if no angles are introduced.')
        if self.user_choice.get() == self.options_list[0]:  # The user chose the z = f(r) option.
self.z_fun.set('')
self.r_fun.set('')
tk.Entry(master=self.master2, textvariable=self.z_of_r, justify='center').grid(row=1, column=1,
padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.r_fun, state=tk.DISABLED, justify='center').grid(
row=2, column=1, padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.z_fun, state=tk.DISABLED, justify='center').grid(
row=3, column=1, padx=10, pady=10)
elif self.user_choice.get() == self.options_list[-1]:
fun_data = PredefinedFunctions(self)
if fun_data.user_fun.get() == 'Half Taylor Cone':
tk.Entry(master=self.master2, textvariable=self.z_of_r, state=tk.DISABLED, justify='center').grid(
row=1, column=1, padx=10, pady=10)
self.r_fun.set('((1-2*s)*1)/(1-2*s*(1-s)*(1-20))')
self.z_fun.set('(2*(1-s)*s*20*(1/tan(49.3))*1)/(1-2*s*(1-s)*(1-20))')
tk.Entry(master=self.master2, textvariable=self.r_fun, justify='center').grid(row=2, column=1,
padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.z_fun, justify='center').grid(row=3, column=1,
padx=10, pady=10)
self.degrees_var.set(True)
else:
tk.Entry(master=self.master2, textvariable=self.z_of_r, state=tk.DISABLED, justify='center').grid(
row=1, column=1, padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.r_fun, justify='center').grid(row=2, column=1,
padx=10, pady=10)
tk.Entry(master=self.master2, textvariable=self.z_fun, justify='center').grid(row=3, column=1,
padx=10, pady=10)
def check_angle_units(self):
"""
This function checks the option chosen by the user on the Degrees? checkbox from the GUI.
Returns:
"""
if self.degrees_var.get():
self.degrees = True
self.angle_unit = 'degrees'
else:
self.degrees = False
self.angle_unit = 'radians'
def close_fun(self, master, main):
# Check that inputs are correct.
if self.user_choice.get() == self.options_list[0] and self.z_of_r.get() == '':
messagebox.showwarning(title='Error', message='No function was introduced, and it cannot be left blank.\n'
'Introduce a valid function.')
return
elif self.user_choice.get() == self.options_list[1]:
if self.r_fun.get() == '' or self.z_fun.get() == '':
messagebox.showwarning(title='Error', message='One of the functions was not introduced.\n'
'Introduce a valid function.')
return
elif self.user_choice.get() == self.default_choice:
messagebox.showwarning(title='Error', message='Please, select an option before proceeding.')
return
self.master2.destroy()
label = tk.Label(master, text='File was properly loaded. You can now close this window.', justify='center')
label.grid(row=2, column=1)
master.destroy()
# Generate the .geo file from the given data.
self.geo_gen = GMSHInterface()
num_points = int(self.number_points.get())
initial_ind_coord = str_2_num(self.initial_ind_coord.get())
final_ind_coord = str_2_num(self.final_ind_coord.get())
self.base_data = np.linspace(initial_ind_coord, final_ind_coord, num_points)
if self.z_of_r.get() != '':
self.geo_gen.geometry_generator(interface_fun=self.z_of_r.get(), r=self.base_data)
elif self.z_fun.get() is not None and self.r_fun.get() is not None:
self.geo_gen.geometry_generator(interface_fun_r=self.r_fun.get(), interface_fun_z=self.z_fun.get(),
independent_param=self.base_data, angle_unit=self.angle_unit)
self.msh_filename = self.geo_gen.mesh_generation_GUI()
main.msh_filename = self.msh_filename
class PredefinedFunctions(tk.Frame):
def __init__(self, input_data):
masterPlot = tk.Tk()
masterPlot.title('Browse predefined functions.')
tk.Frame.__init__(self, masterPlot)
self.masterPlot = masterPlot
self.geo_input = input_data
self.fig_pos = 0
self.fig = None
self.canvas = None
self.toolbar = None
tk.Label(master=masterPlot, text='Select a predefined function.').pack()
# Create an option Menu.
predef_funs_show = {'Half Taylor Cone': 'Taylor Cone', 'Cosine Function': 'Cosine Function',
'Parabolic Function': 'Parabolic Function', 'Straight Line': 'Straight Line'}
self.predef_funs = {'Half Taylor Cone': PreFuns.TaylorCone,
'Cosine Function': PreFuns.CosineFunction,
'Parabolic Function': PreFuns.ParabolicFunction,
'Straight Line': PreFuns.StraightLine
}
self.user_fun = tk.StringVar()
self.user_fun.set('Select a function.')
self.default_user_fun = self.user_fun.get()
opts_menu = tk.OptionMenu(self.masterPlot, self.user_fun, *predef_funs_show, command=self.plot_option)
opts_menu.pack()
opts_menu.configure(foreground='BLACK', activeforeground='BLACK')
close_but = tk.Button(self.masterPlot, text='Save choice', command=self.save)
close_but.pack()
close_but.configure(foreground='BLACK', activeforeground='BLACK')
def plot_option(self, value):
if self.user_fun.get() == self.default_user_fun:
            messagebox.showwarning(title='Select a function', message='Please, select a function before proceeding.')
else:
if self.fig_pos == 0:
self.fig = Figure(figsize=(5, 4), dpi=100)
else:
# Eliminate the previous figure to avoid overlapping.
self.fig.clf()
self.canvas.get_tk_widget().destroy()
self.toolbar.destroy()
self.plot()
def plot(self):
if self.user_fun.get() == 'Half Taylor Cone':
var = np.linspace(str_2_num(self.geo_input.initial_ind_coord.get()),
str_2_num(self.geo_input.final_ind_coord.get())/2,
int(str_2_num(self.geo_input.number_points.get())))
else:
var = np.linspace(str_2_num(self.geo_input.initial_ind_coord.get()),
str_2_num(self.geo_input.final_ind_coord.get()),
int(str_2_num(self.geo_input.number_points.get())))
r, z = self.predef_funs.get(self.user_fun.get())(var)
self.fig_pos += 1
ax = self.fig.add_subplot()
ax.plot(r, z)
self.fig.suptitle(self.user_fun.get())
self.canvas = FigureCanvasTkAgg(self.fig, master=self.masterPlot) # A tk.DrawingArea.
self.canvas.draw()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar = NavigationToolbar2Tk(self.canvas, self.masterPlot)
self.toolbar.update()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
def update_degrees_units(self):
self.geo_input.check_angle_units()
degrees_check = tk.Checkbutton(master=self.geo_input.master2, variable=self.geo_input.degrees_var,
text='Degrees?', state=tk.DISABLED,
command=self.geo_input.check_angle_units)
degrees_check.grid(row=7, column=1, padx=10, pady=10)
CreateToolTip(degrees_check, 'If any angle is introduced, check this option to set\n'
'degrees as the unit to be used. Otherwise, radians\n'
'will be used.\n'
'Note: Ignore this option if no angles are introduced.')
def save(self):
# Load the chosen function into the Geometry menu.
if self.user_fun.get() == 'Half Taylor Cone':
self.geo_input.r_fun.set('((1-2*s)*1)/(1-2*s*(1-s)*(1-20))')
self.geo_input.z_fun.set('(2*(1-s)*s*20*(1/tan(49.3))*1)/(1-2*s*(1-s)*(1-20))')
self.geo_input.final_ind_coord.set('0.5')
self.geo_input.degrees_var.set(True)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.r_fun, justify='center').grid(
row=2, column=1, padx=10, pady=10)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_fun, justify='center').grid(
row=3, column=1, padx=10, pady=10)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_of_r, state=tk.DISABLED,
justify='center').grid(row=1, column=1, padx=10, pady=10)
self.geo_input.degrees_var.set(True)
self.update_degrees_units()
elif self.user_fun.get() == 'Cosine Function':
self.geo_input.z_of_r.set('0.5*cos(PI/2 * r)')
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.r_fun, state=tk.DISABLED,
justify='center').grid(row=2, column=1, padx=10, pady=10)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_fun, state=tk.DISABLED,
justify='center').grid(row=3, column=1, padx=10, pady=10)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_of_r, state=tk.NORMAL,
justify='center').grid(row=1, column=1, padx=10, pady=10)
self.geo_input.degrees_var.set(False)
self.update_degrees_units()
elif self.user_fun.get() == 'Parabolic Function' or self.user_fun.get() == 'Straight Line':
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.r_fun, state=tk.DISABLED,
justify='center').grid(row=2, column=1, padx=10, pady=10)
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_fun, state=tk.DISABLED,
justify='center').grid(row=3, column=1, padx=10, pady=10)
if self.user_fun.get() == 'Parabolic Function':
a = -0.5 / (1 - 0) ** 2
self.geo_input.z_of_r.set(f'{str(a)}*(r-0)^2 + 0.5')
if self.user_fun.get() == 'Straight Line':
self.geo_input.z_of_r.set('0.5*(1-r)')
tk.Entry(master=self.geo_input.master2, textvariable=self.geo_input.z_of_r, state=tk.NORMAL,
justify='center').grid(row=1, column=1, padx=10, pady=10)
self.masterPlot.destroy()
def run_main_menu():
root = tk.Tk()
root.title('Main Menu: Selection of the geometry.')
app = MainMenu(root)
root.mainloop()
return app
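# Usage note: root.mainloop() blocks until the main window closes, so the
# returned app (and fields such as app.msh_filename, filled in by the dialogs)
# is only inspected after the GUI exits. GeometryGeneration and
# PredefinedFunctions also create additional tk.Tk() roots; tk.Toplevel is the
# more conventional choice for secondary windows, though only one mainloop runs
# here.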
if __name__ == '__main__':
app = run_main_menu()
# File: Winpy/test.py (repo: LeoWood/captcha_break, license: MIT)
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author: LiuHuan
# Datetime: 2020/3/18 16:20
import torch
from PIL import Image
import matplotlib.pyplot as plt
from torchvision.transforms.functional import to_tensor
import string
from captcha.image import ImageCaptcha
characters = '-' + string.digits + string.ascii_uppercase
def decode(sequence):
a = ''.join([characters[x] for x in sequence])
s = ''.join([x for j, x in enumerate(a[:-1]) if x != characters[0] and x != a[j+1]])
if len(s) == 0:
return ''
if a[-1] != characters[0] and s[-1] != a[-1]:
s += a[-1]
return s
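# Worked example (illustrative): with characters = '-' + digits + uppercase,
# the index sequence [2, 2, 0, 2, 11, 11] maps to a = "11-1AA". CTC-style
# collapsing merges the repeated leading '1's, drops the blank '-' that
# separates the two distinct '1's, and merges 'AA', so decode(...) == "11A".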
def pad_image(image, target_size):
    iw, ih = image.size  # original image size
    w, h = target_size  # target image size
    scale = min(w / iw, h / ih)  # smallest scale factor
    # make sure at least one of width/height matches the target size
    nw = int(iw * scale)
    nh = int(ih * scale)
    image = image.resize((nw, nh), Image.BICUBIC)  # rescale the image
    new_image = Image.new('RGB', target_size, (255, 255, 255))  # blank canvas (the original comment said grey, but (255, 255, 255) is white)
    # // is integer division; compute where to paste the image
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))  # paste centred, padded equally on both sides
    return new_image
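# Usage sketch (illustrative, file name assumed): letterbox an arbitrary
# screenshot into the 192x64 input the model expects, padded with white:
#
#     img = pad_image(Image.open('captcha.jpg'), (192, 64))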
# c = Image.open(r'C:\Users\Administrator\Desktop\ValidateCode (1).jpg')
# # c = Image.open(r'C:\Users\Administrator\Desktop\b.jpg')
# # c.convert('RGB')
# c = pad_image(c,(192,64))
# # plt.show(d)
# image = to_tensor(c)
#
# # generator = ImageCaptcha(width=192, height=64)
# # a = generator.generate_image('ABSD')
# # image = to_tensor(a)
#
# model=torch.load(r'E:\LiuHuan\Projects\captcha_break\Winpy\ctc3.pth')
# model = model.cuda()
#
# output = model(image.unsqueeze(0).cuda())
# output_argmax = output.detach().permute(1, 0, 2).argmax(dim=-1)
# print('pred:', decode(output_argmax[0]))
if __name__ == '__main__':
model = torch.load(r'E:\LiuHuan\Projects\captcha_break\Winpy\ctc_3_fonts_5000_180_50.pth')
model = model.cuda()
while True:
path = input()
a = Image.open(path)
c = pad_image(a, (192, 64))
image = to_tensor(c)
output = model(image.unsqueeze(0).cuda())
output_argmax = output.detach().permute(1, 0, 2).argmax(dim=-1)
        print('pred:', decode(output_argmax[0]))

# File: aiogram_bot/handlers/simple_admin.py (repo: samuelfirst/bot, license: MIT)
import asyncio
import datetime
from contextlib import suppress
from typing import List, Optional
from aiogram import types
from aiogram.utils import exceptions
from aiogram.utils.exceptions import BadRequest, Unauthorized
from aiogram.utils.markdown import hlink, quote_html
from babel.dates import format_timedelta
from loguru import logger
from magic_filter import F
from sqlalchemy.dialects.postgresql import insert
from aiogram_bot.misc import bot, dp, i18n
from aiogram_bot.models.chat import Chat, ChatAllowedChannels
from aiogram_bot.models.user import User
from aiogram_bot.utils.timedelta import parse_timedelta_from_message
_ = i18n.gettext
@dp.message_handler(
F.ilter(F.reply_to_message.sender_chat),
commands=["ro", "ban"],
commands_prefix="!",
user_can_restrict_members=True,
bot_can_restrict_members=True,
# chat_property="restrict_commands",
)
async def command_ban_sender_chat(message: types.Message, target: Optional[types.Chat] = None):
if target is None:
target = message.reply_to_message.sender_chat
to_message = message.reply_to_message
else:
to_message = message
try: # Apply restriction
await message.chat.ban_sender_chat(sender_chat_id=target.id)
await ChatAllowedChannels.delete.where(
(ChatAllowedChannels.chat_id == message.chat.id)
& (ChatAllowedChannels.channel_id == target.id)
).gino.scalar()
logger.info(
"Chat {chat} restricted by {admin}",
chat=target.id,
admin=message.from_user.id,
)
except exceptions.BadRequest as e:
logger.error("Failed to restrict chat member: {error!r}", error=e)
return False
await to_message.answer(
_(
"Channel {channel} was permanently banned "
"and the channel owner will no longer be able to send messages here "
"on behalf of any of his channels."
).format(channel=target.mention)
)
return True
@dp.message_handler(
F.ilter(F.reply_to_message),
commands=["ro"],
commands_prefix="!",
user_can_restrict_members=True,
bot_can_restrict_members=True,
chat_property="restrict_commands",
)
async def cmd_ro(message: types.Message, chat: Chat):
duration = await parse_timedelta_from_message(message)
if not duration:
return
try: # Apply restriction
await message.chat.restrict(
message.reply_to_message.from_user.id, can_send_messages=False, until_date=duration
)
logger.info(
"User {user} restricted by {admin} for {duration}",
user=message.reply_to_message.from_user.id,
admin=message.from_user.id,
duration=duration,
)
except exceptions.BadRequest as e:
logger.error("Failed to restrict chat member: {error!r}", error=e)
return False
if duration >= datetime.timedelta(days=367):
duration = "forever"
else:
duration = format_timedelta(
duration, locale=chat.language, granularity="seconds", format="short"
)
await message.reply_to_message.answer(
_("<b>Read-only</b> activated for user {user}. Duration: {duration}").format(
user=message.reply_to_message.from_user.get_mention(), duration=duration
)
)
return True
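# Usage note (assumption, inferred from the handler config): an admin replies
# to the offending message with e.g. "!ro 2h"; parse_timedelta_from_message
# presumably parses the duration argument out of the command text.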
@dp.message_handler(
F.ilter(F.reply_to_message),
commands=["ban"],
commands_prefix="!",
user_can_restrict_members=True,
bot_can_restrict_members=True,
chat_property="restrict_commands",
)
async def cmd_ban(message: types.Message, chat: Chat):
duration = await parse_timedelta_from_message(message)
if not duration:
return
try: # Apply restriction
await message.chat.kick(message.reply_to_message.from_user.id, until_date=duration)
logger.info(
"User {user} kicked by {admin} for {duration}",
user=message.reply_to_message.from_user.id,
admin=message.from_user.id,
duration=duration,
)
except exceptions.BadRequest as e:
logger.error("Failed to kick chat member: {error!r}", error=e)
return False
if duration >= datetime.timedelta(days=367):
duration = "forever"
else:
duration = format_timedelta(
duration, locale=chat.language, granularity="seconds", format="short"
)
await message.reply_to_message.answer(
_("User {user} <b>banned</b> for {duration}").format(
user=message.reply_to_message.from_user.get_mention(), duration=duration
)
)
return True
@dp.message_handler(
chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
text_contains="@admin",
state="*",
chat_property="report_to_admins",
)
@dp.message_handler(
chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
commands=["report"],
commands_prefix="!/",
state="*",
chat_property="report_to_admins",
)
async def text_report_admins(message: types.Message):
    if not message.reply_to_message:
        return await message.reply(
            _(
                "Please use this command only in reply to the message you want to report; "
                "it will then be reported to the chat administrators."
            )
        )
    # The reply check must come first: the log line below dereferences
    # message.reply_to_message.from_user and would raise on a non-reply.
    logger.info(
        "User {user} report message {message} in chat {chat} from user {from_user}",
        user=message.from_user.id,
        message=message.message_id,
        chat=message.chat.id,
        from_user=message.reply_to_message.from_user.id,
    )
admins: List[types.ChatMember] = await message.chat.get_administrators()
text = _("[ALERT] User {user} is reported message in chat {chat}.").format(
user=message.from_user.get_mention(),
chat=hlink(
message.chat.title,
f"https://t.me/{message.chat.username}/{message.reply_to_message.message_id}",
)
if message.chat.username
else quote_html(repr(message.chat.title)),
)
admin_ids = [
admin.user.id for admin in admins if admin.is_chat_admin() and not admin.user.is_bot
]
if admin_ids:
for admin in await User.query.where(
User.id.in_(admin_ids) & (User.do_not_disturb == False) # NOQA
).gino.all(): # NOQA
with suppress(Unauthorized):
await bot.send_message(admin.id, text)
logger.info("Send alert message to admin {admin}", admin=admin.id)
await asyncio.sleep(0.3)
await message.reply_to_message.reply(_("This message is reported to chat administrators."))
@dp.message_handler(
chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
commands=["do_not_click", "leave"],
bot_can_restrict_members=True,
)
async def cmd_leave(message: types.Message):
try:
await message.chat.unban(user_id=message.from_user.id)
msg = await message.answer(
_("User {user} leave this chat...").format(user=message.from_user.get_mention())
)
except BadRequest:
msg = None
await asyncio.sleep(10)
with suppress(BadRequest):
await message.delete()
if msg:
await msg.delete()
@dp.message_handler(
F.ilter(F.reply_to_message.sender_chat),
commands=["approve_channel"],
chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
user_can_promote_members=True,
bot_can_restrict_members=True,
)
async def command_allow_channel(message: types.Message):
target = message.reply_to_message.sender_chat
stmt = (
insert(ChatAllowedChannels)
.values(
chat_id=message.chat.id,
channel_id=target.id,
added_by=message.from_user.id,
)
.on_conflict_do_nothing(
index_elements=[ChatAllowedChannels.chat_id, ChatAllowedChannels.channel_id],
)
)
await stmt.gino.scalar()
await message.chat.unban_sender_chat(target.id)
await message.answer(
_("Channel {channel} allowed in this chat").format(channel=target.mention)
)
# File: setup.py (repo: jstockwin/py-pdf-parser, license: MIT)
import os
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 6):
print(sys.stderr, "{}: need Python 3.6 or later.".format(sys.argv[0]))
print(sys.stderr, "Your Python is {}".format(sys.version))
sys.exit(1)
ROOT_DIR = os.path.dirname(__file__)
setup(
name="py-pdf-parser",
    # `exclude` is a find_packages() argument, not a setup() keyword, so it was
    # silently ignored where the original placed it.
    packages=find_packages(exclude=["tests.*", "tests", "docs", "docs.*"]),
version="0.10.1",
url="https://github.com/jstockwin/py-pdf-parser",
license="BSD",
description="A tool to help extracting information from structured PDFs.",
long_description=open(os.path.join(ROOT_DIR, "README.md")).read(),
long_description_content_type="text/markdown",
author="Jake Stockwin",
author_email="jstockwin@gmail.com",
include_package_data=True,
install_requires=[
"pdfminer.six==20211012",
"docopt==0.6.2",
"wand==0.6.7",
],
extras_require={
"dev": [
"matplotlib==3.4.3",
"pillow==8.4.0",
"pyvoronoi==1.0.7",
"shapely==1.7.1",
],
"test": [
"black==21.9b0",
"ddt==1.4.4",
"matplotlib==3.4.3",
"mock==4.0.3",
"mypy==0.910",
"nose==1.3.7",
"pillow==8.4.0",
"pycodestyle==2.8.0",
"pytype==2021.9.9",
"recommonmark==0.7.1",
"sphinx-autobuild==2021.3.14",
"sphinx-rtd-theme==1.0.0",
"Sphinx==4.2.0",
],
},
)
# File: src/scraping/scrapers/whomyth_scraper.py (repo: mfleming99/scraping-qas, license: Apache-2.0)
# Copyright (c) Johns Hopkins University and its affiliates.
# This source code is licensed under the Apache 2 license found in the
# LICENSE file in the root directory of this source tree.
"""
WHO Myth crawler
Expected page to crawl is
https://www.who.int/emergencies/diseases/novel-coronavirus-2019/advice-for-public/myth-busters
"""
__author__ = "Kenton Murray, Max Fleming"
__copyright__ = "Copyright 2020, Johns Hopkins University"
__credits__ = ["Kenton Murray"]
__license__ = "Apache 2.0"
__version__ = "0.1"
__maintainer__ = "JHU-COVID-QA"
__email__ = "covidqa@jhu.edu"
__status__ = "Development"
import datetime
import time
import pprint
import uuid
from urllib import request, response, error, parse
from urllib.request import urlopen
from bs4 import BeautifulSoup, NavigableString, CData, Tag
import json
import jsonlines
from covid_scraping import Conversion, Scraper
'''
<div class="sf-content-block content-block" >
<div ><h2></h2><h2><strong>COVID-19 virus can be transmitted in areas with hot and humid climates <o:p></o:p></strong></h2><p>From the
evidence so far, the COVID-19 virus can be transmitted in ALL AREAS, including areas with
hot and humid weather. Regardless of climate, adopt protective measures if you
live in, or travel to an area reporting COVID-19. The best way to
protect yourself against COVID-19 is by frequently cleaning your hands. By
doing this you eliminate viruses that may be on your hands and avoid infection
that could occur by then touching your eyes, mouth, and nose.<o:p></o:p></p><p> </p></div>
</div>
Unfortunately, the "sf-content-block" has a lot of other times it is used but are not questions. I naively look for <h2> which is a bolded question
Originally written by @KentonMurray so direct questions to him
'''
class WhoMythScraper(Scraper):
def scrape(self):
url = 'https://www.who.int/emergencies/diseases/novel-coronavirus-2019/advice-for-public/myth-busters'
html = urlopen(url)
soup = BeautifulSoup(html, "lxml")
qas_plus_some = soup.find_all(
'div', class_='sf-content-block content-block')
qa_pairs = []
for potential in qas_plus_some:
for child in potential.children:
if "h2" in str(
child): # Super hacky ... but this seemed to be the best way for this site
s_child = str(child)
s_child = s_child.replace("\n", " ")
s_child = s_child.replace(u'\xa0', u' ')
qa = s_child.split("</h2>")
if len(qa) == 2:
question = str(qa[0])
answer = str(qa[1])
elif len(qa) == 3: # First question is different
question = str(qa[1])
answer = str(qa[2])
                    else:
                        print("ERROR: unexpected question/answer split")  # TODO: better error handling?
                        continue  # skip this block; question/answer would otherwise be stale or unbound
                    qa_pairs.append((question, answer))
converter = Conversion(
self._filename,
self._path)
for pair in qa_pairs:
converter.addExample({
"sourceName": 'WHOMyth',
"sourceUrl": url,
"typeOfInfo": 'QA',
"needUpdate": True,
"typeOfInfo": 'QA',
"isAnnotated": False,
"responseAuthority": "",
"question": pair[0],
"answer": pair[1],
"hasAnswer": True,
"targetEducationLevel": 'NA',
"topic": ["Myths"],
"extraData": {},
"targetLocation": "",
"language": 'en'
})
return converter.write()
def main():
scraper = WhoMythScraper(path='./', filename='WhoMyth')
scraper.scrape()
if __name__ == '__main__':
main()
# File: join_crops_back.py (repo: Yorkbenno/SEAM, license: MIT)
import os
from PIL import Image
import numpy as np
# from utils.util import online_cut_patches
import png
pseudo_mask_path = 'validoutcampred'
origin_ims_path = '../WSSS4LUAD/Dataset_crag/3.testing/img'
destination = 'crag_seam_testPseudoMask'
if not os.path.exists(destination):
os.mkdir(destination)
ims_dict = {}
for partial_mask in os.listdir(pseudo_mask_path):
im_index, s = partial_mask.split('_')
im_index = int(im_index)
# position = s.split('-')[0]
if im_index not in ims_dict:
ims_dict[im_index] = []
ims_dict[im_index].append(os.path.join(pseudo_mask_path, partial_mask))
for origin_im in os.listdir(origin_ims_path):
im = np.asarray(Image.open(os.path.join(origin_ims_path, origin_im)))
complete_mask = np.zeros((im.shape[0], im.shape[1]))
sum_counter = np.zeros_like(complete_mask)
im_index = int(origin_im.split('.')[0])
for im_path in ims_dict[im_index]:
partial_mask = np.load(im_path, allow_pickle=True)
position_path = im_path.split('_')[-1].split('-')[0][1:-1].split(',')
position = tuple((int(position_path[0]), int(position_path[1])))
# print(position)
complete_mask[position[0]:position[0]+112, position[1]:position[1]+112] += partial_mask
sum_counter[position[0]:position[0]+112, position[1]:position[1]+112] += 1
complete_mask = np.rint(complete_mask / sum_counter)
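    # Note: the division above assumes the 112x112 crops cover every pixel of
    # the image, i.e. sum_counter has no zeros; an uncovered pixel would divide
    # by zero and leave NaN/inf in complete_mask.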
palette = [(0, 64, 128), (64, 128, 0), (243, 152, 0), (255, 255, 255)]
with open(os.path.join(destination, f'{origin_im.split(".")[0]}.png'), 'wb') as f:
w = png.Writer(complete_mask.shape[1], complete_mask.shape[0], palette=palette, bitdepth=8)
        w.write(f, complete_mask.astype(np.uint8))

# File: cashalgo/stock-svm/stockfeature/stock_feature_svr.py (repo: Akagi201/learning-quant, license: MIT)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import tushare as ts  # use the tushare package to fetch market data


def getFeatureSample(StockDf, idx, colum_name, feature_id):  # build one "id:value" feature token
    feature_val = StockDf.ix[idx, colum_name]
    sample = str(feature_id) + ':' + str(feature_val) + ' '
    return sample
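# Format note (illustrative values): each written sample line ends up in
# LIBSVM format, "<label> <feature_id>:<value> ...",
# e.g. "0.12 1:10.5 2:10.9 3:10.2 ...".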
def fetchStockData(code, output_csv=None):  # fetch daily data for a stock code
    # note: DataFrame.ix has been removed from modern pandas; .loc/.iloc would be
    # needed to run this against a current version
    StockDf = ts.get_h_data(code)
    StockDf = StockDf.sort_index(axis=0, ascending=True)
    # adding EMA feature
    StockDf['ema'] = StockDf['close']
    StockDf['rise'] = StockDf['close']
    DfLen = len(StockDf.index)  # number of rows
    EMA = 0  # exponential moving average
    RISE = 0  # daily return
    for n in range(0, DfLen):
        idx = n
        Close = StockDf.ix[idx, 'close']  # closing price
        if (n == 0):
            EMA = Close
            RISE = 0
        else:
            EMA = StockDf.ix[idx - 1, 'ema']  # previous day's EMA (the original read idx - 11, almost certainly a typo)
            EMA = ((n - 1) * EMA + 2 * Close) / (n + 1)  # EMA recurrence
            CloseP = StockDf.ix[idx - 1, 'close']  # previous day's close
            RISE = (Close - CloseP) / CloseP  # daily return
        StockDf.ix[idx, 'ema'] = EMA
        StockDf.ix[idx, 'rise'] = RISE
    if (output_csv != None):
        StockDf.to_csv(output_csv)  # optionally dump the enriched data
    return StockDf
def genFeature(StockDf, file_name, win_size=3):
    # Generating moving window features
    problem_file = open(file_name, 'w+')
    DfLen = len(StockDf.index)  # number of rows
    for n in range(0, DfLen - win_size):
        predic_idx = n + win_size
        predict = 0
        predict = StockDf.ix[predic_idx, 'rise']  # predicted return (the label)
        predict = predict * 10  # scale so that 1 = a 10% rise
        Sample = str(predict) + ' '
        feature_id = 1
        feature_val = 0
        for j in range(n, n + win_size):
            Sample += getFeatureSample(StockDf, j, 'open', feature_id)  # open price
            feature_id += 1
            Sample += getFeatureSample(StockDf, j, 'high', feature_id)  # daily high
            feature_id += 1
            Sample += getFeatureSample(StockDf, j, 'close', feature_id)  # close price
            feature_id += 1
            Sample += getFeatureSample(StockDf, j, 'low', feature_id)  # daily low
            feature_id += 1
            Sample += getFeatureSample(StockDf, j, 'volume', feature_id)  # trading volume
            feature_id += 1
            Sample += getFeatureSample(StockDf, j, 'ema', feature_id)  # computed EMA
            feature_id += 1
        Sample += '\n'
        problem_file.write(Sample)
    problem_file.close()
    print('\n sample number: ' + str(n + 1) + '\n feature number: ' + str(feature_id - 1))
    del problem_file
    del StockDf
if __name__ == '__main__':
    # print(sys.path)
    for i in range(1, len(sys.argv)):
        print("Argument", i, sys.argv[i])
    StockCode = sys.argv[1]  # the stock code to fetch
    Df = fetchStockData(StockCode, StockCode + '.csv')
    # The original called genFeature(Df, 3, StockCode), which passed win_size as
    # the file name; the signature is genFeature(StockDf, file_name, win_size).
    genFeature(Df, StockCode, 3)
# File: lambdas/common.py (repo: jforge/blog-serverless-ping-pong, license: Apache-2.0)
import boto3
import time
client = boto3.client('sns')
def _publish(region: str, account: str, topic: str, msg: str) -> None:
"publish a message to sns"
client.publish(TopicArn=f'arn:aws:sns:{region}:{account}:{topic}', Message=msg)
def publish(ctx, topic, msg) -> None:
"publish a message to sns"
region, account = determine_region_and_account(ctx)
delay(1)
_publish(region, account, topic, msg)
def delay(x: int) -> None:
"sleep for x seconds, to slow down the game a little"
time.sleep(x)
def determine_region_and_account(arn: str) -> (str, str):
"returns the region and account from a given arn"
xs = arn.split(':')
return xs[3], xs[4]
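# Example (illustrative ARN): "arn:aws:sns:us-east-1:123456789012:ping" splits
# into ['arn', 'aws', 'sns', 'us-east-1', '123456789012', 'ping'], so xs[3] is
# the region and xs[4] the account id.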
def dump_context(ctx) -> None:
"Logs the lambda context"
print(
f"""
function_name: {ctx.function_name}
function_version: {ctx.function_version}
invoked_function_arn: {ctx.invoked_function_arn}
    memory_limit_in_mb: {ctx.memory_limit_in_mb}
aws_request_id: {ctx.aws_request_id}
log_group_name: {ctx.log_group_name}
log_stream_name: {ctx.log_stream_name}
identity: {ctx.identity}
""")
# File: predictor.py (repo: didithilmy/ppam-lyrics-generator-backend, license: MIT)
from tensorflow import keras
from keras.preprocessing.sequence import pad_sequences
import numpy as np
class LyricsModelPredictor:
def __init__(self, model_file_path):
self.max_vocabulary_size = 9000
self.n_steps = 10
self.model = keras.models.load_model(model_file_path)
print("Model loaded")
print(self.model.summary())
def predict_single(self, input_sequence):
pad_encoded = pad_sequences([input_sequence], maxlen=self.n_steps, truncating='pre')
pad_encoded = np.reshape(pad_encoded, (1, self.n_steps, 1))
pad_encoded = pad_encoded / float(self.max_vocabulary_size)
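        # Note: Sequential.predict_classes was removed in TensorFlow 2.6; with a
        # softmax output head (an assumption about this model), an equivalent is
        #     np.argmax(self.model.predict(pad_encoded), axis=-1)[0]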
pred_word_ind = self.model.predict_classes(pad_encoded, verbose=0)[0]
return int(pred_word_ind)
def generate_sequence(self, seed_sequence, num_words):
output_seq = []
input_seq = list(seed_sequence)
for i in range(num_words):
predicted = self.predict_single(input_seq)
output_seq.append(predicted)
input_seq.append(predicted)
return output_seq
# File: examples/containers/declarative_injections.py (repo: YelloFam/python-dependency-injector, license: BSD-3-Clause)
"""Declarative container injections example."""
import sqlite3
from dependency_injector import containers, providers
class UserService:
def __init__(self, db: sqlite3.Connection):
self.db = db
class AuthService:
def __init__(self, db: sqlite3.Connection, user_service: UserService):
self.db = db
self.user_service = user_service
class Container(containers.DeclarativeContainer):
database = providers.Singleton(sqlite3.connect, ":memory:")
user_service = providers.Factory(
UserService,
db=database,
)
auth_service = providers.Factory(
AuthService,
db=database,
user_service=user_service,
)
if __name__ == "__main__":
container = Container()
user_service = container.user_service()
auth_service = container.auth_service()
assert user_service.db is auth_service.db is container.database()
assert isinstance(auth_service.user_service, UserService)
# File: wagtailcontentstream/wagtail_hooks.py (repo: FlipperPA/wagtailcontentstream, license: BSD-3-Clause)
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler
from wagtail.admin.rich_text.editors.draftail.features import InlineStyleFeature
from wagtail.core import hooks
@hooks.register('register_rich_text_features')
def register_monospace_feature(features):
"""
Registering the `monospace` feature, which uses the `CODE` Draft.js inline style type,
and is stored as HTML with a `<code>` tag.
"""
feature_name = 'monospace'
draftail_type = 'CODE'
html_tag = 'code'
# Configure how Draftail handles the feature in its toolbar.
control = {
'type': draftail_type,
'label': '{ }',
'description': 'Monospace',
}
# Call register_editor_plugin to register the configuration for Draftail.
features.register_editor_plugin(
'draftail', feature_name, InlineStyleFeature(control)
)
# Configure the content transform from the DB to the editor and back.
db_conversion = {
'from_database_format': {html_tag: InlineStyleElementHandler(draftail_type)},
'to_database_format': {'style_map': {draftail_type: html_tag}},
}
# Call register_converter_rule to register the content transformation conversion.
features.default_features.append(feature_name)
features.register_converter_rule('contentstate', feature_name, db_conversion)
# File: dodo.py (repo: rlowrance/re-avm, license: BSD-3-Clause)
# dodo.py
import os
import HPs
import Path
def task_create_samples2():
'create sample2 files from sample files in data/working/'
working = Path.Path().dir_working()
me = os.path.join(working, 'sample2')
return {
'file_dep': [
os.path.join(working, 'samples-test.csv'),
os.path.join(working, 'sample-train.csv'),
],
'targets': [
os.path.join(me, 'duplicates.pickle'),
os.path.join(me, 'uniques.pickle'),
os.path.join(me, 'all.csv'),
os.path.join(me, 'test.csv'),
os.path.join(me, 'train.csv'),
],
'actions': [
'python samples2.py',
],
}
last_months = [
year * 100 + month
for year in (2006, 2007, 2008)
for month in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
]
def task_fit_train_en_LASTMONTH_all():
targets = [
HPs.to_str(hp) + '.pickle'
for hp in HPs.iter_hps_en()
]
for last_month in last_months:
        # doit subtasks need a unique 'name', and the key is 'actions' (a list),
        # not 'action' as in the original.
        yield {
            'name': str(last_month),
            'actions': ['python fit.py train en %d all' % last_month],
            'targets': targets,
        }
# File: benchmarks/stats/typed/frequency.py (repo: nuprl/retic_performance, license: MIT)
import pstat
import copy
import support
from typed_math import pow, sqrt, exp, abs, fabs, log, round, pi
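# Note: the annotations below, e.g. List(float), use Reticulated Python's
# call-style type syntax rather than the standard typing module; this file
# appears to be part of a Reticulated gradual-typing benchmark.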
####################################
####### FREQUENCY STATS ##########
####################################
def itemfreq(inlist:List(float))->List(List(float)):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
and it's frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq)
def scoreatpercentile (inlist:List(float), percent:float)->float:
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
#print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist,10,[0,max(inlist)])
cumhist = support.cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def percentileofscore (inlist:List(float), score:int)->float:
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
histbins=10 #bg: was default argument
defaultlimits=[0,max(inlist)] #None #bg: was a default argument
h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
cumhist = support.cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def histogram (inlist:List(float),numbins:int,defaultreallimits:(float,float))->(List(int),float,float,int):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
printextras=0 #bg: was default argument
if (defaultreallimits != None):
if type(defaultreallimits) not in [list,tuple] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.000001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) +1e-6 #1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
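# Usage sketch (illustrative):
#     bins, low, width, extra = histogram([1.0, 2.0, 2.5, 9.0], 4, (0.0, 10.0))
#     # bins == [2, 1, 0, 1], low == 0.0, width == 2.5, extra == 0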
def cumfreq(inlist:List(float))->(List(int),float,float,int):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
numbins=10 #bg: was optional argument
defaultreallimits=[0,max(inlist)] #None #bg# was optional argument
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
cumhist = support.cumsum(copy.deepcopy(h))
return cumhist,l,b,e
def relfreq(inlist:List(float))->(List(float),float,float,int):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
numbins=10 #bg: was optional argument
defaultreallimits=[0,max(inlist)] #None #bg: was optional argument
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
#bg#h=dyn(h)
h = h
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h,l,b,e
# File: setup.py (repo: displague/metal-python, license: MIT)
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@equinixmetal.com
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "metal"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.25.3", "six >= 1.10", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Metal API",
author="Equinix Metal API Team",
author_email="support@equinixmetal.com",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "Metal API"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Equinix Metal",
long_description="""\
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
"""
)
| 35.348837 | 318 | 0.714474 | 204 | 1,520 | 5.289216 | 0.455882 | 0.066728 | 0.016682 | 0.022243 | 0.437442 | 0.437442 | 0.376274 | 0.376274 | 0.376274 | 0.376274 | 0 | 0.019048 | 0.171053 | 1,520 | 42 | 319 | 36.190476 | 0.837302 | 0.393421 | 0 | 0 | 0 | 0.05 | 0.550169 | 0.027058 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dbfd4218bc8a081e44afb20c5c8590887b58c2f | 24,710 | py | Python | redisai/client.py | filipecosta90/redisai-py | 167be1146bc881130f09fe3cd1ca611b543aeb61 | [
"BSD-3-Clause"
] | null | null | null | redisai/client.py | filipecosta90/redisai-py | 167be1146bc881130f09fe3cd1ca611b543aeb61 | [
"BSD-3-Clause"
] | null | null | null | redisai/client.py | filipecosta90/redisai-py | 167be1146bc881130f09fe3cd1ca611b543aeb61 | [
"BSD-3-Clause"
] | null | null | null | from functools import wraps, partial
from typing import Union, AnyStr, ByteString, List, Sequence, Any
import warnings
from redis import StrictRedis
from redis.client import Pipeline as RedisPipeline
import numpy as np
from . import command_builder as builder
from .postprocessor import Processor
processor = Processor()
class Client(StrictRedis):
"""
    Redis client built specifically for the RedisAI module. It takes all the necessary
parameters to establish the connection and an optional ``debug`` parameter on
initialization
Parameters
----------
debug : bool
If debug mode is ON, then each command that is sent to the server is
printed to the terminal
    enable_postprocess : bool
        Flag to enable post-processing. If enabled, all the bytestring returns
        are converted to Python strings recursively and key-value pairs are
        converted to dictionaries. Note that this flag doesn't work with the
        pipeline() function, since a pipeline could contain native redis
        commands (along with RedisAI commands)
Example
-------
>>> from redisai import Client
>>> con = Client(host='localhost', port=6379)
"""
def __init__(self, debug=False, enable_postprocess=True, *args, **kwargs):
super().__init__(*args, **kwargs)
if debug:
self.execute_command = enable_debug(super().execute_command)
self.enable_postprocess = enable_postprocess
    def pipeline(self, transaction: bool = True, shard_hint=None) -> 'Pipeline':
"""
        It follows the same pipeline implementation as the native redis client but also
        enables RedisAI operations. This function is experimental in the
        current release.
Example
-------
>>> pipe = con.pipeline(transaction=False)
>>> pipe = pipe.set('nativeKey', 1)
>>> pipe = pipe.tensorset('redisaiKey', np.array([1, 2]))
>>> pipe.execute()
[True, b'OK']
"""
        return Pipeline(self.enable_postprocess,
                        self.connection_pool,
                        self.response_callbacks,
                        transaction=transaction, shard_hint=shard_hint)
def dag(self, load: Sequence = None, persist: Sequence = None,
readonly: bool = False) -> 'Dag':
"""
It returns a DAG object on which other DAG-allowed operations can be called. For
more details about DAG in RedisAI, refer to the RedisAI documentation.
Parameters
----------
load : Union[AnyStr, List[AnyStr]]
Load the list of given values from the keyspace to DAG scope
persist : Union[AnyStr, List[AnyStr]]
Write the list of given key, values to the keyspace from DAG scope
readonly : bool
If True, it triggers AI.DAGRUN_RO, the read only DAG which cannot write (PERSIST) to
the keyspace. But since it can't write, it can execute on replicas
Returns
-------
Any
Dag object which holds other operations permitted inside DAG as attributes
Example
-------
>>> con.tensorset('tensor', ...)
'OK'
>>> con.modelset('model', ...)
'OK'
>>> dag = con.dag(load=['tensor'], persist=['output'])
>>> dag.tensorset('another', ...)
>>> dag.modelrun('model', inputs=['tensor', 'another'], outputs=['output'])
>>> output = dag.tensorget('output').run()
>>> # You can even chain the operations
>>> result = dag.tensorset(**akwargs).modelrun(**bkwargs).tensorget(**ckwargs).run()
"""
return Dag(load, persist, self.execute_command, readonly, self.enable_postprocess)
def loadbackend(self, identifier: AnyStr, path: AnyStr) -> str:
"""
RedisAI by default won't load any backends. User can either explicitly
load the backend by using this function or let RedisAI load the required
backend from the default path on-demand.
Parameters
----------
identifier : str
Representing which backend. Allowed values - TF, TFLITE, TORCH & ONNX
path: str
Path to the shared object of the backend
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.loadbackend('TORCH', '/path/to/the/backend/redisai_torch.so')
'OK'
"""
args = builder.loadbackend(identifier, path)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.loadbackend(res)
def modelset(self,
key: AnyStr,
backend: str,
device: str,
data: ByteString,
batch: int = None,
minbatch: int = None,
tag: AnyStr = None,
inputs: Union[AnyStr, List[AnyStr]] = None,
outputs: Union[AnyStr, List[AnyStr]] = None) -> str:
"""
Set the model on provided key.
Parameters
----------
key : AnyStr
Key name
backend : str
Backend name. Allowed backends are TF, TORCH, TFLITE, ONNX
device : str
Device name. Allowed devices are CPU and GPU. If multiple GPUs are available,
it can be specified using the format GPU:<gpu number>. For example: GPU:0
data : bytes
Model graph read as bytes string
batch : int
Number of batches for doing auto-batching
minbatch : int
Minimum number of samples required in a batch for model execution
tag : AnyStr
Any string that will be saved in RedisAI as tag for the model
inputs : Union[AnyStr, List[AnyStr]]
Input node(s) in the graph. Required only Tensorflow graphs
outputs : Union[AnyStr, List[AnyStr]]
Output node(s) in the graph Required only for Tensorflow graphs
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> # Torch model
>>> model_path = os.path.join('path/to/TorchScriptModel.pt')
>>> model = open(model_path, 'rb').read()
>>> con.modelset("model", 'torch', 'cpu', model, tag='v1.0')
'OK'
>>> # Tensorflow model
>>> model_path = os.path.join('/path/to/tf_frozen_graph.pb')
>>> model = open(model_path, 'rb').read()
>>> con.modelset('m', 'tf', 'cpu', model,
... inputs=['a', 'b'], outputs=['mul'], tag='v1.0')
'OK'
"""
args = builder.modelset(key, backend, device, data,
batch, minbatch, tag, inputs, outputs)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.modelset(res)
def modelget(self, key: AnyStr, meta_only=False) -> dict:
"""
Fetch the model details and the model blob back from RedisAI
Parameters
----------
key : AnyStr
Model key in RedisAI
meta_only : bool
If True, only the meta data will be fetched, not the model blob
Returns
-------
dict
A dictionary of model details such as device, backend etc. The model
blob will be available at the key 'blob'
Example
-------
>>> con.modelget('model', meta_only=True)
{'backend': 'TF', 'device': 'cpu', 'tag': 'v1.0'}
"""
args = builder.modelget(key, meta_only)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.modelget(res)
def modeldel(self, key: AnyStr) -> str:
"""
Delete the model from the RedisAI server
Parameters
----------
key : AnyStr
Key of the model to be deleted
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.modeldel('model')
'OK'
"""
args = builder.modeldel(key)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.modeldel(res)
def modelrun(self,
key: AnyStr,
inputs: Union[AnyStr, List[AnyStr]],
outputs: Union[AnyStr, List[AnyStr]]) -> str:
"""
Run the model using input(s) which are already in the scope and are associated
to some keys. Modelrun also needs the output key name(s) to store the output
        from the model. The number of outputs from the model and the number of keys
        provided here must be the same; otherwise, RedisAI throws an error
Parameters
----------
key : str
Model key to run
inputs : Union[AnyStr, List[AnyStr]]
Tensor(s) which is already saved in the RedisAI using a tensorset call. These
tensors will be used as the input for the modelrun
outputs : Union[AnyStr, List[AnyStr]]
            keys to which the outputs will be saved. If those keys already exist, modelrun
            will overwrite them with new values
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.modelset('m', 'tf', 'cpu', model_pb,
... inputs=['a', 'b'], outputs=['mul'], tag='v1.0')
'OK'
>>> con.tensorset('a', (2, 3), dtype='float')
'OK'
>>> con.tensorset('b', (2, 3), dtype='float')
'OK'
>>> con.modelrun('m', ['a', 'b'], ['c'])
'OK'
"""
args = builder.modelrun(key, inputs, outputs)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.modelrun(res)
def modelscan(self) -> List[List[AnyStr]]:
"""
Returns the list of all the models in the RedisAI server. Modelscan API is
currently experimental and might be removed or changed in the future without
warning
Returns
-------
List[List[AnyStr]]
List of list of models and tags for each model if they existed
Example
-------
>>> con.modelscan()
[['pt_model', ''], ['m', 'v1.2']]
"""
warnings.warn("Experimental: Model List API is experimental and might change "
"in the future without any notice", UserWarning)
args = builder.modelscan()
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.modelscan(res)
def tensorset(self,
key: AnyStr,
tensor: Union[np.ndarray, list, tuple],
shape: Sequence[int] = None,
dtype: str = None) -> str:
"""
Set the tensor to a key in RedisAI
Parameters
----------
key : AnyStr
The name of the tensor
tensor : Union[np.ndarray, list, tuple]
A `np.ndarray` object or Python list or tuple
shape : Sequence[int]
Shape of the tensor. Required if `tensor` is list or tuple
dtype : str
Data type of the tensor. Required if `tensor` is list or tuple
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.tensorset('a', (2, 3), dtype='float')
'OK'
>>> input_array = np.array([2, 3], dtype=np.float32)
>>> con.tensorset('x', input_array)
'OK'
"""
args = builder.tensorset(key, tensor, shape, dtype)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.tensorset(res)
def tensorget(self,
key: AnyStr, as_numpy: bool = True,
meta_only: bool = False) -> Union[dict, np.ndarray]:
"""
        Retrieve the value of a tensor from the server. By default it returns the numpy
        array, but this can be controlled using the `as_numpy` and `meta_only` arguments.
Parameters
----------
key : AnyStr
The name of the tensor
as_numpy : bool
If True, returns a numpy.ndarray. Returns the value as a list and the
metadata in a dictionary if False. This flag also decides how to fetch
the value from the RedisAI server, which also has performance implications
meta_only : bool
If True, the value is not retrieved, only the shape and the type
Returns
-------
Union[dict, np.ndarray]
Returns a dictionary of data or a numpy array. Default is numpy array
Example
-------
>>> con.tensorget('x')
array([2, 3, 4])
        >>> con.tensorget('x', as_numpy=False)
{'values': [2, 3, 4], 'dtype': 'INT64', 'shape': [3]}
>>> con.tensorget('x', meta_only=True)
{'dtype': 'INT64', 'shape': [3]}
"""
args = builder.tensorget(key, as_numpy, meta_only)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.tensorget(res,
as_numpy, meta_only)
def scriptset(self, key: AnyStr, device: str, script: str, tag: AnyStr = None) -> str:
"""
Set the script to RedisAI. Action similar to Modelset. RedisAI uses the TorchScript
engine to execute the script. So the script should have only TorchScript supported
constructs. That being said, it's important to mention that using redisai script
to do post processing or pre processing for a Tensorflow (or any other backend)
is completely valid. For more details about TorchScript and supported ops,
checkout TorchScript documentation.
Parameters
----------
key : AnyStr
Script key at the server
device : str
            Device name. Allowed devices are CPU and GPU. If multiple GPUs are available,
            it can be specified using the format GPU:<gpu number>. For example: GPU:0
script : str
Script itself, as a Python string
tag : AnyStr
Any string that will be saved in RedisAI as tag for the model
Returns
-------
str
'OK' if success, raise an exception otherwise
Note
----
Even though ``script`` is pure Python code, it's a subset of Python language and not
all the Python operations are supported. For more details, checkout TorchScript
        documentation. It's also important to note that the script is executed on a high
performance C++ runtime instead of the Python interpreter. And hence ``script`` should
not have any import statements (A common mistake people make all the time)
Example
-------
>>> script = open(scriptpath).read()
>>> con.scriptset('ket', 'cpu', script)
'OK'
"""
args = builder.scriptset(key, device, script, tag)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.scriptset(res)
def scriptget(self, key: AnyStr, meta_only=False) -> dict:
"""
Get the saved script from RedisAI. Operation similar to model get
Parameters
----------
key : AnyStr
Key of the script
meta_only : bool
If True, only the meta data will be fetched, not the script itself
Returns
-------
dict
Dictionary of script details which includes the script at the key ``source``
Example
-------
>>> con.scriptget('ket', meta_only=True)
{'device': 'cpu'}
"""
args = builder.scriptget(key, meta_only)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.scriptget(res)
def scriptdel(self, key: AnyStr) -> str:
"""
Delete the script from the RedisAI server
Parameters
----------
key : AnyStr
Script key to be deleted
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.scriptdel('ket')
'OK'
"""
args = builder.scriptdel(key)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.scriptdel(res)
def scriptrun(self,
key: AnyStr,
function: AnyStr,
inputs: Union[AnyStr, Sequence[AnyStr]],
outputs: Union[AnyStr, Sequence[AnyStr]]
) -> str:
"""
Run an already set script. Similar to modelrun
Parameters
----------
key : AnyStr
Script key
function : AnyStr
Name of the function in the ``script``
inputs : Union[AnyStr, List[AnyStr]]
            Tensor(s) which is already saved in the RedisAI using a tensorset call. These
            tensors will be used as the input for the scriptrun
        outputs : Union[AnyStr, List[AnyStr]]
            keys to which the outputs will be saved. If those keys already exist, scriptrun
            will overwrite them with new values
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.scriptrun('ket', 'bar', inputs=['a', 'b'], outputs=['c'])
'OK'
"""
args = builder.scriptrun(key, function, inputs, outputs)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.scriptrun(res)
def scriptscan(self) -> List[List[AnyStr]]:
"""
        Returns the list of all the scripts in the RedisAI server. Scriptscan API is
        currently experimental and might be removed or changed in the future without warning
Returns
-------
List[List[AnyStr]]
List of list of scripts and tags for each script if they existed
Example
-------
>>> con.scriptscan()
[['ket1', 'v1.0'], ['ket2', '']]
"""
warnings.warn("Experimental: Script List API is experimental and might change "
"in the future without any notice", UserWarning)
args = builder.scriptscan()
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.scriptscan(res)
def infoget(self, key: AnyStr) -> dict:
"""
Get information such as
- How long since the model has been running
- How many samples have been processed
- How many calls handled
- How many errors raised
- etc.
Parameters
----------
key : AnyStr
Model key
Returns
-------
dict
Dictionary of model run details
Example
-------
>>> con.infoget('m')
{'key': 'm', 'type': 'MODEL', 'backend': 'TF', 'device': 'cpu', 'tag': '',
'duration': 0, 'samples': 0, 'calls': 0, 'errors': 0}
"""
args = builder.infoget(key)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.infoget(res)
def inforeset(self, key: AnyStr) -> str:
"""
Reset the run information about the model
Parameters
----------
key : AnyStr
Model key
Returns
-------
str
'OK' if success, raise an exception otherwise
Example
-------
>>> con.inforeset('m')
'OK'
"""
args = builder.inforeset(key)
res = self.execute_command(*args)
return res if not self.enable_postprocess else processor.inforeset(res)
class Pipeline(RedisPipeline, Client):
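    """Experimental pipeline mixing native redis and RedisAI commands.

    Post-processing is disabled for pipelines; the only exception is
    tensorget, whose raw replies are converted when the pipeline executes."""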
def __init__(self, enable_postprocess, *args, **kwargs):
        warnings.warn("Pipelining AI commands through this client is experimental.",
UserWarning)
self.enable_postprocess = False
if enable_postprocess:
warnings.warn("Postprocessing is enabled but not allowed in pipelines."
"Disable postprocessing to remove this warning.", UserWarning)
self.tensorget_processors = []
super().__init__(*args, **kwargs)
def dag(self, *args, **kwargs):
raise RuntimeError("Pipeline object doesn't allow DAG creation currently")
def tensorget(self, key, as_numpy=True, meta_only=False):
self.tensorget_processors.append(partial(processor.tensorget,
as_numpy=as_numpy,
meta_only=meta_only))
return super().tensorget(key, as_numpy, meta_only)
def _execute_transaction(self, *args, **kwargs):
# TODO: Blocking commands like MODELRUN, SCRIPTRUN and DAGRUN won't work
res = super()._execute_transaction(*args, **kwargs)
for i in range(len(res)):
            # a raw tensorget reply is a list of at least 4 items, even when meta_only=True
if isinstance(res[i], list) and len(res[i]) >= 4:
res[i] = self.tensorget_processors.pop(0)(res[i])
return res
def _execute_pipeline(self, *args, **kwargs):
res = super()._execute_pipeline(*args, **kwargs)
for i in range(len(res)):
            # a raw tensorget reply is a list of at least 4 items, even when meta_only=True
if isinstance(res[i], list) and len(res[i]) >= 4:
res[i] = self.tensorget_processors.pop(0)(res[i])
return res
class Dag:
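    """Builder for RedisAI DAG commands (AI.DAGRUN / AI.DAGRUN_RO).

    tensorset, tensorget and modelrun calls are collected (and chainable)
    and sent to the server as a single DAG when run() is called."""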
def __init__(self, load, persist, executor, readonly=False, postprocess=True):
self.result_processors = []
        self.enable_postprocess = postprocess
if readonly:
if persist:
raise RuntimeError("READONLY requests cannot write (duh!) and should not "
"have PERSISTing values")
self.commands = ['AI.DAGRUN_RO']
else:
self.commands = ['AI.DAGRUN']
if load:
if not isinstance(load, (list, tuple)):
self.commands += ["LOAD", 1, load]
else:
self.commands += ["LOAD", len(load), *load]
if persist:
if not isinstance(persist, (list, tuple)):
self.commands += ["PERSIST", 1, persist, '|>']
else:
self.commands += ["PERSIST", len(persist), *persist, '|>']
else:
self.commands.append('|>')
self.executor = executor
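        # e.g. load=['t1'], persist=['out'] assembles:
        #   ['AI.DAGRUN', 'LOAD', 1, 't1', 'PERSIST', 1, 'out', '|>']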
def tensorset(self,
key: AnyStr,
tensor: Union[np.ndarray, list, tuple],
shape: Sequence[int] = None,
dtype: str = None) -> Any:
args = builder.tensorset(key, tensor, shape, dtype)
self.commands.extend(args)
self.commands.append("|>")
self.result_processors.append(bytes.decode)
return self
def tensorget(self,
key: AnyStr, as_numpy: bool = True,
meta_only: bool = False) -> Any:
args = builder.tensorget(key, as_numpy, meta_only)
self.commands.extend(args)
self.commands.append("|>")
self.result_processors.append(partial(processor.tensorget,
as_numpy=as_numpy,
meta_only=meta_only))
return self
def modelrun(self,
key: AnyStr,
inputs: Union[AnyStr, List[AnyStr]],
outputs: Union[AnyStr, List[AnyStr]]) -> Any:
args = builder.modelrun(key, inputs, outputs)
self.commands.extend(args)
self.commands.append("|>")
self.result_processors.append(bytes.decode)
return self
def run(self):
results = self.executor(*self.commands)
if self.enable_postprocess:
out = []
for res, fn in zip(results, self.result_processors):
out.append(fn(res))
else:
out = results
return out
def enable_debug(f):
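    """Wrap ``execute_command`` so every command is printed before it is sent."""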
@wraps(f)
def wrapper(*args):
print(*args)
return f(*args)
return wrapper
| 35.915698 | 96 | 0.562444 | 2,820 | 24,710 | 4.879078 | 0.153546 | 0.032124 | 0.033578 | 0.022894 | 0.408242 | 0.381278 | 0.353877 | 0.334254 | 0.310778 | 0.303656 | 0 | 0.00346 | 0.333266 | 24,710 | 687 | 97 | 35.967977 | 0.831684 | 0.462121 | 0 | 0.358852 | 0 | 0 | 0.052458 | 0 | 0 | 0 | 0 | 0.001456 | 0 | 1 | 0.143541 | false | 0 | 0.038278 | 0 | 0.320574 | 0.004785 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dc34e1a31c3e0178ef693eb52a967b5a3bb0210 | 1,488 | py | Python | example_competition_fit.py | Justin60145/pybindingcurve | eb528472c9aefb2d2d50611e7b9d8e9bc9ab6926 | [
"MIT"
] | null | null | null | example_competition_fit.py | Justin60145/pybindingcurve | eb528472c9aefb2d2d50611e7b9d8e9bc9ab6926 | [
"MIT"
] | null | null | null | example_competition_fit.py | Justin60145/pybindingcurve | eb528472c9aefb2d2d50611e7b9d8e9bc9ab6926 | [
"MIT"
] | null | null | null | """Fitting example, determining Kd from 1:1:1 competition data"""
import numpy as np
import pybindingcurve as pbc
import sys
# We can choose to work in a common unit, typically nM or uM; as long as all
# numbers are in the same unit, the result is valid. We assume uM for all
# concentrations below.
# Experimental data
xcoords = np.array([0.0, 4.2, 8.4, 16.8, 21.1, 31.6, 35.8, 40.0])
ycoords = np.array([150, 330, 1050, 3080, 4300, 6330, 6490, 6960])
# Construct the PyBindingCurve object, operating on a 1:1:1 (competition) system, and add experimental data to the plot
mySystem = pbc.BindingCurve("1:1:1")
mySystem.add_scatter(xcoords, ycoords)
# Known system parameters, kdpl will be added to this by fitting
system_parameters = {"p": xcoords, "l": 10, "i": 10, "kdpl": 10, 'ymin':np.min(ycoords)}
# Now we call fit, passing the known parameters, followed by a dict of parameters to be fitted along
# with an initial guess, pass the ycoords, and what the readout (ycoords) is
fitted_system, fit_accuracy = mySystem.fit(system_parameters, {"kdpi": 0, 'ymax':np.max(ycoords)}, ycoords)
# Print out the fitted parameters
for k, v in fit_accuracy.items():
print(f"Fit: {k}={fitted_system[k]} +/- {v}")
# Assign more points to 'p' to make a smooth plot
fitted_system["p"] = np.linspace(0, np.max(xcoords))
# Add a new curve, simulated using fitted parameters to our BindingCurve object
mySystem.add_curve(fitted_system)
# Show the plot
mySystem.show_plot(ylabel="Signal")
| 39.157895 | 118 | 0.728495 | 251 | 1,488 | 4.2749 | 0.513944 | 0.011184 | 0.008388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054011 | 0.153898 | 1,488 | 37 | 119 | 40.216216 | 0.798253 | 0.521505 | 0 | 0 | 0 | 0 | 0.094964 | 0.031655 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dc679a7bd73f1dd22fd0d3b27ab18dc75d8b334 | 12,170 | py | Python | omnicanvas/canvas.py | samirelanduk/omnicanvas | edc22ec802da6188759fbbbb30f0dd44aabb3a7a | [
"MIT"
] | 2 | 2016-03-11T20:17:27.000Z | 2017-08-31T11:45:52.000Z | omnicanvas/canvas.py | samirelanduk/omnicanvas | edc22ec802da6188759fbbbb30f0dd44aabb3a7a | [
"MIT"
] | null | null | null | omnicanvas/canvas.py | samirelanduk/omnicanvas | edc22ec802da6188759fbbbb30f0dd44aabb3a7a | [
"MIT"
] | null | null | null | """This module contains the main Canvas class."""
from .color import process_color
from . import graphics
from . import svg
class Canvas:
"""A backdrop on which other :py:class:`.Graphic` objects are painted.
:param width: The canvas's width in pixels.
:param height: The canvas's height in pixels.
:param background_color: The canvas's background colour - the default is\
white"""
def __init__(self, width, height, background_color=None):
if isinstance(width, float):
width = round(width)
if not isinstance(width, int):
raise TypeError("Width must be numeric, not '%s'" % width)
self._width = width
if isinstance(height, float):
height = round(height)
if not isinstance(height, int):
raise TypeError("Height must be numeric, not '%s'" % height)
self._height = height
if background_color is None:
self._background_color = None
else:
self._background_color = process_color(background_color)
self._graphics = []
def __repr__(self):
return "<Canvas %i×%i (%i Graphics)>" % (
self._width, self._height, len(self._graphics)
)
def width(self, width=None):
"""The canvas's width in pixels. Passing a value will update the width
property.
:param width: If given, the canvas's width will be set to this.
:rtype: ``int``"""
if width is None:
return self._width
else:
if isinstance(width, float):
width = round(width)
if not isinstance(width, int):
raise TypeError("Width must be numeric, not '%s'" % width)
self._width = width
def height(self, height=None):
"""The canvas's height in pixels. Passing a value will update the height
property.
:param height: If given, the canvas's height will be set to this.
:rtype: ``int``"""
if height is None:
return self._height
else:
if isinstance(height, float):
height = round(height)
if not isinstance(height, int):
raise TypeError("Height must be numeric, not '%s'" % height)
self._height = height
def background_color(self, background_color=None):
"""The canvas's background colour, as a hex string. Passing a value will
update the background_color property (as a hex string).
:param str background_color: If given, the canvas's background_color \
will be set to this.
:rtype: ``str``"""
if background_color is None:
return self._background_color
else:
self._background_color = process_color(background_color)
def graphics(self):
"""A list of all the :py:class:`.Graphic` objects on this canvas.
:rtype: ``list``"""
return list(self._graphics)
def get_graphic_by_name(self, name):
"""Searches the canvas's :py:class:`.Graphic` objects and returns the
first one with a matching name. Returns ``None`` if there are no
matches.
:param str name: The name to search by.
:rtype: str"""
if not isinstance(name, str):
raise TypeError(
"Can only search for str name, not '%s'" % str(name)
)
for graphic in self.graphics():
if graphic.name() == name:
return graphic
def get_graphics_by_name(self, name):
"""Searches the canvas's :py:class:`.Graphic` objects and returns all
the ones with a matching name. Returns an empty list if there are no
matches.
:param str name: The name to search by.
:returns: ``list`` of :py:class:`.Graphic`"""
if not isinstance(name, str):
raise TypeError(
"Can only search for str name, not '%s'" % str(name)
)
return [g for g in self.graphics() if g.name() == name]
def move_graphic_forward(self, graphic):
"""Moves a :py:class:`.Graphic` forward - that is, closer to the viewer.
This method will make the :py:class:`.Graphic` more visible if it was
occluded.
:param Graphic graphic: The :py:class:`.Graphic` to move forward."""
if not isinstance(graphic, graphics.Graphic):
raise TypeError("%s is not a Graphic" % str(graphic))
        if graphic is not self.graphics()[-1]:
index = self.graphics().index(graphic)
self._graphics[index], self._graphics[index + 1] = (
self._graphics[index + 1], self._graphics[index]
)
def move_graphic_backward(self, graphic):
"""Shifts a :py:class:`.Graphic` backward - away from the viewer. This
method will hide the :py:class:`.Graphic` behind others.
:param Graphic graphic: The :py:class:`.Graphic` to move backward."""
if not isinstance(graphic, graphics.Graphic):
raise TypeError("%s is not a Graphic" % str(graphic))
        if graphic not in self.graphics():
            raise ValueError("%s is not a Graphic in %s" % (
             graphic, self
            ))
        if graphic is not self.graphics()[0]:
            index = self.graphics().index(graphic)
self._graphics[index], self._graphics[index - 1] = (
self._graphics[index - 1], self._graphics[index]
)
def add_rectangle(self, *args, **kwargs):
"""Adds a :py:class:`.Rectangle` to the canvas.
:param x: The x-coordinate of the Rectangle's upper left corner.
:param y: The y-coordinate of the Rectangle's upper left corner.
:param width: The Rectangle's width.
:param height: The Rectangle's height.
:param str fill_color: The Rectangle's interior colour.
:param opacity: The degree of transparency, from 0 to 1 (0 being\
invisible).
:param line_width: The width of the edge of the Rectangle in pixels.
:param str line_style: The pattern of the edges. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: The colour of the edge.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle).
:param dict data: Any data to be associated with the Rectangle.
:rtype: :py:class:`.Rectangle`"""
self._graphics.append(graphics.Rectangle(*args, **kwargs))
return self._graphics[-1]
def add_line(self, *args, **kwargs):
"""Adds a :py:class:`.Line` to the canvas.
:param x1: The x-coordinate of the Line's start point.
:param y1: The y-coordinate of the Line's start point.
:param x2: The x-coordinate of the Line's end point.
:param y2: The y-coordinate of the Line's end point.
:param line_width: The width of the Line in pixels.
:param str line_style: The pattern of the Line. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: The colour of the Line.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle).
:param dict data: Any data to be associated with the Line.
:rtype: :py:class:`.Line`"""
self._graphics.append(graphics.Line(*args, **kwargs))
return self._graphics[-1]
def add_oval(self, *args, **kwargs):
"""Adds a :py:class:`.Oval` to the canvas.
:param x: The x-coordinate of the Oval's bounding rectangle upper left corner.
:param y: The y-coordinate of the Oval's bounding rectangle upper left corner.
:param width: The bounding rectangle's width.
:param height: The bounding rectangle's height.
:param str fill_color: The Oval's interior colour.
:param opacity: The degree of transparency, from 0 to 1 (0 being\
invisible).
:param line_width: The width of the edge of the Oval in pixels.
:param str line_style: The pattern of the edges. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: The colour of the edge.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle).
:param dict data: Any data to be associated with the Oval.
:rtype: :py:class:`.Oval`"""
self._graphics.append(graphics.Oval(*args, **kwargs))
return self._graphics[-1]
def add_polygon(self, *args, **kwargs):
"""Adds a :py:class:`.Polygon` to the canvas.
:param \*points: The alternating x and y values of the Polygon's\
corners.
:param str fill_color: The Polygon's interior colour.
:param opacity: The degree of transparency, from 0 to 1 (0 being\
invisible).
:param line_width: The width of the edge of the Polygon in pixels.
:param str line_style: The pattern of the edges. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: The colour of the edge.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle).
:param dict data: Any data to be associated with the Polygon.
:rtype: :py:class:`.Polygon`"""
self._graphics.append(graphics.Polygon(*args, **kwargs))
return self._graphics[-1]
def add_text(self, *args, **kwargs):
"""Adds a :py:class:`.Text` to the canvas.
:param x: The Text's x location.
:param y: The Text's y location.
:param str text: The text to display.
:param font_size: The font size of the Text when displayed.
:param horizontal_align: The horizontal alignment of the Text. Acceptable\
values are ``left``, ``center`` (default) and ``right``.
:param vertical_align: The vertical alignment of the Text. Acceptable\
values are ``top``, ``middle`` (default) and ``bottom``.
:param str fill_color: Defaults to '#FFFFFF'.
:param opacity: The degree of transparency, from 0 to 1 (0 being\
invisible).
:param line_width: Defaults to 0.
:param str line_style: The line pattern. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: Defaults to '#000000'.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle), in degrees.
:param dict data: Any data to be associated with the Text.
:rtype: :py:class:`.Text`"""
self._graphics.append(graphics.Text(*args, **kwargs))
return self._graphics[-1]
def add_polyline(self, *args, **kwargs):
"""Adds a :py:class:`.Polyline` to the canvas.
:param \*points: The alternating x and y values of the Polyline's\
corners.
:param line_width: The width of the edge of the Polyline in pixels.
:param str line_style: The pattern of the edges. Acceptable values are\
``-`` (default), ``..`` (dotted) or ``--`` (dashed).
:param str line_color: The colour of the edge.
:param tuple rotation: Any rotation to be applied, in the format\
(x of rotation point, y of rotation point, angle).
:param dict data: Any data to be associated with the Polyline.
:rtype: :py:class:`.Polyline`"""
self._graphics.append(graphics.Polyline(*args, **kwargs))
return self._graphics[-1]
def save(self, path):
"""Saves the canvas to file as an SVG file.
:param str path: The location and filename to save to."""
with open(path, "w") as f:
f.write(self.to_svg())
to_svg = svg.generate_canvas_svg
"""Returns the SVG text of the canvas.
Any ``data`` attributes of the Graphics contained will be rendered as SVG
attributes.
:rtype: ``str``"""
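# A minimal usage sketch (hypothetical file name and colours; API as above):
#
#     canvas = Canvas(700, 500, background_color="#F0F0F0")
#     canvas.add_rectangle(10, 10, 200, 100, fill_color="#3344FF")
#     canvas.save("demo.svg")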
| 38.881789 | 86 | 0.607149 | 1,624 | 12,170 | 4.485222 | 0.123153 | 0.023339 | 0.019769 | 0.014827 | 0.681082 | 0.632345 | 0.602416 | 0.56123 | 0.47872 | 0.461697 | 0 | 0.004126 | 0.283073 | 12,170 | 312 | 87 | 39.00641 | 0.830602 | 0.544618 | 0 | 0.438095 | 0 | 0 | 0.064772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.161905 | false | 0 | 0.028571 | 0.009524 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dc81b40f2ab3a5f5078ee1f4cee953dbcbaaed7 | 5,287 | py | Python | tests/core/jobgraph_test.py | Yelp/Tron | d60b015163418bf66f638e4c12337289ad8c040a | [
"Apache-2.0"
] | 190 | 2015-01-01T17:40:46.000Z | 2022-02-02T09:32:03.000Z | tests/core/jobgraph_test.py | Yelp/Tron | d60b015163418bf66f638e4c12337289ad8c040a | [
"Apache-2.0"
] | 237 | 2015-01-14T19:25:01.000Z | 2022-03-15T18:33:29.000Z | tests/core/jobgraph_test.py | Yelp/Tron | d60b015163418bf66f638e4c12337289ad8c040a | [
"Apache-2.0"
] | 40 | 2015-01-22T07:54:15.000Z | 2022-03-03T08:01:29.000Z | from unittest import mock
import pytest
from tron.config.schema import ConfigAction
from tron.config.schema import ConfigJob
from tron.core.jobgraph import AdjListEntry
from tron.core.jobgraph import JobGraph
MISSING_DEPENDENCY_ERR_MSG = """The following actions are dependencies of other actions but missing:
Action other.job2.action3 is dependency of actions:
- MASTER.job3.action5
Please check if you have deleted/renamed any of them or their containing jobs."""
def _setup_job_graph_config_container():
action1 = ConfigAction(name="action1", command="do something",)
action2 = ConfigAction(name="action2", command="do something", requires=["action1"],)
job1_config = ConfigJob(
name="job1",
node="default",
schedule=mock.Mock(),
actions={"action1": action1, "action2": action2},
namespace="MASTER",
)
action3 = ConfigAction(
name="action3", command="do something", triggered_by=["MASTER.job1.action2.shortdate.{shortdate}"],
)
job2_config = ConfigJob(
name="job1", node="default", schedule=mock.Mock(), actions={"action3": action3}, namespace="other",
)
action4 = ConfigAction(name="action4", command="do something",)
action5 = ConfigAction(
name="action5",
command="do something",
requires=["action4"],
triggered_by=["other.job2.action3.shortdate.{shortdate}"],
)
job3_config = ConfigJob(
name="job1",
node="default",
schedule=mock.Mock(),
actions={"action4": action4, "action5": action5},
namespace="MASTER",
)
config_container = mock.Mock()
config_container.get_jobs.return_value = {
"MASTER.job1": job1_config,
"other.job2": job2_config,
"MASTER.job3": job3_config,
}
return config_container
class TestJobGraph:
def setup_method(self):
self.job_graph = JobGraph(_setup_job_graph_config_container(), should_validate_missing_dependency=True)
def test_job_graph_missing_dependency(self):
missing_dependency_config_container = _setup_job_graph_config_container()
missing_dependency_config_container.get_jobs.return_value.pop("other.job2")
with pytest.raises(ValueError) as e:
JobGraph(
missing_dependency_config_container, should_validate_missing_dependency=True,
)
assert str(e.value) == MISSING_DEPENDENCY_ERR_MSG
def test_job_graph(self):
assert sorted(list(self.job_graph.action_map.keys())) == [
"MASTER.job1.action1",
"MASTER.job1.action2",
"MASTER.job3.action4",
"MASTER.job3.action5",
"other.job2.action3",
]
assert self.job_graph._actions_for_job == {
"MASTER.job1": ["MASTER.job1.action1", "MASTER.job1.action2"],
"other.job2": ["other.job2.action3"],
"MASTER.job3": ["MASTER.job3.action4", "MASTER.job3.action5"],
}
assert self.job_graph._adj_list == {
"MASTER.job1.action1": [AdjListEntry("MASTER.job1.action2", False)],
"MASTER.job1.action2": [AdjListEntry("other.job2.action3", True)],
"other.job2.action3": [AdjListEntry("MASTER.job3.action5", True)],
"MASTER.job3.action4": [AdjListEntry("MASTER.job3.action5", False)],
}
assert self.job_graph._rev_adj_list == {
"MASTER.job1.action1": [],
"MASTER.job1.action2": [AdjListEntry("MASTER.job1.action1", False)],
"other.job2.action3": [AdjListEntry("MASTER.job1.action2", True)],
"MASTER.job3.action4": [],
"MASTER.job3.action5": [
AdjListEntry("MASTER.job3.action4", False),
AdjListEntry("other.job2.action3", True),
],
}
def test_get_action_graph_for_job(self):
action_graph_1 = self.job_graph.get_action_graph_for_job("MASTER.job1")
assert sorted(action_graph_1.action_map.keys()) == [
"action1",
"action2",
]
assert action_graph_1.required_actions == {
"action1": set(),
"action2": {"action1"},
}
assert action_graph_1.required_triggers == {
"other.job2.action3": {"action2"},
"MASTER.job3.action5": {"other.job2.action3"},
}
action_graph_2 = self.job_graph.get_action_graph_for_job("other.job2")
assert sorted(action_graph_2.action_map.keys()) == [
"action3",
]
assert action_graph_2.required_actions == {
"action3": set(),
}
assert action_graph_2.required_triggers == {
"action3": {"MASTER.job1.action2"},
"MASTER.job3.action5": {"action3"},
}
action_graph_3 = self.job_graph.get_action_graph_for_job("MASTER.job3")
assert sorted(action_graph_3.action_map.keys()) == [
"action4",
"action5",
]
assert action_graph_3.required_actions == {
"action4": set(),
"action5": {"action4"},
}
assert action_graph_3.required_triggers == {
"action5": {"other.job2.action3"},
"other.job2.action3": {"MASTER.job1.action2"},
}
| 37.496454 | 111 | 0.617931 | 560 | 5,287 | 5.610714 | 0.176786 | 0.054106 | 0.061108 | 0.021642 | 0.393698 | 0.222152 | 0.120624 | 0.088797 | 0.078612 | 0.054424 | 0 | 0.037028 | 0.249102 | 5,287 | 140 | 112 | 37.764286 | 0.754408 | 0 | 0 | 0.064 | 0 | 0 | 0.259315 | 0.015321 | 0 | 0 | 0 | 0 | 0.112 | 1 | 0.04 | false | 0 | 0.048 | 0 | 0.104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dcb9a27c38aef465381d85b09fff62010ca1d8b | 785 | py | Python | constraints_wrapper.py | JulesGillet/Optimization_tools | 1fd6726477cd3db1b210b6b119f1592f5a3591df | [
"Apache-2.0"
] | null | null | null | constraints_wrapper.py | JulesGillet/Optimization_tools | 1fd6726477cd3db1b210b6b119f1592f5a3591df | [
"Apache-2.0"
] | null | null | null | constraints_wrapper.py | JulesGillet/Optimization_tools | 1fd6726477cd3db1b210b6b119f1592f5a3591df | [
"Apache-2.0"
] | null | null | null | import numpy as np
def Ineg_wrapper(valS, valI):
"""
    Function used to wrap an inequality into a suitable form for optimisation.
    valS > valI --> inequality is satisfied (near-zero penalty)
    valS and valI should be scalars; for 1d arrays, apply the wrapper
    elementwise (the branching below only supports scalars)
"""
epsilon = 1e-6
top = 1e3
ecart = valI - valS
if ecart < epsilon:
out = np.exp(ecart) * epsilon / np.exp(epsilon)
elif ecart > top:
out = np.log(ecart) * top / np.log(top)
else:
out = ecart
return out
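# Sanity check (illustrative values):
# Ineg_wrapper(5.0, 1.0) -> exp(-4) * 1e-6 / exp(1e-6) ~ 1.8e-8 (satisfied)
# Ineg_wrapper(1.0, 5.0) -> 4.0 (violated: penalty grows with the gap)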
def Eg_wrapper(val, ref):
"""
    Function used to wrap an equality into a suitable form for optimisation.
    val = ref --> equality is satisfied
    val and ref should be scalars (see Ineg_wrapper)
"""
out = Ineg_wrapper(val, ref) + Ineg_wrapper(ref, val)
return out | 28.035714 | 77 | 0.6 | 110 | 785 | 4.245455 | 0.436364 | 0.070664 | 0.059957 | 0.077088 | 0.218415 | 0.218415 | 0 | 0 | 0 | 0 | 0 | 0.011194 | 0.317197 | 785 | 28 | 78 | 28.035714 | 0.860075 | 0.380892 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dcc1c94bd2acf5dd269a3bdc97c8e0d7cdf129c | 2,483 | py | Python | CIFARHelper.py | JosieHong/Handwritten-digit-recognition-based-on-CNN | dc4f84429120485db05ff547607ee87d92f3e20f | [
"MIT"
] | 23 | 2019-03-10T00:28:13.000Z | 2021-12-28T12:26:28.000Z | CIFARHelper.py | JosieHong/Handwritten-digit-recognition-based-on-CNN | dc4f84429120485db05ff547607ee87d92f3e20f | [
"MIT"
] | null | null | null | CIFARHelper.py | JosieHong/Handwritten-digit-recognition-based-on-CNN | dc4f84429120485db05ff547607ee87d92f3e20f | [
"MIT"
] | 8 | 2019-05-20T01:49:07.000Z | 2021-05-22T03:22:36.000Z | import numpy as np
def one_hot_encode(vec, vals=10):
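    """Return an (n, vals) one-hot matrix for an integer class vector.

    e.g. one_hot_encode([1, 0], vals=3) -> [[0., 1., 0.], [1., 0., 0.]]
    """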
n = len(vec)
out = np.zeros((n, vals))
out[range(n), vec] = 1
return out
class CifarHelper(object):
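    """Stacks the pickled CIFAR-10 batches into normalized (N, 32, 32, 3)
    image arrays with one-hot labels and serves training mini-batches."""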
def __init__(self, all_data):
self.i = 0
batch_meta = all_data[0]
data_batch1 = all_data[1]
data_batch2 = all_data[2]
data_batch3 = all_data[3]
data_batch4 = all_data[4]
data_batch5 = all_data[5]
test_batch = all_data[6]
# Grabs a list of all the data batches for training
self.all_train_batches = [data_batch1,data_batch2,data_batch3,data_batch4,data_batch5]
# Grabs a list of all the test batches (really just one batch)
self.test_batch = [test_batch]
# Intialize some empty variables for later on
self.training_images = None
self.training_labels = None
self.test_images = None
self.test_labels = None
def set_up_images(self):
print("Setting Up Training Images and Labels")
# Vertically stacks the training images
self.training_images = np.vstack([d[b"data"] for d in self.all_train_batches])
train_len = len(self.training_images)
# Reshapes and normalizes training images
self.training_images = self.training_images.reshape(train_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the training labels (e.g. [0,0,0,1,0,0,0,0,0,0])
self.training_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.all_train_batches]), 10)
print("Setting Up Test Images and Labels")
# Vertically stacks the test images
self.test_images = np.vstack([d[b"data"] for d in self.test_batch])
test_len = len(self.test_images)
# Reshapes and normalizes test images
self.test_images = self.test_images.reshape(test_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the test labels (e.g. [0,0,0,1,0,0,0,0,0,0])
self.test_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.test_batch]), 10)
def next_batch(self, batch_size):
        # Reshape the slice to (batch_size, 32, 32, 3) rather than assuming a batch size of 100
        x = self.training_images[self.i:self.i+batch_size].reshape(batch_size, 32, 32, 3)
y = self.training_labels[self.i:self.i+batch_size]
self.i = (self.i + batch_size) % len(self.training_images)
return x, y | 39.412698 | 108 | 0.633508 | 387 | 2,483 | 3.888889 | 0.242894 | 0.018605 | 0.019934 | 0.015947 | 0.407309 | 0.319601 | 0.200664 | 0.180731 | 0.180731 | 0.180731 | 0 | 0.045405 | 0.263794 | 2,483 | 63 | 109 | 39.412698 | 0.777899 | 0.20741 | 0 | 0 | 0 | 0 | 0.045965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.026316 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dcc91fd3540a464b94fc0c06c1e61aed4f3d9d0 | 2,394 | py | Python | sul/remote_integrity/config.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | [
"MIT"
] | null | null | null | sul/remote_integrity/config.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | [
"MIT"
] | null | null | null | sul/remote_integrity/config.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | [
"MIT"
] | 1 | 2021-03-18T00:16:02.000Z | 2021-03-18T00:16:02.000Z | #!/usr/bin/env python
import os
from configparser import ConfigParser, NoSectionError, NoOptionError
from sul.remote_integrity.exceptions import ConfigurationException
class Config:
    def __init__(self):
# [server]
self.server_name = None
self.server_port = None
self.server_address = None
# [auth]
self.auth_username = None
self.auth_private_key = None
# [filter]
self.start_directory = None
self.ignore_files = []
self.ignore_directories = []
self.scan_php_modules = True
# [telegram]
self.telegram_api_token = None
self.telegram_api_chat_id = None
@staticmethod
def load(path):
if not os.path.exists(path):
            raise ConfigurationException("Config file '{}' does not exist, is the path correct?".format(path))
config = Config()
parser = ConfigParser()
parser.read(path)
try:
config.server_name = parser.get("server", "server_name")
config.server_port = parser.getint("server", "server_port", fallback=21)
config.server_address = parser.get("server", "server_address")
config.auth_username = parser.get("auth", "auth_username")
config.auth_private_key = os.path.expanduser(parser.get("auth", "auth_private_key"))
config.ignore_files = parser.get("filter", "ignore_files").split(",") or []
config.ignore_directories = parser.get("filter", "ignore_directories").split(",") or []
config.start_directory = parser.get("filter", "start_directory")
config.scan_php_modules = parser.getboolean("filter", "scan_php_modules")
config.telegram_api_token = parser.get("telegram", "telegram_api_token") or None
try:
config.telegram_api_chat_id = parser.getint("telegram", "telegram_api_chat_id") or None
except ValueError:
config.telegram_api_chat_id = None
except (NoSectionError, NoOptionError) as e:
            raise ConfigurationException("{} in config file '{}'".format(str(e), path))
for attr in config.__dict__.keys():
if getattr(config, attr) == "":
                raise ConfigurationException("Missing attribute value '{}' in config file '{}'".format(attr, path))
return config
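# A config file matching the parser calls above would look like this
# (illustrative values):
#
#   [server]
#   server_name = myserver
#   server_port = 22
#   server_address = example.com
#
#   [auth]
#   auth_username = deploy
#   auth_private_key = ~/.ssh/id_rsa
#
#   [filter]
#   start_directory = /var/www
#   ignore_files = .env,cache.db
#   ignore_directories = logs,tmp
#   scan_php_modules = true
#
#   [telegram]
#   telegram_api_token = <token>
#   telegram_api_chat_id = <chat id>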
| 33.71831 | 115 | 0.625731 | 262 | 2,394 | 5.48855 | 0.324427 | 0.05007 | 0.041725 | 0.047288 | 0.049374 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001133 | 0.262322 | 2,394 | 70 | 116 | 34.2 | 0.813137 | 0.023392 | 0 | 0.045455 | 0 | 0 | 0.151801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.068182 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dd4e8cae818a371cde5bd0fde5da4efd76f6bf5 | 6,574 | py | Python | tb/ping/ping.py | falberti/scaling-eureka | d9ec5c145c4a8e0ea2f2a99e0155ec835129f332 | [
"MIT"
] | null | null | null | tb/ping/ping.py | falberti/scaling-eureka | d9ec5c145c4a8e0ea2f2a99e0155ec835129f332 | [
"MIT"
] | null | null | null | tb/ping/ping.py | falberti/scaling-eureka | d9ec5c145c4a8e0ea2f2a99e0155ec835129f332 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import pylxd
import subprocess
import os
import netifaces
if len(sys.argv) != 2:
print("Usage:",sys.argv[0],"<test-bed name>")
sys.exit(1)
if os.geteuid() != 0:
print("You need root permissions to set-up your test-bed")
sys.exit(1)
# Name of the test-bed
NAME = sys.argv[1]
# Create a lxd_client
lxd_client = pylxd.Client()
# Procedure for adding flow into ovs bridge
def addFlow(bridge, in_port, out_port, bidirectional=True):
    """Install an OpenFlow rule on `bridge` forwarding in_port -> out_port
    (and the reverse rule as well when bidirectional is True)."""
    subprocess.run(['ovs-ofctl', 'add-flow', str(bridge),
                    'priority=10,in_port={},actions=output:{}'.format(in_port, out_port)])
    if bidirectional:
        subprocess.run(['ovs-ofctl', 'add-flow', str(bridge),
                        'priority=10,in_port={},actions=output:{}'.format(out_port, in_port)])
######################################################################
# NETWORK
######################################################################
# Create a new OVS bridge for this tb
try:
lxd_client.networks.get(NAME)
print("Network {} already exists. Exiting...".format(NAME))
sys.exit(1)
except pylxd.exceptions.NotFound as e:
pass
network = lxd_client.networks.create(NAME, description='tb network', type='bridge', config={'bridge.driver': 'openvswitch'})
# Delete flows from the bridge - they will be added later
out = subprocess.run(['ovs-ofctl', 'del-flows', NAME])
out = subprocess.run(['ovs-ofctl', 'add-flow', NAME, 'priority=0,actions=drop'])
print("Done with creating network {}".format(NAME))
######################################################################
# STORAGE POOL
######################################################################
# Create a storage pool for this tb
try:
lxd_client.storage_pools.get(NAME)
print("Storage pool {} already exists. Exiting...".format(NAME))
sys.exit(1)
except pylxd.exceptions.NotFound as e:
pass
storage_pool = lxd_client.storage_pools.create({"config": {"size": "15GB"}, "driver": "btrfs", "name": NAME})
print("Done with creating storage pool {}".format(NAME))
######################################################################
# CREATE PROFILES
######################################################################
# Create a profile with two interfaces on the switches
try:
lxd_client.profiles.get(NAME)
print("Profile {} already exists. Exiting....".format(NAME))
sys.exit(1)
except pylxd.exceptions.NotFound as e:
pass
profile = lxd_client.profiles.create(NAME,
devices={
'root': {'path': '/', 'pool': NAME, 'type': 'disk'},
'eth0': {'name': 'eth0', 'nictype': 'bridged', 'parent': NAME, 'type': 'nic'},
})
print("Done with creating profile {}".format(NAME))
######################################################################
# CREATE CONTAINERS
######################################################################
for c_id in range(2):
container_name = NAME+'-'+str(c_id)
try:
lxd_client.containers.get(container_name)
print("Container {} already exists. Exiting...".format(container_name))
sys.exit(1)
except pylxd.exceptions.NotFound as e:
pass
config = {'name': container_name,
'source': {
'type': 'image',
'mode': 'pull',
'server': 'https://cloud-images.ubuntu.com/releases',
'protocol': 'simplestreams',
'fingerprint': '20.04'
},
'profiles': ['default', NAME] }
cont = lxd_client.containers.create(config, wait=True)
cont.start(wait=True)
print("Container #"+str(c_id)+" started")
######################################################################
# NETWORKING
######################################################################
print("Setting up networking...", end="")
# Set-up networking
host_ifaces = {}
for iface in netifaces.interfaces():
# Read ifindex and iflink
with open('/sys/class/net/'+iface+'/ifindex', 'r') as f:
ifindex = int(f.read().strip())
with open('/sys/class/net/'+iface+'/iflink', 'r') as f:
iflink = int(f.read().strip())
host_ifaces[ifindex] = {'name': iface, 'peer_id': iflink}
# Get ifaces from the containers
ifaces = {}
ifaces_map = {}
try:
for c_id in range(2):
container_name = NAME+'-'+str(c_id)
cont = lxd_client.containers.get(container_name)
cont_profiles = [lxd_client.profiles.get(p) for p in cont.profiles]
nics = set([k for p in cont_profiles for k in p.devices if p.devices[k]['type'] == 'nic'])
cont_ifaces = {}
# Read ifindex and iflink
for iface in nics:
ifindex = int(cont.execute(['cat', '/sys/class/net/'+iface+'/ifindex']).stdout.strip())
iflink = int(cont.execute(['cat', '/sys/class/net/'+iface+'/iflink']).stdout.strip())
cont_ifaces[ifindex] = {'name': iface, 'peer_id': iflink}
ifaces[container_name] = cont_ifaces
ifaces_map[container_name] = {}
except Exception as e:
print(e)
sys.exit(1)
# Create the mapping
for c_id in range(2):
container_name = NAME+'-'+str(c_id)
cont_ifaces = ifaces[container_name]
for iface in cont_ifaces:
iface_name = cont_ifaces[iface]['name']
peer_id = cont_ifaces[iface]['peer_id']
ifaces_map[container_name][iface_name] = host_ifaces[peer_id]['name']
# Connect ifaces by means of ovs flows
addFlow(NAME, ifaces_map[NAME+'-0']['eth0'], ifaces_map[NAME+'-1']['eth0'])
print("Done")
print("Pre-configuring IP addresses...", end="")
# Pre-configure IP addresses for all interfaces
container_name = NAME+'-0'
try:
container = lxd_client.containers.get(container_name)
except pylxd.exceptions.NotFound as e:
print("Container {} does not exist. Exiting...".format(container_name))
sys.exit(1)
container.execute(['ip', 'addr', 'add', '10.0.0.1/30', 'dev', 'eth0'])
print("Done")
print("Pre-configuring IP addresses...", end="")
# Pre-configure IP addresses for all interfaces
container_name = NAME+'-1'
try:
container = lxd_client.containers.get(container_name)
except pylxd.exceptions.NotFound as e:
print("Container {} does not exist. Exiting...".format(container_name))
sys.exit(1)
container.execute(['ip', 'addr', 'add', '10.0.0.2/30', 'dev', 'eth0'])
print("Done")
print("==========================================================")
print(" All done. Have fun!")
print("==========================================================")
# exit
sys.exit(0)
| 36.120879 | 133 | 0.564192 | 787 | 6,574 | 4.620076 | 0.238882 | 0.060781 | 0.019802 | 0.023102 | 0.414741 | 0.391089 | 0.339384 | 0.306106 | 0.287954 | 0.287954 | 0 | 0.009684 | 0.167478 | 6,574 | 181 | 134 | 36.320442 | 0.654668 | 0.091117 | 0 | 0.335938 | 0 | 0 | 0.244051 | 0.026461 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007813 | false | 0.03125 | 0.070313 | 0 | 0.078125 | 0.179688 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dd6d33b1b7a3577b46839bbe432ee0f617b3bc5 | 369 | py | Python | FishCDailyQuestion/ex001-010/Python3_008/008_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | FishCDailyQuestion/ex001-010/Python3_008/008_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | FishCDailyQuestion/ex001-010/Python3_008/008_04.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | #!/usr/bin/evn python3
# coding:utf-8
from math import sqrt
from math import fmod # 取余
prime_lst = []
for i in range(100, 201):
for j in range(2, int(sqrt(i))+1):
if fmod(i, j) == 0: # fmod(i, j) 相当于 i % j
break
else:
prime_lst.append(i)
print(prime_lst)
print("\nThere are {} prime numbers in total.".format(len(prime_lst)))
| 20.5 | 70 | 0.598916 | 64 | 369 | 3.390625 | 0.59375 | 0.147465 | 0.129032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039855 | 0.252033 | 369 | 17 | 71 | 21.705882 | 0.746377 | 0.157182 | 0 | 0 | 0 | 0 | 0.124183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
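Because i and j are plain ints in the snippet above, the % operator gives the same result without the float round-trip that math.fmod performs; a compact equivalent, just as a sanity check:

from math import isqrt

def primes_between(lo, hi):
    """Trial division up to isqrt(n); plain % replaces math.fmod for ints."""
    return [n for n in range(lo, hi + 1)
            if n > 1 and all(n % j for j in range(2, isqrt(n) + 1))]

print(primes_between(100, 200))  # same list as prime_lst above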
1dd76808564db3bf60626f352cce1f1a8895916f | 6,156 | py | Python | instruments.py | gexahedron/sc3_microtonal | 94c3091705d1e194dab0ec600b35c968ceb79c95 | [
"MIT"
] | null | null | null | instruments.py | gexahedron/sc3_microtonal | 94c3091705d1e194dab0ec600b35c968ceb79c95 | [
"MIT"
] | null | null | null | instruments.py | gexahedron/sc3_microtonal | 94c3091705d1e194dab0ec600b35c968ceb79c95 | [
"MIT"
] | null | null | null | from sc3.all import *
import random
@synthdef
def bplay(out = 0, buf = 0, rate = 1, amp = 0.5, pan = 0, pos = 0, rel=15):
sig = PlayBuf.ar(2, buf, BufRateScale.ir(buf) * rate,
1, BufDur.kr(buf) * pos * 48000,
done_action=2)
env = EnvGen.ar(Env.linen(0.0, rel, 0), done_action=2)
sig = sig * env
sig = sig * amp
sig = Pan2.ar(sig, pan)
sig = Mix.ar(sig)
Out.ar(out, sig)
@synthdef
def chiptune_varsaw(out=0, freq=440, amp=0.5, dur=1, env_nom=0, decay=0.1, pan=0, gate=1, hpf=100, lpf=10000):
length = dur * 1.5
env_mult = 0.6
    # instantiate EnvGen via the .kr rate constructor, matching its usage elsewhere in this file
    env = env_nom * EnvGen.kr(Env([0.01, 0.5, 0.01],
        [0.2 * env_mult, length * env_mult]), done_action=2) +\
        (1 - env_nom) * EnvGen.kr(Env([0.001, 1, decay, 0.001],
            [0.02 * env_mult, (length + 0.2 - 0.04) * env_mult, 0.02 * env_mult],
            'exponential'), done_action=2)
lag = 0.05
sig = Lag.kr(freq, lag)
sig = [VarSaw.ar(sig * num, random.random()) / (num ** 3) for num in range(1, 4)]
sig = sig * amp
sig = HPF.ar(sig, hpf)
sig = LPF.ar(sig, lpf)
sig = Pan2.ar(sig, pan, 1) * env
sig = Mix(sig)
Out.ar(out, sig)
@synthdef
def chiptune_triangle(out=0, freq=440, amp=0.5, dur=1, decay=0.1, pan=0, gate=1, hpf=100, lpf=10000,
atk = 0.01, dec = 0.075, sus = 1, rel = 1, hold = 0.5):
fenv = freq
sig = (LFTri.ar(fenv, random.random()) * 0.5 + 1)#.round(1 / 15)
env = EnvGen.kr(Env([0, 1, 1, 0], [atk, dec, hold, rel]), done_action=2)
sig = Pan2.ar(sig, pan, 1)
sig = sig * amp * env
    Out.ar(out, sig)  # write to the bus given by the out argument, like the other synthdefs
@synthdef
def fm_2op_sinfb(out=0, freq=440,
m_ratio=1, c_ratio=1,
index=0, i_scale=1, c_atk=4, c_rel=-4,
amp=0.2, atk=0.01, rel=1, pan=0,
fx=0, fxsend=-25,
dur=1, decay=0.1, gate=1, hpf=100, lpf=10000):
i_env = EnvGen.kr(Env([index, index * i_scale, index], [atk, rel], [c_atk, c_rel]), done_action=2)
env = EnvGen.kr(Env.perc(atk, rel, curve=[c_atk, c_rel]), done_action=2)
mod = SinOsc.ar(freq * m_ratio) * (freq * m_ratio * i_env)
car = SinOscFB.ar(freq * c_ratio + mod) * env * amp
car = Pan2.ar(car, pan)
Out.ar(out, car)
@synthdef
def fm_2op_saw(out=0, freq=440,
m_ratio=1, c_ratio=1,
index=0, i_scale=1, c_atk=4, c_rel=-4,
amp=0.2, atk=0.01, rel=1, pan=0,
fx=0, fxsend=-25,
dur=1, decay=0.1, gate=1, hpf=100, lpf=10000):
i_env = EnvGen.kr(Env([index, index * i_scale, index], [atk, rel], [c_atk, c_rel]), done_action=2)
env = EnvGen.kr(Env.perc(atk, rel, curve=[c_atk, c_rel]), done_action=2)
mod = SinOsc.ar(freq * m_ratio) * (freq * m_ratio * i_env)
car = LFSaw.ar(freq * c_ratio + mod) * env * amp
car = Pan2.ar(car, pan)
Out.ar(out, car)
@synthdef
def fm_2op_pulse(out=0, freq=440,
m_ratio=1, c_ratio=1,
index=1, i_scale=5, c_atk=4, c_rel=-4,
amp=0.2, atk=0.01, rel=1, pan=0,
fx=0, fxsend=-25,
dur=1, decay=0.1, gate=1, hpf=100, lpf=10000):
i_env = EnvGen.kr(Env([index, index * i_scale, index], [atk, rel], [c_atk, c_rel]), done_action=2)
env = EnvGen.kr(Env.perc(atk, rel, curve=[c_atk, c_rel]), done_action=2)
mod = SinOsc.ar(freq * m_ratio) * (freq * m_ratio * i_env)
car = LFPulse.ar(freq * c_ratio + mod) * env * amp
car = Pan2.ar(car, pan)
Out.ar(out, car)
# @synthdef
# def bell(out=0, freq=440, pan=0,
# t60=1, pitchy=1, amp=0.25, gate=1):
# exciter = WhiteNoise.ar() * EnvGen.ar(Env.perc(0.001, 0.05), gate) * 0.25
# sig = Klank.ar(
# (
# [1, 2, 2.803, 3.871, 5.074, 7.81, 10.948, 14.421], # freqs
# [1, 0.044, 0.891, 0.0891, 0.794, 0.1, 0.281, 0.079], # amplitudes
# [1, 0.205, 1, 0.196, 0.339, 0.047, 0.058, 0.047] * t60 # ring times
# ),
# exciter,
# freq_scale=freq * pitchy)
# sig = FreeVerb.ar(sig) * amp
# # DetectSilence.ar(sig, 0.001, 0.5, done_action=2)
# sig = Pan2.ar(sig, pan)
# Out.ar(out, sig)
@synthdef
def pluck(out=0, freq = 440, amp = 0.5, decay = 5, coef = 0.1, pan=0):
env = EnvGen.kr(Env.linen(0, decay, 0), done_action=2)
sig = Pluck.ar(
input=WhiteNoise.ar() * amp,
trig=Impulse.kr(0),
maxdelaytime=0.1,
delaytime=1.0 / freq,
decaytime=decay,
coef=coef)
sig = Pan2.ar(sig, pan)
Out.ar(out, sig)
@synthdef
def hihat(out = 0, amp = 0.5, att = 0.01, rel = 0.2, ffreq = 6000, pan = 0):
env = EnvGen.kr(Env.perc(att, rel, amp), done_action=2)
snd = WhiteNoise.ar()
snd = HPF.ar(input=snd, freq=ffreq) * env
Out.ar(out, Pan2.ar(snd, pan))
@synthdef
def snare(out = 0, amp = 0.1, sinfreq = 180, att = 0.01, rel = 0.2, ffreq = 2000, pan = 0):
env = EnvGen.kr(Env.perc(att, rel, amp), done_action=2)
snd1 = HPF.ar(
input=WhiteNoise.ar(),
freq=ffreq) * env
snd2 = SinOsc.ar(freq=sinfreq) * env
sum = snd1 + snd2
Out.ar(out, Pan2.ar(sum, pan))
@synthdef
def kick(out = 0, amp = 0.3, sinfreq = 60, glissf = 0.9, att = 0.01, rel = 0.45, pan = 0):
env = EnvGen.kr(Env.perc(att, rel, amp), done_action=2)
ramp = XLine.kr(
start=sinfreq,
end=sinfreq * glissf,
dur=rel)
snd = SinOsc.ar(freq=ramp) * env
snd = Pan2.ar(snd, pan)
Out.ar(out, snd)
@synthdef
def kalimba(out = 0, freq = 440, amp = 0.1, mix = 0.1, pan=0):
snd = SinOsc.ar(freq) * EnvGen.ar(Env.perc(0.03, Rand(3.0, 4.0), 1, -7), done_action=2)
snd = HPF.ar(LPF.ar(snd, 380), 120)
click = DynKlank.ar(
(
[240 * ExpRand(0.97, 1.02), 2020 * ExpRand(0.97, 1.02), 3151 * ExpRand(0.97, 1.02)],
[0.354, 1, 0.562],
[0.8, 0.07, 0.08],
), BPF.ar(PinkNoise.ar(), 6500, 0.1) * EnvGen.ar(Env.perc(0.001, 0.01))) * 0.1
snd = (snd * mix) + (click * (1 - mix))
snd = Pan2.ar(snd, pan, 1) * amp
Out.ar(out, snd)
| 38.716981 | 110 | 0.532814 | 1,071 | 6,156 | 2.987862 | 0.163399 | 0.053125 | 0.058438 | 0.048125 | 0.531875 | 0.463125 | 0.4475 | 0.42 | 0.387813 | 0.375313 | 0 | 0.116992 | 0.284925 | 6,156 | 158 | 111 | 38.962025 | 0.60995 | 0.103314 | 0 | 0.403101 | 0 | 0 | 0.002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085271 | false | 0 | 0.015504 | 0 | 0.100775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
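For the fm_2op_* synthdefs above, the spectrum they produce follows the usual 2-operator FM rule: with carrier frequency fc = freq * c_ratio and modulator frequency fm = freq * m_ratio, partials appear at |fc - k*fm| and fc + k*fm, with amplitudes governed by Bessel functions of the modulation index (here shaped by i_env). A tiny pure-Python sketch listing the first sideband pairs (names are illustrative):

def fm_sidebands(freq, c_ratio=1.0, m_ratio=1.0, k_max=3):
    """First k_max lower/upper sideband pairs of a 2-op FM voice."""
    fc, fm = freq * c_ratio, freq * m_ratio
    return [(abs(fc - k * fm), fc + k * fm) for k in range(1, k_max + 1)]

print(fm_sidebands(440, m_ratio=2))
# [(440.0, 1320.0), (1320.0, 2200.0), (2200.0, 3080.0)]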
1dd9fcd57958c281d7aab0192a0dcd27ec637673 | 6,679 | py | Python | setup.py | alpa-projects/alpa | 2c54de2a8fa8a48c77069f4bad802f4e8fa6d126 | [
"Apache-2.0"
] | 114 | 2022-03-02T20:38:16.000Z | 2022-03-31T20:41:50.000Z | setup.py | alpa-projects/alpa | 2c54de2a8fa8a48c77069f4bad802f4e8fa6d126 | [
"Apache-2.0"
] | 6 | 2022-03-09T22:04:50.000Z | 2022-03-30T17:53:15.000Z | setup.py | alpa-projects/alpa | 2c54de2a8fa8a48c77069f4bad802f4e8fa6d126 | [
"Apache-2.0"
] | 5 | 2022-03-05T12:04:31.000Z | 2022-03-31T03:55:42.000Z | import glob
import os
import shutil
import subprocess
import sys
from setuptools import setup, find_packages
IS_WINDOWS = sys.platform == "win32"
def get_cuda_version(cuda_home):
"""Locate the CUDA version."""
version_file = os.path.join(cuda_home, "version.txt")
try:
if os.path.isfile(version_file):
with open(version_file, "r") as f_version:
version_str = f_version.readline().replace("\n", "").replace("\r", "")
return version_str.split(" ")[2][:4]
else:
version_str = subprocess.check_output(
[os.path.join(cuda_home, "bin", "nvcc"), "--version"]
)
version_str = str(version_str).replace("\n", "").replace("\r", "")
idx = version_str.find("release")
return version_str[idx + len("release "):idx + len("release ") + 4]
    except (RuntimeError, OSError, subprocess.CalledProcessError):
        raise RuntimeError("Cannot read cuda version file")
def locate_cuda():
"""Locate the CUDA environment on the system."""
# Guess #1
cuda_home = os.environ.get("CUDA_HOME") or os.environ.get("CUDA_PATH")
if cuda_home is None:
# Guess #2
try:
which = "where" if IS_WINDOWS else "which"
nvcc = subprocess.check_output([which, "nvcc"]).decode().rstrip("\r\n")
cuda_home = os.path.dirname(os.path.dirname(nvcc))
except subprocess.CalledProcessError:
# Guess #3
if IS_WINDOWS:
cuda_homes = glob.glob("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*")
if len(cuda_homes) == 0:
cuda_home = ""
else:
cuda_home = cuda_homes[0]
else:
cuda_home = "/usr/local/cuda"
if not os.path.exists(cuda_home):
cuda_home = None
version = get_cuda_version(cuda_home)
cudaconfig = {
"home": cuda_home,
"include": os.path.join(cuda_home, "include"),
"lib64": os.path.join(cuda_home, os.path.join("lib", "x64") if IS_WINDOWS else "lib64"),
}
if not all([os.path.exists(v) for v in cudaconfig.values()]):
raise EnvironmentError(
"The CUDA path could not be located in $PATH, $CUDA_HOME or $CUDA_PATH. "
"Either add it to your path, or set $CUDA_HOME or $CUDA_PATH."
)
return cudaconfig, version
def get_cuda_version_str(no_dot=False):
"""Return the cuda version in the format of [x.x]."""
ver = locate_cuda()[1]
if no_dot:
ver = ver.replace(".", "")
return ver
install_require_list = [
"tqdm",
"ray[default]",
"jax==0.3.5",
"flax==0.4.1",
"pulp",
"tensorstore",
"numpy<1.22",
"numba",
]
dev_require_list = [
"prospector",
"yapf",
"cmake",
"pybind11"
]
doc_require_list = [
"sphinx",
"sphinx-rtd-theme",
"sphinx-gallery",
"matplotlib"
]
def build():
"""Build the custom pipeline marker API."""
# Check cuda version
build_command = []
if "CUDACXX" in os.environ and os.path.exists(os.environ["CUDACXX"]):
cudacxx_path = os.environ["CUDACXX"]
else:
# infer CUDACXX
cuda_version = get_cuda_version_str()
cudacxx_path = f"/usr/local/cuda-{cuda_version}/bin/nvcc"
if not os.path.exists(cudacxx_path):
raise ValueError("Cannot find CUDACXX compiler.")
# Enter the folder and build
build_command += [f"cd alpa/pipeline_parallel/xla_custom_call_marker; "]
build_command += [f"CUDACXX={cudacxx_path} ./build.sh"]
build_command = " ".join(build_command)
print(build_command)
ret = subprocess.call(build_command, shell=True)
if ret != 0:
raise RuntimeError("Failed to build the pipeline markers "
f"with exit code {ret}")
def move_file(target_dir, filename):
source = filename
destination = os.path.join(target_dir, "alpa/pipeline_parallel/xla_custom_call_marker/build",
filename.split('/')[-1])
# Create the target directory if it doesn't already exist.
os.makedirs(os.path.dirname(destination), exist_ok=True)
if not os.path.exists(destination):
print("Copying {} to {}.".format(source, destination))
if IS_WINDOWS:
# Does not preserve file mode (needed to avoid read-only bit)
shutil.copyfile(source, destination, follow_symlinks=True)
else:
# Preserves file mode (needed to copy executable bit)
shutil.copy(source, destination, follow_symlinks=True)
def build_and_move(build_ext):
build()
files_to_include = glob.glob("alpa/pipeline_parallel/xla_custom_call_marker/build/*.so")
for filename in files_to_include:
move_file(build_ext.build_lib, filename)
if __name__ == "__main__":
import setuptools
import setuptools.command.build_ext
from setuptools.command.install import install
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
return build_and_move(self)
class BinaryDistribution(setuptools.Distribution):
def has_ext_modules(self):
return True
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
if self.distribution.has_ext_modules():
self.install_lib = self.install_platlib
with open(os.path.join("README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="alpa",
version=os.environ.get("VERSION"),
author="Alpa team",
author_email="",
description="Alpa automatically parallelizes large tensor computation graphs and "
"runs them on a distributed cluster.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alpa-projects/alpa",
classifiers=[
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
        keywords=("alpa distributed parallel machine-learning model-parallelism "
                  "gpt-3 deep-learning language-model python"),
packages=find_packages(exclude=["playground"]),
python_requires='>=3.7',
cmdclass={"build_ext": build_ext,
"install": InstallPlatlib},
distclass=BinaryDistribution,
install_requires=install_require_list,
extras_require={
'dev': dev_require_list,
'doc': doc_require_list + dev_require_list,
},
)
| 33.562814 | 97 | 0.612517 | 803 | 6,679 | 4.910336 | 0.321295 | 0.03652 | 0.017753 | 0.014202 | 0.102714 | 0.033477 | 0.033477 | 0.033477 | 0 | 0 | 0 | 0.007152 | 0.267256 | 6,679 | 198 | 98 | 33.732323 | 0.798529 | 0.060937 | 0 | 0.057325 | 0 | 0 | 0.20641 | 0.037821 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057325 | false | 0 | 0.057325 | 0.012739 | 0.171975 | 0.012739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
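The string slicing in get_cuda_version above is easy to misread; a quick standalone check of the 'release' parsing against a typical nvcc banner (the banner text is illustrative, not a real capture):

version_str = ('nvcc: NVIDIA (R) Cuda compiler driver\n'
               'Cuda compilation tools, release 11.3, V11.3.109')
version_str = version_str.replace('\n', '').replace('\r', '')
idx = version_str.find('release')
print(version_str[idx + len('release '):idx + len('release ') + 4])  # '11.3'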
1dda30dc542feb6406a7aab8396d15c919404356 | 8,900 | py | Python | atlassian_jwt_auth/key.py | itsrifat/asap-authentication-python | e11df340b061bd4a4f863b8379530c30905b4f2a | [
"MIT"
] | null | null | null | atlassian_jwt_auth/key.py | itsrifat/asap-authentication-python | e11df340b061bd4a4f863b8379530c30905b4f2a | [
"MIT"
] | null | null | null | atlassian_jwt_auth/key.py | itsrifat/asap-authentication-python | e11df340b061bd4a4f863b8379530c30905b4f2a | [
"MIT"
] | null | null | null | import base64
import cgi
import logging
import os
import re
import sys
import cachecontrol
import cryptography.hazmat.backends
import jwt
import requests
from cryptography.hazmat.primitives import serialization
from requests.exceptions import RequestException
from atlassian_jwt_auth.exceptions import (KeyIdentifierException,
PublicKeyRetrieverException,
PrivateKeyRetrieverException)
if sys.version_info[0] >= 3:
from urllib.parse import unquote_plus
else:
from urllib import unquote_plus
PEM_FILE_TYPE = 'application/x-pem-file'
class KeyIdentifier(object):
""" This class represents a key identifier """
def __init__(self, identifier):
self.__key_id = validate_key_identifier(identifier)
@property
def key_id(self):
return self.__key_id
def validate_key_identifier(identifier):
""" returns a validated key identifier. """
regex = re.compile(r'^[\w.\-\+/]*$')
_error_msg = 'Invalid key identifier %s' % identifier
if not identifier:
raise KeyIdentifierException(_error_msg)
if not regex.match(identifier):
raise KeyIdentifierException(_error_msg)
normalised = os.path.normpath(identifier)
if normalised != identifier:
raise KeyIdentifierException(_error_msg)
if normalised.startswith('/'):
raise KeyIdentifierException(_error_msg)
if '..' in normalised:
raise KeyIdentifierException(_error_msg)
return identifier
def _get_key_id_from_jwt_header(a_jwt):
""" returns the key identifier from a jwt header. """
header = jwt.get_unverified_header(a_jwt)
return KeyIdentifier(header['kid'])
class BasePublicKeyRetriever(object):
""" Base class for retrieving a public key. """
def retrieve(self, key_identifier, **kwargs):
raise NotImplementedError()
class HTTPSPublicKeyRetriever(BasePublicKeyRetriever):
""" This class retrieves public key from a https location based upon the
given key id.
"""
# Use a static requests session, reused/shared by all instances of
# HTTPSPublicKeyRetriever:
_class_session = None
def __init__(self, base_url):
if base_url is None or not base_url.startswith('https://'):
raise PublicKeyRetrieverException(
'The base url must start with https://')
if not base_url.endswith('/'):
base_url += '/'
self.base_url = base_url
self._session = self._get_session()
def _get_session(self):
if HTTPSPublicKeyRetriever._class_session is None:
session = cachecontrol.CacheControl(requests.Session())
HTTPSPublicKeyRetriever._class_session = session
return HTTPSPublicKeyRetriever._class_session
def retrieve(self, key_identifier, **requests_kwargs):
""" returns the public key for given key_identifier. """
if not isinstance(key_identifier, KeyIdentifier):
key_identifier = KeyIdentifier(key_identifier)
url = self.base_url + key_identifier.key_id
try:
return self._retrieve(url, requests_kwargs)
except requests.RequestException as e:
try:
status_code = e.response.status_code
except AttributeError:
status_code = None
raise PublicKeyRetrieverException(e, status_code=status_code)
def _retrieve(self, url, requests_kwargs):
resp = self._session.get(url, headers={'accept': PEM_FILE_TYPE},
**requests_kwargs)
resp.raise_for_status()
self._check_content_type(url, resp.headers['content-type'])
return resp.text
def _check_content_type(self, url, content_type):
media_type = cgi.parse_header(content_type)[0]
if media_type.lower() != PEM_FILE_TYPE.lower():
raise PublicKeyRetrieverException(
"Invalid content-type, '%s', for url '%s' ." %
(content_type, url))
class HTTPSMultiRepositoryPublicKeyRetriever(BasePublicKeyRetriever):
""" This class retrieves public key from the supplied https key
repository locations based upon key ids.
"""
def __init__(self, key_repository_urls):
if not isinstance(key_repository_urls, list):
raise TypeError('keystore_urls must be a list of urls.')
self._retrievers = self._create_retrievers(key_repository_urls)
def _create_retrievers(self, key_repository_urls):
return [HTTPSPublicKeyRetriever(url) for url
in key_repository_urls]
def retrieve(self, key_identifier, **requests_kwargs):
for retriever in self._retrievers:
try:
return retriever.retrieve(key_identifier, **requests_kwargs)
except (RequestException, PublicKeyRetrieverException) as e:
if isinstance(e, PublicKeyRetrieverException):
if e.status_code is None or e.status_code < 500:
raise
logger = logging.getLogger(__name__)
                logger.warning('Unable to retrieve public key from store',
                               extra={'underlying_error': str(e),
                                      'key repository': retriever.base_url})
raise PublicKeyRetrieverException(
'Cannot load key from key repositories')
class BasePrivateKeyRetriever(object):
""" This is the base private key retriever class. """
def load(self, issuer):
""" returns the key identifier and private key pem found
for the given issuer.
"""
raise NotImplementedError('Not implemented.')
class DataUriPrivateKeyRetriever(BasePrivateKeyRetriever):
""" This class can be used to retrieve the key identifier and
private key from the supplied data uri.
"""
def __init__(self, data_uri):
self._data_uri = data_uri
def load(self, issuer):
if not self._data_uri.startswith('data:application/pkcs8;kid='):
raise PrivateKeyRetrieverException('Unrecognised data uri format.')
splitted = self._data_uri.split(';')
key_identifier = KeyIdentifier(unquote_plus(
splitted[1][len('kid='):]))
key_data = base64.b64decode(splitted[-1].split(',')[-1])
key = serialization.load_der_private_key(
key_data,
password=None,
backend=cryptography.hazmat.backends.default_backend())
private_key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
return key_identifier, private_key_pem.decode('utf-8')
class StaticPrivateKeyRetriever(BasePrivateKeyRetriever):
""" This class simply returns the key_identifier and private_key_pem
initially provided to it in calls to load.
"""
def __init__(self, key_identifier, private_key_pem):
if not isinstance(key_identifier, KeyIdentifier):
key_identifier = KeyIdentifier(key_identifier)
self.key_identifier = key_identifier
self.private_key_pem = private_key_pem
def load(self, issuer):
return self.key_identifier, self.private_key_pem
class FilePrivateKeyRetriever(BasePrivateKeyRetriever):
""" This class can be used to retrieve the latest key identifier and
private key for a given issuer found under its private key
repository path.
"""
def __init__(self, private_key_repository_path):
self.private_key_repository = FilePrivateKeyRepository(
private_key_repository_path)
def load(self, issuer):
key_identifier = self._find_last_key_id(issuer)
private_key_pem = self.private_key_repository.load_key(key_identifier)
return key_identifier, private_key_pem
def _find_last_key_id(self, issuer):
key_identifiers = list(
self.private_key_repository.find_valid_key_ids(issuer))
if key_identifiers:
return key_identifiers[-1]
else:
raise IOError('Issuer has no valid keys: %s' % issuer)
class FilePrivateKeyRepository(object):
""" This class represents a file backed private key repository. """
def __init__(self, path):
self.path = path
def find_valid_key_ids(self, issuer):
issuer_directory = os.path.join(self.path, issuer)
for filename in sorted(os.listdir(issuer_directory)):
if filename.endswith('.pem'):
yield KeyIdentifier('%s/%s' % (issuer, filename))
def load_key(self, key_identifier):
key_filename = os.path.join(self.path, key_identifier.key_id)
with open(key_filename, 'rb') as f:
return f.read().decode('utf-8')
| 35.742972 | 79 | 0.667528 | 983 | 8,900 | 5.797558 | 0.218718 | 0.075276 | 0.022811 | 0.030707 | 0.190209 | 0.136866 | 0.094052 | 0.060712 | 0.047026 | 0.028075 | 0 | 0.002848 | 0.250337 | 8,900 | 248 | 80 | 35.887097 | 0.851319 | 0.114494 | 0 | 0.138554 | 0 | 0 | 0.057617 | 0.006359 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144578 | false | 0.006024 | 0.090361 | 0.018072 | 0.373494 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
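A minimal usage sketch for the retrievers above; the base URL, key id, and PEM text are placeholders, and the HTTPS call assumes a reachable key repository:

retriever = HTTPSPublicKeyRetriever('https://example.com/jwt-keys/')
public_pem = retriever.retrieve('my-service/key-1')  # PEM text of the key

# Or hold a fixed key pair in memory for signing:
private = StaticPrivateKeyRetriever('my-service/key-1', private_key_pem='...')
key_id, key_pem = private.load('my-service')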
1ddcd44bbdd49843f41d0a4c353daf88f8f34d07 | 750 | py | Python | tests/perf/adam_test.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 2 | 2021-03-17T12:00:32.000Z | 2021-03-17T12:18:30.000Z | tests/perf/adam_test.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | null | null | null | tests/perf/adam_test.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 1 | 2021-05-21T23:12:43.000Z | 2021-05-21T23:12:43.000Z | import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
device = 'cpu'
model_size = 1 * 1024**3
group_size = [model_size, 274432]
param = [torch.nn.Parameter(torch.ones(size, device=device)) for size in group_size]
optimizer = DeepSpeedCPUAdam(param)
#torch.set_num_threads(128)
for i, p in enumerate(param):
p.grad = torch.ones(group_size[i], device=device)
#param.grad = torch.ones(model_size, device=device)
avg = 0
for i in range(100):
start = time.time()
optimizer.step()
stop = time.time()
avg += (stop - start)
for i, p in enumerate(param):
p.grad = torch.ones(group_size[i], device=device) * 2
#param.grad = torch.ones(model_size, device=device) * 2
print("Elapsed Time is ", avg / 100)
| 30 | 84 | 0.696 | 116 | 750 | 4.413793 | 0.37069 | 0.087891 | 0.101563 | 0.027344 | 0.375 | 0.375 | 0.375 | 0.375 | 0.222656 | 0.222656 | 0 | 0.038462 | 0.168 | 750 | 24 | 85 | 31.25 | 0.782051 | 0.173333 | 0 | 0.105263 | 0 | 0 | 0.030794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
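For context, running the same loop against the stock optimizer gives a baseline to compare the DeepSpeed numbers to; a sketch that reuses param, torch, and time from the script above and only swaps the optimizer:

baseline = torch.optim.Adam(param)
avg = 0
for i in range(100):
    start = time.time()
    baseline.step()
    avg += time.time() - start
print("torch.optim.Adam elapsed time is ", avg / 100)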
1ddd82d30b1e1a81aa3ef998e0157b0e90fad485 | 1,256 | py | Python | src/human_lambdas/user_handler/management/commands/addnotification.py | Human-Lambdas/human-lambdas | 9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50 | [
"Apache-2.0"
] | 25 | 2021-06-08T08:00:08.000Z | 2022-03-17T22:49:10.000Z | src/human_lambdas/user_handler/management/commands/addnotification.py | Human-Lambdas/human-lambdas | 9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50 | [
"Apache-2.0"
] | null | null | null | src/human_lambdas/user_handler/management/commands/addnotification.py | Human-Lambdas/human-lambdas | 9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50 | [
"Apache-2.0"
] | 5 | 2021-06-15T09:57:46.000Z | 2022-02-03T16:18:33.000Z | from django.core.management.base import BaseCommand
from human_lambdas.user_handler.models import Notification, User
from human_lambdas.workflow_handler.models import WorkflowNotification
class Command(BaseCommand):
help = "Add notification object to each user"
def handle(self, *args, **options):
users = User.objects.all()
for user in users:
if not user.notifications:
notification = Notification()
notification.save()
user.notifications = notification
user.save()
for org in user.organization_set.all():
for workflow in org.workflow_set.all():
if not WorkflowNotification.objects.filter(
workflow=workflow, notification=user.notifications
).exists():
WorkflowNotification(
workflow=workflow,
enabled=True,
notification=user.notifications,
).save()
self.stdout.write(
self.style.SUCCESS(
"Set-up all workflow notifications for user %s" % user.pk
)
)
| 38.060606 | 77 | 0.546178 | 110 | 1,256 | 6.181818 | 0.454545 | 0.094118 | 0.047059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.382962 | 1,256 | 32 | 78 | 39.25 | 0.877419 | 0 | 0 | 0 | 0 | 0 | 0.06449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
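The command above is invoked like any other management command (python manage.py addnotification); it can also be driven programmatically, for example from a data migration or a shell session:

from django.core.management import call_command

call_command('addnotification')  # same as: python manage.py addnotification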
1ddface170466e79fd7bf527988be444878c69ac | 1,850 | py | Python | src/shader/wasm/compile.py | mil-tokyo/webdnn | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | [
"MIT"
] | 1,967 | 2017-05-28T08:18:37.000Z | 2022-03-15T18:10:57.000Z | src/shader/wasm/compile.py | mil-tokyo/webdnn | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | [
"MIT"
] | 315 | 2017-05-28T05:34:34.000Z | 2022-01-13T03:19:35.000Z | src/shader/wasm/compile.py | mil-tokyo/webdnn | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | [
"MIT"
] | 175 | 2017-05-31T08:10:00.000Z | 2021-10-15T05:22:12.000Z | """
compile operator kernels of c++ into wasm, then embed them in single ts file, to distribute single webdnn.js
"""
import base64
import glob
import os
import subprocess
import sys
CPP_SRC_DIR = "src"
DST_DIR = "../../descriptor_runner/operators/wasm/worker"
OPTIMIZATION = "-O3"
# change current directory to where this file is
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# dependency C++ library
if not os.path.exists("./lib/eigen-3.3.9"):
sys.stderr.write(f"downloading eigen library into {os.path.join(os.getcwd(), 'lib')}\n")
os.makedirs("./lib", exist_ok=True)
import urllib.request
import tarfile
thetarfile = "https://gitlab.com/libeigen/eigen/-/archive/3.3.9/eigen-3.3.9.tar.bz2"
ftpstream = urllib.request.urlopen(thetarfile)
thetarfile = tarfile.open(fileobj=ftpstream, mode="r|bz2")
thetarfile.extractall("./lib")
srcs = glob.glob(CPP_SRC_DIR + "/**/*.cpp", recursive=True)
subprocess.check_call(["emcc", "-std=c++11", "--pre-js", "pre.js", "-I", "lib/eigen-3.3.9", "-o", f"{DST_DIR}/workerRaw.js", OPTIMIZATION, "-s", "ALLOW_MEMORY_GROWTH=1", *srcs], shell=os.name=='nt')
# embed wasm into worker js
with open(f"{DST_DIR}/workerRaw.wasm", "rb") as f:
worker_wasm = f.read()
with open(f"{DST_DIR}/workerRaw.js", "rt", encoding="utf-8") as f:
worker_js = f.read()
worker_js_with_wasm = worker_js.replace("WASM_WORKER_WASM_BINARY_BASE64", base64.b64encode(worker_wasm).decode("ascii"))
worker_js_with_wasm_escaped = worker_js_with_wasm.replace("\\", "\\\\").replace("`", "\\`")
worker_data_url_src = f"""/* eslint-disable */
export const wasmWorkerSrcUrl = URL.createObjectURL(new File([`{worker_js_with_wasm_escaped}`], "worker.js", {{type: "text/javascript"}}));
"""
with open(f"{DST_DIR}/worker.ts", "wt", encoding="utf-8", newline="\n") as f:
f.write(worker_data_url_src)
| 38.541667 | 198 | 0.697838 | 283 | 1,850 | 4.399293 | 0.45583 | 0.051406 | 0.048193 | 0.051406 | 0.134137 | 0.088353 | 0.049799 | 0 | 0 | 0 | 0 | 0.017042 | 0.111892 | 1,850 | 47 | 199 | 39.361702 | 0.740718 | 0.110811 | 0 | 0 | 0 | 0.064516 | 0.3737 | 0.167584 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.225806 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
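The embed step above is just a textual splice: the wasm bytes are base64-encoded and substituted for a marker string in the emitted JS. A self-contained sketch of the same round-trip, with placeholder bytes and marker rather than the real build artifacts:

import base64

wasm_bytes = b'\x00asm\x01\x00\x00\x00'  # placeholder wasm module header
template = 'const wasm = "WASM_BINARY_BASE64";'
embedded = template.replace('WASM_BINARY_BASE64',
                            base64.b64encode(wasm_bytes).decode('ascii'))
assert base64.b64decode(embedded.split('"')[1]) == wasm_bytes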
1de0504272aa9c8d05b238a26dbdda9f0cfb6b6c | 6,275 | py | Python | scdali/models/gp.py | PMBio/scdali | 55baf5afaf44cc8314c951fe94c539e805adb84e | [
"BSD-3-Clause"
] | 3 | 2021-11-22T02:59:47.000Z | 2022-03-11T16:11:06.000Z | scdali/models/gp.py | PMBio/scdali | 55baf5afaf44cc8314c951fe94c539e805adb84e | [
"BSD-3-Clause"
] | 1 | 2022-03-10T10:29:03.000Z | 2022-03-10T10:29:03.000Z | scdali/models/gp.py | PMBio/scdali | 55baf5afaf44cc8314c951fe94c539e805adb84e | [
"BSD-3-Clause"
] | null | null | null | """Gaussian process implementation based on gpflow."""
import numpy as np
import gpflow
from gpflow.utilities import print_summary, to_default_float, set_trainable
import tensorflow as tf
import tensorflow_probability as tfp
from scdali.models.core import DaliModule
from scdali.utils.stats import freeman_tukey, compute_expected_sample_variance
from scdali.utils.matop import atleast_2d_column
class SparseGP(DaliModule):
"""Sparse GP model for modelling allelic imbalance in single cells.
A simple wrapper class around the gpflow SGPR model.
"""
def __init__(
self, a, d, E,
kernel='Linear',
num_inducing=300,
kernel_params=None,
variance_prior=False,
length_scale_prior=True,
apply_freeman_tukey=False):
"""Creates model.
Args
a: Counts for the alternative allele in each cell.
d: Total counts for both alleles in each cell.
E: Environment / cell-state matrix.
kernel: String representing a formula of gpflow.kernels,
e.g. Linear + RBF.
num_inducing: Number of inducing points.
kernel_params: List of dict with arguments for kernel creation.
variance_prior: Boolean indicating whether to use Gamma prior for
variance components.
            length_scale_prior: Boolean indicating whether to use Inverse-Gamma
                prior for kernel lengthscales.
apply_freeman_tukey: Use the Freeman-Tukey variance stabilizing
transform to compute rates.
"""
super().__init__(a, d, E)
self.apply_freeman_tukey = apply_freeman_tukey
if self.apply_freeman_tukey:
self.r = freeman_tukey(self.a, self.d)
else:
self.r = self.a / self.d
if num_inducing > self.n:
num_inducing = self.n
self.variance_prior = variance_prior
self.length_scale_prior = length_scale_prior
self.num_inducing = num_inducing
self.kernel = kernel
self.kernel_split = parse_str_formula(kernel)
if kernel_params is None:
kernel_params = [dict() for k in self.kernel_split]
self.kernel_params = kernel_params
self.model = self._init_model()
def _init_model(self):
"""Creates a gpflow SGPR model."""
Z = self._init_inducing_points()
mean_function=gpflow.mean_functions.Constant()
# constrain to be between 0 and 1
mean_function.c = gpflow.Parameter(
.5, transform=tfp.bijectors.Sigmoid())
return gpflow.models.SGPR(
data=(self.E, self.r),
kernel=self._create_kernel(),
inducing_variable=Z,
mean_function=mean_function,
num_latent_gps=1)
def _create_kernel(self):
"""Creates a kernel from list of strings stored in _kernel_split."""
k = None
for i, prod_kern in enumerate(self.kernel_split):
sub_k = None
for j, kern in enumerate(prod_kern):
                new_k = getattr(gpflow.kernels, kern)(**self.kernel_params[i + j])
if hasattr(new_k, 'lengthscales') and self.length_scale_prior:
new_k.lengthscales.prior = tfp.distributions.InverseGamma(
to_default_float(1),
to_default_float(1))
if j == 0:
sub_k = new_k
if self.variance_prior:
new_k.variance.prior = tfp.distributions.Gamma(
to_default_float(1),
to_default_float(1))
else:
set_trainable(new_k.variance, False)
sub_k *= new_k
if i == 0:
k = sub_k
else:
k += sub_k
return k
def _init_inducing_points(self):
"""Samples at random to initialize the inducing point locations."""
return self.E[np.random.choice(range(self.n), self.num_inducing), :]
def fit(self, maxiter=250):
"""Fits the model."""
opt = gpflow.optimizers.Scipy()
try:
opt.minimize(
self.model.training_loss,
self.model.trainable_variables,
options={'maxiter': maxiter})
except tf.errors.InvalidArgumentError:
print('Warning: Optimization terminated, check model parameters!!')
def compute_elbo(self):
"""Evalutes the ELBO, a lower bound on the marginal log likelihood."""
return self.model.elbo().numpy()
def compute_posterior(self, E=None, full_cov=False):
"""Computes the mean and variances of the posterior over latent rates."""
E = self.E if E is None else atleast_2d_column(E)
mu, covar = self.model.predict_f(
E.astype(np.float64), full_cov=full_cov)
if full_cov:
covar = covar[0, :, :]
return mu.numpy().astype(np.float32), covar.numpy().astype(np.float32)
def compute_explained_variance(self):
"""Normalizes each variance component by the kernel variance."""
variances = list()
if isinstance(self.model.kernel, gpflow.kernels.base.ReducingCombination):
for kernel in self.model.kernel.kernels:
variances.append(compute_expected_sample_variance(kernel(self.E).numpy()))
else:
variances.append(compute_expected_sample_variance(self.model.kernel(self.E).numpy()))
likelihood_kernel = self.model.likelihood.variance.numpy() * np.eye(self.n)
variances.append(compute_expected_sample_variance(likelihood_kernel))
return variances
def get_prior_mean(self):
"""Returns the estimated prior mean."""
return self.model.mean_function.c.numpy()
def print_summary(self):
"""Prints a model summary."""
print_summary(self.model)
def parse_str_formula(formula):
"""Turns formula of strings with + and * into list of lists with variable names."""
return [[k.strip() for k in k_prod.split('*')] for k_prod in formula.split('+') ]
| 35.653409 | 97 | 0.608606 | 753 | 6,275 | 4.881806 | 0.298805 | 0.026931 | 0.019042 | 0.031556 | 0.080522 | 0.075082 | 0.039173 | 0.016322 | 0 | 0 | 0 | 0.005733 | 0.30502 | 6,275 | 175 | 98 | 35.857143 | 0.837193 | 0.219283 | 0 | 0.074766 | 0 | 0 | 0.018081 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102804 | false | 0 | 0.074766 | 0 | 0.261682 | 0.037383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
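A minimal end-to-end sketch of the model above on synthetic data; array shapes follow the constructor (per-cell alternative counts a, total counts d, cell-state matrix E), while the kernel formula and hyperparameters are illustrative:

import numpy as np

rng = np.random.default_rng(0)
n, k = 500, 2
E = rng.normal(size=(n, k))            # cell states
d = rng.poisson(30, size=(n, 1)) + 1   # total counts, kept positive
a = rng.binomial(d, 0.5)               # alternative-allele counts

model = SparseGP(a, d, E, kernel='Linear + RBF', num_inducing=50)
model.fit(maxiter=100)
print(model.compute_elbo(), model.get_prior_mean())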
1de16f38a88a819292c8f707bd52ecac80a506d6 | 2,693 | py | Python | MCQuizApp/views.py | Kevin-Oudai/django-mcquiz | 82a018569fb69c892438a57bfaec3aa694761098 | [
"MIT"
] | null | null | null | MCQuizApp/views.py | Kevin-Oudai/django-mcquiz | 82a018569fb69c892438a57bfaec3aa694761098 | [
"MIT"
] | 7 | 2021-10-20T15:22:40.000Z | 2021-11-09T05:41:14.000Z | MCQuizApp/views.py | Kevin-Oudai/django-mcquiz | 82a018569fb69c892438a57bfaec3aa694761098 | [
"MIT"
] | null | null | null | from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.shortcuts import get_object_or_404, render
from django.http import Http404

from .models import Quiz
class QuizListView(ListView):
model = Quiz
context_object_name = 'quizzes'
queryset = Quiz.objects.filter(
number_of_questions__gt=0).filter(draft=False)
class QuizDetailView(DetailView):
model = Quiz
def get_object(self, *args, **kwargs):
pk = self.kwargs.get('pk')
return get_object_or_404(Quiz, pk=pk, draft=False)
def questions_view(request, pk, quiz_url):
template_name = "MCQuizApp/question_list.html"
data = []
context = {}
quiz = get_object_or_404(Quiz, id=pk)
questions = quiz.get_questions()
if not questions:
raise Http404("no questions in the quiz.")
context['title'] = quiz.title
    if questions is not None:
for i in range(len(questions)):
data.append({
"id": questions[i].pk,
"figure": questions[i].figure,
"content": questions[i].content,
"answers": questions[i].get_answers_list()
})
context['questions'] = data
context['pk'] = pk
context['url'] = quiz_url
response = render(request, template_name, context)
return response
def solutions(request, pk, quiz_url):
    # retrieve the submitted guesses
guesses = request.GET.dict()
# retrieve questions ids and correct answers
questions = Quiz.objects.get(id=pk).get_questions()
# create solutions list
question = []
for item in questions:
if str(item.id) in guesses.keys():
guess = guesses[str(item.id)]
else:
guess = None
question.append({'figure': item.figure, 'content': str(item.content), 'guess': guess,
'answer': str(item.get_answer_id()), 'choices': item.get_answers_list()})
# count total correct and total incorrect
total_correct = 0
total_incorrect = 0
for item in question:
if item['guess'] == item['answer']:
total_correct += 1
else:
total_incorrect += 1
# calculate percentage
total_questions = len(questions)
percentage = total_correct / total_questions * 100
context = {}
context['questions'] = question
context['total'] = total_correct
context['score'] = percentage
context['errors'] = total_incorrect
context['number'] = total_questions
return render(request, 'MCQuizApp/solutions.html', context)
| 32.059524 | 98 | 0.647976 | 322 | 2,693 | 5.282609 | 0.291925 | 0.035273 | 0.026455 | 0.038801 | 0.021164 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014195 | 0.241367 | 2,693 | 83 | 99 | 32.445783 | 0.818404 | 0.052358 | 0 | 0.090909 | 0 | 0 | 0.078555 | 0.020424 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.106061 | 0 | 0.287879 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
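A plausible urls.py wiring for the views above; the route names and the slug convention for quiz_url are assumptions inferred from the pk/quiz_url parameters, not taken from the app itself:

from django.urls import path
from . import views

urlpatterns = [
    path('', views.QuizListView.as_view(), name='quiz-list'),
    path('<int:pk>/', views.QuizDetailView.as_view(), name='quiz-detail'),
    path('<int:pk>/<slug:quiz_url>/take/', views.questions_view, name='quiz-take'),
    path('<int:pk>/<slug:quiz_url>/solutions/', views.solutions, name='quiz-solutions'),
]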
1de728168d5705a24cfbc8c3d5d9391effeca66f | 717 | py | Python | website/settings/prod.py | RemeoLong/Attic | 8d9458b86a924b639001fe0bee052ba5be66dbce | [
"MIT"
] | null | null | null | website/settings/prod.py | RemeoLong/Attic | 8d9458b86a924b639001fe0bee052ba5be66dbce | [
"MIT"
] | null | null | null | website/settings/prod.py | RemeoLong/Attic | 8d9458b86a924b639001fe0bee052ba5be66dbce | [
"MIT"
] | null | null | null | import os
from .base import *
SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
DEBUG = False
ALLOWED_HOSTS = ['170.39.76.95', '.atticrestorations.biz', 'ns79.stableserver.net']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ["DB_name"],
'USER': os.environ["DB_user"],
'PASSWORD': os.environ["DB_pass"],
'HOST': os.environ["DB_host"],
'PORT': os.environ["DB_port"],
}
}
EMAIL_BACKEND = 'django_ses.SESBackend'
AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]
AWS_SES_REGION_NAME = 'us-east-2'
AWS_SES_REGION_ENDPOINT = 'email.us-east-2.amazonaws.com'
| 29.875 | 83 | 0.683403 | 100 | 717 | 4.6 | 0.48 | 0.156522 | 0.119565 | 0.06087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023026 | 0.152022 | 717 | 23 | 84 | 31.173913 | 0.733553 | 0 | 0 | 0 | 0 | 0 | 0.389121 | 0.211994 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.05 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1de7c6a232164c1f5bc7a519e5875ef95303520b | 764 | py | Python | plugins/dbnd-postgres/src/dbnd_postgres/log_pg_table_operator.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-postgres/src/dbnd_postgres/log_pg_table_operator.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-postgres/src/dbnd_postgres/log_pg_table_operator.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
from dbnd._core.commands.metrics import log_pg_table
class LogPostgresTableOperator(BaseOperator):
@apply_defaults
def __init__(self, table_name, conn_id, *args, **kwargs):
super(LogPostgresTableOperator, self).__init__(
*args, **kwargs
) # py2.7 compatibility
self.table_name = table_name
self.conn_id = conn_id
def execute(self, context):
hook = PostgresHook(postgres_conn_id=self.conn_id)
connection_string = hook.get_uri()
log_pg_table(
self.table_name, connection_string, with_histograms=True,
)
| 33.217391 | 69 | 0.715969 | 90 | 764 | 5.744444 | 0.477778 | 0.058027 | 0.075435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0033 | 0.206806 | 764 | 22 | 70 | 34.727273 | 0.849835 | 0.024869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
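A usage sketch of the operator above inside an Airflow DAG; the DAG id, connection id, table name, and schedule are placeholders:

from datetime import datetime

from airflow import DAG

with DAG('pg_table_report', start_date=datetime(2021, 1, 1),
         schedule_interval='@daily') as dag:
    log_orders = LogPostgresTableOperator(
        task_id='log_orders_table',
        table_name='orders',
        conn_id='postgres_default',
    )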