text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from epitator.annotator import AnnoDoc
from epitator.structured_incident_annotator import StructuredIncidentAnnotator
import datetime
# import logging
# from .test_utils import with_log_level
def remove_empty_props(d):
    """Return a copy of *d* with every None-valued entry dropped."""
    cleaned = {}
    for key, value in d.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
class TestStructuredIncidentAnnotator(unittest.TestCase):
    """Exercise StructuredIncidentAnnotator on table-like plain-text docs.

    Each test builds an AnnoDoc from a slash- or pipe-delimited table,
    runs the annotator, and checks the metadata of the spans emitted in
    the 'structured_incidents' tier.
    """

    def setUp(self):
        # Show full diffs when the large expected-metadata dicts mismatch.
        self.maxDiff = None
        self.annotator = StructuredIncidentAnnotator()

    # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
    def test_count_table(self):
        """Case/death counts and their attributes come from column headers."""
        doc = AnnoDoc('''
Type / New / Confirmed / Probable / Suspect / Total
Cases / 3 / 293 / / 32 / 413
Deaths / 5 / 193 / 82 / 28 / 303
''')
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas, [{
            # Date/country??
            # Need to include because association rules are different for tables.
            'type': 'caseCount',
            'value': 3,
            'attributes': []
        }, {
            'type': 'cumulativeCaseCount',
            'value': 293,
            'attributes': ['confirmed']
        }, {
            'type': 'cumulativeCaseCount',
            'value': 32,
            'attributes': ['suspected']
        }, {
            'type': 'cumulativeCaseCount',
            'value': 413,
            'attributes': []
        }, {
            'type': 'deathCount',
            'value': 5,
            'attributes': []
        }, {
            'type': 'cumulativeDeathCount',
            'value': 193,
            'attributes': ['confirmed']
        }, {
            'type': 'cumulativeDeathCount',
            'value': 82,
            'attributes': []
        }, {
            'type': 'cumulativeDeathCount',
            'value': 28,
            'attributes': ['suspected']
        }, {
            'type': 'cumulativeDeathCount',
            'value': 303,
            'attributes': []
        }])

    # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
    def test_location_count_table(self):
        """Rows keyed by location get a geoname id and the title's date range."""
        doc = AnnoDoc("""
Distribution of reported x fever cases from 1 Jul 2017-17 Apr 2018
Federal units / Reported / Discarded / Under investigation / Confirmed / Deaths
Acre (AC) / 1 / 1 / - / - / -
Amapá (AP) / 8 / 2 / 6 / - / -
Pará (PA) / 7 / 5 / 2 / - / -
Amazonas (AM) / 42 / 31 / 11 / - / -
Rondônia (RO) / 9 / 8 / 1 / - / -
Roraima (RR) / 3 / 3 / - / - / -
Tocantins (TO) / 17 / 15 / 2 / - / -
Bahia (BA) / 62 / 35 / 27 / - / -
Ceará (CE) / 4 / 3 / 1 / - / -
Maranhão (MA) / 7 / 5 / 2 / - / -
Paraíba (PB) / 5 / - / 5 / - / -
Pernambuco (PE) / 6 / 4 / 2 / - / -
Piauí (PI) / 9 / 6 / 3 / - / -
Rio Grande do Norte (RN) / 3 / 2 / 1 / - / -
Sergipe (SE) / 2 / 2 / - / - / -
Distrito Federal (DF) / 74 / 43 / 30 / 1 / 1
Goiás (GO) / 66 / 37 / 29 / - / -
Mato Grosso (MT) / 10 / 8 / 2 / - / -
Mato Grosso do Sul (MS) / 13 / 10 / 3 / - / -
Espírito Santo (ES) / 119 / 88 / 25 / 6 / 1
Minas Gerais (MG) / 1444 / 656 / 294 / 494 / 156
Rio de Janeiro (RJ) / 453 / 172 / 84 / 197 / 64
São Paulo (SP) / 2558 / 1655 / 444 / 459 / 120
Paraná (PR) / 110 / 102 / 8 / - / -
Rio Grande do Sul (RS) / 49 / 34 / 15 / - / -
Santa Catarina (SC) / 45 / 22 / 23 / - / -
Total / 5131 / 2951 / 1023 / 1157 / 342
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        incident = metadatas[0]
        self.assertEqual(incident['value'], 1)
        self.assertEqual(incident['type'], 'caseCount')
        self.assertEqual(incident['location']['geonameid'], '3665474')
        # Note: end date is exclusive, hence 18 Apr for a range ending 17 Apr.
        self.assertEqual(
            incident['dateRange'],
            [datetime.datetime(2017, 7, 1),
             datetime.datetime(2018, 4, 18)])

    def test_date_count_table(self):
        """Per-row report dates become dateRanges; weekly deltas are caseCounts."""
        doc = AnnoDoc("""
Cumulative case data
Report date / Cases / Deaths / New cases per week
26 Jun 2017 / 190 / 10 /
8 Sep 2017 / 300 / 12 /
9 Sep 2017 / 309 / 13 /
15 Sep 2017 / 319 / 14 /
6 Oct 2017 / 376 / 14 /
13 Oct 2017 /
20 Oct 2017 / 431 / 17 / 34
27 Oct 2017 / 457 / 18 / 26
3 Nov 2017 / 486 / 19 / 29""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas[-1], {
            'value': 29,
            'type': 'caseCount',
            'attributes': [],
            'dateRange': [
                datetime.datetime(2017, 10, 28),
                datetime.datetime(2017, 11, 4)]
        })
        self.assertEqual(metadatas[-2], {
            'value': 19,
            'type': 'cumulativeDeathCount',
            'attributes': [],
            'dateRange': [
                datetime.datetime(2017, 11, 3),
                datetime.datetime(2017, 11, 4)]
        })

    def test_date_count_table_2(self):
        """Pipe-delimited tables are parsed the same as slash-delimited ones."""
        doc = AnnoDoc("""
| Report date | Cases |
| 6 Oct 2017 | 26 |
| 13 Oct 2017 | 29 |
| 20 Oct 2017 | 34 |""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas, [{
            'value': 26,
            'type': 'caseCount',
            'attributes': [],
            'dateRange': [
                datetime.datetime(2017, 9, 30),
                datetime.datetime(2017, 10, 7)]
        }, {
            'value': 29,
            'type': 'caseCount',
            'attributes': [],
            'dateRange': [
                datetime.datetime(2017, 10, 7),
                datetime.datetime(2017, 10, 14)]
        }, {
            'value': 34,
            'type': 'caseCount',
            'attributes': [],
            'dateRange': [
                datetime.datetime(2017, 10, 14),
                datetime.datetime(2017, 10, 21)]
        }])

    def test_non_incident_counts_and_species(self):
        """Percentages and husbandry columns are ignored; species is resolved."""
        doc = AnnoDoc("""
Species / Morbidity / Mortality / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Orange Spotted Snakehead (_Channa aurantimaculata_) / 100% / 1% / 32 / 30 / 1 / 28 / 3
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas, [{
            'attributes': [],
            'type': 'caseCount',
            'value': 30,
            'species': {
                'id': 'tsn:642745',
                'label': 'Channa aurantimaculata'}
        }, {
            'attributes': [],
            'type': 'deathCount',
            'value': 1,
            'species': {
                'id': 'tsn:642745',
                'label': 'Channa aurantimaculata'}
        }])

    def test_unknown_species_and_space_delimited_counts(self):
        """Space-grouped digits parse as one number; unknown species flagged."""
        doc = AnnoDoc("""
The epidemiological statistics accumulated since the start of the event are included in the following "outbreak summary":
Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Birds / 6 368 632 / 1 303 173 / 1 297 617 / 3 850 608 / 0
Black-crowned night-heron / not available / 1 / 1 / 0 / 0
Passeridae (unidentified) / not available / 2 / 2 / 0 / 0
Pale thrush / not available / 1 / 1 / 0 / 0
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas[0], {
            'attributes': [],
            'type': 'caseCount',
            'value': 1303173,
            'species': {'id': 'tsn:174371', 'label': 'Aves'}
        })
        # A species the resolver cannot identify yields this sentinel string.
        self.assertEqual(metadatas[-1], {
            'attributes': [],
            'type': 'deathCount',
            'value': 1,
            'species': "Cannot parse"
        })

    # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
    def test_multi_section_table(self):
        """Country sub-headers inside one table associate rows with locations."""
        doc = AnnoDoc("""
Disease update
--------------
Confirmed, probable, and suspect cases and deaths from Ebola virus disease in Guinea, Liberia, and Sierra Leone, as of 30 Jun 2014
Type / New* / Confirmed / Probable / Suspect / Totals by country
Guinea
Cases / 3 / 293 / 88 / 32 / 413
Deaths / 5 / 193 / 82 / 28 / 303
Liberia
Cases / 8 / 52 / 21 / 34 / 107
Deaths / 7 / 33 / 17 / 15 / 65
Sierra Leone
Cases / 11 / 199 / 31 / 9 / 239
Deaths / 2 / 65 / 29 / 5 / 99
Totals
Cases / 22 / 544 / 140 / 75 / 759
Deaths / 14 / 291 / 128 / 48 / 467
*New cases were reported between 25-29 Jun 2014
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas[4]['type'], 'cumulativeCaseCount')
        self.assertEqual(metadatas[4]['dateRange'], [
            datetime.datetime(2014, 6, 30, 0, 0),
            datetime.datetime(2014, 7, 1, 0, 0)])
        self.assertEqual(metadatas[4]['value'], 413)
        self.assertEqual(metadatas[4]['location']['geonameid'], '2420477')

    def test_number_in_header(self):
        """Numbers appearing in column headers don't confuse value extraction."""
        doc = AnnoDoc("""
Health Jurisdiction / Cases (percentage) / Incidence rate per 100 000 Person-Years
Salt Lake county / 162 (68.9) / 14.4
Utah county / 45 (19.1) / 7.6
Bear River / 5 (2.1) / 2.8
Southeast Utah / 2 (0.9) / 5.0
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas[0]['type'], 'caseCount')
        self.assertEqual(metadatas[0]['value'], 162)
        self.assertEqual(metadatas[0]['location']['geonameid'], '5781004')

    # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
    def test_unusual_format(self):
        """Year-keyed rows with a double-slash delimiter are still parsed."""
        doc = AnnoDoc("""
For subscribers' convenience, we hereby reproduce Israel's annual rabies statistics since 2014:
Year // badger / cat / fox / jackal / wolf / dog / cattle / sheep / horse // total
2014 // 3 / 0 / 2 / 2 / 4 / 2 / 1 / 0 / 0 // 14
2015 // 12 / 1 / 1 / 3 / 0 / 1 / 7 / 0 / 1 // 20
2016 // 12 / 0 / 7 / 5 / 0 / 0 / 5 / 0 / 1 // 30
2017 // 10 / 2 / 0 / 47 / 0 / 0 / 14 / 1 / 0 // 74
2018 // 4 / 0 / 0 / 35 / 0 / 1 / 7 / 1 / 1 // 51
""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        # A value from row one is not used because 2014 is missed by the date
        # parser although other years are caught.
        # The index refers to the badgers in 2015. It is an unintuitive index
        # because some species are not being parsed so their values are skipped.
        self.assertEqual(metadatas[2]['type'], 'caseCount')
        self.assertEqual(metadatas[2]['value'], 12)
        self.assertEqual(metadatas[2]['species']['label'], 'Taxidea taxus')
        self.assertEqual(metadatas[2]['dateRange'], [
            datetime.datetime(2015, 1, 1, 0, 0),
            datetime.datetime(2016, 1, 1, 0, 0)])

    def test_date_association(self):
        """A date range stated in prose before the table applies to its rows."""
        doc = AnnoDoc("""
The outbreak strains of salmonella have infected a reported 961 people in 48 states [only Alaska and Delaware have not reported cases - Mod.LL] and the District of Columbia. Illnesses started on dates ranging from 4 January 2017 to 31 July 2017.
State / Number of Cases
Alabama / 25
Arizona / 6
Arkansas / 9
California / 54
Virginia / 56
Washington / 22
West Virginia / 17
Wisconsin / 24
Wyoming / 10""")
        doc.add_tier(self.annotator)
        metadatas = [
            remove_empty_props(span.metadata)
            for span in doc.tiers['structured_incidents']
        ]
        self.assertEqual(metadatas[0]['dateRange'], [
            datetime.datetime(2017, 1, 4, 0, 0),
            datetime.datetime(2017, 8, 1, 0, 0)])

    def test_fp_table_merging(self):
        """Regression: adjacent tables with different shapes must not crash."""
        doc = AnnoDoc("""
Non-Latin Caribbean
Bahamas / week 30 [ending 25 Jul 2014] / 0 / 0 / 6 / 0
Dominica / week 28 [ending 11 Jul 2014] / 3559 / 141 / 0 / 0
Jamaica / week 29 [ending 18 Jul 2014] / 0 / 0 / 1 / 0
Turks & Caicos Islands / week 28 [ending 11 Jul 2014] / 0 / 10 / 7 / 0
US Virgin Islands / week 29 [ending 18 Jul 2014] / 0 / 2 / 7 / 0
Andean area:
Bolivia / 9 / 0 / 0 / 3 / 0
Colombia / 30 / 0 / 0 / 1 / 0
Peru / 28 / 0 / 0 / 3 / 0
""")
        # No assertions: the test passes if annotation completes without error.
        doc.add_tier(self.annotator)

    def test_unparsable_date_bug(self):
        """Regression: rows with unparsable/odd date cells must not crash."""
        doc = AnnoDoc("""
Cases by Country / Week updated / Probable / Conf. / Virus type / DHF severe / Deaths
Hispanic Caribbean
Dominican Republic / 17 [week ending 28 Apr 2017] / 315 / 0 / D? / 15 / 0
Puerto Rico / 19 [week ending 12 May 2017] / 9 / 0 / D2 / 0 / 0
English, French, Dutch Caribbean
American Virgin Islands / 19 [week ending 12 May 2017] / 1 / 1 / D? / 0 / 0
Andean
Bolivia / 17 / [week ending 28 Apr 2017] / 4260 / 0 / D? / 34 / 0
Colombia / 20 [week ending 19 May 2017] / 12 552 / 8357 / D? / 131 / 36
Ecuador / 17 [week ending 28 Apr 2017] / 6075 / 6075 / D? / 6 / 3
Peru / 20 [week ending 19 May 2017] / 44 971 / 12 717 / D 2,3 / 137 / 54
Venezuela / 17 [week ending 28 Apr 2017] / 2722 / 309 / D? / 7 / 0
""")
        # No assertions: the test passes if annotation completes without error.
        doc.add_tier(self.annotator)

    def test_non_integer_value(self):
        """Slash-delimited prose that is not a count table yields no incidents."""
        doc = AnnoDoc("""
******
[6] India, Pune, Marharastra, fatal human case
Date: Mon 4 Jul 2016, 12.57 AM IST
Source: The Times of India [edited]
""")
        doc.add_tier(self.annotator)
        self.assertEqual(len(doc.tiers['structured_incidents']), 0)

    def test_multiline_title(self):
        """A location named above intervening text is still associated."""
        doc = AnnoDoc("""
Arizona, 3 May 2018.
More text
Species / Susceptible / Cases / Deaths / Killed and disposed of / Slaughtered
Birds / 3000/ 1500 / 1500 / 0 / 0
Affected population: Commercial layers
""")
        doc.add_tier(self.annotator)
        # TODO: 1500 in the Deaths column is parsed as a year. To resolve this
        # the annotator needs to use a heuristic based on the column
        # name when determining column types. Simply giving integer interpretations
        # priority in all cases doesn't work on docs like the one in test_unusual_format.
        self.assertEqual(doc.tiers['structured_incidents'][0].metadata['location']['name'], 'Arizona')

    # @with_log_level(logging.getLogger('epitator.structured_incident_annotator'), logging.INFO)
    def test_missing_count_bug(self):
        """Regression: every state row resolves to a geoname, none skipped."""
        doc = AnnoDoc("""
State / Number of Cases
Alabama / 25
Arizona / 6
Arkansas / 9
California / 54
Colorado / 18
N Dakota / 1
S Dakota / 1
Connecticut / 9
""")
        doc.add_tier(self.annotator)
        locations = [span.metadata['location']
                     for span in doc.tiers['structured_incidents']]
        # Locations may come back as dicts or raw values; normalize to ids.
        geonameids = [
            location['geonameid'] if isinstance(location, dict) else location
            for location in locations]
        self.assertEqual(geonameids, [
            '4829764', '5551752', '4099753',
            '5332921', '5417618', '5690763',
            '5769223', '4831725'])

    def test_case_synonyms(self):
        """Column header synonyms like 'Ill people' are treated as cases."""
        doc = AnnoDoc("""
As of 7 Jun 2019, a total of 279 people infected with the outbreak strains of _Salmonella_ have been reported from 41 states.
A list of the states and the number of cases in each is on the map of reported cases page.
State / Ill people
------------------
Alabama / 7
Arkansas / 8
Arizona / 1
California / 9
Colorado / 4
Connecticut / 3
""")
        doc.add_tier(self.annotator)
        self.assertEqual(len(doc.tiers['structured_incidents']), 6)
| 15,956 | 6,341 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Automatically apply the optimizations if the supported versions of FairSeq
are detected.
"""
import logging
import sys
from packaging import version
from fastseq.config import FASTSEQ_VERSION, MAX_FAIRSEQ_VERSION, MIN_FAIRSEQ_VERSION
from fastseq.logging import get_logger
from fastseq.utils.api_decorator import OPTIMIZED_CLASSES
from fastseq import config
# Module-level logger for all optimization/apply messages below.
logger = get_logger(__name__, logging.INFO)

# Sentinel version label; presumably compared against version strings
# elsewhere in the package — TODO confirm usage.
LATEST_VERSION = 'latest'
def is_supported_fairseq():
    """Check if the installed fairseq is supported.

    Returns:
        a bool value: True indicates the installed fairseq is supported.
    """
    installed = version.parse(fairseq.__version__)
    lower_bound = version.parse(MIN_FAIRSEQ_VERSION)
    upper_bound = version.parse(MAX_FAIRSEQ_VERSION)
    return lower_bound <= installed <= upper_bound
def apply_fairseq_optimization():
    """Automatically apply the optimization to the installed fairseq.

    The optimized classes and functions are replaced in runtime. If the
    installed fairseq version is outside the supported range, a warning is
    logged and nothing is patched.
    """
    if not is_supported_fairseq():
        logger.warning(
            f"fairseq(v{fairseq.__version__}) is not supported by fastseq(v"
            f"{FASTSEQ_VERSION}) yet, please change fairseq to "
            f"v{MIN_FAIRSEQ_VERSION} ~ v{MAX_FAIRSEQ_VERSION}, or check other "
            "versions of fastseq. Currently, no optimization in fastseq has "
            "been applied. Please ignore this warning if you are not using "
            "fairseq")
        return
    # Importing these modules applies their patches as a side effect.
    import fastseq.optimizer.fairseq.beam_search_optimizer  # pylint: disable=import-outside-toplevel
    if config.USE_EL_ATTN:
        import fastseq.optimizer.fairseq.el_attention_optimizer  # pylint: disable=import-outside-toplevel
    import fastseq.optimizer.fairseq.generate  # pylint: disable=import-outside-toplevel
    _update_fairseq_model_registration()
    logger.info(f"fairseq(v{fairseq.__version__}) has been optimized by "
                f"fastseq(v{FASTSEQ_VERSION}).")
def _update_fairseq_model_registration():
    """Use the optimized classes to update the registered fairseq models and
    arches.
    """
    # Replacing values for existing keys while iterating is safe: the dict
    # size never changes.
    for model_name, model_class in MODEL_REGISTRY.items():
        optimized_class = OPTIMIZED_CLASSES.get(model_class)
        if optimized_class is None:
            continue
        MODEL_REGISTRY[model_name] = optimized_class
        logger.debug(
            "Update the register model {} from {} to {}".format(
                model_name, model_class, optimized_class))
    for arch_name, model_class in ARCH_MODEL_REGISTRY.items():
        optimized_class = OPTIMIZED_CLASSES.get(model_class)
        if optimized_class is None:
            continue
        ARCH_MODEL_REGISTRY[arch_name] = optimized_class
        logger.debug(
            "Update the register model arch {} from {} to {}".format(
                arch_name, model_class, optimized_class))
# Import fairseq lazily-guarded: fastseq must remain importable when fairseq
# is absent, so failures here only downgrade to a warning.
is_fairseq_installed = True
try:
    import fairseq  # pylint: disable=ungrouped-imports
    from fairseq.models import ARCH_MODEL_REGISTRY, MODEL_REGISTRY  # pylint: disable=ungrouped-imports
    from fairseq.sequence_generator import SequenceGenerator  # pylint: disable=ungrouped-imports
except ImportError as error:
    is_fairseq_installed = False
    logger.warning('fairseq can not be imported. Please ignore this warning if '
                   'you are not using fairseq: {}'.format(error))

if is_fairseq_installed:
    try:
        apply_fairseq_optimization()
    except:  # noqa: E722 -- deliberately broad: log every failure, then re-raise.
        logger.error("Unexpected error: {}".format(sys.exc_info()[0]))
        raise
| 3,492 | 1,097 |
import model

# Ad-hoc sanity check: print row counts straight from the project database
# via model's private query helper. Assumes model._ex returns a DB-API-style
# cursor — TODO confirm against model's implementation.
print(dict(model._ex("select count(*) policies from tos_text").fetchone()))
print(dict(model._ex("select count(*) companies from company where last_error is null").fetchone()))
| 192 | 53 |
#! python3
# -*- coding: utf-8 -*-
# [ clipper ] #
# #
# Hey, welcome to clipper! This is a small tool I #
# have been building for personal use as a means #
# to take, analyze and upload screenshots quickly. #
# #
# I'm not sure how common this specific task is for #
# anyone else, but since, personally, it'd be a #
# huge time saver to have the process automated     #
# and bound to a shortcut, I'm making the source #
# available to whomever else happens to find this #
# useful as well. Enjoy! #
# #
# - Vinícius Menézio #
from .img.clipImage import ClipImage
from requests.exceptions import ConnectionError
from imgurpython.helpers.error import ImgurClientError
def main():
    """Capture the clipboard image via ClipImage and print its stats,
    upload URL and color table."""
    clippy = ClipImage()
    print( "dimensions:", clippy.width, "x", clippy.height, "px | colors:", len(clippy.palette) )
    print("filesize: LOCAL", clippy.size/1000, "KB, ONLINE", clippy.onlineSize/1000,"KB\n") # BREAKS IF IT CAN'T UPLOAD / RETRIEVE FILESIZE
    print("url:",clippy.url,"\n")
    print(clippy.getColorTable())


if __name__ == "__main__":
    main()
| 1,340 | 401 |
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* without
        repeating characters.

        Sliding-window over the string keeping, for each character, the
        index where it was last seen. O(n) time, O(k) space where k is the
        number of distinct characters.

        Fixes two defects of the original: the fixed 256-slot histogram
        raised IndexError for any character with ord() >= 256 (e.g. any
        non-Latin-1 text), and the builtin ``max`` was shadowed by a local.
        """
        last_seen = {}  # char -> most recent index
        best = 0
        left = 0  # left edge of the current duplicate-free window
        for right, ch in enumerate(s):
            # If ch occurs inside the current window, move the window start
            # past its previous occurrence.
            if ch in last_seen and last_seen[ch] >= left:
                left = last_seen[ch] + 1
            last_seen[ch] = right
            best = max(best, right - left + 1)
        return best
from setuptools import setup
# Read the long description from the README so PyPI renders the project page.
# encoding is pinned to UTF-8: without it, open() uses the platform default
# locale encoding and the install breaks on systems where that is not UTF-8.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='CellARE',
    version='0.0.2',
    description='A cellular automaton based implementation to run SIR simulations',
    py_modules=['cellare'],
    package_dir={'': 'src'},
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent'
    ],
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=[
        "numpy",
        "matplotlib"
    ],
    url='https://github.com/DiegoGH117/cellare',
    project_urls={
        'Documentation': 'https://cellare.readthedocs.io/en/latest/',
    },
)
import csv
import re
import pytest
from click.testing import CliRunner
from deploy_tools.cli import test_json_rpc, test_provider
from eth_tester.exceptions import TransactionFailed
from eth_utils import to_checksum_address
import auction_deploy.core
from auction_deploy.cli import AuctionState, main
from auction_deploy.core import (
DeployedAuctionContracts,
deploy_auction_contracts,
get_deployed_auction_contracts,
)
@pytest.fixture
def runner():
    """Fresh click CliRunner for invoking the auction-deploy CLI."""
    return CliRunner()
def extract_auction_address(output):
    """extract the auction address from 'deploy' output"""
    pattern = "^Auction address: (0x[0-9a-fA-F]{40})$"
    found = re.search(pattern, output, re.M)
    if found is None:
        raise ValueError(f"Could not find auction address in output: {repr(output)}")
    return found[1]
@pytest.fixture()
def deployed_auction_address(auction_options, runner, use_token, token_contract):
    """Deploys an auction and return its address"""
    argument = (
        f"deploy --release-timestamp 2000000000 --max-participants "
        f"{auction_options.maximal_number_of_participants} "
        f"--min-participants {auction_options.minimal_number_of_participants}"
        f" --start-price {auction_options.start_price} --jsonrpc test"
    )
    if use_token:
        argument += f" --use-token --token-address {auction_options.token_address}"
    deploy_result = runner.invoke(main, args=argument)
    # Surface CLI failures eagerly so dependent fixtures fail with context.
    if deploy_result.exception is not None:
        raise RuntimeError(
            "Error while trying to run auction-deploy"
        ) from deploy_result.exception
    return extract_auction_address(deploy_result.output)
@pytest.fixture()
def whitelisted_auction_address(runner, deployed_auction_address, whitelist_file):
    """Whitelists all addresses in the whitelist on the deployed auction and returns its address"""
    runner.invoke(
        main,
        args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} "
        + "--batch-size 100 --jsonrpc test",
    )
    return deployed_auction_address
@pytest.fixture()
def whitelist_file(tmp_path, whitelist):
    """Write the whitelist addresses to a one-column CSV and return its path."""
    folder = tmp_path / "subfolder"
    folder.mkdir()
    file_path = folder / "whitelist.csv"
    with file_path.open("w") as f:
        writer = csv.writer(f)
        writer.writerows([[to_checksum_address(address)] for address in whitelist])
    return file_path
@pytest.fixture
def contracts(deployed_auction_address) -> DeployedAuctionContracts:
    """return the core.DeployedAuctionContracts object for the currently active auction"""
    return get_deployed_auction_contracts(test_json_rpc, deployed_auction_address)
@pytest.fixture
def contracts_not_initialized(auction_options) -> DeployedAuctionContracts:
    """return the three auction related contracts where locker and slasher are not initialized"""
    contracts = deploy_auction_contracts(
        web3=test_json_rpc, auction_options=auction_options
    )
    return contracts
@pytest.fixture
def ensure_auction_state(contracts):
    """return a function that can be used to check the current auction state"""

    def ensure_state(expected_state):
        # Read the on-chain state and compare against the expected enum value.
        current_state = AuctionState(contracts.auction.functions.auctionState().call())
        assert current_state == expected_state

    return ensure_state
def bid(auction_contract, token_contract, sender, bid_value, use_token):
    """Place a bid from *sender*: ether-based, or token-based (approve then bid)."""
    if not use_token:
        auction_contract.functions.bid().transact({"from": sender, "value": bid_value})
        return
    token_contract.functions.approve(auction_contract.address, bid_value).transact(
        {"from": sender}
    )
    auction_contract.functions.bid().transact({"from": sender})
@pytest.fixture
def deposit_pending_auction(
    runner,
    deployed_auction_address,
    contracts,
    token_contract,
    auction_options,
    use_token,
    ether_owning_whitelist,
    ensure_auction_state,
):
    """return the auction contract with enough bids so that the state is `DepositPending`"""
    contracts.auction.functions.addToWhitelist(ether_owning_whitelist).transact()
    contracts.auction.functions.startAuction().transact()
    bid_value = contracts.auction.functions.currentPrice().call()
    # Two bids are enough to fill the auction in this test configuration.
    bid(
        contracts.auction,
        token_contract,
        ether_owning_whitelist[0],
        bid_value,
        use_token,
    )
    bid(
        contracts.auction,
        token_contract,
        ether_owning_whitelist[1],
        bid_value,
        use_token,
    )
    ensure_auction_state(AuctionState.DepositPending)
    return contracts.auction
def test_cli_release_date_option(runner):
    """--release-date is converted to the equivalent unix release timestamp."""
    deploy_result = runner.invoke(
        main, args="deploy --release-date '2033-05-18 03:33:21' --jsonrpc test"
    )
    assert deploy_result.exception is None
    assert deploy_result.exit_code == 0
    auction_address = extract_auction_address(deploy_result.output)
    contracts = get_deployed_auction_contracts(test_json_rpc, auction_address)
    release_timestamp = contracts.locker.functions.releaseTimestamp().call()
    # 2033-05-18 03:33:21 is timestamp 2000000001
    assert release_timestamp == 2_000_000_001
def test_cli_contract_parameters_set(runner):
    """Deploy succeeds with all contract parameters given explicitly."""
    result = runner.invoke(
        main,
        args="deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 "
        "--release-timestamp 2000000000 --jsonrpc test",
    )
    assert result.exit_code == 0
def test_cli_deploy_token_auction(runner):
    """Deploy succeeds in token mode with an arbitrary token address."""
    arbitrary_token_address = "0x" + "1234" * 10
    result = runner.invoke(
        main,
        args=f"deploy --use-token --token-address {arbitrary_token_address} --release-timestamp 2000000000 --jsonrpc test",
    )
    assert result.exit_code == 0
def test_cli_resume_deployment(runner, contracts_not_initialized):
    """Passing existing auction/locker addresses resumes instead of redeploying."""
    result = runner.invoke(
        main,
        args=f"deploy --start-price 123 --duration 4 --max-participants 567 --min-participants 456 "
        f"--release-timestamp 2000000000 --jsonrpc test --auction {contracts_not_initialized.auction.address}"
        f" --locker {contracts_not_initialized.locker.address}",
    )
    assert result.exit_code == 0
    # The reported address must be the pre-existing auction, not a new one.
    assert (
        extract_auction_address(result.output)
        == contracts_not_initialized.auction.address
    )
def test_cli_transaction_parameters_set(runner):
    """Deploy accepts explicit nonce, gas price and gas limit options."""
    result = runner.invoke(
        main,
        args="deploy --nonce 0 --gas-price 123456789 --gas 7000000 --release-timestamp 2000000000 --jsonrpc test",
    )
    assert result.exit_code == 0
def test_cli_private_key(runner, keystore_file_path, key_password):
    """Deploy works when signing with a keystore file (password via stdin)."""
    result = runner.invoke(
        main,
        args="deploy --jsonrpc test --release-timestamp 2000000000 --keystore "
        + str(keystore_file_path),
        input=key_password,
    )
    assert result.exit_code == 0
def test_cli_start_auction(runner, deployed_auction_address):
    """The 'start' subcommand succeeds on a freshly deployed auction."""
    result = runner.invoke(
        main, args="start --jsonrpc test --address " + deployed_auction_address
    )
    assert result.exit_code == 0
def test_cli_close_auction(
    runner, deployed_auction_address, ensure_auction_state, contracts
):
    """'close' after the auction duration elapses leaves the auction Failed."""
    result = runner.invoke(
        main, args=f"start --jsonrpc test --address {deployed_auction_address}"
    )
    assert result.exit_code == 0
    auction_duration = (
        contracts.auction.functions.auctionDurationInDays().call() * 24 * 3600
    )
    # auction is started, time travel forward
    test_provider.ethereum_tester.time_travel(
        test_json_rpc.eth.getBlock("latest").timestamp + auction_duration
    )
    test_provider.ethereum_tester.mine_block()
    result = runner.invoke(
        main, args=f"close --jsonrpc test --address {deployed_auction_address}"
    )
    assert result.exit_code == 0
    # No bids were placed, so closing marks the auction as Failed.
    ensure_auction_state(AuctionState.Failed)
def test_cli_start_auction_with_auto_nonce(
    runner, deployed_auction_address, keystores, key_password
):
    """test the auto-nonce option. we only do this for the start-auction"""
    result = runner.invoke(
        main,
        args=f"start --auto-nonce --jsonrpc test --keystore {keystores[0]}"
        + f" --address {deployed_auction_address}",
        input=key_password,
    )
    assert result.exit_code == 0
def test_cli_start_auction_key_not_owner(
    runner, deployed_auction_address, keystore_file_path, key_password
):
    """Test that when you attempt to start the auction with a private_key not corresponding to the
    owner of the auction, the command fails

    This shows that the command takes into account the key"""
    result = runner.invoke(
        main,
        args="start --jsonrpc test --address "
        + deployed_auction_address
        + " --keystore "
        + str(keystore_file_path),
        input=key_password,
    )
    assert result.exit_code == 1
def test_cli_deposit_bids(runner, deposit_pending_auction, ensure_auction_state):
    """'deposit-bids' transitions a DepositPending auction to Ended."""
    result = runner.invoke(
        main,
        args=f"deposit-bids --jsonrpc test --address {deposit_pending_auction.address}",
    )
    assert result.exit_code == 0
    ensure_auction_state(AuctionState.Ended)
@pytest.fixture()
def replace_bad_function_call_output():
    # TransactionFailed is raised by eth_tester
    # when BadFunctionCallOutput would be raised by web3 in `get_bid_token_address`
    bad_function_call = auction_deploy.core.BadFunctionCallOutput
    auction_deploy.core.BadFunctionCallOutput = TransactionFailed
    yield
    # Restore the original exception class after the test.
    auction_deploy.core.BadFunctionCallOutput = bad_function_call
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_auction_status(runner, deployed_auction_address):
    """'status' succeeds on a fully deployed auction."""
    result = runner.invoke(
        main, args="status --jsonrpc test --address " + deployed_auction_address
    )
    assert result.exit_code == 0
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_auction_status_locker_not_init(runner, contracts_not_initialized):
    """'status' also succeeds when locker/slasher are not yet initialized."""
    result = runner.invoke(
        main,
        args="status --jsonrpc test --address "
        + contracts_not_initialized.auction.address,
    )
    assert result.exit_code == 0
def test_cli_whitelist(runner, deployed_auction_address, whitelist_file, whitelist):
    """'whitelist' reports the number of addresses it whitelisted."""
    result = runner.invoke(
        main,
        args=f"whitelist --file {whitelist_file} --address {deployed_auction_address} "
        + "--batch-size 10 --jsonrpc test",
    )
    assert result.exit_code == 0
    assert result.output == f"Number of whitelisted addresses: {len(whitelist)}\n"
def test_cli_check_whitelist_not_whitelisted(
    runner, deployed_auction_address, whitelist_file, whitelist
):
    """'check-whitelist' reports all addresses pending on a fresh auction."""
    result = runner.invoke(
        main,
        args=f"check-whitelist --file {whitelist_file} --address {deployed_auction_address} "
        + "--jsonrpc test",
    )
    assert result.exit_code == 0
    assert (
        result.output
        == f"{len(whitelist)} of {len(whitelist)} addresses have not been whitelisted yet\n"
    )
def test_cli_check_whitelist_all_whitelisted(
    runner, whitelisted_auction_address, whitelist_file, whitelist
):
    """'check-whitelist' confirms when every address is already whitelisted."""
    result = runner.invoke(
        main,
        args=f"check-whitelist --file {whitelist_file} --address {whitelisted_auction_address} "
        + "--jsonrpc test",
    )
    assert result.exit_code == 0
    assert result.output == f"All {len(whitelist)} addresses have been whitelisted\n"
@pytest.mark.usefixtures("replace_bad_function_call_output")
def test_cli_not_checksummed_address(runner, deployed_auction_address):
    """Lower-cased (non-checksummed) addresses are accepted by the CLI."""
    address = deployed_auction_address.lower()
    result = runner.invoke(main, args=f"status --jsonrpc test --address {address}")
    assert result.exit_code == 0
def test_cli_incorrect_address_parameter_fails(runner):
    """A malformed --address value is rejected with a usage error (exit 2)."""
    not_an_address = "not_an_address"
    result = runner.invoke(
        main, args=f"status --jsonrpc test --address {not_an_address}"
    )
    assert (
        f"The address parameter is not recognized to be an address: {not_an_address}"
        in result.output
    )
    assert result.exit_code == 2
| 12,008 | 3,999 |
import os
from core.log import log
from core.cli_helpers import CLIHelper
from core.utils import mktemp_dump, sorted_dict
from core.ycheck.events import YEventCheckerBase
from core.searchtools import (
SearchDef,
SequenceSearchDef,
FileSearcher,
)
from core import (
checks,
plugintools,
)
# Process-name patterns used to identify running RabbitMQ services.
RMQ_SERVICES_EXPRS = [
    r"beam.smp",
    r"epmd",
    r"rabbitmq-server",
]
# Package-name patterns used to identify installed RabbitMQ packages.
RMQ_PACKAGES = [
    r"rabbitmq-server",
]
def cached_property(f):
    """Property decorator that memoizes its result in the instance's
    ``_property_cache`` dict (which the instance must provide), keyed by
    the wrapped function's name. The wrapped function runs at most once
    per instance.
    """
    @property
    def _inner(inst):
        cache = inst._property_cache
        name = f.__name__
        if name not in cache:
            cache[name] = f(inst)
        return cache[name]
    return _inner
class RabbitMQReport(object):
"""
Class providing easy access to the contents of a rabbitmqctl report.
First registers search definitions to execute against rabbitmqctl report
then runs the search to fetch the information that is then expose through
properties.
NOTE: the rabbitmqctl report output differs between versions 3.6.x and
3.8.x and we try to account for either by providing optional
regex expressions to match either.
"""
    def __init__(self):
        # Backing store for the cached_property decorator used on this class.
        self._property_cache = {}
        # save to file so we can search it later
        self._f_report = mktemp_dump(''.join(CLIHelper().rabbitmqctl_report()))
        searcher = FileSearcher()
        # Register all search definitions, then run a single combined search
        # over the report dump; properties read from self.results afterwards.
        searcher.add_search_term(self.connections_searchdef, self._f_report)
        searcher.add_search_term(self.memory_searchdef, self._f_report)
        searcher.add_search_term(self.cluster_partition_handling_searchdef,
                                 self._f_report)
        searcher.add_search_term(self.queues_searchdef, self._f_report)
        self.results = searcher.search()
def __del__(self):
if os.path.exists(self._f_report):
os.unlink(self._f_report)
    @cached_property
    def queues_searchdef(self):
        """Sequence search matching per-vhost queue listings in the report.

        The section header differs between rabbitmq 3.6.x ("Queues on ...")
        and 3.8.x ("Listing queues for vhost ..."), so both forms are given.
        """
        start = SearchDef([r"^Queues on ([^:]+):",
                           (r"^Listing queues for vhost ([^:]+) "
                            r"...")])
        # NOTE: we don't use a list for the body here because
        # we need to know which expression matched so that we
        # can know in which order to retrieve the columns since
        # their order is inverted between 3.6.x and 3.8.x
        body = SearchDef(r"^(?:<([^.\s]+)[.0-9]+>\s+(\S+)|"
                         r"(\S+)\s+(?:\S+\s+){4}<([^.\s]+)[.0-9]"
                         r"+>)\s+.+")
        # Sections are terminated by a blank line.
        end = SearchDef(r"^$")
        return SequenceSearchDef(start=start, body=body, end=end,
                                 tag='queues')
    @cached_property
    def skewed_nodes(self):
        """Map of node name -> skew count for nodes hosting a
        disproportionate share (>75%) of a significant vhost's queues.

        NOTE(review): nesting reconstructed from mangled indentation — the
        per-vhost "greatest skew" block is assumed to be inside the vhost
        loop (the `_skewed_nodes.get` comparison only has effect there);
        confirm against upstream.
        """
        vhosts = self.vhosts
        _skewed_nodes = {}
        skewed_queue_nodes = {}
        global_total_queues = sum([vhost.total_queues for vhost in vhosts])
        for vhost in self.vhosts:
            if not vhost.total_queues:
                continue
            # Share of all queues (across every vhost) held by this vhost.
            total_pcent = (float(100) / global_total_queues *
                           vhost.total_queues)
            for node, vhost_dist in vhost.node_queue_distributions.items():
                # Only significant vhosts (>=1% of all queues) count; a node
                # holding >75% of such a vhost's queues is skewed.
                if total_pcent >= 1 and vhost_dist['pcent'] > 75:
                    if node not in skewed_queue_nodes:
                        skewed_queue_nodes[node] = 0
                    skewed_queue_nodes[node] += 1
            # Report the node with the greatest skew of queues/vhost
            if skewed_queue_nodes:
                max_node = None
                for node_name in skewed_queue_nodes:
                    if max_node is None:
                        max_node = node_name
                    elif (skewed_queue_nodes[node_name] >=
                            skewed_queue_nodes[max_node]):
                        max_node = node_name
                if (skewed_queue_nodes[max_node] >
                        _skewed_nodes.get(max_node, 0)):
                    _skewed_nodes[max_node] = skewed_queue_nodes[max_node]
        return _skewed_nodes
@cached_property
def vhosts(self):
seq_def = self.queues_searchdef
vhosts = []
for section in self.results.find_sequence_sections(seq_def).values():
vhost = None
# ensure we get vhost before the rest
for result in section:
if result.tag == seq_def.start_tag:
# check both report formats
vhost = RabbitMQVhost(result.get(1))
break
for result in section:
if result.tag == seq_def.body_tag:
node_name = result.get(1) or result.get(4)
# if we matched the section header, skip
if node_name == "pid":
continue
queue = result.get(2) or result.get(3)
# if we matched the section header, skip
if queue == "name":
continue
vhost.node_inc_queue_count(node_name)
log.debug(vhost.name)
vhosts.append(vhost)
return vhosts
@cached_property
def connections_searchdef(self):
start = SearchDef([r"^Connections:$",
r"^Listing connections ...$"])
# Again, the user and protocol columns are inverted
# between 3.6.x and 3.8.x so we have to catch both and
# decide.
body = SearchDef(r"^<(rabbit[^>.]*)(?:[.][0-9]+)+>.+(?:[A-Z]+\s+{[\d,]+}\s+(\S+)|\d+\s+{[\d,]+}\s+\S+\s+(\S+)).+{\"connection_name\",\"([^:]+):\d+:.+$") # noqa
end = SearchDef(r"^$")
return SequenceSearchDef(start=start, body=body, end=end,
tag='connections')
@cached_property
def memory_searchdef(self):
start = SearchDef([r"^Status of node '([^']*)'$",
r"^Status of node ([^']*) ...$"])
body = SearchDef(r"^\s+\[{total,([0-9]+)}.+")
end = SearchDef(r"^$")
return SequenceSearchDef(start=start, body=body, end=end,
tag='memory')
@cached_property
def cluster_partition_handling_searchdef(self):
return SearchDef(r"^\s*{cluster_partition_handling,([^}]*)}",
tag='cluster_partition_handling')
@cached_property
def connections(self):
_connections = {'host': {}, 'client': {}}
sd = self.connections_searchdef
for results in self.results.find_sequence_sections(sd).values():
for result in results:
if result.tag == sd.body_tag:
host = result.get(1)
if host not in _connections['host']:
_connections['host'][host] = 1
else:
_connections['host'][host] += 1
# detect 3.6.x or 3.8.x format
user = result.get(2)
if user is None:
user = result.get(3)
client_name = result.get(4)
if user not in _connections['client']:
_connections['client'][user] = {}
if client_name not in _connections['client'][user]:
_connections['client'][user][client_name] = 1
else:
_connections['client'][user][client_name] += 1
if _connections['host']:
for client, users in _connections['client'].items():
sorted_users = sorted_dict(users, key=lambda e: e[1],
reverse=True)
_connections['client'][client] = sorted_users
return _connections
@cached_property
def memory_used(self):
sd = self.memory_searchdef
_memory_used = {}
for results in self.results.find_sequence_sections(sd).values():
for result in results:
if result.tag == sd.start_tag:
# check both report formats
node_name = result.get(1)
elif result.tag == sd.body_tag:
total = result.get(1)
mib_used = int(total) / 1024. / 1024.
_memory_used[node_name] = "{:.3f}".format(mib_used)
return _memory_used
@cached_property
def partition_handling(self):
results = self.results.find_by_tag("cluster_partition_handling")
if not results:
return
return results[0].get(1)
class RabbitMQVhost(object):
def __init__(self, name):
self.name = name
self._node_queues = {}
def node_inc_queue_count(self, node):
if node not in self._node_queues:
self._node_queues[node] = 0
self._node_queues[node] += 1
@property
def total_queues(self):
return sum(self.node_queues.values())
@property
def node_queues(self):
return self._node_queues
def node_queues_vhost_pcent(self, node):
return float(100) / self.total_queues * self.node_queues[node]
@property
def node_queue_distributions(self):
dists = {}
for node, queues in self.node_queues.items():
if queues:
vhost_pcent = self.node_queues_vhost_pcent(node)
dists[node] = {'queues': queues, 'pcent': vhost_pcent}
else:
dists[node] = {'queues': 0, 'pcent': 0}
return dists
class RabbitMQBase(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.report = RabbitMQReport()
class RabbitMQChecksBase(RabbitMQBase, plugintools.PluginPartBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.apt_check = checks.APTPackageChecksBase(core_pkgs=RMQ_PACKAGES)
@property
def plugin_runnable(self):
if self.apt_check.core:
return True
return False
class RabbitMQServiceChecksBase(RabbitMQChecksBase, checks.ServiceChecksBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, service_exprs=RMQ_SERVICES_EXPRS, **kwargs)
class RabbitMQEventChecksBase(RabbitMQChecksBase, YEventCheckerBase):
@property
def summary(self):
# mainline all results into summary root
return self.run_checks()
| 10,555 | 3,190 |
import itertools
import warnings
from inspect import signature
from timeit import default_timer
from sklearn.preprocessing import normalize
import dask
import numpy as np
try:
import shap
except:
msg = "SHAP not found, therefore using SHAP-values for feature importance not available."
warnings.warn(msg)
shap = None
from dask import delayed
from networkx import NetworkXUnfeasible, find_cycle, topological_sort
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..algo import (
evaluation,
imputation,
inference,
inference_v3,
new_inference,
new_prediction,
selection,
vector_prediction,
)
from ..algo.induction import base_induction_algorithm, expand_induction_algorithm
from ..composition import CompositeModel, NewCompositeModel, o, x
from ..graph import build_diagram, compose_all, get_targ, model_to_graph
from ..utils import (
DESC_ENCODING,
MISS_ENCODING,
TARG_ENCODING,
DecoratedDecisionTreeClassifier,
DecoratedDecisionTreeRegressor,
DecoratedRandomForestClassifier,
DecoratedRandomForestRegressor,
code_to_query,
get_i_o,
query_to_code,
)
from ..visuals import save_diagram, show_diagram
try:
from xgboost import XGBClassifier as XGBC
from xgboost import XGBRegressor as XGBR
except:
XGBC, XGBR = None, None
try:
from lightgbm import LGBMClassifier as LGBMC
from lightgbm import LGBMRegressor as LGBMR
except:
LGBMC, LGBMR = None, None
try:
from catboost import CatBoostClassifier as CBC
from catboost import CatBoostRegressor as CBR
except:
CBC, CBR = None, None
try:
from wekalearn import RandomForestClassifier as WLC
from wekalearn import RandomForestRegressor as WLR
except:
WLC, WLR = None, None
class Mercs(object):
delimiter = "_"
selection_algorithms = dict(
default=selection.base_selection_algorithm,
base=selection.base_selection_algorithm,
random=selection.random_selection_algorithm,
)
induction_algorithms = dict(
base=base_induction_algorithm,
default=base_induction_algorithm,
expand=expand_induction_algorithm,
)
classifier_algorithms = dict(
DT=DecisionTreeClassifier,
DDT=DecoratedDecisionTreeClassifier,
RF=RandomForestClassifier,
DRF=DecoratedRandomForestClassifier,
XGB=XGBC,
xgb=XGBC,
weka=WLC,
LGBM=LGBMC,
lgbm=LGBMC,
CB=CBC,
extra=ExtraTreesClassifier,
)
regressor_algorithms = dict(
DT=DecisionTreeRegressor,
DDT=DecoratedDecisionTreeRegressor,
RF=RandomForestRegressor,
DRF=DecoratedDecisionTreeRegressor,
XGB=XGBR,
xgb=XGBR,
weka=WLR,
LGBM=LGBMR,
lgbm=LGBMR,
CB=CBR,
extra=ExtraTreesRegressor,
)
prediction_algorithms = dict(
mi=vector_prediction.mi,
mrai=vector_prediction.mrai,
it=vector_prediction.it,
rw=vector_prediction.rw,
)
inference_algorithms = dict(
base=inference.base_inference_algorithm,
dask=inference_v3.inference_algorithm,
own=inference_v3.inference_algorithm,
)
imputer_algorithms = dict(
nan=imputation.nan_imputation,
NAN=imputation.nan_imputation,
NaN=imputation.nan_imputation,
null=imputation.nan_imputation,
NULL=imputation.nan_imputation,
skl=imputation.skl_imputation,
base=imputation.skl_imputation,
default=imputation.skl_imputation,
)
evaluation_algorithms = dict(
base=evaluation.base_evaluation,
default=evaluation.base_evaluation,
dummy=evaluation.dummy_evaluation,
)
# Used in parse kwargs to identify parameters. If this identification goes wrong, you are sending settings
# somewhere you do not want them to be. So, this is a tricky part, and moreover hardcoded. In other words:
# this is risky terrain, and should probably be done differently in the future.
configuration_prefixes = dict(
imputation={"imputation", "imp"},
induction={"induction", "ind"},
selection={"selection", "sel"},
prediction={"prediction", "pred", "prd"},
inference={"inference", "infr", "inf"},
classification={"classification", "classifier", "clf"},
regression={"regression", "regressor", "rgr"},
metadata={"metadata", "meta", "mtd"},
evaluation={"evaluation", "evl"},
)
def __init__(
self,
selection_algorithm="base",
induction_algorithm="base",
classifier_algorithm="DT",
regressor_algorithm="DT",
prediction_algorithm="mi",
inference_algorithm="own",
imputer_algorithm="default",
evaluation_algorithm="default",
random_state=42,
**kwargs
):
self.params = dict(
selection_algorithm=selection_algorithm,
induction_algorithm=induction_algorithm,
classifier_algorithm=classifier_algorithm,
regressor_algorithm=regressor_algorithm,
prediction_algorithm=prediction_algorithm,
inference_algorithm=inference_algorithm,
imputer_algorithm=imputer_algorithm,
evaluation_algorithm=evaluation_algorithm,
random_state=random_state,
)
self.params = {**self.params, **kwargs}
self.random_state = random_state
self.selection_algorithm = self.selection_algorithms[selection_algorithm]
# N.b.: First try to look up the key. If the key is not found, we assume the algorithm itself was passed.
self.classifier_algorithm = self.classifier_algorithms.get(
classifier_algorithm, classifier_algorithm
)
self.regressor_algorithm = self.regressor_algorithms.get(
regressor_algorithm, regressor_algorithm
)
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.induction_algorithm = self.induction_algorithms[
induction_algorithm
] # For now, we only have one.
self.imputer_algorithm = self.imputer_algorithms[imputer_algorithm]
self.evaluation_algorithm = self.evaluation_algorithms[evaluation_algorithm]
# Data-structures
self.m_codes = np.array([])
self.m_list = []
self.c_list = []
self.g_list = []
self.i_list = []
self.m_fimps = np.array([])
self.m_score = np.array([])
self.FI = np.array([])
self.targ_ids = np.array([])
# Query-related things
self.q_code = None
self.q_desc_ids = None
self.q_targ_ids = None
self.q_diagram = None
self.q_compose = None
self.q_methods = []
# Configurations
self.imp_cfg = self._default_config(self.imputer_algorithm)
self.ind_cfg = self._default_config(self.induction_algorithm)
self.sel_cfg = self._default_config(self.selection_algorithm)
self.clf_cfg = self._default_config(self.classifier_algorithm)
self.rgr_cfg = self._default_config(self.regressor_algorithm)
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.inf_cfg = self._default_config(self.inference_algorithm)
self.evl_cfg = self._default_config(self.evaluation_algorithm)
self.configuration = dict(
imputation=self.imp_cfg,
induction=self.ind_cfg,
selection=self.sel_cfg,
classification=self.clf_cfg,
regression=self.rgr_cfg,
prediction=self.prd_cfg,
inference=self.inf_cfg,
) # Collect all configs in one
self._update_config(random_state=random_state, **kwargs)
self.metadata = dict()
self.model_data = dict()
self._extra_checks_on_config()
return
def fit(self, X, y=None, m_codes=None, **kwargs):
assert isinstance(X, np.ndarray)
if y is not None:
assert isinstance(y, np.ndarray)
X = np.c_[X, y]
tic = default_timer()
self.metadata = self._default_metadata(X)
self._update_metadata(**kwargs)
self.i_list = self.imputer_algorithm(X, self.metadata.get("nominal_attributes"))
# N.b.: `random state` parameter is in `self.sel_cfg`
if m_codes is None:
self.m_codes = self.selection_algorithm(self.metadata, **self.sel_cfg)
else:
self.m_codes = m_codes
self.m_list = self.induction_algorithm(
X,
self.m_codes,
self.metadata,
self.classifier_algorithm,
self.regressor_algorithm,
self.clf_cfg,
self.rgr_cfg,
**self.ind_cfg
)
self._filter_m_list_m_codes()
self._consistent_datastructures()
if self.imputer_algorithm == self.imputer_algorithms.get("nan"):
# If you do no have imputers, you cannot use them as a baseline evaluation
self.evl_cfg["consider_imputations"] = False
self.m_score = self.evaluation_algorithm(
X, self.m_codes, self.m_list, self.i_list, **self.evl_cfg
)
toc = default_timer()
self.model_data["ind_time"] = toc - tic
self.metadata["n_component_models"] = len(self.m_codes)
return
def predict(
self,
X,
q_code=None,
inference_algorithm=None,
prediction_algorithm=None,
**kwargs
):
# Update configuration if necessary
if q_code is None:
q_code = self._default_q_code()
if inference_algorithm is not None:
self._reconfig_inference(inference_algorithm=inference_algorithm)
if prediction_algorithm is not None:
self._reconfig_prediction(
prediction_algorithm=prediction_algorithm, **kwargs
)
# Adjust data
self.q_code = q_code
self.q_desc_ids, self.q_targ_ids, _ = code_to_query(
self.q_code, return_list=True
)
# Make query-diagram
tic_prediction = default_timer()
self.m_sel = self.prediction_algorithm(
self.m_codes, self.m_fimps, self.m_score, q_code=self.q_code, **self.prd_cfg
)
toc_prediction = default_timer()
tic_diagram = default_timer()
self.q_diagram = self._build_q_diagram(self.m_list, self.m_sel)
toc_diagram = default_timer()
tic_infalgo = default_timer()
if isinstance(self.q_diagram, tuple):
self.q_diagrams = self.q_diagram
# for d in self.q_diagrams:
# print(d.nodes)
# self.c_list.append(self._build_q_model(X, d))
self.c_list = [self._build_q_model(X, d) for d in self.q_diagrams]
self.c_sel = list(range(len(self.c_list)))
self.c_diagram = self._build_q_diagram(
self.c_list, self.c_sel, composition=True
)
self.q_model = self._build_q_model(X, self.c_diagram)
else:
self.q_model = self._build_q_model(X, self.q_diagram)
toc_infalgo = default_timer()
tic_dask = default_timer()
X = X[:, self.q_model.desc_ids]
result = self.q_model.predict(X)
toc_dask = default_timer()
self.model_data["prd_time"] = toc_prediction - tic_prediction
self.model_data["dia_time"] = toc_diagram - tic_diagram
self.model_data["infalgo_time"] = toc_infalgo - tic_infalgo
self.model_data["dsk_time"] = toc_dask - tic_dask
self.model_data["inf_time"] = toc_dask - tic_prediction
return result
def get_params(self, deep=False):
return self.params
# Diagrams
def _build_q_diagram(self, m_list, m_sel, composition=False):
if isinstance(m_sel, tuple):
diagrams = [
build_diagram(
m_list,
m_sel_instance,
self.q_code,
prune=True,
composition=composition,
)
for m_sel_instance in m_sel
]
return tuple(diagrams)
else:
return build_diagram(
m_list, m_sel, self.q_code, prune=True, composition=composition
)
def show_q_diagram(self, kind="svg", fi=False, ortho=False, index=None, **kwargs):
if isinstance(self.q_diagram, tuple) and index is None:
return show_diagram(self.c_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
elif isinstance(self.q_diagram, tuple):
return show_diagram(
self.q_diagram[index], kind=kind, fi=fi, ortho=ortho, **kwargs
)
else:
return show_diagram(self.q_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
def save_diagram(self, fname=None, kind="svg", fi=False, ortho=False):
return save_diagram(self.q_diagram, fname, kind=kind, fi=fi, ortho=ortho)
# Inference
def _build_q_model(self, X, diagram):
try:
self.inference_algorithm(
diagram,
self.m_list,
self.i_list,
self.c_list,
X,
self.metadata.get("nominal_attributes"),
)
except NetworkXUnfeasible:
cycle = find_cycle(self.q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
n_component_models = self.metadata["n_component_models"]
q_model = NewCompositeModel(
diagram,
nominal_attributes=self.metadata["nominal_attributes"],
n_component_models=n_component_models,
)
return q_model
def _merge_q_models(self, q_models):
q_diagram = build_diagram(self.c_list, self.c_sel, self.q_code, prune=True)
return q_diagram
def merge_models(self, q_models):
types = self._get_types(self.metadata)
walks = [
model_to_graph(m, types, idx=idx, composition=True)
for idx, m in enumerate(q_models)
]
q_diagram = compose_all(walks)
filtered_nodes = self.filter_nodes(q_diagram)
try:
self.inference_algorithm(q_diagram, sorted_nodes=filtered_nodes)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_diagram, q_model
def _get_q_model(self, q_diagram, X):
self._add_imputer_function(q_diagram)
try:
self.inference_algorithm(q_diagram, X=X)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_model
# Filter
def _filter_m_list_m_codes(self):
"""Filtering out the failed models.
This happens when TODO: EXPLAIN
"""
fail_m_idxs = [i for i, m in enumerate(self.m_list) if m is None]
self.m_codes = np.delete(self.m_codes, fail_m_idxs, axis=0)
self.m_list = [m for m in self.m_list if m is not None]
return
# Graphs
def _consistent_datastructures(self, binary_scores=False):
self._update_m_codes()
self._update_m_fimps()
return
def _expand_m_list(self):
self.m_list = list(itertools.chain.from_iterable(self.m_list))
return
def _add_model(self, model, binary_scores=False):
self.m_list.append(model)
self._consistent_datastructures(binary_scores=binary_scores)
return
def _update_m_codes(self):
self.m_codes = np.array(
[
query_to_code(
list(model.desc_ids),
list(model.targ_ids),
attributes=self.metadata["attributes"],
)
for model in self.m_list
]
)
return
def _update_m_fimps(self):
init = np.zeros(self.m_codes.shape)
for m_idx, mod in enumerate(self.m_list):
init[m_idx, list(mod.desc_ids)] = mod.feature_importances_
self.m_fimps = init
return
def _update_m_score(self, binary_scores=False):
if binary_scores:
self.m_score = (self.m_codes == TARG_ENCODING).astype(float)
return
# Imputer
def _add_imputer_function(self, g):
for n in g.nodes:
if g.nodes[n]["kind"] == "imputation":
idx = g.nodes[n]["idx"]
f_1 = self._dummy_array # Artificial input
f_2 = self.i_list[idx].transform # Actual imputation
f_3 = np.ravel # Return a vector, not array
g.nodes[n]["function"] = o(f_3, o(f_2, f_1))
return
# Add ids
@staticmethod
def _add_ids(g, desc_ids, targ_ids):
g.graph["desc_ids"] = set(desc_ids)
g.graph["targ_ids"] = set(targ_ids)
return g
# Metadata
def _default_metadata(self, X):
if X.ndim != 2:
X = X.reshape(-1, 1)
n_rows, n_cols = X.shape
types = [X[0, 0].dtype for _ in range(n_cols)]
nominal_attributes = set(
[att for att, typ in enumerate(types) if self._is_nominal(typ)]
)
numeric_attributes = set(
[att for att, typ in enumerate(types) if self._is_numeric(typ)]
)
metadata = dict(
attributes=set(range(n_cols)),
n_attributes=n_cols,
types=types,
nominal_attributes=nominal_attributes,
numeric_attributes=numeric_attributes,
)
return metadata
def _update_metadata(self, **kwargs):
self._update_dictionary(self.metadata, kind="metadata", **kwargs)
# Assure every attribute is `typed`: If not every attribute is here, set to numeric type (default)
numeric = self.metadata["numeric_attributes"]
nominal = self.metadata["nominal_attributes"]
att_ids = self.metadata["attributes"]
# All attributes should be accounted for and none should be double.
if (len(nominal) + len(numeric) - len(att_ids)) != 0:
numeric = att_ids - nominal
self._update_dictionary(
self.metadata, kind="metadata", numeric_attributes=numeric
)
return
# Configuration
def _reconfig_prediction(self, prediction_algorithm="mi", **kwargs):
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.configuration["prediction"] = self.prd_cfg
self._update_config(**kwargs)
return
def _reconfig_inference(self, inference_algorithm="own", **kwargs):
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.inf_cfg = self._default_config(self.inference_algorithm)
self.configuration["inference"] = self.inf_cfg
self._update_config(**kwargs)
return
@staticmethod
def _default_config(method):
config = {}
sgn = signature(method)
for key, parameter in sgn.parameters.items():
if parameter.default is not parameter.empty:
config[key] = parameter.default
return config
def _update_config(self, **kwargs):
for kind in self.configuration:
self._update_dictionary(self.configuration[kind], kind=kind, **kwargs)
return
def _extra_checks_on_config(self):
self._check_xgb_single_target()
return
def _check_xgb_single_target(self):
nb_targets = self.configuration["selection"]["nb_targets"]
if nb_targets == 1:
return None
else:
if (
self.classifier_algorithm is self.classifier_algorithms["XGB"]
or self.regressor_algorithm is self.regressor_algorithms["XGB"]
):
xgb = True
else:
xgb = False
if xgb:
msg = """
XGBoost cannot deal with multi-target outputs.
Hence, the `nb_targets` parameter is automatically adapted to 1,
so only single-target trees will be learned.
Please take this into account.
"""
warnings.warn(msg)
self.configuration["selection"]["nb_targets"] = 1
return
def _parse_kwargs(self, kind="selection", **kwargs):
prefixes = [e + self.delimiter for e in self.configuration_prefixes[kind]]
parameter_map = {
x.split(prefix)[1]: x
for x in kwargs
for prefix in prefixes
if x.startswith(prefix)
}
return parameter_map
def _update_dictionary(self, dictionary, kind=None, **kwargs):
# Immediate matches
overlap = set(dictionary).intersection(set(kwargs))
for k in overlap:
dictionary[k] = kwargs[k]
if kind is not None:
# Parsed matches
parameter_map = self._parse_kwargs(kind=kind, **kwargs)
overlap = set(dictionary).intersection(set(parameter_map))
for k in overlap:
dictionary[k] = kwargs[parameter_map[k]]
return
# Helpers
def _filter_X(self, X):
# Filter relevant input attributes
if X.shape[1] != len(self.q_compose.desc_ids):
indices = self._overlapping_indices(
self.q_desc_ids, self.q_compose.desc_ids
)
return X[:, indices]
@staticmethod
def _dummy_array(X):
"""
Return an array of np.nan, with the same number of rows as the input array.
Parameters
----------
X: np.ndarray(), n_rows, n_cols = X.shape,
We use the shape of X to deduce shape of our output.
Returns
-------
a: np.ndarray(), shape= (n_rows, 1)
n_rows is the same as the number of rows as X.
"""
n_rows, _ = X.shape
a = np.empty((n_rows, 1))
a.fill(np.nan)
return a
def _default_q_code(self):
q_code = np.zeros(self.metadata["n_attributes"])
q_code[-1] = TARG_ENCODING
return q_code
@staticmethod
def _is_nominal(t):
condition_01 = t == np.dtype(int)
return condition_01
@staticmethod
def _is_numeric(t):
condition_01 = t == np.dtype(float)
return condition_01
@staticmethod
def _get_types(metadata):
nominal = {i: "nominal" for i in metadata["nominal_attributes"]}
numeric = {i: "numeric" for i in metadata["numeric_attributes"]}
return {**nominal, **numeric}
@staticmethod
def _overlapping_indices(a, b):
"""
Given an array a and b, return the indices (in a) of elements that occur in both a and b.
Parameters
----------
a
b
Returns
-------
Examples
--------
a = [4,5,6]
b = [4,6,7]
overlapping_indices(a, b) = [0,2]
"""
return np.nonzero(np.in1d(a, b))[0]
@staticmethod
def filter_nodes(g):
# This is not as safe as it should be
sorted_nodes = list(topological_sort(g))
filtered_nodes = []
for n in reversed(sorted_nodes):
if g.nodes[n]["kind"] == "model":
break
filtered_nodes.append(n)
filtered_nodes = list(reversed(filtered_nodes))
return filtered_nodes
# SYNTH
def autocomplete(self, X, **kwargs):
return
# Legacy (delete when I am sure they can go)
def predict_old(
self, X, q_code=None, prediction_algorithm=None, beta=False, **kwargs
):
# Update configuration if necessary
if q_code is None:
q_code = self._default_q_code()
if prediction_algorithm is not None:
reuse = False
self._reconfig_prediction(
prediction_algorithm=prediction_algorithm, **kwargs
)
# Adjust data
tic_prediction = default_timer()
self.q_code = q_code
self.q_desc_ids, self.q_targ_ids, _ = code_to_query(
self.q_code, return_list=True
)
# Make query-diagram
self.q_diagram = self.prediction_algorithm(
self.g_list, q_code, self.fi, self.t_codes, **self.prd_cfg
)
toc_prediction = default_timer()
tic_dask = default_timer()
toc_dask = default_timer()
tic_compute = default_timer()
res = self.q_model.predict.compute()
toc_compute = default_timer()
# Diagnostics
self.model_data["prd_time"] = toc_prediction - tic_prediction
self.model_data["dsk_time"] = toc_dask - tic_dask
self.model_data["cmp_time"] = toc_compute - tic_compute
self.model_data["inf_time"] = toc_compute - tic_prediction
self.model_data["ratios"] = (
self.model_data["prd_time"] / self.model_data["inf_time"],
self.model_data["dsk_time"] / self.model_data["inf_time"],
self.model_data["cmp_time"] / self.model_data["inf_time"],
)
return res
def _update_g_list(self):
types = self._get_types(self.metadata)
self.g_list = [
model_to_graph(m, types=types, idx=idx) for idx, m in enumerate(self.m_list)
]
return
def _update_t_codes(self):
self.t_codes = (self.m_codes == TARG_ENCODING).astype(int)
return
# AVATAR-TOOLS
def avatar(
self,
explainer_data,
background_data=None,
check_additivity=True,
keep_abs_shaps=False,
**explainer_kwargs
):
assert shap is not None, "SHAP not found, so cannot do anything here."
self._init_avatar()
for m_idx in range(len(self.m_list)):
# Extract tree and m_code
tree = self.m_list[m_idx].model
m_code = self.m_codes[m_idx]
# Filter data
attribute_filter = m_code == DESC_ENCODING
X = explainer_data[:, attribute_filter]
if background_data is not None:
B = background_data[:, attribute_filter]
else:
B = background_data
# Shap Calculation
explainer = shap.TreeExplainer(tree, data=B, **explainer_kwargs)
raw_shaps = explainer.shap_values(X, check_additivity=check_additivity)
# Process Shap values
abs_shaps = self._raw_to_abs_shaps(raw_shaps)
nrm_shaps = self._abs_to_nrm_shaps(abs_shaps)
if keep_abs_shaps:
self.abs_shaps.append(abs_shaps)
self.nrm_shaps.append(nrm_shaps)
self._format_abs_shaps()
self._format_nrm_shaps()
return
@staticmethod
def _raw_to_abs_shaps(raw_shaps):
# Process Shap values
tsr_shaps = np.array(raw_shaps) # tensor
abs_shaps = np.abs(tsr_shaps) # absolute
if len(abs_shaps.shape) == 3:
# In case of nominal target, sum shap values across target classes
abs_shaps = np.sum(abs_shaps, axis=0)
return abs_shaps
@staticmethod
def _abs_to_nrm_shaps(abs_shaps):
avg_shaps = np.mean(
abs_shaps, axis=0
) # Avg over instances (of explainer data!)
nrm_shaps = np.squeeze(
normalize(avg_shaps.reshape(1, -1), norm="l1")
) # Normalize (between 0 and 1)
return nrm_shaps
def avatar_q_model(
self,
X_train,
X_test,
l1_reg="num_features(10)",
check_additivity=False,
n_samples=20,
silent=True,
):
assert shap is not None, "SHAP not found, so cannot do anything here."
# Extract function to explain
m = self.q_model
f = self._extract_function_to_explain(self.q_model)
# Data
assert (
X_train.shape[1] == X_test.shape[1]
), "Inconsistent attribute count. Your carelessness is disappointing."
if X_train.shape[1] != len(m.desc_ids):
attribute_filter = m.desc_ids
X_train = X_train[:, attribute_filter]
X_test = X_test[:, attribute_filter]
explainer = shap.KernelExplainer(f, shap.sample(X_train, n_samples))
raw_shaps = explainer.shap_values(
X_test, l1_reg=l1_reg, check_additivity=check_additivity, silent=silent
)
# Process Shap values
abs_shaps = self._raw_to_abs_shaps(raw_shaps)
nrm_shaps = self._abs_to_nrm_shaps(abs_shaps)
return nrm_shaps
@staticmethod
def _extract_function_to_explain(m):
assert m.n_outputs_ == 1
# Extract function
if m.out_kind in {"nominal"}:
f = lambda x: m.predict_proba(x)[0]
elif m.out_kind in {"numerc"}:
f = m.predict
else:
raise ValueError("I don't know this kind of q_model.out_kind")
return f
def _init_avatar(self):
"""Initialize avatar-datastructures that are used there.
"""
self.abs_shaps = []
self.nrm_shaps = []
return
def _format_nrm_shaps(self):
if isinstance(self.nrm_shaps, list) and len(self.nrm_shaps) > 0:
init = np.zeros(self.m_codes.shape)
for m_idx, (mod, nrm_shap) in enumerate(zip(self.m_list, self.nrm_shaps)):
init[m_idx, list(mod.desc_ids)] = nrm_shap
self.nrm_shaps = init
else:
return
def _format_abs_shaps(self):
if isinstance(self.abs_shaps, list) and len(self.abs_shaps) > 0:
n_models, n_attributes = self.m_codes.shape
n_instances = self.abs_shaps[0].shape[0]
init = np.zeros((n_models, n_instances, n_attributes))
for m_idx, (mod, abs_shap) in enumerate(zip(self.m_list, self.abs_shaps)):
init_abs = np.zeros((n_instances, n_attributes))
init_abs[:, list(mod.desc_ids)] = abs_shap
init[m_idx, :, :] = init_abs
self.abs_shaps = init
else:
return
| 31,776 | 9,892 |
#
# PySNMP MIB module HUAWEI-LswRSTP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-LswRSTP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:34:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Pull the symbol classes this MIB needs out of the already-loaded base modules.
# NOTE(review): `mibBuilder` is supplied by the pysnmp/pysmi loader environment
# that executes this generated file; it is not defined anywhere in this module.
# Base ASN.1 types.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
# Support for enumerated (named-value) integers.
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
# Constraint combinators used to refine the base types below.
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
# Standard bridge MIB rows this module augments/references.
dot1dStpPortEntry, dot1dStpPort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dStpPortEntry", "dot1dStpPort")
# Vendor OID subtree root under which this MIB is registered.
lswCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "lswCommon")
# SMIv2 conformance and core object machinery.
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, Integer32, Counter64, ObjectIdentity, Bits, ModuleIdentity, iso, TimeTicks, MibIdentifier, IpAddress, Gauge32, Unsigned32, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Counter64", "ObjectIdentity", "Bits", "ModuleIdentity", "iso", "TimeTicks", "MibIdentifier", "IpAddress", "Gauge32", "Unsigned32", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
# Common textual conventions (MacAddress is used by the root-bridge scalar below).
TextualConvention, TruthValue, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString", "MacAddress")
# Module identity for HUAWEI-LswRSTP-MIB, rooted at 1.3.6.1.4.1.2011.2.23.1.6
# (enterprise arc 2011 = Huawei).
hwLswRstpMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6))
# Single published revision, dated 2001-06-29.
hwLswRstpMib.setRevisions(('2001-06-29 00:00',))
# Descriptive texts are only attached when the builder was asked to load them;
# this keeps memory down when texts are not needed.
if mibBuilder.loadTexts: hwLswRstpMib.setLastUpdated('200106290000Z')
# Organization string is empty in the source MIB.
if mibBuilder.loadTexts: hwLswRstpMib.setOrganization('')
class EnabledStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
hwLswRstpMibObject = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1))
hwdot1dStpStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpStatus.setStatus('current')
hwdot1dStpForceVersion = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpForceVersion.setStatus('current')
hwdot1dStpDiameter = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpDiameter.setStatus('current')
hwdot1dStpRootBridgeAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRootBridgeAddress.setStatus('current')
hwDot1dStpBpduGuard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpBpduGuard.setStatus('current')
hwDot1dStpRootType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("primary", 2), ("secondary", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpRootType.setStatus('current')
hwDot1dTimeOutFactor = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dTimeOutFactor.setStatus('current')
hwDot1dStpPathCostStandard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dot1d-1998", 1), ("dot1t", 2), ("legacy", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpPathCostStandard.setStatus('current')
hwdot1dStpPortXTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5), )
if mibBuilder.loadTexts: hwdot1dStpPortXTable.setStatus('current')
hwdot1dStpPortXEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1), )
dot1dStpPortEntry.registerAugmentions(("HUAWEI-LswRSTP-MIB", "hwdot1dStpPortXEntry"))
hwdot1dStpPortXEntry.setIndexNames(*dot1dStpPortEntry.getIndexNames())
if mibBuilder.loadTexts: hwdot1dStpPortXEntry.setStatus('current')
hwdot1dStpPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortStatus.setStatus('current')
hwdot1dStpPortEdgeport = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortEdgeport.setStatus('current')
hwdot1dStpPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("forceTrue", 1), ("forceFalse", 2), ("auto", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortPointToPoint.setStatus('current')
hwdot1dStpMcheck = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpMcheck.setStatus('current')
hwdot1dStpTransLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpTransLimit.setStatus('current')
hwdot1dStpRXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXStpBPDU.setStatus('current')
hwdot1dStpTXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXStpBPDU.setStatus('current')
hwdot1dStpRXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXTCNBPDU.setStatus('current')
hwdot1dStpTXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXTCNBPDU.setStatus('current')
hwdot1dStpRXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXRSTPBPDU.setStatus('current')
hwdot1dStpTXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXRSTPBPDU.setStatus('current')
hwdot1dStpClearStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clear", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpClearStatistics.setStatus('current')
hwdot1dSetStpDefaultPortCost = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dSetStpDefaultPortCost.setStatus('current')
hwdot1dStpRootGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 14), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpRootGuard.setStatus('current')
hwdot1dStpLoopGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpLoopGuard.setStatus('current')
hwdot1dStpPortBlockedReason = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notBlock", 1), ("blockForProtocol", 2), ("blockForRootGuard", 3), ("blockForBPDUGuard", 4), ("blockForLoopGuard", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpPortBlockedReason.setStatus('current')
hwdot1dStpRXTCBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXTCBPDU.setStatus('current')
hwdot1dStpPortSendingBPDUType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpPortSendingBPDUType.setStatus('current')
hwdot1dStpOperPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpOperPortPointToPoint.setStatus('current')
hwRstpEventsV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0))
if mibBuilder.loadTexts: hwRstpEventsV2.setStatus('current')
hwRstpBpduGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 1)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpBpduGuarded.setStatus('current')
hwRstpRootGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 2)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpRootGuarded.setStatus('current')
hwRstpBridgeLostRootPrimary = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 3))
if mibBuilder.loadTexts: hwRstpBridgeLostRootPrimary.setStatus('current')
hwRstpLoopGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 4)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpLoopGuarded.setStatus('current')
hwdot1dStpIgnoredVlanTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10), )
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanTable.setStatus('current')
hwdot1dStpIgnoredVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1), ).setIndexNames((0, "HUAWEI-LswRSTP-MIB", "hwdot1dVlan"))
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanEntry.setStatus('current')
hwdot1dVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dVlan.setStatus('current')
hwdot1dStpIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpIgnore.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-LswRSTP-MIB", hwdot1dStpTXStpBPDU=hwdot1dStpTXStpBPDU, hwdot1dStpPortPointToPoint=hwdot1dStpPortPointToPoint, hwdot1dStpRXTCNBPDU=hwdot1dStpRXTCNBPDU, hwdot1dStpMcheck=hwdot1dStpMcheck, hwdot1dStpTXTCNBPDU=hwdot1dStpTXTCNBPDU, hwdot1dStpIgnore=hwdot1dStpIgnore, hwdot1dStpIgnoredVlanEntry=hwdot1dStpIgnoredVlanEntry, hwdot1dStpTXRSTPBPDU=hwdot1dStpTXRSTPBPDU, hwdot1dStpRXStpBPDU=hwdot1dStpRXStpBPDU, hwDot1dStpBpduGuard=hwDot1dStpBpduGuard, hwdot1dStpStatus=hwdot1dStpStatus, hwRstpLoopGuarded=hwRstpLoopGuarded, hwDot1dStpRootType=hwDot1dStpRootType, hwdot1dStpTransLimit=hwdot1dStpTransLimit, hwdot1dStpPortStatus=hwdot1dStpPortStatus, hwdot1dStpRXRSTPBPDU=hwdot1dStpRXRSTPBPDU, hwdot1dStpClearStatistics=hwdot1dStpClearStatistics, hwDot1dStpPathCostStandard=hwDot1dStpPathCostStandard, hwLswRstpMibObject=hwLswRstpMibObject, hwdot1dStpDiameter=hwdot1dStpDiameter, PYSNMP_MODULE_ID=hwLswRstpMib, hwdot1dStpOperPortPointToPoint=hwdot1dStpOperPortPointToPoint, hwLswRstpMib=hwLswRstpMib, hwdot1dStpPortEdgeport=hwdot1dStpPortEdgeport, hwdot1dStpPortXTable=hwdot1dStpPortXTable, hwdot1dStpRXTCBPDU=hwdot1dStpRXTCBPDU, hwdot1dStpLoopGuard=hwdot1dStpLoopGuard, hwRstpRootGuarded=hwRstpRootGuarded, EnabledStatus=EnabledStatus, hwdot1dStpRootGuard=hwdot1dStpRootGuard, hwdot1dStpIgnoredVlanTable=hwdot1dStpIgnoredVlanTable, hwRstpBridgeLostRootPrimary=hwRstpBridgeLostRootPrimary, hwdot1dStpRootBridgeAddress=hwdot1dStpRootBridgeAddress, hwRstpBpduGuarded=hwRstpBpduGuarded, hwRstpEventsV2=hwRstpEventsV2, hwdot1dSetStpDefaultPortCost=hwdot1dSetStpDefaultPortCost, hwdot1dStpForceVersion=hwdot1dStpForceVersion, hwDot1dTimeOutFactor=hwDot1dTimeOutFactor, hwdot1dStpPortXEntry=hwdot1dStpPortXEntry, hwdot1dStpPortBlockedReason=hwdot1dStpPortBlockedReason, hwdot1dVlan=hwdot1dVlan, hwdot1dStpPortSendingBPDUType=hwdot1dStpPortSendingBPDUType)
| 13,135 | 6,306 |
"""
Implementation of DuplicateChecker engine class to run duplicate records analysis.
"""
from typing import List, Optional, Union
from pandas import DataFrame
from src.ydata_quality.core.warnings import Priority
from ..core import QualityEngine, QualityWarning
from ..utils.auxiliary import find_duplicate_columns
class DuplicateChecker(QualityEngine):
    "Engine for running analysis on duplicate records."

    def __init__(self,
                 df: DataFrame,
                 entities: Optional[List[Union[str, List[str]]]] = None,
                 is_close: bool = False,
                 severity: Optional[str] = None):
        """
        Arguments:
            df (DataFrame): reference DataFrame used to run the DataQuality analysis.
            entities (List[Union[str, List[str]]]): entities relevant for duplicate analysis.
                Passing lists allows composed entities of multiple columns.
            is_close (bool): Pass True to use numpy.isclose instead of pandas.equals in column comparison.
            severity (str): Sets the logger warning threshold.
                Valid levels are [DEBUG, INFO, WARNING, ERROR, CRITICAL]."""
        super().__init__(df=df, severity=severity)
        # Internal state; `_tests` names the checks run by the base engine.
        self._entities = [] if entities is None else entities
        self._tests = ["exact_duplicates", "entity_duplicates", "duplicate_columns"]
        self._is_close = is_close

    @property
    def entities(self):
        "Property that returns the entities relevant for duplicates analysis."
        return self._entities

    @entities.setter
    def entities(self, entities: List[Union[str, List[str]]]):
        # Validate and deduplicate before storing.
        if not isinstance(entities, list):
            raise ValueError("Property 'entities' should be a list.")
        entities = self.__unique_entities(entities)
        # NOTE(review): for a composed entity the branch below yields a *list*,
        # which is truthy whenever non-empty, so missing columns inside a
        # composed entity are not actually caught -- likely should be all(...).
        assert all(entity in self.df.columns if isinstance(entity, str) else [
            c in self.df.columns for c in entity] for entity in entities), "Given entities should exist as \
DataFrame's columns."
        self._entities = entities

    @staticmethod
    def __unique_entities(entities: List[Union[str, List[str]]]):
        """Returns entities list with only unique entities"""
        # Lists are unhashable, so composed entities are converted to tuples
        # (single-element lists collapse to the bare column name) for the set,
        # then converted back to lists on the way out.
        entities = set(entity if isinstance(entity, str) else entity[0] if len(
            entity) == 1 else tuple(entity) for entity in entities)
        return [entity if isinstance(entity, str) else list(entity) for entity in entities]

    @staticmethod
    def __get_duplicates(df: DataFrame):
        "Returns duplicate records."
        return df[df.duplicated()]

    @staticmethod
    def __get_entity_duplicates(df: DataFrame, entity: Union[str, List[str]]):
        "Returns the duplicate records aggregated by a given entity."
        return df.groupby(entity).apply(DuplicateChecker.__get_duplicates).reset_index(drop=True)

    def exact_duplicates(self):
        "Returns a DataFrame filtered for exact duplicate records."
        dups = self.__get_duplicates(self.df)  # Filter for duplicate instances
        if len(dups) > 0:
            self.store_warning(
                QualityWarning(
                    test=QualityWarning.Test.EXACT_DUPLICATES, category=QualityWarning.Category.DUPLICATES,
                    priority=Priority.P2, data=dups,
                    description=f"Found {len(dups)} instances with exact duplicate feature values."
                ))
        else:
            self._logger.info("No exact duplicates were found.")
            dups = None
        return dups

    def __provided_entity_dups(self, entity: Optional[Union[str, List[str]]] = None) -> dict:
        "Find duplicates for passed entity (simple or composed)."
        found_dups = {}
        dups = self.__get_entity_duplicates(self.df, entity)
        if len(dups) > 0:  # if we have any duplicates
            # NOTE(review): unlike exact_duplicates/duplicate_columns this uses
            # plain strings for test/category rather than the QualityWarning
            # enums -- confirm whether an ENTITY_DUPLICATES enum member exists.
            self.store_warning(
                QualityWarning(
                    test='Entity Duplicates', category='Duplicates', priority=Priority.P2, data=dups,
                    description=f"Found {len(dups)} duplicates after grouping by entities."
                ))
            if isinstance(entity, str):
                entity = [entity]  # Makes logic the same for str or List[str] entities
            set_vals = set(dups[entity].apply(tuple, axis=1))
            if len(entity) > 1:
                entity_key = tuple(entity)  # Lists are not hashable, therefore cannot be dictionary keys
            else:
                # No need to store keys as tuples for single entities (single values)
                set_vals = [val[0] for val in set_vals]
                entity_key = entity[0]
            for val in set_vals:  # iterate on each entity with duplicates
                found_dups.setdefault(entity_key, {})[val] = dups[(dups[entity].values == val).all(axis=1)]
        return found_dups

    def entity_duplicates(self, entity: Optional[Union[str, List[str]]] = None):
        """Returns a dict of {entity: {entity_value: duplicates}} of duplicate records after grouping by an entity.
        If entity is not specified, compute for all entities defined in the init.
        """
        ent_dups = {}
        if entity is not None:  # entity is specified
            ent_dups.update(self.__provided_entity_dups(entity))
        else:  # if entity is not specified
            if len(self.entities) == 0:
                self._logger.warning("There are no entities defined to run the analysis. Skipping the test.")
                return None
            # Recurse once per configured entity and merge the results.
            for col in self.entities:
                ent_dups.update(self.entity_duplicates(col))
        return ent_dups

    def duplicate_columns(self):
        "Returns a mapping dictionary of columns with fully duplicated feature values."
        dups = find_duplicate_columns(self.df, self._is_close)
        cols_with_dups = len(dups.keys())
        if cols_with_dups > 0:
            self.store_warning(
                QualityWarning(
                    test=QualityWarning.Test.DUPLICATE_COLUMNS, category=QualityWarning.Category.DUPLICATES,
                    priority=Priority.P1, data=dups,
                    description=f"Found {cols_with_dups} columns with exactly the same feature values as other columns."
                )
            )
        else:
            self._logger.info("No duplicate columns were found.")
            dups = None
        return dups
| 6,377 | 1,695 |
# Two vertically stacked axes that share one x axis, saved as a PNG.
fig, ax = plt.subplots(nrows=2, sharex='all', figsize=(10, 5))
fig.savefig("two-subplots.png")
import collections
import re
from icecube import icetray
from icecube import dataclasses
from icecube import dataio
def format_line( frame, key, maxwidth = None, ellipsis = '...' ):
    '''Given an icecube frame and a key in that frame, return
    exactly one line of text describing the I3FrameObject with that key.
    Try to make the text as useful to a human reader as possible.
    If accessing the object generates an exception, catch it and
    return its description.
    Clip to an optional maximum width with a trailing ellipsis'''
    # collections.Iterable was removed in Python 3.10; fall back for Python 2.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable
    try:
        obj = frame[key]
        if (obj is None) and (key in frame):
            return '(Unreadable)'
        if hasattr(obj, "apply"):
            # I3FrameObject proxies expose apply(); resolve against the frame.
            obj = obj.apply(frame)
        haslength = isinstance( obj, Iterable )
    except Exception:
        obstr = '(Unreadable)'
    else:
        if( haslength ):
            obstr = 'Iterable with {0} items'.format(len(obj))
        else:
            try:
                # give the module and class name
                obstr = '{0}.{1} object'.format(obj.__module__, obj.__class__.__name__)
            except Exception:
                # try basic repr; keep only the first line
                obstr = repr(obj).split('\n')[0]
    if maxwidth and len(obstr) > maxwidth:
        if maxwidth > len(ellipsis):
            obstr = obstr[:maxwidth - len(ellipsis)] + ellipsis
        else:
            # Was obstr[:maxwidth-len(ellipsis)] + ellipsis[0:maxwidth], which
            # could return MORE than maxwidth characters for tiny widths.
            obstr = ellipsis[:maxwidth]
    return obstr
def format_detail( frame, key ):
    '''Given an icecube frame and a key in that frame, return
    a human-readable string that describes the item in detail.'''
    # collections.Iterable was removed in Python 3.10; fall back for Python 2.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable
    obj = None  # keep bound for the fallback inspection below even on error
    try:
        obj = frame[key]
        if hasattr(obj, "apply"):
            obj = obj.apply(frame)
        # One if/elif chain: previously the I3String branch was a separate
        # `if`, so its message was clobbered by the following if/elif/else.
        if isinstance(obj, dataclasses.I3String):
            message = obj.value
        elif isinstance(obj, dataclasses.I3Double):
            message = str(obj.value)
        elif hasattr(obj, "items"):
            message = '{\n'
            for k in obj.keys():
                message += str(k)+': '+str(obj[k])+'\n'
            message += '}'
        else:
            message = str(obj)
    except Exception as e:
        message = '({0})'.format(e)
    if re.match(r'<icecube\.[\S]*\.[\S]* object at [0-9xa-f]*>', message):
        # Standard boring format. In some cases we might be able to do better.
        if isinstance(obj, Iterable):
            message += ', contents:\n' + '\n'.join( [ str(x) for x in frame[key] ] )
    return message
def format_xml( frame, key ):
    '''Given an icecube frame and a key in that frame, return
    the xml serialization of the item.'''
    try:
        if key in frame:
            serialized = frame.as_xml(key)
        else:
            serialized = key + ' not in frame'
    except Exception as err:
        serialized = '({0})'.format(err)
    # Normalize tabs so the XML indents consistently in a text widget.
    return serialized.expandtabs(4)
def format_size( frame, key):
    '''Given an icecube frame and a key in that frame, return
    the size of the string.
    Default converts the string in Kilo, Mega, or GigaByte.
    Adjust conversion to different formats by supplying
    the list with given unit names.'''
    if key not in frame:
        return str()
    size = frame.size(key)
    factor = 1024.
    remaining_units = ['K', 'M', 'G']
    suffix = False
    # Scale down by 1024 per step until the value fits or units run out.
    while size > factor and remaining_units:
        size /= factor
        suffix = remaining_units.pop(0)
    if suffix:
        template = '{0:1.1f}{1:1s}' if size < 10 else '{0:4.0f}{1:1s}'
        return template.format(size, suffix)
    # Bytes are integer value, so show them like this
    return '{0:4d} '.format(size)
| 3,575 | 1,094 |
#!/usr/bin/python
#You are climbing a stair case. It takes n steps to reach to the top.
#Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
class Solution:
    # @param {integer} n
    # @return {integer}
    count = 0  # ways found by the most recent climbStairs() call

    def climbStairs(self, n):
        """Return the number of distinct ways to climb n steps taking 1 or 2 at a time.

        Replaces the O(2^n) recursive enumeration (~30M calls for n=35) with
        the O(n) Fibonacci iteration, and resets self.count on every call --
        the old class-level counter accumulated across calls.
        """
        if n < 0:
            self.count = 0
            print(self.count)
            return self.count
        ways, nxt = 1, 1  # ways(0), ways(1)
        for _ in range(n):
            ways, nxt = nxt, ways + nxt
        self.count = ways
        print(self.count)
        return self.count

    def rec_climb(self, n):
        # Kept for backward compatibility; no longer used by climbStairs.
        # WARNING: exponential time -- do not call with large n.
        if n == 0:
            self.count = self.count + 1
        elif n < 0:
            pass
        else:
            self.rec_climb(n - 1)
            self.rec_climb(n - 2)
if __name__ == "__main__":
    # Run the demo only when executed as a script, not when imported.
    solution = Solution()
    solution.climbStairs(35)
from __future__ import absolute_import, division, print_function
import string
from builtins import bytes, range
from CryptoAttacks.Math import factors
from CryptoAttacks.Utils import (add_padding, b2h, chunks, log, print_chunks,
random_bytes)
def encryption_oracle(payload):
    """Stub for a user-supplied ECB encryption oracle.

    Args:
        payload(string): raw data to encrypt

    Returns:
        string

    Raises:
        NotImplementedError: always; replace with a real oracle before use.
    """
    raise NotImplementedError
def is_ecb(cipher, block_size=16):
    """Check if there are repeated blocks in ciphertext

    Args:
        cipher(string)
        block_size(int)

    Returns:
        bool: True if there are repeated blocks (so it's probably ECB mode)
    """
    cipher_blocks = chunks(cipher, block_size)
    # Any collision between blocks shrinks the set below the list length.
    return len(set(cipher_blocks)) < len(cipher_blocks)
def find_block_size(encryption_oracle, constant=True):
    """Determine block size if ecb mode

    Args:
        encryption_oracle(callable)
        constant(bool): True if prefix and suffix have constant length

    Returns:
        int
    """
    if constant:
        log.debug("constant == True")
        # Grow the payload one byte at a time; when the ciphertext jumps to a
        # new block, the jump equals the block size.
        payload = bytes(b'A')
        size = len(encryption_oracle(payload))
        while True:
            payload += bytes(b'A')
            new_size = len(encryption_oracle(payload))
            if new_size > size:
                log.info("block_size={}".format(new_size - size))
                return new_size - size
    else:
        log.debug("constant == False")
        # Prefix/suffix lengths vary, so the block size must divide the
        # ciphertext length: try every factor, smallest first.
        payload = bytes(b'A')
        max_size = len(encryption_oracle(payload))
        possible_sizes = factors(max_size)
        possible_sizes.add(max_size)
        blocks_to_send = 5
        for block_size in sorted(possible_sizes):
            """send payload of length x, so at least x-1 blocks should be identical"""
            payload = random_bytes(1) * (blocks_to_send*block_size)
            enc_chunks = chunks(encryption_oracle(payload), block_size)
            # A run of identical ciphertext blocks confirms the candidate size
            # (identical plaintext blocks encrypt identically under ECB).
            for x in range(len(enc_chunks)-1):
                if enc_chunks[x] == enc_chunks[x+1]:
                    log.debug("Found two identical blocks at {}: {}".format(x, print_chunks(enc_chunks)))
                    for y in range(2, blocks_to_send-1):
                        if enc_chunks[x] != enc_chunks[x+y]:
                            break
                    else:
                        # for/else: the whole run matched, candidate accepted.
                        log.info("block_size={}".format(block_size))
                        return block_size
def find_prefix_suffix_size(encryption_oracle, block_size=16):
    """Determine prefix and suffix sizes if ecb mode, sizes must be constant
    Rarely may fail (if random data that are send unhappily matches prefix/suffix)

    Args:
        encryption_oracle(callable)
        block_size(int)

    Returns:
        tuple(int,int): prefix_size, suffix_size
    """
    # Send a run of identical blocks; locate where our controlled data starts
    # in the ciphertext (identical plaintext blocks repeat under ECB).
    blocks_to_send = 5
    payload = random_bytes(1) * (blocks_to_send * block_size)
    enc_chunks = chunks(encryption_oracle(payload), block_size)
    log.debug("Encryption of length {}".format(blocks_to_send * block_size))
    log.debug(print_chunks(enc_chunks))
    for position_start in range(len(enc_chunks) - 1):
        if enc_chunks[position_start] == enc_chunks[position_start + 1]:
            for y in range(2, blocks_to_send - 1):
                if enc_chunks[position_start] != enc_chunks[position_start + y]:
                    break
            else:
                # Whole run matched: this is the first fully-controlled block.
                log.success("Controlled payload start at chunk {}".format(position_start))
                break
    else:
        log.critical_error("Position of controlled chunks not found")
    log.info('Finding prefix')
    # Flip one payload byte at a time; the first flip that changes the
    # ciphertext block at position_start pins down the prefix boundary.
    changed_char = bytes([(payload[0] - 1)%256])
    for aligned_bytes in range(block_size):
        payload_new = payload[:aligned_bytes] + changed_char + payload[aligned_bytes+1:]
        enc_chunks_new = chunks(encryption_oracle(payload_new), block_size)
        log.debug(print_chunks(chunks(payload_new, block_size)))
        log.debug(print_chunks(enc_chunks_new))
        if enc_chunks_new[position_start] != enc_chunks[position_start]:
            prefix_size = position_start*block_size - aligned_bytes
            log.success("Prefix size: {}".format(prefix_size))
            break
    else:
        log.critical_error("Size of prefix not found")
    log.info('Finding suffix')
    # Pad the prefix to a block boundary, then grow the payload until the
    # ciphertext gains a block; the bytes consumed reveal the suffix length.
    payload = random_bytes(1) * (block_size - (prefix_size % block_size))  # align to block_size
    encrypted = encryption_oracle(payload)
    suffix_size = len(encrypted) - len(payload) - prefix_size
    while True:
        payload += random_bytes(1)
        suffix_size -= 1
        if len(encryption_oracle(payload)) > len(encrypted):
            log.success("Suffix size: {}".format(suffix_size))
            break
    else:
        # NOTE(review): unreachable -- `while True` only exits via break, so
        # this while/else body can never run.
        log.critical_error("Size of suffix not found")
    return prefix_size, suffix_size
def decrypt(encryption_oracle, constant=True, block_size=16, prefix_size=None, secret_size=None,
            alphabet=None):
    """Given encryption oracle which produce ecb(prefix || our_input || secret), find secret

    Byte-at-a-time ECB decryption: align the secret so exactly one unknown
    byte sits at a block boundary, then brute-force that byte by comparing
    ciphertext blocks.

    Args:
        encryption_oracle(callable)
        constant(bool): True if prefix have constant length (secret must have constant length)
        block_size(int/None)
        prefix_size(int/None)
        secret_size(int/None)
        alphabet(string): plaintext space

    Returns:
        secret(string)
    """
    log.debug("Start decrypt function")
    if not alphabet:
        alphabet = bytes(string.printable.encode())
    if not block_size:
        block_size = find_block_size(encryption_oracle, constant)
    if constant:
        log.debug("constant == True")
        if not prefix_size or not secret_size:
            prefix_size, secret_size = find_prefix_suffix_size(encryption_oracle, block_size)
        """Start decrypt"""
        secret = bytes(b'')
        # Padding that pushes the prefix (resp. secret) to a block boundary;
        # an empty pad is used when it is already aligned.
        aligned_bytes = random_bytes(1) * (block_size - (prefix_size % block_size))
        if len(aligned_bytes) == block_size:
            aligned_bytes = bytes(b'')
        aligned_bytes_suffix = random_bytes(1) * (block_size - (secret_size % block_size))
        if len(aligned_bytes_suffix) == block_size:
            aligned_bytes_suffix = bytes(b'')
        # Recover the secret back-to-front, one ciphertext block at a time.
        block_to_find_position = -1
        controlled_block_position = (prefix_size+len(aligned_bytes)) // block_size
        while len(secret) < secret_size:
            if (len(secret)+1) % block_size == 0:
                # A full block recovered: target the next block to the left.
                block_to_find_position -= 1
            payload = aligned_bytes + aligned_bytes_suffix + random_bytes(1) + secret
            enc_chunks = chunks(encryption_oracle(payload), block_size)
            block_to_find = enc_chunks[block_to_find_position]
            log.debug("To guess at position {}:".format(block_to_find_position))
            log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
            log.debug("Encry: " + print_chunks(enc_chunks)+"\n")
            # Brute-force the single unknown byte in the controlled block.
            for guessed_char in range(256):
                guessed_char = bytes([guessed_char])
                payload = aligned_bytes + add_padding(guessed_char + secret, block_size)
                enc_chunks = chunks(encryption_oracle(payload), block_size)
                log.debug("Plain: " + print_chunks(chunks(bytes(b'P'*prefix_size) + payload + bytes(b'S'*secret_size), block_size)))
                log.debug("Encry: " + print_chunks(enc_chunks)+"\n")
                if block_to_find == enc_chunks[controlled_block_position]:
                    secret = guessed_char + secret
                    log.debug("Found char, secret={}".format(repr(secret)))
                    break
            else:
                # for/else: all 256 guesses failed.
                log.critical_error("Char not found, try change alphabet. Secret so far: {}".format(repr(secret)))
        log.success("Secret(hex): {}".format(b2h(secret)))
        return secret
    else:
        # NOTE(review): the variable-length case is not implemented; the
        # function implicitly returns None here.
        log.debug("constant == False")
def known_plaintexts(pairs, ciphertext, block_size=16):
    """Given enough pairs plaintext-ciphertext, we can assign ciphertexts blocks to plaintexts blocks,
    then we can possibly decrypt ciphertext

    Args:
        pairs(list): list of dict, [{'cipher': 'aaa', 'plain': 'bbb'}, {'cipher': 'xxx', 'plain': 'pwa'}]
            plaintexts have to be correctly padded (len(cipher) == len(plain))
        ciphertext(string): ciphertext to decrypt
        block_size(int)

    Returns
        tuple: ([decrypted_ciphertext_blocks], {'ciphertext_block': 'plaintext_block', ...})
        decrypted_ciphertext_blocks may contain not-decrypted blocks from ciphertext

    Raises:
        ValueError: if a pair's plaintext and ciphertext differ in block count
    """
    result_mapping = {}
    for pair in pairs:
        ciphertext_blocks = chunks(pair['cipher'], block_size)
        plaintext_blocks = chunks(pair['plain'], block_size)
        if len(ciphertext_blocks) != len(plaintext_blocks):
            # Was a trio of debug prints followed by `assert 0`, which is
            # silently stripped under `python -O`; raise explicitly instead.
            raise ValueError(
                "Pair with mismatched block counts ({} cipher vs {} plain blocks): {!r}".format(
                    len(ciphertext_blocks), len(plaintext_blocks), pair))
        # Under ECB a given plaintext block always maps to the same
        # ciphertext block, so the mapping is a plain dict.
        for cipher_block, plain_block in zip(ciphertext_blocks, plaintext_blocks):
            result_mapping[cipher_block] = plain_block
    target_ciphertext_blocks = chunks(ciphertext, block_size)
    for cipher_block_no in range(len(target_ciphertext_blocks)):
        if target_ciphertext_blocks[cipher_block_no] in result_mapping:
            target_ciphertext_blocks[cipher_block_no] = result_mapping[target_ciphertext_blocks[cipher_block_no]]
    return target_ciphertext_blocks, result_mapping
| 9,619 | 2,919 |
#!/usr/bin/python
import numpy as np
def bilinear(x, u_n, u, EPn, Pn, E, Sigy, H):
    """1D bilinear elastoplastic update (linear isotropic hardening).

    For each element between consecutive nodes of `x`, performs the classic
    radial-return update: elastic predictor, yield check, plastic corrector.

    Arguments:
        x: node coordinates, length n+1.
        u_n: previous displacement field (unused by the update; kept for
            interface compatibility -- the original computed eps_n from it
            but never used the result).
        u: current displacement field, length n+1.
        EPn: plastic strain per element at the previous step, length n.
        Pn: accumulated plastic multiplier per element, length n.
        E: Young's modulus.
        Sigy: initial yield stress.
        H: hardening modulus.

    Returns:
        (S, P, EP, TM): stress, updated plastic multiplier, updated plastic
        strain and tangent modulus, one value per element.
    """
    h = np.diff(x)                      # element lengths
    eps = np.diff(u) / h                # total strain per element
    # (i) Elastic prediction
    Selas = E * (eps - EPn)
    # (ii) Yield criterion; f <= 0 is elastic, f > 0 requires plastic return
    f = np.abs(Selas) - (Sigy + H * Pn)
    plastic = f > 0
    # Plastic corrector (closed form for linear hardening): dP = f / (E + H)
    dP = np.where(plastic, f / (E + H), 0.0)
    P = Pn + dP
    EP = EPn + dP * np.sign(Selas)
    # S = E*(eps - EP) holds in both regimes (reduces to Selas when elastic)
    S = E * (eps - EP)
    # Consistent tangent modulus: E elastic, E*H/(E+H) elastoplastic
    TM = np.where(plastic, (E * H) / (E + H), np.full_like(S, E))
    return S, P, EP, TM
| 973 | 448 |
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# max_avg_deg.py
# Created by Disa Mhembere on 2014-08-03.
# Email: disa@jhu.edu
from computation.utils.r_utils import r_igraph_set_graph_attribute
from computation.algs.eigen.eigen import r_igraph_eigs
from computation.utils.igraph_attributes import r_igraph_get_attr
from rpy2.rinterface import NULL
def r_igraph_max_ave_degree(g):
    """
    Estimate the max average degree of graph g (via its largest eigenvalue)
    and store it as a graph attribute.
    *Global graph attributes can only be stored in the graph*

    NOTE(review): the original summary said "local triangle count", but the
    code computes/reuses the top eigenvalue and stores "max_ave_degree".

    @param g: The igraph graph loaded via Rpy2 i.e. an R object

    @return: Same graph an input but with added invariant as an attribute
    """
    mad = r_igraph_get_attr(g, "eigvals", "g") # See if we already have computed eigenvalues for the graph
    if mad == NULL: # Ok then compute top 1 eig ourself
        mad = r_igraph_eigs(g, 1, return_eigs=True, save_fn=None)[0]
    else:
        # Stored attribute looks like a stringified list; strip the leading
        # bracket and take the first entry. The largest eigenvalue is held at index 0
        mad = float(mad[0].split(",")[0][1:])
    if mad is not None:
        g = r_igraph_set_graph_attribute(g, "max_ave_degree", mad)
    else: # More than likely ran out of memory
        # NOTE(review): Python 2 print statement -- this module predates py3.
        print "Failed to estimate max ave degree because eigensolver failed ..."
    return g # return so we can use for other attributes
| 1,827 | 593 |
from typing import Union
import pandas as pd
from ._base import (
_NonTrainableMultivariateModel,
_NonTrainableUnivariateModel,
_TrainableMultivariateModel,
_TrainableUnivariateModel,
)
class _NonTrainableUnivariateTransformer(_NonTrainableUnivariateModel):
    def predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Apply the transformer to a time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Series to transform. A DataFrame with k columns is handled as k
            independent univariate series, each transformed on its own.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            The transformed series.
        """
        return self._predict(ts)

    # `transform` is an alias of `predict`.
    transform = predict
class _TrainableUnivariateTransformer(_TrainableUnivariateModel):
    def fit(self, ts: Union[pd.Series, pd.DataFrame]) -> None:
        """Train the transformer on the given time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Training series. A DataFrame with k columns trains k univariate
            transformers, one per column, independently.
        """
        self._fit(ts)

    def predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Apply the trained transformer to a time series.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Series to transform. A DataFrame with k columns is handled as k
            independent univariate series.

            - Trained on a Series: the single transformer is applied to each
              column independently.
            - Trained on a DataFrame (i.e. k transformers): each trained
              transformer is applied to its corresponding column.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            The transformed series.
        """
        return self._predict(ts)

    def fit_predict(
        self, ts: Union[pd.Series, pd.DataFrame]
    ) -> Union[pd.Series, pd.DataFrame]:
        """Train the transformer and transform the training series in one call.

        Parameters
        ----------
        ts: pandas.Series or pandas.DataFrame
            Series used both for training and as the transform input. A
            DataFrame with k columns trains and applies k independent
            univariate transformers, one per column.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            The transformed series.
        """
        self.fit(ts)
        return self.predict(ts)

    # scikit-learn-style aliases.
    transform = predict
    fit_transform = fit_predict
class _NonTrainableMultivariateTransformer(_NonTrainableMultivariateModel):
    """Base class for multivariate transformers that need no training."""

    def predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to be transformed.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.
        """
        return self._predict(df)

    # sklearn-style alias of `predict`.
    transform = predict
class _TrainableMultivariateTransformer(_TrainableMultivariateModel):
    """Base class for multivariate transformers that require training."""

    def fit(self, df: pd.DataFrame) -> None:
        """Train the transformer with given time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to be used to train the transformer.
        """
        self._fit(df)

    def predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Transform time series.

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to be transformed.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.
        """
        return self._predict(df)

    def fit_predict(self, df: pd.DataFrame) -> Union[pd.Series, pd.DataFrame]:
        """Train the transformer, and transform the time series used for
        training.

        (Fixes the "tranform" typo in the original docstring.)

        Parameters
        ----------
        df: pandas.DataFrame
            Time series to be used for training and be transformed.

        Returns
        -------
        pandas.Series or pandas.DataFrame
            Transformed time series.
        """
        self.fit(df)
        return self.predict(df)

    # sklearn-style aliases.
    transform = predict
    fit_transform = fit_predict
| 4,828 | 1,220 |
from django.shortcuts import get_object_or_404
from django_rest_logger import log
from knox.auth import TokenAuthentication
from knox.models import AuthToken
from rest_framework import status
from rest_framework.authentication import BasicAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import CreateModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from accounts.models import User
from accounts.serializers import UserRegistrationSerializer, UserSerializer
from lib.utils import AtomicMixin
from django.utils import timezone
from lib.utils import validate_email as email_is_valid
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
class UserRegisterView(AtomicMixin, CreateModelMixin, GenericAPIView):
    """Register a new user account from an email/name/password payload."""

    serializer_class = UserRegistrationSerializer
    authentication_classes = ()

    def post(self, request):
        """User registration view.

        Expects ``email``, ``first_name``, ``last_name`` and ``password``
        in ``request.data``.

        NOTE(review): the error branches deliberately answer with HTTP 200
        and a message body (existing client contract) — consider proper
        4xx codes in an API version bump.
        """
        email = request.data['email']
        if not email_is_valid(email):
            return Response("Please use a different email address provider.", status=status.HTTP_200_OK)
        if User.objects.filter(email=email).exists():
            return Response("Email already in use, please use a different email address.", status=status.HTTP_200_OK)
        now = timezone.now()
        user = User(email=email,
                    first_name=request.data['first_name'],
                    last_name=request.data['last_name'],
                    is_active=True,
                    last_login=now,
                    date_joined=now)
        # Store a hashed password, never the raw value.
        user.set_password(request.data['password'])
        user.save()
        return Response("SUCCESS", status=status.HTTP_200_OK)
class UserLoginView(GenericAPIView):
    """Exchange HTTP Basic credentials for a knox auth token."""
    serializer_class = UserSerializer
    authentication_classes = (BasicAuthentication,)
    permission_classes = (IsAuthenticated,)
    def post(self, request):
        """User login with username and password."""
        # NOTE(review): newer knox versions return an (instance, token)
        # tuple from AuthToken.objects.create — confirm the installed knox
        # version returns a bare token string here.
        token = AuthToken.objects.create(request.user)
        return Response({
            'user': self.get_serializer(request.user).data,
            'token': token
        })
class UserConfirmEmailView(AtomicMixin, GenericAPIView):
    """Confirm a user's email address from an emailed activation key."""

    serializer_class = None
    authentication_classes = ()

    def get(self, request, activation_key):
        """Confirm the email of the user owning *activation_key*.

        Responds 200 on success, 404 when the key cannot be confirmed.
        """
        account = get_object_or_404(User, activation_key=str(activation_key))
        if not account.confirm_email():
            log.warning(message='Email confirmation key not found.',
                        details={'http_status_code': status.HTTP_404_NOT_FOUND})
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(status=status.HTTP_200_OK)
class UserEmailConfirmationStatusView(GenericAPIView):
    """Report whether the authenticated user's email is confirmed."""

    serializer_class = None
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        """Retrieve user current confirmed_email status."""
        confirmed = self.request.user.confirmed_email
        return Response({'status': confirmed}, status=status.HTTP_200_OK)
class FacebookLogin(SocialLoginView):
    """Social login endpoint backed by Facebook OAuth2 (django-rest-auth)."""
    adapter_class = FacebookOAuth2Adapter
| 3,511 | 938 |
import create
from lib import Machine
from lib import Transformer
import argparse
import pickle
from itertools import chain
from random import shuffle
# Command-line interface: a global --test flag plus `create`/`encrypt`
# sub-commands.
parser = argparse.ArgumentParser(description='A simulation of the enigma encryption algorithm', prog='enigma.py')
subparsers = parser.add_subparsers(help='Which command to run', dest='subroutine')
create_parser = subparsers.add_parser('create', help='A utility to create encryption codexes')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt a file with a codex')
parser.add_argument('--test',
                    type=argparse.FileType('r'),
                    help='Validate a cypher')
create_parser.add_argument('file',
                           metavar='<File>',
                           type=argparse.FileType('w'),
                           help='The file to output to')
# Fix: short and long option strings must be separate arguments; the
# original single string '-r --random' defined one malformed option.
create_parser.add_argument('-r', '--random',
                           action='store_true',
                           help='Generates a completely random codex')
encrypt_parser.add_argument('in_file',
                            metavar='<Input file>',
                            type=argparse.FileType('r'),
                            help='The file to be encrypted')
encrypt_parser.add_argument('out_file',
                            metavar='<Out file>',
                            type=argparse.FileType('w'),
                            help='The destination for the results')
encrypt_mutual = encrypt_parser.add_mutually_exclusive_group(required=True)
encrypt_mutual.add_argument('--codex',
                            type=argparse.FileType('r'),
                            help='The codex to use')
encrypt_mutual.add_argument('--random',
                            nargs=3,
                            help='Create a random codex using a preset alphabet [ABC, bytes, numbers, ASCII, UTF], a minimum number of transformers, and a maximum number of transformers')
args = parser.parse_args()
if (args.test):
    # Fix: `sample` was used below but never imported (NameError at runtime).
    from random import sample
    with open('cypher.pkl', mode='rb') as file:
        cypher = pickle.load(file)
    abc = cypher.getABC()
    machine = Machine(cypher)
    def gen(length):
        # Generate test data: one shuffled alphabet repeated `length` times.
        # NOTE(review): the repetition reuses a single sample — confirm that
        # distinct samples per repetition were not intended.
        c = [sample(abc, len(abc))] * length
        return chain.from_iterable(c)
    def transform(d):
        # Run every symbol through the machine with its position as counter.
        return [machine.parse(value, counter) for counter, value in enumerate(d)]
    testData = list(gen(5))
    # Fix: removed leftover `pdb.set_trace()` (debug artifact; `pdb` was
    # never imported, so it raised NameError).
    results = transform(transform(testData))
    # A valid cypher must round-trip: encrypting twice restores the input.
    if (False not in [item[0] == item[1] for item in zip(testData, results)]):
        print("This is a valid cypher")
    else:
        print("This is NOT a valid cypher")
if (args.subroutine == 'create'):
    # Build a codex interactively and persist it with pickle.
    codex = create.Create()
    with open(args.file.name, mode='wb+') as destination:
        pickle.dump(codex, destination)
if (args.subroutine == 'encrypt'):
    machine = None
    if (args.codex):
        # Fix: args.codex is a file object opened by argparse.FileType, not
        # a path; reopen by name in binary mode for pickle.
        with open(args.codex.name, 'rb') as file:
            machine = Machine(pickle.load(file))
    if (args.random):
        CYPHER = create.random(create.genPreset(args.random[0]), args.random[1], args.random[2])
        machine = Machine(abc=CYPHER[0].getABC())
    with open(args.in_file.name, 'rb') as source, open(args.out_file.name, 'wb') as destination:
        clean = source.read()
        # Fix: unpack (index, byte) pairs — the original comprehension
        # referenced an undefined name `value`; argument order matches the
        # `transform` helper above (value first, counter second).
        crypt = [machine.parse(value, counter) for counter, value in enumerate(clean)]
        # Fix: file.write needs bytes, not a list.
        # NOTE(review): assumes Machine.parse returns ints in 0..255 — confirm.
        destination.write(bytes(crypt))
| 2,985 | 952 |
"""Manage the analysis folder from the autoimpute package.
This module handles imports from the analysis folder that should be accessible
whenever someone imports autoimpute.analysis. The list below specifies the
methods and classes that are available on import.
This module handles `from autoimpute.analysis import *` with the __all__
variable below. This command imports the public classes and methods from
autoimpute.analysis.
"""
from .base_regressor import MiBaseRegressor
from .linear_regressor import MiLinearRegression
from .logistic_regressor import MiLogisticRegression
from .metrics import raw_bias, percent_bias
__all__ = [
"MiBaseRegressor",
"MiLinearRegression",
"MiLogisticRegression",
"raw_bias",
"percent_bias"
]
| 754 | 215 |
import requests
from parameterized import parameterized, parameterized_class
import aito.client.requests as aito_requests
import aito.schema as aito_schema
from aito.client import AitoClient
from tests.cases import CompareTestCase
from tests.sdk.contexts import grocery_demo_client
def get_requests_resp_and_aito_resp(aito_client: AitoClient, request_obj: aito_requests.AitoRequest):
    """returns the json content from requests lib response and aito response for comparison"""
    raw_response = requests.request(
        method=request_obj.method,
        url=aito_client.instance_url + request_obj.endpoint,
        headers=aito_client.headers,
        json=request_obj.query
    )
    sdk_response = aito_client.request(request_obj=request_obj)
    return raw_response.json(), sdk_response
class TestBaseHitsResponse(CompareTestCase):
    """Check that hit-style SDK responses mirror the raw JSON payload."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()
        cls.request_obj = aito_requests.GenericQueryRequest(query={'from': 'users', 'limit': 3})
        cls.raw_resp_json, cls.aito_resp = get_requests_resp_and_aito_resp(cls.client, cls.request_obj)

    def test_attributes(self):
        """Top-level fields and each hit must match the raw payload."""
        for field_name in ('offset', 'total'):
            self.assertEqual(getattr(self.aito_resp, field_name), self.raw_resp_json[field_name])
        self.assertTrue(hasattr(self.aito_resp, 'hits'))
        raw_hits = self.raw_resp_json['hits']
        for position, hit in enumerate(self.aito_resp.hits):
            self.assertEqual(hit.json, raw_hits[position])
        self.assertTrue(hasattr(self.aito_resp, 'first_hit'))
        self.assertEqual(self.aito_resp.first_hit.json, raw_hits[0])

    def test_get_field(self):
        """Known fields support `in`; unknown subscripts raise KeyError."""
        self.assertIn('offset', self.aito_resp)
        with self.assertRaises(KeyError):
            _ = self.aito_resp['some_field']

    def test_iter_fields(self):
        """Iterating the response yields exactly the raw JSON keys."""
        self.assertCountEqual(list(self.aito_resp), list(self.raw_resp_json.keys()))
@parameterized_class(("test_name", "request_obj", "score_field"), [
    ("predict", aito_requests.PredictRequest({"from": "products", "predict": "tags", "limit": 3}), "$p"),
    ("recommend", aito_requests.RecommendRequest(
        {"from": "impressions", "recommend": "product", "goal": {"session.user": "veronica"}, "limit": 3}
    ), "$p" ),
    ("match", aito_requests.MatchRequest(
        {"from": "impressions", "where": {"session.user": "veronica"}, "match": "product", "limit": 3}
    ), "$p"),
    ("similarity", aito_requests.SimilarityRequest({"from": "products", "similarity": {"name": "rye bread"}}), "$score")
])
class TestScoredHitsResponse(CompareTestCase):
    """Parameterized over the four scored-hit query kinds; `request_obj` and
    `score_field` are injected as class attributes by parameterized_class."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()
    def test_hit_class(self):
        """first_hit.score must mirror the raw score field; no explanation
        is exposed unless `$why` was selected."""
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
        self.assertTrue(hasattr(aito_resp, 'first_hit'))
        self.assertEqual(aito_resp.first_hit.score, raw_resp_json['hits'][0][self.score_field])
        with self.assertRaises(KeyError):
            _ = aito_resp.first_hit.explanation
    def test_hit_with_explanation(self):
        """Selecting `$why` exposes per-hit explanations.
        NOTE(review): this mutates the shared class-level request_obj's
        query, which can leak into other tests depending on run order —
        consider copying the request instead.
        """
        self.request_obj.query = {**self.request_obj.query, 'select': ['$why']}
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, self.request_obj)
        self.assertEqual(aito_resp.first_hit.explanation, raw_resp_json['hits'][0]['$why'])
class TestRelateResponse(CompareTestCase):
    """Check relate responses expose relations mirroring the raw payload."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    def test_relate_response(self):
        raw_json, sdk_resp = get_requests_resp_and_aito_resp(
            self.client,
            aito_requests.RelateRequest({"from": "products", "where": {"$exists": "name"}, "relate": "tags", "limit": 2})
        )
        first_raw_hit = raw_json['hits'][0]
        first_relation = sdk_resp.relations[0]
        self.assertEqual(first_relation.json, first_raw_hit)
        self.assertEqual(first_relation.frequencies, first_raw_hit['fs'])
        self.assertEqual(first_relation.probabilities, first_raw_hit['ps'])
class TestEvaluateResponse(CompareTestCase):
    """Check evaluate responses expose accuracy and sample counts."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()

    def test_evaluate_response(self):
        """Evaluate-request fields mirror the raw payload.

        Renamed from the copy-pasted `test_relate_response` so the test id
        reflects what is actually exercised.
        """
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(
            self.client,
            aito_requests.EvaluateRequest({
                "test": {"$index": {"$mod": [10, 0]}},
                "evaluate": {
                    "from": "products",
                    "where": {"name": {"$get": "name"}},
                    "match": "tags"
                }
            })
        )
        self.assertEqual(aito_resp.accuracy, raw_resp_json['accuracy'])
        self.assertEqual(aito_resp.test_sample_count, raw_resp_json['testSamples'])
        self.assertEqual(aito_resp.train_sample_count, raw_resp_json['trainSamples'])
class TestGetSchemaResponse(CompareTestCase):
    """Schema responses must deserialize into the matching schema class."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = grocery_demo_client()
    @parameterized.expand([
        ('get_database_schema', aito_requests.GetDatabaseSchemaRequest(), aito_schema.AitoDatabaseSchema),
        ('get_table_schema', aito_requests.GetTableSchemaRequest(table_name='products'), aito_schema.AitoTableSchema),
        (
            'get_column_schema',
            aito_requests.GetColumnSchemaRequest(table_name='products', column_name='name'),
            aito_schema.AitoColumnTypeSchema
        )
    ])
    def test_get_schema_response(self, _, request_instance, schema_cls):
        """The SDK's parsed schema equals the schema class built from the raw JSON."""
        raw_resp_json, aito_resp = get_requests_resp_and_aito_resp(self.client, request_instance)
        self.assertEqual(aito_resp.schema, schema_cls.from_deserialized_object(raw_resp_json))
| 5,962 | 1,965 |
import testinfra
def test_service_is_running_and_enabled(Service):
    """The docker service must be active now and enabled at boot."""
    docker_service = Service('docker')
    assert docker_service.is_running
    assert docker_service.is_enabled
| 157 | 47 |
"""
Copyright (c) 2018- Guoxia Wang
mingzilaochongtu at gmail com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind.
"""
from PyQt4 import QtCore, QtGui
import numpy as np
import cv2
import os
import getpass
from edgelink import edgelink
from annotation import Point, Annotation, AnnBoundary
class ConvertToBoundariesWorker(QtCore.QObject):
    """
    Make a new thread instance to convert to boundaries
    from a segment map
    """
    # Emitted with the list of boundary polylines when conversion completes.
    finishedSignal = QtCore.pyqtSignal(list)
    def __init__(self, objects=None, height=0, width=0):
        QtCore.QObject.__init__(self)
        # Annotated objects whose polygons get rasterized.
        self.objects = objects
        # Label image: 0 is background, each object gets a distinct id.
        self.segmentMap = np.zeros((height, width), np.uint8)
    def setObjects(self, objects):
        # Replace the objects to convert.
        self.objects = objects
    def setSegmentMap(self, height, width):
        # Reset the label image to an empty map of the given size.
        self.segmentMap = np.zeros((height, width), np.uint8)
    # Segment map convert to boundary list
    def convertToBoundaries(self):
        """Rasterize object polygons, extract edge fragments and emit the
        resulting boundary polylines (also returned)."""
        # First, we fill all labels to numpy ndarray
        count = 1
        for obj in self.objects:
            for poly in obj.polygon:
                pts = []
                for pt in poly:
                    pts.append([pt.x, pt.y])
                pts = np.around(pts).astype(np.int32)
                cv2.fillPoly(self.segmentMap, [pts], count)
            # One label id per object (all its polygons share the id).
            count += 1
        # Second, we convert to boundary map from segment map
        edgeMap = self.segmentationMapToBoundaryMap(self.segmentMap)
        # Third, we get edge fragments
        edgelist, edgeim, etype = edgelink(edgeMap)
        polygon = []
        for edge in edgelist:
            # Drop very short fragments (likely rasterization noise).
            if (len(edge) < 5):
                continue
            # Auto correct occlusion boundary direction
            if (self.isNeedReverse(edge)):
                edge.reverse()
            # Convert to QPolygonF
            poly = []
            for pt in edge:
                # edgelink yields (row, col); Point expects (x, y).
                point = Point(pt[1], pt[0])
                poly.append(point)
            polygon.append(poly)
        self.finishedSignal.emit(polygon)
        return polygon
    # Label segmentation map to boundary map
    def segmentationMapToBoundaryMap(self, segment):
        """Return a per-pixel edge map marking label discontinuities.
        Works on a doubled lattice so vertical and horizontal edgels can be
        placed between pixels, then downsamples back to image resolution."""
        height, width = segment.shape
        boundary = np.zeros((2*height+1, 2*width+1), np.uint8)
        # Find vertical direction difference
        edgelsV = (segment[0:-1, :] != segment[1:, :]).astype(np.uint8)
        # Add a zero row
        edgelsV = np.vstack([edgelsV, np.zeros((1, width), dtype=np.uint8)])
        # Find horizontal direction difference
        edgelsH = (segment[:,0:-1] != segment[:, 1:]).astype(np.uint8)
        # Append a zero column
        edgelsH = np.hstack([edgelsH, np.zeros((height, 1), dtype=np.uint8)])
        # Assign to boundary
        boundary[2::2, 1::2] = edgelsV
        boundary[1::2, 2::2] = edgelsH
        # Get boundary
        boundary[2:-1:2, 2:-1:2] = np.maximum(
            np.maximum(edgelsH[0:-1, 0:-1], edgelsH[1:, 0:-1]),
            np.maximum(edgelsV[0:-1, 0:-1], edgelsV[0:-1, 1:]))
        # Replicate the edge values into the one-pixel border.
        boundary[0, :] = boundary[1, :]
        boundary[:, 0] = boundary[:, 1]
        boundary[-1, :] = boundary[-2, :]
        boundary[:, -1] = boundary[:, -2]
        boundary = boundary[2::2, 2::2]
        return boundary
    # Check one edge occluison direction, and return true if need reverse
    def isNeedReverse(self, edge):
        """Return True when *edge* should be reversed.
        Samples the edge every `step` points, probes the segment label on
        both sides of the local normal, and majority-votes so the occluding
        region ends up on a consistent side of the polyline."""
        height, width = self.segmentMap.shape
        step = 3
        posDirCount = 0
        # NOTE(review): relies on Python-2 integer division (this module
        # uses PyQt4/StandardError elsewhere); under Python 3 this would be
        # a float and range() would raise.
        totalCount = len(edge) / step
        for i in range(totalCount):
            idx = i * step
            pt1 = QtCore.QPointF(edge[idx][1], edge[idx][0])
            idx = (i + 1) * step
            if (idx >= len(edge)):
                idx = -1
            pt2 = QtCore.QPointF(edge[idx][1], edge[idx][0])
            line1 = QtCore.QLineF(pt1, pt2)
            line1 = line1.normalVector()
            # pt3/pt4 are probe points on opposite sides of the segment,
            # clamped inside the image.
            pt3 = line1.p2()
            pt3.setX(min(max(pt3.x(), 0), width-1))
            pt3.setY(min(max(pt3.y(), 0), height-1))
            pt4 = QtCore.QPointF(line1.x1() - line1.dx(), line1.y1() - line1.dy())
            pt4.setX(min(max(pt4.x(), 0), width-1))
            pt4.setY(min(max(pt4.y(), 0), height-1))
            if (self.segmentMap[int(pt3.y()), int(pt3.x())] >=
                self.segmentMap[int(pt4.y()), int(pt4.x())]):
                posDirCount += 1
        ratio = float(posDirCount) / np.ceil(float(totalCount))
        # If ratio greater than the threshold, we dont need to reverse the edge
        if (ratio > 0.3):
            return False
        else:
            return True
class BatchConvertToBoundariesWorker(QtCore.QObject):
    """
    Make a new thread instance to batch convert to occlusion boundary labels
    from instance labels
    """
    # Progress update: (index of current file, status text).
    updateProgress = QtCore.pyqtSignal(int, str)
    # Emitted once when the batch run ends (completed or cancelled).
    finished = QtCore.pyqtSignal()
    # Asks the GUI thread to show a dialog: (title, message).
    information = QtCore.pyqtSignal(str, str)
    # Flag indicate cancel by user
    canceled = False
    # User selected operation
    userOperationResult = -1
    # Mutex and waitcondition
    # NOTE(review): these are class-level attributes, shared by all
    # instances — confirm only one worker is ever alive at a time.
    mutex = QtCore.QMutex()
    waitCondition = QtCore.QWaitCondition()
    def __init__(self, imageList, imageDir, gtExt):
        QtCore.QObject.__init__(self)
        # Directory containing the images / label files.
        self.imageDir = imageDir
        # Image filenames to process.
        self.imageList = imageList
        # Extension of the ground-truth label files (replaces the image ext).
        self.gtExt = gtExt
    def stop(self):
        """Request cancellation; checked at the top of each loop iteration."""
        self.canceled = True
    def batchConvertToBoundaries(self):
        """Convert every labeled image in imageList to occlusion boundary
        labels, prompting the GUI (via signals + wait condition) on missing
        files, parse errors and overwrite conflicts."""
        overwriteAll = False
        annotation = Annotation()
        worker = ConvertToBoundariesWorker()
        # Convert each image
        for idx, filename in enumerate(self.imageList):
            if (self.canceled):
                break
            # get label json file name
            imageExt = os.path.splitext(filename)[1]
            gtfilename = filename.replace(imageExt, self.gtExt)
            filename = os.path.join(self.imageDir, gtfilename)
            filename = os.path.normpath(filename)
            # Update progress dialog
            self.updateProgress.emit(idx + 1, "Converting {0}".format(gtfilename))
            # Check if label json file exist
            if (not os.path.isfile(filename)):
                # Block until the GUI thread answers the dialog and wakes us.
                text = "{0} not exist. Continue?".format(filename)
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
            try:
                annotation = Annotation()
                annotation.fromJsonFile(filename)
            # StandardError is Python-2 only; this module targets Python 2.
            except StandardError as e:
                text = "Error parsing labels in {0}. \nContinue?".format(filename)
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
            # Skip all image of has no instance labels
            if (not annotation.objects):
                continue
            # Check if it has occlusion boundary label
            if (not overwriteAll and annotation.boundaries):
                text = "{0} already exists occlusion boundary labels. Do you want to overwrite?".format(filename)
                self.mutex.lock()
                self.information.emit("Overwrite", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.No):
                    continue
                elif (self.userOperationResult == QtGui.QMessageBox.YesToAll):
                    overwriteAll = True
            # Run the single-image conversion on this annotation.
            height = annotation.imgHeight
            width = annotation.imgWidth
            worker.setObjects(annotation.objects)
            worker.setSegmentMap(height, width)
            polygon = worker.convertToBoundaries()
            # Create a new boundary object
            boundaries = AnnBoundary()
            boundaries.polygon = polygon
            boundaries.deleted = 0
            boundaries.verified = 0
            boundaries.user = getpass.getuser()
            boundaries.updateDate()
            annotation.boundaries = boundaries
            try:
                annotation.toJsonFile(filename)
            except StandardError as e:
                text = "Error writting labels to {0}. \nContinue?".format(filename)
                self.mutex.lock()
                self.information.emit("IOError", text)
                self.waitCondition.wait(self.mutex)
                self.mutex.unlock()
                if (self.userOperationResult == QtGui.QMessageBox.Yes):
                    continue
                else:
                    break
        self.finished.emit()
| 9,292 | 2,660 |
from ark_nlp.dataset.base._dataset import *
from ark_nlp.dataset.base._sentence_classification_dataset import SentenceClassificationDataset
from ark_nlp.dataset.base._sentence_classification_dataset import SentenceClassificationDataset as SCDataset
from ark_nlp.dataset.base._sentence_classification_dataset import PairMergeSentenceClassificationDataset
from ark_nlp.dataset.base._sentence_classification_dataset import PairMergeSentenceClassificationDataset as PMSCDataset
from ark_nlp.dataset.base._sentence_classification_dataset import TwinTowersSentenceClassificationDataset
from ark_nlp.dataset.base._sentence_classification_dataset import TwinTowersSentenceClassificationDataset as TTSCDataset
from ark_nlp.dataset.base._token_classification_dataset import TokenClassificationDataset
from ark_nlp.dataset.text_classification_dataset import TCDataset
from ark_nlp.dataset.text_match_dataset import TMDataset
from ark_nlp.dataset.bio_named_entity_recognition_dataset import BIONERDataset
from ark_nlp.dataset.biaffine_named_entity_recognition_dataset import BiaffineNERDataset
from ark_nlp.dataset.span_named_entity_recognition_dataset import SpanNERDataset
from ark_nlp.dataset.global_pointer_named_entity_recognition_dataset import GlobalPointerNERDataset
| 1,266 | 388 |
import codecs
import json
import requests
from ..const import CUSTOM_HEADER
def latest_year_data(code: str, latest_year: int) -> list:
    """Fetch closing prices for a CSI index over the last N years.

    Parameters
    ----------
    code:
        CSI index code, e.g. "000300".
    latest_year:
        Look-back window in years; the endpoint accepts 1, 3 or 5.
        (Docstring previously misspelled this as "lastest_year".)

    Returns
    -------
    list of dict
        Each item has ``date`` ("YYYY-MM-DD") and ``close``.
    """
    url = (
        f"http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}?"
        f"earnings_performance={latest_year}%E5%B9%B4&data_type=json"
    )
    text = requests.get(url, headers=CUSTOM_HEADER).text
    # The endpoint prepends a UTF-8 BOM; strip it before JSON decoding.
    text = codecs.decode(text.encode(), "utf-8-sig")
    return [
        {
            "date": item["tradedate"][:10],
            "close": item["tclose"],
        }
        for item in json.loads(text)
    ]
| 674 | 239 |
# Time: O(n^3)
# Space: O(n)
# 842
# Given a string S of digits, such as S = "123456579",
# we can split it into a Fibonacci-like sequence [123, 456, 579].
#
# Formally, a Fibonacci-like sequence is a list F of non-negative
# integers such that:
#
# 0 <= F[i] <= 2^31 - 1,
# (that is, each integer fits a 32-bit signed integer type);
# F.length >= 3;
# and F[i] + F[i+1] = F[i+2] for all 0 <= i < F.length - 2.
# Also, note that when splitting the string into pieces,
# each piece must not have extra leading zeroes,
# except if the piece is the number 0 itself.
#
# Return any Fibonacci-like sequence split from S,
# or return [] if it cannot be done.
#
# Example 1:
#
# Input: "123456579"
# Output: [123,456,579]
# Example 2:
#
# Input: "11235813"
# Output: [1,1,2,3,5,8,13]
# Example 3:
#
# Input: "112358130"
# Output: []
# Explanation: The task is impossible.
# Example 4:
#
# Input: "0123"
# Output: []
# Explanation: Leading zeroes are not allowed, so "01", "2", "3" is not valid.
# Example 5:
#
# Input: "1101111"
# Output: [110, 1, 111]
# Explanation: The output [11, 0, 11, 11] would also be accepted.
#
# Note:
# - 1 <= S.length <= 200
# - S contains only digits.
# Solution: Brute Force
# The first two elements of the array uniquely determine the rest of the sequence.
# For each of the first two elements, assuming they have no leading zero, let's iterate through the rest of the string.
# At each stage, we expect a number less than or equal to 2^31 - 1 that starts with the sum of the two previous numbers.
# Compatibility shim: Python 3 removed `xrange`; alias it to `range` so the
# solutions below run on either interpreter line.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    """LeetCode 842 — split a digit string into a Fibonacci-like sequence.

    Uses `range` directly (valid on both Python 2 and 3, and the ranges here
    are tiny) instead of depending on the module-level `xrange` shim, so the
    class is self-contained.
    """
    def splitIntoFibonacci(self, S):  # 20ms
        """
        :type S: str
        :rtype: List[int]

        The first two numbers uniquely determine the sequence; try every
        candidate pair (at most 10 digits each, to fit 32-bit ints) and
        greedily extend while the string matches.
        """
        for i in range(min(10, len(S)-2)):
            a = S[:i+1]
            # No leading zeroes unless the piece is exactly "0".
            if a.startswith('0') and a != '0': break
            a = int(a)
            for j in range(i+1, min(i+10, len(S)-1)):
                b = S[i+1:j+1]
                if b.startswith('0') and b != '0': break
                b = int(b)
                fib = [a, b]
                k = j+1
                while k < len(S):
                    nxt = fib[-1] + fib[-2]
                    nxtS = str(nxt)
                    if nxt <= 2**31-1 and S[k:].startswith(nxtS):
                        k += len(nxtS)
                        fib.append(nxt)
                    else:
                        break
                else:
                    # Whole string consumed without a mismatch.
                    if len(fib) >= 3:
                        return fib
        return []
    def splitIntoFibonacci_kamyu(self, S):
        """Digit-accumulating variant that avoids slicing S for prefixes."""
        def startswith(S, k, x):
            # Length of x's decimal digits if S[k:] starts with them, else 0.
            y = 0
            for i in range(k, len(S)):
                y = 10*y + int(S[i])
                if y == x:
                    return i-k+1
                elif y > x:
                    break
            return 0
        MAX_INT = 2**31-1
        a = 0
        for i in range(len(S)-2):
            a = 10*a + int(S[i])
            b = 0
            for j in range(i+1, len(S)-1):
                b = 10*b + int(S[j])
                fib = [a, b]
                k = j+1
                while k < len(S):
                    if fib[-2] > MAX_INT-fib[-1]:
                        break
                    c = fib[-2]+fib[-1]
                    length = startswith(S, k, c)
                    if length == 0:
                        break
                    fib.append(c)
                    k += length
                else:
                    return fib
                # A leading-zero second number can only be "0" itself.
                if b == 0:
                    break
            if a == 0:
                break
        return []
    # Bad time complexity 240 ms, try to determine first 3 numbers in fib sequence at once.
    # repeat partition with same first number ...
    # not good as the official solution which calculates each first number only once
    def splitIntoFibonacci_mingContest(self, S):
        """Contest-time version: find a valid first triple, then extend."""
        def isF(ss):
            # Try all splits of ss into exactly three valid numbers.
            for i in range(1, min(11, len(ss)-1)):
                for j in range(i+1, len(ss)):
                    d1, d2, d3 = ss[:i], ss[i:j], ss[j:]
                    fail = False
                    for d in (d1,d2,d3):
                        if len(d)>1 and d[0]=='0' or int(d)>2147483647:
                            fail = True
                            break
                    if fail: continue
                    if int(d1)+int(d2)==int(d3):
                        return [int(d1),int(d2),int(d3)]
            return []
        if len(S)<3: return False
        ans = []
        for i in range(3, min(31, len(S)+1)):
            ans = []
            ret = isF(S[:i])
            if ret:
                ans = ret
                d2, d3 = ret[1], ret[2]
                ok = True
                while i < len(S) and ok:
                    if d2+d3 > 2147483647:
                        ok = False
                        break
                    nd = str(d2 + d3)
                    if i + len(nd) <= len(S) and nd == S[i:i+len(nd)]:
                        i += len(nd)
                        d2, d3 = d3, d2+d3
                        ans.append(d3)
                    else:
                        ok = False
                if i==len(S) and ok: return ans
        return ans
# Smoke tests. Fix: the original Python-2 `print` statements are syntax
# errors under Python 3 (defeating the xrange shim above); `print(x)` is
# valid on both interpreter lines.
print(Solution().splitIntoFibonacci('123456579'))  # [123, 456, 579]
print(Solution().splitIntoFibonacci('11235813'))   # [1, 1, 2, 3, 5, 8, 13]
print(Solution().splitIntoFibonacci('112358130'))  # []
print(Solution().splitIntoFibonacci('0123'))       # []
print(Solution().splitIntoFibonacci('1101111'))    # [11, 0, 11, 11]
import io
import hashlib
import logging
import os
import tempfile
from os import path
from os.path import isfile, isdir
import shutil
import requests
import tarfile
import zipfile
import uuid
from checksumdir import dirhash
from django.conf import settings
from rest_framework import status
class JsonException(Exception):
    """Exception carrying an error message intended for JSON responses."""

    def __init__(self, msg):
        self.msg = msg
        # Fix: forward the message to Exception — the original called
        # super().__init__() with no args, so str(exc) was empty in logs.
        super(JsonException, self).__init__(msg)
def get_dir_hash(archive_object):
    """Uncompress an in-memory archive and return its directory sha256."""
    with tempfile.TemporaryDirectory() as workdir:
        try:
            payload = archive_object.read()
            archive_object.seek(0)
            uncompress_content(payload, workdir)
        except Exception as exc:
            logging.error(exc)
            raise exc
        # Only reached when extraction succeeded.
        return dirhash(workdir, 'sha256')
def store_datasamples_archive(archive_object):
    """Uncompress a datasamples archive under MEDIA_ROOT.

    Returns a ``(directory_hash, directory_path)`` tuple; removing the
    temporary directory is the caller's responsibility.
    """
    try:
        payload = archive_object.read()
        archive_object.seek(0)
    except Exception as exc:
        logging.error(exc)
        raise exc
    # Temporary directory for uncompress
    destination = path.join(
        getattr(settings, 'MEDIA_ROOT'),
        'datasamples/{}'.format(uuid.uuid4().hex))
    try:
        uncompress_content(payload, destination)
    except Exception as exc:
        # Don't leave a half-extracted directory behind.
        shutil.rmtree(destination, ignore_errors=True)
        logging.error(exc)
        raise exc
    return dirhash(destination, 'sha256'), destination
def get_hash(file, key=None):
    """Hash a path (file or directory) or an uploaded-file object.

    Returns '' for None or for paths that are neither files nor directories.
    """
    if file is None:
        return ''
    if not isinstance(file, (str, bytes, os.PathLike)):
        # Django-style uploaded file: hash its full contents.
        handle = file.open()
        data = handle.read()
        handle.seek(0)
        return compute_hash(data, key)
    if isdir(file):
        return dirhash(file, 'sha256')
    if isfile(file):
        with open(file, 'rb') as f:
            data = f.read()
        return compute_hash(data, key)
    return ''
def get_owner():
    """Return this node's MSP id from the LEDGER settings."""
    return getattr(settings, 'LEDGER')['client']['msp_id']
def compute_hash(bytes, key=None):
    """Return the sha256 hex digest of *bytes*, salted with *key* if given.

    Note: the parameter name shadows the builtin `bytes`; it is kept because
    it is part of the public signature.
    """
    digest = hashlib.sha256()
    payload = bytes.encode() if isinstance(bytes, str) else bytes
    if key is not None and isinstance(key, str):
        payload = payload + key.encode()
    digest.update(payload)
    return digest.hexdigest()
def create_directory(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race the original
    had: another process could create the directory between the
    ``os.path.exists`` test and ``os.makedirs``, raising FileExistsError.
    """
    os.makedirs(directory, exist_ok=True)
class ZipFile(zipfile.ZipFile):
    """Override Zipfile to ensure unix file permissions are preserved.

    This is due to a python bug:
    https://bugs.python.org/issue15795
    Workaround from:
    https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries
    """

    def extract(self, member, path=None, pwd=None):
        info = member if isinstance(member, zipfile.ZipInfo) else self.getinfo(member)
        destination = path if path is not None else os.getcwd()
        extracted_path = self._extract_member(info, destination, pwd)
        # The upper 16 bits of external_attr hold the original unix mode.
        os.chmod(extracted_path, info.external_attr >> 16)
        return extracted_path
def uncompress_path(archive_path, to_directory):
    """Extract the zip or tar archive at *archive_path* into *to_directory*."""
    if zipfile.is_zipfile(archive_path):
        # Use the permission-preserving ZipFile subclass defined above.
        with ZipFile(archive_path, 'r') as archive:
            archive.extractall(to_directory)
    elif tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path, 'r:*') as archive:
            archive.extractall(to_directory)
    else:
        raise Exception('Archive must be zip or tar.gz')
def uncompress_content(archive_content, to_directory):
    """Extract in-memory zip or tar bytes into *to_directory*."""
    if zipfile.is_zipfile(io.BytesIO(archive_content)):
        # Use the permission-preserving ZipFile subclass defined above.
        with ZipFile(io.BytesIO(archive_content)) as archive:
            archive.extractall(to_directory)
        return
    try:
        with tarfile.open(fileobj=io.BytesIO(archive_content)) as archive:
            archive.extractall(to_directory)
    except tarfile.TarError:
        raise Exception('Archive must be zip or tar.*')
class NodeError(Exception):
    """Raised when fetching data from a remote node fails or is inconsistent."""
    pass
def get_remote_file(url, auth, **kwargs):
    """GET *url* with node auth; wrap connection failures in NodeError."""
    request_args = dict(kwargs)
    request_args['headers'] = {'Accept': 'application/json;version=0.0'}
    request_args['auth'] = auth
    if settings.DEBUG:
        # Dev deployments typically use self-signed certificates.
        request_args['verify'] = False
    try:
        return requests.get(url, **request_args)
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:
        raise NodeError(f'Failed to fetch {url}') from exc
def get_remote_file_content(url, auth, content_hash, salt=None):
    """Fetch *url* and verify its body against *content_hash* (sha256)."""
    response = get_remote_file(url, auth)
    if response.status_code != status.HTTP_200_OK:
        logging.error(response.text)
        raise NodeError(f'Url: {url} returned status code: {response.status_code}')
    actual_hash = compute_hash(response.content, key=salt)
    if actual_hash != content_hash:
        raise NodeError(f"url {url}: hash doesn't match {content_hash} vs {actual_hash}")
    return response.content
| 5,160 | 1,586 |
"""Initialise {{cookiecutter.git_project_name}} Core App Views."""
| 67 | 22 |
# FUNCTION
def med(arr1, arr2, length):
    """Lower median of the union of two same-length sorted arrays.

    Recursively discards the halves that cannot contain the median; the
    two-element base case is delegated to findMed.
    """
    if length == 2:
        return findMed( arr1, arr2)
    mid = int((length-1)/2)
    if (arr1[mid] < arr2[mid]):
        return med( arr2[0:mid+1], arr1[-mid-1:length], len(arr2[0:mid+1]))
    elif (arr1[mid] > arr2[mid]):
        return med( arr1[0:mid+1], arr2[-mid-1:length], len(arr1[0:mid+1]))
    else:
        # Fix: the original had no branch for equal middle elements and
        # silently returned None; when both middles agree, that common
        # value IS the lower median of the union.
        return arr1[mid]
def findMed(arr1, arr2):
    """Lower median of the merged contents of two small sorted arrays."""
    merged = sorted(arr1 + arr2)
    return merged[int(len(merged) / 2 - 1)]
# Dictionaries to store databases from the text files
# Each line has the form "<duration>- <title>"; duration becomes the key.
db1 = {}
db2 = {}
with open("db1.txt","r") as file:
    for line in file:
        x = line.split("- ")
        # Slice drops the trailing newline character from the title.
        db1[int(x[0])] = x[1][0:len(x[1])-1]
with open("db2.txt","r") as file:
    for line in file:
        x = line.split("- ")
        db2[int(x[0])] = x[1][0:len(x[1])-1]
print(db1)
print(db2)
kth = int(input('\nEnter the no of the smallest movie: '))
# sorted(db) sorts the duration keys; index kth-1 is the kth smallest.
print('\nThe reqd. smallest movie from first database:', db1[sorted(db1)[kth-1]])
print('\nThe reqd. smallest movie from second database:', db2[sorted(db2)[kth-1]])
# The Duration of the movie of Databases sorted and stored in the lists
arr1 = sorted(db1)
arr2 = sorted(db2)
length = len(arr1) # No. of movies in the database
# NOTE(review): assumes both databases hold the same number of movies.
median = med(arr1, arr2, length) #Function 'med' defined at the top.
# The median duration belongs to one of the two databases; report the
# matching title from whichever contains it.
for i in range(length):
    if arr1[i] == median:
        print("\nThe movie with median duration, i.e.",median, "is", db1[median])
        break
    elif arr2[i] == median:
        print("\nThe movie with median duration, i.e.",median, "is", db2[median])
        break
| 1,569 | 605 |
import mne
import pickle
import numpy as np
from sta import sta_matrix, sdtw_matrix
from sklearn.manifold import TSNE
# change this if you have GPUs
# in our platform, this experiment ran on 4 GPUs in around 20 minutes
n_gpu_devices = 0
def generate_samples(n_samples, n_times, time_point, space_points, M,
                     smoothing_time=1., smoothing_space=0.01,
                     seed=None):
    """Simulate brain signals at a time_point and in a random vertex among
    `space_points`.

    Each sample is an (n_times, n_features) array holding one random-amplitude
    impulse at ``time_point`` and a randomly drawn vertex, smoothed with
    Gaussian kernels in both time and space.
    """
    rng = np.random.RandomState(seed)
    n_features = len(M)
    # All samples fire at the same time point, each at a random vertex.
    impulse_times = (np.ones(n_samples) * time_point).astype(int)
    impulse_vertices = rng.choice(space_points, size=n_samples)
    signals = np.zeros((n_samples, n_times, n_features)).astype(float)
    amplitudes = rng.rand(n_samples) * 2 + 1
    signals[np.arange(n_samples), impulse_times, impulse_vertices] = amplitudes
    # Temporal and spatial Gaussian filters used to smooth the impulses.
    times = np.arange(n_times)
    sq_time_dist = (times[:, None] - times[None, :]) ** 2
    kernel_time = np.exp(- sq_time_dist / smoothing_time)
    kernel_space = np.exp(- M / smoothing_space)
    for sample_idx in range(n_samples):
        # Smooth across space first, then across time.
        spatially_smoothed = kernel_space.dot(signals[sample_idx].T).T
        signals[sample_idx] = kernel_time.dot(spatially_smoothed)
    return signals
if __name__ == "__main__":
    # load brain regions
    mt = mne.read_label("data/lh.MT.label")
    v1 = mne.read_label("data/lh.V1.label")
    # load ground metric defined on the cortical triangulated mesh
    M_ = np.load("data/ground_metric.npy") ** 2
    # Normalize the squared ground metric by its median.
    M = M_ / np.median(M_)
    # NOTE(review): `vertices` and `train_data` below are never used.
    vertices = [np.arange(642), []]
    gamma = 1.
    n_features = len(M)
    epsilon = 10. / n_features
    # Gibbs kernel derived from the ground metric (used by Sinkhorn).
    K = np.exp(- M / epsilon)
    # Keep only vertices within the first 642 (the low-resolution mesh).
    mt_vertices = mt.vertices[mt.vertices < 642]
    v1_vertices = v1.vertices[v1.vertices < 642]
    seed = 42
    n_samples_per_task = 50
    n_times = 20
    time0, time1 = 5, 15
    # Create the four categories of brain signals with different random seeds
    meg_v1_0 = generate_samples(n_samples_per_task, n_times, time0,
                                v1_vertices, M=M, seed=seed)
    meg_v1_1 = generate_samples(n_samples_per_task, n_times, time1,
                                v1_vertices, M=M, seed=seed + 1)
    meg_mt_0 = generate_samples(n_samples_per_task, n_times, time0,
                                mt_vertices, M=M, seed=seed + 2)
    meg_mt_1 = generate_samples(n_samples_per_task, n_times, time1,
                                mt_vertices, M=M, seed=seed + 3)
    # to avoid numerical errors with Sinkhorn, add 1e-3
    meg = np.concatenate((meg_v1_0, meg_v1_1, meg_mt_0, meg_mt_1)) + 1e-3
    # create labels for categories
    y_time = np.r_[2 * np.r_[n_samples_per_task * [0],
                             n_samples_per_task * [1]].tolist()]
    y_space = np.r_[2 * n_samples_per_task * [0], 2 * n_samples_per_task * [1]]
    betas = [0, 0.001, 0.01, 0.1, 0.5, 1., 2., 3., 5., 10.]
    experiment = dict(meg=meg, y_time=y_time, y_space=y_space, betas=betas)
    train_data = []
    n_samples, n_times, dimension = meg.shape
    params = dict(K=K, epsilon=epsilon, gamma=gamma, n_jobs=4,
                  n_gpu_devices=n_gpu_devices)
    # Pairwise STA distance matrices, one per beta value.
    precomputed = sta_matrix(meg, betas, **params)
    experiment["sta"] = dict()
    for beta, train_ in zip(betas, precomputed):
        train = train_.copy()
        # shift the distance to avoid negative values with large betas
        train -= train.min()
        tsne_data = TSNE(metric="precomputed").fit_transform(train)
        experiment["sta"][beta] = tsne_data
    # Repeat the embedding with the soft-DTW distance matrices.
    method = "soft"
    experiment["soft"] = dict()
    for beta in betas:
        precomputed = sdtw_matrix(meg, beta, n_jobs=10)
        train = precomputed.copy()
        # shift the distance to avoid negative values with large betas
        train -= train.min()
        tsne_data = TSNE(metric="precomputed").fit_transform(train)
        experiment[method][beta] = tsne_data
    # NOTE(review): the file handle is never closed explicitly; consider
    # wrapping the dump in a `with open(...)` block.
    expe_file = open("data/tsne-brains.pkl", "wb")
    pickle.dump(experiment, expe_file)
| 4,025 | 1,485 |
import torch
from torch.utils.data import Dataset, ConcatDataset
import torchvision
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import pandas as pd
import numpy as np
import pickle
import os
from .splitter import splitter, dataset_split
def Cifar10_preload_and_split(path=None, splits=None, transform=None):
    """Index and split CIFAR10 dataset.

    Args:
        path (string): Path to location containing dataset. If left as None
            will search default location 'DATASETS_DIR' specified in
            definitions.
        splits (list): List of fractional splits. Defaults to
            [0.4, 0.1, 0.25, 0.25]. Any remainder lost to integer
            truncation is added to the last split so the splits cover the
            entire dataset.
        transform: sequence of transforms; transform[0] is applied to both
            the train and the test partitions (must not be None).

    Returns:
        The per-split datasets produced by ``dataset_split`` -- presumably
        a list of Dataset objects, one per requested split; verify against
        ``splitter.dataset_split``.

    Example:

    Todo:
        - Write Example.
        - More work on user specified splits.
    """
    # Avoid a mutable default argument (previous default preserved).
    if splits is None:
        splits = [0.4, 0.1, 0.25, 0.25]
    if path is None:
        path = DATASETS_DIR
    index_file = os.path.join(path, 'cifar10.index.csv')
    # Reuse a previously saved index permutation, if any, so that the
    # split is reproducible across runs.
    indices = None
    if os.path.exists(index_file):
        index_csv = np.loadtxt(index_file)
        indices = torch.tensor(index_csv)
        print('Found predefined indexing file {}'.format(index_file))
    trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)
    testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)
    fullset = ConcatDataset([trainset, testset])
    print('Initializing CIFAR10Dataset splits')
    # Convert fractional splits into integer sizes; hand any rounding
    # remainder to the final split so the sizes sum to the dataset size.
    dset_size = fullset.cumulative_sizes[-1]
    int_splits = [int(dset_size * frac) for frac in splits]
    if sum(int_splits) < dset_size:
        int_splits[-1] += dset_size - sum(int_splits)
    indices, splitsets = dataset_split(fullset, int_splits, indices=indices)
    if not os.path.exists(index_file):
        print('No predefined indexing file found, so index permutations saving to {}'.format(index_file))
        np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')
    print('Finished splitting data.')
    return splitsets
| 2,161 | 691 |
#from .BUILD_IN import BUILD_IN | 31 | 13 |
"""Types that help building the final representation of the data.
From the point of view of the internal API, the main type in this module is
:py:class:`Builder`, which uses the data stored in an
:py:class:`~muscle_synergies.vicon_data.aggregator.Aggregator` to build the
:py:class:`ViconNexusData`. That object, in turn, simply holds references to
:py:class:`DeviceData` instances corresponding to the different experimental
devices, organized by their type (see
:py:class:`~muscle_synergies.vicon_data.definitions.DeviceType`).
Refer to the documentation for the package
:py:mod:`muscle_synergies.vicon_data.__init__.py` for more on how
:py:class:`Builder` fits together with the other classes used for reading the
data from disk.
"""
import abc
from collections import defaultdict
from dataclasses import dataclass
from functools import lru_cache
from typing import Iterator, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from .aggregator import Aggregator, DeviceAggregator
from .definitions import DeviceType, SamplingFreq
@dataclass
class ViconNexusData:
    """The data contained in a Vicon Nexus CSV file.

    The initialization arguments are stored as they are under the same names.

    Args:
        forcepl: a sequence of :py:class:`DeviceData` corresponding to the
            different force plate devices.

        emg: a single :py:class:`DeviceData` that includes all columns with EMG
            measurements.

        traj: a sequence of :py:class:`DeviceData` corresponding to the
            different trajectory devices.
    """

    forcepl: Sequence["DeviceData"]
    emg: "DeviceData"
    traj: Sequence["DeviceData"]

    def __repr__(self):
        return "ViconNexusData(forcepl=[...], emg=<DeviceData>, traj=[...])"

    def describe(self) -> str:
        """Represent ViconNexusData object as a Markdown list.

        Gives the user a quick glance at what was loaded, e.g.:

            ViconNexusData:
                + emg: 8 columns
                + forcepl (2 devices): DeviceData("Force Plate 1"), DeviceData("Force Plate 2")
                + traj (14 devices): DeviceData("Traj 1"), ..., DeviceData("Traj 14")

        When there are more than 2 force plates or trajectory markers, the
        middle devices are elided as in the last line of the example.
        """
        emg_summary = self._amount_str(self._num_cols(self.emg), "column")
        forcepl_count = self._amount_str(len(self.forcepl), "device")
        forcepl_items = self._stringify_list(self.forcepl)
        traj_count = self._amount_str(len(self.traj), "device")
        traj_items = self._stringify_list(self.traj)
        return f"""ViconNexusData:
    + emg: {emg_summary}
    + forcepl ({forcepl_count}): {forcepl_items}
    + traj ({traj_count}): {traj_items}"""

    @staticmethod
    def _num_cols(dev: "DeviceData") -> int:
        """Count the columns in the device's DataFrame."""
        return len(dev.df.columns)

    @staticmethod
    def _amount_str(num: int, noun: str) -> str:
        """Format a count with its noun, adding a plural "s" when needed."""
        suffix = "" if num == 1 else "s"
        return f"{num} {noun}{suffix}"

    @staticmethod
    def _stringify_list(seq: Sequence) -> str:
        """Join elements with commas, eliding the middle of long lists."""
        items = list(seq)
        if len(items) > 2:
            items = [items[0], "...", items[-1]]
        return ", ".join(str(item) for item in items)
class Builder:
    """Build a ViconNexusData using the data stored in an Aggregator."""

    def __init__(self, aggregator: Optional[Aggregator] = None):
        # The aggregator may also be provided per-call to `build`.
        self.aggregator = aggregator

    def build(self, aggregator: Optional[Aggregator] = None) -> ViconNexusData:
        """Build a ViconNexusData using the data stored in an Aggregator.

        Args:
            aggregator: if not provided, use the one given during
                initialization.

        Raises:
            ValueError if the number of EMG devices is not exactly 1.
        """
        if aggregator is None:
            aggregator = self.aggregator
        # One frame tracker per CSV section (forces/EMG vs trajectories);
        # each device later picks the one matching its type.
        frame_tracker = self._build_frame_tracker(aggregator)
        # Group built DeviceData objects by their DeviceType.
        devices_by_type = defaultdict(list)
        for device_agg in self._devices(aggregator):
            device_data = self._build_device(device_agg, frame_tracker)
            device_type = self._device_agg_type(device_agg)
            devices_by_type[device_type].append(device_data)
        # TODO fix a typing mess:
        # 1. make _vicon_nexus_data get 3 parameters corresponding to device
        # type lists instead of a dict
        # 2. _simplify_emg now gets an emg_list and returns an emg_dev,
        # checking if the list has too many entries
        # done.
        return self._vicon_nexus_data(self._simplify_emg(devices_by_type))

    def _build_device(
        self,
        device_agg: DeviceAggregator,
        frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"],
    ) -> "DeviceData":
        """Create new DeviceData from DeviceAggregator and frame trackers."""
        params_dict = self._params_device_data(device_agg, frame_tracker)
        return self._instantiate_device(**params_dict)

    def _params_device_data(
        self,
        device_agg: DeviceAggregator,
        frame_tracker: Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"],
    ) -> Mapping[str, Union[str, DeviceType, "_SectionFrameTracker", pd.DataFrame]]:
        """Build a dict with the params to create a new DeviceData instance.

        This method sets up a dict corresponding to the keyword arguments
        required by :py:meth`~Builder._instantiate_device`.
        """
        return {
            "device_name": self._device_agg_name(device_agg),
            "device_type": self._device_agg_type(device_agg),
            "units": self._device_agg_units(device_agg),
            # Pass both trackers; _choose_frame_tracker picks the right one.
            "frame_tracker": self._choose_frame_tracker(device_agg, *frame_tracker),
            "dataframe": self._extract_dataframe(device_agg),
        }

    def _build_frame_tracker(
        self, aggregator: Aggregator
    ) -> Tuple["ForcesEMGFrameTracker", "TrajFrameTracker"]:
        """Build frame trackers corresponding to Aggregator."""
        # Both trackers share the same SamplingFreq information.
        sampling_freq = self._aggregator_sampling_freq(aggregator)
        return (ForcesEMGFrameTracker(sampling_freq), TrajFrameTracker(sampling_freq))

    @staticmethod
    def _instantiate_device(
        device_name: str,
        device_type: DeviceType,
        units: List[str],
        frame_tracker: "_SectionFrameTracker",
        dataframe: pd.DataFrame,
    ) -> "DeviceData":
        """Instantiate new DeviceData object."""
        return DeviceData(
            device_name=device_name,
            device_type=device_type,
            units=units,
            frame_tracker=frame_tracker,
            dataframe=dataframe,
        )

    @classmethod
    def _extract_dataframe(cls, device_aggregator: DeviceAggregator) -> pd.DataFrame:
        """Create DataFrame with the data in the DeviceAggregator."""
        data = cls._device_agg_data(device_aggregator)
        header = cls._device_agg_coords(device_aggregator)
        # All measurement values are coerced to float.
        return pd.DataFrame(data, columns=header, dtype=float)

    def _simplify_emg(
        self, devices_by_type: Mapping[DeviceType, List["DeviceData"]]
    ) -> Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]]:
        """Replaces list of EMG devices with the single device in dict.

        Args:
            devices_by_type: a dict which lists all devices of each type.

        Returns:
            a copy of the dict with one change.
            `new_devices_by_type[DeviceType.EMG]` will not be a a list of
            devices but rather a single one as it is assumed that all EMG data
            is represented as being different coordinates of a single
            experimental device.

        Raises:
            ValueError if the number of EMG devices is not exactly 1.
        """
        new_devices_dict = dict(devices_by_type)
        emg_list = devices_by_type[DeviceType.EMG]
        if len(emg_list) != 1:
            raise ValueError(f"found {len(emg_list)} EMG devices - expected one")
        emg_dev = emg_list[0]
        new_devices_dict[DeviceType.EMG] = emg_dev
        return new_devices_dict

    @staticmethod
    def _vicon_nexus_data(
        devices_by_type: Mapping[DeviceType, Union["DeviceData", List["DeviceData"]]],
    ) -> ViconNexusData:
        """Instantiate new ViconNexusData object."""
        return ViconNexusData(
            forcepl=devices_by_type[DeviceType.FORCE_PLATE],
            emg=devices_by_type[DeviceType.EMG],
            traj=devices_by_type[DeviceType.TRAJECTORY_MARKER],
        )

    @staticmethod
    def _devices(aggregator: Aggregator) -> Iterator[DeviceAggregator]:
        """Yield all `DeviceAggregator`s stored in the Aggregator."""
        yield from aggregator.get_devices()

    def _choose_frame_tracker(
        self,
        device_agg: DeviceAggregator,
        forces_emg_tracker: "ForcesEMGFrameTracker",
        traj_tracker: "TrajFrameTracker",
    ) -> "_SectionFrameTracker":
        """Choose the correct frame tracker for device."""
        # Force plates and EMG live in the same CSV section and share a
        # tracker; every other device type is tracked as a trajectory.
        forces_emg = {DeviceType.FORCE_PLATE, DeviceType.EMG}
        if self._device_agg_type(device_agg) in forces_emg:
            return forces_emg_tracker
        return traj_tracker

    @staticmethod
    def _device_agg_name(device_aggregator: DeviceAggregator) -> str:
        """Get device name from DeviceAggregator."""
        return device_aggregator.name

    @staticmethod
    def _device_agg_type(device_aggregator: DeviceAggregator) -> DeviceType:
        """Get device type from DeviceAggregator."""
        return device_aggregator.device_type

    @staticmethod
    def _device_agg_units(device_aggregator: DeviceAggregator) -> List[str]:
        """Get device units from DeviceAggregator."""
        return device_aggregator.units

    @staticmethod
    def _device_agg_coords(device_aggregator: DeviceAggregator) -> List[str]:
        """Get device coordinates from DeviceAggregator."""
        return device_aggregator.coords

    @staticmethod
    def _device_agg_data(device_aggregator: DeviceAggregator) -> List[List[float]]:
        """Get the data rows stored in DeviceAggregator."""
        return device_aggregator.data_rows

    @staticmethod
    def _aggregator_sampling_freq(aggregator: Aggregator) -> "SamplingFreq":
        """Get the sampling frequencies stored in Aggregator."""
        return aggregator.get_sampling_freq()
class _SectionFrameTracker(abc.ABC):
"""Convert array indices to/from (frame, subframe) for a section.
This class is abstract, subclasses implement the conversions, which differ
between the 2 sections of the CSV file. The first data row will have index
0 and correspond to frame 0 and subframe 0. The second data row will have
index 1 but its frame and subframe will differ depending on the relative
sampling rate of each section. See
:py:class:`~muscle_synergies.vicon_data.definitions.SamplingFreq`.
The 2 main methods of :py:class:`_SectionFrameTracker` are:
+ :py:meth:`~_SectionFrameTracker.index`: convert frame and subframe to the
corresponding array index.
+ :py:meth:`~_SectionFrameTracker.frame_tracker`: convert an array index to
the corresponding frame and subframe.
"""
def __init__(self, sampling_freq=SamplingFreq):
self._sampling_freq = sampling_freq
@property
def num_frames(self) -> int:
"""Total number of frames."""
return self._sampling_freq.num_frames
@abc.abstractproperty
def sampling_frequency(self) -> int:
"""Sampling frequency in Hz with which the measurements were made."""
pass
@abc.abstractmethod
def index(self, frame: int, subframe: int) -> int:
"""Array index associated with frame and subframe.
Raises:
ValueError if the arguments are outside of the allowed range.
`frame` should be between 1 and
:py:attr:`~_SectionFrameTracker.num_frames`. `subframe` should
be between 0 and
:py:attr:`~_SectionFrameTracker.num_subframes`.
"""
self._validate_frame_tracker_args(frame, subframe)
@abc.abstractmethod
def frame_tracker(self, index: int) -> Tuple[int, int]:
"""Frame and subframe associated with given array index.
Raises:
ValueError if the argument is outside of the allowed range (from 0
to :py:attr:`~_SectionFrameTracker.final_index`).
"""
self._validate_index_arg(index)
@abc.abstractproperty
def final_index(self) -> int:
"""The highest array index."""
pass
@property
def num_subframes(self) -> int:
"""The total number of subframes."""
return self._sampling_freq.num_subframes
@property
def _freq_forces_emg(self) -> int:
"""The sampling rate of the section with force plates and EMG."""
return self._sampling_freq.freq_forces_emg
@property
def _freq_traj(self) -> int:
"""The sampling rate of the section with trajectories."""
return self._sampling_freq.freq_traj
def _validate_index_arg(self, index: int):
"""Raise exception if index is outside of allowed range."""
if index not in range(self.final_index + 1):
raise ValueError(f"index {index} out of bounds (max is self.final_index)")
def _validate_frame_tracker_args(self, frame: int, subframe: int):
"""Raise exception if frame and subframe are not in allowed range."""
if frame not in range(1, self.num_frames + 1):
raise ValueError(f"frame {frame} is out of bounds")
if subframe not in range(self.num_subframes):
raise ValueError(f"subframe {subframe} out of range")
def time_seq(self) -> pd.Series:
"""Create Series with times in seconds of all measurements."""
return self._time_seq(self.sampling_frequency, self.final_index + 1)
@staticmethod
@lru_cache(maxsize=2)
def _time_seq(sampling_frequency: int, num_measurements: int) -> pd.Series:
"""Memoized version of time_seq."""
period = 1 / sampling_frequency
return pd.Series(period * np.arange(1, num_measurements + 1, 1))
class ForcesEMGFrameTracker(_SectionFrameTracker):
    """Frame tracker for the forces/EMG section of the CSV file."""

    @property
    def sampling_frequency(self) -> int:
        return self._freq_forces_emg

    def index(self, frame: int, subframe: int) -> int:
        super().index(frame, subframe)
        # Each frame occupies `num_subframes` consecutive rows.
        return self.num_subframes * (frame - 1) + subframe

    def frame_tracker(self, index: int) -> Tuple[int, int]:
        super().frame_tracker(index)
        whole_frames, remainder = divmod(index, self.num_subframes)
        return whole_frames + 1, remainder

    @property
    def final_index(self) -> int:
        return self.num_subframes * self.num_frames - 1
class TrajFrameTracker(_SectionFrameTracker):
    """Frame tracker for the trajectory section of the CSV file."""

    @property
    def sampling_frequency(self) -> int:
        return self._freq_traj

    def index(self, frame: int, subframe: int) -> int:
        super().index(frame, subframe)
        # Trajectories have exactly one row per frame, so the subframe
        # (already validated) does not shift the index.
        return frame - 1

    def frame_tracker(self, index: int) -> Tuple[int, int]:
        super().frame_tracker(index)
        # The inverse mapping: row i belongs to frame i + 1, subframe 0.
        return index + 1, 0

    @property
    def final_index(self) -> int:
        return self.num_frames - 1
class DeviceData:
    """Data associated with a measurement device.

    Attributes:
        name: the name of the device, as it occurs on the CSV file.
        dev_type: the type of the device (can be a force plate, trajectory
            marker or EMG device).
        units: physical units of each column in the
            :py:class:`~pandas.DataFrame`.
        df: the data associated with the device.
    """

    # NOTE: the original bare-string attribute docstrings for `dev_type`
    # and `df` were swapped; the descriptions above are the corrected ones.
    name: str
    dev_type: "DeviceType"
    units: Tuple[str]
    df: pd.DataFrame

    def __init__(
        self,
        device_name: str,
        device_type: "DeviceType",
        units: List[str],
        frame_tracker: "_SectionFrameTracker",
        dataframe: pd.DataFrame,
    ):
        self.name = device_name
        self.dev_type = device_type
        # Stored as a tuple so the units cannot be mutated accidentally.
        self.units = tuple(units)
        self.df = dataframe
        self._frame_tracker = frame_tracker

    @property
    def sampling_frequency(self) -> int:
        """Sampling rate with which measurements were made."""
        return self._frame_tracker.sampling_frequency

    def time_seq(self) -> pd.Series:
        """Compute the moment in seconds in which measurements were made.

        Returns:
            a :py:class:`pandas.Series` where each entry corresponds to the
            time in seconds of the matching data row (delegates to the
            section's frame tracker).
        """
        return self._frame_tracker.time_seq()

    def iloc(self, frame: int, subframe: int) -> pd.Series:
        """Index data rows by their frame and subframe.

        This method works similarly to :py:attr:`pandas.DataFrame.iloc`: its
        purpose is to help the user index the data referring to rows. Whereas
        the :py:class:`~pandas.DataFrame` version is used by directly indexing
        it (`datafr.iloc[0]` returns the first row), the :py:class:`DeviceData`
        version is a method.

        To get the i-th row of the :py:class:`~pandas.DataFrame`, use its own
        :py:attr:`~pandas.DataFrame.iloc`. This method should be used only when
        the goal is to get not the i-th row but the one corresponding to a
        given frame and subframe.

        Raises:
            KeyError: if the frame and subframe are out of bounds.
        """
        return self.df.iloc[self._convert_key(frame, subframe)]

    def frame_subfr(self, index: int) -> Tuple[int, int]:
        """Find (frame, subframe) pair corresponding to index."""
        return self._frame_tracker.frame_tracker(index)

    def _key_slice_frame_subframe(
        self,
        stop: Tuple[int, int],
        start: Optional[Tuple[int, int]] = None,
        step: Optional[int] = None,
    ) -> slice:
        """Create slice with indexes corresponding to (frame, subframe) range.

        Raises:
            KeyError: if the frame and subframe are out-of-bounds.
        """
        stop_index = self._convert_key(*stop)
        if start is None:
            return slice(stop_index)
        start_index = self._convert_key(*start)
        if step is None:
            return slice(start_index, stop_index)
        return slice(start_index, stop_index, step)

    def _convert_key(self, frame: int, subframe: int) -> int:
        """Get index corresponding to given frame and subframe.

        Raises:
            KeyError: if the frame and subframe are out-of-bounds.
        """
        try:
            return self._frame_tracker_index(frame, subframe)
        except ValueError as err:
            raise KeyError from err

    def _frame_tracker_index(self, frame: int, subframe: int) -> int:
        """Call FrameTracker.index with arguments."""
        return self._frame_tracker.index(frame, subframe)

    def __eq__(self, other) -> bool:
        # Robustness fix: comparing against a non-DeviceData previously
        # raised AttributeError; returning NotImplemented lets Python fall
        # back to the other operand (yielding False for unrelated types).
        # Note defining __eq__ leaves the class unhashable, as before.
        if not isinstance(other, DeviceData):
            return NotImplemented
        return (
            self.name == other.name
            and self.dev_type == other.dev_type
            and self.units == other.units
            and self.df.equals(other.df)
        )

    def __str__(self):
        return f'DeviceData("{self.name}")'

    def __repr__(self):
        return f"<{str(self)}>"
| 19,547 | 5,816 |
#!/usr/bin/env python
"""Ensures there have been no changes to important certbot-auto files."""
import hashlib
import os
# Relative to the root of the Certbot repo, these files are expected to exist
# and have the SHA-256 hashes contained in this dictionary. These hashes were
# taken from our v1.14.0 tag which was the last release we intended to make
# changes to certbot-auto.
#
# Deleting letsencrypt-auto-source/letsencrypt-auto and
# letsencrypt-auto-source/letsencrypt-auto.sig can be done once we're
# comfortable breaking any certbot-auto scripts that haven't already updated to
# the last version. See
# https://opensource.eff.org/eff-open-source/pl/65geri7c4tr6iqunc1rpb3mpna for
# more info.
EXPECTED_FILES = {
os.path.join('letsencrypt-auto-source', 'letsencrypt-auto'):
'b997e3608526650a08e36e682fc3bf0c29903c06fa5ba4cc49308c43832450c2',
os.path.join('letsencrypt-auto-source', 'letsencrypt-auto.sig'):
'61c036aabf75da350b0633da1b2bef0260303921ecda993455ea5e6d3af3b2fe',
}
def find_repo_root():
    """Return the path to the Certbot repository root.

    Assumes this script lives exactly one directory below the root.
    """
    this_file = os.path.realpath(__file__)
    return os.path.dirname(os.path.dirname(this_file))
def sha256_hash(filename):
    """Return the SHA-256 hex digest of the file's contents."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        # Feed the file to the hash in chunks to bound memory use.
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def main():
    """Verify every pinned certbot-auto file against its expected hash."""
    repo_root = find_repo_root()
    for relpath, known_hash in EXPECTED_FILES.items():
        target = os.path.join(repo_root, relpath)
        assert sha256_hash(target) == known_hash, f'unexpected changes to {target}'
    print('All certbot-auto files have correct hashes.')
if __name__ == '__main__':
main()
| 1,629 | 617 |
import unittest
import qsml
class TestLoad(unittest.TestCase):
def test_load(self):
file = "tests/load.qsml"
returned_val = {
"myportfolio": {"GOOG": 10, "AAPL": 5, "BRK.B": 1},
"test": {"SNAP": 130, "MSFT": 5, "TSLA": 100},
}
self.assertEqual(qsml.load(file), returned_val, "Were not equal")
def test_load_comment_error(self):
file = "tests/load2.qsml"
with self.assertRaises(qsml.error.QSMLError):
qsml.load(file)
if __name__ == "__main__":
unittest.main()
| 560 | 222 |
"""
Collection of utilities such as memoization, automatic property storage, etc
"""
from __future__ import print_function, absolute_import, division
from functools import wraps, partial
import logging
from votesim.utilities import misc
logger = logging.getLogger(__name__)
class memoize:
    """
    Decorator used to store past calls.

    Results are cached per combination of positional and keyword
    arguments, so repeated calls with the same hashable arguments skip
    the wrapped function entirely.
    """

    def __init__(self, function):
        self.function = function
        self.memoized = {}

    def __call__(self, *args, **kwargs):
        key = (args, frozenset(kwargs.items()))
        if key not in self.memoized:
            self.memoized[key] = self.function(*args, **kwargs)
        return self.memoized[key]
class method_memoize(object):
    """cache the return value of a method

    This class is meant to be used as a decorator of methods. The return value
    from a given method invocation will be cached on the instance whose method
    was invoked. All arguments passed to a method decorated with memoize must
    be hashable.

    If a memoized method is invoked directly on its class the result will not
    be cached. Instead the method will be invoked like a static method:

    class Obj(object):
        @memoize
        def add_to(self, arg):
            return self + arg
    Obj.add_to(1) # not enough arguments
    Obj.add_to(1, 2) # returns 3, result is not cached
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        # Class-level access (obj is None) returns the raw function so it
        # behaves like a static call and bypasses the cache.
        if obj is None:
            return self.func
        # Bind the instance as the first positional argument of __call__.
        return partial(self, obj)

    def __call__(self, *args, **kw):
        obj = args[0]
        # `obj.__cache` is name-mangled to `obj._method_memoize__cache`, so
        # every memoized method of the instance shares one cache dict.
        try:
            cache = obj.__cache
        except AttributeError:
            cache = obj.__cache = {}
        # Key on the wrapped function plus the call arguments (minus self),
        # so distinct decorated methods never collide in the shared dict.
        key = (self.func, args[1:], frozenset(kw.items()))
        try:
            res = cache[key]
        except KeyError:
            res = cache[key] = self.func(*args, **kw)
        return res
#
#def lazyprop(fn):
# """
# Decorator used to cache property results
#
# From stack overflow. Author Mike Boers
# https://stackoverflow.com/questions/3012421/python-memoising-deferred-lookup-property-decorator
# """
#
# attr_name = '_lazy_' + fn.__name__
# @property
# def _lazyprop(self):
# if not hasattr(self, attr_name):
# setattr(self, attr_name, fn(self))
# return getattr(self, attr_name)
# return _lazyprop
#
### Lazy Property decorator
# Property name to hold all lazy data
_data_holder_attr = '_cache_properties'


def clean_lazy_properties(instance):
    """Reset the instance's lazy-property cache to an empty dict."""
    setattr(instance, _data_holder_attr, {})
def clean_some_lazy_properties(instance, names):
    """Remove the cached values listed in `names` from `instance`.

    `names` may be a single property name or an iterable of names.
    Missing entries and instances without a cache are silently ignored.
    """
    try:
        cache = getattr(instance, _data_holder_attr)
    except AttributeError:
        # Nothing has been cached yet; nothing to clean.
        return
    if isinstance(names, str):
        names = [names]
    for prop_name in names:
        cache.pop(prop_name, None)
    setattr(instance, _data_holder_attr, cache)
    return
def modify_lazy_property(instance, name, value, dictname=_data_holder_attr):
    """Modify a lazy property.

    Overwrites the cached entry `name` with `value` in the cache dict read
    from attribute `dictname`.
    """
    cache = getattr(instance, dictname)
    cache[name] = value
    # NOTE(review): the cache is read from `dictname` but always written
    # back to the default attribute -- confirm this asymmetry is intended
    # for non-default `dictname` values.
    setattr(instance, _data_holder_attr, cache)
    return
def lazy_property(fn):
    """
    Version of lazy_property by John Huang.

    Decorator used to cache property results into dictionary.
    The cache can be cleared using clean_lazy_properties.
    """
    attr_name = fn.__name__

    @property
    @wraps(fn)
    def get_attr(self):
        # Create the shared cache dict on first access.
        if not hasattr(self, _data_holder_attr):
            setattr(self, _data_holder_attr, {})
        cache = getattr(self, _data_holder_attr)
        if attr_name not in cache:
            cache[attr_name] = fn(self)
        return cache[attr_name]

    return get_attr
def lazy_property2(name=_data_holder_attr):
    """
    Version of lazy_property by John Huang.

    Decorator used to cache property results into dictionary.
    The cache can be cleared by deleting the attribute named by `name`.
    Decorator must be called as a function.

    Parameters
    ----------
    name : str
        Name of cache dictionary

    Example
    ---------
    Set the lazy property

    >>> class class1(object):
    >>>     @lazy_property2('my_cache')
    >>>     def property(self):
    >>>         x = 2.0
    >>>         return x

    Delete the lazy property

    >>> a = class1()
    >>> del a.my_cache
    """
    def decorator(fn):
        attr_name = fn.__name__

        @property
        @wraps(fn)
        def get_attr(self):
            # Create the named cache dict on first access.
            if not hasattr(self, name):
                setattr(self, name, {})
            cache = getattr(self, name)
            if attr_name not in cache:
                cache[attr_name] = fn(self)
            return cache[attr_name]

        return get_attr

    return decorator
def reuse_doc(f):
    """Reuse the docstring from f on the decorated function

    Parameters
    ----------
    f : func or class
        Desired func/class whose __doc__ you want to reuse

    Returns
    -------
    out : decorator

    Example
    --------
    Here we decorate class B with class A's docstring

    >>> class A(object):
    >>>     '''I got A docstring'''
    >>>     def __init__(self):
    >>>         self.x = 10

    >>> @reuse_doc(A)
    >>> class B(A):
    >>>     pass

    >>> B.__doc__ == 'I got A docstring'
    """
    # Capture the docstring once, at decoration-factory time.
    captured_doc = f.__doc__

    def decorator(fn):
        fn.__doc__ = captured_doc
        return fn

    return decorator
| 6,012 | 1,820 |
#!/usr/bin/env python3
from argparse import ArgumentParser, Namespace
from pathlib import Path
from re import search
from sys import argv
from typing import Callable, Iterable, List, Optional, TextIO, Tuple
from urllib.request import urlopen, urlretrieve
from ports import Platform, PortError, PortLicense, Ports
from ports.cran import Cran, CranPort
__author__ = "David Naylor <dbn@FreeBSD.org>"
__license__ = "BSD (FreeBSD)"
__summary__ = "Generates FreeBSD Ports from CRAN packages"
__version__ = "0.1.9"
ERR_GENERAL = 1
ERR_CATEGORY = 2
ERR_EXISTS = 3
class Command(object):
    """Thin argparse wrapper with decorator-registered sub-commands."""

    def __init__(self, description: str) -> None:
        self._parser = ArgumentParser(description=description)
        self._subparsers = self._parser.add_subparsers(title="available sub-commands", help="sub-command help")

    def execute(self, args: List[str]) -> None:
        """Parse `args` and run the selected sub-command, or print usage."""
        namespace = self._parser.parse_args(args)
        if not hasattr(namespace, "action"):
            # No sub-command selected: fall back to the usage message.
            self.usage()
        else:
            namespace.action(namespace)

    def usage(self) -> None:
        """Print a short usage message."""
        self._parser.print_usage()

    def __call__(self, verb: str, description: str) -> Callable[[Callable[[Namespace], None]], ArgumentParser]:
        """Return a decorator registering its target as handler for `verb`."""
        def decorator(action: Callable[[Namespace], None]) -> ArgumentParser:
            sub = self._subparsers.add_parser(verb, help=description)
            sub.set_defaults(action=action)
            return sub
        return decorator
def make_cran_port(name: str, portdir: Optional[Path] = None, version: Optional[str] = None) -> CranPort:
    """Build a CranPort for `name`, downloading its source tarball if needed.

    When `version` is omitted, the latest version is scraped from the CRAN
    package page.
    """
    if not version:
        print("Checking for latest version...")
        page = urlopen("http://cran.r-project.org/package=%s" % name).read().decode("utf-8")
        match = search(r"<td>Version:</td>\s*<td>(.*?)</td>", str(page))
        assert match is not None
        version = match.group(1)
    distfile = Ports.distdir / ("%s_%s.tar.gz" % (name, version))
    if not distfile.exists():  # pylint: disable=no-member
        print("Fetching package source (%s-%s)..." % (name, version))
        urlretrieve("https://cran.r-project.org/src/contrib/%s" % distfile.name, distfile)  # pylint: disable=no-member
    return CranPort.create(name, distfile, portdir)
def diff(left: Iterable[str], right: Iterable[str]) -> Tuple[List[str], bool, List[str]]:
    """Three-way diff of two iterables.

    Returns:
        ``(removed, same_order, added)`` where `removed` holds the items
        present only in `left`, `added` holds the items present only in
        `right`, and `same_order` is True when the common items appear in
        the same order on both sides.
    """
    left = list(left)
    right = list(right)
    # Performance fix: membership tests used the opposite *list*, making
    # this O(n*m); use sets for O(1) lookups (items are hashable strings).
    left_set = set(left)
    right_set = set(right)
    old = [i for i in left if i not in right_set]
    new = [i for i in right if i not in left_set]
    old_set = set(old)
    new_set = set(new)
    left = [i for i in left if i not in old_set]
    right = [i for i in right if i not in new_set]
    return old, left == right, new
def yies(obj: list) -> str:
    """Plural suffix for "dependenc-": "ies" when plural, else "y"."""
    return "y" if len(obj) <= 1 else "ies"


def log_depends(log: TextIO, depend: str, difference: Tuple[List[str], bool, List[str]]) -> None:
    """Write commit-log lines describing one kind of dependency change."""
    removed, common, added = difference
    if not common:
        log.write(" - order %s dependencies lexicographically on origin\n" % depend)
    if removed:
        log.write(" - remove unused %s dependenc%s:\n" % (depend, yies(removed)))
        for origin in sorted(removed):
            log.write(" - %s\n" % origin)
    if added:
        log.write(" - add new %s dependenc%s:\n" % (depend, yies(added)))
        for origin in sorted(added):
            log.write(" - %s\n" % origin)
def log_uses(log: TextIO, difference: Tuple[List[str], bool, List[str]]) -> None:
    """Write commit-log lines describing changes to the cran USES arguments.

    Raises PortError for any argument other than "auto-plist"/"compiles".
    """
    removed, common, added = difference
    if not common:
        log.write(" - sort cran uses arguments lexicographically\n")
    # Dispatch tables keep the per-argument messages in one place.
    removed_msgs = {
        "auto-plist": " - manually generate pkg-plist\n",
        "compiles": " - port no longer needs to compile\n",
    }
    added_msgs = {
        "auto-plist": " - automatically generate pkg-plist\n",
        "compiles": " - mark port as needing to compile\n",
    }
    for arg in removed:
        if arg not in removed_msgs:
            raise PortError("Log: unknown cran argument: %s" % arg)
        log.write(removed_msgs[arg])
    for arg in added:
        if arg not in added_msgs:
            raise PortError("Log: unknown cran argument: %s" % arg)
        log.write(added_msgs[arg])
def log_license(log: TextIO, old: PortLicense, new: PortLicense) -> None:
    """Write a commit-log bullet point when the license set or combination changed."""
    if list(old) != sorted(new):
        log.write(" - update license to: %s\n" % " ".join(sorted(new)))
    elif old.combination != new.combination:
        message = (
            " - remove license combination\n"
            if new.combination is None
            else " - update license combination\n"
        )
        log.write(message)
def generate_update_log(old: CranPort, new: CranPort) -> None:
    """Write a Subversion commit message (``commit.svn``) in the new port's
    directory describing what changed between *old* and *new*: version bump,
    maintainer/comment/license changes, per-category dependency diffs,
    description/website updates and the upstream changelog entry (wrapped at
    ~75 columns)."""
    # A log only makes sense when the version actually changed.
    assert (old.portversion or old.distversion) != new.distversion
    with open(new.portdir / "commit.svn", "w", encoding="utf-8") as log:
        log.write("%s: updated to version %s\n\n" % (new.origin, new.distversion))
        if old.portrevision is not None:
            log.write(" - removed PORTREVISION due to version bump\n")
        if old.maintainer != new.maintainer:
            log.write(" - update maintainer\n")
        if old.comment != new.comment:
            log.write(" - updated comment to align with CRAN package\n")
        if list(sorted(old.license)) != list(sorted(new.license)) or old.license.combination != new.license.combination:
            log.write(" - updated license to align with CRAN package\n")
        if old.license.file is None and new.license.file is not None:
            log.write(" - added license file from CRAN package\n")
        elif old.license.file is not None and new.license.file is None:
            log.write(" - removed license file (no longer in CRAN package)\n")
        for depend in ("build", "lib", "run", "test"):
            old_depends = getattr(old.depends, depend)
            new_depends = getattr(new.depends, depend)
            log_depends(log, depend, diff([i.origin for i in old_depends], sorted(i.origin for i in new_depends)))
        if old.description != new.description:
            log.write(" - update description to align with CRAN package\n")
        if old.website != new.website:
            log.write(" - update website URL to align with CRAN package\n")
        if new.version in new.changelog:
            # Re-fetch the port to detect a stale changelog (entry identical
            # to the previous release's).
            # NOTE(review): asserts old.portname but then uses new.portname -- confirm intended.
            assert old.portname is not None
            port = make_cran_port(new.portname, version=new.version)
            assert port.version == new.version
            if port.version in port.changelog and port.changelog[port.version] == new.changelog[new.version]:
                log.write(" - changelog not updated\n")
            else:
                log.write(" - changelog:\n")
                for line in new.changelog[new.version]:
                    log.write(" -")
                    length = 4
                    # Word-wrap each changelog line at ~75 columns.
                    for word in line.split(" "):
                        length += len(word) + 1
                        if length > 75:
                            log.write("\n ")
                            length = 5 + len(word)
                        log.write(" " + word)
                    log.write("\n")
        else:
            log.write(" - no changelog provided\n")
        log.write("\nGenerated by:\tportcran (%s)\n" % __version__)
def update_category(portsdir: Path, category: str, name: str) -> None:
    """Insert ``SUBDIR += name`` into the category Makefile, keeping the
    SUBDIR list in sorted order.  The Makefile is rewritten through a
    temporary file and renamed over the original."""
    entry = " SUBDIR += %s\n" % name
    makefile = portsdir / category / "Makefile"
    tmpfile = portsdir / category / ".Makefile.portcran"
    with makefile.open() as old:
        with tmpfile.open("w") as new:
            has_subdir = False
            # Once the entry is placed (or already present) just copy the rest.
            drain = False
            for line in old.readlines():
                if not drain:
                    if line == entry:
                        # Entry already in the Makefile; nothing to insert.
                        drain = True
                    if line.lstrip().startswith("SUBDIR"):
                        has_subdir = True
                        if line > entry:
                            # First SUBDIR sorting after ours: insert before it.
                            new.write(entry)
                            drain = True
                    elif has_subdir:
                        # Left the SUBDIR block without inserting: append here.
                        # NOTE(review): if the file ends while still inside the
                        # SUBDIR block, the entry is never written -- confirm.
                        new.write(entry)
                        drain = True
                new.write(line)
    tmpfile.rename(makefile)
def generate_create_log(cran: CranPort) -> None:
    """Write a Subversion commit message for a newly created CRAN port."""
    log_path = cran.portdir / ".." / ".." / "commit.svn"
    with open(log_path, "w") as log:
        log.write("%s: %s\n" % (cran.origin, cran.comment))
        log.write("\nGenerated by:\tportcran (%s)\n" % __version__)
def main() -> None:
    """Command-line entry point: registers and dispatches the ``update`` and
    ``create`` sub-commands."""
    command = Command(__summary__)
    @command("update", "update a CRAN port")
    def update(args: Namespace) -> None:
        # Regenerate an existing CRAN port in place and write a commit log.
        port = Ports.get_port_by_name(Cran.PKGNAMEPREFIX + args.name)
        assert isinstance(port, CranPort)
        cran = make_cran_port(args.name, portdir=port._portdir)  # NOTE(review): reaches into a private attribute
        cran.generate()
        generate_update_log(port, cran)
    update.add_argument("name", help="name of the CRAN package")
    # NOTE(review): --output is declared but never read by update().
    update.add_argument("-o", "--output", help="output directory")
    @command("create", "create a CRAN port")
    def create(args: Namespace) -> None:
        # Create a brand new CRAN port under the first listed category.
        if args.address is not None:
            Platform.address = args.address
        categories = args.categories.split(",")
        for category in categories:
            if category not in Ports.categories:
                print("err: %s in not a ports category" % category)  # NOTE(review): message typo "in not"
                exit(ERR_CATEGORY)
        portsdir = Ports.dir if args.portsdir is None else Path(args.portsdir)
        category = categories[0]
        name = Cran.PKGNAMEPREFIX + args.name
        portdir = portsdir / category / name
        cran = make_cran_port(args.name, portdir)
        cran.categories = categories
        cran.maintainer = Platform.address
        try:
            port = Ports.get_port_by_name(name)
            print("err: CRAN port %s already exists at %s" % (args.name, port.origin))
            exit(ERR_EXISTS)
        except PortError:
            # PortError means the port does not exist yet -- the good path.
            pass
        portdir.mkdir()
        update_category(portsdir, category, name)
        cran.generate()
        generate_create_log(cran)
    create.add_argument("name", help="name of the CRAN package")
    create.add_argument("-a", "--address", help="creator's email address")
    create.add_argument("-c", "--categories", default="math", help="comma separated list of the CRAN port's categories")
    create.add_argument("-p", "--portsdir", help="output ports directory")
    command.execute(argv[1:])
# Script entry point: report PortError failures as "err: ..." and exit non-zero.
if __name__ == "__main__":
    try:
        main()
    except PortError as ex:
        print("err: %s" % ex)
        exit(ERR_GENERAL)
#!/usr/bin/env python
##
# Copyright (c) 2012-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
"""
This tool takes data from stdin and validates it as iCalendar data suitable
for the server.
"""
from calendarserver.tools.cmdline import utilityMain, WorkerService
from twisted.internet.defer import succeed
from twisted.python.text import wordWrap
from twisted.python.usage import Options
from twistedcaldav.config import config
from twistedcaldav.ical import Component, InvalidICalendarDataError
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
import os
import sys
def usage(e=None):
    """Print an optional error message followed by the command's help text,
    then exit: status 64 when an error was given, 0 otherwise."""
    if e:
        print(e)
        print("")
    try:
        ValidOptions().opt_help()
    except SystemExit:
        pass
    sys.exit(64 if e else 0)
# Usage banner for the options synopsis, word-wrapped to the terminal width
# (COLUMNS environment variable, defaulting to 80).
description = '\n'.join(
    wordWrap(
        """
Usage: validcalendardata [options] [input specifiers]\n
""",
        int(os.environ.get('COLUMNS', '80'))
    )
)
class ValidOptions(Options):
    """
    Command-line options for 'validcalendardata'
    """
    synopsis = description
    optFlags = [
        ['verbose', 'v', "Verbose logging."],
        ['debug', 'D', "Debug logging."],
        ['parse-only', 'p', "Only validate parsing of the data."],
    ]
    optParameters = [
        ['config', 'f', DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
    ]
    def __init__(self):
        super(ValidOptions, self).__init__()
        # '-' means stdout / stdin respectively.
        self.outputName = '-'
        self.inputName = '-'
    def opt_output(self, filename):
        """
        Specify output file path (default: '-', meaning stdout).
        """
        self.outputName = filename
    opt_o = opt_output
    def openOutput(self):
        """
        Open the appropriate output file based on the '--output' option.
        """
        if self.outputName == '-':
            return sys.stdout
        else:
            return open(self.outputName, "wb")
    def opt_input(self, filename):
        """
        Specify input file path (default: '-', meaning stdin).
        """
        self.inputName = filename
    opt_i = opt_input
    def openInput(self):
        """
        Open the appropriate input file based on the '--input' option.
        """
        if self.inputName == '-':
            return sys.stdin
        else:
            return open(os.path.expanduser(self.inputName), "rb")
errorPrefix = "Calendar data had unfixable problems:\n "
class ValidService(WorkerService, object):
    """
    Service which runs, exports the appropriate records, then stops the reactor.
    """
    def __init__(self, store, options, output, input, reactor, config):
        super(ValidService, self).__init__(store)
        self.options = options
        self.output = output
        self.input = input
        self.reactor = reactor
        self.config = config
        self._directory = None
    def doWork(self):
        """
        Start the service.
        """
        # NOTE(review): calling startService() from doWork() looks unusual --
        # confirm this matches the WorkerService lifecycle.
        super(ValidService, self).startService()
        if self.options["parse-only"]:
            result, message = self.parseCalendarData()
        else:
            result, message = self.validCalendarData()
        if result:
            print("Calendar data OK")
        else:
            print(message)
        return succeed(None)
    def parseCalendarData(self):
        """
        Check the calendar data for valid iCalendar data.

        Returns a (result, message) pair; on failure `message` carries the
        error text with the standard prefix stripped.
        """
        result = True
        message = ""
        try:
            component = Component.fromString(self.input.read())
            # Do underlying iCalendar library validation with data fix
            fixed, unfixed = component._pycalendar.validate(doFix=True)
            if unfixed:
                raise InvalidICalendarDataError("Calendar data had unfixable problems:\n %s" % ("\n ".join(unfixed),))
            if fixed:
                print("Calendar data had fixable problems:\n %s" % ("\n ".join(fixed),))
        except ValueError, e:
            result = False
            message = str(e)
            if message.startswith(errorPrefix):
                message = message[len(errorPrefix):]
        return (result, message,)
    def validCalendarData(self):
        """
        Check the calendar data for valid iCalendar data.

        Unlike parseCalendarData this runs the full server-side checks
        (CalDAV validity, organizer rules) and truncates long RRULEs when the
        configuration caps instance counts.
        """
        result = True
        message = ""
        truncated = False
        try:
            component = Component.fromString(self.input.read())
            if getattr(self.config, "MaxInstancesForRRULE", 0) != 0:
                truncated = component.truncateRecurrence(config.MaxInstancesForRRULE)
            component.validCalendarData(doFix=False, validateRecurrences=True)
            component.validCalendarForCalDAV(methodAllowed=True)
            component.validOrganizerForScheduling(doFix=False)
        except ValueError, e:
            result = False
            message = str(e)
            if message.startswith(errorPrefix):
                message = message[len(errorPrefix):]
        if truncated:
            message = "Calendar data RRULE truncated\n" + message
        return (result, message,)
def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
    """
    Do the export.

    Parses the command line, opens input/output streams, then hands a
    service factory to utilityMain which drives the reactor.
    """
    if reactor is None:
        from twisted.internet import reactor
    options = ValidOptions()
    options.parseOptions(argv[1:])
    try:
        output = options.openOutput()
    except IOError, e:
        stderr.write("Unable to open output file for writing: %s\n" % (e))
        sys.exit(1)
    try:
        input = options.openInput()
    except IOError, e:
        stderr.write("Unable to open input file for reading: %s\n" % (e))
        sys.exit(1)
    def makeService(store):
        # Factory used by utilityMain to build the worker service.
        return ValidService(store, options, output, input, reactor, config)
    utilityMain(options["config"], makeService, reactor, verbose=options["debug"])
# Script entry point.
if __name__ == "__main__":
    main()
| 6,484 | 1,844 |
import torch
from torch import nn
from utils.utils import compute_conv_output_img_dims
def test_compute_conv_dims_out():
    """Brute-force check of compute_conv_output_img_dims() against the
    spatial shape a real nn.Conv2d produces, over a grid of hyper-parameters."""
    for side in [63, 64, 65, 66]:
        image_dims = (side, side)
        batch = torch.randn((10, 1) + image_dims)
        for pad in [0, 1, 2]:
            for dil in [1, 2, 3]:
                for step in [1, 2, 3]:
                    for ksize in [2, 3, 4, 5]:
                        conv = nn.Conv2d(
                            in_channels=1,
                            out_channels=1,
                            kernel_size=ksize,
                            stride=step,
                            padding=pad,
                            dilation=dil,
                        )
                        predicted = compute_conv_output_img_dims(
                            dims_img=image_dims,
                            kernel_size=ksize,
                            stride=step,
                            padding=pad,
                            dilation=dil,
                        )
                        assert predicted == conv(batch).shape[2:]
| 1,243 | 363 |
# Minimal tkinter demo: packs one of each common widget into the root window.
import tkinter as tk
parent = tk.Tk()
# tk.WidgetName(parent_frame, options)
tk.Entry(parent, width=25).pack()
tk.Button(parent, text="LOOKOUT!").pack()
tk.Checkbutton(parent, text='RememberMe', variable=tk.IntVar()).pack()
tk.Label(parent, text="What's Your Name?").pack()
tk.OptionMenu(parent, tk.IntVar(), "Select Age", "15+", "25+", "40+", "60+").pack()
tk.Scrollbar(parent, orient=tk.VERTICAL).pack()
# NOTE(review): each Radiobutton gets its own IntVar, so the two buttons are
# not mutually exclusive -- share one variable if that was intended.
tk.Radiobutton(parent, text='Democratic', variable=tk.IntVar(), value=3).pack()
tk.Radiobutton(parent, text='Republican', variable=tk.IntVar(), value=5).pack()
parent.mainloop()  # blocks until the window is closed
| 589 | 223 |
from ..base import X11DriverBaseRecipe
class Xf86VideoNouveauRecipe(X11DriverBaseRecipe):
    """Build recipe for the xf86-video-nouveau X11 driver."""

    def __init__(self, *args, **kwargs):
        super(Xf86VideoNouveauRecipe, self).__init__(*args, **kwargs)
        self.name = 'xf86-video-nouveau'
        self.version = '1.0.13'
        # Checksum of the release tarball (split for line length).
        self.sha256 = ('6d9242ba139c3df7afefffb455573b52'
                       'f4427920b978161c00483c64a6da47cb')
        self.depends = ['xorg-server']
| 432 | 195 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Encoder(nn.Module):
    """Convolutional encoder: a reflection-padded 7x7 stem followed by
    n_downsample stride-2 conv blocks, ending in tanh.

    NOTE(review): the `encoder_type` argument is accepted but never used.
    """
    def __init__(self, in_channels=3, dim=64, n_downsample=3, encoder_type='convolutional'):
        super(Encoder, self).__init__()
        # Initial convolution block
        layers = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels, dim*2, 7),
            # NOTE(review): the conv above outputs dim*2 channels but this norm
            # is built with num_features=dim; harmless only while affine stays
            # False (the default) -- confirm intended.
            nn.InstanceNorm2d(dim),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Downsampling: channels grow dim*2*(i+1) -> dim*2*(i+2), then stay
        # at dim*10 once i exceeds 3.
        for i in range(n_downsample):
            if i <= 3:
                layers += [
                    nn.Conv2d(dim*2*(i+1), dim * (i+2)*2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (i+2)*2),
                    nn.ReLU(inplace=True),
                ]
            else:
                layers += [
                    nn.Conv2d(dim*2*(5), dim * (5)*2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (5)*2),
                    nn.ReLU(inplace=True),
                ]
        self.model_blocks = nn.Sequential(*layers, nn.Tanh())
    def forward(self, x):
        # Returns the tanh-squashed latent feature map.
        x = self.model_blocks(x)
        return x
class Decoder(nn.Module):
    """Convolutional decoder mirroring Encoder: n_upsample transposed-conv
    blocks followed by a reflection-padded two-conv output head.

    NOTE(review): `encoder_type` is unused, and the activation chosen in
    __init__ is never applied in forward() (the call is commented out).
    """
    def __init__(self, out_channels=3, dim=64, n_upsample=3, encoder_type='convolutional', activation='relu'):
        super(Decoder, self).__init__()
        layers = []
        dim = dim
        # Upsampling
        for i in reversed(range(n_upsample)):
            # print(i)
            if i > 3:
                print('Arjuna')  # NOTE(review): leftover debug print
                layers += [
                    nn.ConvTranspose2d(dim * (5)*2, dim * (5)*2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (5)*2),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
            else:
                layers += [
                    nn.ConvTranspose2d(dim * (i + 2)*2, dim * (i + 1)*2, 4, stride=2, padding=1),
                    nn.InstanceNorm2d(dim * (i + 1)*2),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
        # Output layer
        # layers += [nn.ReflectionPad2d(3), nn.Conv2d(dim, out_channels, 7)]
        # NOTE(review): `i` below is the last loop value (0 when n_upsample > 0),
        # and this line raises NameError when n_upsample == 0 -- confirm callers
        # always pass n_upsample >= 1.
        layers += [nn.ReflectionPad2d(4), nn.Conv2d(dim * (i + 1)*2, out_channels, 3), nn.Conv2d(out_channels, out_channels, 7)]
        self.model_blocks = nn.Sequential(*layers)
        if activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'relu':
            self.activation = nn.ReLU()
    def forward(self, x):
        # print(x.shape)
        x = self.model_blocks(x)
        #x = self.activation(x)
        return x
class AE(nn.Module):
    """Plain autoencoder: Encoder feeding straight into Decoder."""

    def __init__(self, in_channels, out_channels, dims=64, n_downsample=4):
        super(AE, self).__init__()
        self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='regular')
        self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample, activation='relu')

    def forward(self, x):
        # Encode to the latent feature map, then reconstruct.
        latent = self.encoder(x)
        return self.decoder(latent)
class VAE(nn.Module):
    """Variational autoencoder wrapper around Encoder/Decoder.

    NOTE(review): forward() unpacks `mu, z` from the encoder, but the Encoder
    defined in this file returns a single tensor -- this will fail at runtime
    unless a variational encoder is substituted. Confirm.
    """
    def __init__(self, in_channels, out_channels, dims=64, n_downsample=3):
        super(VAE, self).__init__()
        self.encoder = Encoder(in_channels, dim=dims, n_downsample=n_downsample, encoder_type='variational')
        self.decoder = Decoder(out_channels, dim=dims, n_upsample=n_downsample)
    def forward(self, x):
        mu, z = self.encoder(x)
        out = self.decoder(z)
        return out
# tests_collect_fractions.py
import unittest
from collect_fractions import (
validate_input_collect,
lcm,
collect_fractions
)
class TestValidateInputCollect(unittest.TestCase):
    """Tests for validate_input_collect().

    The original tests repeated a manual try/except/assertIsNotNone pattern
    six times; it is factored into one helper using assertRaises, which also
    fails properly when no exception is raised at all.
    """

    def _assert_raises_message(self, fractions, message):
        # The validator must raise, and the exception text must match exactly.
        with self.assertRaises(Exception) as ctx:
            validate_input_collect(fractions)
        self.assertEqual(str(ctx.exception), message)

    def test_validation_passes_with_correct_input(self):
        # Must not raise.
        validate_input_collect([(1, 3), (4, 5)])

    def test_validation_raises_exception_with_empty_list(self):
        self._assert_raises_message([], 'List cannot be empty.')

    def test_validation_raises_exception_if_fractions_is_not_of_type_list(self):
        self._assert_raises_message(((1, 3), (4, 5)), 'Argument can only be of type "list".')

    def test_validation_raises_exception_if_length_of_element_is_not_two(self):
        self._assert_raises_message([(1, 2), (1, 3, 4)], 'Tuple can only contain 2 elements.')

    def test_validation_raises_exception_if_one_of_the_elements_of_the_tuples_is_not_integer(self):
        self._assert_raises_message([(1, 5), (1, 2.0)], 'Tuple can only contain integers.')

    def test_validation_raises_exception_if_one_of_the_elements_has_denominator_zero(self):
        # "devide" typo matches the message produced by the implementation.
        self._assert_raises_message([(1, 2), (1, 0)], 'Cannot devide by zero.')
class TestCollectFractions(unittest.TestCase):
    """Behavioral tests for collect_fractions()."""

    def test_collect_fractions_passes_with_only_one_element_in_list(self):
        # A single fraction is returned unchanged.
        self.assertEqual(collect_fractions([(1, 7)]), (1, 7))

    def test_collect_fraction_passes_with_more_than_one_element_in_list(self):
        # 1/4 + 1/2 reduces to 3/4.
        self.assertEqual(collect_fractions([(1, 4), (1, 2)]), (3, 4))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
import gzip
import logging
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
from yapw.methods.blocking import ack
from exporter.util import Export, create_client
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Start a worker to export files from collections in Kingfisher Process.
    Data is exported as gzipped line-delimited JSON files, with one file per year and one ``full.jsonl.gz`` file.
    Multiple workers can run at the same time.
    """
    def handle(self, *args, **options):
        # Blocks forever, consuming messages from the exporter_init queue.
        create_client().consume(callback, "exporter_init")
def callback(state, channel, method, properties, input_message):
    """Consume one exporter_init message: dump every compiled release of the
    collection to line-delimited JSON (full + per-year + per-month files),
    then gzip each file and remove the raw .jsonl."""
    collection_id = input_message.get("collection_id")
    job_id = input_message.get("job_id")
    export = Export(job_id)
    dump_file = export.directory / "full.jsonl"
    try:
        export.directory.mkdir(parents=True)
    except FileExistsError:
        # Directory left over from a previous attempt: clear stale files.
        # NOTE(review): list comprehension used purely for side effects.
        [f.unlink() for f in export.directory.glob("*") if f.is_file()]
    export.lock()
    id = 0
    page = 1
    files = {}
    # acknowledge message processing now to avoid connection loses
    # the rest can run for hours and is irreversible anyways
    ack(state, channel, method.delivery_tag)
    # load data from kf-process and save
    while True:
        with connections["kingfisher_process"].cursor() as cursor:
            logger.debug("Processing page %s with id > %s", page, id)
            # Keyset pagination on d.id, EXPORTER_PAGE_SIZE rows per page.
            cursor.execute(
                """
                SELECT d.id, d.data, d.data->>'date'
                FROM compiled_release c
                JOIN data d ON (c.data_id = d.id)
                WHERE collection_id = %s
                AND d.id > %s
                ORDER BY d.id
                LIMIT %s
                """,
                [collection_id, id, settings.EXPORTER_PAGE_SIZE],
            )
            records = cursor.fetchall()
        if not records:
            break
        with open(dump_file, "a") as full:
            files[dump_file] = full
            for r in records:
                id = r[0]
                full.write(r[1])
                full.write("\n")
                # annual and monthly dump
                if r[2] is not None and len(r[2]) > 9:
                    year_path = export.directory / f"{int(r[2][:4])}.jsonl"
                    if year_path not in files:
                        files[year_path] = year_path.open("a")
                    files[year_path].write(r[1])
                    files[year_path].write("\n")
                    month_path = export.directory / f"{int(r[2][:4])}_{r[2][5:7]}.jsonl"
                    if month_path not in files:
                        files[month_path] = month_path.open("a")
                    files[month_path].write(r[1])
                    files[month_path].write("\n")
        page = page + 1
        # last page
        if len(records) < settings.EXPORTER_PAGE_SIZE:
            break
    # Close every handle and gzip each dump file.
    for path, file in files.items():
        file.close()
        with path.open("rb") as f_in:
            with gzip.open(f"{path}.gz", "wb") as f_out:
                shutil.copyfileobj(f_in, f_out)
        path.unlink()
    export.unlock()
| 3,278 | 979 |
# Local filesystem locations.
# NOTE(review): hard-coded machine-specific absolute paths -- consider
# environment variables or a config file.
code_path = "F:/发表论文/apsec 2021/TiTIC"
data_path = "D:/fasttext_data"
| 71 | 43 |
# terrascript/pingdom/d.py
import terrascript
class pingdom_contact(terrascript.Data):
    # Terraform data source `pingdom_contact`; behavior comes from the base class.
    pass
class pingdom_team(terrascript.Data):
    # Terraform data source `pingdom_team`; behavior comes from the base class.
    pass
| 147 | 54 |
from __future__ import absolute_import
from .service import AdminFeatureService, AdminFeatureServiceLayer, AdminMapService, Services
__version__ = "3.5.6" | 154 | 41 |
import copy
import os
import pickle
import torch
import torch.nn as nn
import numpy as np
from library import inputs, eval_inception_score
from Utils.checkpoints import save_context, Logger
from Utils import flags
from Utils import config
import Torture
# ---- configuration & checkpoint restore ----------------------------------
FLAGS = flags.FLAGS
KEY_ARGUMENTS = config.load_config(FLAGS.config_file)
model = FLAGS.old_model
dirname = os.path.dirname(model)
basename = os.path.basename(model)
# The original run stores its config next to the checkpoint; reload it so
# the restored networks are built with the same hyper-parameters.
config_path = os.path.join(dirname, "..", "source", "configs_dict.pkl")
summary_path = os.path.join(dirname, "..", "summary")
with open(config_path, "rb") as f:
    new_dict = pickle.load(f)
new_dict["gpu"] = FLAGS.gpu
FLAGS.set_dict(new_dict)
FLAGS.old_model = "loaded"
text_logger, MODELS_FOLDER, SUMMARIES_FOLDER = save_context(__file__, KEY_ARGUMENTS)
# Fixed seeds for reproducibility.
torch.manual_seed(1234)
torch.cuda.manual_seed(1235)
np.random.seed(1236)
# NOTE(review): deterministic=True together with benchmark=True is
# contradictory; benchmark may select non-deterministic algorithms.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Data iterators and the generator / discriminator / classifier networks.
itr = inputs.get_data_iter(batch_size=100, subset=1000)
itr_u = inputs.get_data_iter(batch_size=100)
netG, optim_G = inputs.get_generator_optimizer()
netD, optim_D = inputs.get_discriminator_optimizer()
netC, optim_c = inputs.get_classifier_optimizer()
netC_T, _ = inputs.get_classifier_optimizer()
netG, netD, netC = netG.to(device), netD.to(device), netC.to(device)
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
netC = nn.DataParallel(netC)
netC_T = nn.DataParallel(netC_T)
# Register all modules/optimizers and restore them from the checkpoint.
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(
    netG=netG,
    netD=netD,
    netC=netC,
    netC_T=netC_T,
    optim_G=optim_G,
    optim_D=optim_D,
    optim_c=optim_c,
)
checkpoint_io.load_file(model)
logger = Logger(log_dir=SUMMARIES_FOLDER)
# with torch.no_grad():
# netG.eval()
# data, label = itr.__next__()
# sample_z = torch.randn(FLAGS.bs_g, FLAGS.g_z_dim).to(device)
# tlabel = label[: FLAGS.bs_g // 10]
# tlabel = torch.cat([tlabel for _ in range(10)], 0)
# x_fake = netG(sample_z, tlabel)
# logger.add_imgs(x_fake, "imgtest", nrow=FLAGS.bs_g // 10)
# itr_test = inputs.get_data_iter(batch_size=100, train=False, infinity=False)
# netC_T.eval()
# total, correct = 0, 0
# for images, labels in itr_test:
# images, labels = images.to(device), labels.to(device)
# outputs = netC_T(images)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# print(total, correct, correct / total)
# # # # Inception score
# Generate 50k samples (500 batches of 100) with the restored generator and
# pickle them for later Inception-score evaluation.
with torch.no_grad():
    netG.eval()
    img_list = []
    for _ in range(500):
        sample_z = torch.randn(100, FLAGS.g_z_dim).to(device)
        data, label = itr.__next__()
        # print(label.shape, sample_z.shape)
        x_fake = netG(sample_z.to(device), label.to(device))
        img_list.append(x_fake.data.cpu().numpy() * 0.5 + 0.5)  # undo [-1, 1] normalisation
img_list = np.concatenate(img_list, axis=0)
img_list = (np.transpose(img_list, [0, 2, 3, 1]) * 255).astype(np.uint8)  # NCHW -> NHWC uint8
new_img_list = []
for i in range(50000):
    new_img_list.append(img_list[i])
with open("image.pkl", "wb") as f:
    pickle.dump(new_img_list, f)
# NOTE(review): exit() makes the two lines below unreachable.
exit()
print(img_list.shape)
print(eval_inception_score.get_inception_score(new_img_list))
| 3,357 | 1,387 |
from .base_page import BasePage
from .locators import ProductPageLocators
class ProductPage(BasePage):
    """Page object for a product detail page: add-to-basket flow assertions."""
    def should_be_button_add_to_basket(self):
        assert self.is_element_present(
            *ProductPageLocators.BUTTON_ADD_TO_BASKET), "Button add to basket is not presented"
    def click_button_add_to_basket(self):
        self.browser.find_element(*ProductPageLocators.BUTTON_ADD_TO_BASKET).click()
    def should_be_alert_about_adding_product_to_basket(self):
        assert self.is_element_present(
            *ProductPageLocators.ALERT_ABOUT_ADDING_PRODUCT_TO_BASKET), "Alert with name of product is not presented"
    def should_be_product_name_in_alert_match_product_name(self):
        # The success alert must mention exactly the product just added.
        text_product_name_in_basket = self.browser.find_element(
            *ProductPageLocators.PRODUCT_NAME_IN_BASKET).text
        text_product_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME).text
        assert text_product_name == text_product_name_in_basket, \
            "A message about adding a product to the basket does not contain information about the desired product"
    # NOTE(review): method name typo ("shold") -- renaming would break existing
    # callers; fix together with the tests that use it.
    def shold_be_alert_with_cost_basket(self):
        assert self.browser.find_element(
            *ProductPageLocators.ALERT_WITH_COST_BASKET), "Alert with cost of basket is not presented"
    def should_be_cost_product_in_alert_with_cost_basket(self):
        # Basket total shown in the alert must equal the single product's price.
        text_cost_basket_in_alert = self.browser.find_element(*ProductPageLocators.COST_BASKET_IN_ALERT).text
        text_cost_product = self.browser.find_element(*ProductPageLocators.COST_PRODUCT).text
        assert text_cost_basket_in_alert == text_cost_product, \
            "The cost of the basket does not match the cost of the product"
    def should_not_be_success_message(self):
        assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"
    # NOTE(review): "dissapear" typo in the method name; same caveat as above.
    def should_dissapear_of_success_message(self):
        assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"
| 2,093 | 654 |
import pandas as pd
class CASTableBase(object):
    """Base class for tables mirrored into SAS Viya CAS.

    Subclasses configure where data comes from (SQL text, another CAS table,
    or a "decision" module) and where it lands (cas_table_name / caslib).
    NOTE(review): these are class-level attributes shared by all instances
    until shadowed by per-instance assignment.
    """
    source_sql = None        # SQL query text when sourcing from the database
    source_data = None       # cached pandas DataFrame of source rows
    source_cas = None        # name of a source CAS table, if any
    source_caslib = None     # caslib containing source_cas
    cas_table_name = None    # destination CAS table name
    caslib = None            # destination caslib
    decision_source = None   # name of a decision class inside the 'CAS' module
    decision = None          # instantiated decision object (see set_decision_source)
    db_conn = None           # database connection wrapper exposing .conn
    clean_up = False         # drop the CAS table when this object is destroyed
    def __init__(self, viya_conn, db_conn=None):
        self.viya_conn = viya_conn
        self.register_db_connection(db_conn)
        self.set_decision_source()
    def __del__(self):
        # Best-effort cleanup; remove_from_cas swallows failures.
        if self.clean_up:
            self.remove_from_cas()
    def register_db_connection(self, db_conn):
        self.db_conn = db_conn
    def set_decision_source(self):
        # Dynamically look up the decision class by name in the 'CAS' module.
        if self.decision_source is None:
            return
        module_obj = __import__('CAS')
        if hasattr(module_obj, self.decision_source):
            decision_module = getattr(module_obj, self.decision_source)
            self.decision = decision_module(self.db_conn, self.viya_conn)
    def remove_from_cas(self):
        try:
            self.viya_conn.drop_cas_table(self.cas_table_name, self.caslib)
        except:
            # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit;
            # acceptable only because this runs from __del__.
            pass
    def update_from_records(self, records):
        self.viya_conn.update_cas_table(records, self.cas_table_name, self.caslib)
    def update_from_source(self):
        self.update_from_records(self.get_source_data())
    def get_source_data(self):
        """Return (and cache) the source DataFrame, resolving the configured
        source in priority order: cached -> CAS table -> decision -> SQL."""
        if self.source_data is not None:
            return self.source_data
        self.pre_process_source_data()
        if self.source_cas and self.source_caslib:
            self.source_data = self.viya_conn.get_cas_table(self.source_cas, self.source_caslib)
        elif self.decision_source:
            self.decision.exec()
            self.source_data = self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)
        else:
            if self.source_sql is not None:
                self.source_data = self.read_sql(self.source_sql, True)
            # Drop a stray 'index' column if present, then rebuild a clean frame.
            try:
                self.source_data.drop(['index'], axis=1, inplace=True)
            except KeyError:
                pass
            except IndexError:
                pass
            self.source_data = pd.DataFrame().from_records(self.source_data.to_records())
        self.post_process_source_data()
        return self.source_data
    def pre_process_source_data(self):
        # Hook for subclasses; runs before the source is resolved.
        pass
    def post_process_source_data(self):
        # Hook for subclasses; runs after self.source_data is populated.
        pass
    def get_from_cas(self):
        return self.viya_conn.get_cas_table(self.cas_table_name, self.caslib)
    def read_sql(self, sql, clear_index=False):
        """Run *sql* against the registered DB connection; clear_index drops
        the index column from the resulting frame."""
        self.__check_db_conn()
        if clear_index:
            return pd.read_sql_query(sql, self.db_conn.conn, index_col=None)
        else:
            return pd.read_sql_query(sql, self.db_conn.conn)
    def __check_db_conn(self):
        if self.db_conn is None:
            raise Exception('Please register a valid DB connection before using this method')
| 2,863 | 919 |
# Read six numbers from stdin and report how many are non-negative
# (>= 0, so zero counts as "positive" here).
cont = 0
for i in range(6):
    if float(input()) >= 0: cont += 1
print(f'{cont} valores positivos')
| 102 | 43 |
# coding=utf-8
import numpy as np
def colKRproduct(A, B):
    '''
    Columnwise Khatri-Rao product between matrix A and B.

    Column i of the result is kron(A[:, i], B[:, i]), so the output has
    shape (A.shape[0] * B.shape[0], q).

    Raises TypeError when A and B have different column counts (kept as
    TypeError for backward compatibility with existing callers).
    '''
    A = np.asarray(A)
    B = np.asarray(B)
    if A.shape[1] != B.shape[1]:
        raise TypeError("A and B must have the same number of columns")
    q = A.shape[1]
    # Allocate with the promoted dtype: np.zeros defaults to float64, which
    # silently truncated the imaginary part of complex inputs (this module
    # works with conjugated data, so complex inputs are expected).
    C = np.zeros([A.shape[0]*B.shape[0], q], dtype=np.result_type(A, B))
    for i in np.arange(q):
        C[:, i] = np.kron(A[:, i], B[:, i])
    return C
def colKRproduct_conj_self(A):
    """Columnwise Khatri-Rao product of conj(A) with A itself.

    Column i of the result is kron(conj(A[:, i]), A[:, i]).
    """
    def kron_with_conj(column):
        return np.kron(column.conj(), column)
    return np.apply_along_axis(kron_with_conj, 0, A)
def Xi(nMicX,nMicY):
    '''
    Return the permutation matrix \Xi.

    NOT IMPLEMENTED: currently prints a warning and returns an all-zeros
    (nMicX*nMicY) x (nMicX*nMicY) matrix.
    '''
    Xi = np.zeros([nMicX*nMicY,nMicX*nMicY])
    print("XI() NOT IMPLEMENTED")
    return Xi
def S2Z(S,nMicX,nMicY):
    '''
    Rearrange the block matrix S into Z: each nMicY x nMicX block of S is
    flattened in Fortran (column-major) order into one column of Z.

    NOTE(review): the loops run over np.arange(nMicX) but the indexing
    strides by nMicY -- this is consistent only when nMicX == nMicY; confirm
    for rectangular microphone arrays.
    '''
    Z = np.zeros([nMicX*nMicY,nMicX*nMicY], dtype = S.dtype)
    for x in np.arange(nMicX):
        for y in np.arange(nMicX):
            Z[:,y+x*nMicY] = np.reshape(
                S[y*nMicY:(y+1)*nMicY,x*nMicX:(x+1)*nMicX],
                newshape = [nMicX*nMicY],
                order="F")
    return Z
def spark(A):
    """Return the spark of A: the size of the smallest linearly dependent
    subset of rows of A.T (i.e. columns of A), or n+1 when every subset up to
    size n is independent.  Returns 0 when A.T has more columns than rows."""
    from itertools import combinations
    from numpy import linalg
    rows = np.array(A).T
    m, n = rows.shape
    if n > m:
        return 0
    for size in range(1, n + 1):
        for chosen in combinations(range(m), size):
            subset = np.array([rows[index] for index in chosen])
            if linalg.matrix_rank(subset) < size:
                return size
    return n + 1
| 1,523 | 660 |
import argparse
import os
from pathlib import Path
import gtsfm.utils.logger as logger_utils
from gtsfm.loader.loader_base import LoaderBase
from gtsfm.loader.olsson_loader import OlssonLoader
from gtsfm.runner.gtsfm_runner_base import GtsfmRunnerBase
DATA_ROOT = Path(__file__).resolve().parent.parent.parent / "tests" / "data"
logger = logger_utils.get_logger()
class GtsfmRunnerOlssonLoader(GtsfmRunnerBase):
    """Runner that executes GTSFM on datasets stored in Olsson's Lund format."""
    def __init__(self):
        super(GtsfmRunnerOlssonLoader, self).__init__(tag="GTSFM on Dataset in Olsson's Lund format")
    def construct_argparser(self) -> argparse.ArgumentParser:
        # Extend the base parser with dataset-specific options.
        parser = super(GtsfmRunnerOlssonLoader, self).construct_argparser()
        parser.add_argument("--dataset_root", type=str, default=os.path.join(DATA_ROOT, "set1_lund_door"), help="")
        parser.add_argument("--image_extension", type=str, default="JPG", help="")
        return parser
    def construct_loader(self) -> LoaderBase:
        """Build an OlssonLoader from the parsed CLI arguments."""
        loader = OlssonLoader(
            self.parsed_args.dataset_root,
            image_extension=self.parsed_args.image_extension,
            max_frame_lookahead=self.parsed_args.max_frame_lookahead,
            max_resolution=self.parsed_args.max_resolution,
        )
        return loader
# Script entry point.
if __name__ == "__main__":
    runner = GtsfmRunnerOlssonLoader()
    runner.run()
| 1,337 | 429 |
# Python 2 smoke test exercising integer builtins (note: py2-only syntax).
# can't try large numbers yet due to lack of long
for i in xrange(1, 100):
    for j in xrange(1, 100):
        print i.__divmod__(j)
# Bitwise operators on small ints.
for i in xrange(1, 12):
    for j in xrange(1, 12):
        print i | j
        print i & j
        print i ^ j
# Exponentiation edge cases; -1 ** 0 parses as -(1 ** 0) == -1.
print 1 ** 0
print 0 ** 0
print -1 ** 0
| 289 | 123 |
import os.path
from setuptools import setup, find_packages
# Read install requirements from the adjacent requirements.txt; setuptools
# accepts a newline-separated string for install_requires.
with open(os.path.join(os.path.dirname(__file__), "requirements.txt")) as f:
    requirements = f.read().strip()
setup(
    name="reverse",
    version="0.0.0",
    description="Reverse data",
    packages=find_packages(exclude=["tests"]),
    package_data={"reverse": ["metadata/*"]},
    install_requires=requirements,
)
| 384 | 126 |
import torch
import numpy as np
import math
class CompositionalTwoArmedBandit():
    """Two-armed bandit task whose arms are cued by binary context vectors.

    ``probs`` unpacks as (low_prob, high_prob): per trial the high-rewarding
    arm pays 1 with probability high_prob and the low arm with low_prob
    (see _sample_one_episode).
    """
    def __init__(self, probs, ctx_dim, num_arms, num_ctx=400, max_ctx=1000):
        self.probs = np.asarray(probs)
        self.num_arms = num_arms
        self.ctx_dim = ctx_dim
        self.num_ctx = num_ctx
        self.max_ctx = max_ctx
        # Pre-generate num_ctx unique binary context vectors.
        self.context = self.make_contexts(ctx_dim, num_ctx, max_ctx)
    def sample(self, num_episodes=1000, num_trials=100, prob=None, cxt_per_epoch=False, repeats=None):
        """Sample (X, Y) tensors covering num_episodes episodes of num_trials
        trials each.

        NOTE(review): the `prob` parameter is accepted but never used.
        """
        if cxt_per_epoch:
            # generate unique contexts
            self.context = self.make_contexts(self.ctx_dim, self.num_ctx, self.max_ctx)
        # group them into high and rewarding
        highrwd_context = self.context[:int(self.num_ctx/2)]
        lowrwd_context = self.context[int(self.num_ctx/2):]
        # make copies and generate samples for both contexts
        highsamples = self.make_bag_of_tasks(num_episodes, repeats=repeats)
        lowsamples = highsamples.copy()
        np.random.shuffle(lowsamples)
        # set low and high probs
        low_prob, high_prob = self.probs
        probs = self.probs.copy()
        X, Y = [], []
        ctx = torch.zeros(self.num_arms, self.ctx_dim)
        for hsample, lsample in zip(highsamples, lowsamples):
            # change high and low rewarding arm
            np.random.shuffle(probs)
            # sample contexts and assign to respective arms
            ctx[probs == low_prob] = lowrwd_context[lsample]
            ctx[probs == high_prob] = highrwd_context[hsample]
            x, y = self._sample_one_episode(ctx.reshape(-1), probs, num_trials)
            X.append(x)
            Y.append(y)
        Y = torch.stack(Y)
        X = torch.stack(X)
        return X, Y
    def _sample_one_episode(self, x, probs, num_trials):
        """Draw num_trials Bernoulli rewards for one fixed arm assignment;
        the (flattened) context x is repeated for every trial."""
        X, Y = [], []
        low_prob, high_prob = self.probs
        for _ in range(num_trials):
            y = np.zeros(self.num_arms)
            # probs[::-1] flips (low, high) so the low arm pays with low_prob.
            y[probs == low_prob] = np.random.choice([0, 1], size=(1,), p=self.probs[::-1])
            y[probs == high_prob] = np.random.choice([0, 1], size=(1,), p=self.probs)
            Y.append(torch.as_tensor(y))
            X.append(torch.as_tensor(x).type(torch.FloatTensor))
        return torch.stack(X), torch.stack(Y)
    def make_bag_of_tasks(self, num_episodes, repeats=None):
        """Return a shuffled array of context indices, each repeated so that
        num_episodes episodes are covered."""
        num_contexts_per_group = int(self.num_ctx/2)
        if repeats is None:
            repeats = int(num_episodes/num_contexts_per_group)
        samples = np.repeat(np.arange(num_contexts_per_group), repeats)
        samples = samples[:num_episodes]
        np.random.shuffle(samples)
        return samples
    def make_contexts(self, ctx_dim, num_ctx, max_ctx):
        """Sample num_ctx unique binary context vectors of length ctx_dim
        (rejection-samples batches of max_ctx until enough unique rows exist)."""
        sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
        while len(np.unique(sample_contexts, axis=0))<num_ctx: # such that we sample unique contexts
            sample_contexts = np.random.randint(2, size=(max_ctx, ctx_dim))
        sample_contexts = np.unique(sample_contexts, axis=0)[:num_ctx]
        np.random.shuffle(sample_contexts)
        return torch.tensor(sample_contexts).type(torch.FloatTensor)
| 3,204 | 1,119 |
#!/usr/bin/python
# BUG FIX: the original used the Python 2-only ``file()`` builtin (removed in
# Python 3) and never guaranteed the handle was closed. ``with open(...)``
# works on both Python 2 and 3 and closes the file even on error.
with open('x1.txt', 'w') as f:
    f.write('hello')
| 70 | 34 |
from nba_api.stats.static import players
from nba_api.stats import endpoints
from nba_api.stats.library.parameters import SeasonAll
from nba_api.stats.endpoints import playercareerstats
from nba_api.stats.endpoints import commonplayerinfo
from nba_api.stats.endpoints import playergamelog
import pandas as pd
import time
from random import *
import time
start_time = time.time()  # wall-clock start for the final runtime report
#list of all players
player_dict = players.get_players()
def games_with_x_or_more_points(seasons, x, player_id):
    """Count regular-season games in which the player scored at least x points."""
    total = 0
    for season in seasons:
        time.sleep(0.6)  # throttle requests to stay friendly to the stats API
        gamelog = playergamelog.PlayerGameLog(player_id=player_id, season=season)
        games = gamelog.get_data_frames()[0]
        total += sum(1 for pts in games.loc[:, "PTS"] if pts >= x)
    return total
def get_player_id(fullname):
    """Return the NBA stats id of the first player whose full name matches exactly."""
    matches = [p for p in player_dict if p['full_name'] == fullname]
    return matches[0]['id']
def get_player_seasons(player_id):
    """Return the distinct season years (as 4-char strings) the player appeared in."""
    info = commonplayerinfo.CommonPlayerInfo(player_id=player_id)
    seasons = []
    for row in info.available_seasons.get_dict()["data"]:
        for season_id in row:
            # Season ids carry a one-character prefix before the year digits.
            year = season_id[1:5]
            if year not in seasons:
                seasons.append(year)
    return seasons
# Pick two distinct all-time greats at random and compare their 30-point games.
all_time_great_list_file = open("NBA/alltimegreats.txt", "r")
ALL_TIMERS = []
while True:
    # Each line is numbered ("1. Name"); drop the 3-char prefix — assumes the
    # numbering prefix is always exactly 3 characters, TODO confirm file format.
    line = all_time_great_list_file.readline()[3:].strip()
    if not line:
        break
    ALL_TIMERS.append(line)
# BUG FIX: the file was never closed.
all_time_great_list_file.close()
# BUG FIX: the original indexed with randint(0, 99), which assumes the file has
# exactly 100 entries and raises IndexError for shorter lists; derive the bound
# from the list itself instead.
player1 = ALL_TIMERS[randint(0, len(ALL_TIMERS) - 1)]
player2 = ALL_TIMERS[randint(0, len(ALL_TIMERS) - 1)]
print(player1)
print(player2)
# Re-draw until the two players differ.
while player1 == player2:
    player2 = ALL_TIMERS[randint(0, len(ALL_TIMERS) - 1)]
player1_id = get_player_id(player1)
player2_id = get_player_id(player2)
player1_seasons = get_player_seasons(player1_id)
player2_seasons = get_player_seasons(player2_id)
# Wait for the user before firing the (slow) per-season API queries.
ready = input()
print(player1 + " has " + str(games_with_x_or_more_points(player1_seasons, 30, player1_id)) + " games with 30 or more points")
print(player2 + " has " + str(games_with_x_or_more_points(player2_seasons, 30, player2_id)) + " games with 30 or more points")
print("--- %s seconds ---" % (time.time() - start_time))
import os
from librosa.core import load, stft, istft, magphase
from librosa.output import write_wav
from concurrent.futures import ThreadPoolExecutor
from time import time
import asyncio
import os,glob
import numpy as np
from multiprocessing import cpu_count
#Thanks to https://github.com/jnzhng/keras-unet-vocal-separation
# Audio processing constants: target sample rate plus STFT window/hop sizes.
SAMPLE_RATE = 8192
WINDOW_SIZE = 1024
HOP_LENGTH = 768
def downsample(input_path, output_path):
    """Resample the wav at input_path to SAMPLE_RATE and write it to output_path."""
    audio, _ = load(input_path, sr=SAMPLE_RATE)
    write_wav(output_path, audio, SAMPLE_RATE, norm=True)
    print(f"Saving {output_path}")
def load_as_mag(file):
    """Load a wav at its native rate and return its STFT magnitude as float32."""
    signal, _ = load(file, sr=None)
    complex_spec = stft(signal, n_fft=WINDOW_SIZE, hop_length=HOP_LENGTH)
    magnitude, _ = magphase(complex_spec)
    return magnitude.astype(np.float32)
def save_to_npz(base, sample):
    """Save one sample's mix/vocal/inst magnitude spectrograms as a single npz.

    All three spectrograms are normalised by the mix's peak magnitude so they
    stay mutually comparable, then written to MIR-1K_resized/<sample>.npz.
    (Removed the unused ``nps = {}`` local from the original.)
    """
    mix = load_as_mag(f'{base}/{sample}/mix.wav')
    vocal = load_as_mag(f'{base}/{sample}/vocal.wav')
    inst = load_as_mag(f'{base}/{sample}/inst.wav')
    mix_max = mix.max()
    mix_norm = mix / mix_max
    vocal_norm = vocal / mix_max
    inst_norm = inst / mix_max
    try:
        np.savez_compressed(f'MIR-1K_resized/{sample}.npz', mix=mix_norm, vocal=vocal_norm, inst=inst_norm)
    except Exception as e:
        # Best-effort: report and continue so one bad sample doesn't kill the pool.
        print(e)
if __name__ == '__main__':
    # Source folders of the MIR-1K corpus: vocals, backing track and mixtures.
    voise = 'MIR-1K/voise'
    bg = 'MIR-1K/bg'
    mix = 'MIR-1K/mix'
    name = 0
    resampled_data = 'MIR-1K_resampled_data'
    base = 'MIR-1K'
    foldernames = []
    # One output folder per song, named after the vocal wav files.
    for filename in sorted(glob.glob(os.path.join(voise, '*.wav'))):
        foldernames.append(os.path.split(filename)[-1].replace('.wav',''))
    dirs = foldernames
    # Stage 1: downsample every (mix, inst, vocal) trio in parallel threads.
    with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
        for i in range(len(dirs)):
            target_dir = 'MIR-1K_resampled_data/{}_{:0>2d}/'.format(base, i+1)
            os.makedirs(target_dir, exist_ok=True)
            pool.submit(downsample, f'{mix}/{dirs[i]}.wav', target_dir + 'mix.wav')
            pool.submit(downsample, f'{bg}/{dirs[i]}.wav', target_dir + 'inst.wav')
            pool.submit(downsample, f'{voise}/{dirs[i]}.wav', target_dir + 'vocal.wav')
    # ## Save wav files to npz
    # 1. Load wave files from `corpus_resized`.
    # 2. Apply Short-time Fourier transform (STFT) to audio trios
    # 3. Apply normalization to magnitudes and save as npz dict in `numpy/`
    dirs = sorted(list(os.walk('MIR-1K_resampled_data'))[0][1])
    print(dirs)
    # Stage 2: convert each resampled trio into normalised magnitude spectrograms.
    with ThreadPoolExecutor(max_workers=cpu_count() * 2) as pool:
        #print("!!!")
        for i in range(len(dirs)):
            #print("!!!")
            pool.submit(save_to_npz, 'MIR-1K_resampled_data', dirs[i])
from utils import lr_scheduler, metric, prefetch, summary
import os, sys
import time
import numpy as np
from collections import OrderedDict
import glob
import math
import copy
import tqdm
from sklearn.metrics import roc_auc_score, roc_curve, auc
import matplotlib.pyplot as plt
from torch.cuda.amp import autocast
import cv2
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
# Module-wide RNG with a fixed seed for reproducibility.
rng = np.random.RandomState(2020)
def get_the_number_of_params(model, is_trainable=False):
    """Return the model's total parameter count (only trainable ones if requested)."""
    params = model.parameters()
    if is_trainable:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)
def AUC(anomal_scores, labels):
    """Frame-level ROC-AUC of anomaly scores against ground-truth labels.

    Returns 0 when the AUC cannot be computed (e.g. labels contain only one
    class), mirroring the original best-effort behaviour.
    BUG FIX: the original used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; only ValueError (what roc_auc_score raises
    for degenerate inputs) is caught now.
    """
    frame_auc = 0
    try:
        frame_auc = roc_auc_score(y_true=np.squeeze(labels, axis=0), y_score=np.squeeze(anomal_scores))
    except ValueError:
        print("AUC Cal ERROR: ", labels, anomal_scores)
    return frame_auc
def evaluate_resnet(model, test_batch, args):
    """Top-1 classification evaluation of ``model`` over ``test_batch``.

    Runs the forward pass under autocast (mixed precision) on the GPU and
    returns the accuracy; optionally reports per-batch latency when
    args.evaluate_time is set.
    NOTE(review): when args.label is falsy, ``label`` becomes None and the
    following ``label.view(-1)`` raises — confirm labels are always enabled.
    """
    single_time = metric.AverageMeter('Time', ':6.3f')
    progress = metric.ProgressMeter(len(test_batch), single_time, prefix="Evaluation: ")
    model.eval()
    counter = 0  # total samples seen
    tp = 0  # running count of correct top-1 predictions
    for k, (images, labels) in enumerate(test_batch):
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)
        label = labels if args.label else None
        label = label.view(-1)
        input_image = images.detach()
        a = time.time()
        with autocast():
            logit = model.forward(input_image)
        if args.evaluate_time:
            single_time.update((time.time() - a) * 1000)
            progress.print(counter)
            print("Single batch time cost {}ms".format(1000 * (time.time() - a)))
        class_vector = F.softmax(logit, 1).data.squeeze()
        assert len(class_vector) == len(label), "class number must match"
        probs, idx = class_vector.sort(1, True)
        idx = idx[:,0]  # top-1 class index per sample
        tp += torch.sum(idx.view(-1)==label).item()
        counter += len(label)
    accuracy = tp / counter
    print("INFERENCE ACCURACY IS {}".format(accuracy))
    return accuracy
def visualize(recon, gt):
    """Write side-by-side (reconstruction | ground truth) JPEGs to /data/miaobo/tmp."""
    b, c, h, w = recon.size()
    for idx in range(b):
        pair = torch.cat((recon[idx], gt[idx]), dim=2)
        pair = 255. * (pair + 1.) / 2.  # map [-1, 1] back to [0, 255]
        arr = pair.squeeze(0).byte().cpu().numpy().transpose((1, 2, 0))
        arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
        arr = cv2.resize(arr, (600, 300))
        frame, name = arr, str(int(time.time() * 1000))
        cv2.imwrite(os.path.join("/data/miaobo/tmp", name + ".jpg"), frame)
    return True
def visualize_single(image):
    """Write each image of the batch to /data/miaobo/tmp as a timestamped JPEG."""
    b, c, h, w = image.size()
    for idx in range(b):
        arr = image[idx]
        arr = 255. * (arr + 1.) / 2.  # map [-1, 1] back to [0, 255]
        arr = arr.byte().cpu().numpy().transpose((1, 2, 0))
        arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
        frame, name = arr, str(int(time.time() * 1000))
        cv2.imwrite(os.path.join("/data/miaobo/tmp", name + ".jpg"), frame)
    return True
| 3,195 | 1,162 |
#!/home1/datawork/agrouaze/conda_envs2/envs/py2.7_cwave/bin/python
# coding: utf-8
"""
"""
import sys
print(sys.executable)
import subprocess
import logging
from dateutil import rrule
import datetime
if __name__ == '__main__':
    # Drop any handlers configured by imported modules so basicConfig applies.
    root = logging.getLogger ()
    if root.handlers:
        for handler in root.handlers:
            root.removeHandler (handler)
    import argparse
    parser = argparse.ArgumentParser (description='start prun')
    parser.add_argument ('--verbose',action='store_true',default=False)
    args = parser.parse_args ()
    if args.verbose:
        logging.basicConfig (level=logging.DEBUG,format='%(asctime)s %(levelname)-5s %(message)s',
                             datefmt='%d/%m/%Y %H:%M:%S')
    else:
        logging.basicConfig (level=logging.INFO,format='%(asctime)s %(levelname)-5s %(message)s',
                             datefmt='%d/%m/%Y %H:%M:%S')
    # prun fans the per-day jobs out on the cluster, 3 listing lines per job.
    prunexe = '/appli/prun/bin/prun'
    listing = '/home1/scratch/agrouaze/list_kpi_1d_v2_prun_test.txt' # written below
    # call prun
    opts = ' --split-max-lines=3 --background -e '
    listing_content = []
    sta = datetime.datetime(2015,1,1)
    #sta = datetime.datetime(2020,6,1) # for test 2 which uses the partition cross assignments
    logging.info('start year: %s',sta)
    sto = datetime.datetime.today()
    fid = open(listing,'w')
    cpt = 0
    # One listing line per (satellite unit, WV incidence mode, day).
    for unit in ['S1A','S1B']:
        for wv in ['wv1','wv2']:
            logging.info('%s',unit)
            for dd in rrule.rrule(rrule.DAILY,dtstart=sta,until=sto):
                fid.write('%s %s %s\n'%(unit,wv,dd.strftime('%Y%m%d')))
                cpt +=1
    fid.close()
    logging.info('listing written ; %s nb lines: %s',listing,cpt)
    pbs = '/home1/datahome/agrouaze/git/kpi_mpc/src/kpi_WV_hs/compute_kpi_1d_v2.pbs'
    cmd = prunexe+opts+pbs+' '+listing
    logging.info('cmd to cast = %s',cmd)
    st = subprocess.check_call(cmd,shell=True)
    logging.info('status cmd = %s',st)
import asyncio
import concurrent
import functools
import json
import numpy as np
import torch
from aiohttp import web
from aiohttp.web_runner import GracefulExit
import aiohttp_cors
import logging
import csv
import multiprocessing
import queue
import pickle
# Local imports:
from torchfcts import function_from_code, get_default_args, check_code_get_args, get_f_expr_or_ode, get_const_bools
from torchfit import torch_fit
if __name__ == '__main__':
    # Only the parent process talks to the database; fitter worker processes
    # spawned via multiprocessing must not open a connection on import.
    import dbfcts as db  # we do not need a database connection for spawned processes
logging.basicConfig(level=logging.WARN)
logging.root.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Local-only server endpoint.
HOST = '127.0.0.1'
PORT = 7555
# Plotly's default qualitative palette; used to colour fit curves per dataset.
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
                         'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
                         'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
                         'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
                         'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
sys_print = print
def print(*values):
    """Shadow the builtin print so every message is flushed immediately
    (keyword arguments are intentionally not forwarded)."""
    sys_print(*values, flush=True)
async def index(request):
    """Health-check endpoint: report that the server is up."""
    payload = {'running': True}
    return web.json_response(payload)
async def check_code(request):
    """Validate user-supplied model code and return its discovered arguments."""
    payload = await request.json()
    result = check_code_get_args(payload['code'], payload['name_underscore'],
                                 payload['expr_mode'], payload['ode_dim'],
                                 payload['ode_dim_select'])
    return web.json_response(result)
async def add_model(request):
    """Store a new model: derive default args and constness flags from its code."""
    payload = await request.json()
    if payload['expr_mode'] and 'ode_dim' in payload:
        # Plain expressions carry no ODE dimensionality; drop stale keys.
        del payload['ode_dim']
        del payload['ode_dim_select']
    func = function_from_code(payload['code'], payload['name_underscore'])
    defaults = get_default_args(func, payload['expr_mode'], payload.get('ode_dim'))
    const_flags = get_const_bools(func)
    payload['args'] = [{'name': arg, 'value': val, 'const': const_flags[arg]}
                       for arg, val in defaults.items()]
    await db.create_model(payload['name'], payload)
    return web.json_response({'success': True})
async def delete_model(request):
    """Remove the named model from the database."""
    payload = await request.json()
    await db.delete_model(payload['name'])
    return web.json_response({'success': True})
async def delete_data(request):
    """Remove all datasets belonging to the given parent file."""
    payload = await request.json()
    await db.delete_data(payload['parent'])
    return web.json_response({'success': True})
async def model_exist_check(request):
    """Report whether a model with the requested name already exists.

    FIX: removed a leftover debug ``print`` that dumped the requested name and
    the full model list to stdout on every call.
    """
    data = await request.json()
    return web.json_response({'exists': data['name'] in await db.get_models_names()})
async def model_list(request):
    """Return every stored model."""
    models = await db.get_all_models()
    return web.json_response(models)
async def data_list(request):
    """Return the names of all stored datasets."""
    names = await db.get_data_names()
    return web.json_response(names)
async def plot_code(request):
    """Evaluate a model's code over its x-range and return plot coordinates."""
    data = await request.json()
    if data['content']['expr_mode']:
        mask, res, x = plot_code_py(data)
    else: # ODEs can be slow to solve, so we spin up a new process to not block the async loop
        with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
            future = asyncio.wrap_future(executor.submit(plot_code_py, data))
            mask, res, x = await future
    # Only finite points are serialised so the client never sees NaN/inf.
    return web.json_response({'x': x[mask].numpy().tolist(), 'y': res[mask].numpy().tolist()})
def plot_code_py(data):
    """Build an x grid, evaluate the model function on it and mask bad values.

    Returns (mask, res, x) as torch tensors; for ODE models this runs inside a
    worker process (see plot_code).
    """
    content = data['content']
    f_name = content['name_underscore']
    f = function_from_code(content['code'], f_name)
    kwargs = get_default_args(f, content['expr_mode'], content.get('ode_dim'))
    f = get_f_expr_or_ode(content['code'], content['expr_mode'], f_name, content.get('ode_dim_select'))
    # if not content['expr_mode']:
    #     kwargs['y0'] = torch.tensor(kwargs['y0'], dtype=torch.double)
    for k in kwargs:
        kwargs[k] = torch.tensor(kwargs[k], dtype=torch.double)
    if 'xlim' in data:
        x = torch.linspace(data['xlim'][0], data['xlim'][1], 250, dtype=torch.double)
    else:
        x = torch.linspace(0, 10, 250, dtype=torch.double)
    with torch.no_grad():
        res = f(x, **kwargs)
    # Non-finite results (NaN/inf) are dropped by the caller via this mask.
    mask = torch.isfinite(res)
    return mask, res, x
async def plot_data(request):
    """Return scatter traces for the requested datasets, thinned to ~max_n points."""
    data = await request.json()
    plot_data = []
    max_n = data.get('max_n', 250)
    for content in data['content']:
        dataset = await db.get_data_content(content['id'])
        # Subsample evenly so no trace exceeds roughly max_n markers.
        if len(dataset['x']) > max_n:
            skip = 1 + int(len(dataset['x']) / max_n)
        else:
            skip = 1
        x = dataset['x'][::skip]
        y = dataset['y'][::skip]
        plot_data.append({'x': x, 'y': y, 'name': dataset['name'], 'mode': 'markers',
                          'type': 'scattergl'})
    return web.json_response(plot_data)
async def upload_data(request):
    """Handle CSV upload: preview the first file, or commit all files to the DB.

    Expects multipart form fields named ``file_<name>`` plus JSON-encoded flags
    ``has_header``, ``commit_data`` and ``multiple_x_axes``. In preview mode
    (commit_data=False) only a truncated example table is returned.
    """
    data = await request.post()
    example = None
    filenames = []
    has_header = json.loads(data['has_header'])
    commit_data = json.loads(data['commit_data'])
    multiple_x_axes = json.loads(data['multiple_x_axes'])
    for fname in data:
        if not fname.startswith('file_'):
            continue
        f = data[fname].file.read().decode('latin-1')
        fname = fname[5:]  # strip the 'file_' prefix to recover the filename
        filenames.append(fname)
        # Preview mode only inspects the first uploaded file.
        if not commit_data and len(filenames) > 1:
            continue
        sniffer = csv.Sniffer()
        dialect = sniffer.sniff(f)
        if has_header is None:
            # Client left the decision to us; let the sniffer guess.
            has_header = sniffer.has_header(f)
        rows = [r for r in csv.reader(f.split('\n'), dialect=dialect) if len(r) > 0]
        if has_header:
            header = rows[0]
            rows = rows[1:]
        else:
            header = ['x'] + [f'#{i}' for i in range(1, len(rows[0]))]
        if commit_data:
            try:
                num_rows = np.array([[np.nan if x.strip() == '' else np.double(x) for x in r] for r in rows])
            except ValueError:
                return web.json_response({'success': False, 'error': 'Data contains non-numerical entries.'})
            if multiple_x_axes:
                # Columns come in (x, y) pairs.
                for i in range(0, num_rows.shape[1], 2):
                    x = num_rows[:, i]
                    y = num_rows[:, i + 1]
                    mask = ~np.isnan(y)
                    if any(np.isnan(x[mask])):
                        return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'})
                    dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]),
                               'orig_x': list(x[mask]), 'orig_y': list(y[mask])}
                    await db.create_dataset(header[i + 1], fname, dataset)
            else:
                # First column is the shared x-axis for all remaining columns.
                x = num_rows[:, 0]
                for i in range(1, num_rows.shape[1]):
                    y = num_rows[:, i]
                    mask = ~np.isnan(y)
                    if any(np.isnan(x[mask])):
                        return web.json_response({'success': False, 'error': 'x-axis not defined for all y-values.'})
                    dataset = {'parent_name': fname, 'name': header[i], 'x': list(x[mask]), 'y': list(y[mask]),
                               'orig_x': list(x[mask]), 'orig_y': list(y[mask])}
                    await db.create_dataset(header[i], fname, dataset)
        else:
            # Build a truncated preview table (at most 7x7, ellipses mark cuts).
            cut_horizontal = False
            cut_vertical = False
            if len(rows[0]) > 7:
                rows = [r[:7] + ['⋯'] for r in rows]
                header = header[:7] + ['⋯']
                cut_horizontal = True
            if len(rows) > 7:
                rows = rows[:7] + [['<center>⋮</center>'] * len(rows[0])]
                cut_vertical = True
            if cut_horizontal and cut_vertical:
                rows[-1][-1] = '⋱'
            example = {'header': header, 'has_header': has_header, 'data': rows, 'fname': fname}
    if commit_data:
        return web.json_response({'success': True, 'error': None})
    else:
        res = {'filenames': filenames, 'example': example}
        return web.json_response(res)
async def shuwdown(request):
    """Shut the server down: kill the fitter process and exit the event loop.

    NOTE(review): "shuwdown" is a typo for "shutdown", but the name is
    referenced in the route table, so it is kept unchanged here.
    """
    print('Stopping python server')
    fit_process.terminate()
    raise GracefulExit
async def stop_spurious_running_fits_and_empty_stop_queue(n_max=5):
    # stop any fits that might be running (not that any should be...)
    for _ in range(n_max):
        interrupt_queue.put(True)
    await asyncio.sleep(0.01)
    # Drain the interrupt queue so stale signals cannot abort the next fit.
    while True:
        try:
            interrupt_queue.get_nowait()
        except queue.Empty:
            break
async def load_fit_models_data(fit_info):
    """Resolve DB references in ``fit_info`` into concrete model code and data.

    Returns (fit_info, data, models), plain picklable structures ready to be
    shipped to the fitter worker process.
    """
    # Get model code
    models = {}
    for model_id, d in fit_info['models'].items():
        m = await db.get_models_content(d['name'])
        models[model_id] = {'code': m['code'], 'expr_mode': m['expr_mode'], 'name_underscore': m['name_underscore'],
                            'ode_dim': m.get('ode_dim'), 'ode_dim_select': m.get('ode_dim_select')}
    # Get data
    data = []
    for data_id, d in fit_info['data'].items():
        if d['in_use']:
            db_data = await db.get_data_content(d['id'])
            data.append({'x': db_data['x'], 'y': db_data['y'], 'weight': d['weight'], 'model': d['model'],
                         'parameters': d['parameters']})
    return fit_info, data, models
async def run_fit(request):
    """Queue a new fit job for the fitter process (POST only)."""
    if request.method != 'POST':
        return web.json_response({'error': 'must be a POST request'})
    # Clear any stale interrupt signals before handing over the new job.
    await stop_spurious_running_fits_and_empty_stop_queue()
    job = await load_fit_models_data(await request.json())
    run_fit_queue.put(job)
    return web.json_response({'status': 'started'})
async def interrupt_fit(request):
    """Signal the fitter process to abort the running fit (POST only)."""
    if request.method != 'POST':
        return web.json_response({'error': 'must be a POST request'})
    interrupt_queue.put(True)
    return web.json_response({'status': 'interrupting'})
async def fit_result(request):
    """Poll for a finished fit, falling back to the latest progress update.

    Returns {'status': 'success', ...} once the fitter has pushed a result,
    otherwise {'status': 'no-fit', 'info': <latest status update or None>}.
    """
    try:
        fit, r2 = result_queue.get_nowait()
        # Empty iteration queue:
        await asyncio.sleep(0.01)
        try:
            while True:
                status_queue.get_nowait()
        except queue.Empty:
            pass
    except queue.Empty:
        # No fit result yet, check if there is a loss update:
        d = None
        try:
            # Keep only the newest status entry.
            while True:
                d = status_queue.get_nowait()
        except queue.Empty:
            pass
        return web.json_response({'status': 'no-fit', 'info': d})
    return web.json_response({'status': 'success', 'fit': fit, 'r2': r2})
class PickleableF:
    """Callable model wrapper that can cross process boundaries.

    Only the model dict (plain data) is pickled; the actual function is
    rebuilt from its source code inside the worker process on each call.
    """
    def __init__(self, m):
        self.m = m
    def __call__(self, *args, **kwargs):
        m = self.m
        f = get_f_expr_or_ode(m['code'], m['expr_mode'], m['name_underscore'], m.get('ode_dim_select'))
        # Convert the torch result to a plain list so it pickles cheaply.
        return list(f(*args, **kwargs).numpy())
async def plot_fit(request):
    """Build plot traces for the datasets plus their fitted curves."""
    payload = await request.json()
    traces, is_fitted = await make_plot(payload)
    return web.json_response({'plots': traces, 'is_fitted': is_fitted})
async def make_plot(data):
    """Assemble Plotly traces: thinned data scatters plus fitted model lines.

    Returns (plot_data, is_fitted); is_fitted is False when any in-use free
    parameter has no fit result yet (those curves are drawn dashed).
    """
    plot_data = []
    max_n = data.get('max_n', 250)
    # Generate functions
    models = {}
    for model_id, d in data['models'].items():
        m = await db.get_models_content(d['name'])
        models[model_id] = PickleableF(m)
        models[model_id].expr_mode = m['expr_mode']
        models[model_id].ode_dim = m.get('ode_dim')
    # Plot data
    xmin = float('infinity')
    xmax = float('-infinity')
    for d_id in data['data']:
        d = data['data'][d_id]
        if d['in_use']:
            #
            dataset = await db.get_data_content(d['id'])
            # Subsample evenly so no trace exceeds roughly max_n markers.
            if len(dataset['x']) > max_n:
                skip = 1 + int(len(dataset['x']) / max_n)
            else:
                skip = 1
            x = dataset['x'][::skip]
            y = dataset['y'][::skip]
            if min(x) < xmin:
                xmin = min(x)
            if max(x) > xmax:
                xmax = max(x)
            plot_data.append({'x': x, 'y': y, 'name': dataset['name'], 'mode': 'markers',
                              'type': 'scattergl', 'legendgroup': d_id})
    # Plot fits
    x = np.linspace(xmin, xmax, 250)
    x_list = list(x)
    x_torch = torch.from_numpy(x)
    is_fitted = False
    with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
        for i, d_id in enumerate(data['data']):
            d = data['data'][d_id]
            if d['in_use']:
                f = models[d['model']]
                is_fitted = True
                kwargs = {}
                for p in d['parameters']:
                    p_id = d['parameters'][p]
                    parameter = data['parameters'][p_id]
                    if parameter['const']:
                        kwargs[p] = parameter['value']
                    elif parameter.get('fit') is None:
                        # Free parameter without a fit yet: fall back to its value.
                        kwargs[p] = parameter['value']
                        is_fitted = False
                    else:
                        kwargs[p] = parameter['fit']
                for p in kwargs:
                    kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double)
                if not f.expr_mode:
                    kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim)
                # Run function evaluation in parallel, without blocking the server loop:
                future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs))
                c = DEFAULT_PLOTLY_COLORS[i % len(DEFAULT_PLOTLY_COLORS)]
                plot_data.append(
                    {'x': x_list, 'future': future, 'mode': 'lines', 'showlegend': False, 'legendgroup': d_id,
                     'line': {'color': c} if is_fitted else {'color': c, 'dash': 'dash'}})
    for d in plot_data:
        if 'future' in d:
            d['y'] = await d['future']
            del d['future']
    return plot_data, is_fitted
async def make_download(data):
    """Build the downloadable records: raw data, fitted curve and parameters.

    Mirrors make_plot's parameter resolution but returns full-resolution data
    plus per-parameter metadata instead of Plotly traces.
    """
    download_data = []
    # Generate functions
    models = {}
    for model_id, d in data['models'].items():
        m = await db.get_models_content(d['name'])
        models[model_id] = PickleableF(m)
        models[model_id].expr_mode = m['expr_mode']
        models[model_id].ode_dim = m.get('ode_dim')
    # Get data and range
    datasets = {}
    xmin = float('infinity')
    xmax = float('-infinity')
    for d_id in data['data']:
        d = data['data'][d_id]
        if d['in_use']:
            dataset = await db.get_data_content(d['id'])
            datasets[d_id] = dataset
            x = dataset['x']
            if min(x) < xmin:
                xmin = min(x)
            if max(x) > xmax:
                xmax = max(x)
    # Generate fits and store data
    x = np.linspace(xmin, xmax, 250)
    x_list = list(x)
    x_torch = torch.from_numpy(x)
    with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
        for i, d_id in enumerate(data['data']):
            d = data['data'][d_id]
            if d['in_use']:
                dataset = datasets[d_id]
                store = {
                    'name': dataset['name'],
                    'x_data': dataset['x'],
                    'y_data': dataset['y']
                }
                f = models[d['model']]
                kwargs = {}
                list_of_parameters = []
                for p in d['parameters']:
                    p_id = d['parameters'][p]
                    parameter = data['parameters'][p_id]
                    if parameter['const']:
                        kwargs[p] = parameter['value']
                    elif parameter.get('fit') is None:
                        kwargs[p] = parameter['value']
                    else:
                        kwargs[p] = parameter['fit']
                    # NOTE(review): the key 'value:' (trailing colon) looks like
                    # a typo for 'value' — confirm against the download consumer
                    # before changing the JSON shape.
                    info = {'name': p,
                            'type': parameter['type'],
                            'value:': kwargs[p],
                            'is_const': parameter['const']}
                    if parameter['type'] == 'detached':
                        info['detached_name'] = parameter['name']
                    list_of_parameters.append(info)
                store['parameters'] = list_of_parameters
                for p in kwargs:
                    kwargs[p] = torch.tensor(kwargs[p], dtype=torch.double)
                if not f.expr_mode:
                    kwargs = transform_y0_kwargs_for_ode(kwargs, f.ode_dim)
                # Run function evaluation in parallel, without blocking the server loop:
                future = asyncio.wrap_future(executor.submit(f, x_torch, **kwargs))
                store['x_fit'] = x_list
                store['future'] = future
                download_data.append(store)
    for d in download_data:
        if 'future' in d:
            d['y_fit'] = await d['future']
            del d['future']
    return download_data
async def download_fit(request):
    """Return the fit results as pretty-printed JSON for client download."""
    payload = await request.json()
    records = await make_download(payload)
    return web.json_response(records, dumps=functools.partial(json.dumps, indent=4))
def transform_y0_kwargs_for_ode(kwargs, dim):
    """Collapse scalar 'y0[i]' entries of kwargs into one 'y0' tensor of length dim."""
    components = np.empty(dim)
    for idx in range(dim):
        components[idx] = kwargs.pop(f'y0[{idx}]')
    kwargs['y0'] = torch.from_numpy(components)
    return kwargs
def fitter(input_queue, output_queue, status_queue, interrupt_queue):
    """Worker-process loop: pull fit jobs, run torch_fit, push (fit, r2) results.

    Free (non-const) parameters are listed first, so ``const_index`` marks the
    boundary between fitted and constant values in ``parameter_names``/``values``.
    """
    print('Fitting queue running')
    while True:
        fit_info, data, models = input_queue.get(True)
        logger.debug('Got fit to be run')
        # First get all parameters
        parameter_names = []
        values = []
        const_index = 0
        for parameter_id, d in fit_info['parameters'].items():
            if not d['const']:
                parameter_names.append(parameter_id)
                values.append(d['value'])
                const_index += 1
        for parameter_id, d in fit_info['parameters'].items():
            if d['const']:
                parameter_names.append(parameter_id)
                values.append(d['value'])
        logger.debug(f'#parameters = {len(fit_info["parameters"])}')
        logger.debug(f'#fit parameters = {const_index}')
        # Map each dataset's parameter name to its index in the flat value list.
        for d in data:
            d['parameter_indeces'] = {k: parameter_names.index(v) for k, v in d['parameters'].items()}
        if const_index == 0:
            logger.info('No parameters to be fitted')
            output_queue.put(None)
            continue
        # with open('cache.pkl', 'wb') as f:
        #     pickle.dump((parameter_names, values, const_index, models, data), f)
        method = fit_info.get('method')
        fit, r2 = torch_fit(parameter_names, values, const_index, models, data, status_queue, interrupt_queue,
                            method=method)
        output_queue.put((fit, r2))
if __name__ == '__main__':
    multiprocessing.freeze_support()
    # with open('cache.pkl', 'rb') as f:
    #     torch_fit(*pickle.load(f))
    # exit()
    # Fitter
    # Queues connect the web process to a single long-lived fitter process.
    run_fit_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    status_queue = multiprocessing.Queue()
    interrupt_queue = multiprocessing.Queue()
    fit_process = multiprocessing.Process(target=fitter,
                                          args=(run_fit_queue, result_queue, status_queue, interrupt_queue))
    fit_process.daemon = True
    fit_process.start()
    # Web Server
    app = web.Application()
    # Allow any origin: the UI is typically served from a different port.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    routes = [('/', index),
              ('/check_code', check_code),
              ('/plot_code', plot_code),
              ('/add_model', add_model),
              ('/delete_model', delete_model),
              ('/delete_data', delete_data),
              ('/model_exist_check', model_exist_check),
              ('/model_list', model_list),
              ('/upload_data', upload_data),
              ('/data_list', data_list),
              ('/plot_data', plot_data),
              ('/run_fit', run_fit),
              ('/interrupt_fit', interrupt_fit),
              ('/plot_fit', plot_fit),
              ('/fit_result', fit_result),
              ('/download_fit', download_fit),
              ('/exit', shuwdown),
              ]
    methods = ['GET', 'POST', 'DELETE']
    # Register every route for all three methods with CORS enabled.
    for uri, f in routes:
        resource = cors.add(app.router.add_resource(uri))
        for m in methods:
            cors.add(resource.add_route(m, f))
    print('Python server started')
    try:
        web.run_app(app, host=HOST, port=PORT, shutdown_timeout=0.0)
    finally:
        # Ensure the fitter process dies with the server.
        fit_process.terminate()
| 20,482 | 6,585 |
from flask import jsonify, make_response, request
from . import app
from .api_post import engine, login
from .core import utility_funcs
from sqlalchemy.orm import sessionmaker
from sqlalchemy_cockroachdb import run_transaction
from .api_post import login_required
@app.route("/api/v1/tasks/list")
@login_required
def list():
user_uuid = request.environ["user_id"]
d = []
tasks = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_tasks(session, user_uuid),
)
for task in tasks:
d.append(
{
"id": task.task_id,
"name": task.task_name,
"description": task.task_description,
"schedule": str(task.schedule),
"timestamp": str(task.timestamp),
}
)
return jsonify(d)
@app.route("/api/v1/task/<task_uuid>")
@login_required
def meta(task_uuid):
user_uuid = request.environ["user_id"]
Session = sessionmaker(bind=engine)
with Session() as session:
task = utility_funcs.get_task(session, user_uuid, task_uuid)
return {
"id": task.task_id,
"name": task.task_name,
"description": task.task_description,
"schedule": str(task.schedule),
"timestamp": str(task.timestamp),
}
@app.route("/api/v1/task/<task_uuid>/completed")
@login_required
def get_completed(task_uuid):
user_uuid = request.environ["user_id"]
is_completed = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.has_task_completed(
session, task_id=task_uuid, user_id=user_uuid
),
)
return {"completed": is_completed}
@app.route("/api/v1/task/<task_uuid>/current-streak")
@login_required
def get_current_streak(task_uuid):
user_uuid = request.environ["user_id"]
streak = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.task_streak_status(
session, task_id=task_uuid, user_id=user_uuid
),
)
return {"streak": streak}
def _get_info_fmt(session, user_uuid):
    """Serialise a user row into the public JSON shape (no credential fields)."""
    user = utility_funcs.get_user(session, user_uuid)
    return {
        "id": str(user.user_id),
        "username": user.username,
        "name": user.name,
        "last_seen": user.last_seen,
        "last_checked_events": user.last_checked_events,
    }
@app.route("/api/v1/users/<user_id>")
@login_required
def get_info(user_uuid):
return run_transaction(
sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid)
)
@app.route("/api/v1/self")
@login_required
def get_self_info():
user_uuid = request.environ["user_id"]
return run_transaction(
sessionmaker(bind=engine), lambda session: _get_info_fmt(session, user_uuid)
)
@app.route("/api/v1/users/<friend_id>/friend_status")
@login_required
def friend_status(friend_id):
user_uuid = request.environ["user_id"]
print(friend_id, user_uuid, friend_id == str(user_uuid))
if friend_id == str(user_uuid):
return make_response("Cannot make friends with yourself", 403)
return {
"friends": run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.check_friend(session, user_uuid, friend_id),
)
}
@app.route("/api/v1/streaks/maximum")
@login_required
def max_streak():
user_uuid = request.environ["user_id"]
all, month, year = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_max_streak(session, user_uuid),
)
return {"all_time": all, "month": month, "year": year}
@app.route("/api/v1/task/<task_id>/maximum")
@login_required
def max_streak_task(task_id):
user_uuid = request.environ["user_id"]
all, month, year = run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_max_streak_task(session, user_uuid, task_id),
)
return {"all_time": all, "month": month, "year": year}
@app.route("/api/v1/events")
@login_required
def get_notifications():
user_uuid = request.environ["user_id"]
return run_transaction(
sessionmaker(bind=engine),
lambda session: utility_funcs.get_notifications(session, user_uuid),
)
| 4,312 | 1,394 |
#!/usr/bin/python3
"""
Run "mypy --disallow-untyped-defs --ignore-missing-imports \
--show-error-codes --strict-equality delta.py".
The holidays module lacks type hints.
Next need: explicit Pytests.
Notice the imports are punctuated in an isort style.
"""
from datetime import datetime, timedelta
import sys
import holidays
# Alias: events are plain datetimes; keeps the type hints below readable.
EVENT = datetime
fromisoformat = datetime.fromisoformat
class TimeCalc():
    """Compute elapsed *business* time between a request and its response.

    Business hours run from BD_START to BD_STOP on weekdays that are not US
    holidays; time outside that window does not count toward the lapse.
    """
    BD_START = 10  # business day opens at 10:00
    BD_STOP = 17   # business day closes at 17:00
    def _get_beginning_of_day(self,event: EVENT) -> EVENT:
        """The opening time (BD_START) on event's calendar day."""
        return fromisoformat(f"{self._get_day_string(event)} {self.BD_START}:00")
    def _get_end_of_day(self,event: EVENT) -> EVENT:
        """The closing time (BD_STOP) on event's calendar day.

        NOTE(review): the string has no minutes ("... 17"); hour-only ISO
        strings are only accepted by fromisoformat on Python 3.11+ — confirm
        the target interpreter version.
        """
        return fromisoformat(f"{self._get_day_string(event)} {self.BD_STOP}")
    def _get_day(self,event: EVENT) -> EVENT:
        """Midnight at the start of event's calendar day."""
        return fromisoformat(self._get_day_string(event))
    def _get_day_string(self,event: EVENT) -> str:
        """event's date rendered 'YYYY-M-D' (no zero padding)."""
        return f"{event.year}-{event.month}-{event.day}"
    def _get_next_day(self,event: EVENT) -> EVENT:
        """Midnight at the start of the day after event."""
        return self._get_day(event) + timedelta(days=1)
    def _is_work_day(self,event: EVENT) -> bool:
        """True when event falls on a weekday that is not a US holiday."""
        # NOTE(review): holidays.US() is rebuilt on every call; hoisting it to
        # a class attribute would avoid repeated construction.
        if event.date() in holidays.US():
            return False
        if event.weekday() >= 5:
            return False
        return True
    def business_lapse(self,request: EVENT, response: EVENT) -> timedelta:
        """ This is the entry point most clients will want to use.
        """
        # Recursively clips the interval to business days/hours, accumulating
        # only in-window time. Raises RuntimeError if response precedes request.
        # NOTE(review): when request == response the final assert references
        # names bound only inside the if-block below — looks like it would
        # raise UnboundLocalError; confirm intended behaviour for equal times.
        if request > response:
            raise RuntimeError(f"How can there have been a response at {response} to a *later* request at {request}?")
        if request != response:
            beginning_of_request_day = self._get_beginning_of_day(request)
            if request < beginning_of_request_day:
                return self.business_lapse(min(beginning_of_request_day, response), response)
            end_of_response_day = self._get_end_of_day(response)
            if end_of_response_day < response:
                return self.business_lapse(request, max(end_of_response_day, request))
            request_day = self._get_day(request)
            new_request = self._get_next_day(request)
            if not self._is_work_day(request_day):
                return self.business_lapse(new_request, max(response, new_request))
            assert self._is_work_day(request_day)
            if request_day != self._get_day(response):
                assert request_day < self._get_day(response)
                return (max(request, self._get_end_of_day(request)) - request) + \
                    self.business_lapse(new_request, max(response, new_request))
        assert beginning_of_request_day <= request <= response <= end_of_response_day
        return response - request
def main() -> None:
    """Command-line driver.

    Reads two ISO timestamps from argv and prints the raw difference and
    the business-hours lapse between them.

    Examples:
        ./delta.py "2021-10-27 03:45" "2021-10-29 11:08"
        ./delta.py "2021-10-27 10:00" "2021-10-28 17:00"
        ./delta.py "2021-11-27 18:00" "2021-11-27 20:20"
    """
    raw_request, raw_response = sys.argv[1], sys.argv[2]
    request_time = fromisoformat(raw_request)
    response_time = fromisoformat(raw_response)
    print(f"Times are {request_time} and {response_time}.")
    print(f"Difference is {response_time - request_time}.")
    print(f"Business lapse is {TimeCalc().business_lapse(request_time, response_time)}.")


if __name__ == "__main__":
    main()
| 4,066 | 1,661 |
from octopus.modules.es import dao
from datetime import datetime
from octopus.modules.account.exceptions import NonUniqueAccountException
def query_filter(q):
    """Restrict the fields the query endpoint may return.

    Limits the ES source to a safe whitelist so sensitive account data
    (password hashes, activation/reset tokens) is never sent to the
    client.

    :param q: an esprit.models.Query object, mutated in place
    """
    allowed = ["id", "email", "created_date", "last_updated", "role"]
    q.include_source(allowed)
class BasicAccountDAO(dao.ESDAO):
    """ES data-access object for account records.

    Provides single-record lookups by email, reset token and activation
    token; each raises NonUniqueAccountException when the index holds
    more than one match.
    """
    __type__ = 'account'

    @classmethod
    def pull_by_email(cls, email):
        """Return the account with *email*, or None if there is none."""
        matches = cls.object_query(q=AccountQuery(email=email).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the email {x}".format(x=email))
        return matches[0] if matches else None

    @classmethod
    def get_by_reset_token(cls, reset_token, not_expired=True):
        """Return the account holding *reset_token*.

        When *not_expired* is True (default), an account whose reset
        window has lapsed yields None.
        """
        matches = cls.object_query(q=AccountQuery(reset_token=reset_token).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the reset token {x}".format(x=reset_token))
        if not matches:
            return None
        account = matches[0]
        if account.is_reset_expired() and not_expired:
            return None
        return account

    @classmethod
    def get_by_activation_token(cls, activation_token, not_expired=True):
        """Return the account holding *activation_token*.

        When *not_expired* is True (default), an account whose activation
        window has lapsed yields None.
        """
        matches = cls.object_query(q=AccountQuery(activation_token=activation_token).query())
        if len(matches) > 1:
            raise NonUniqueAccountException("There is more than one user account with the activation token {x}".format(x=activation_token))
        if not matches:
            return None
        account = matches[0]
        if account.is_activation_expired() and not_expired:
            return None
        return account
class AccountQuery(object):
    """Builds an Elasticsearch bool/must query for locating an account
    by exact email, reset token and/or activation token."""

    def __init__(self, email=None, reset_token=None, activation_token=None):
        self.email = email
        self.reset_token = reset_token
        self.activation_token = activation_token

    def query(self):
        """Return the ES query body: one exact-match term per set field."""
        must = []
        for field, value in (("email", self.email),
                             ("reset_token", self.reset_token),
                             ("activation_token", self.activation_token)):
            if value is not None:
                must.append({"term": {field + ".exact": value}})
        return {"query": {"bool": {"must": must}}}
from django.test import TestCase
from app.calc import substraction
from app.calc import add
class CalcTests(TestCase):
    """Unit tests for the arithmetic helpers in app.calc."""
    def test_add_number(self):
        # Test that two numbers are added together.
        self.assertEqual(add(3,8), 11)
    def test_substraction_number(self):
        # Test that the second operand is subtracted from the first.
        self.assertEqual(substraction(8,3),5)
| 330 | 105 |
import json
# Jinja will replace the inside with double-quote-using JSON,
# so use single quotes to delimit the string.
# Use double quotes inside to keep the expression as a single string.
# Parse the credentials blob templated in by Jinja (see note above).
credentials = json.loads('{{ pillar["buildbot"]["credentials"]|json }}')
# json.loads creates unicode strings but Buildbot requires bytestrings.
# Python 2's Unicode situation makes me sad.
credentials = {k: v.encode('utf-8') for k, v in credentials.items()}
# Individual secrets exposed as module-level constants for the buildbot
# master configuration to import.
HTTP_USERNAME = credentials['http-user']
HTTP_PASSWORD = credentials['http-pass']
SLAVE_PASSWORD = credentials['slave-pass']
CHANGE_PASSWORD = credentials['change-pass']
GITHUB_DOC_TOKEN = credentials['gh-doc-token']
HOMU_BUILDBOT_SECRET = credentials['homu-secret']
S3_UPLOAD_ACCESS_KEY_ID = credentials['s3-upload-access-key-id']
S3_UPLOAD_SECRET_ACCESS_KEY = credentials['s3-upload-secret-access-key']
| 856 | 275 |
#!/usr/bin/env python
# import libs
import unittest
import random
import sys
import warnings
# import classes
import analytics.utils.misc as misc
import analytics.exceptions.exceptions as ex
import analytics.core.processor.processor as processor
from types import ListType, DictType
from analytics.core.map.dataitemmap import DataItemMap
from analytics.core.map.clustermap import ClusterMap
from analytics.core.map.elementmap import ElementMap
from analytics.core.map.pulsemap import PulseMap
from analytics.core.cluster import Cluster
from analytics.core.element import Element
from analytics.core.pulse import DynamicPulse, StaticPulse, Pulse
from analytics.core.attribute.dynamic import Dynamic
from analytics.core.attribute.feature import Feature
from analytics.algorithms.rank import RSYS
# some general input to test: a grab-bag of values of different types
# used to fuzz the processor functions (Python 2: sys.maxint).
general_input = [
    None, True, False, sys.maxint, -sys.maxint-1, {}, [],
    {"1": 1, "2": 2}, [1, 2, 3, 4, 5], "abc", 0, 1, -1, 1.23,
    -3.34, " string ", " test test test ", "1"
]
class Processor_TestSequence(unittest.TestCase):
    """Unit tests for analytics.core.processor.processor.

    NOTE: this is Python 2 code (sys.maxint, types.DictType, indexing
    dict.values(), print statements in the __main__ block below).
    """
    def setUp(self):
        """Build sample cluster/element/pulse dicts and fresh maps."""
        self._teststr = "test string"
        self._iterations = 20
        # object lists
        self._clrobj = {
            "id": "#1",
            "name": "#1",
            "desc": "#1",
            "parent": None
        }
        self._elmobj = {
            "id": "#1",
            "name": "#1",
            "desc": "#1",
            "cluster": "#1",
            "dir": "up"
        }
        self._plsobj = {
            "id": "#1",
            "name": "#1",
            "desc": "#1",
            "sample": 1
        }
        # maps
        self._clustermap = ClusterMap()
        self._elementmap = ElementMap()
        self._pulsemap = PulseMap()
    def test_processor_clusterObject(self):
        """_processClusterObject: rejects malformed input, builds a Cluster."""
        for it in range(self._iterations):
            obj = random.choice(general_input)
            idmapper = random.choice(general_input)
            # dicts fail on missing keys, everything else on its type
            if type(obj) is DictType:
                with self.assertRaises(KeyError):
                    processor._processClusterObject(obj, idmapper)
            else:
                with self.assertRaises(TypeError):
                    processor._processClusterObject(obj, idmapper)
        # process object without errors
        idmapper = {}
        clr = processor._processClusterObject(self._clrobj, idmapper)
        self.assertEqual(type(clr), Cluster)
        self.assertEqual(clr.name(), self._clrobj["name"])
        self.assertEqual(clr.desc(), self._clrobj["desc"])
        self.assertEqual(clr.parent(), None)
        idmapper_obj = {self._clrobj["id"]: {"cluster": clr, "parent": None}}
        self.assertEqual(idmapper, idmapper_obj)
    def test_processor_elementObject(self):
        """_processElementObject: rejects malformed input, builds an Element."""
        for it in range(self._iterations):
            obj = random.choice(general_input)
            idmapper = random.choice(general_input)
            if type(obj) is DictType:
                with self.assertRaises(KeyError):
                    processor._processElementObject(obj, idmapper)
            else:
                with self.assertRaises(TypeError):
                    processor._processElementObject(obj, idmapper)
        # process object without errors
        elm = processor._processElementObject(self._elmobj)
        self.assertEqual(type(elm), Element)
        self.assertEqual(elm.name(), self._elmobj["name"])
        self.assertEqual(elm.desc(), self._elmobj["desc"])
        self.assertEqual(elm.cluster(), None)
        self.assertEqual(elm.rank(), RSYS.UND_RANK)
    def test_processor_pulseObject(self):
        """_processPulseObject: rejects malformed input, builds a Pulse."""
        for it in range(self._iterations):
            obj = random.choice(general_input)
            idmapper = random.choice(general_input)
            if type(obj) is DictType:
                with self.assertRaises(KeyError):
                    processor._processPulseObject(obj, idmapper)
            else:
                with self.assertRaises(TypeError):
                    processor._processPulseObject(obj, idmapper)
        # process object without errors
        pls = processor._processPulseObject(self._plsobj)
        self.assertEqual(isinstance(pls, Pulse), True)
        self.assertEqual(pls.name(), self._plsobj["name"])
        self.assertEqual(pls.desc(), self._plsobj["desc"])
        self.assertEqual(pls.type(), type(self._plsobj["sample"]))
        self.assertEqual(pls.store(), [])
        self.assertEqual(pls.default(), None)
    def test_processor_parseClusters(self):
        """parseClusters: warns on the bad item, maps the good one."""
        objlist = [self._clrobj, {}]
        idmapper = {}
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            processor.parseClusters(objlist, self._clustermap, idmapper)
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))
        self.assertEqual(len(self._clustermap._map), 1)
        self.assertEqual(len(self._clustermap._root), 1)
        clr = self._clustermap._map.values()[0]
        key = self._clrobj["id"]
        self.assertEqual(key in idmapper, True)
        self.assertEqual(idmapper[key], {"cluster":clr, "parent": None})
    def test_processor_parseElements(self):
        """parseElements: warns on the bad item; extra keys become features."""
        objlist = [self._elmobj, {}]
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            processor.parseElements(objlist, self._elementmap)
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))
        self.assertEqual(len(self._elementmap._map), 1)
        exm = self._elementmap._map.values()[0]
        self.assertEqual(len(exm.features()), 1)
        self.assertEqual(exm.features()[0].name(), "dir")
    def test_processor_parsePulses(self):
        """parsePulses: warns on the bad item, maps the good one."""
        objlist = [self._plsobj, {}]
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            processor.parsePulses(objlist, self._pulsemap)
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))
        self.assertEqual(len(self._pulsemap._map), 1)
    def test_processor_processBlock(self):
        """ProcessBlock + processWithBlock fill all three maps."""
        clusters = {"map": self._clustermap, "data": [self._clrobj]}
        elements = {"map": self._elementmap, "data": [self._elmobj]}
        pulses = {"map": self._pulsemap, "data": [self._plsobj]}
        # fill block
        block = processor.ProcessBlock(clusters, elements, pulses)
        self.assertEqual(block._clustermap, self._clustermap)
        self.assertEqual(block._elementmap, self._elementmap)
        self.assertEqual(block._pulsemap, self._pulsemap)
        self.assertEqual(block._isProcessed, False)
        # process block
        block = processor.processWithBlock(block)
        self.assertEqual(block._isProcessed, True)
        self.assertEqual(
            len(block._clustermap._map.values()), len(clusters["data"])
        )
        self.assertEqual(
            len(block._elementmap._map.values()), len(elements["data"])
        )
        self.assertEqual(
            len(block._pulsemap._map.values()), len(pulses["data"])
        )
    def test_processor_processBlockUnknownCluster(self):
        """Elements without a cluster get attached to UNKNOWN_CLUSTER."""
        elmobjects = [
            self._elmobj,
            {
                "id": "#2",
                "name": "#2",
                "desc": "#2",
                "cluster": None,
                "dir": "up"
            }
        ]
        clusters = {"map": self._clustermap, "data": [self._clrobj]}
        elements = {"map": self._elementmap, "data": elmobjects}
        pulses = {"map": self._pulsemap, "data": [self._plsobj]}
        # fill block
        block = processor.ProcessBlock(clusters, elements, pulses)
        self.assertEqual(block._clustermap, self._clustermap)
        self.assertEqual(block._elementmap, self._elementmap)
        self.assertEqual(block._pulsemap, self._pulsemap)
        self.assertEqual(block._isProcessed, False)
        # process block
        block = processor.processWithBlock(block)
        self.assertEqual(block._isProcessed, True)
        # one extra cluster: the synthetic "unknown" one
        self.assertEqual(
            len(block._clustermap._map.values()), len(clusters["data"])+1
        )
        self.assertTrue(
            processor.UNKNOWN_CLUSTER.id() in block._clustermap._map
        )
        self.assertEqual(
            len(block._elementmap._map.values()), len(elements["data"])
        )
        self.assertEqual(
            len(block._pulsemap._map.values()), len(pulses["data"])
        )
    def test_processor_discoverPulses(self):
        """Discovery mode: pulses are derived from element properties."""
        clusters = {"map": self._clustermap, "data": [self._clrobj]}
        elements = {"map": self._elementmap, "data": [self._elmobj]}
        pulses = {"map": self._pulsemap, "data": [self._plsobj]}
        # fill block, and discover pulses
        block = processor.ProcessBlock(clusters, elements, pulses, True)
        self.assertEqual(block._clustermap, self._clustermap)
        self.assertEqual(block._elementmap, self._elementmap)
        self.assertEqual(block._pulsemap, self._pulsemap)
        self.assertEqual(block._isDiscovery, True)
        self.assertEqual(block._isProcessed, False)
        # process block
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            block = processor.processWithBlock(block)
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))
        self.assertEqual(block._isProcessed, True)
        self.assertEqual(
            len(block._clustermap._map.values()), len(clusters["data"])
        )
        self.assertEqual(
            len(block._elementmap._map.values()), len(elements["data"])
        )
        self.assertEqual(len(block._pulsemap._map.values()), 1)
        for pulse in block._pulsemap._map.values():
            self.assertEqual(pulse.name() in self._elmobj, True)
            self.assertEqual(len(pulse.store()), 1)
    def test_processor_sortElements(self):
        """sortElements: default order is descending by rank value."""
        elementlist = [
            {"id": "#1","name": "#1","desc": "#1","cluster": None},
            {"id": "#2","name": "#2","desc": "#2","cluster": None},
            {"id": "#3","name": "#3","desc": "#3","cluster": None}
        ]
        ranks = [RSYS.O, RSYS.B, RSYS.A, RSYS.F, RSYS.G, RSYS.K]
        elementmap = ElementMap()
        processor.parseElements(elementlist, elementmap)
        for element in elementmap._map.values():
            element.setRank(random.choice(ranks))
        ls = processor.sortElements(elementmap._map.values())
        for i in range(len(ls)-1):
            self.assertTrue(ls[i].rank()._value >= ls[i+1].rank()._value)
    def test_processor_sortElementsReversed(self):
        """sortElements(..., True): ascending by rank value."""
        elementlist = [
            {"id": "#1","name": "#1","desc": "#1","cluster": None},
            {"id": "#2","name": "#2","desc": "#2","cluster": None},
            {"id": "#3","name": "#3","desc": "#3","cluster": None}
        ]
        ranks = [RSYS.O, RSYS.B, RSYS.A, RSYS.F, RSYS.G, RSYS.K]
        elementmap = ElementMap()
        processor.parseElements(elementlist, elementmap)
        for element in elementmap._map.values():
            element.setRank(random.choice(ranks))
        ls = processor.sortElements(elementmap._map.values(), True)
        for i in range(len(ls)-1):
            self.assertTrue(ls[i].rank()._value <= ls[i+1].rank()._value)
# Load test suites
def _suites():
    """Return the TestCase classes that make up this module's suite."""
    return [
        Processor_TestSequence
    ]
# Load tests
def loadSuites():
    """Collect every suite declared in _suites() into one TestSuite."""
    gsuite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in _suites():
        gsuite.addTest(loader.loadTestsFromTestCase(case))
    return gsuite
if __name__ == '__main__':
    # Build the suite and run it verbosely (Python 2 print statements).
    suite = loadSuites()
    print ""
    print "### Running tests ###"
    print "-" * 70
    unittest.TextTestRunner(verbosity=2).run(suite)
| 11,819 | 3,640 |
# -*- coding: UTF-8 -*-
import pytest
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.sql.sqltypes import INTEGER, VARCHAR
from sqlalchemy.testing import fixtures, config
from sqlalchemy_exasol.base import EXADialect
# Primary test schema name, and the keys used to select one of the three
# engines (differing only in the URL's default database) in MetadataTest.
TEST_GET_METADATA_FUNCTIONS_SCHEMA = "test_get_metadata_functions_schema"
ENGINE_NONE_DATABASE = "ENGINE_NONE_DATABASE"
ENGINE_SCHEMA_DATABASE = "ENGINE_SCHEMA_DATABASE"
ENGINE_SCHEMA_2_DATABASE = "ENGINE_SCHEMA_2_DATABASE"
class MetadataTest(fixtures.TablesTest):
    """Integration tests for EXADialect's metadata (reflection) methods.

    Two schemas are created: schema 1 holds tables ``t`` and ``s`` (with a
    composite FK from s to t) plus view ``v``; schema 2 holds ``t_2`` and
    ``v_2``.  Every test runs against three engines whose URLs carry a
    different default database (none / schema 1 / schema 2), and the
    ``test_compare_*`` tests cross-check the ODBC metadata path against
    the plain-SQL fallback path.
    """
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Create the two test schemas and one engine per database variant."""
        cls.schema = TEST_GET_METADATA_FUNCTIONS_SCHEMA
        cls.schema_2 = "test_get_metadata_functions_schema_2"
        with config.db.begin() as c:
            try:
                c.execute("DROP SCHEMA %s CASCADE" % cls.schema)
            except Exception as e:
                # Best effort: the schema may simply not exist yet.
                print(e)
            c.execute("CREATE SCHEMA %s" % cls.schema)
            c.execute(
                "CREATE TABLE %s.t (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema)
            c.execute(
                "CREATE TABLE {schema}.s (id1 int primary key, fid1 int, fid2 int, age int, CONSTRAINT fk_test FOREIGN KEY (fid1,fid2) REFERENCES {schema}.t(pid1,pid2))".format(
                    schema=cls.schema))
            # Renamed from the misspelled "view_defintion".
            cls.view_definition = "CREATE VIEW {schema}.v AS select * from {schema}.t".format(schema=cls.schema)
            c.execute(cls.view_definition)
            try:
                c.execute("DROP SCHEMA %s CASCADE" % cls.schema_2)
            except Exception as e:
                # Best effort: the schema may simply not exist yet.
                print(e)
            c.execute("CREATE SCHEMA %s" % cls.schema_2)
            c.execute(
                "CREATE TABLE %s.t_2 (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema_2)
            c.execute("CREATE VIEW {schema}.v_2 AS select * from {schema}.t_2".format(schema=cls.schema_2))
            c.execute("COMMIT")
            cls.engine_none_database = cls.create_engine_with_database_name(c, None)
            cls.engine_schema_database = cls.create_engine_with_database_name(c, cls.schema)
            cls.engine_schema_2_database = cls.create_engine_with_database_name(c, cls.schema_2)
            cls.engine_map = {
                ENGINE_NONE_DATABASE: cls.engine_none_database,
                ENGINE_SCHEMA_DATABASE: cls.engine_schema_database,
                ENGINE_SCHEMA_2_DATABASE: cls.engine_schema_2_database
            }

    @classmethod
    def generate_url_with_database_name(cls, connection, new_database_name):
        """Return config.db_url with its database replaced by *new_database_name*."""
        database_url = config.db_url
        new_args = database_url.translate_connect_args()
        new_args["database"] = new_database_name
        new_database_url = URL(drivername=database_url.drivername, query=database_url.query, **new_args)
        return new_database_url

    @classmethod
    def create_engine_with_database_name(cls, connection, new_database_name):
        """Create an engine whose URL carries *new_database_name* as database."""
        url = cls.generate_url_with_database_name(connection, new_database_name)
        return create_engine(url)

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_schema_names(self, engine_name, use_sql_fallback):
        """Both test schemas appear in get_schema_names()."""
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            schema_names = dialect.get_schema_names(connection=c, use_sql_fallback=use_sql_fallback)
            assert self.schema in schema_names and self.schema_2 in schema_names

    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_schema_names_for_sql_and_odbc(self, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            schema_names_fallback = dialect.get_schema_names(connection=c, use_sql_fallback=True)
            schema_names_odbc = dialect.get_schema_names(connection=c)
            assert sorted(schema_names_fallback) == sorted(schema_names_odbc)

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_table_names(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            table_names = dialect.get_table_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback)
            assert "t" in table_names and "s" in table_names

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_table_names_for_sql_and_odbc(self, schema, engine_name):
        with self.engine_map[engine_name].begin() as c:
            # With schema=None the lookup uses the session's current schema.
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            dialect = EXADialect()
            table_names_fallback = dialect.get_table_names(connection=c, schema=schema, use_sql_fallback=True)
            table_names_odbc = dialect.get_table_names(connection=c, schema=schema)
            assert table_names_fallback == table_names_odbc

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_has_table_table_exists(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            has_table = dialect.has_table(connection=c, schema=self.schema, table_name="t",
                                          use_sql_fallback=use_sql_fallback)
            assert has_table, "Table %s.T was not found, but should exist" % self.schema

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_has_table_table_exists_not(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            has_table = dialect.has_table(connection=c, schema=self.schema, table_name="not_exist",
                                          use_sql_fallback=use_sql_fallback)
            assert not has_table, "Table %s.not_exist was found, but should not exist" % self.schema

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_has_table_for_sql_and_odbc(self, schema, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            has_table_fallback = dialect.has_table(connection=c, schema=schema, use_sql_fallback=True, table_name="t")
            has_table_odbc = dialect.has_table(connection=c, schema=schema, table_name="t")
            assert has_table_fallback == has_table_odbc, "Expected table %s.t with odbc and fallback" % schema

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_view_names(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            view_names = dialect.get_view_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback)
            assert "v" in view_names

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_view_names_for_sys(self, use_sql_fallback, engine_name):
        """The SYS schema exposes no user views."""
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            view_names = dialect.get_view_names(connection=c, schema="sys", use_sql_fallback=use_sql_fallback)
            assert len(view_names) == 0

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_view_definition(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name="v",
                                                          use_sql_fallback=use_sql_fallback)
            assert self.view_definition == view_definition

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_view_definition_view_name_none(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name=None,
                                                          use_sql_fallback=use_sql_fallback)
            assert view_definition is None

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_view_names_for_sql_and_odbc(self, schema, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            # Fixed: the schema was previously opened unconditionally;
            # every other compare test only opens it for schema=None.
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            view_names_fallback = dialect.get_view_names(connection=c, schema=schema, use_sql_fallback=True)
            view_names_odbc = dialect.get_view_names(connection=c, schema=schema)
            assert view_names_fallback == view_names_odbc

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_view_definition_for_sql_and_odbc(self, schema, engine_name):
        with self.engine_map[engine_name].begin() as c:
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            view_name = "v"
            dialect = EXADialect()
            view_definition_fallback = dialect.get_view_definition(
                connection=c, view_name=view_name, schema=schema, use_sql_fallback=True)
            view_definition_odbc = dialect.get_view_definition(
                connection=c, view_name=view_name, schema=schema)
            assert view_definition_fallback == view_definition_odbc

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("table", ["t", "s", "unknown"])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_columns_for_sql_and_odbc(self, schema, table, engine_name):
        with self.engine_map[engine_name].begin() as c:
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            dialect = EXADialect()
            columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema, use_sql_fallback=True)
            columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema)
            assert str(columns_fallback) == str(columns_odbc)  # object equality doesn't work for sqltypes

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_columns_none_table_for_sql_and_odbc(self, schema, engine_name):
        with self.engine_map[engine_name].begin() as c:
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            dialect = EXADialect()
            table = None
            columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema,
                                                   use_sql_fallback=True)
            columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema)
            # Fixed: this previously compared columns_fallback with itself,
            # so the test could never fail.
            assert str(columns_fallback) == str(columns_odbc)

    def make_columns_comparable(self, column_list):
        """Stringify column dicts (sqltypes lack __eq__) and sort by name."""
        return sorted([{k: str(v) for k, v in column.items()} for column in column_list], key=lambda k: k["name"])

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_columns(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            columns = dialect.get_columns(connection=c, schema=self.schema, table_name="t",
                                          use_sql_fallback=use_sql_fallback)
            expected = [{'default': None,
                         'is_distribution_key': False,
                         'name': 'pid1',
                         'nullable': False,
                         'type': INTEGER()},
                        {'default': None,
                         'is_distribution_key': False,
                         'name': 'pid2',
                         'nullable': False,
                         'type': INTEGER()},
                        {'default': None,
                         'is_distribution_key': False,
                         'name': 'name',
                         'nullable': True,
                         'type': VARCHAR(length=20)},
                        {'default': None,
                         'is_distribution_key': False,
                         'name': 'age',
                         'nullable': True,
                         'type': INTEGER()},
                        ]
            assert self.make_columns_comparable(expected) == self.make_columns_comparable(columns)

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_columns_table_name_none(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            columns = dialect.get_columns(connection=c, schema=self.schema, table_name=None,
                                          use_sql_fallback=use_sql_fallback)
            assert columns == []

    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("table", ["t", "s"])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_pk_constraint_for_sql_and_odbc(self, schema, table, engine_name):
        with self.engine_map[engine_name].begin() as c:
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema)
            dialect = EXADialect()
            pk_constraint_fallback = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema,
                                                               use_sql_fallback=True)
            pk_constraint_odbc = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema)
            assert str(pk_constraint_fallback) == str(pk_constraint_odbc)

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_pk_constraint(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name="t",
                                                      use_sql_fallback=use_sql_fallback)
            # Exasol auto-generates constraint names with a "sys_" prefix.
            assert pk_constraint["constrained_columns"] == ['pid1', 'pid2'] and \
                pk_constraint["name"].startswith("sys_")

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_pk_constraint_table_name_none(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name=None,
                                                      use_sql_fallback=use_sql_fallback)
            assert pk_constraint is None

    @pytest.mark.parametrize("table", ["t", "s"])
    @pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_compare_get_foreign_keys_for_sql_and_odbc(self, schema, table, engine_name):
        with self.engine_map[engine_name].begin() as c:
            if schema is None:
                c.execute("OPEN SCHEMA %s" % self.schema_2)
            dialect = EXADialect()
            foreign_keys_fallback = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema,
                                                             use_sql_fallback=True)
            foreign_keys_odbc = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema)
            assert str(foreign_keys_fallback) == str(foreign_keys_odbc)

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_foreign_keys(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name="s",
                                                    use_sql_fallback=use_sql_fallback)
            expected = [{'name': 'fk_test',
                         'constrained_columns': ['fid1', 'fid2'],
                         'referred_schema': 'test_get_metadata_functions_schema',
                         'referred_table': 't',
                         'referred_columns': ['pid1', 'pid2']}]
            assert foreign_keys == expected

    @pytest.mark.parametrize("use_sql_fallback", [True, False])
    @pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
    def test_get_foreign_keys_table_name_none(self, use_sql_fallback, engine_name):
        with self.engine_map[engine_name].begin() as c:
            dialect = EXADialect()
            foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name=None,
                                                    use_sql_fallback=use_sql_fallback)
            assert foreign_keys == []
| 19,654 | 6,442 |
# Generated by Django 2.0.5 on 2018-09-05 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set Czech verbose names ("Odpadní voda" / "Odpadní vody") on the
    wastewater models and add a ``uuid`` CharField to each property model."""
    dependencies = [
        ('entrepreneurial_property', '0014_auto_20180905_0629'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='brownfieldwastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        migrations.AlterModelOptions(
            name='developmentparkwastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        migrations.AlterModelOptions(
            name='greenfieldwastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        migrations.AlterModelOptions(
            name='industrialarealwastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        migrations.AlterModelOptions(
            name='officewastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        migrations.AlterModelOptions(
            name='scientificparkwastewater',
            options={'verbose_name': 'Odpadní voda', 'verbose_name_plural': 'Odpadní vody'},
        ),
        # NOTE(review): all existing rows of every model receive the same
        # literal UUID as a one-off default (preserve_default=False means it
        # is not kept on the field) — confirm the models generate fresh
        # uuids for new rows.
        migrations.AddField(
            model_name='brownfield',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='developmentpark',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='greenfield',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='industrialareal',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='office',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='scientificpark',
            name='uuid',
            field=models.CharField(default='6cfe1b7c-0699-466a-99c0-04523b11b3ea', max_length=36),
            preserve_default=False,
        ),
    ]
| 2,790 | 978 |
from flask import Flask, jsonify
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_track_usage import TrackUsage
from backend.config import config, Config
from celery import Celery
# Flask extensions created unbound; each is attached to the app in create_app().
db = SQLAlchemy()
ma = Marshmallow()
# Celery is configured from Config at import time; result backend is disabled.
celery = Celery(
    __name__,
    broker=Config.CELERY_BROKER_URL,
    # backend='db+sqlite:///results.sqlite'
)
track = TrackUsage()
def create_app(config_name):
    """Application factory.

    Builds a Flask app for the named configuration, wires up the shared
    extensions (SQLAlchemy, Marshmallow, Celery, TrackUsage), registers the
    API blueprint under /api/v1 and the JSON error handlers.

    :param config_name: key into the ``config`` mapping (e.g. 'development').
    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    # Removed leftover debug print(config_name); use app.logger if tracing is needed.
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    db.init_app(app)
    ma.init_app(app)
    celery.conf.update(app.config)
    # Imported here (not at module top) so SQLStorage is only pulled in once
    # an app context exists for its table setup.
    from flask_track_usage.storage.sql import SQLStorage
    with app.app_context():
        track.init_app(app, [SQLStorage(db=db)])
    # Local import avoids a circular import with modules that import this one.
    from backend.app.api.sample_api import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')
    register_error(app)
    return app
def register_error(app):
    """Register JSON error handlers for 400/404/500 on *app*.

    All three handlers now return a consistent ``{"msg": ...}`` JSON body;
    the 400 handler previously returned a bare string (and a debug print),
    while the commented-out jsonify line showed the intended behavior.
    """
    @app.errorhandler(400)
    def bad_request(e):
        return jsonify({"msg": "bad request"}), 400

    @app.errorhandler(404)
    def page_not_found(e):
        return jsonify({"msg": "page not found"}), 404

    @app.errorhandler(500)
    def internal_server_error(e):
        return jsonify({"msg": "internal server error"}), 500
| 1,353 | 476 |
"""Test_flutil module."""
import unittest
from pineboolib.fllegacy import flsettings
class TestSettings(unittest.TestCase):
    """Round-trip checks for FLSettings read/write helpers."""

    def test_settings(self) -> None:
        """Test read functions."""
        cfg = flsettings.FLSettings()
        # Every key is written twice: the second write must overwrite the first.
        cfg.writeEntryList("test_uno", [""])
        cfg.writeEntryList("test_uno", ["test_uno"])
        cfg.writeEntryList("test_dos", [])
        cfg.writeEntryList("test_dos", ["test_2_1", "test_2_2"])
        cfg.writeEntry("test_tres", "")
        cfg.writeEntry("test_tres", "test_tres")
        cfg.writeEntry("test_cuatro", False)
        cfg.writeEntry("test_cuatro", True)
        cfg.writeEntry("test_cinco", 0)
        cfg.writeEntry("test_cinco", 10)
        cfg.writeEntry("test_double", 0.00)
        cfg.writeEntry("test_double", 23.12)
        # List entries: stored values come back; unknown keys yield [].
        self.assertEqual(cfg.readListEntry("test_dos"), ["test_2_1", "test_2_2"])
        self.assertEqual(cfg.readListEntry("test_seis"), [])
        self.assertEqual(cfg.readListEntry("test_uno"), ["test_uno"])
        # Scalar entries: stored value wins, otherwise the supplied default.
        self.assertEqual(cfg.readEntry("test_tres"), "test_tres")
        self.assertEqual(cfg.readEntry("test_siete", "fallo"), "fallo")
        self.assertEqual(cfg.readNumEntry("test_cinco", 12), 10)
        self.assertEqual(cfg.readNumEntry("test_ocho", 14), 14)
        self.assertTrue(cfg.readBoolEntry("test_cuatro", False))
        self.assertTrue(cfg.readBoolEntry("test_nueve", True))
        self.assertTrue(cfg.readDoubleEntry("test_double", 23.12))
| 1,561 | 507 |
"""
FILE: setup.py
DESCRIPTION: Set up PyPI as a Python Library
DATE: 27-Jan-2020
"""
import setuptools

# Read the long description explicitly as UTF-8 so the build does not depend
# on the platform default encoding (e.g. cp1252 on Windows would break on
# any non-ASCII character in the README).
with open('README.md', encoding='utf-8') as f:
    README = f.read()

setuptools.setup(
    author='Nuttaphat Arunoprayoch',
    author_email='nat236919@gmail.com',
    name='pyhighstakes',
    license='MIT',
    description='PyHighStakes is a library offering card decks and games',
    # NOTE(review): PEP 440 normalizes the leading 'v' away ('v0.0.1' -> '0.0.1');
    # consider dropping it for clarity.
    version='v0.0.1',
    long_description=README,
    long_description_content_type='text/markdown',
    url='https://github.com/nat236919/pyhighstakes',
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
    install_requires=[],
    classifiers=[
        # Trove classifiers
        # (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop'
    ],
)
# Generated by Django 3.2.10 on 2022-01-27 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration for the accounts app: Persian verbose names on
    # the TOTP-request model and two field redefinitions. Do not hand-edit the
    # operations themselves.

    dependencies = [
        ('accounts', '0027_alter_totprequest_receiver'),
    ]

    operations = [
        # Persian display names ("one-time password requests").
        migrations.AlterModelOptions(
            name='totprequest',
            options={'verbose_name': 'درخواست\u200cهای رمز یکبار مصرف', 'verbose_name_plural': 'درخواست\u200cهای رمز یکبار مصرف'},
        ),
        # Delivery channel: only 'phone_number' is offered; verbose_name is Persian for "via".
        migrations.AlterField(
            model_name='totprequest',
            name='channel',
            field=models.CharField(choices=[('phone_number', 'Phone')], default='phone_number', max_length=20, verbose_name='از طریق'),
        ),
        # Auto-set creation timestamp; verbose_name is Persian for "creation date".
        migrations.AlterField(
            model_name='totprequest',
            name='created',
            field=models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد'),
        ),
    ]
| 894 | 335 |
import math
import torch
import copy
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from UnarySim.kernel import FSUAdd
from UnarySim.kernel import FSUMul
from UnarySim.kernel import FSULinear
from torch.cuda.amp import autocast
from typing import List, Tuple, Optional, overload, Union
from UnarySim.kernel import HUBHardsigmoid, HUBHardtanh
from UnarySim.kernel import truncated_normal, Round
from UnarySim.stream import BinGen, RNG, BSGen
from UnarySim.metric import ProgError
class FSUMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit with unary computing, corresponding to HardMGUCell with "hard" asserted.
    The scalehardsigmoid is scaled addition (x+1)/2, and hardtanh is direct pass.
    This module follows the uBrain implementation style to maximize hardware reuse.
    """
    # NOTE(review): hwcfg/swcfg defaults are mutable dicts. They are only read
    # here, but callers should still pass their own dicts to be safe.
    def __init__(
        self,
        input_size: int, hidden_size: int, bias: bool = True,
        weight_ext_f=None, bias_ext_f=None, weight_ext_n=None, bias_ext_n=None,
        hx_buffer=None,
        hwcfg={
            "width" : 8,
            "mode" : "bipolar",
            "depth" : 10,
            "depth_ismul" : 6,
            "rng" : "Sobol",
            "dimr" : 1
        },
        swcfg={
            "btype" : torch.float,
            "rtype" : torch.float,
            "stype" : torch.float
        }) -> None:
        super(FSUMGUCell, self).__init__()
        # Copy the hardware config locally; 'mode' and 'rng' are normalized to lowercase.
        self.hwcfg = {}
        self.hwcfg["width"] = hwcfg["width"]
        self.hwcfg["mode"] = hwcfg["mode"].lower()
        self.hwcfg["depth"] = hwcfg["depth"]
        self.hwcfg["depth_ismul"] = hwcfg["depth_ismul"]
        self.hwcfg["rng"] = hwcfg["rng"].lower()
        self.hwcfg["dimr"] = hwcfg["dimr"]
        # Software datatypes for binary, random and stream tensors.
        self.swcfg = {}
        self.swcfg["btype"] = swcfg["btype"]
        self.swcfg["rtype"] = swcfg["rtype"]
        self.swcfg["stype"] = swcfg["stype"]
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Only bipolar coding is supported by this cell.
        assert self.hwcfg["mode"] in ["bipolar"], \
            "Error: the hw config 'mode' in " + str(self) + " class requires 'bipolar'."
        # Gate weights act on the concatenation [h, x], hence the width hidden+input.
        assert (weight_ext_f.size()[0], weight_ext_f.size()[1]) == (hidden_size, hidden_size + input_size), "Incorrect weight_f shape."
        assert (weight_ext_n.size()[0], weight_ext_n.size()[1]) == (hidden_size, hidden_size + input_size), "Incorrect weight_n shape."
        if bias is True:
            assert bias_ext_f.size()[0] == hidden_size, "Incorrect bias_f shape."
            assert bias_ext_n.size()[0] == hidden_size, "Incorrect bias_n shape."
        # Linear layers for the forget (f) and new (n) gates, scale 1.
        hwcfg_linear={
            "width" : self.hwcfg["width"],
            "mode" : self.hwcfg["mode"],
            "scale" : 1,
            "depth" : self.hwcfg["depth"],
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_ug_tanh = FSULinear(hidden_size + input_size, hidden_size, bias=bias,
                                    weight_ext=weight_ext_f, bias_ext=bias_ext_f,
                                    hwcfg=hwcfg_linear, swcfg=swcfg)
        self.ng_ug_tanh = FSULinear(hidden_size + input_size, hidden_size, bias=bias,
                                    weight_ext=weight_ext_n, bias_ext=bias_ext_n,
                                    hwcfg=hwcfg_linear, swcfg=swcfg)
        # Scaled addition implements the "hard sigmoid" as (x+1)/2 (see class docstring).
        hwcfg_sigm={
            "mode" : self.hwcfg["mode"],
            "scale" : None,
            "dima" : 0,
            "depth" : self.hwcfg["depth"],
            "entry" : None
        }
        self.fg_sigmoid = FSUAdd(hwcfg_sigm, swcfg)
        # Static multiplier: hx is baked in as in_1_prob via hx_buffer.
        hwcfg_hx_mul={
            "width" : self.hwcfg["width"],
            "mode" : self.hwcfg["mode"],
            "static" : True,
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_hx_mul = FSUMul(in_1_prob=hx_buffer, hwcfg=hwcfg_hx_mul, swcfg=swcfg)
        # Non-static multiplier for fg * ng; note 'width' here is depth_ismul.
        hwcfg_ng_mul={
            "width" : self.hwcfg["depth_ismul"],
            "mode" : self.hwcfg["mode"],
            "static" : False,
            "rng" : self.hwcfg["rng"],
            "dimr" : self.hwcfg["dimr"]
        }
        self.fg_ng_mul = FSUMul(in_1_prob=None, hwcfg=hwcfg_ng_mul, swcfg=swcfg)
        # Three-entry scaled adder producing the cell output hy.
        hwcfg_hy={
            "mode" : self.hwcfg["mode"],
            "scale" : 1,
            "dima" : 0,
            "depth" : self.hwcfg["depth"],
            "entry" : 3
        }
        self.hy_add = FSUAdd(hwcfg_hy, swcfg)

    def check_forward_input(self, input: Tensor) -> None:
        """Raise if input's feature dimension does not match input_size."""
        if input.size(1) != self.input_size:
            raise RuntimeError("input has inconsistent input_size: got {}, expected {}".format(input.size(1), self.input_size))

    def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
        """Raise if hx's batch or feature dimension is inconsistent with input/hidden_size."""
        if input.size(0) != hx.size(0):
            raise RuntimeError("Input batch size {} doesn't match hidden{} batch size {}".format(input.size(0), hidden_label, hx.size(0)))
        if hx.size(1) != self.hidden_size:
            raise RuntimeError("hidden{} has inconsistent hidden_size: got {}, expected {}".format(hidden_label, hx.size(1), self.hidden_size))

    @autocast()
    def forward(self, input: Tensor, hx: Tensor) -> Tensor:
        """One MGU step on bitstream inputs; intermediates are kept as attributes."""
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx, '')
        # forget gate
        self.fg_ug_in = torch.cat((hx, input), 1)
        self.fg_in = self.fg_ug_tanh(self.fg_ug_in)
        # Adding a constant ones stream implements the scaled (x+1)/2 sigmoid.
        self.fg = self.fg_sigmoid(torch.stack([self.fg_in, torch.ones_like(self.fg_in)], dim=0))
        # new gate
        self.fg_hx = self.fg_hx_mul(self.fg)
        self.ng_ug_in = torch.cat((self.fg_hx, input), 1)
        self.ng = self.ng_ug_tanh(self.ng_ug_in)
        # output: hy = ng + (1 - fg*ng) contribution + fg*hx via the 3-entry adder
        self.fg_ng = self.fg_ng_mul(self.fg, self.ng)
        self.fg_ng_inv = 1 - self.fg_ng
        hy = self.hy_add(torch.stack([self.ng, self.fg_ng_inv, self.fg_hx], dim=0))
        return hy
class HUBMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit with hybrid unary binary computing, corresponding to HardMGUCell with "hard" asserted.
    The scalehardsigmoid is scaled addition (x+1)/2, and hardtanh is direct pass.
    This module follows the uBrain implementation style to maximize hardware reuse.
    """
    def __init__(
        self, input_size: int, hidden_size: int, bias: bool = True,
        weight_ext_f=None, bias_ext_f=None, weight_ext_n=None, bias_ext_n=None,
        hwcfg={
            "width" : 8,
            "mode" : "bipolar",
            "depth" : 10,
            "depth_ismul" : 6,
            "rng" : "Sobol",
            "dimr" : 1
        }) -> None:
        super(HUBMGUCell, self).__init__()
        # Copy the hardware config locally; 'mode' and 'rng' are normalized to lowercase.
        self.hwcfg = {}
        self.hwcfg["width"] = hwcfg["width"]
        self.hwcfg["mode"] = hwcfg["mode"].lower()
        self.hwcfg["depth"] = hwcfg["depth"]
        self.hwcfg["depth_ismul"] = hwcfg["depth_ismul"]
        self.hwcfg["rng"] = hwcfg["rng"].lower()
        self.hwcfg["dimr"] = hwcfg["dimr"]
        # Software datatypes are fixed to float for this hybrid cell.
        self.swcfg = {}
        self.swcfg["btype"] = torch.float
        self.swcfg["rtype"] = torch.float
        self.swcfg["stype"] = torch.float
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Externally-supplied weights/biases for the forget (f) and new (n) gates.
        self.weight_f = weight_ext_f
        self.bias_f = bias_ext_f
        self.weight_n = weight_ext_n
        self.bias_n = bias_ext_n
        # Config for the output progressive-error monitor, with unit scale.
        self.hwcfg_ope = copy.deepcopy(self.hwcfg)
        self.hwcfg_ope["scale"] = 1

    @autocast()
    def forward(self, input: Tensor, hx: Tensor) -> Tensor:
        """One cell step: build an FSUMGUCell for this hx and stream 2**width bit cycles."""
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)
        # A fresh unary cell is built per call because hx is passed in as the
        # static hx_buffer of the cell's multiplier.
        rnncell = FSUMGUCell(self.input_size, self.hidden_size, bias=self.bias,
                             weight_ext_f=self.weight_f, bias_ext_f=self.bias_f, weight_ext_n=self.weight_n, bias_ext_n=self.bias_n,
                             hx_buffer=hx,
                             hwcfg=self.hwcfg, swcfg=self.swcfg).to(input.device)
        # Bitstream generators for the input and the hidden state.
        iSource = BinGen(input, self.hwcfg, self.swcfg)().to(input.device)
        iRNG = RNG(self.hwcfg, self.swcfg)().to(input.device)
        iBSG = BSGen(iSource, iRNG, self.swcfg).to(input.device)
        hSource = BinGen(hx, self.hwcfg, self.swcfg)().to(input.device)
        hRNG = RNG(self.hwcfg, self.swcfg)().to(input.device)
        hBSG = BSGen(hSource, hRNG, self.swcfg).to(input.device)
        # Progressive-error monitor accumulates the unary output back to binary.
        oPE = ProgError(torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device),
                        self.hwcfg_ope).to(input.device)
        # Stream one bit per cycle for the full 2**width stream length.
        for c in range(2**self.hwcfg["width"]):
            idx = torch.zeros(iSource.size(), dtype=torch.long, device=input.device)
            iBS = iBSG(idx + c)
            hdx = torch.zeros(hSource.size(), dtype=torch.long, device=input.device)
            hBS = hBSG(hdx + c)
            oBS = rnncell(iBS, hBS)
            oPE.Monitor(oBS)
        hy = oPE()[0]
        return hy
class HardMGUCell(torch.nn.Module):
    """
    This is a minimal gated unit by replacing sigmoid and tanh with hubhardsigmoid and hubhardtanh if hard is set to True.
    Refer to "Simplified Minimal Gated Unit Variations for Recurrent Neural Networks" and "Gate-Variants of Gated Recurrent Unit (GRU) Neural Networks" for more details.
    This module is fully unary computing aware, i.e., all intermediate data are bounded to the legal unary range.
    This module follows the uBrain implementation style to maximize hardware reuse.
    This module assigns batch to dim[0].
    This module applies floating-point data.
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, hard: bool = True) -> None:
        super(HardMGUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.hard = hard
        # Idiomatic truth test instead of '== True'.
        if hard:
            self.fg_sigmoid = HUBHardsigmoid()
            self.ng_tanh = HUBHardtanh()
        else:
            self.fg_sigmoid = nn.Sigmoid()
            self.ng_tanh = nn.Tanh()
        # Fused gate weights act on the concatenation [h, x].
        self.weight_f = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        self.weight_n = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        if bias:
            self.bias_f = nn.Parameter(torch.empty(hidden_size))
            self.bias_n = nn.Parameter(torch.empty(hidden_size))
        else:
            self.register_parameter('bias_f', None)
            self.register_parameter('bias_n', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all parameters from a truncated normal with std 1/sqrt(hidden_size)."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data = truncated_normal(weight, mean=0, std=stdv)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """One MGU step: hy = clamp(ng - fg*ng + fg*hx); intermediates kept as attributes."""
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)
        # forget gate
        self.fg_ug_in = torch.cat((hx, input), 1)
        self.fg_in = HUBHardtanh()(F.linear(self.fg_ug_in, self.weight_f, self.bias_f))
        self.fg = self.fg_sigmoid(self.fg_in)
        # new gate
        self.fg_hx = self.fg * hx
        self.ng_ug_in = torch.cat((self.fg_hx, input), 1)
        self.ng = self.ng_tanh(F.linear(self.ng_ug_in, self.weight_n, self.bias_n))
        # output: (1 - fg)*ng + fg*hx, clamped to the legal unary range
        self.fg_ng = self.fg * self.ng
        self.fg_ng_inv = -self.fg_ng
        hy = HUBHardtanh()(self.ng + self.fg_ng_inv + self.fg_hx)
        return hy
class HardMGUCellFXP(torch.nn.Module):
    """
    This is a minimal gated unit by replacing sigmoid and tanh with hubhardsigmoid and hubhardtanh if hard is set to True.
    Refer to "Simplified Minimal Gated Unit Variations for Recurrent Neural Networks" and "Gate-Variants of Gated Recurrent Unit (GRU) Neural Networks" for more details.
    This module is fully unary computing aware, i.e., all intermediate data are bounded to the legal unary range.
    This module follows the uBrain implementation style to maximize hardware reuse.
    This module assigns batch to dim[0].
    This module applies fixed-point quantization using 'intwidth' and 'fracwidth'.
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, hard: bool = True,
                 intwidth=3, fracwidth=4) -> None:
        super(HardMGUCellFXP, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.hard = hard
        # Fixed-point rounding applied to every operand in forward().
        self.trunc = Round(intwidth=intwidth, fracwidth=fracwidth)
        if hard == True:
            self.fg_sigmoid = HUBHardsigmoid()
            self.ng_tanh = HUBHardtanh()
        else:
            self.fg_sigmoid = nn.Sigmoid()
            self.ng_tanh = nn.Tanh()
        # Fused gate weights act on the concatenation [h, x].
        self.weight_f = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        self.weight_n = nn.Parameter(torch.empty((hidden_size, hidden_size + input_size)))
        if bias:
            self.bias_f = nn.Parameter(torch.empty(hidden_size))
            self.bias_n = nn.Parameter(torch.empty(hidden_size))
        else:
            self.register_parameter('bias_f', None)
            self.register_parameter('bias_n', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all parameters from a truncated normal with std 1/sqrt(hidden_size)."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data = truncated_normal(weight, mean=0, std=stdv)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """Same MGU step as HardMGUCell, but every operand is quantized via self.trunc."""
        if hx is None:
            hx = torch.zeros(input.size()[0], self.hidden_size, dtype=input.dtype, device=input.device)
        # forget gate
        self.fg_ug_in = torch.cat((self.trunc(hx), self.trunc(input)), 1)
        self.fg_in = HUBHardtanh()(F.linear(self.trunc(self.fg_ug_in), self.trunc(self.weight_f), self.trunc(self.bias_f)))
        self.fg = self.fg_sigmoid(self.trunc(self.fg_in))
        # new gate
        self.fg_hx = self.trunc(self.fg) * self.trunc(hx)
        self.ng_ug_in = torch.cat((self.trunc(self.fg_hx), self.trunc(input)), 1)
        self.ng = self.ng_tanh(self.trunc(F.linear(self.trunc(self.ng_ug_in), self.trunc(self.weight_n), self.trunc(self.bias_n))))
        # output: (1 - fg)*ng + fg*hx, clamped to the legal unary range
        self.fg_ng = self.trunc(self.fg) * self.trunc(self.ng)
        self.fg_ng_inv = 0 - self.trunc(self.fg_ng)
        hy = HUBHardtanh()(self.trunc(self.ng) + self.trunc(self.fg_ng_inv) + self.trunc(self.fg_hx))
        return hy
| 14,552 | 5,106 |
from flask import Flask
from .config import DevConfig
from flask_bootstrap import Bootstrap
# Application object; instance_relative_config lets config.py live in the instance folder.
app = Flask(__name__,instance_relative_config= True)
app.config.from_object(DevConfig)
app.config.from_pyfile('config.py')
# Initializing Flask Extensions
bootstrap = Bootstrap(app)
# Imported at the bottom on purpose: views/error import `app`, so importing
# them earlier would create a circular import.
from app import views
from app import error
import json
import requests
from .bentosign_errors import BentoSignError
# An BentoSignObject is a dictionary where ``object.key=value`` is a shortcut for ``object[key]=value``
class BentoSignObject(dict):
    """Base REST resource: a dict where ``obj.key`` aliases ``obj['key']``.

    Subclasses implement :meth:`get_class_url` to enable the class-level
    CRUD helpers (``get``, ``find``, ``create``, ``delete``).
    """

    def __init__(self):
        super(BentoSignObject, self).__init__()

    # __getattr__/__setattr__/__delattr__ make object.name an alias for
    # object['name']; names starting with '_' stay real attributes.
    def __getattr__(self, key):
        if key[0] == '_':
            raise AttributeError('No such attribute: ' + key)
        if key in self:
            return self[key]
        raise AttributeError('No such attribute: ' + key)

    def __setattr__(self, key, value):
        if key[0] == '_':
            return super(BentoSignObject, self).__setattr__(key, value)
        self[key] = value

    def __delattr__(self, key):
        if key[0] == '_':
            return super(BentoSignObject, self).__delattr__(key)
        if key in self:
            del self[key]
        else:
            raise AttributeError('No such attribute: ' + key)

    @classmethod
    def get_base_url(cls):
        """Root URL of the BentoSign API."""
        return 'http://localhost:5000/api/v1.0/'

    @classmethod
    def get_class_url(cls):
        """Collection URL for this resource type; subclasses must override."""
        raise NotImplementedError()

    @classmethod
    def get(cls, id):
        """Fetch a single resource by id (HTTP GET)."""
        url = cls.get_class_url() + '/' + id
        response = requests.get(url)
        cls._process_response_code('GET', url, response)
        obj = cls()
        payload = json.loads(response.content)
        obj.load_object_from_data(payload['object'])
        return obj

    @classmethod
    def find(cls, **params):
        """List resources matching query *params* (HTTP GET)."""
        url = cls.get_class_url()
        response = requests.get(url, params=params)
        cls._process_response_code('GET', url, response)
        objects = []
        payload = json.loads(response.content)
        for object_data in payload['objects']:
            obj = cls()
            obj.load_object_from_data(object_data)
            objects.append(obj)
        return objects

    @classmethod
    def create(cls, **params):
        """Create a resource from *params* (HTTP POST) and return it."""
        url = cls.get_class_url()
        response = requests.post(url, data=params)
        cls._process_response_code('POST', url, response)
        payload = json.loads(response.content)
        obj = cls()
        obj.load_object_from_data(payload['object'])
        return obj

    @classmethod
    def delete(cls, id):
        """Delete the resource with the given id (HTTP DELETE)."""
        url = cls.get_class_url() + '/' + id
        response = requests.delete(url)
        cls._process_response_code('DELETE', url, response)

    def load_object_from_data(self, data):
        """Copy all key/value pairs from *data* into this object."""
        for key, value in data.items():
            self[key] = value

    @classmethod
    def _process_response_code(cls, method, url, response):
        """Raise BentoSignError for any non-200 response, using the server's error payload."""
        if response.status_code != 200:
            payload = json.loads(response.content)
            error_message = "%s %s returned %d" % (method, url, response.status_code)
            if payload:
                error = payload.get('error', None)
                if error:
                    # BUG FIX: `error` is a plain dict from json.loads, so the
                    # previous attribute access (error.code / error.message)
                    # raised AttributeError instead of reporting the API error.
                    error_message = "BentoSignError %d: %s" % (error['code'], error['message'])
            raise BentoSignError(error_message)
| 3,505 | 1,024 |
# 7x48 grid of per-cell prediction scores; all zeros here (placeholder/fixture data).
predictions = [
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ],
]
# Matching 7x48 grid of per-cell counts; a single non-zero count (7) in row 0, column 25.
prediction_counts = [
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
]
| 2,842 | 2,747 |
from datetime import datetime
from dash.dependencies import Input, Output
from app1 import app
@app.callback(Output('placeholder', 'children'),
              [Input('update_button', 'n_clicks')])
def update(n_clicks):
    """Show the current wall-clock time (HH:MM:SS) whenever the button is clicked."""
    now = datetime.now()
    return now.strftime('%H:%M:%S')
from django.http import HttpResponse
def test(request=None):
    """Trivial homepage view returning a fixed greeting.

    ``request`` is accepted (Django passes it when the view is routed) but
    unused; it defaults to None so the existing module-level call below
    keeps working unchanged.
    """
    return HttpResponse("Hello, world. #Homepage")

# NOTE(review): calling the view at import time looks like a leftover smoke
# test — the response is discarded. Confirm before removing.
test()
from app.career.models import WeeklyBusiness
from app.common.serializers import BaseModelSerializer
class WeeklyBusinessSerializer(BaseModelSerializer):
    """Serializer exposing WeeklyBusiness fields for the career API."""

    class Meta:
        model = WeeklyBusiness
        fields = (
            "id",
            "created_at",
            "updated_at",
            "image",
            "image_alt",
            "business_name",
            "body",
            "year",
            "week",
        )
        # NOTE(review): empty list disables all serializer-level validators
        # inherited from the model — presumably to bypass a uniqueness check;
        # confirm this is intentional.
        validators = []
| 460 | 121 |
__author__ = 'Jordi Vilaplana'
from pymongo import MongoClient
import detectlanguage
import json
import logging
import time
# File logging; WARNING level keeps the log small during normal operation.
logging.basicConfig(
    filename='emovix_twitter_detectlang.log',
    level=logging.WARNING,
    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    datefmt='%d-%m-%y %H:%M')

# Configuration parameters — all placeholders here; the real values are
# loaded from config.json in the __main__ block below.
detectlanguage_api_key = ""
database_host = ""
database_name = ""
twitterStatusCol = ""
client = None
db = None
if __name__ == '__main__':
    logging.debug('emovix_twitter_detectlang.py starting ...')
    # Load configuration
    with open('config.json', 'r') as f:
        config = json.load(f)
    detectlanguage_api_key = config['detectlanguage_api_key']
    database_host = config['database_host']
    database_name = config['database_name']
    twitterStatusCol = config['source_box'] + "_twitterStatus"
    client = MongoClient('mongodb://' + database_host + ':27017/')
    db = client[database_name]
    detectlanguage.configuration.api_key = detectlanguage_api_key
    # Poll loop: detect the language of tweets that have none yet, in
    # batches of 500 per detectlanguage API call.
    while True:
        try:
            # NOTE(review): user_status() is called twice per check (two API
            # round trips), and after the sleep the loop proceeds anyway
            # instead of re-checking — confirm this is intended.
            if detectlanguage.user_status()['requests'] >= detectlanguage.user_status()['daily_requests_limit']:
                logging.debug("Number of requests over daily limit.")
                time.sleep(60)
            statuses = db[twitterStatusCol].find({ "language_detections.language": { "$exists": False } })
            if statuses:
                count = 0
                batch_request = []
                batch_status = []
                for twitterStatus in statuses:
                    # Flush a full batch of 500 texts to the detection API.
                    if count >= 500:
                        logging.debug("Processing batch ...")
                        detections = detectlanguage.detect(batch_request)
                        if len(detections) != 500:
                            logging.error("ABNORMAL NUMBER OF LANGUAGE DETECTIONS: " + str(len(detections)))
                            break
                        count = 0
                        for detection in detections:
                            if len(detection) == 0:
                                # No detection returned: store an empty-language marker.
                                detection = {}
                                detection['source'] = 'detectlanguage'
                                detection['language'] = ''
                                batch_status[count]['language_detections'] = []
                                batch_status[count]['language_detections'].append(detection)
                            else:
                                # Keep only the top-ranked detection.
                                detection[0]['source'] = 'detectlanguage'
                                batch_status[count]['language_detections'] = []
                                batch_status[count]['language_detections'].append(detection[0])
                            db[twitterStatusCol].update( { "_id": batch_status[count]['_id']}, batch_status[count], upsert=True)
                            count += 1
                        count = 0
                        batch_request = []
                        batch_status = []
                    text = twitterStatus['text'].encode('utf-8')
                    batch_request.append(text)
                    batch_status.append(twitterStatus)
                    count += 1
                # NOTE(review): a final partial batch (< 500 items) is never
                # submitted before the next cursor query — likely a bug.
        except Exception as e:
            # Oh well, just keep going
            logging.error(e.__class__)
            logging.error(e)
            continue
        except KeyboardInterrupt:
            break
| 3,401 | 881 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
logger = get_logger(__name__)
def list_queues(client, include_metadata=False, marker=None, num_results=None,
                prefix=None, show_next_marker=None, **kwargs):
    """List storage queues, optionally appending the continuation marker.

    When show_next_marker is truthy the continuation token is appended to the
    result as {"nextMarker": ...}; otherwise a remaining token is only logged.
    """
    from ..track2_util import list_generator

    page_iterator = client.list_queues(
        name_starts_with=prefix,
        include_metadata=include_metadata,
        results_per_page=num_results,
        **kwargs,
    ).by_page(continuation_token=marker)
    queues = list_generator(pages=page_iterator, num_results=num_results)
    if show_next_marker:
        queues.append({"nextMarker": page_iterator.continuation_token})
    elif page_iterator.continuation_token:
        logger.warning('Next Marker:')
        logger.warning(page_iterator.continuation_token)
    return queues
| 1,178 | 296 |
"""Unit test for GETL load method."""
from os import environ
from unittest.mock import Mock
from pyspark.sql import types as T
from getl.blocks.load.entrypoint import (
batch_csv,
batch_delta,
batch_json,
batch_xml,
resolve,
)
# TODO: Need to adapt to different xml version depending on spark version
# Pull in the spark-xml package for the XML loader tests below.
environ[
    "PYSPARK_SUBMIT_ARGS"
] = "--packages com.databricks:spark-xml_2.11:0.9.0 pyspark-shell"

# Shared Spark schema used by the JSON/XML loader tests in this module.
SCHEMA = T.StructType(
    [
        T.StructField("name", T.StringType(), True),
        T.StructField("empid", T.IntegerType(), True),
        T.StructField("happy", T.BooleanType(), True),
        T.StructField("sad", T.BooleanType(), True),
        T.StructField("extra", T.BooleanType(), True),
    ]
)
# FUNCTIONS
def test_batch_json(spark_session, helpers):
    """batch_json should be able to load json files to a dataframe."""
    # Arrange
    helpers.create_s3_files({"schema.json": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample.json"),
            "JsonSchemaPath": "s3://tmp-bucket/schema.json",
            "Alias": "alias",
        },
    )

    # Act
    result_df = resolve(batch_json, conf)

    # Assert
    # Collect once instead of re-running the Spark job per assertion
    # (consistent with the csv/delta tests in this module).
    data = result_df.collect()
    assert data[0][0] == "Mark Steelspitter"
    assert data[1][0] == "Mark Two"
    assert data[2][1] == 11
    assert result_df.count() == 3
def test_batch_json_multiLine_options(spark_session, helpers):
    """batch_json should honor reader options such as multiLine."""
    # Arrange
    helpers.create_s3_files({"schema.json": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample_multiline.json"),
            "JsonSchemaPath": "s3://tmp-bucket/schema.json",
            "Alias": "alias",
            "Options": {"multiLine": True},
        },
    )

    # Act
    result_df = resolve(batch_json, conf)

    # Assert
    # Collect once instead of re-running the Spark job per assertion.
    data = result_df.collect()
    assert data[0][0] == "Mark Steelspitter"
    assert data[1][0] == "Mark Two"
    assert data[2][1] == 11
    assert result_df.count() == 3
def test_batch_json_fileregistry(spark_session, helpers):
    """batch_json should be able to load json files with file registry."""
    # Arrange: the registry mock resolves "base_path" to the sample file.
    file_path = helpers.relative_path(__file__, "./data/sample.json")
    file_registry_mock = Mock()
    file_registry_mock.get.return_value.load.return_value = [file_path]
    helpers.create_s3_files({"schema.json": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": "base_path",
            "JsonSchemaPath": "s3://tmp-bucket/schema.json",
            "FileRegistry": "SuperReg",
        },
        file_registry=file_registry_mock,
    )

    # Act
    result_df = resolve(batch_json, conf)

    # Assert: data loaded and the registry was queried with the right args.
    assert result_df.collect()[0][0] == "Mark Steelspitter"
    assert result_df.count() == 3
    file_registry_mock.get.assert_called_with("SuperReg")
    file_registry_mock.get.return_value.load.assert_called_with("base_path", ".json")
def test_batch_json_no_schema(spark_session, helpers):
    """batch_json should be able to load json files and inferSchema."""
    # Arrange: no JsonSchemaPath, so the loader must infer the schema.
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample.json"),
            "Alias": "alias",
        },
    )

    # Act
    result_df = resolve(batch_json, conf)

    # Assert
    # Collect once instead of re-running the Spark job per assertion.
    # Inferred columns come back alphabetically, hence the different indices.
    data = result_df.collect()
    assert data[0][0] == 9
    assert data[1][3] == "Mark Two"
    assert not data[2][2]
    assert result_df.count() == 3
def test_batch_xml(spark_session, helpers):
    """Check if the batch_xml loader can load XML documents."""
    helpers.create_s3_files({"schema.xml": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/employee.xml"),
            "JsonSchemaPath": "s3://tmp-bucket/schema.xml",
            # Each <employee> element becomes one row.
            "RowTag": "employee",
        },
    )

    # Act
    result_df = resolve(batch_xml, conf)

    # Assert
    assert result_df.collect()[0][0] == "name1"
    assert result_df.count() == 3
def test_batch_xml_no_schema(spark_session, helpers):
    """Test batch_xml can load XML doc without a given schema."""
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/employee.xml"),
            "RowTag": "employee",
        },
    )

    # Act
    result_df = resolve(batch_xml, conf)

    # Assert: with an inferred schema the column order/types differ from SCHEMA.
    assert result_df.collect()[0][0] == 123
    assert result_df.collect()[1][2] == "name2"
    assert result_df.collect()[2][1] == "false"
    assert result_df.count() == 3
def test_batch_xml_batching(spark_session, helpers):
    """batch_xml should load and union multiple XML files given as a list."""
    helpers.create_s3_files({"schema.xml": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": [
                helpers.relative_path(__file__, "./data/employee.xml"),
                helpers.relative_path(__file__, "./data/employee_2.xml"),
            ],
            "JsonSchemaPath": "s3://tmp-bucket/schema.xml",
            "RowTag": "employee",
        },
    )

    # Act
    result_df = resolve(batch_xml, conf)

    # Assert: 3 rows from the first file + 1 from the second.
    assert result_df.collect()[0][0] == "name1"
    assert result_df.count() == 4
def test_batch_xml_batching_new_column(spark_session, helpers):
    """batch_xml batching should align rows when a file adds a new column."""
    helpers.create_s3_files({"schema.xml": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": [
                helpers.relative_path(__file__, "./data/employee.xml"),
                helpers.relative_path(__file__, "./data/employee_2.xml"),
                helpers.relative_path(__file__, "./data/employee_3.xml"),
            ],
            "JsonSchemaPath": "s3://tmp-bucket/schema.xml",
            "RowTag": "employee",
        },
    )

    # Act
    result_df = resolve(batch_xml, conf)

    # Assert: the row from the third file carries the new column's value.
    assert result_df.collect()[4][3] is False
    assert result_df.count() == 5
def test_batch_xml_fileregistry(spark_session, helpers):
    """Check if the batch_xml loader can load XML documents with a file registry."""
    # Arrange: the registry mock resolves "base_path" to the sample file.
    file_path = helpers.relative_path(__file__, "./data/employee.xml")
    file_registry_mock = Mock()
    file_registry_mock.get.return_value.load.return_value = [file_path]
    helpers.create_s3_files({"schema.xml": SCHEMA.json()})
    conf = helpers.create_block_conf(
        "",
        {
            "Path": "base_path",
            "JsonSchemaPath": "s3://tmp-bucket/schema.xml",
            "RowTag": "employee",
            "FileRegistry": "SuperReg",
        },
        file_registry=file_registry_mock,
    )

    # Act
    result_df = resolve(batch_xml, conf)

    # Assert: data loaded and the registry was queried with the right args.
    assert result_df.collect()[0][0] == "name1"
    assert result_df.count() == 3
    file_registry_mock.get.assert_called_with("SuperReg")
    file_registry_mock.get.return_value.load.assert_called_with("base_path", ".xml")
def test_batch_csv(spark_session, helpers):
    """batch_csv should load a CSV with a header row and inferred types."""
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample.csv"),
            "Options": {"inferSchema": True, "header": True},
        },
    )
    # Act
    result_df = resolve(batch_csv, conf)
    # Assert: spot-check the first and third rows and the total row count.
    rows = result_df.collect()
    expected_rows = {
        0: ("Mark Steelspitter", 9, True),
        2: ("Mark Second", 11, False),
    }
    for index, (name, empid, happy) in expected_rows.items():
        assert rows[index]["name"] == name
        assert rows[index]["empid"] == empid
        assert rows[index]["happy"] is happy
    assert result_df.count() == 3
def test_batch_delta(spark_session, helpers):
    """batch_delta should load a Delta table from a local path."""
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample-delta"),
            "Options": {"inferSchema": True, "header": True},
        },
    )
    # Act
    result_df = resolve(batch_delta, conf)
    # Assert: spot-check the first and third rows and the total row count.
    rows = result_df.collect()
    expected_rows = {
        0: ("Mark Steelspitter", 9, True),
        2: ("Mark Second", 11, False),
    }
    for index, (name, empid, happy) in expected_rows.items():
        assert rows[index]["name"] == name
        assert rows[index]["empid"] == empid
        assert rows[index]["happy"] is happy
    assert result_df.count() == 3
def test_batch_delta_no_files(spark_session, helpers):
    """batch_delta should yield an empty dataframe when the table has no files."""
    conf = helpers.create_block_conf(
        "",
        {
            "Path": helpers.relative_path(__file__, "./data/sample-delta-nofiles"),
            "Options": {"inferSchema": True, "header": True},
        },
    )
    # Act
    result_df = resolve(batch_delta, conf)
    # Assert: nothing was loaded.
    assert not result_df.collect()
| 8,793 | 2,953 |
import numpy as np
import math
def selection_sort(arr):
    """
    Sort ``arr`` in place using selection sort and return it.

    - Time complexity: O(n²) comparisons, O(n) swaps.

    Args:
        arr (list): List to sort (mutated in place).

    Returns:
        (list): The same list object, sorted ascending.
    """
    n = len(arr)
    for i in range(n):
        # Find the index of the smallest element in arr[i:]. Starting the
        # search at arr[i] removes the math.inf sentinel and the shadowing
        # of the built-in name ``min`` present in the original.
        min_idx = i
        for j in range(i + 1, n):
            if arr[j] < arr[min_idx]:
                min_idx = j
        # Swap the minimum into position i (tuple swap, no temp variable).
        arr[i], arr[min_idx] = arr[min_idx], arr[i]
    return arr
# Testing: compare against the built-in sort on random input.
for i in range(100):
    arr = list(np.random.randint(1, 100, 100))
    # BUG FIX: the original computed sorted(arr) AFTER selection_sort had
    # already sorted ``arr`` in place, so the comparison was vacuously true
    # and could never catch a sorting bug. Capture the expected order
    # before mutating the list.
    builtin_sort = sorted(arr)
    sorted_arr = selection_sort(arr)
    if sorted_arr != builtin_sort:
        print("false")
| 745 | 258 |
import time
from machine import I2C, Pin, Timer
from dumb_cup.v53l0x import VL53L0X
from dumb_cup.adxl345 import ADXL345
from dumb_cup.spirit_level import SpiritLevel
###############
# Constants
###############
OZ_FULL = const(16)           # cup capacity in fluid ounces
INIT_SAMPES = const(50)       # ToF samples for the initial reading (sic: "SAMPES")
NUM_SAMPLES = const(15)       # ToF samples per routine measurement
RND_PLCS = const(1)           # decimal places kept when rounding readings
DE_BNC_DELAY = const(250)     # button debounce window in ms
BTN = const(26)               # GPIO pin of the main button
DE_BNC_TMR = const(0)         # hardware timer id used for debouncing
CHK_LVL_TMR = const(1)        # hardware timer id for the periodic level check
# Button/calibration state machine values (see on_btn).
BTN_ACTION_IN_PRG = const(99)
BTN_UNPRESSED = const(0)
BTN_PRESSED = const(1)
BTN_GOT_EMPTY = const(2)
BTN_GOT_FULL = const(3)
BTN_CALI_IN_PRG_3 = const(4)
SCL_PIN = const(21)           # I2C clock pin
SDA_PIN = const(22)           # I2C data pin
# Accelerometer tilt thresholds, in milli-g per axis.
X_THRESH = const(300)
Y_THRESH = const(300)
Z_THRESH = const(300)
CALI_F_NAME = "calibration.txt"     # calibration file name
SETTINGS_DIR_NAME = "/dumb_cup"     # directory holding the calibration file
###############
# Methods
###############
def map_val(x, in_min, in_max, out_min, out_max):
    """Linearly rescale ``x`` from [in_min, in_max] onto [out_min, out_max]."""
    span_out = out_max - out_min
    span_in = in_max - in_min
    return out_min + (x - in_min) * span_out / span_in
def on_not_level(x: int, y: int, z: int):
    """Report that the cup is tilted; axis readings are printed as given."""
    print("Not level")
    print("x:", x, "y:", y, "z:", z, "uint:mg")
def measure(tof: VL53L0X, num_samples: int, round_num: int) -> float:
    """Average ``num_samples`` ToF readings and map them to fluid ounces.

    Annotation fixes: call sites pass the VL53L0X time-of-flight sensor,
    not the ADXL345 accelerometer the original hint claimed, and
    round(x, round_num) returns a float, not an int.

    Reads module globals: vol_cof (scale factor), empty_val / full_val
    (calibration endpoints) — all set during the calibration section below.
    """
    global vol_cof
    samples = []
    # if len(samples) >= NUM_SAMPLES + 1: samples.pop(0)
    for i in range(num_samples):
        samples.append(tof.read())
    # Mean raw distance, scaled by the ounces-per-unit coefficient.
    dist = sum(samples) / len(samples) * vol_cof
    # Map the calibrated empty..full range onto 0..OZ_FULL ounces.
    dist = map_val(dist, empty_val, full_val, 0, OZ_FULL)
    return round(dist, round_num)
def blink(led, num: int = 1, delay: int = 200):
    """Flash ``led`` ``num`` times, holding each on/off phase ``delay`` ms."""
    for _ in range(num):
        for switch in (led.on, led.off):
            switch()
            time.sleep_ms(delay)
################
# Initialization
################
# Shared I2C bus for both sensors.
i2c = I2C(scl = Pin(SCL_PIN), sda = Pin(SDA_PIN), freq = 20000, timeout = 2000)
# Accelerometer
a345 = ADXL345(i2c)
# Spirit level wraps the accelerometer and calls on_not_level on tilt.
dd = SpiritLevel(a345, on_not_level, X_THRESH, Y_THRESH, Z_THRESH)
# Time-of-Flight
tof = VL53L0X(i2c)
# Tune VCSEL pulse periods (pre/final range) — values from sensor tuning.
tof.set_Vcsel_pulse_period(tof.vcsel_period_type[0], 18)
tof.set_Vcsel_pulse_period(tof.vcsel_period_type[1], 14)
tof.start()
# Blinker
led = Pin(2, Pin.OUT)
# Main button.
btn = Pin(BTN, Pin.IN, Pin.PULL_DOWN)
###############
# Calibration
###############
# One-shot timer + flag implementing software debounce for the button.
de_bnc_tmr = Timer(DE_BNC_TMR)
de_bnc_flag = False
def dnc_timer_expr(timer):
    """Debounce timer expired: re-arm button handling for the next press."""
    global de_bnc_flag
    de_bnc_flag = False
def on_btn(pin):
    """Button IRQ handler driving the two-step calibration state machine.

    First press: record the "empty" reading; second press: record the
    "full" reading. Presses inside the debounce window are ignored.
    """
    global tof
    global de_bnc_flag
    global cali_file
    global btn_state
    global btn
    global led
    if not de_bnc_flag:
        # Turn on debounce timer.
        de_bnc_flag = True
        de_bnc_tmr.init(mode=Timer.ONE_SHOT, period=DE_BNC_DELAY, callback=dnc_timer_expr)
        if btn_state == BTN_UNPRESSED:
            # Step 1: start a fresh calibration with the cup empty.
            btn_state = BTN_ACTION_IN_PRG
            # Erase old file.
            erase_cali()
            # Let the user know we are getting the
            # depth of the cup.
            print("Getting measurements when empty.")
            blink(led, 5, 300)
            empty = measure(tof, NUM_SAMPLES, RND_PLCS)
            fs_write_val("empty", empty)
            btn_state = BTN_GOT_EMPTY
        elif btn_state == BTN_GOT_EMPTY:
            # Step 2: record the "full" reading and finish calibration.
            btn_state = BTN_ACTION_IN_PRG
            # Let the user know we are getting the
            # depth of the cup.
            print("Getting measurements when full.")
            blink(led, 5, 300)
            full = measure(tof, NUM_SAMPLES, RND_PLCS)
            fs_write_val("full", full)
            btn_state = BTN_UNPRESSED
        elif btn_state == BTN_ACTION_IN_PRG:
            # A measurement is already running; ignore the press.
            print("Busy")
def erase_cali():
    """Delete the calibration file if it exists (no-op when it is missing)."""
    import os
    filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME
    try:
        # The original opened the file merely to probe for existence and
        # removed it while the handle was still open; removing directly and
        # catching OSError (file absent) is simpler and equivalent. The
        # bare ``except`` is also narrowed to OSError.
        os.remove(filepath)
    except OSError:
        pass
def fs_write_val(key: str, value: str) -> None:
    """Append a ``key=value`` line to the calibration file.

    Creates the settings directory on first use: the empty append-write is
    a probe — if it fails, the directory is assumed missing, gets created,
    and the file is opened in "w" mode instead.
    """
    import os
    filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME
    write_type = "a"
    try:
        with open(filepath, write_type) as f:
            f.write("")
    except OSError:
        os.mkdir(SETTINGS_DIR_NAME)
        write_type = "w"
    try:
        with open(filepath, write_type) as f:
            s = "{}={}\n".format(key, value)
            f.write(s)
    except OSError:
        print("FS error.")
def fs_read_cali():
    """Return the calibration file's lines, or [] when it cannot be read.

    BUG FIX: the original placed ``return []`` after an unconditional
    return inside the ``with`` block, so the fallback was unreachable and
    a missing file raised OSError instead. Wrapping the read in
    try/except makes the intended empty-list fallback effective. The
    unused local ``import os`` was dropped.
    """
    filepath = SETTINGS_DIR_NAME + "/" + CALI_F_NAME
    try:
        with open(filepath, "r") as f:
            return f.readlines()
    except OSError:
        return []
def uninstall():
    """Remove every entry inside the dumb_cup directory.

    Files are removed directly; os.remove raises OSError on a directory,
    in which case the entry is removed with os.rmdir instead.
    """
    import os
    os.chdir("dumb_cup")
    for item in os.listdir():
        # The leftover debug ``print(type(item))`` was removed and the
        # bare ``except`` narrowed to OSError.
        try:
            os.remove(item)
        except OSError:
            os.rmdir(item)
btn_state = BTN_UNPRESSED  # calibration state machine, advanced in on_btn()
btn.irq(on_btn)  # default trigger — presumably any pin edge; confirm on target port
#####################
# Volume coefficient
#####################
def vol_cof():
    """Parse the calibration file and return ``(empty_val, full_val)``.

    NOTE(review): if the file lacks an "empty" or "full" line, the local
    is never bound and the final return raises UnboundLocalError — confirm
    calibration always runs before this is called. This function's name is
    later shadowed by a module-level float of the same name.
    """
    lines = fs_read_cali()
    for value in lines:
        if "empty" in value:
            # Line format "empty=<float>\n": take the value, strip the newline.
            empty_val = float(value.split("=")[1][0:-1])
        elif "full" in value:
            full_val = float(value.split("=")[1][0:-1])
    return (empty_val, full_val)
#####################
# Check Liquid Level
#####################
def chk_liq_lvl(timer):
    """Periodic timer callback: sample the level and track consumption.

    Updates module globals cur_lvl/old_lvl and accumulates the per-period
    delta into ``consumed``.
    """
    global old_lvl
    global cur_lvl
    global consumed
    global tof
    cur_lvl = measure(tof, NUM_SAMPLES, RND_PLCS)
    delta = round((cur_lvl - old_lvl), RND_PLCS)
    consumed += round(delta, RND_PLCS)
    print("Current: {} Delta: {} Consumed: {}".format(cur_lvl, delta, consumed))
    old_lvl = cur_lvl
###############
# Calibration
###############
# Load the calibrated endpoints written by the button-driven flow.
empty_val, full_val = vol_cof()
# Sensor readings shrink as the cup fills, so the span is negated.
volume = (full_val - empty_val) * -1
# NOTE: this assignment shadows the vol_cof() function defined above.
vol_cof = OZ_FULL / volume
print("Initializing liquid gauge...")
cur_lvl = measure(tof, INIT_SAMPES, RND_PLCS)
old_lvl = cur_lvl
consumed = 0
print("Initial liquid level: {}".format(cur_lvl))
# Re-sample the level every 3 seconds.
chk_liq_lvl_tmr = Timer(CHK_LVL_TMR)
chk_liq_lvl_tmr.init(mode=Timer.PERIODIC, period=3000, callback=chk_liq_lvl)
# To begin conversion we need a calibration sequence.
# 1. Have the user empty the cup and level it on counter.
# 2. Have the user fill the cup.
#
# We could use the empty_reading (-137) and full_reading (-63) to
# calculate the linear volume of the cup (74).
#
# abs(empty_reading) - abs(full_reading) = linear_volume
#
# Then, we have two routes, we can convert linear_volume into
# millimeters. This would become linear_volume_mm.
#
# We can then take the cup diameter in millimeters (80mm),
# halve it to get the radius, and multiply the circular cross-section
# area (pi * r^2) by linear_volume_mm, which gives us total volume.
#
# V = πr^2h
# cubic_mm = (cup_diameter / 2)^2 * π * linear_volume_mm
# cubic_mm = (40)^2 * π * 74
# cubic_mm = 1600 * π * 74
# cubic_mm = 5026.54 * 74
# cubic_mm = 371964.57
#
# To get ounces, multiply mm3 by 3.3814e-5.
#
# ounces = 12.5776184143
#
###############
# Main Loop
###############
# Poll the spirit level forever; measurements run from timer/IRQ callbacks.
while True:
    dd.calculate()
| 6,819 | 2,726 |
from requests import api
from rest_framework.decorators import api_view
from rest_framework.response import Response
from asteroids.api import AsteroidApi
from asteroids.serializers import ApproachSerializer
from clients.neows import NeoWs
from django.shortcuts import render
@api_view(['GET'])
def get_closest_approach(request):
    """Return the closest asteroid approach from the NeoWs feed as JSON."""
    neows = AsteroidApi()
    approach_data = neows.get_closest_approach()
    approach_model = ApproachSerializer().create(approach_data)
    return Response(data=approach_model.__dict__())
@api_view(['GET'])
def get_largest_asteroid(request):
    """Return the largest asteroid approach from the NeoWs feed as JSON."""
    # Initialize NeoWs API
    asteroid_api = AsteroidApi()
    # Get data from NeoWs API
    largest_approach_data = asteroid_api.get_largest_approach()
    # Serialize into a model object and return its dict form.
    # NOTE(review): ``data=`` is passed by keyword here but positionally in
    # get_closest_approach — confirm the serializer's create() parameter
    # really is named ``data`` so both spellings are valid.
    ca_model = ApproachSerializer().create(data=largest_approach_data)
    return Response(data=ca_model.__dict__())
@api_view(['GET'])
def get_fastest_asteroid(request):
    """Return the fastest asteroid approach from the NeoWs feed as JSON."""
    # Initialize NeoWs API
    asteroid_api = AsteroidApi()
    # Get data from NeoWs API
    fastest_approach_data = asteroid_api.get_fastest_approach()
    # Serialize into a model object and return its dict form.
    ca_model = ApproachSerializer().create(data=fastest_approach_data)
    return Response(data=ca_model.__dict__())
@api_view(['GET'])
def get_notable_encounters(request):
    """Return the closest, largest and fastest approaches in one payload."""
    neows = AsteroidApi()
    closest = ApproachSerializer().create(neows.get_closest_approach())
    largest = ApproachSerializer().create(neows.get_largest_approach())
    fastest = ApproachSerializer().create(neows.get_fastest_approach())
    payload = {
        'closest_approach': closest.__dict__(),
        'largest_approach': largest.__dict__(),
        'fastest_approach': fastest.__dict__(),
    }
    return Response(data=payload)
| 2,042 | 650 |
"""
Shell test
# 2019 Simon Kirkby obeygiantrobot@gmail.com
"""
import cadquery as cq
import cqparts
from cqparts.params import PositiveFloat, Int
from cqparts.display import render_props
from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem
from cqparts.search import register
# base shaft type
@register(export="misc")
class Shell(cqparts.Part):
length = PositiveFloat(124, doc="shaft length")
diam = PositiveFloat(40, doc="shaft diameter")
count = Int(5)
def make(self):
shft = cq.Workplane("XY").circle(self.diam / 2).extrude(self.length)
inc = 360.0 / float(self.count)
for i in range(self.count):
b = cq.Workplane("XY").circle(self.diam / 4).extrude(self.length / 2)
b = b.translate((self.diam / 2, 0, self.length / 8))
b = b.rotate((0, 0, 0), (0, 0, 1), float(i * inc))
shft = shft.union(b)
c = cq.Workplane("XY").circle(self.diam / 8).extrude(self.length - 6)
c = c.translate((self.diam / 2, 0, 0))
c = c.rotate((0, 0, 0), (0, 0, 1), float(i * inc))
shft = shft.union(c)
shft = shft.faces(">Z").shell(-1)
return shft
if __name__ == "__main__":
from cqparts.display import display
s = Shell()
display(s)
| 1,313 | 495 |
#!/usr/bin/env python
import netsblox
# Smoke test against the live NetsBlox services plus prep_send's
# Python-to-NetsBlox value conversion rules.
editor = netsblox.Editor()
phoneiot = editor.phone_iot
public_roles = editor.public_roles
assert type(phoneiot.get_sensors()) == list
assert public_roles.get_public_role_id() == editor.get_public_role_id()
# get_color packs RGBA channels into a single signed 32-bit int; alpha
# defaults to fully opaque when omitted, and channels may be keywords.
assert phoneiot.get_color(12, 34, 54, 34) == 571220534
assert phoneiot.get_color(12, 34, 54) == -15982026
assert phoneiot.get_color(12, 34, blue=54) == -15982026
assert phoneiot.get_color(12, blue=54, green=34) == -15982026
assert type(editor.chart.default_options()) == dict
v = editor.hurricane_data.get_hurricane_data('katrina', 2005)
assert type(v) == list
for x in v:
    assert type(x) == dict
# prep_send: scalars pass through, tuples become lists, dicts become
# [key, value] pair lists (recursively), and None becomes ''.
assert netsblox.prep_send(12) == 12
assert netsblox.prep_send(12.5) == 12.5
assert netsblox.prep_send([1, 2, 3]) == [1, 2, 3]
assert netsblox.prep_send((1, 2, 3)) == [1, 2, 3]
assert netsblox.prep_send({ 'key': 'value' }) == [['key', 'value']]
assert netsblox.prep_send({ 'key': { 'more': 'stuff' } }) == [[ 'key', [[ 'more', 'stuff' ]] ]]
assert netsblox.prep_send([{ 'a': 1 }, { 'b': 2 }]) == [ [[ 'a', 1 ]], [[ 'b', 2 ]] ]
assert netsblox.prep_send(({ 'a': 1 }, { 'b': 2 })) == [ [[ 'a', 1 ]], [[ 'b', 2 ]] ]
assert netsblox.prep_send({ (1, 2, 3): 4 }) == [[ [1, 2, 3], 4 ]]
assert netsblox.prep_send(None) == ''
| 1,266 | 590 |
from gym.envs.registration import register
# Register the state- and pixel-observation variants of CarlaEnv; they
# share every setting except observations_type.
for _obs_type in ('state', 'pixel'):
    register(
        id='CarlaEnv-{}-v1'.format(_obs_type),
        entry_point='carla_env.carla_env:CarlaEnv',
        max_episode_steps=500,
        kwargs={
            'render': True,
            'carla_port': 2000,
            'changing_weather_speed': 0.1,
            'frame_skip': 1,
            'observations_type': _obs_type,
            'traffic': True,
            'vehicle_name': 'tesla.cybertruck',
            'map_name': 'Town05',
            'autopilot': True
        }
    )
| 872 | 350 |
#!/usr/bin/env python2.7
import os
# to be able to import our modules from the directory above
os.sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from parsers.CurrentTime import ParserCurrentTime
from parsers.Fragmentation import ParserFragmentation
from parsers.ProcessCPU import ParserProcessCPU
from _common import ssh, cycle, simple_command_with_timestamp, prepend_timestamp
import re
import sys
import time
# Open the SSH session and parse command-line options shared by the
# monitoring scripts (collection interval, history depth, kernel HZ, etc.).
sshc, args = ssh([
    { 'name':'--collect-time', 'type':int, 'default':5, 'help':'How long should each cycle take' },
    { 'name':'--history', 'type':int, 'default':45, 'help':'Maximum lines to show (45 by default)' },
    { 'name':'--hz', 'type':int, 'default':100, 'help':'CONFIG_HZ of device, do not change' },
    { 'name':'--raw', 'default':False, 'action':'store_true', 'help':'Show raw difference (not divided by interval)' },
    { 'name':'--no-cpu', 'default':False, 'action':'store_true', 'help':'Do not show CPU usage on each line' },
], """
""", supports_script=True)
def do(sshc, cache, history, hz, raw, show_cpu):
    """Collect one sample of fragmentation and CPU counters and redraw.

    The first call only seeds ``cache['last']`` and returns. Later calls
    compute per-interval differences (or raw deltas with --raw), print a
    screenful of current + historical rows, and rotate the history.
    Python 2 script: uses print statements and ``print >>sys.stderr``.
    """
    frags = ParserFragmentation(sshc).get()
    usage = ParserProcessCPU(sshc).get([])
    etime = ParserCurrentTime(sshc).get()
    if 'last' not in cache:
        cache['last'] = {
            'collected_on': etime.as_timestamp(),
            'frags': frags,
            'cpu' : usage,
        }
        return
    time_difference = etime.as_timestamp() - cache['last']['collected_on']
    # Per-category CPU percentages over the interval (jiffies -> percent).
    overall_cpus = {}
    for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']:
        overall_cpus[tmp] = int(round(((usage['global'][tmp] - cache['last']['cpu']['global'][tmp])*100)/(time_difference*hz)))
    pdiff = {}
    for p in frags['frags']:
        # NOTE(review): this first membership test can never fail — ``p``
        # iterates the very dict being tested; presumably a leftover guard.
        if p not in frags['frags']:
            print >>sys.stderr, 'Error: fragmentation key %s missing in current statistics' % (p,)
            return
        elif p not in cache['last']['frags']['frags']:
            print >>sys.stderr, 'Error: fragmentation key %s missing in previous statistics' % (p,)
            return
        if raw:
            pdiff[p] = frags['frags'][p] - cache['last']['frags']['frags'][p]
        else:
            pdiff[p] = int(round((((frags['frags'][p] - cache['last']['frags']['frags'][p]))/(time_difference))))
    # Clear the terminal: "cls" on Windows, ANSI escape codes elsewhere.
    if os.name == 'nt':
        os.system('cls')
        print "Packet fragmentation (written by Ondrej Holecek <oholecek@fortinet.com>)"
    else:
        print "\x1b[2J\x1b[H\033[1mPacket fragmentation (written by Ondrej Holecek <oholecek@fortinet.com>)\033[0m"
    filters_applied = "Applied filters: "
    if raw: filters_applied += "CNTS[raw] "
    else: filters_applied += "CNTS[diff] "
    filters_applied += "HIST[%i] " % (history,)
    print prepend_timestamp("Overall CPU utilization: %3.1f %% user, %3.1f %% system, %3.1f %% idle" % (
        overall_cpus['user'], overall_cpus['system'], overall_cpus['idle'],
    ), etime, 'fragtop')
    print prepend_timestamp("Overall CPU utilization: %3.1f %% iowait, %3.1f %% irq, %3.1f %% softirq" % (
        overall_cpus['iowait'], overall_cpus['irq'], overall_cpus['softirq'],
    ), etime, 'fragtop')
    print prepend_timestamp(filters_applied, etime, 'fragtop')
    # Table headers (CPU columns only with --no-cpu absent).
    prehdr = " | Received fragments reassembly counters | Outgoing fragmentation counters |"
    if show_cpu: prehdr += " Historical CPU percentage |"
    print prepend_timestamp(prehdr, etime, 'fragtop')
    hdr = " %7s | %9s | %9s | %9s | %9s | %9s | %9s | %9s |" % ("history", "fragments", "packets", "timeout", "error", "packets", "fragments", "unable",)
    if show_cpu: hdr += " %8s | %8s | %8s |" % ("system%", "irq%", "softirq%",)
    print prepend_timestamp(hdr, etime, 'fragtop')
    # current line
    current_line = " %7i " % ( 0, )
    for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'):
        current_line += "| %9i " % (pdiff[k],)
    current_line += "|"
    if show_cpu: current_line += " %8i | %8i | %8i |" % (overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],)
    print prepend_timestamp(current_line, etime, 'fragtop')
    # older lines
    for odata in cache['history']:
        # First column is the sample's age in seconds (negative offset).
        old_line = " %7i " % ( -int(round(etime.as_timestamp()-odata[0])),)
        for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'):
            old_line += "| %9i " % (odata[1][k],)
        old_line += "|"
        if show_cpu: old_line += " %8i | %8i | %8i |" % (odata[2], odata[3], odata[4],)
        print prepend_timestamp(old_line, etime, 'fragtop')
    # Prepend the new sample and trim the history to the requested depth.
    cache['history'].insert(0, (etime.as_timestamp(), pdiff, overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],) )
    if len(cache['history']) > history: cache['history'] = cache['history'][:history]
    cache['last']['frags'] = frags
    cache['last']['cpu'] = usage
    cache['last']['collected_on'] = etime.as_timestamp()
    sys.stdout.flush()
    return etime
if __name__ == '__main__':
    # The cache persists between cycles; ``cycle`` re-invokes ``do`` every
    # --collect-time seconds until interrupted or max cycles reached.
    cache = {'history':[]}
    try:
        cycle(do, {
            'sshc': sshc,
            'cache': cache,
            'history': args.history,
            'hz': args.hz,
            'raw': args.raw,
            'show_cpu': not args.no_cpu,
        }, args.collect_time, cycles_left=[args.max_cycles], debug=args.debug, interactive=args.interactive)
    except KeyboardInterrupt:
        # Tear down the SSH session cleanly on Ctrl-C.
        sshc.destroy()
| 5,039 | 2,032 |
from django.contrib import admin
from .models import Project, Profile, Rate
# Make the core models editable through the Django admin site.
for model in (Project, Profile, Rate):
    admin.site.register(model)
| 161 | 47 |
''' SCOTUS d-vec UISRNN inference'''
import sys
sys.path.append("./LegalUISRNN")
import numpy as np
import os, csv
import torch
import uisrnn
# Location of the preprocessed cases; each case contributes two files
# (<id>_seq.npy embeddings and <id>_id.npy labels), hence the /2.
case_path = '/scratch/jt2565/sco50/sco50wav_proc_case/'
total_cases = (len(os.listdir(case_path))/2)
# 90/10 train/test split. NOTE: total_cases is a float (true division),
# so train_cases is float too; it is only used in <=/> comparisons below.
train_cases = (total_cases//10)*9
print("# of training:", train_cases)
print("# total cases:" , total_cases)
trn_seq_lst = []
trn_cluster_lst = []
test_seq_lst = []
test_cluster_lst = []
verbose = False
if verbose:
    print("\n", "="*50, "\n Processing case-embedded d-vec")
#load case-embedded dvecs (with directory holding raw files)
for i, case in enumerate(os.listdir(case_path)):
    if case[-7:] == 'seq.npy':
        case_id = case.split('/')[-1].split('_')[0]
        train_sequence = np.load(case_path+case)
        train_clus = np.load(case_path+case_id+'_id.npy')
        train_cluster_id = []
        # Train cases keep string labels; test cases get int labels for
        # inference/evaluation. NOTE(review): ``i`` indexes directory
        # entries (both file kinds), not cases — confirm comparing it with
        # ``train_cases`` yields the intended split.
        for j in range(np.shape(train_clus)[0]):
            if i <= train_cases:
                train_cluster_id.append(str(train_clus[j]))
            else:
                train_cluster_id.append(int(train_clus[j]))
            # After the last label, freeze the list into an ndarray.
            if j==(np.shape(train_clus)[0]-1):
                train_cluster_id = np.asarray(train_cluster_id)
        if verbose:
            if i > train_cases:
                print("-- Stored as test case --")
            else:
                print("-- Stored as train case --")
            print('Processed case:', case_id)
            print('emb shape:', np.shape(train_sequence))
            print('label shape:', np.shape(train_clus))
            print('flat label:', np.shape(train_cluster_id))
        # Add to the training or testing lists (supports multiple cases).
        if i <= train_cases:
            trn_seq_lst.append(train_sequence)
            trn_cluster_lst.append(train_cluster_id)
        else:
            test_seq_lst.append(train_sequence)
            test_cluster_lst.append(train_cluster_id)
#Define UISRNN (**copy from training**) + load model
model_args, training_args, inference_args = uisrnn.parse_arguments()
model_args.verbosity=3 #can verbose=False for no prints except training
# must match saved model
model_args.observation_dim=256 #from hparam
model_args.enable_cuda = True
model_args.rnn_depth = 2
model_args.rnn_hidden_size = 32
model_args.rnn_dropout = .2
model_args.crp_alpha = .8
inference_args.test_iteration = 2
inference_args.beam_search = 10
model = uisrnn.UISRNN(model_args)
model.load('./sco50wav_250bs10.pth')
#inference and evaluation (shrunk for running: only the first test case)
pred = model.predict(test_seq_lst[0], inference_args)
ans = [i for i in test_cluster_lst[0]]
if verbose:
    print("-- Inference --")
    print(type(pred), type(pred[0]))
    print(type(ans), type(ans[0]))
    print(len(pred), len(ans))
# opening the csv file in 'w+' mode
file = open('./predicted_labels.csv', 'w+', newline ='')
# writing the data into the file
with file:
    write = csv.writer(file)
    write.writerows([pred])
# Count how many frames were assigned a non-zero speaker label, as a
# sanity check that the model is not predicting a single cluster.
tracker=0
for i, p in enumerate(pred):
    if p!=0:
        tracker+=1
if tracker>0:
    print('predicted other than 0! -> ', tracker)
# Evaluation script template for de-identification tool https://github.com/dieterich-lab/Anonymize
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import fbeta_score
from sklearn.model_selection import permutation_test_score
import matplotlib.pyplot as plt
import glob
from mlxtend.evaluate import mcnemar
import collections
import numpy as np
from mlxtend.evaluate import mcnemar_table
# Evaluating baseline
# Save predicted labels and gold standard into these variables
y_true = []
y_pred = []
# Define folder of predictions and gold standard
file_list = []
# ENTER the folders of gold standard and labeled data for baseline.
# Gold files are *.conll, predictions are *.tokenized_anon; sorting the
# combined list puts predictions in the first half, gold in the second.
types = ('*.conll', '*.tokenized_anon')
for files in types:
    for file in glob.glob(files):
        file_list.append(file)
file_list = sorted(file_list)
list1 = file_list[:int(len(file_list)/2)]
list2 = file_list[int(len(file_list)/2):]
d = 0
for i,j in zip(list2, list1):
    with open(i, "r", encoding="utf-8") as gold, open(j, "r", encoding="utf-8") as anno:
        # Token-aligned walk over both files; normalize spaces to tabs and
        # collapse the ORG tag to O so it is ignored in the binary task.
        for g, a in zip(gold, anno):
            g = g.replace(" ", "\t")
            a = a.replace(" ", "\t")
            a = a.split("\t")
            g = g.split("\t")
            g = [elem.replace("ORG", "O") for elem in g]
            a = [elem.replace("ORG", "O") for elem in a]
            if len(a) > 1 and len(g) > 1:
                # Analyze binary: 1 = token anonymized / PHI, 0 = other.
                if "ANON" in a[1]:
                    y_pred.append(1)
                else:
                    y_pred.append(0)
                # Gold label "O" (single char after strip) means negative.
                if len(g[1].strip()) == 1:
                    y_true.append(0)
                else:
                    y_true.append(1)
# Print evaluation report, confusion matrix and f2 scores
print("Evaluation baseline:")
print(classification_report(y_true, y_pred, labels=[1,0]))
print("Confusion matrix:")
print(confusion_matrix(y_true, y_pred, labels= [1,0]))
print("MCC",matthews_corrcoef(y_true, y_pred))
print("F2 - None",fbeta_score(y_true, y_pred, average=None, beta=2))
print("F2 - weighted",fbeta_score(y_true, y_pred, average='weighted', beta=2))
print("F2 - micro",fbeta_score(y_true, y_pred, average='micro', beta=2))
print("F2 - macro",fbeta_score(y_true, y_pred, average='macro', beta=2))
# Evaluating full featured model
# NOTE(review): this repeats the baseline pipeline verbatim with y_pred2;
# it assumes the working directory / glob patterns now point at the full
# model's output. Consider extracting a shared helper.
y_true = []
y_pred2 = []
import glob
file_list = []
# ENTER the folders of gold standard and labeled data for baseline
types = ('*.conll', '*.tokenized_anon')
for files in types:
    for file in glob.glob(files):
        file_list.append(file)
file_list = sorted(file_list)
list1 = file_list[:int(len(file_list)/2)]
list2 = file_list[int(len(file_list)/2):]
d = 0
for i,j in zip(list2, list1):
    with open(i, "r", encoding="utf-8") as gold, open(j, "r", encoding="utf-8") as anno:
        for g, a in zip(gold, anno):
            g = g.replace(" ", "\t")
            a = a.replace(" ", "\t")
            a = a.split("\t")
            g = g.split("\t")
            g = [elem.replace("ORG", "O") for elem in g]
            a = [elem.replace("ORG", "O") for elem in a]
            if len(a) > 1 and len(g) > 1:
                # Analyze binary: 1 = token anonymized / PHI, 0 = other.
                if "ANON" in a[1]:
                    y_pred2.append(1)
                else:
                    y_pred2.append(0)
                if len(g[1].strip()) == 1:
                    y_true.append(0)
                else:
                    y_true.append(1)
# Print evaluation report, confusion matrix and f2 scores
print("Evaluation full featured model:")
print(classification_report(y_true, y_pred2, labels= [1,0]))
print("Confusion matrix:")
print(confusion_matrix(y_true, y_pred2, labels= [1,0]))
print("MCC",matthews_corrcoef(y_true, y_pred2))
print("F2 - None",fbeta_score(y_true, y_pred2, average=None, beta=2))
print("F2 - weighted",fbeta_score(y_true, y_pred2, average='weighted', beta=2))
print("F2 - micro",fbeta_score(y_true, y_pred2, average='micro', beta=2))
print("F2 - macro",fbeta_score(y_true, y_pred2, average='macro', beta=2))
# McNemar test: paired significance test between the two models'
# per-token predictions against the shared gold labels.
y_true = np.array(y_true)
y_pred = np.array(y_pred)
y_pred2 = np.array(y_pred2)
tb = mcnemar_table(y_target=y_true, y_model1=y_pred, y_model2=y_pred2)
# NOTE: "contigency" typo below is in a runtime string, left unchanged.
print("McNemar contigency table")
print(tb)
chi2, p = mcnemar(ary=tb, corrected=True)
print('chi-squared:', chi2)
print('p-value:', p)
from contextlib import contextmanager
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.ui import WebDriverWait
class SeleniumTestCase(StaticLiveServerTestCase):
    """Live-server test case sharing one Firefox WebDriver per class."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: the original called super(StaticLiveServerTestCase, cls),
        # which resolves to StaticLiveServerTestCase's *parent* and skips
        # StaticLiveServerTestCase.setUpClass — the live server was never
        # started. Zero-argument super() resolves the MRO correctly.
        super().setUpClass()
        cls.browser = WebDriver()
        cls.browser.implicitly_wait(10)

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
        # Same fix as setUpClass: let StaticLiveServerTestCase stop the server.
        super().tearDownClass()

    @contextmanager
    def wait_for_page_load(self, timeout=10):
        """Context manager: block until the current page has been replaced.

        Usage: ``with self.wait_for_page_load(): link.click()`` — waits for
        the old <html> element to go stale, i.e. a navigation occurred.
        """
        old_page = self.browser.find_element_by_tag_name("html")
        yield
        WebDriverWait(self.browser, timeout).until(staleness_of(old_page))
| 880 | 259 |
#!/usr/bin/python3
# Iterate through a list of dictionaries, sorted by a field from the dictionaries
# Sample records, each carrying a LastModified timestamp.
response = [
    {'a':1, 'b':2222, 'LastModified':1320, 'c':33},
    {'a':11, 'LastModified':1229, 'b':222, 'c':3},
    {'LastModified':1400,'a':111, 'b':2, 'c':3333},
    {'a':1111, 'b':22, 'LastModified':180, 'c':333}
]
# Order the records oldest-first by their LastModified field.
response.sort(key=lambda record: record['LastModified'])
for record in response:
    print('x', record)
| 411 | 191 |
import setuptools
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setuptools.setup(
name="urbangrammar_graphics",
version="1.2.3",
author="Martin Fleischmann",
author_email="martin@martinfleischmann.net",
python_requires=">=3.6",
install_requires=["matplotlib", "seaborn", "numpy", "contextily"],
description="Visual style for Urban Grammar AI research project",
url="https://github.com/urbangrammarai/graphics",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
)
| 617 | 203 |
numTilings = 8  # number of overlapping tilings used by the tile coder
def tilecode(in1, in2, tileIndices):
    """Fill ``tileIndices`` with one active tile per tiling and return it.

    Each tiling covers an 11x11 grid of 0.6-wide tiles and is offset by an
    equal fraction of a tile width from the previous one.
    """
    offset = 0.6 / numTilings
    for tiling in range(numTilings):
        col = (in1 + tiling * offset) // 0.6
        row = (in2 + tiling * offset) // 0.6
        tileIndices[tiling] = int(tiling * 121 + 11 * col + row)
    return tileIndices
def printTileCoderIndices(in1, in2):
    """Tile-code the input pair and print the resulting active tiles."""
    indices = tilecode(in1, in2, [-1] * numTilings)
    print('Tile indices for input (', in1, ',', in2,') are : ', indices)
#printTileCoderIndices(0.1, 0.1)
#printTileCoderIndices(4.0, 2.0)
#printTileCoderIndices(5.99, 5.99)
#printTileCoderIndices(4.0, 2.1)
| 701 | 321 |
from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
    """Custom user model; extends Django's AbstractUser without changes."""
    pass
class Post(models.Model):
    """A post written by a user, optionally tagged with categories."""
    # Explicit auto-increment primary key (same as Django's implicit default).
    id = models.AutoField(primary_key=True)
    # Deleting the author deletes their posts.
    user = models.ForeignKey(User, on_delete=CASCADE)
    title = models.CharField(max_length=2048)
    body = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    categories = models.ManyToManyField(
        'Category',
        blank=True,
        related_name="posts",
    )

    def __str__(self) -> str:
        # Consistency: Category defines __str__ but Post did not, so posts
        # rendered as "Post object (n)" in the admin and shell.
        return f'{self.title}'
class Category(models.Model):
    """A label that groups posts; exposed on Post via related_name 'posts'."""
    name = models.CharField(max_length=1024)
    # URL-friendly identifier; not auto-generated from name here.
    slug = models.CharField(max_length=1024)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self) -> str:
        return f'{self.name}'
| 868 | 296 |