import secrets
from datetime import timedelta
from pathlib import Path
from secrets import token_urlsafe
from typing import List, Optional
from atoolbox import BaseSettings
from pydantic import EmailStr, constr
SRC_DIR = Path(__file__).parent
class Settings(BaseSettings):
pg_dsn = 'postgres://postgres@localhost:5432/em2'
cookie_name = 'em2'
sql_path: Path = SRC_DIR / 'models.sql'
create_app = 'em2.main.create_app'
worker_func = 'em2.worker.run_worker'
patch_paths: List[str] = ['em2.auth.patches']
auth_key = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa='
domain: str = 'localhost' # currently used as a proxy for development mode, should probably be changed
testing: bool = False # used only in unit tests, e.g. to use http rather than https for the em2 protocol
any_origin: bool = False # WARNING: this is dangerous, only use when testing
local_port: Optional[int] = 8000
commit: str = 'unknown'
build_time: str = 'unknown'
# used for hashing when the user in the db has no password, or no user is found
dummy_password = '__dummy_password__'
bcrypt_work_factor = 12
# login attempts per minute allowed before grecaptcha is required
easy_login_attempts = 3
# max. login attempts allowed per minute
max_login_attempts = 20
# how long micro sessions can last before they need to be checked with auth
micro_session_duration = 60 * 15
# how many seconds until an idle session expires
session_expiry = 86400 * 4
# used for testing only to slow down the UI app
slow_ui: int = None
internal_auth_key: constr(min_length=40, max_length=100) = secrets.token_urlsafe()
# em2 feature settings:
signing_secret_key: constr(min_length=64, max_length=64) = b'1' * 64
message_lock_duration: int = 3600 # how many seconds a lock holds for
max_em2_file_size: int = 1024 ** 3
smtp_handler = 'em2.protocol.smtp.LogSmtpHandler'
aws_access_key: str = None
aws_secret_key: str = None
aws_region: str = 'us-east-1'
# set here so they can be overridden during tests
ses_host = 'email.{region}.amazonaws.com'
ses_endpoint_url = 'https://{host}/'
ses_configuration_set = 'em2'
smtp_message_id_domain = 'email.amazonses.com'
s3_endpoint_url: str = None # only used when testing
s3_temp_bucket: str = None
s3_temp_bucket_lifetime: timedelta = 'P30D'
s3_file_bucket: str = None
# generate randomly to avoid leaking secrets:
ses_url_token: str = token_urlsafe()
aws_sns_signing_host = '.amazonaws.com'
aws_sns_signing_schema = 'https'
s3_cache_bucket: str = None
max_ref_image_size = 10 * 1024 ** 2
max_ref_image_count = 20
upload_pending_ttl = 3600
image_sizes = [(800, 800), (400, 400)]
image_thumbnail_sizes = [(120, 120)]
vapid_private_key: str = None
vapid_sub_email: EmailStr = None
class Config:
env_prefix = 'em2_'
case_insensitive = True
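# Usage sketch (values are illustrative): with env_prefix = 'em2_' and
# case-insensitive matching, any field above can be overridden from the
# environment before the Settings object is created, e.g.
#
#   import os
#   os.environ['EM2_PG_DSN'] = 'postgres://postgres@db:5432/em2'
#   settings = Settings()
#   assert settings.pg_dsn == 'postgres://postgres@db:5432/em2'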
|
import os,re
from waflib import Task,Errors,Node,TaskGen,Configure,Logs,Context
from brick_general import ChattyBrickTask
def configure(conf):
"""This function gets called by waf upon loading of this module in a configure method"""
conf.load('brick_general')
conf.find_program('calibre',var='CALIBRE_DRC')
conf.find_program('calibredrv',var='CALIBRE_DRV')
conf.env['CALIBRE_DRC_OPTIONS'] = [
'-64', '-hier', '-turbo' ,'-turbo_all',
]
conf.env['CALIBRE_DRV_OPTIONS'] = []
@TaskGen.taskgen_method
def get_calibre_drc_rule_file_path(self):
ret_node = self.bld.bldnode.find_node('calibre_drc_rules')
if not ret_node:
ret_node = self.bld.bldnode.make_node('calibre_drc_rules')
ret_node.mkdir()
return ret_node
@TaskGen.feature('calibre_drc')
def create_calibre_drc_task(self):
self.rule_file = self.get_calibre_drc_rule_file_path().make_node('calibre_drc_rules_'+self.name+'_'+self.cellname)
self.output_file_base = self.get_resultdir_node().make_node(self.cellname)
self.svdb = self.get_resultdir_node().make_node('svdb')
if not os.path.exists(self.svdb.abspath()):
self.svdb.mkdir()
output = self.output_file_base.change_ext('.drc.results')
input = [self.layout_gds]
f = open(self.rule_file.abspath(),"w")
f.write("""
LAYOUT PATH "{0}"
LAYOUT PRIMARY {1}
LAYOUT SYSTEM GDSII
LAYOUT CASE YES
//LAYOUT RENAME TEXT "/</\\[/g" "/>/\\]/g"
DRC RESULTS DATABASE "{2}.drc.results"
DRC MAXIMUM RESULTS ALL
DRC MAXIMUM VERTEX 4096
DRC CELL NAME NO
DRC SUMMARY REPORT "{2}.drc.summary" REPLACE
VIRTUAL CONNECT COLON NO
VIRTUAL CONNECT REPORT NO
DRC ICSTATION YES
""".format(self.layout_gds.abspath(),self.cellname,self.output_file_base.abspath()))
if hasattr(self,'unselect_checks') and len(self.unselect_checks)>0:
f.write('DRC UNSELECT CHECK\n')
for line in getattr(self,'unselect_checks',[]):
f.write('\t"'+line+'"\n')
for inc in self.includes:
f.write('\nINCLUDE '+inc.abspath())
input.append(inc)
f.close()
t = self.create_task('calibreDrcTask', input, output)
@TaskGen.taskgen_method
def get_calibre_drc_logfile_node(self):
return self.get_logdir_node().make_node('calibre_drc_'+self.cellname+'.log')
@TaskGen.taskgen_method
def get_calibre_drc_options(self):
conditional_options = ""
if hasattr(self,'hcells'):
conditional_options += ' -hcell '+self.hcells_file.abspath()
return conditional_options
@Task.always_run
class calibreDrcTask(ChattyBrickTask):
vars = ['CALIBRE_DRC','CALIBRE_DRC_OPTIONS']
run_str = '${CALIBRE_DRC} -drc ${gen.get_calibre_drc_options()} ${CALIBRE_DRC_OPTIONS} ${gen.rule_file.abspath()}'
def check_output(self,ret,out):
regex = re.compile(r"--- TOTAL RESULTS GENERATED = ([1-9]\d*)") # any non-zero total
for line in out.split('\n'):
match = regex.match(line)
if match:
Logs.error("Error in DRC: Found %s errors, for details see %s" % (match.group(1),self.generator.get_calibre_drc_logfile_node().abspath()))
ret = 1
with open(self.generator.get_calibre_drc_logfile_node().abspath(),'w') as f:
f.write(out)
return ret
@TaskGen.feature('calibre_rve_drc')
def create_calibre_rve_drc_task(self):
try:
getattr(self,'gds',None).abspath()
except AttributeError:
Logs.error('Please name an existing GDSII file for feature \'calibre_rve_drc\'')
report = self.get_resultdir_node().find_node(self.cellname+'.drc.results')
if not report:
Logs.error('Report '+self.get_resultdir_node().make_node(self.cellname+'.drc.results').abspath()+' not found. Please run feature \'calibre_drc\' first.')
input = [self.gds,report]
t = self.create_task('calibreRveDrcTask', input)
@Task.always_run
class calibreRveDrcTask(Task.Task):
run_str = "${CALIBRE_DRV} -m ${SRC[0].abspath()} -rve -drc ${SRC[1].abspath()}"
# for convenience
@Configure.conf
def calibre_drc(bld,*k,**kw):
set_features(kw,'calibre_drc')
return bld(*k,**kw)
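# Usage sketch (hypothetical wscript; the tool/module name in conf.load and the
# get_resultdir_node()/get_logdir_node() helpers are assumptions, expected to
# come from brick_general):
#
#   def configure(conf):
#       conf.load('calibre_drc')
#
#   def build(bld):
#       bld(
#           features   = 'calibre_drc',
#           name       = 'drc_top',
#           cellname   = 'chip_top',
#           layout_gds = bld.path.find_node('chip_top.gds'),
#           includes   = [bld.path.find_node('drc_rules.cal')],
#       )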
# vim: noexpandtab:
|
import rlkit.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in, sawyer_pusher_camera_upright_v2
from rlkit.launchers.launcher_util import run_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from rlkit.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.grill.cvae_experiments import (
grill_her_td3_offpolicy_online_vae_full_experiment,
)
from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv
from rlkit.torch.vae.conditional_conv_vae import DeltaCVAE
from rlkit.torch.vae.conditional_vae_trainer import DeltaCVAETrainer
from rlkit.data_management.online_conditional_vae_replay_buffer import \
OnlineConditionalVaeRelabelingBuffer
x_var = 0.2
x_low = -x_var
x_high = x_var
y_low = 0.5
y_high = 0.7
t = 0.05
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
env_class=SawyerMultiobjectEnv,
env_kwargs=dict(
num_objects=10,
object_meshes=None,
fixed_start=True,
num_scene_objects=[1],
maxlen=0.1,
action_repeat=1,
puck_goal_low=(x_low + 0.01, y_low + 0.01),
puck_goal_high=(x_high - 0.01, y_high - 0.01),
hand_goal_low=(x_low + 3*t, y_low + t),
hand_goal_high=(x_high - 3*t, y_high - t),
mocap_low=(x_low + 2*t, y_low , 0.0),
mocap_high=(x_high - 2*t, y_high, 0.5),
object_low=(x_low + 0.01, y_low + 0.01, 0.02),
object_high=(x_high - 0.01, y_high - 0.01, 0.02),
preload_obj_dict=[
dict(color2=(1, 0, 0)),
dict(color2=(0, 1, 0)),
dict(color2=(0, 0, 1)),
dict(color2=(1, .4, .7)),
dict(color2=(0, .4, .8)),
dict(color2=(.8, .8, 0)),
dict(color2=(1, .5, 0)),
dict(color2=(.4, 0, .4)),
dict(color2=(.4, .2, 0)),
dict(color2=(0, .4, .4)),
],
),
grill_variant=dict(
save_video=True,
custom_goal_sampler='replay_buffer',
online_vae_trainer_kwargs=dict(
beta=20,
lr=0,
),
save_video_period=50,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
max_path_length=100,
algo_kwargs=dict(
batch_size=128,
num_epochs=3000,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
vae_training_schedule=vae_schedules.never_train,
oracle_data=False,
vae_save_period=25,
parallel_vae_train=False,
dataset_path=None,
rl_offpolicy_num_training_steps=0,
),
td3_trainer_kwargs=dict(
discount=0.99,
# min_num_steps_before_training=4000,
reward_scale=1.0,
# render=False,
tau=1e-2,
),
replay_buffer_class=OnlineConditionalVaeRelabelingBuffer,
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
# decoder_distribution='bernoulli',
num_latents_to_sample=10,
),
power=-1,
relabeling_goal_sampling_mode='vae_prior',
),
exploration_goal_sampling_mode='vae_prior',
evaluation_goal_sampling_mode='reset_of_env',
normalize=False,
render=False,
exploration_noise=0.2,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
epsilon=0.05,
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
# vae_path="datasets/pusher_color_spectrum/itr_1900.pkl",
),
train_vae_variant=dict(
latent_sizes=4,
beta=10,
beta_schedule_kwargs=dict(
x_values=(0, 1500),
y_values=(1, 50),
),
num_epochs=1000,
dump_skew_debug_plots=False,
# decoder_activation='gaussian',
decoder_activation='sigmoid',
use_linear_dynamics=False,
generate_vae_dataset_kwargs=dict(
N=100000,
n_random_steps=50,
test_p=.9,
use_cached=False,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
random_rollout_data_set_to_goal=False,
conditional_vae_dataset=False,
save_trajectories=False,
enviorment_dataset=False,
tag="ccrig1",
),
# vae_trainer_class=DeltaCVAETrainer,
# vae_class=DeltaCVAE,
vae_kwargs=dict(
input_channels=3,
architecture=imsize48_default_architecture_with_more_hidden_layers,
decoder_distribution='gaussian_identity_variance',
),
# TODO: why the redundancy?
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=32,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
skew_dataset=False,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
region='us-west-2',
logger_variant=dict(
tensorboard=True,
),
)
search_space = {
'seedid': range(5),
'grill_variant.exploration_noise': [0.2, ],
'train_vae_variant.representation_size': [16, ], #(3 * objects, 3 * colors)
'grill_variant.algo_kwargs.num_trains_per_train_loop':[1000, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(grill_her_td3_offpolicy_online_vae_full_experiment, variants, run_id=1)
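# How the sweep above expands (a sketch, not part of the original script):
# DeterministicHyperparameterSweeper takes the cartesian product of the
# search_space values and sets each dotted key on a copy of `variant`, e.g.
#
#   'grill_variant.exploration_noise' -> variant['grill_variant']['exploration_noise']
#
# With 5 seed ids and a single value for every other key, this yields 5 variants.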
|
#!/usr/bin/env python3
# Need to run this from the directory containing this script and make_rules.py
# Run this script, then check output in the generated file, then run make_rules.py
import argparse
import json
import requests
import sys
import os
import boto3
from slugify import slugify
import common.common_lib as common_lib
DEV = "dev"
QA = "qa"
PROD = "prod"
DEV_CURATOR_URL = "https://dev-data.covid-19.global.health/api/sources"
QA_CURATOR_URL = "https://qa-data.covid-19.global.health/api/sources"
PROD_CURATOR_URL = "https://data.covid-19.global.health/api/sources"
ENV_TO_URL = {
DEV: DEV_CURATOR_URL,
QA: QA_CURATOR_URL,
PROD: PROD_CURATOR_URL
}
AWS_REGION = os.getenv("AWS_REGION", "eu-central-1")
LIMIT = 100 # when we go over 100 sources we will need to update the maximum limit in the curator API
SOURCE_RULE = {
"rule_name": "",
"target_name": "",
"source_name": "",
"job_name": "",
"description": ""
}
FILE_NAME = "rules.json"
JOB_DEF_ENV = "EPID_INGESTION_ENV"
JOB_DEF_SOURCE_ID = "EPID_INGESTION_SOURCE_ID"
parser = argparse.ArgumentParser(
description="Define rules for AWS EventBridge by curator sources",
usage="python define_rules.py [<environment>]"
)
parser.add_argument(
"environment", help="Which environment to list sources from", choices=[DEV, PROD]
)
args = parser.parse_args(sys.argv[1:2])
s3_client = boto3.client("s3", AWS_REGION)
auth_headers = common_lib.obtain_api_credentials(s3_client)
env = args.environment
url = f"{ENV_TO_URL.get(env, '')}?limit={LIMIT}"
print(f"Getting source information from {env} curator API")
response = requests.get(url, headers=auth_headers)
if response.status_code != 200:
print(f"Non-200 response from curator API: {response.status_code}")
sys.exit(1)
sources = response.json().get("sources")
batch_client = boto3.client("batch", AWS_REGION)
resp = batch_client.describe_job_definitions()
job_definitions = resp.get("jobDefinitions")
if not job_definitions:
print(f"No job definitions found in response from Batch: {resp}")
sys.exit(1)
source_id_to_job_def_name = {}
for job_def in job_definitions:
props = job_def.get("containerProperties", {})
job_def_env = props.get("environment", [])
for kv in job_def_env:
print(f"key-val: {kv}")
if kv.get("name", "") == JOB_DEF_SOURCE_ID:
val = kv.get("value")
if not val:
continue
source_id_to_job_def_name[val] = job_def.get("jobDefinitionName")
break
rules = []
for source in sources:
source_name = source.get("name")
source_id = source.get("_id")
source_rule = SOURCE_RULE.copy()
job_def_name = source_id_to_job_def_name.get(source_id)
if not job_def_name:
print(f"No job definition found using source ID {source_id}")
continue
source_rule["rule_name"] = f"{job_def_name}"
source_rule["target_name"] = job_def_name
source_rule["source_name"] = source_name
source_rule["job_name"] = job_def_name
source_rule["description"] = f"Scheduled Batch ingestion rule for source: {source_name} with ID: {source_id} for environment: {env}"
rules.append(source_rule)
print(f"Writing source information to {FILE_NAME}")
with open(FILE_NAME, "w") as f:
json.dump(rules, f, indent=4)
print("Done")
|
import os
import sys
import time
import optparse
sys.path.insert(0, "../../")
from cbfeeds import CbReport
from cbfeeds import CbFeed
from cbfeeds import CbFeedInfo
from stix.core import STIXPackage
from stix.utils.parser import EntityParser, UnsupportedVersionError
from cybox.bindings.file_object import FileObjectType
from cybox.bindings.domain_name_object import DomainNameObjectType
from cybox.bindings.address_object import AddressObjectType
from stix.utils import nsparser
import mixbox.namespaces
from mixbox.namespaces import Namespace
ADDITIONAL_NAMESPACES = [
Namespace('http://us-cert.gov/ciscp', 'CISCP',
'http://www.us-cert.gov/sites/default/files/STIX_Namespace/ciscp_vocab_v1.1.1.xsd')
]
def merge(d1, d2):
""" given two dictionaries, return a single dictionary
that merges the two.
"""
result = d1
if not d2: return result
for k in d2:
if k in result:
result[k].extend(d2[k])
else:
result[k] = d2[k]
return result
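# Example (illustrative): values are extended list-wise, and d1 is mutated:
#
#   iocs = {'md5': ['a' * 32]}
#   merge(iocs, {'md5': ['b' * 32], 'dns': ['evil.example.com']})
#   # iocs == {'md5': ['a' * 32, 'b' * 32], 'dns': ['evil.example.com']}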
def no_conditionals(obj):
""" return true only if:
- object has no conditionals applied or
- conditionals are just "Any Equals"
"""
# if they're not on the object...
if not hasattr(obj, "apply_condition") or not hasattr(obj, "condition"):
return True
# ...or if they're not defined...
if not obj.apply_condition or not obj.condition:
return True
# ... or if they're defined and any equals...
if obj.apply_condition.lower() == "any" and \
obj.condition.lower() == "equals":
return True
return False
def parse_File(file_obj):
""" parse a FileObjectType and return a list of md5s
if they exist and not subject to any conditionals. """
if not hasattr(file_obj, "Hashes") or not hasattr(file_obj.Hashes, "Hash"):
return
iocs = {}
iocs['md5'] = []
for h in file_obj.Hashes.Hash:
if not hasattr(h, "Type"):
continue
# only get md5s that are true if any are present. if not specified, assume so.
if no_conditionals(h.Type) and \
(h.Type.valueOf_ and h.Type.valueOf_.lower() == "md5"):
md5s = h.Simple_Hash_Value
iocs['md5'].extend(md5s.valueOf_.split(md5s.delimiter))
return iocs
def parse_observable(observable):
""" for each observable, if it's of a supported type,
the parse out the values and return. """
obj = observable.to_obj()
if not obj or not hasattr(obj, "Object") or not hasattr(obj.Object, "Properties"): return
prop = obj.Object.Properties
iocs = {}
if type(prop) == AddressObjectType:
ips = prop.Address_Value
if no_conditionals(ips):
iocs['ipv4'] = ips.valueOf_.split(ips.delimiter)
elif type(prop) == DomainNameObjectType:
domains = prop.Value
if no_conditionals(domains):
iocs['dns'] = domains.valueOf_.split(domains.delimiter)
elif type(prop) == FileObjectType:
merge(iocs, parse_File(prop))
return iocs
def parse_observables(observables):
""" iterate over the set of observables, parse out
visible indicators and return a dictionary of
iocs present and suitable for feed inclusion. """
iocs = {}
for observable in observables:
try:
merge(iocs, parse_observable(observable))
except Exception as e:
print(("-> Unexpected error parsing observable: {0}; continuing.".format(e)))
return iocs
def build_report(fname):
""" parse the provided STIX package and create a
CB Feed Report that includes all suitable observables
as CB IOCs """
# The python STIX libs are pedantic about document versions. See
# https://github.com/STIXProject/python-stix/issues/124
# parser = EntityParser()
# pkg = parser.parse_xml(fname, check_version=False)
pkg = STIXPackage.from_xml(fname)
iocs = {}
if pkg.observables:
iocs = parse_observables(pkg.observables.observables)
if pkg.indicators:
for indicator in pkg.indicators:
iocs = merge(iocs, parse_observables(indicator.observables))
ts = int(time.mktime(pkg.timestamp.timetuple())) if pkg.timestamp else int(time.mktime(time.gmtime()))
fields = {'iocs': iocs,
'score': 100, # does STIX have a severity field?
'timestamp': ts,
'link': 'http://stix.mitre.org',
'id': pkg.id_,
'title': pkg.stix_header.title,
}
if len(list(iocs.keys())) == 0 or all(len(iocs[k]) == 0 for k in iocs):
print(("-> No suitable observables found in {0}; skipping.".format(fname)))
return None
print(("-> Including %s observables from {0}.".format(sum(len(iocs[k]) for k in iocs), fname)))
return CbReport(**fields)
def build_cli_parser():
"""
generate OptionParser to handle command line switches
"""
usage = "usage: %prog [options]"
desc = "Best-effort conversion of one or more STIX Packages into a CB Feed"
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option("-i", "--input", action="store", default=None, type="string", dest="input",
help="STIX Package(s) to process. If a directory, will recursively process all .xml")
parser.add_option("-o", "--output", action="store", default=None, type="string", dest="output",
help="CB Feed output filename")
return parser
def build_reports(input_source):
""" given an input file or directory,
build a list of Cb Feed Reports.
This structure chooses to have one
report per STIX Package, with all
suitable observables associated.
Based on your STIX Package structure,
you may prefer a different arrangement.
"""
reports = []
if os.path.isfile(input_source):
reports.append(build_report(input_source))
else:
for root, dirs, files in os.walk(input_source):
for f in files:
if not f.endswith("xml"): continue
try:
rep = build_report(os.path.join(root, f))
if rep: reports.append(rep)
except UnsupportedVersionError as e:
print(("-> Skipping {0}\n"
"UnsupportedVersionError: {1}\n"
"see https://github.com/STIXProject/python-stix/issues/124".format(
f, e)))
except Exception as e:
print(("-> Unexpected error parsing {0}: {1}; skipping.".format(f, e)))
return reports
def create(input_source):
reports = build_reports(input_source)
# ****************************
# TODO - you probably want to change these values to reflect your
# local input source
feedinfo = {'name': 'stiximport',
'display_name': "STIX Package Import",
'provider_url': 'http://stix.mitre.org',
'summary': "This feed was imported from stix package(s) at %s" % input_source,
'tech_data': "There are no requirements to share any data to receive this feed.",
'icon': 'images/stix.gif'
}
feedinfo = CbFeedInfo(**feedinfo)
feed = CbFeed(feedinfo, reports)
return feed.dump()
if __name__ == "__main__":
parser = build_cli_parser()
options, args = parser.parse_args(sys.argv)
if not options.input or not options.output:
print("-> Must specify and input file/directory and output filename")
sys.exit(-1)
#
# Adding namespaces that aren't in defaults
#
def _update_namespaces():
for i in ADDITIONAL_NAMESPACES:
nsparser.STIX_NAMESPACES.add_namespace(i)
mixbox.namespaces.register_namespace(i)
_update_namespaces()
feed_data = create(options.input)
with open(options.output, "w") as f:
f.write(feed_data)
|
# Generated by Django 3.1a1 on 2020-07-07 07:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(help_text='Primary key for Base class.', primary_key=True, serialize=False)),
('last_modified', models.DateTimeField(auto_now=True, help_text='Date the class was last modified')),
('tag', models.CharField(blank=True, help_text='User defined tag for easy searches', max_length=200, null=True)),
('arxiv_id', models.CharField(help_text="Arxiv qualifier like '2000.01234'.", max_length=20)),
('authors', models.TextField(help_text='Authors of the reference as comma separated list.')),
('title', models.CharField(help_text='Title of the publication.', max_length=256)),
('journal', models.CharField(blank=True, help_text='Journal qualifier of the publication.', max_length=256, null=True)),
('url', models.URLField(blank=True, help_text='Link to access the publication, e.g., inspirehep.net link.', null=True)),
('misc', models.JSONField(blank=True, help_text='Additional optional information.', null=True)),
('user', models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
from flask import Blueprint
auth = Blueprint('auth',__name__)
from . import views, form
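# Registration sketch (hypothetical app factory; names outside this file are
# assumptions): the blueprint is attached to the application with
#
#   from flask import Flask
#   from .auth import auth
#
#   app = Flask(__name__)
#   app.register_blueprint(auth, url_prefix='/auth')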
|
# testing/engines.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import collections
import re
import typing
from typing import Any
from typing import Dict
from typing import Optional
import warnings
import weakref
from . import config
from .util import decorator
from .util import gc_collect
from .. import event
from .. import pool
from ..util import await_only
from ..util.typing import Literal
if typing.TYPE_CHECKING:
from ..engine import Engine
from ..engine.url import URL
from ..ext.asyncio import AsyncEngine
class ConnectionKiller:
def __init__(self):
self.proxy_refs = weakref.WeakKeyDictionary()
self.testing_engines = collections.defaultdict(set)
self.dbapi_connections = set()
def add_pool(self, pool):
event.listen(pool, "checkout", self._add_conn)
event.listen(pool, "checkin", self._remove_conn)
event.listen(pool, "close", self._remove_conn)
event.listen(pool, "close_detached", self._remove_conn)
# note we are keeping "invalidated" here, as those are still
# opened connections we would like to roll back
def _add_conn(self, dbapi_con, con_record, con_proxy):
self.dbapi_connections.add(dbapi_con)
self.proxy_refs[con_proxy] = True
def _remove_conn(self, dbapi_conn, *arg):
self.dbapi_connections.discard(dbapi_conn)
def add_engine(self, engine, scope):
self.add_pool(engine.pool)
assert scope in ("class", "global", "function", "fixture")
self.testing_engines[scope].add(engine)
def _safe(self, fn):
try:
fn()
except Exception as e:
warnings.warn(
"testing_reaper couldn't rollback/close connection: %s" % e
)
def rollback_all(self):
for rec in list(self.proxy_refs):
if rec is not None and rec.is_valid:
self._safe(rec.rollback)
def checkin_all(self):
# run pool.checkin() for all ConnectionFairy instances we have
# tracked.
for rec in list(self.proxy_refs):
if rec is not None and rec.is_valid:
self.dbapi_connections.discard(rec.dbapi_connection)
self._safe(rec._checkin)
# for fairy refs that were GCed and could not close the connection,
# such as asyncio, roll back those remaining connections
for con in self.dbapi_connections:
self._safe(con.rollback)
self.dbapi_connections.clear()
def close_all(self):
self.checkin_all()
def prepare_for_drop_tables(self, connection):
# don't do aggressive checks for third party test suites
if not config.bootstrapped_as_sqlalchemy:
return
from . import provision
provision.prepare_for_drop_tables(connection.engine.url, connection)
def _drop_testing_engines(self, scope):
eng = self.testing_engines[scope]
for rec in list(eng):
for proxy_ref in list(self.proxy_refs):
if proxy_ref is not None and proxy_ref.is_valid:
if (
proxy_ref._pool is not None
and proxy_ref._pool is rec.pool
):
self._safe(proxy_ref._checkin)
if hasattr(rec, "sync_engine"):
await_only(rec.dispose())
else:
rec.dispose()
eng.clear()
def after_test(self):
self._drop_testing_engines("function")
def after_test_outside_fixtures(self, test):
# don't do aggressive checks for third party test suites
if not config.bootstrapped_as_sqlalchemy:
return
if test.__class__.__leave_connections_for_teardown__:
return
self.checkin_all()
# on PostgreSQL, this will test for any "idle in transaction"
# connections. useful to identify tests with unusual patterns
# that can't be cleaned up correctly.
from . import provision
with config.db.connect() as conn:
provision.prepare_for_drop_tables(conn.engine.url, conn)
def stop_test_class_inside_fixtures(self):
self.checkin_all()
self._drop_testing_engines("function")
self._drop_testing_engines("class")
def stop_test_class_outside_fixtures(self):
# ensure no refs to checked out connections at all.
if pool.base._strong_ref_connection_records:
gc_collect()
if pool.base._strong_ref_connection_records:
ln = len(pool.base._strong_ref_connection_records)
pool.base._strong_ref_connection_records.clear()
assert (
False
), "%d connection recs not cleared after test suite" % (ln)
def final_cleanup(self):
self.checkin_all()
for scope in self.testing_engines:
self._drop_testing_engines(scope)
def assert_all_closed(self):
for rec in self.proxy_refs:
if rec.is_valid:
assert False
testing_reaper = ConnectionKiller()
@decorator
def assert_conns_closed(fn, *args, **kw):
try:
fn(*args, **kw)
finally:
testing_reaper.assert_all_closed()
@decorator
def rollback_open_connections(fn, *args, **kw):
"""Decorator that rolls back all open connections after fn execution."""
try:
fn(*args, **kw)
finally:
testing_reaper.rollback_all()
@decorator
def close_first(fn, *args, **kw):
"""Decorator that closes all connections before fn execution."""
testing_reaper.checkin_all()
fn(*args, **kw)
@decorator
def close_open_connections(fn, *args, **kw):
"""Decorator that closes all connections after fn execution."""
try:
fn(*args, **kw)
finally:
testing_reaper.checkin_all()
def all_dialects(exclude=None):
import sqlalchemy.dialects as d
for name in d.__all__:
# TEMPORARY
if exclude and name in exclude:
continue
mod = getattr(d, name, None)
if not mod:
mod = getattr(
__import__("sqlalchemy.dialects.%s" % name).dialects, name
)
yield mod.dialect()
class ReconnectFixture:
def __init__(self, dbapi):
self.dbapi = dbapi
self.connections = []
self.is_stopped = False
def __getattr__(self, key):
return getattr(self.dbapi, key)
def connect(self, *args, **kwargs):
conn = self.dbapi.connect(*args, **kwargs)
if self.is_stopped:
self._safe(conn.close)
curs = conn.cursor() # should fail on Oracle etc.
# should fail for everything that didn't fail
# above, connection is closed
curs.execute("select 1")
assert False, "simulated connect failure didn't work"
else:
self.connections.append(conn)
return conn
def _safe(self, fn):
try:
fn()
except Exception as e:
warnings.warn("ReconnectFixture couldn't close connection: %s" % e)
def shutdown(self, stop=False):
# TODO: this doesn't cover all cases
# as nicely as we'd like, namely MySQLdb.
# would need to implement R. Brewer's
# proxy server idea to get better
# coverage.
self.is_stopped = stop
for c in list(self.connections):
self._safe(c.close)
self.connections = []
def restart(self):
self.is_stopped = False
def reconnecting_engine(url=None, options=None):
url = url or config.db.url
dbapi = config.db.dialect.dbapi
if not options:
options = {}
options["module"] = ReconnectFixture(dbapi)
engine = testing_engine(url, options)
_dispose = engine.dispose
def dispose():
engine.dialect.dbapi.shutdown()
engine.dialect.dbapi.is_stopped = False
_dispose()
engine.test_shutdown = engine.dialect.dbapi.shutdown
engine.test_restart = engine.dialect.dbapi.restart
engine.dispose = dispose
return engine
@typing.overload
def testing_engine(
url: Optional["URL"] = None,
options: Optional[Dict[str, Any]] = None,
asyncio: Literal[False] = False,
transfer_staticpool: bool = False,
) -> "Engine":
...
@typing.overload
def testing_engine(
url: Optional["URL"] = None,
options: Optional[Dict[str, Any]] = None,
asyncio: Literal[True] = True,
transfer_staticpool: bool = False,
) -> "AsyncEngine":
...
def testing_engine(
url=None,
options=None,
asyncio=False,
transfer_staticpool=False,
):
if asyncio:
from sqlalchemy.ext.asyncio import create_async_engine as create_engine
else:
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
if not options:
use_reaper = True
scope = "function"
else:
use_reaper = options.pop("use_reaper", True)
scope = options.pop("scope", "function")
url = url or config.db.url
url = make_url(url)
if options is None:
if config.db is None or url.drivername == config.db.url.drivername:
options = config.db_opts
else:
options = {}
elif config.db is not None and url.drivername == config.db.url.drivername:
default_opt = config.db_opts.copy()
default_opt.update(options)
engine = create_engine(url, **options)
if transfer_staticpool:
from sqlalchemy.pool import StaticPool
if config.db is not None and isinstance(config.db.pool, StaticPool):
use_reaper = False
engine.pool._transfer_from(config.db.pool)
if scope == "global":
if asyncio:
engine.sync_engine._has_events = True
else:
engine._has_events = (
True # enable event blocks, helps with profiling
)
if isinstance(engine.pool, pool.QueuePool):
engine.pool._timeout = 0
engine.pool._max_overflow = 0
if use_reaper:
testing_reaper.add_engine(engine, scope)
return engine
def mock_engine(dialect_name=None):
"""Provides a mocking engine based on the current testing.db.
This is normally used to test DDL generation flow as emitted
by an Engine.
It should not be used in other cases, as assert_compile() and
assert_sql_execution() are much better choices with fewer
moving parts.
"""
from sqlalchemy import create_mock_engine
if not dialect_name:
dialect_name = config.db.name
buffer = []
def executor(sql, *a, **kw):
buffer.append(sql)
def assert_sql(stmts):
recv = [re.sub(r"[\n\t]", "", str(s)) for s in buffer]
assert recv == stmts, recv
def print_sql():
d = engine.dialect
return "\n".join(str(s.compile(dialect=d)) for s in engine.mock)
engine = create_mock_engine(dialect_name + "://", executor)
assert not hasattr(engine, "mock")
engine.mock = buffer
engine.assert_sql = assert_sql
engine.print_sql = print_sql
return engine
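# Usage sketch (table and expected DDL string are illustrative): mock_engine
# captures emitted statements instead of executing them, so DDL flow can be
# asserted on directly:
#
#   eng = mock_engine()
#   my_table.create(eng)
#   eng.assert_sql(['CREATE TABLE my_table (id INTEGER NOT NULL, PRIMARY KEY (id))'])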
class DBAPIProxyCursor:
"""Proxy a DBAPI cursor.
Tests can provide subclasses of this to intercept
DBAPI-level cursor operations.
"""
def __init__(self, engine, conn, *args, **kwargs):
self.engine = engine
self.connection = conn
self.cursor = conn.cursor(*args, **kwargs)
def execute(self, stmt, parameters=None, **kw):
if parameters:
return self.cursor.execute(stmt, parameters, **kw)
else:
return self.cursor.execute(stmt, **kw)
def executemany(self, stmt, params, **kw):
return self.cursor.executemany(stmt, params, **kw)
def __iter__(self):
return iter(self.cursor)
def __getattr__(self, key):
return getattr(self.cursor, key)
class DBAPIProxyConnection:
"""Proxy a DBAPI connection.
Tests can provide subclasses of this to intercept
DBAPI-level connection operations.
"""
def __init__(self, engine, cursor_cls):
self.conn = engine.pool._creator()
self.engine = engine
self.cursor_cls = cursor_cls
def cursor(self, *args, **kwargs):
return self.cursor_cls(self.engine, self.conn, *args, **kwargs)
def close(self):
self.conn.close()
def __getattr__(self, key):
return getattr(self.conn, key)
|
# -------------------------------------------------------------------------------------
# A Bidirectional Focal Attention Network implementation based on
# https://arxiv.org/abs/1909.11416.
# "Focus Your Attention: A Bidirectional Focal Attention Network for Image-Text Matching"
# Chunxiao Liu, Zhendong Mao, An-An Liu, Tianzhu Zhang, Bin Wang, Yongdong Zhang
#
# Written by Chunxiao Liu, 2019
# -------------------------------------------------------------------------------------
"""testall on MSCOCO"""
from vocab import Vocabulary
import evaluation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
RUN_PATH = "/userhome/BFAN/models/model_best.pth.tar"
DATA_PATH = "/userhome/"
evaluation.evalrank(RUN_PATH, data_path=DATA_PATH, split="testall", fold5=True)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.bigquery_connection_v1.types import connection
from google.cloud.bigquery_connection_v1.types import connection as gcbc_connection
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import ConnectionServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import ConnectionServiceGrpcTransport
class ConnectionServiceGrpcAsyncIOTransport(ConnectionServiceTransport):
"""gRPC AsyncIO backend transport for ConnectionService.
Manages external data source connections and credentials.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'bigqueryconnection.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'bigqueryconnection.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_connection(self) -> Callable[
[gcbc_connection.CreateConnectionRequest],
Awaitable[gcbc_connection.Connection]]:
r"""Return a callable for the create connection method over gRPC.
Creates a new connection.
Returns:
Callable[[~.CreateConnectionRequest],
Awaitable[~.Connection]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connection' not in self._stubs:
self._stubs['create_connection'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/CreateConnection',
request_serializer=gcbc_connection.CreateConnectionRequest.serialize,
response_deserializer=gcbc_connection.Connection.deserialize,
)
return self._stubs['create_connection']
@property
def get_connection(self) -> Callable[
[connection.GetConnectionRequest],
Awaitable[connection.Connection]]:
r"""Return a callable for the get connection method over gRPC.
Returns specified connection.
Returns:
Callable[[~.GetConnectionRequest],
Awaitable[~.Connection]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connection' not in self._stubs:
self._stubs['get_connection'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/GetConnection',
request_serializer=connection.GetConnectionRequest.serialize,
response_deserializer=connection.Connection.deserialize,
)
return self._stubs['get_connection']
@property
def list_connections(self) -> Callable[
[connection.ListConnectionsRequest],
Awaitable[connection.ListConnectionsResponse]]:
r"""Return a callable for the list connections method over gRPC.
Returns a list of connections in the given project.
Returns:
Callable[[~.ListConnectionsRequest],
Awaitable[~.ListConnectionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connections' not in self._stubs:
self._stubs['list_connections'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/ListConnections',
request_serializer=connection.ListConnectionsRequest.serialize,
response_deserializer=connection.ListConnectionsResponse.deserialize,
)
return self._stubs['list_connections']
@property
def update_connection(self) -> Callable[
[gcbc_connection.UpdateConnectionRequest],
Awaitable[gcbc_connection.Connection]]:
r"""Return a callable for the update connection method over gRPC.
Updates the specified connection. For security
reasons, also resets credential if connection properties
are in the update field mask.
Returns:
Callable[[~.UpdateConnectionRequest],
Awaitable[~.Connection]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connection' not in self._stubs:
self._stubs['update_connection'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/UpdateConnection',
request_serializer=gcbc_connection.UpdateConnectionRequest.serialize,
response_deserializer=gcbc_connection.Connection.deserialize,
)
return self._stubs['update_connection']
@property
def delete_connection(self) -> Callable[
[connection.DeleteConnectionRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete connection method over gRPC.
Deletes connection and associated credential.
Returns:
Callable[[~.DeleteConnectionRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connection' not in self._stubs:
self._stubs['delete_connection'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/DeleteConnection',
request_serializer=connection.DeleteConnectionRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_connection']
@property
def get_iam_policy(self) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_iam_policy' not in self._stubs:
self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/GetIamPolicy',
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['get_iam_policy']
@property
def set_iam_policy(self) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on the specified resource.
Replaces any existing policy.
Can return ``NOT_FOUND``, ``INVALID_ARGUMENT``, and
``PERMISSION_DENIED`` errors.
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_iam_policy' not in self._stubs:
self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/SetIamPolicy',
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['set_iam_policy']
@property
def test_iam_permissions(self) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a ``NOT_FOUND`` error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'test_iam_permissions' not in self._stubs:
self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary(
'/google.cloud.bigquery.connection.v1.ConnectionService/TestIamPermissions',
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs['test_iam_permissions']
def close(self):
return self.grpc_channel.close()
__all__ = (
'ConnectionServiceGrpcAsyncIOTransport',
)
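# Usage sketch (not part of the generated module; assumes Application Default
# Credentials and an existing project, and note that normal code goes through
# ConnectionServiceAsyncClient rather than the transport directly):
#
#   import asyncio
#   from google.cloud.bigquery_connection_v1.types import connection
#
#   async def main():
#       transport = ConnectionServiceGrpcAsyncIOTransport()
#       request = connection.ListConnectionsRequest(
#           parent='projects/my-project/locations/us',  # hypothetical resource
#       )
#       response = await transport.list_connections(request)
#       print(response)
#
#   asyncio.run(main())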
|
#!/usr/bin/env python3
import argparse
import os
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex_boards.platforms import arty
from ring import *
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
clk = platform.request("clk100")
rst_n = platform.request("cpu_reset")
self.comb += self.cd_sys.clk.eq(clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~rst_n)
platform.add_period_constraint(clk, 1e9/100e6)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCMini):
def __init__(self, sys_clk_freq=int(100e6), mode=mode.DOUBLE, **kwargs):
platform = arty.Platform(variant="a7-35", toolchain="vivado")
from litex.build.generic_platform import Pins, IOStandard
platform.add_extension([("do", 0, Pins("B7"), IOStandard("LVCMOS33"))])
SoCMini.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Arty A7-35",
ident_version = True)
self.submodules.crg = CRG(platform, sys_clk_freq)
led = RingControl(platform.request("do"), mode, 12, sys_clk_freq)
self.submodules.ledring = led
self.add_csr("ledring")
self.add_uartbone()
analyzer_signals = []
analyzer_signals += led.dbg
analyzer_signals += led.ring.dbg
from litescope import LiteScopeAnalyzer
self.submodules.analyzer = LiteScopeAnalyzer(
analyzer_signals,
depth = 512,
clock_domain ="sys",
csr_csv = "analyzer.csv"
)
self.add_csr("analyzer")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Arty A7-35")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--mode-single", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--flash", action="store_true", help="Flash Bitstream")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
m = mode.DOUBLE
if args.mode_single:
m = mode.SINGLE
soc = BaseSoC(
sys_clk_freq = 100e6,
mode = m,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
exit()
if __name__ == "__main__":
main()
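# Typical invocations (sketch; the script file name is an assumption):
#
#   ./arty_ring.py --build               # synthesize and build the bitstream
#   ./arty_ring.py --build --mode-single # build using the SINGLE ring mode
#   ./arty_ring.py --load                # load the bitstream onto the board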
|
import uuid
from django.contrib.contenttypes.models import ContentType
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from wagtail.core.models import Page
from .models import IDMapping
from .serializers import get_model_serializer
def pages_for_export(request, root_page_id):
root_page = get_object_or_404(Page, id=root_page_id)
pages = root_page.get_descendants(inclusive=True).specific()
ids_for_import = [
['wagtailcore.page', page.pk] for page in pages
]
objects = []
object_references = set()
for page in pages:
serializer = get_model_serializer(type(page))
objects.append(serializer.serialize(page))
object_references.update(serializer.get_object_references(page))
mappings = []
for i, (model, pk) in enumerate(object_references):
id_mapping, created = IDMapping.objects.get_or_create(
content_type=ContentType.objects.get_for_model(model),
local_id=pk,
defaults={'uid': uuid.uuid1(clock_seq=i)}
)
mappings.append(
[model._meta.label_lower, pk, id_mapping.uid]
)
return JsonResponse({
'ids_for_import': ids_for_import,
'mappings': mappings,
'objects': objects,
}, json_dumps_params={'indent': 2})
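# Wiring sketch (hypothetical urls.py; the route path is an assumption):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('export/<int:root_page_id>/', views.pages_for_export),
#   ]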
|
#!/usr/bin/env python3
# This file is a part of toml++ and is subject to the terms of the MIT license.
# Copyright (c) Mark Gillard <mark.gillard@outlook.com.au>
# See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.
# SPDX-License-Identifier: MIT
import sys
import utils
import io
import re
import json
import yaml
import math
import dateutil.parser
from pathlib import Path
from datetime import datetime, date, time
def sanitize(s):
s = re.sub(r'[ _:;\/-]+', '_', s, 0, re.I | re.M)
if s in ('bool', 'float', 'int', 'double', 'auto'):
s = s + '_'
return s
def python_value_to_tomlpp(val):
if isinstance(val, str):
if re.fullmatch(r'^[+-]?[0-9]+[eE][+-]?[0-9]+$', val, re.M):
return str(float(val))
else:
return 'R"({})"sv'.format(val)
elif isinstance(val, bool):
return 'true' if val else 'false'
elif isinstance(val, float):
if math.isinf(val):
return f'{"-" if val < 0.0 else ""}std::numeric_limits<double>::infinity()'
elif math.isnan(val):
return 'std::numeric_limits<double>::quiet_NaN()'
else:
return str(val)
elif isinstance(val, int):
if val == 9223372036854775807:
return 'INT64_MAX'
elif val == -9223372036854775808:
return 'INT64_MIN'
else:
return str(val)
elif isinstance(val, (TomlPPArray, TomlPPTable)):
return str(val)
elif isinstance(val, datetime):
offset = None
if val.tzinfo is not None:
offset = val.tzinfo.utcoffset(val)
mins = offset.total_seconds() / 60
offset = (int(mins / 60), int(mins % 60))
return 'toml::date_time{{ {{ {}, {}, {} }}, {{ {}, {}, {}, {}u }}{} }}'.format(
val.year,
val.month,
val.day,
val.hour,
val.minute,
val.second,
val.microsecond*1000,
'' if offset is None else ', {{ {}, {} }}'.format(offset[0], offset[1])
)
elif isinstance(val, date):
return 'toml::date{{ {}, {}, {} }}'.format(
val.year,
val.month,
val.day
)
elif isinstance(val, time):
return 'toml::time{{ {}, {}, {}, {} }}'.format(
val.hour,
val.minute,
val.second,
val.microsecond*1000
)
else:
raise ValueError(str(type(val)))
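# A few sample mappings produced by python_value_to_tomlpp (illustrative,
# derived from the branches above):
#   True                -> 'true'
#   math.inf            -> 'std::numeric_limits<double>::infinity()'
#   9223372036854775807 -> 'INT64_MAX'
#   'hello'             -> 'R"(hello)"sv'
#   '1e10'              -> '10000000000.0'  (string matching the exponent regex)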
class TomlPPArray:
def __init__(self, init_data=None):
self.values = init_data if init_data else list()
def render(self, indent = '', indent_declaration = False):
s = ''
if indent_declaration:
s += indent
if len(self.values) == 0:
s += 'toml::array{}'
else:
s += 'toml::array{'
for val in self.values:
s += '\n' + indent + '\t'
if isinstance(val, TomlPPArray) and len(self.values) == 1:
s += 'toml::inserter{'
if isinstance(val, (TomlPPTable, TomlPPArray)) and len(val) > 0:
s += val.render(indent + '\t')
else:
s += python_value_to_tomlpp(val)
if isinstance(val, TomlPPArray) and len(self.values) == 1:
s += '}'
s += ','
s += '\n' + indent + '}'
return s
def __str__(self):
return self.render()
def __len__(self):
return len(self.values)
class TomlPPTable:
def __init__(self, init_data=None):
self.values = init_data if init_data else dict()
def render(self, indent = '', indent_declaration = False):
s = ''
if indent_declaration:
s += indent
if len(self.values) == 0:
s += 'toml::table{}'
else:
s += 'toml::table{{'
for key, val in self.values.items():
s += '\n' + indent + '\t{ '
if isinstance(val, (TomlPPTable, TomlPPArray)) and len(val) > 0:
s += '\n' + indent + '\t\t{},'.format(python_value_to_tomlpp(str(key)))
#s += '\n' + val.render(indent + '\t\t')
s += ' ' + val.render(indent + '\t\t')
s += '\n' + indent + '\t'
else:
s += '{}, {} '.format(python_value_to_tomlpp(str(key)), python_value_to_tomlpp(val))
s += '},'
#else:
#s += '}\n'
s += '\n' + indent + '}}'
return s
def __str__(self):
return self.render()
def __len__(self):
return len(self.values)
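# Usage sketch (illustrative): build nested test-data literals and render them
# as toml++ C++ initialisers.
#   arr = TomlPPArray([1, 2, 3])
#   tbl = TomlPPTable({'answer': 42, 'values': arr})
#   print(tbl)  # emits a 'toml::table{{ ... }}' initialiser string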
def json_to_python(val):
if isinstance(val, dict):
if len(val) == 2 and "type" in val and "value" in val:
val_type = val["type"]
if val_type == "integer":
return int(val["value"])
elif val_type == "float":
return float(val["value"])
elif val_type == "string":
return str(val["value"])
elif val_type == "bool":
return True if val["value"].lower() == "true" else False
elif val_type == "array":
return json_to_python(val["value"])
elif val_type in ("datetime", "date", "time", "datetime-local"):
dt_val = dateutil.parser.parse(val["value"])
if val_type == "date":
return dt_val.date()
elif val_type == "time":
return dt_val.time()
else:
return dt_val
else:
raise ValueError(val_type)
else:
vals = dict()
for k,v in val.items():
vals[k] = json_to_python(v)
return vals
elif isinstance(val, list):
vals = list()
for v in val:
vals.append(json_to_python(v))
return vals
else:
raise ValueError(str(type(val)))
def python_to_tomlpp(node):
if isinstance(node, dict):
table = TomlPPTable()
for key, val in node.items():
table.values[key] = python_to_tomlpp(val)
return table
elif isinstance(node, (set, list, tuple)):
array = TomlPPArray()
for val in node:
array.values.append(python_to_tomlpp(val))
return array
else:
return node
class TomlTest:
def __init__(self, file_path, is_valid_case):
self.__name = file_path.stem
self.__identifier = sanitize(self.__name)
self.__data = utils.read_all_text_from_file(file_path, logger=True).strip()
self.condition = ''
self.requires_unicode = False
if is_valid_case:
self.__expected = True
path_base = str(Path(file_path.parent, file_path.stem))
yaml_file = Path(path_base + r'.yaml')
if yaml_file.exists():
self.__expected = python_to_tomlpp(yaml.load(
utils.read_all_text_from_file(yaml_file, logger=True),
Loader=yaml.FullLoader
))
else:
json_file = Path(path_base + r'.json')
if json_file.exists():
self.__expected = python_to_tomlpp(json_to_python(json.loads(
utils.read_all_text_from_file(json_file, logger=True),
)))
else:
self.__expected = False
def name(self):
return self.__name
def identifier(self):
return self.__identifier
def data(self):
return self.__data
def expected(self):
return self.__expected
def __str__(self):
return 'static constexpr auto {} = R"({})"sv;'.format(
self.__identifier,
self.__data,
)
def load_tests(source_folder, is_valid_set, ignore_list):
source_folder = source_folder.resolve()
utils.assert_existing_directory(source_folder)
files = utils.get_all_files(source_folder, all="*.toml")
if ignore_list:
files = [f for f in files if f.stem not in ignore_list]
return [TomlTest(f, is_valid_set) for f in files]
def set_condition(tests, condition, names):
for test in tests:
if test.name() in names:
test.condition = condition
def load_valid_inputs(tests, extern_root):
tests['valid']['burntsushi'] = load_tests(Path(extern_root, 'toml-test', 'tests', 'valid'), True, (
# newline/escape handling tests. these get broken by I/O (I test them separately)
'string-escapes',
# bugged: https://github.com/BurntSushi/toml-test/issues/58
'datetime'
))
tests['valid']['iarna'] = load_tests(Path(extern_root, 'toml-spec-tests', 'values'), True, (
# these are stress-tests for 'large' datasets. I test these separately. Having them inline in C++ code is insane.
'qa-array-inline-1000',
'qa-array-inline-nested-1000',
'qa-key-literal-40kb',
'qa-key-string-40kb',
'qa-scalar-literal-40kb',
'qa-scalar-literal-multiline-40kb',
'qa-scalar-string-40kb',
'qa-scalar-string-multiline-40kb',
'qa-table-inline-1000',
'qa-table-inline-nested-1000',
# newline/escape handling tests. these get broken by I/O (I test them separately)
'spec-newline-1',
'spec-newline-2',
'spec-newline-3',
'spec-string-escape-1',
'spec-string-escape-2',
'spec-string-escape-3',
'spec-string-escape-4',
'spec-string-escape-5',
'spec-string-escape-6',
'spec-string-escape-7',
'spec-string-escape-8',
'spec-string-escape-9',
# bugged: https://github.com/iarna/toml-spec-tests/issues/3
'spec-date-time-6',
'spec-date-time-local-2',
'spec-time-2',
# breaks gcc:
'spec-string-basic-multiline-4',
))
def load_invalid_inputs(tests, extern_root):
tests['invalid']['burntsushi'] = load_tests(Path(extern_root, 'toml-test', 'tests', 'invalid'), False, (
# false negatives after TOML 0.4.0
'array-mixed-types-arrays-and-ints',
'array-mixed-types-ints-and-floats',
'array-mixed-types-strings-and-ints'
))
set_condition(tests['invalid']['burntsushi'], '!TOML_LANG_UNRELEASED', (
'datetime-malformed-no-secs',
'inline-table-linebreak',
'multi-line-inline-table',
'string-byte-escapes'
))
tests['invalid']['iarna'] = load_tests(Path(extern_root, 'toml-spec-tests', 'errors'), False, (
# I test these explicitly in the other test files (they get broken by I/O)
'comment-control-1',
'comment-control-2',
'comment-control-3',
'comment-control-4',
'string-basic-control-1',
'string-basic-control-2',
'string-basic-control-3',
'string-basic-control-4',
'string-basic-multiline-control-1',
'string-basic-multiline-control-2',
'string-basic-multiline-control-3',
'string-basic-multiline-control-4',
'string-literal-control-1',
'string-literal-control-2',
'string-literal-control-3',
'string-literal-control-4',
'string-literal-multiline-control-1',
'string-literal-multiline-control-2',
'string-literal-multiline-control-3',
'string-literal-multiline-control-4'
))
set_condition(tests['invalid']['iarna'], '!TOML_LANG_UNRELEASED', (
'inline-table-trailing-comma',
))
def requires_unicode(s):
for c in s:
if ord(c) > 127:
return True
return False
def write_test_file(name, test_cases):
conditions = set()
for test in test_cases:
conditions.add(test.condition)
test_file_path = Path(utils.entry_script_dir(), '..', 'tests', rf'conformance_{sanitize(name.strip())}.cpp').resolve()
print(rf'Writing to {test_file_path}')
with open(test_file_path, 'w', encoding='utf-8', newline='\n') as test_file:
write = lambda txt: print(txt, file=test_file)
# preamble
        write('// This file is a part of toml++ and is subject to the terms of the MIT license.')
write('// Copyright (c) Mark Gillard <mark.gillard@outlook.com.au>')
write('// See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.')
write('// SPDX-License-Identifier: MIT')
write('//-----')
write('// this file was generated by generate_conformance_tests.py - do not modify it directly')
write('')
write('#include "tests.h"')
write('using namespace toml::impl;')
write('')
# test data
write('TOML_DISABLE_WARNINGS; // unused variable spam')
write('')
write('namespace')
write('{')
for test in test_cases:
s = f'\t{test}'
test.requires_unicode = requires_unicode(s)
if test.requires_unicode:
write('\t#if UNICODE_LITERALS_OK')
write(s)
write('\t#endif // UNICODE_LITERALS_OK')
else:
write(s)
write('}')
write('')
write('TOML_ENABLE_WARNINGS;')
write('')
# tests
write(f'TEST_CASE("conformance - {name}")')
write('{')
for condition in conditions:
if condition != '':
write('')
                write(f'\t#if {condition}')
for test in test_cases:
if test.condition != condition:
continue
expected = test.expected()
if isinstance(expected, bool):
if expected:
write(f'\tparsing_should_succeed(FILE_LINE_ARGS, {test.identifier()});')
else:
write(f'\tparsing_should_fail(FILE_LINE_ARGS, {test.identifier()});')
else:
s = expected.render('\t\t')
if not test.requires_unicode:
test.requires_unicode = requires_unicode(s)
if test.requires_unicode:
write('\t#if UNICODE_LITERALS_OK')
write(f'\tparsing_should_succeed(FILE_LINE_ARGS, {test.identifier()}, [](toml::table&& tbl)')
write('\t{')
write(f'\t\tauto expected = {s};')
write('\t\tREQUIRE(tbl == expected);')
write('\t});')
if test.requires_unicode:
write('\t#endif // UNICODE_LITERALS_OK')
write('')
if condition != '':
                write(f'\t#endif // {condition}')
write('}')
write('')
def main():
extern_root = Path(utils.entry_script_dir(), '..', 'external').resolve()
utils.assert_existing_directory(extern_root)
assert extern_root.exists()
tests = { 'valid': dict(), 'invalid': dict() }
load_valid_inputs(tests, extern_root)
load_invalid_inputs(tests, extern_root)
for test_type, test_groups in tests.items():
for test_group, test_cases in test_groups.items():
write_test_file('{}/{}'.format(test_group, test_type), test_cases )
if __name__ == '__main__':
utils.run(main, verbose=True)
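# Typical invocation (illustrative): run from the tools directory with the
# 'toml-test' and 'toml-spec-tests' repositories checked out under ../external,
# as resolved by main() above:
#   python3 generate_conformance_tests.py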
|
import numpy as np
class PeakExtractor(object):
"""Extract peaks from xy-datasets.
:param x: x-values
:param y: y-values
"""
def __init__(self, x, y):
self.x = x
self.y = y
def _locate_peak(self, xfit, yfit):
"""Locate peak by fitting a parabola."""
        # Fit y = a*x**2 + b*x + c to the three points around the maximum; the
        # parabola's vertex at x = -b/(2a) gives the sub-sample peak position.
        coeff = np.polyfit(xfit, yfit, 2)
x_peak = -0.5*coeff[1]/coeff[0]
y_peak = coeff[0] * x_peak**2 + coeff[1] * x_peak + coeff[2]
return x_peak, y_peak
def peaks(self):
"""Locate peaks.
        :return: List of dictionaries
            [{"x": x_peak1, "y": y_peak1, "indx": 3},
             {"x": x_peak2, "y": y_peak2, "indx": 8}]
"""
peaks = []
for i in range(1, len(self.x)-1):
if self.y[i] > self.y[i-1] and self.y[i] > self.y[i+1]:
# We have a peak
x_peak, y_peak = self._locate_peak(self.x[i-1: i+2],
self.y[i-1: i+2])
peaks.append({"x": x_peak, "y": y_peak, "indx": i})
return peaks
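if __name__ == "__main__":
    # Minimal demo on synthetic data (illustrative only): sin(x) on [0, pi]
    # has a single maximum at pi/2, which the parabola fit should recover
    # to sub-sample precision.
    x = np.linspace(0.0, np.pi, 50)
    y = np.sin(x)
    for peak in PeakExtractor(x, y).peaks():
        print("peak at x=%.4f, y=%.4f (grid index %d)"
              % (peak["x"], peak["y"], peak["indx"]))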
|
import os
import time
from simple_saga_task_manager.tests.saga_test_case import SagaTestCase
from simple_saga_task_manager.models import Task
from simple_saga_task_manager.saga_interface import SAGATaskInterface
class SagaInterfaceEndToEndLocalTests(SagaTestCase):
def test_end_to_end_local(self):
# Create task in DB
# Construct task name
task_name = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'test-tasks', 'hello-world.sh'))
task = self.create_local_queued_task(task_name)
si = SAGATaskInterface(False)
si.initialiseService()
si.submit_saga_task(task)
# Task should now be running
status = task.status
self.assertEqual(Task.RUNNING, status)
# Process running task
while (status == Task.RUNNING):
si.update_running_saga_task_status(task)
status = task.status
time.sleep(5)
# Task should now be finished running
self.assertEqual(Task.FINISHED_RUNNING, status)
si.process_finished_saga_task(task)
# Task should now be complete
self.assertEqual(Task.COMPLETE, task.status)
si.closeService()
self.check_output_files_exist(task)
def test_end_to_end_local_context_manager(self):
# Create task in DB
# Construct task name
task_name = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'test-tasks', 'hello-world.sh'))
task = self.create_local_queued_task(task_name)
with (SAGATaskInterface(False)) as si:
si.submit_saga_task(task)
# Task should now be running
status = task.status
self.assertEqual(Task.RUNNING, status)
# Process running task
while (status == Task.RUNNING):
si.update_running_saga_task_status(task)
status = task.status
time.sleep(5)
# Task should now be finished running
self.assertEqual(Task.FINISHED_RUNNING, status)
si.process_finished_saga_task(task)
# Task should now be complete
self.assertEqual(Task.COMPLETE, task.status)
self.check_output_files_exist(task)
def test_end_to_end_local_args(self):
output_file = 'test.out'
task_args = ['Hello World', '> ' + output_file]
task_name = '/bin/echo'
task_environment = {}
output_files = [output_file]
task = Task.objects.create(name=task_name, arguments=task_args, status=Task.QUEUED, type=Task.LOCAL,
environment=task_environment, expected_output_files=output_files)
with (SAGATaskInterface(False)) as si:
si.submit_saga_task(task)
# Task should now be running
status = task.status
self.assertEqual(Task.RUNNING, status)
# Process running task
while (status == Task.RUNNING):
si.update_running_saga_task_status(task)
status = task.status
time.sleep(5)
# Task should now be finished running
self.assertEqual(Task.FINISHED_RUNNING, status)
si.process_finished_saga_task(task)
# Task should now be complete
self.assertEqual(Task.COMPLETE, task.status)
self.check_output_files_exist(task)
self.check_file_exists(os.path.join('tasks', str(task.id), 'outputs', output_file))
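# Illustrative invocation (assumes a standard Django project layout):
#   python manage.py test simple_saga_task_manager
# Note these are end-to-end tests: they submit real local jobs and poll
# every 5 seconds until completion.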
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn-whitegird")
def scatter_matrix(df, categorical = None):
    """
    Scatter plot matrix
    """
    if categorical:
        sns.pairplot(df, hue = categorical, size = 2.5)
    else:
        sns.pairplot(df, size = 2.5)
def histogram_plot(df, row, col, categorical, imgpath = None):
    """
    Faceted histogram
    """
    grid = sns.FacetGrid(df, row = row, col = col, margin_titles = True)
    grid.map(plt.hist, categorical, bins = np.linspace(0, 40, 15))
def timeseries_plot(df, ts, y, title):
    fig, ax = plt.subplots(figsize = (20, 7))
    chart = sns.lineplot(x = ts, y = y, data = df)
    chart.set_title(title or "%s Timeseries Data" % y, fontsize = 15)
    plt.show()
def bar_plot(df, x, y, categorical):
    fig, ax = plt.subplots(figsize = (20, 5))
    palette = sns.color_palette("mako_r", 4)
    a = sns.barplot(x = x, y = y, hue = categorical, data = df, palette = palette)
    a.set_title("Store %s Data" % y, fontsize = 15)
    plt.legend(loc = "upper right")
    plt.show()
def bar_plots(df, x, y, xlabel, ylabel, nrows, title):
    fig, axes = plt.subplots(nrows = nrows)
    fig.set_size_inches(20, 30)
    for ax in axes:
        sns.barplot(data = df, x = x, y = y, ax = ax)
        ax.set(xlabel = xlabel, ylabel = ylabel)
        ax.set_title(title, fontsize = 15)
def resid_plot(df, x, y):
f, ax = plt.subplots(figsize=(10, 8))
sns.residplot(x = x, y = y, data = df)
def reg_plot(df, x, y):
f, ax = plt.subplots(figsize=(10, 8))
sns.regplot(
x = x, y = y, data = df, order = 0,
scatter_kws = {
"s": 20
}
)
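if __name__ == "__main__":
    # Minimal smoke test on synthetic data (illustrative only; uses the same
    # seaborn-era API as the helpers above):
    demo = pd.DataFrame({
        "value": np.random.randn(100),
        "score": np.random.randn(100),
        "group": np.random.choice(["a", "b"], 100),
    })
    scatter_matrix(demo, categorical = "group")
    plt.show()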
|
from urllib.request import urlopen

def _read_list(url):
    """Fetch a newline-separated list; '%20%' is this format's escape for a space."""
    entries = []
    for line in urlopen(url):
        currline = line.decode('utf-8')  # utf-8 or iso8859-1 or whatever the page encoding scheme is
        currline = currline.replace('\n', '')
        entries.append(currline.replace('%20%', ' '))
    return entries

ignoretagarr = _read_list(IGNORETAGFILE)
blockedaccs = _read_list(BLOCKUSERFILE)
filteredkeys = _read_list(BLOCKWORDFILE)
|
import cv2
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
import time
import glob
from io import StringIO
from PIL import Image
import matplotlib.pyplot as plt
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
from multiprocessing.dummy import Pool as ThreadPool
# Windows dependencies
# - Python 2.7.6: http://www.python.org/download/
# - OpenCV: http://opencv.org/
# - Numpy -- get numpy from here because the official builds don't support x64:
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
# Mac Dependencies
# - brew install python
# - pip install numpy
# - brew tap homebrew/science
# - brew install opencv
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
out = cv2.imwrite(os.path.join('image_upload', 'capture.jpg'), frame)
cv2.imshow('Buttons Detection', rgb)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def get_correct_path(files):
return os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), files)
def object_detection_runner(filename):
UPLOAD_FOLDER = 'image_upload'
OUTPUT_FOLDER = 'image_output'
MAX_NUMBER_OF_BOXES = 10
MINIMUM_CONFIDENCE = 0.9
PATH_TO_LABELS = get_correct_path('annotations/label_map.pbtxt')
PATH_TO_TEST_IMAGES_DIR = UPLOAD_FOLDER
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=sys.maxsize,
use_display_name=True)
CATEGORY_INDEX = label_map_util.create_category_index(categories)
# Path to frozen detection graph. This is the actual model that is used for the object detection.
MODEL_NAME = get_correct_path('graphs')
PATH_TO_CKPT = MODEL_NAME + '/ssd_mobilenet_v1.pb'
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def detect_objects(image_path):
import ntpath
head, tail = ntpath.split(image_path)
image_name = tail or ntpath.basename(head)
print(image_name)
image = Image.open(image_path)
(im_width, im_height) = image.size
image_np = load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
CATEGORY_INDEX,
min_score_thresh=MINIMUM_CONFIDENCE,
use_normalized_coordinates=True,
line_thickness=8)
fig = plt.figure()
dpi = 100
im_width_inches = im_width / dpi
im_height_inches = im_height / dpi
fig.set_size_inches(im_width_inches, im_height_inches)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.imshow(image_np, aspect='auto')
plt.savefig(os.path.join(OUTPUT_FOLDER, image_name), dpi=62)
plt.close(fig)
# TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image-{}.jpg'.format(i)) for i in range(1, 4) ]
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
# Load model into memory
print('Loading model...')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
print('detecting...')
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
detect_objects(TEST_IMAGE_PATH)
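# Example call (illustrative): run detection on the frame captured by the
# webcam loop above, which writes image_upload/capture.jpg:
#   object_detection_runner('capture.jpg')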
|
from unittest import TestCase
from neat.streaming import Stream
class TestStreaming(TestCase):
def test_streaming_empty(self):
s = Stream(['a'])
c = s.collect()
s.push([dict(a=1)])
self.assertEqual([dict(a=1)], c.fetch())
s.push([dict(a=2)])
s.push([dict(a=3)])
self.assertEqual([dict(a=2), dict(a=3)], c.fetch())
def test_streaming_join_func(self):
def square(arg):
return arg * arg
s = Stream(['a'])
f = s.join_func(['a'], 'b', square)
c = f.collect()
s.push([dict(a=1)])
self.assertEqual([dict(a=1, b=1)], c.fetch())
def test_streaming_projection(self):
s = Stream(['a', 'b'])
p = s.projection(['a'])
c = p.collect()
s.push([dict(a=1,b=2)])
self.assertEqual([dict(a=1)], c.fetch())
s.push([dict(a=2,b=1)])
self.assertEqual([dict(a=2)], c.fetch())
def test_streaming_projection_dup(self):
s = Stream(['a', 'b'])
p = s.projection(['a'])
c = p.collect()
s.push([dict(a=1,b=1), dict(a=1,b=2)])
self.assertEqual([dict(a=1)], c.fetch())
s.push([dict(a=1,b=3)])
self.assertEqual([], c.fetch())
def test_streaming_natural_join(self):
s1 = Stream(['a', 'b'])
s2 = Stream(['b', 'c'])
j = s1.natural_join(s2)
c = j.collect()
# first thing pushed has nothing to join to
s1.push([dict(a=1,b=1)])
self.assertEqual([], c.fetch())
# matching row pushed emits join
s2.push([dict(b=1, c=1)])
self.assertEqual([dict(a=1,b=1,c=1)], c.fetch())
# another thing pushed to s1 with a different join parameter
# should also output nothing.
s1.push([dict(a=1,b=2)])
self.assertEqual([], c.fetch())
# second matching row pushed emits join
s2.push([dict(b=2, c=1)])
self.assertEqual([dict(a=1,b=2,c=1)], c.fetch())
# a push to a different first parameter should push out another
# merge.
s1.push([dict(a=2,b=2)])
self.assertEqual([dict(a=2,b=2,c=1)], c.fetch())
# a push to the second should now output _two_ matching rows
s2.push([dict(b=2, c=2)])
self.assertEqual([dict(a=2,b=2,c=2), dict(a=1,b=2,c=2)], c.fetch())
def test_streaming_unnest(self):
s = Stream(['id', 'stuffs'])
u = s.unnest('stuffs', 'stuff')
c = u.collect()
s.push([dict(id=1,stuffs=[])])
self.assertEqual([], c.fetch())
s.push([dict(id=2,stuffs=(1,2,3))])
self.assertEqual(
sorted([
dict(id=2,stuff=1),
dict(id=2,stuff=2),
dict(id=2,stuff=3)
]),
sorted(c.fetch()))
|
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app, server
from pages import index, predictions, insights, process
# Navbar docs: https://dash-bootstrap-components.opensource.faculty.ai/l/components/navbar
navbar = dbc.NavbarSimple(
brand='The Strain Game',
brand_href='/',
children=[
dbc.NavItem(dcc.Link('Predictions', href='/predictions', className='nav-link')),
dbc.NavItem(dcc.Link('Insights', href='/insights', className='nav-link')),
dbc.NavItem(dcc.Link('Process', href='/process', className='nav-link')),
],
sticky='top',
color='success',
light=True,
dark=False
)
# Footer docs:
# dbc.Container, dbc.Row, dbc.Col: https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# html.P: https://dash.plot.ly/dash-html-components
# fa (font awesome) : https://fontawesome.com/icons/github-square?style=brands
# mr (margin right) : https://getbootstrap.com/docs/4.3/utilities/spacing/
# className='lead' : https://getbootstrap.com/docs/4.3/content/typography/#lead
footer = dbc.Container(
dbc.Row(
dbc.Col(
html.P(
[
html.Span('Mari Dominguez', className='mr-2'),
html.A(html.I(className='fas fa-envelope-square mr-1'), href='mailto:<you>@<provider>.com'),
html.A(html.I(className='fab fa-github-square mr-1'), href='https://github.com/<you>/<repo>'),
html.A(html.I(className='fab fa-linkedin mr-1'), href='https://www.linkedin.com/in/<you>/'),
html.A(html.I(className='fab fa-twitter-square mr-1'), href='https://twitter.com/<you>'),
],
className='lead'
)
)
)
)
# Layout docs:
# html.Div: https://dash.plot.ly/getting-started
# dcc.Location: https://dash.plot.ly/dash-core-components/location
# dbc.Container: https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
navbar,
dbc.Container(id='page-content', className='mt-4'),
html.Hr(),
footer
])
# URL Routing for Multi-Page Apps: https://dash.plot.ly/urls
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/':
return index.layout
elif pathname == '/predictions':
return predictions.layout
elif pathname == '/insights':
return insights.layout
elif pathname == '/process':
return process.layout
else:
return dcc.Markdown('## Page not found')
# Run app server: https://dash.plot.ly/getting-started
if __name__ == '__main__':
app.run_server(debug=True)
|
#!/usr/bin/python3
from unittest.mock import patch
import unittest
import manager
import job
class TestManager(unittest.TestCase):
def setUp(self):
self.sched_cfg = {
'tmpdir_stagger_phase_major': 3,
'tmpdir_stagger_phase_minor': 0,
'tmpdir_max_jobs': 3 }
@patch('job.Job')
def job_w_tmpdir_phase(self, tmpdir, phase, MockJob):
j = MockJob()
j.progress.return_value = phase
j.tmpdir = tmpdir
return j
@patch('job.Job')
def job_w_dstdir_phase(self, dstdir, phase, MockJob):
j = MockJob()
j.progress.return_value = phase
j.dstdir = dstdir
return j
def test_permit_new_job_post_milestone(self):
self.assertTrue(manager.phases_permit_new_job(
[ (3, 8), (4, 1) ], self.sched_cfg ))
def test_permit_new_job_pre_milestone(self):
self.assertFalse(manager.phases_permit_new_job(
[ (2, 3), (4, 1) ], self.sched_cfg ))
def test_permit_new_job_too_many_jobs(self):
self.assertFalse(manager.phases_permit_new_job(
[ (3, 1), (3, 2), (3, 3) ], self.sched_cfg ))
def test_dstdirs_to_furthest_phase(self):
all_jobs = [ self.job_w_dstdir_phase('/plots1', (1, 5)),
self.job_w_dstdir_phase('/plots2', (1, 1)),
self.job_w_dstdir_phase('/plots2', (3, 1)),
self.job_w_dstdir_phase('/plots2', (2, 1)),
self.job_w_dstdir_phase('/plots3', (4, 1)) ]
self.assertEqual(
{ '/plots1' : (1, 5),
'/plots2' : (3, 1),
'/plots3' : (4, 1) },
manager.dstdirs_to_furthest_phase(all_jobs))
if __name__ == '__main__':
unittest.main()
|
### refer Zhizheng and Simon's ICASSP'16 paper for more details
### http://www.zhizheng.org/papers/icassp2016_lstm.pdf
import numpy as np
import theano
import theano.tensor as T
from theano import config
from theano.tensor.shared_randomstreams import RandomStreams
class VanillaRNN(object):
""" This class implements a standard recurrent neural network: h_{t} = f(W^{hx}x_{t} + W^{hh}h_{t-1}+b_{h})
"""
def __init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training=False):
""" This is to initialise a standard RNN hidden unit
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input data to current layer
:param n_in: dimension of input data
:param n_h: number of hidden units/blocks
:param p: the probability of dropout
:param training: a binary value to indicate training or testing (for dropout training)
"""
self.input = x
if p > 0.0:
if training == 1:
srng = RandomStreams(seed=123456)
self.input = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
else:
self.input = (1 - p) * x # (1-p) *
self.n_in = int(n_in)
self.n_h = int(n_h)
self.rnn_batch_training = rnn_batch_training
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
# Input gate weights
self.W_xi = theano.shared(value=Wx_value, name='W_xi')
self.W_hi = theano.shared(value=Wh_value, name='W_hi')
# bias
self.b_i = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_i')
# initial value of hidden and cell state
if self.rnn_batch_training:
self.h0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='c0')
self.h0 = T.repeat(self.h0, x.shape[1], 0)
self.c0 = T.repeat(self.c0, x.shape[1], 0)
else:
self.h0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='c0')
self.Wix = T.dot(self.input, self.W_xi)
[self.h, self.c], _ = theano.scan(self.recurrent_as_activation_function, sequences=[self.Wix],
outputs_info=[self.h0, self.c0])
self.output = self.h
self.params = [self.W_xi, self.W_hi, self.b_i]
self.L2_cost = (self.W_xi ** 2).sum() + (self.W_hi ** 2).sum()
def recurrent_as_activation_function(self, Wix, h_tm1, c_tm1):
""" Implement the recurrent unit as an activation function. This function is called by self.__init__().
        :param Wix: equal to W^{hx}x_{t}; since it does not depend on the recurrence, it is pre-computed for speed
:type Wix: matrix
:param h_tm1: contains the hidden activation from previous time step
:type h_tm1: matrix, each row means a hidden activation vector of a time step
:param c_tm1: this parameter is not used, just to keep the interface consistent with LSTM
:returns: h_t is the hidden activation of current time step
"""
h_t = T.tanh(Wix + T.dot(h_tm1, self.W_hi) + self.b_i) #
c_t = h_t
return h_t, c_t
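# Usage sketch for the class above (illustrative only, Theano-era API):
#   rng = np.random.RandomState(123456)
#   x = T.matrix('x')                      # sequence laid out as (time, n_in)
#   rnn = VanillaRNN(rng, x, n_in=50, n_h=128, p=0.0, training=0)
#   f = theano.function([x], rnn.output)   # hidden activations per time step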
class VanillaRNNDecoder(object):
""" This class implements a standard recurrent neural network decoder:
h_{t} = f(W^{hx}x_{t} + W^{hh}h_{t-1}+ W^{yh}y_{t-1} + b_{h})
y_{t} = g(h_{t}W^{hy} + b_{y})
"""
def __init__(self, rng, x, n_in, n_h, n_out, p, training, rnn_batch_training=False):
""" This is to initialise a standard RNN hidden unit
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input data to current layer
:param n_in: dimension of input data
:param n_h: number of hidden units/blocks
:param n_out: dimension of output data
:param p: the probability of dropout
:param training: a binary value to indicate training or testing (for dropout training)
"""
self.input = x
if p > 0.0:
if training == 1:
srng = RandomStreams(seed=123456)
self.input = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
else:
self.input = (1 - p) * x # (1-p) *
self.n_in = int(n_in)
self.n_h = int(n_h)
self.n_out = int(n_out)
self.rnn_batch_training = rnn_batch_training
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wy_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_out), size=(n_out, n_h)), dtype=config.floatX)
Ux_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_out)), dtype=config.floatX)
Uh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_out)), dtype=config.floatX)
Uy_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_out), size=(n_out, n_out)), dtype=config.floatX)
# Input gate weights
self.W_xi = theano.shared(value=Wx_value, name='W_xi')
self.W_hi = theano.shared(value=Wh_value, name='W_hi')
self.W_yi = theano.shared(value=Wy_value, name='W_yi')
# Output gate weights
self.U_xi = theano.shared(value=Ux_value, name='U_xi')
self.U_hi = theano.shared(value=Uh_value, name='U_hi')
self.U_yi = theano.shared(value=Uy_value, name='U_yi')
# bias
self.b_i = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_i')
self.b = theano.shared(value=np.zeros((n_out,), dtype=config.floatX), name='b')
# initial value of hidden and cell state and output
if self.rnn_batch_training:
self.h0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='c0')
self.y0 = theano.shared(value=np.zeros((1, n_out), dtype=config.floatX), name='y0')
self.h0 = T.repeat(self.h0, x.shape[1], 0)
self.c0 = T.repeat(self.c0, x.shape[1], 0)
            self.y0 = T.repeat(self.y0, x.shape[1], 0)
else:
self.h0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='c0')
self.y0 = theano.shared(value=np.zeros((n_out,), dtype=config.floatX), name='y0')
self.Wix = T.dot(self.input, self.W_xi)
[self.h, self.c, self.y], _ = theano.scan(self.recurrent_as_activation_function, sequences=[self.Wix],
outputs_info=[self.h0, self.c0, self.y0])
self.output = self.y
self.params = [self.W_xi, self.W_hi, self.W_yi, self.U_hi, self.b_i, self.b]
self.L2_cost = (self.W_xi ** 2).sum() + (self.W_hi ** 2).sum() + (self.W_yi ** 2).sum() + (self.U_hi ** 2).sum()
def recurrent_as_activation_function(self, Wix, h_tm1, c_tm1, y_tm1):
""" Implement the recurrent unit as an activation function. This function is called by self.__init__().
        :param Wix: equal to W^{hx}x_{t}; since it does not depend on the recurrence, it is pre-computed for speed
:type Wix: matrix
:param h_tm1: contains the hidden activation from previous time step
:type h_tm1: matrix, each row means a hidden activation vector of a time step
:param c_tm1: this parameter is not used, just to keep the interface consistent with LSTM
        :returns: h_t, c_t and y_t: the hidden activation, cell value and output of the current time step
"""
h_t = T.tanh(Wix + T.dot(h_tm1, self.W_hi) + T.dot(y_tm1, self.W_yi) + self.b_i) #
y_t = T.dot(h_t, self.U_hi) + self.b
c_t = h_t
return h_t, c_t, y_t
class LstmBase(object):
""" This class provides as a base for all long short-term memory (LSTM) related classes.
Several variants of LSTM were investigated in (Wu & King, ICASSP 2016): Zhizheng Wu, Simon King, "Investigating gated recurrent neural networks for speech synthesis", ICASSP 2016
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise all the components in a LSTM block, including input gate, output gate, forget gate, peephole connections
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
:param p: the probability of dropout
:param training: a binary value to indicate training or testing (for dropout training)
"""
n_in = int(n_in) # ensure sizes have integer type
n_h = int(n_h) # ensure sizes have integer type
self.input = x
if p > 0.0:
if training == 1:
srng = RandomStreams(seed=123456)
self.input = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
else:
self.input = (1 - p) * x
self.n_in = int(n_in)
self.n_h = int(n_h)
self.rnn_batch_training = rnn_batch_training
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Input gate weights
self.W_xi = theano.shared(value=Wx_value, name='W_xi')
self.W_hi = theano.shared(value=Wh_value, name='W_hi')
self.w_ci = theano.shared(value=Wc_value, name='w_ci')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Forget gate weights
self.W_xf = theano.shared(value=Wx_value, name='W_xf')
self.W_hf = theano.shared(value=Wh_value, name='W_hf')
self.w_cf = theano.shared(value=Wc_value, name='w_cf')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Output gate weights
self.W_xo = theano.shared(value=Wx_value, name='W_xo')
self.W_ho = theano.shared(value=Wh_value, name='W_ho')
self.w_co = theano.shared(value=Wc_value, name='w_co')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Cell weights
self.W_xc = theano.shared(value=Wx_value, name='W_xc')
self.W_hc = theano.shared(value=Wh_value, name='W_hc')
# bias
self.b_i = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_i')
self.b_f = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_f')
self.b_o = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_o')
self.b_c = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_c')
### make a layer
# initial value of hidden and cell state
if self.rnn_batch_training:
self.h0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='c0')
self.h0 = T.repeat(self.h0, x.shape[1], 0)
self.c0 = T.repeat(self.c0, x.shape[1], 0)
else:
self.h0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='c0')
self.Wix = T.dot(self.input, self.W_xi)
self.Wfx = T.dot(self.input, self.W_xf)
self.Wcx = T.dot(self.input, self.W_xc)
self.Wox = T.dot(self.input, self.W_xo)
[self.h, self.c], _ = theano.scan(self.recurrent_fn, sequences=[self.Wix, self.Wfx, self.Wcx, self.Wox],
outputs_info=[self.h0, self.c0])
self.output = self.h
def recurrent_fn(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1=None):
""" This implements a genetic recurrent function, called by self.__init__().
:param Wix: pre-computed matrix applying the weight matrix W on the input units, for input gate
:param Wfx: Similar to Wix, but for forget gate
:param Wcx: Similar to Wix, but for cell memory
        :param Wox: Similar to Wix, but for output gate
:param h_tm1: hidden activation from previous time step
:param c_tm1: activation from cell memory from previous time step
:returns: h_t is the hidden activation of current time step, and c_t is the activation for cell memory of current time step
"""
h_t, c_t = self.lstm_as_activation_function(Wix, Wfx, Wcx, Wox, h_tm1, c_tm1)
return h_t, c_t
def lstm_as_activation_function(self):
""" A genetic recurrent activation function for variants of LSTM architectures.
The function is called by self.recurrent_fn().
"""
pass
class LstmDecoderBase(object):
""" This class provides as a base for all long short-term memory (LSTM) related classes.
Several variants of LSTM were investigated in (Wu & King, ICASSP 2016): Zhizheng Wu, Simon King, "Investigating gated recurrent neural networks for speech synthesis", ICASSP 2016
"""
def __init__(self, rng, x, n_in, n_h, n_out, p=0.0, training=0, rnn_batch_training=False):
""" Initialise all the components in a LSTM block, including input gate, output gate, forget gate, peephole connections
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
:param p: the probability of dropout
:param training: a binary value to indicate training or testing (for dropout training)
"""
self.input = x
if p > 0.0:
if training == 1:
srng = RandomStreams(seed=123456)
self.input = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
else:
self.input = (1 - p) * x
self.n_in = int(n_in)
self.n_h = int(n_h)
self.rnn_batch_training = rnn_batch_training
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
Wy_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_out), size=(n_out, n_h)), dtype=config.floatX)
# Input gate weights
self.W_xi = theano.shared(value=Wx_value, name='W_xi')
self.W_hi = theano.shared(value=Wh_value, name='W_hi')
self.w_ci = theano.shared(value=Wc_value, name='w_ci')
self.W_yi = theano.shared(value=Wy_value, name='W_yi')
# random initialisation
Uh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_out)), dtype=config.floatX)
# Output gate weights
self.U_ho = theano.shared(value=Uh_value, name='U_ho')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Forget gate weights
self.W_xf = theano.shared(value=Wx_value, name='W_xf')
self.W_hf = theano.shared(value=Wh_value, name='W_hf')
self.w_cf = theano.shared(value=Wc_value, name='w_cf')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Output gate weights
self.W_xo = theano.shared(value=Wx_value, name='W_xo')
self.W_ho = theano.shared(value=Wh_value, name='W_ho')
self.w_co = theano.shared(value=Wc_value, name='w_co')
# random initialisation
Wx_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
Wh_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)
Wc_value = np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h), size=(n_h,)), dtype=config.floatX)
# Cell weights
self.W_xc = theano.shared(value=Wx_value, name='W_xc')
self.W_hc = theano.shared(value=Wh_value, name='W_hc')
# bias
self.b_i = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_i')
self.b_f = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_f')
self.b_o = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_o')
self.b_c = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_c')
self.b = theano.shared(value=np.zeros((n_out,), dtype=config.floatX), name='b')
### make a layer
# initial value of hidden and cell state
if self.rnn_batch_training:
self.h0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='c0')
self.y0 = theano.shared(value=np.zeros((1, n_out), dtype=config.floatX), name='y0')
self.h0 = T.repeat(self.h0, x.shape[1], 0)
self.c0 = T.repeat(self.c0, x.shape[1], 0)
            self.y0 = T.repeat(self.y0, x.shape[1], 0)
else:
self.h0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='c0')
self.y0 = theano.shared(value=np.zeros((n_out,), dtype=config.floatX), name='y0')
self.Wix = T.dot(self.input, self.W_xi)
self.Wfx = T.dot(self.input, self.W_xf)
self.Wcx = T.dot(self.input, self.W_xc)
self.Wox = T.dot(self.input, self.W_xo)
[self.h, self.c, self.y], _ = theano.scan(self.recurrent_fn, sequences=[self.Wix, self.Wfx, self.Wcx, self.Wox],
outputs_info=[self.h0, self.c0, self.y0])
self.output = self.y
def recurrent_fn(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1=None, y_tm1=None):
""" This implements a genetic recurrent function, called by self.__init__().
:param Wix: pre-computed matrix applying the weight matrix W on the input units, for input gate
:param Wfx: Similar to Wix, but for forget gate
:param Wcx: Similar to Wix, but for cell memory
        :param Wox: Similar to Wix, but for output gate
:param h_tm1: hidden activation from previous time step
:param c_tm1: activation from cell memory from previous time step
        :returns: h_t, c_t and y_t: the hidden activation, cell memory activation and output of the current time step
"""
h_t, c_t, y_t = self.lstm_as_activation_function(Wix, Wfx, Wcx, Wox, h_tm1, c_tm1, y_tm1)
return h_t, c_t, y_t
def lstm_as_activation_function(self):
""" A genetic recurrent activation function for variants of LSTM architectures.
The function is called by self.recurrent_fn().
"""
pass
class VanillaLstm(LstmBase):
""" This class implements the standard LSTM block, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a vanilla LSTM block
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xi, self.W_hi, self.w_ci,
self.W_xf, self.W_hf, self.w_cf,
self.W_xo, self.W_ho, self.w_co,
self.W_xc, self.W_hc,
self.b_i, self.b_f, self.b_o, self.b_c]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the standard LSTM activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
i_t = T.nnet.sigmoid(Wix + T.dot(h_tm1, self.W_hi) + self.w_ci * c_tm1 + self.b_i) #
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.w_cf * c_tm1 + self.b_f) #
c_t = f_t * c_tm1 + i_t * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c)
o_t = T.nnet.sigmoid(Wox + T.dot(h_tm1, self.W_ho) + self.w_co * c_t + self.b_o)
h_t = o_t * T.tanh(c_t)
return h_t, c_t # , i_t, f_t, o_t
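# Summary of the recurrence implemented just above (a restatement of the code,
# in the notation of the ICASSP'16 paper referenced at the top of this file):
#   i_t = sigmoid(W_xi x_t + W_hi h_{t-1} + w_ci * c_{t-1} + b_i)    input gate
#   f_t = sigmoid(W_xf x_t + W_hf h_{t-1} + w_cf * c_{t-1} + b_f)    forget gate
#   c_t = f_t * c_{t-1} + i_t * tanh(W_xc x_t + W_hc h_{t-1} + b_c)  cell state
#   o_t = sigmoid(W_xo x_t + W_ho h_{t-1} + w_co * c_t + b_o)        output gate
#   h_t = o_t * tanh(c_t)                                            hidden state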
class VanillaLstmDecoder(LstmDecoderBase):
""" This class implements the standard LSTM block, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, n_out, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a vanilla LSTM block
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
self.n_out = int(n_out)
LstmDecoderBase.__init__(self, rng, x, n_in, n_h, n_out, p, training, rnn_batch_training)
self.params = [self.W_xi, self.W_hi, self.w_ci, self.W_yi,
self.W_xf, self.W_hf, self.w_cf,
self.W_xo, self.W_ho, self.w_co,
self.W_xc, self.W_hc,
self.U_ho,
self.b_i, self.b_f, self.b_o, self.b_c, self.b]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1, y_tm1):
""" This function treats the LSTM block as an activation function, and implements the standard LSTM activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
i_t = T.nnet.sigmoid(Wix + T.dot(h_tm1, self.W_hi) + self.w_ci * c_tm1 + self.b_i) #
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.w_cf * c_tm1 + self.b_f) #
c_t = f_t * c_tm1 + i_t * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + T.dot(y_tm1, self.W_yi) + self.b_c)
o_t = T.nnet.sigmoid(Wox + T.dot(h_tm1, self.W_ho) + self.w_co * c_t + self.b_o)
h_t = o_t * T.tanh(c_t)
y_t = T.dot(h_t, self.U_ho) + self.b
return h_t, c_t, y_t # , i_t, f_t, o_t
class SimplifiedLstmDecoder(LstmDecoderBase):
""" This class implements a simplified LSTM block which only keeps the forget gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, n_out, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with only the forget gate
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
self.n_out = int(n_out)
LstmDecoderBase.__init__(self, rng, x, n_in, n_h, n_out, p, training, rnn_batch_training)
self.params = [self.W_yi,
self.W_xf, self.W_hf,
self.W_xc, self.W_hc,
self.U_ho,
self.b_f, self.b_c, self.b]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1, y_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (simplified LSTM) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.b_f) # self.w_cf * c_tm1
c_t = f_t * c_tm1 + (1 - f_t) * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + T.dot(y_tm1, self.W_yi) + self.b_c)
h_t = T.tanh(c_t)
y_t = T.dot(h_t, self.U_ho) + self.b
return h_t, c_t, y_t
class LstmNFG(LstmBase):
""" This class implements a LSTM block without the forget gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with the forget gate
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xi, self.W_hi, self.w_ci,
self.W_xo, self.W_ho, self.w_co,
self.W_xc, self.W_hc,
self.b_i, self.b_o, self.b_c]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (without the forget gate) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
i_t = T.nnet.sigmoid(Wix + T.dot(h_tm1, self.W_hi) + self.w_ci * c_tm1 + self.b_i) #
c_t = c_tm1 + i_t * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c) # f_t *
o_t = T.nnet.sigmoid(Wox + T.dot(h_tm1, self.W_ho) + self.w_co * c_t + self.b_o)
h_t = o_t * T.tanh(c_t)
return h_t, c_t
class LstmNIG(LstmBase):
""" This class implements a LSTM block without the input gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with the input gate
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xf, self.W_hf, self.w_cf,
self.W_xo, self.W_ho, self.w_co,
self.W_xc, self.W_hc,
self.b_f, self.b_o, self.b_c]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (without the input gate) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.w_cf * c_tm1 + self.b_f) #
c_t = f_t * c_tm1 + T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c) # i_t *
o_t = T.nnet.sigmoid(Wox + T.dot(h_tm1, self.W_ho) + self.w_co * c_t + self.b_o)
h_t = o_t * T.tanh(c_t)
return h_t, c_t
class LstmNOG(LstmBase):
""" This class implements a LSTM block without the output gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with the output gate
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xi, self.W_hi, self.w_ci,
self.W_xf, self.W_hf, self.w_cf,
self.W_xc, self.W_hc,
self.b_i, self.b_f,
self.b_c]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (without the output gate) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
i_t = T.nnet.sigmoid(Wix + T.dot(h_tm1, self.W_hi) + self.w_ci * c_tm1 + self.b_i) #
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.w_cf * c_tm1 + self.b_f) #
c_t = f_t * c_tm1 + i_t * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c) # i_t *
h_t = T.tanh(c_t)
return h_t, c_t
class LstmNoPeepholes(LstmBase):
""" This class implements a LSTM block without the peephole connections, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with the peephole connections
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xi, self.W_hi, # self.W_ci,
self.W_xf, self.W_hf, # self.W_cf,
self.W_xo, self.W_ho, # self.W_co,
self.W_xc, self.W_hc,
self.b_i, self.b_f,
self.b_o, self.b_c]
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (without the output gate) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
i_t = T.nnet.sigmoid(Wix + T.dot(h_tm1, self.W_hi) + self.b_i)
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.b_f)
c_t = f_t * c_tm1 + i_t * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c)
o_t = T.nnet.sigmoid(Wox + T.dot(h_tm1, self.W_ho) + self.b_o)
h_t = o_t * T.tanh(c_t)
return h_t, c_t
class SimplifiedLstm(LstmBase):
""" This class implements a simplified LSTM block which only keeps the forget gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with only the forget gate
        :param rng: random state; a fixed seed gives reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xf, self.W_hf,
self.W_xc, self.W_hc,
self.b_f, self.b_c]
self.L2_cost = (self.W_xf ** 2).sum() + (self.W_hf ** 2).sum() + (self.W_xc ** 2).sum() + (self.W_hc ** 2).sum()
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (simplified LSTM) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.b_f) # self.w_cf * c_tm1
c_t = f_t * c_tm1 + (1 - f_t) * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c)
h_t = T.tanh(c_t)
return h_t, c_t
class SimplifiedGRU(LstmBase):
""" This class implements a simplified GRU block which only keeps the forget gate, inheriting the genetic class :class:`layers.gating.LstmBase`.
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a LSTM with the the forget gate
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
"""
LstmBase.__init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training)
self.params = [self.W_xf, self.W_hf, self.w_cf,
self.W_xc, self.W_hc,
self.b_f, self.b_c]
self.L2_cost = (self.W_xf ** 2).sum() + (self.W_hf ** 2).sum() + (self.W_xc ** 2).sum() + (self.W_hc ** 2).sum()
def lstm_as_activation_function(self, Wix, Wfx, Wcx, Wox, h_tm1, c_tm1):
""" This function treats the LSTM block as an activation function, and implements the LSTM (simplified LSTM) activation function.
The meaning of each input and output parameters can be found in :func:`layers.gating.LstmBase.recurrent_fn`
"""
##can_h_t = T.tanh(Whx + r_t * T.dot(h_tm1, self.W_hh) + self.b_h)
f_t = T.nnet.sigmoid(Wfx + T.dot(h_tm1, self.W_hf) + self.b_f) # self.w_cf * c_tm1
can_h_t = T.tanh(Wcx + f_t * T.dot(h_tm1, self.W_hc) + self.b_c)
h_t = self.w_cf * (1.0 - f_t) * h_tm1 + f_t * can_h_t
c_t = h_t
# c_t = f_t * c_tm1 + (1 - f_t) * T.tanh(Wcx + T.dot(h_tm1, self.W_hc) + self.b_c)
# h_t = T.tanh(c_t)
return h_t, c_t
class BidirectionSLstm(SimplifiedLstm):
def __init__(self, rng, x, n_in, n_h, n_out, p=0.0, training=0, rnn_batch_training=False):
fwd = SimplifiedLstm(rng, x, n_in, n_h, p, training, rnn_batch_training)
bwd = SimplifiedLstm(rng, x[::-1], n_in, n_h, p, training, rnn_batch_training)
self.params = fwd.params + bwd.params
self.output = T.concatenate([fwd.output, bwd.output[::-1]], axis=-1)
class BidirectionLstm(VanillaLstm):
def __init__(self, rng, x, n_in, n_h, n_out, p=0.0, training=0, rnn_batch_training=False):
fwd = VanillaLstm(rng, x, n_in, n_h, p, training, rnn_batch_training)
bwd = VanillaLstm(rng, x[::-1], n_in, n_h, p, training, rnn_batch_training)
self.params = fwd.params + bwd.params
self.output = T.concatenate([fwd.output, bwd.output[::-1]], axis=-1)
class RecurrentOutput(object):
def __init__(self, rng, x, n_in, n_out, p=0.0, training=0, rnn_batch_training=False):
self.W_h = theano.shared(
value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_out), size=(n_in, n_out)), dtype=config.floatX),
name='W_h')
self.W_y = theano.shared(
value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_out), size=(n_out, n_out)), dtype=config.floatX),
name='W_y')
self.b_y = theano.shared(value=np.zeros((n_out,), dtype=config.floatX), name='b_y')
# Gated Recurrent Unit
class GatedRecurrentUnit(object):
""" This class implements a gated recurrent unit (GRU), as proposed in Cho et al 2014 (http://arxiv.org/pdf/1406.1078.pdf).
"""
def __init__(self, rng, x, n_in, n_h, p=0.0, training=0, rnn_batch_training=False):
""" Initialise a gated recurrent unit
:param rng: random state, fixed value for randome state for reproducible objective results
:param x: input to a network
:param n_in: number of input features
:type n_in: integer
:param n_h: number of hidden units
:type n_h: integer
:param p: the probability of dropout
:param training: a binary value to indicate training or testing (for dropout training)
"""
self.n_in = int(n_in)
self.n_h = int(n_h)
self.rnn_batch_training = rnn_batch_training
self.input = x
if p > 0.0:
if training == 1:
srng = RandomStreams(seed=123456)
self.input = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
else:
self.input = (1 - p) * x
self.W_xz = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in),
size=(n_in, n_h)), dtype=config.floatX), name='W_xz')
self.W_hz = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h),
size=(n_h, n_h)), dtype=config.floatX), name='W_hz')
self.W_xr = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in),
size=(n_in, n_h)), dtype=config.floatX), name='W_xr')
self.W_hr = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h),
size=(n_h, n_h)), dtype=config.floatX), name='W_hr')
self.W_xh = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_in),
size=(n_in, n_h)), dtype=config.floatX), name='W_xh')
self.W_hh = theano.shared(value=np.asarray(rng.normal(0.0, 1.0 / np.sqrt(n_h),
size=(n_h, n_h)), dtype=config.floatX), name='W_hh')
self.b_z = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_z')
self.b_r = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_r')
self.b_h = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='b_h')
if self.rnn_batch_training:
self.h0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((1, n_h), dtype=config.floatX), name='c0')
self.h0 = T.repeat(self.h0, x.shape[1], 0)
self.c0 = T.repeat(self.c0, x.shape[1], 0)
else:
self.h0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='h0')
self.c0 = theano.shared(value=np.zeros((n_h,), dtype=config.floatX), name='c0')
## pre-compute these for fast computation
self.Wzx = T.dot(self.input, self.W_xz)
self.Wrx = T.dot(self.input, self.W_xr)
self.Whx = T.dot(self.input, self.W_xh)
[self.h, self.c], _ = theano.scan(self.gru_as_activation_function,
sequences=[self.Wzx, self.Wrx, self.Whx],
outputs_info=[self.h0, self.c0]) #
self.output = self.h
self.params = [self.W_xz, self.W_hz, self.W_xr, self.W_hr, self.W_xh, self.W_hh,
self.b_z, self.b_r, self.b_h]
self.L2_cost = (self.W_xz ** 2).sum() + (self.W_hz ** 2).sum() + (self.W_xr ** 2).sum() + (
self.W_hr ** 2).sum() + (self.W_xh ** 2).sum() + (self.W_hh ** 2).sum()
def gru_as_activation_function(self, Wzx, Wrx, Whx, h_tm1, c_tm1=None):
""" This function treats the GRU block as an activation function, and implements the GRU activation function.
This function is called by :func:`layers.gating.GatedRecurrentUnit.__init__`.
Wzx, Wrx, Whx have been pre-computed before passing to this function.
        To keep the same interface as the LSTM, we retain c_tm1 (the cell state of the previous time step), even though a GRU does not maintain a cell state.
"""
z_t = T.nnet.sigmoid(Wzx + T.dot(h_tm1, self.W_hz) + self.b_z)
r_t = T.nnet.sigmoid(Wrx + T.dot(h_tm1, self.W_hr) + self.b_r)
can_h_t = T.tanh(Whx + r_t * T.dot(h_tm1, self.W_hh) + self.b_h)
h_t = (1 - z_t) * h_tm1 + z_t * can_h_t
c_t = h_t ## in order to have the same interface as LSTM
return h_t, c_t
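# A minimal usage sketch for the GRU layer above (hedged: assumes Theano is
# installed and relies on the module-level imports `theano`, `T`, `np`, and
# `config` already used in this file; the dimensions below are illustrative).
if __name__ == '__main__':
    demo_rng = np.random.RandomState(123)
    demo_x = T.matrix('x')  # shape: (n_timesteps, n_in)
    demo_gru = GatedRecurrentUnit(demo_rng, demo_x, n_in=40, n_h=64)
    demo_fn = theano.function([demo_x], demo_gru.output)
    # one 10-step sequence of 40-dimensional frames -> (10, 64) hidden states
    print(demo_fn(np.zeros((10, 40), dtype=config.floatX)).shape)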
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWebServiceResult',
'AwaitableGetWebServiceResult',
'get_web_service',
]
@pulumi.output_type
class GetWebServiceResult:
"""
Instance of an Azure ML web service resource.
"""
def __init__(__self__, location=None, name=None, properties=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.WebServicePropertiesForGraphResponse':
"""
Contains the property payload that describes the web service.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebServiceResult(GetWebServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebServiceResult(
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_web_service(resource_group_name: Optional[str] = None,
web_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebServiceResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: Name of the resource group in which the web service is located.
:param str web_service_name: The name of the web service.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['webServiceName'] = web_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearning/v20160501preview:getWebService', __args__, opts=opts, typ=GetWebServiceResult).value
return AwaitableGetWebServiceResult(
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
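# Usage sketch (hedged: the resource group and service names below are
# illustrative placeholders, not values from any real deployment):
#
#     result = get_web_service(resource_group_name="my-rg",
#                              web_service_name="my-ml-service")
#     pulumi.export("webServiceLocation", result.location)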
|
# coding: utf-8
"""
a proof of concept implementation of SQLite FTS tokenizers in Python
"""
import sys
from cffi import FFI # type: ignore
SQLITE_OK = 0
SQLITE_DONE = 101
ffi = FFI()
if sys.platform == "win32":
import sqlite3 # noqa
dll = ffi.dlopen("sqlite3.dll")
else:
from ctypes.util import find_library
dll = ffi.dlopen(find_library("sqlite3"))
if hasattr(sys, "getobjects"):
# for a python built with Py_TRACE_REFS
ffi.cdef(
"""
typedef struct sqlite3 sqlite3;
typedef struct {
void *_ob_next;
void *_ob_prev;
size_t ob_refcnt;
void *ob_type;
sqlite3 *db;
} PyObject;
"""
)
else:
ffi.cdef(
"""
typedef struct sqlite3 sqlite3;
typedef struct {
size_t ob_refcnt;
void *ob_type;
sqlite3 *db;
} PyObject;
"""
)
def get_db_from_connection(c):
db = getattr(c, "_db", None)
if db:
# pypy's SQLite3 connection has _db using cffi
db = ffi.cast("sqlite3*", db)
else:
db = ffi.cast("PyObject *", id(c)).db
return db
__all__ = ["get_db_from_connection", "SQLITE_OK", "SQLITE_DONE"]
|
import os
import logging
import claripy
from cle import MetaELF
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger('angr.simos.linux')
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project,
syscall_library=L['linux'],
syscall_addr_alignment=project.arch.instruction_alignment,
name="Linux",
**kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self._vsyscall_addr = None
def configure_project(self): # pylint: disable=arguments-differ
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self._vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr,
P['linux_loader']['_dl_initial_error_catch_tsd'](
static_addr=self.project.loader.extern_object.allocate()
)
)
self.project.hook(self._vsyscall_addr, P['linux_kernel']['_vsyscall']())
ld_obj = self.project.loader.linux_loader_object
if ld_obj is not None:
# there are some functions we MUST use the simprocedures for, regardless of what the user wants
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
# set up some static data in the loader object...
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
# TODO: what the hell is this
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
libc_obj = self.project.loader.find_object('libc.so.6')
if libc_obj:
self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch), libc_obj)
tls_obj = self.project.loader.tls_object
if tls_obj is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
elif isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
# Only set up ifunc resolution if we are using the ELF backend on AMD64
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.elftype != 'STT_GNU_IFUNC':
continue
except AttributeError:
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.unpack_word(gotaddr)
if self.project.is_hooked(gotvalue):
continue
# Replace it with a ifunc-resolve simprocedure!
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
# TODO: should this be replaced with hook_symbol?
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.pack_word(gotaddr, randaddr)
# maybe move this into archinfo?
if self.arch.name == 'X86':
syscall_abis = ['i386']
elif self.arch.name == 'AMD64':
syscall_abis = ['i386', 'amd64']
elif self.arch.name.startswith('ARM'):
syscall_abis = ['arm']
if self.arch.name == 'ARMHF':
syscall_abis.append('armhf')
elif self.arch.name == 'AARCH64':
syscall_abis = ['aarch64']
# https://www.linux-mips.org/wiki/WhatsWrongWithO32N32N64
elif self.arch.name == 'MIPS32':
syscall_abis = ['mips-o32']
elif self.arch.name == 'MIPS64':
syscall_abis = ['mips-n32', 'mips-n64']
elif self.arch.name == 'PPC32':
syscall_abis = ['ppc']
elif self.arch.name == 'PPC64':
syscall_abis = ['ppc64']
else:
syscall_abis = [] # ?
super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
if state.arch.name != 'AMD64':
return None
if state.history.jumpkind == 'Ijk_Sys_int128':
return 'i386'
elif state.history.jumpkind == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
# pylint: disable=arguments-differ
def state_blank(self, fs=None, concrete_fs=False, chroot=None,
cwd=b'/home/user', pathsep=b'/', **kwargs):
state = super(SimLinux, self).state_blank(**kwargs)
if self.project.loader.tls_object is not None:
if isinstance(state.arch, ArchAMD64):
state.regs.fs = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
if fs is None: fs = {}
for name in fs:
if type(fs[name]) is str:
fs[name] = fs[name].encode('utf-8')
if type(fs[name]) is bytes:
fs[name] = claripy.BVV(fs[name])
if isinstance(fs[name], claripy.Bits):
fs[name] = SimFile(name, content=fs[name])
if not isinstance(fs[name], SimFileBase):
raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
mounts = {}
if concrete_fs:
mounts[pathsep] = SimHostFilesystem(chroot if chroot is not None else os.path.sep)
state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
if self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
# Handle default values
if args is None:
args = []
if env is None:
env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) is int: # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args/env/auxv
table = StringTableSpec()
# Add args to string table
table.append_args(args)
# Add environment to string table
table.append_env(env)
# Prepare the auxiliary vector and add it to the end of the string table
# TODO: Actually construct a real auxiliary vector
# current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
aux = [(25, b"\xAE\xC0" * 8)]
for a, b in aux:
table.add_pointer(a)
if isinstance(b, bytes):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
# Dump the table onto the stack, calculate pointers to args, env, and auxv
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
# store argc argv envp auxv in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.items():
if isinstance(val, int):
state.registers.store(reg, val, size=state.arch.bytes)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
_l.error('What the ass kind of default value is %s?', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if self.project.loader.main_object.is_ppc64_abiv1:
if basic_addr is not None:
pointer = self.project.loader.memory.unpack_word(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.pack_word(
AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
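# Usage sketch (hedged; these are the standard angr entry points that route
# through SimLinux.state_entry / state_full_init above):
#
#     import angr
#     proj = angr.Project("/bin/true", auto_load_libs=False)
#     entry = proj.factory.entry_state(args=["/bin/true"], env={"HOME": "/root"})
#     full = proj.factory.full_init_state()  # begins at the simulated loader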
|
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', '').replace(
'postgres://', 'postgresql://') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT')
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['gwiese@tpg.com.au']
LANGUAGES = ['en', 'es', 'de']
MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
POSTS_PER_PAGE = 10
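# Usage sketch (hedged; assumes the standard Flask pattern this config class
# is written for):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(Config)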
|
import copy
import typing
from pathlib import Path
from typing import Dict
from deliverable_model.processor_base import ProcessorBase
from deliverable_model.request import Request
from deliverable_model.response import Response
if typing.TYPE_CHECKING:
from nlp_utils.preprocess.lookup_table import LookupTable as Lookuper
class LookupProcessor(ProcessorBase):
def __init__(self, lookup_table=None, **kwargs):
super().__init__(**kwargs)
self.lookup_table = lookup_table # type: "Lookuper"
@classmethod
def load(cls, parameter: dict, asset_dir) -> "ProcessorBase":
from nlp_utils.preprocess.lookup_table import LookupTable as Lookuper
config = parameter.pop("config", {})
instance_asset = asset_dir / "data"
lookup_table = Lookuper.load_from_file(instance_asset, **config)
self = cls(lookup_table, **parameter)
return self
def preprocess(self, request: Request) -> Request:
query_id_list = []
for query_item in request[self.pre_input_key]:
query_item_id = [self.lookup_table.lookup(i) for i in query_item]
query_id_list.append(query_item_id)
request[self.pre_output_key] = query_id_list
return request
def postprocess(self, response: Response) -> Response:
data_str_list = []
for data_int in response[self.post_input_key]:
data_str = [self.lookup_table.inverse_lookup(i) for i in data_int]
data_str_list.append(data_str)
response[self.post_output_key] = data_str_list
return response
def get_config(self) -> dict:
base_config = super().get_config()
config = self.lookup_table.get_config()
return {**base_config, **{"config": config}}
def serialize(self, asset_dir: Path):
instance_asset = asset_dir / "data"
self.lookup_table.dump_to_file(instance_asset)
def get_dependency(self) -> list:
return ["nlp_utils"]
|
import cv2
import numpy as np
import PIL.Image as Image
from carla.image_converter import labels_to_cityscapes_palette
from erdos.op import Op
from erdos.utils import setup_logging
class SegmentedVideoOperator(Op):
def __init__(self, name, log_file_name=None):
super(SegmentedVideoOperator, self).__init__(name)
self._logger = setup_logging(self.name, log_file_name)
@staticmethod
def setup_streams(input_streams, filter_name):
input_streams.filter_name(filter_name)\
.add_callback(SegmentedVideoOperator.display_frame)
return []
def display_frame(self, msg):
frame_array = labels_to_cityscapes_palette(msg.data)
img = Image.fromarray(np.uint8(frame_array)).convert('RGB')
open_cv_image = np.array(img)
cv2.imshow(self.name, open_cv_image)
cv2.waitKey(1)
def execute(self):
self.spin()
|
from direct.fsm.State import State
from direct.fsm.ClassicFSM import ClassicFSM
from direct.showbase.DirectObject import DirectObject
from panda3d.core import PandaNode, PGButton, NodePath, MouseWatcherRegion
class Clickable(PandaNode, DirectObject):
def __init__(self, name):
PandaNode.__init__(self, name)
DirectObject.__init__(self)
self.fsm = ClassicFSM(name, [
State('off', self.enterOff, self.exitOff),
State('rollover', self.enterRollover, self.exitRollover),
State('ready', self.enterReady, self.exitReady),
State('depressed', self.enterDepressed, self.exitDepressed),
State('inactive', self.enterInactive, self.exitInactive)], 'off', 'off')
self.fsm.enterInitialState()
self.active = True
self.lastClickState = PGButton.SReady
self.clickState = PGButton.SReady
self.__hovering = False
self.clickEvent = ''
self.clickExtraArgs = []
self.contents = NodePath.anyPath(self).attachNewNode('contents')
# Create a MouseWatcherRegion:
self.regionName = self.getUniqueName() + '-region'
self.region = MouseWatcherRegion(self.regionName, 0, 0, 0, 0)
base.mouseWatcherNode.addRegion(self.region)
# Accept the mouse events:
enterPattern = base.mouseWatcherNode.getEnterPattern()
leavePattern = base.mouseWatcherNode.getLeavePattern()
buttonDownPattern = base.mouseWatcherNode.getButtonDownPattern()
buttonUpPattern = base.mouseWatcherNode.getButtonUpPattern()
self.accept(enterPattern.replace('%r', self.regionName), self.__handleMouseEnter)
self.accept(leavePattern.replace('%r', self.regionName), self.__handleMouseLeave)
self.accept(buttonDownPattern.replace('%r', self.regionName), self.__handleMouseDown)
self.accept(buttonUpPattern.replace('%r', self.regionName), self.__handleMouseUp)
def destroy(self):
self.ignoreAll()
if self.region is not None:
base.mouseWatcherNode.removeRegion(self.region)
self.region = None
if self.contents is not None:
self.contents.removeNode()
self.contents = None
def getUniqueName(self):
return 'Clickable-%s' % id(self)
def setActive(self, active):
self.active = active
def getActive(self):
return self.active
def isClickable(self):
return self.active
def isHovering(self):
return self.__hovering
def setClickState(self, clickState):
self.lastClickState = self.clickState
self.clickState = clickState
if self.clickState == PGButton.SReady:
self.fsm.request('ready')
elif self.clickState == PGButton.SDepressed:
self.fsm.request('depressed')
elif self.clickState == PGButton.SRollover:
self.fsm.request('rollover')
elif self.clickState == PGButton.SInactive:
self.fsm.request('inactive')
def getClickState(self):
return self.clickState
def enterOff(self):
pass
def exitOff(self):
pass
def enterReady(self):
pass
def exitReady(self):
pass
def enterDepressed(self):
pass
def exitDepressed(self):
if self.isClickable():
messenger.send(self.clickEvent, self.clickExtraArgs)
def enterRollover(self):
pass
def exitRollover(self):
pass
def enterInactive(self):
pass
def exitInactive(self):
pass
    def setClickEvent(self, event, extraArgs=None):
        # avoid a shared mutable default argument
        self.clickEvent = event
        self.clickExtraArgs = extraArgs if extraArgs is not None else []
def setClickRegionFrame(self, left, right, bottom, top):
self.region.setFrame(left, right, bottom, top)
def __handleMouseEnter(self, region, button):
self.__hovering = True
self.setClickState(PGButton.SRollover)
def __handleMouseLeave(self, region, button):
self.__hovering = False
if self.clickState != PGButton.SDepressed:
self.setClickState(PGButton.SReady)
def __handleMouseDown(self, region, button):
if button == 'mouse1':
self.setClickState(PGButton.SDepressed)
def __handleMouseUp(self, region, button):
if button == 'mouse1':
if self.__hovering:
self.setClickState(PGButton.SRollover)
else:
self.setClickState(PGButton.SReady)
|
class Solution:
def reverse(self, x: int) -> int:
MIN, MAX = -(2 ** 31), (2 ** 31) - 1
if x == 0:
return 0
negative = False
if x < 0:
negative = True
x *= -1
new = 0
while x:
new = (new * 10) + (x % 10)
x //= 10
if negative:
new *= -1
return new if MIN <= new <= MAX else 0
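# Illustrative sanity checks (not part of the original solution):
if __name__ == "__main__":
    s = Solution()
    assert s.reverse(123) == 321
    assert s.reverse(-120) == -21
    assert s.reverse(1534236469) == 0  # 9646324351 overflows the 32-bit range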
|
from block import *
from shard import *
from logging import ERROR, WARN, INFO, DEBUG
import time
class categorize_shard(Shard):
@classmethod
def initial_configs(cls, config):
return [config for i in range(config["nodes"])]
@classmethod
def node_type(self):
return {"name": "Categorize", "input_port": "input", "output_port": "output", "port_type": "PUSH"}
def on_load(self, config):
self.config = config
self.nodes = config["nodes"]
self.max_nodes = 20
self.current_node = 0
self.add_port("input", Port.PUSH, Port.UNNAMED, [])
self.log(INFO, "Categorize shard loaded")
def config_for_new_node(self):
return self.config
def recv_push(self, port, log):
self.log(INFO, "%s sending to port %d" % (self.id, self.current_node))
self.push_node(self.current_node, log)
self.current_node = (self.current_node + 1) % self.nodes
def can_add_node(self):
return (self.nodes < self.max_nodes)
def should_add_node(self, node_num):
self.log(INFO, self.id + " should_add_node got a new node")
self.nodes += 1
# start distribution from the new node
self.current_node = node_num
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import openbabel
from ribbonbuilder import translate as t
import pytest
@pytest.fixture()
def mol_and_params():
mol = openbabel.OBMol()
distance = 3.0
border_distance = 1.0
carbon = 6
hydrogen = 1
# Add the 4 "bulk" atoms first
for vec in [(0.0, 0.0, 0.0),
(distance, 0.0, 0.0),
(0.0, distance, 0.0),
(distance, distance, 0.0)]:
atom = openbabel.OBAtom()
atom.SetVector(*vec)
atom.SetAtomicNum(carbon)
mol.AddAtom(atom)
# Now add the atoms on the border
for vec in [(0.0, -border_distance, 0.0), # atom 5
(distance, -border_distance, 0.0), # atom 6
(distance+border_distance, 0.0, 0.0), # atom 7
(distance+border_distance, distance, 0.0), # atom 8
(distance, distance+border_distance, 0.0), # atom 9
(0.0, distance+border_distance, 0.0), # atom 10
(-border_distance, distance, 0.0), # atom 11
(-border_distance, 0.0, 0.0) # atom 12
]:
atom = openbabel.OBAtom()
atom.SetVector(*vec)
atom.SetAtomicNum(hydrogen)
mol.AddAtom(atom)
return mol, distance, border_distance
@pytest.fixture()
def border_top_and_param():
border_dist = 1.0
atoms = []
@pytest.fixture()
def empty_mol():
return openbabel.OBMol()
def test_build_bulk(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.create_bulk()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([0.0, 0.0, 0.0]),
np.array([distance, 0.0, 0.0]),
np.array([0.0, distance, 0.0]),
np.array([distance, distance, 0.0]),
np.array([0.0, 2*distance, 0.0]),
np.array([distance, 2*distance, 0.0])])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == n*m
for elem in expected:
assert elem in result
def test_top(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.top_border = {'atoms': [10], 'offset': np.array([0, distance, 0])}
builder.create_top_border()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([0.0, 2*distance+border_distance, 0.0]),
np.array([distance, 2*distance+border_distance, 0.0])])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == 2
for elem in expected:
assert elem in result
def test_bottom(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.bottom_border = {'atoms': [5], 'offset': np.array([0, 0, 0])}
builder.create_bottom_border()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([0, -border_distance, 0.0]),
np.array([distance, -border_distance, 0.0])])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == 2
for elem in expected:
assert elem in result
def test_left(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.left_border = {'atoms': [11],
'offset': np.array([0, distance, 0])}
builder.create_left_border()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([-border_distance, 0, 0]),
np.array([-border_distance, distance, 0]),
np.array([-border_distance, 2*distance, 0])
])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == 3
for elem in expected:
assert elem in result
def test_right(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.right_border = {'atoms': [8],
'offset': np.array([distance, distance, 0])}
builder.create_right_border()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([distance+border_distance, 0, 0]),
np.array([distance+border_distance, distance, 0]),
np.array([distance+border_distance, 2*distance, 0])
])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == 3
for elem in expected:
assert elem in result
def test_build_all(mol_and_params, empty_mol):
olig, distance, border_distance = mol_and_params
a1 = np.array([0.0, distance, 0.0])
a2 = np.array([distance, 0.0, 0.0])
n = 2
m = 3
builder = t.MolBuilder(olig, empty_mol, a1, a2, n, m, [1])
builder.top_border = {'atoms': [10], 'offset': np.array([0, distance, 0])}
builder.bottom_border = {'atoms': [5], 'offset': np.array([0, 0, 0])}
builder.left_border = {'atoms': [11],
'offset': np.array([0, distance, 0])}
builder.right_border = {'atoms': [8],
'offset': np.array([distance, distance, 0])}
builder.create_all()
iterator = openbabel.OBMolAtomIter(empty_mol)
expected = np.array([np.array([0.0, 0.0, 0.0]), # 1; start bulk
np.array([distance, 0.0, 0.0]), # 2
np.array([0.0, distance, 0.0]), # 3
np.array([distance, distance, 0.0]), # 4
np.array([0.0, 2*distance, 0.0]), # 5
np.array([distance, 2*distance, 0.0]), # 6; end of bulk
np.array([0, -border_distance, 0.0]), # 7; start bottom
np.array([distance, -border_distance, 0.0]), # 8; end bottom
np.array([distance + border_distance, 0, 0]), # 9; start right
np.array([distance + border_distance, distance, 0]), # 10
np.array([distance + border_distance, 2 * distance, 0]), # 11; end right
np.array([distance, 2 * distance + border_distance, 0.0]), # 12; start top
np.array([0.0, 2 * distance + border_distance, 0.0]), # 13; end top
np.array([-border_distance, 2 * distance, 0]), # 14; start left
np.array([-border_distance, distance, 0]), # 15
np.array([-border_distance, 0, 0]) # 16; end left
])
result = []
for atom in iterator:
x = atom.GetX()
y = atom.GetY()
z = atom.GetZ()
result.append(np.array([x, y, z]))
result = np.array(result)
assert len(result) == 16
for elem in expected:
assert elem in result
|
from irctest import cases
class ListTestCase(cases.BaseServerTestCase):
@cases.mark_specifications("RFC1459", "RFC2812")
def testListEmpty(self):
"""<https://tools.ietf.org/html/rfc1459#section-4.2.6>
<https://tools.ietf.org/html/rfc2812#section-3.2.6>
"""
self.connectClient("foo")
self.connectClient("bar")
self.getMessages(1)
self.sendLine(2, "LIST")
m = self.getMessage(2)
if m.command == "321":
# skip RPL_LISTSTART
m = self.getMessage(2)
while m.command == "322" and m.params[1] == "&SERVER":
# ngircd adds this pseudo-channel
m = self.getMessage(2)
self.assertNotEqual(
m.command,
"322", # RPL_LIST
"LIST response gives (at least) one channel, whereas there " "is none.",
)
self.assertMessageMatch(
m,
command="323", # RPL_LISTEND
fail_msg="Second reply to LIST is not 322 (RPL_LIST) "
"or 323 (RPL_LISTEND), or but: {msg}",
)
@cases.mark_specifications("RFC1459", "RFC2812")
def testListOne(self):
"""When a channel exists, LIST should get it in a reply.
<https://tools.ietf.org/html/rfc1459#section-4.2.6>
<https://tools.ietf.org/html/rfc2812#section-3.2.6>
"""
self.connectClient("foo")
self.connectClient("bar")
self.sendLine(1, "JOIN #chan")
self.getMessages(1)
self.sendLine(2, "LIST")
m = self.getMessage(2)
if m.command == "321":
# skip RPL_LISTSTART
m = self.getMessage(2)
self.assertNotEqual(
m.command,
"323", # RPL_LISTEND
fail_msg="LIST response ended (ie. 323, aka RPL_LISTEND) "
"without listing any channel, whereas there is one.",
)
self.assertMessageMatch(
m,
command="322", # RPL_LIST
fail_msg="Second reply to LIST is not 322 (RPL_LIST), "
"nor 323 (RPL_LISTEND) but: {msg}",
)
m = self.getMessage(2)
while m.command == "322" and m.params[1] == "&SERVER":
# ngircd adds this pseudo-channel
m = self.getMessage(2)
self.assertNotEqual(
m.command,
"322", # RPL_LIST
fail_msg="LIST response gives (at least) two channels, "
"whereas there is only one.",
)
self.assertMessageMatch(
m,
command="323", # RPL_LISTEND
fail_msg="Third reply to LIST is not 322 (RPL_LIST) "
"or 323 (RPL_LISTEND), or but: {msg}",
)
|
from distributed.comm.addressing import (
get_address_host,
get_address_host_port,
get_local_address_for,
normalize_address,
parse_address,
parse_host_port,
resolve_address,
unparse_address,
unparse_host_port,
)
from distributed.comm.core import Comm, CommClosedError, connect, listen
from distributed.comm.registry import backends
from distributed.comm.utils import get_tcp_server_address, get_tcp_server_addresses
def _register_transports():
import dask.config
from distributed.comm import inproc, ws
tcp_backend = dask.config.get("distributed.comm.tcp.backend")
if tcp_backend == "asyncio":
from distributed.comm import asyncio_tcp
backends["tcp"] = asyncio_tcp.TCPBackend()
backends["tls"] = asyncio_tcp.TLSBackend()
elif tcp_backend == "tornado":
from distributed.comm import tcp
backends["tcp"] = tcp.TCPBackend()
backends["tls"] = tcp.TLSBackend()
else:
raise ValueError(
f"Expected `distributed.comm.tcp.backend` to be in `('asyncio', "
f"'tornado')`, got {tcp_backend}"
)
try:
from distributed.comm import ucx
except ImportError:
pass
_register_transports()
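# Usage sketch (hedged; standard dask configuration API): select the TCP
# implementation before `distributed.comm` is first imported.
#
#     import dask
#     dask.config.set({"distributed.comm.tcp.backend": "asyncio"})
#     import distributed.comm  # _register_transports() then picks asyncio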
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
__author__ = 'Chia-Jung, Yang'
__email__ = 'jeroyang@gmail.com'
__version__ = '0.1.0'
|
from .VAENAR import VAENAR
from .optimizer import ScheduledOptim
|
from nextcord.ext import commands
import os
def is_owner():
    async def predicate(context) -> bool:
        # OWNER_IDS is assumed to hold a comma-separated list of user IDs; the
        # original returned the raw env value, which is truthy whenever it is set.
        owner_ids = os.getenv("OWNER_IDS", "")
        return str(context.author.id) in {i.strip() for i in owner_ids.split(",") if i.strip()}
    return commands.check(predicate)
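# Usage sketch (hedged; standard nextcord command-check usage):
#
#     @commands.command()
#     @is_owner()
#     async def shutdown(ctx):
#         await ctx.send("Shutting down.")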
|
import io
from typing import List, Set, Tuple, Optional, Any
from clvm import SExp
from clvm import run_program as default_run_program
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.operators import OPERATOR_LOOKUP
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm_rs import STRICT_MODE as MEMPOOL_MODE, run_salvia_program, serialized_length, run_generator2
from clvm_tools.curry import curry, uncurry
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.hash import std_hash
from salvia.util.ints import uint16
from salvia.util.byte_types import hexstr_to_bytes
from .tree_hash import sha256_treehash
def run_program(
program,
args,
max_cost,
operator_lookup=OPERATOR_LOOKUP,
pre_eval_f=None,
):
return default_run_program(
program,
args,
operator_lookup,
max_cost,
pre_eval_f=pre_eval_f,
)
INFINITE_COST = 0x7FFFFFFFFFFFFFFF
class Program(SExp):
"""
A thin wrapper around s-expression data intended to be invoked with "eval".
"""
@classmethod
def parse(cls, f) -> "Program":
return sexp_from_stream(f, cls.to)
def stream(self, f):
sexp_to_stream(self, f)
@classmethod
def from_bytes(cls, blob: bytes) -> "Program":
f = io.BytesIO(blob)
result = cls.parse(f) # noqa
assert f.read() == b""
return result
@classmethod
def fromhex(cls, hexstr: str) -> "Program":
return cls.from_bytes(hexstr_to_bytes(hexstr))
def to_serialized_program(self) -> "SerializedProgram":
return SerializedProgram.from_bytes(bytes(self))
@classmethod
def from_serialized_program(cls, sp: "SerializedProgram") -> "Program":
return cls.from_bytes(bytes(sp))
def __bytes__(self) -> bytes:
f = io.BytesIO()
self.stream(f) # noqa
return f.getvalue()
def __str__(self) -> str:
return bytes(self).hex()
def at(self, position: str) -> "Program":
"""
Take a string of only `f` and `r` characters and follow the corresponding path.
Example:
`assert Program.to(17) == Program.to([10, 20, 30, [15, 17], 40, 50]).at("rrrfrf")`
"""
v = self
for c in position.lower():
if c == "f":
v = v.first()
elif c == "r":
v = v.rest()
else:
raise ValueError(f"`at` got illegal character `{c}`. Only `f` & `r` allowed")
return v
def get_tree_hash(self, *args: bytes32) -> bytes32:
"""
Any values in `args` that appear in the tree
are presumed to have been hashed already.
"""
return sha256_treehash(self, set(args))
def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
prog_args = Program.to(args)
cost, r = run_program(self, prog_args, max_cost)
return cost, Program.to(r)
def run(self, args) -> "Program":
cost, r = self.run_with_cost(INFINITE_COST, args)
return r
def curry(self, *args) -> "Program":
cost, r = curry(self, list(args))
return Program.to(r)
def uncurry(self) -> Tuple["Program", "Program"]:
r = uncurry(self)
if r is None:
return self, self.to(0)
return r
def as_int(self) -> int:
return int_from_bytes(self.as_atom())
def as_atom_list(self) -> List[bytes]:
"""
Pretend `self` is a list of atoms. Return the corresponding
python list of atoms.
At each step, we always assume a node to be an atom or a pair.
If the assumption is wrong, we exit early. This way we never fail
and always return SOMETHING.
"""
items = []
obj = self
while True:
pair = obj.pair
if pair is None:
break
atom = pair[0].atom
if atom is None:
break
items.append(atom)
obj = pair[1]
return items
def __deepcopy__(self, memo):
return type(self).from_bytes(bytes(self))
EvalError = EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
"""
Hash values in `precalculated` are presumed to have been hashed already.
"""
if node.listp():
left = _tree_hash(node.first(), precalculated)
right = _tree_hash(node.rest(), precalculated)
s = b"\2" + left + right
else:
atom = node.as_atom()
if atom in precalculated:
return bytes32(atom)
s = b"\1" + atom
return bytes32(std_hash(s))
def _serialize(node) -> bytes:
if type(node) == SerializedProgram:
return bytes(node)
else:
return SExp.to(node).as_bin()
class SerializedProgram:
"""
An opaque representation of a clvm program. It has a more limited interface than a full SExp
"""
_buf: bytes = b""
@classmethod
def parse(cls, f) -> "SerializedProgram":
length = serialized_length(f.getvalue()[f.tell() :])
return SerializedProgram.from_bytes(f.read(length))
def stream(self, f):
f.write(self._buf)
@classmethod
def from_bytes(cls, blob: bytes) -> "SerializedProgram":
ret = SerializedProgram()
ret._buf = bytes(blob)
return ret
@classmethod
def fromhex(cls, hexstr: str) -> "SerializedProgram":
return cls.from_bytes(hexstr_to_bytes(hexstr))
@classmethod
def from_program(cls, p: Program) -> "SerializedProgram":
ret = SerializedProgram()
ret._buf = bytes(p)
return ret
def to_program(self) -> Program:
return Program.from_bytes(self._buf)
def uncurry(self) -> Tuple["Program", "Program"]:
return self.to_program().uncurry()
def __bytes__(self) -> bytes:
return self._buf
def __str__(self) -> str:
return bytes(self).hex()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self))
def __eq__(self, other) -> bool:
if not isinstance(other, SerializedProgram):
return False
return self._buf == other._buf
def __ne__(self, other) -> bool:
if not isinstance(other, SerializedProgram):
return True
return self._buf != other._buf
def get_tree_hash(self, *args: bytes32) -> bytes32:
"""
Any values in `args` that appear in the tree
are presumed to have been hashed already.
"""
tmp = sexp_from_stream(io.BytesIO(self._buf), SExp.to)
return _tree_hash(tmp, set(args))
def run_mempool_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
return self._run(max_cost, MEMPOOL_MODE, *args)
def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
return self._run(max_cost, 0, *args)
# returns an optional error code and an optional PySpendBundleConditions (from clvm_rs)
# exactly one of those will hold a value
def run_as_generator(self, max_cost: int, flags: int, *args) -> Tuple[Optional[uint16], Optional[Any]]:
serialized_args = b""
if len(args) > 1:
# when we have more than one argument, serialize them into a list
for a in args:
serialized_args += b"\xff"
serialized_args += _serialize(a)
serialized_args += b"\x80"
else:
serialized_args += _serialize(args[0])
return run_generator2(
self._buf,
serialized_args,
max_cost,
flags,
)
def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
# when multiple arguments are passed, concatenate them into a serialized
# buffer. Some arguments may already be in serialized form (e.g.
# SerializedProgram) so we don't want to de-serialize those just to
# serialize them back again. This is handled by _serialize()
serialized_args = b""
if len(args) > 1:
# when we have more than one argument, serialize them into a list
for a in args:
serialized_args += b"\xff"
serialized_args += _serialize(a)
serialized_args += b"\x80"
else:
serialized_args += _serialize(args[0])
cost, ret = run_salvia_program(
self._buf,
serialized_args,
max_cost,
flags,
)
return cost, Program.to(ret)
NIL = Program.from_bytes(b"\x80")
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os, sys
sys.path.append(os.path.dirname(__file__))
from Editor_TestClass import BaseClass
class Editor_ComponentPropertyCommands_Works(BaseClass):
# Description:
# Tests a portion of the Component Property Get/Set Python API while the Editor is running
@staticmethod
def GetSetCompareTest(component, path, value):
import azlmbr.bus as bus
import azlmbr.editor as editor
oldObj = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentProperty', component, path)
BaseClass.check_result(oldObj.IsSuccess(), 'oldObj.IsSuccess()')
oldValue = oldObj.GetValue()
oldValueCompared = editor.EditorComponentAPIBus(bus.Broadcast, 'CompareComponentProperty', component, path, oldValue)
editor.EditorComponentAPIBus(bus.Broadcast, 'SetComponentProperty', component, path, value)
newObj = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentProperty', component, path)
BaseClass.check_result(newObj.IsSuccess(), 'newObj.IsSuccess()')
newValue = newObj.GetValue()
newValueCompared = editor.EditorComponentAPIBus(bus.Broadcast, 'CompareComponentProperty', component, path, newValue)
isOldNewValueSame = editor.EditorComponentAPIBus(bus.Broadcast, 'CompareComponentProperty', component, path, oldValue)
BaseClass.check_result(not(newValue == oldValue and oldValueCompared and newValueCompared and not isOldNewValueSame), "GetSetCompareTest " + path)
@staticmethod
def PteTest(pte, path, value):
oldObj = pte.get_value(path)
BaseClass.check_result(oldObj.IsSuccess(), 'oldObj.IsSuccess()')
oldValue = oldObj.GetValue()
oldValueCompared = pte.compare_value(path, oldValue)
pte.set_value(path, value)
newObj = pte.get_value(path)
BaseClass.check_result(newObj.IsSuccess(), 'newObj.IsSuccess()')
newValue = newObj.GetValue()
newValueCompared = pte.compare_value(path, newValue)
isOldNewValueSame = pte.compare_value(path, oldValue)
BaseClass.check_result(not(newValue == oldValue and oldValueCompared and newValueCompared and not isOldNewValueSame), "PteTest " + path)
@staticmethod
def test():
import azlmbr.legacy.general
import azlmbr.prefab
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.entity as entity
import azlmbr.math as math
check_result = BaseClass.check_result
GetSetCompareTest = Editor_ComponentPropertyCommands_Works.GetSetCompareTest
PteTest = Editor_ComponentPropertyCommands_Works.PteTest
# Create new Entity
entityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', entity.EntityId())
check_result(entityId, "New entity with no parent created")
# Get Component Type for Quad Shape
typeIdsList = editor.EditorComponentAPIBus(bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Quad Shape"], entity.EntityType().Game)
componentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', entityId, typeIdsList)
check_result(componentOutcome.IsSuccess(), f"Quad Shape component {typeIdsList} added to entity")
components = componentOutcome.GetValue()
component = components[0]
hasComponent = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', entityId, typeIdsList[0])
check_result(hasComponent, "Entity has an Quad Shape component")
# Test BuildComponentPropertyList
paths = editor.EditorComponentAPIBus(bus.Broadcast, 'BuildComponentPropertyList', component)
check_result(len(paths) > 1, f"BuildComponentPropertyList {len(paths)}")
# Tests for GetComponentProperty/SetComponentProperty
path_color = 'Shape Color'
path_visible = 'Visible'
path_quad_width = 'Quad Shape|Quad Configuration|Width'
GetSetCompareTest(component, path_visible, False)
GetSetCompareTest(component, path_quad_width, 42.0)
color = math.Color()
color.r = 0.4
color.g = 0.5
color.b = 0.6
GetSetCompareTest(component, path_color, color)
# Tests for BuildComponentPropertyTreeEditor
pteObj = editor.EditorComponentAPIBus(bus.Broadcast, 'BuildComponentPropertyTreeEditor', component)
check_result(pteObj.IsSuccess(), "BuildComponentPropertyTreeEditor")
pte = pteObj.GetValue()
PteTest(pte, path_visible, True)
PteTest(pte, path_quad_width, 48.0)
color = math.Color()
color.r = 0.9
color.g = 0.1
color.b = 0.3
PteTest(pte, path_color, color)
if __name__ == "__main__":
tester = Editor_ComponentPropertyCommands_Works()
tester.test_case(tester.test)
|
# -*- coding: utf-8 -*-
import os
import appdirs
import errno
import logging
# merge two track lists based on ID
def merge_track_lists(tracks, new_tracks):
track_ids = [t.get('id') for t in tracks]
for t in new_tracks:
if t.get('id') not in track_ids:
tracks.append(t)
return tracks
# match playlist track to library track
def match_track_by_id(track_id, library):
matching_tracks = [t for t in library if t.get('id') == track_id]
if matching_tracks:
return matching_tracks[0]
else:
return False
# return a valid id (storeId or nid)
def get_aa_id(track):
    if 'storeId' in track and track.get('storeId').startswith('T'):
        return track.get('storeId')
    elif 'nid' in track and track.get('nid').startswith('T'):
        return track.get('nid')
return False
# check if track appears to be a valid All Access track
def track_has_aa_data(track):
    if 'storeId' in track and track.get('storeId').startswith('T'):
        return True
    elif 'nid' in track and track.get('nid').startswith('T'):
        return True
return False
# get key/value pair representing a radio station ID, return False if ID not present
# pass allow_locker=True to allow stations that are based on uploaded songs
def get_station_id(station, allow_locker=False):
    seed = station.get('seed')
    if 'albumId' in seed:
        return {'album_id': seed.get('albumId')}
    elif 'artistId' in seed:
        return {'artist_id': seed.get('artistId')}
    elif 'trackId' in seed:
        return {'track_id': seed.get('trackId')}
    elif 'genreId' in seed:
        return {'genre_id': seed.get('genreId')}
    if 'trackLockerId' in seed and allow_locker:
return {'track_id': seed.get('trackLockerId')}
return False
# initialize logger
def logger(name, console_loglevel='INFO', file_loglevel='INFO'):
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
# create null handler if running silent
if console_loglevel == 'NONE' and file_loglevel == 'NONE':
nh = logging.NullHandler()
log.addHandler(nh)
# set up console logging
if console_loglevel != 'NONE':
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
if console_loglevel == 'CRITICAL':
ch.setLevel(logging.CRITICAL)
elif console_loglevel == 'ERROR':
ch.setLevel(logging.ERROR)
elif console_loglevel == 'WARNING':
ch.setLevel(logging.WARNING)
elif console_loglevel == 'DEBUG':
ch.setLevel(logging.DEBUG)
else: ch.setLevel(logging.INFO)
log.addHandler(ch)
# set up file logging
if file_loglevel != 'NONE':
log_path = os.path.join(appdirs.user_log_dir(name), name + '.log')
try:
os.makedirs(os.path.dirname(log_path), 0o700)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
fh = logging.FileHandler(log_path)
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s [%(levelname)s]: %(message)s'))
if file_loglevel == 'CRITICAL':
fh.setLevel(logging.CRITICAL)
elif file_loglevel == 'ERROR':
fh.setLevel(logging.ERROR)
elif file_loglevel == 'WARNING':
fh.setLevel(logging.WARNING)
elif file_loglevel == 'DEBUG':
fh.setLevel(logging.DEBUG)
else: fh.setLevel(logging.INFO)
log.addHandler(fh)
return log
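# Usage sketch (hedged; the application name is illustrative):
#
#     log = logger("gmusic-sync", console_loglevel="DEBUG", file_loglevel="NONE")
#     log.debug("merged %d tracks", len(tracks))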
|
import os
import csv
import cv2
import argparse
def main(args):
crop_size_min = 1000
gt_dict = {}
seq_list = os.listdir(args.data_root)
for seq_name in seq_list:
gt_path = os.path.join(args.data_root, seq_name, 'gt/gt.txt')
with open(gt_path) as gt_file:
gt_reader = csv.reader(gt_file, delimiter=',')
for row in gt_reader:
if int(row[1]) not in gt_dict:
gt_dict[int(row[1])] = {}
if seq_name not in gt_dict[int(row[1])]:
gt_dict[int(row[1])][seq_name] = {}
gt_dict[int(row[1])][seq_name][int(row[0])] = (int(row[2]), int(row[3]), int(row[4]), int(row[5]))
crop_root_dir = os.path.join(args.data_root, 'crops')
if not os.path.exists(crop_root_dir):
os.makedirs(crop_root_dir)
for global_id in sorted(gt_dict.keys()):
print('%04d' % global_id)
crop_dir = os.path.join(crop_root_dir, '%04d' % global_id)
os.makedirs(crop_dir)
if len(gt_dict[global_id].keys()) < 2:
print('ERROR: Less than 2 camera IDs for global ID %d' % global_id)
for seq_name in sorted(gt_dict[global_id].keys()):
img_dir = os.path.join(args.data_root, seq_name, 'img1')
for frm_idx in sorted(gt_dict[global_id][seq_name].keys()):
img_frm_path = os.path.join(img_dir, '%06d.jpg' % frm_idx)
img_frm = cv2.imread(img_frm_path)
bbox = gt_dict[global_id][seq_name][frm_idx]
img_crop = img_frm[bbox[1]:bbox[1]+bbox[3], bbox[0]:bbox[0]+bbox[2]]
crop_size = bbox[2] * bbox[3]
crop_flag = True
if crop_size < crop_size_min:
crop_flag = False
img_crop_path = os.path.join(crop_dir, '%s_%06d_%s.jpg' % (seq_name, frm_idx, crop_flag))
cv2.imwrite(img_crop_path, img_crop)
if __name__ == '__main__':
print("Loading parameters...")
parser = argparse.ArgumentParser(description='Plot vehicle crops')
parser.add_argument('--data-root', dest='data_root', default='train/S01',
help='dataset root path')
args = parser.parse_args()
main(args)
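# Usage sketch (hedged; the script filename is illustrative):
#
#     python extract_crops.py --data-root train/S01
#
# Crops land in <data-root>/crops/<global_id>/<seq>_<frame>_<flag>.jpg, where
# <flag> records whether the crop area exceeds crop_size_min.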
|
# see: https://docs.python.org/3/library/functions.html#eval
import re
def parse_into(line):
while ("+" in line) or ("*" in line):
e = re.findall(r"^(\d+ [\*\+] \d+)", line)[0]
line = str(eval(e)) + line[len(e):]
return int(line)
def parse(line):
    while line.count("("):
        # renamed from "eval" to avoid shadowing the builtin of the same name
        groups = re.findall(r"\(\d+ [\*\+] [\d+|\d+ \[\*\+\] \d+]+\)", line)
        for e in groups:
            line = line.replace(e, str(parse_into(e[1:-1])))
    return parse_into(line)
total = 0
with open("input", "r") as f:
    for l in f.readlines():
        total = total + parse(l)
print("Result = ", total)
|
import itertools
from . import (Point, MultiPoint, LineString, MultiLineString,
Polygon, MultiPolygon, GeometryCollection,
FeatureCollection)
def merge(items):
""" Combine a list of GeoJSON objects into the single most specific type
that retains all information.
For example,
- merging two Point objects creates a MultiPoint
- merging a Point and a LineString creates a GeometryCollection
- merging a multiple Features creates a FeatureCollection
Raises
- ValueError when the list contains nothing
- TypeError when merging a Geometry and a Feature/FeatureCollection
"""
items = list(items)
if len(items) == 0:
raise ValueError("zero-length iterable cannot be merged")
elif len(items) == 1:
return items[0]
t0 = type(items[0]).__name__
if all(type(g).__name__ == t0 for g in items[1:]):
if items[0].crs is None and any(it.crs is not None for it in items[1:]):
raise ValueError("all inputs must share the same CRS")
elif any(items[0].crs != it.crs for it in items[1:]):
raise ValueError("all inputs must share the same CRS")
if t0 == "Point":
return MultiPoint([g.coordinates for g in items], crs=items[0].crs)
elif t0 == "LineString":
return MultiLineString([g.coordinates for g in items], crs=items[0].crs)
elif t0 == "Polygon":
return MultiPolygon([g.coordinates for g in items], crs=items[0].crs)
elif t0 == "GeometryCollection":
return GeometryCollection(items, crs=items[0].crs)
elif t0 == "Feature":
return FeatureCollection(items, crs=items[0].crs)
elif t0 == "FeatureCollection":
features = itertools.chain.from_iterable([f.features for f in items])
return FeatureCollection(list(features), crs=items[0].crs)
else:
raise TypeError("unhandled type '{}'".format(type(items[0]).__name__))
elif "Feature" not in (type(g).__name__ for g in items) and \
"FeatureCollection" not in (type(g).__name__ for g in items):
return GeometryCollection(items)
elif all(type(g).__name__ in ("Feature", "FeatureCollection") for g in items):
features = []
for item in items:
if type(item).__name__ == "Feature":
features.append(item)
else:
features.extend(item.features)
return FeatureCollection(features)
else:
raise TypeError("no rule to merge {}".format(set(type(g).__name__ for g in items)))
def burst(item):
""" Generator that breaks a composite GeoJSON type into atomic Points,
LineStrings, Polygons, or Features. """
if type(item).__name__ == "GeometryCollection":
for geometry in item.geometries:
for subgeometry in burst(geometry):
subgeometry.crs = item.crs
yield subgeometry
elif type(item).__name__ == "FeatureCollection":
for feature in item.features:
if item.crs is not None:
feature.crs = item.crs
yield feature
elif type(item).__name__ == "MultiPoint":
for coords in item.coordinates:
pt = Point(coords, crs=item.crs)
yield pt
elif type(item).__name__ == "MultiLineString":
for coords in item.coordinates:
geom = LineString(coords, crs=item.crs)
yield geom
elif type(item).__name__ == "MultiPolygon":
for coords in item.coordinates:
geom = Polygon(coords, crs=item.crs)
yield geom
else:
yield item
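# Illustrative behaviour (not part of the original module): merge() promotes
# homogeneous inputs and burst() undoes that promotion, e.g.
#   merge([Point((0, 0)), Point((1, 1))])      -> MultiPoint
#   list(burst(MultiPoint([(0, 0), (1, 1)])))  -> [Point, Point]
# (constructor signatures here are assumptions based on how this module calls
# them).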
|
class Animals(object):
def __init__(self, type, price, product, productValue, sellValue):
self.__type = type
self.__price = price
self.__product = product
self.__productValue = productValue
self.__sellValue = sellValue
def get_type(self):
return self.__type
def get_price(self):
return float(self.__price)
def get_product(self):
return self.__product
def get_productValue(self):
return float(self.__productValue)
def get_sellValue(self):
return float(self.__sellValue)
    def __str__(self):
        string = f"{self.__type}...\n Cost: {self.__price}\n Product: {self.__product}, sells at ${self.__productValue} per pound"
        string += f"\n Sell Price: each {self.__type} sells at ${self.__sellValue} per {self.__type}"
        return string
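# Illustrative usage (not part of the original file; the animal data below is
# made up):
if __name__ == "__main__":
    cow = Animals("cow", 150, "milk", 2.5, 300)
    print(cow)
    print("Purchase price:", cow.get_price())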
|
from collections import deque
from pathlib import Path
def read_numbers(path: Path, cast=int) -> list[int]:
"""Read numeric data from a text file."""
if cast not in (int, float):
raise ValueError("Can only cast values to int or float")
data = []
    with open(path, "r") as file:
        for line in file:
            data.append(cast(line))
return data
def diff(data: list[int]) -> list[int]:
"""Calculate the difference between numeric values in a list."""
previous = data[0]
difference = []
for value in data[1:]:
difference.append(value - previous)
previous = value
return difference
def count_positives(data: list[int]) -> int:
"""Count the positive numbers in a list."""
return sum(x > 0 for x in data)
def moving_sum(data: list[int], size: int) -> list[int]:
"""Calculate the sum of a moving window over a list."""
window = deque(maxlen=size)
summed = []
for number in data:
window.append(number)
summed.append(sum(window))
return summed[size - 1 :]
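# Note (illustrative, not part of the original solution): comparing successive
# window sums is equivalent to comparing the element entering the window with
# the element leaving it, because the size-1 overlapping terms cancel.
def count_window_increases(data: list[int], size: int) -> int:
    """Same result as count_positives(diff(moving_sum(data, size)))."""
    return sum(b > a for a, b in zip(data, data[size:]))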
def main():
data = read_numbers(Path("data/day_1_data.txt"))
derivative = diff(data)
count = count_positives(derivative)
print(f"There are {count} measurements larger than the previous measurement!")
size = 3
summed = moving_sum(data, size)
summed_derivative = diff(summed)
summed_count = count_positives(summed_derivative)
print(f"Summed (window: {size}), there are {summed_count} measurements larger than the previous measurement!")
if __name__ == "__main__":
main()
|
'''
RunBundle is a bundle type that is produced by running a program on an input.
Its constructor takes a program target (which must be in a ProgramBundle),
an input target (which can be in any bundle), and a command to run.
When the bundle is executed, it symlinks the program target in to ./program,
symlinks the input target in to ./input, and then streams output to ./stdout
and ./stderr. The ./output directory may also be used to store output files.
'''
from typing import List
from codalab.bundles.derived_bundle import DerivedBundle
from codalab.common import UsageError
from codalab.lib.completers import DockerImagesCompleter
from codalab.objects.metadata_spec import MetadataSpec
from codalab.worker.bundle_state import State
class RunBundle(DerivedBundle):
BUNDLE_TYPE = 'run'
METADATA_SPECS = list(DerivedBundle.METADATA_SPECS) # type: List
    # Note that these are strings, which need to be parsed
    # Request a machine with these resources and don't let the run exceed them
    # Don't format metadata specs
# fmt: off
METADATA_SPECS.append(MetadataSpec('request_docker_image', str, 'Which docker image (either tag or digest, e.g., '
'codalab/default-cpu:latest) we wish to use.', completer=DockerImagesCompleter, hide_when_anonymous=True, default=None))
METADATA_SPECS.append(MetadataSpec('request_time', str, 'Amount of time (e.g., 3, 3m, 3h, 3d) allowed for this run. Defaults to user time quota left.', formatting='duration', default=None))
METADATA_SPECS.append(MetadataSpec('request_memory', str, 'Amount of memory (e.g., 3, 3k, 3m, 3g, 3t) allowed for this run.', formatting='size', default='2g'))
METADATA_SPECS.append(MetadataSpec('request_disk', str, 'Amount of disk space (e.g., 3, 3k, 3m, 3g, 3t) allowed for this run. Defaults to user disk quota left.', formatting='size', default=None))
METADATA_SPECS.append(MetadataSpec('request_cpus', int, 'Number of CPUs allowed for this run.', default=1))
METADATA_SPECS.append(MetadataSpec('request_gpus', int, 'Number of GPUs allowed for this run.', default=0))
METADATA_SPECS.append(MetadataSpec('request_queue', str, 'Submit run to this job queue.', hide_when_anonymous=True, default=None))
METADATA_SPECS.append(MetadataSpec('request_priority', int, 'Job priority (higher is more important). Negative '
'priority bundles are queued behind bundles with no specified priority.', default=None))
METADATA_SPECS.append(MetadataSpec('request_network', bool, 'Whether to allow network access.', default=False))
METADATA_SPECS.append(
MetadataSpec('cpu_usage', float, 'Portion of CPU used by this container (e.g., 0.24)', generated=True))
METADATA_SPECS.append(
MetadataSpec('memory_limit', int, 'Limit of Memory available to this container (e.g., 2085326848)', generated=True))
METADATA_SPECS.append(MetadataSpec('exclude_patterns', list, 'Exclude these file patterns from being saved into the bundle contents.', default=[]))
METADATA_SPECS.append(MetadataSpec('actions', list, 'Actions (e.g., kill) that were performed on this run.', generated=True))
METADATA_SPECS.append(MetadataSpec('time', float, 'Amount of wall clock time (seconds) used by this run in total. '
'[Runtime of the Docker container excluding CodaLab related '
'steps such as preparing/uploading results]',
generated=True,
formatting='duration'))
METADATA_SPECS.append(MetadataSpec('time_user', float, 'Amount of user time (seconds) used by this run.', generated=True, formatting='duration'))
METADATA_SPECS.append(MetadataSpec('time_system', float, 'Amount of system time (seconds) used by this run.', generated=True, formatting='duration'))
METADATA_SPECS.append(MetadataSpec('memory', float, 'Amount of memory (bytes) used by this run.', generated=True, formatting='size'))
METADATA_SPECS.append(MetadataSpec('memory_max', float, 'Maximum amount of memory (bytes) used by this run at any time during execution.', generated=True, formatting='size'))
METADATA_SPECS.append(MetadataSpec('started', int, 'Time when this bundle started executing.', generated=True, formatting='date'))
METADATA_SPECS.append(MetadataSpec('last_updated', int, 'Time when information about this bundle was last updated.', generated=True, formatting='date'))
METADATA_SPECS.append(MetadataSpec('run_status', str, 'Execution status of the bundle.', generated=True))
METADATA_SPECS.append(MetadataSpec('staged_status', str, 'Information about the status of the staged bundle.', generated=True))
# Information about running
METADATA_SPECS.append(MetadataSpec('docker_image', str, 'Which docker image was used to run the process.', generated=True, hide_when_anonymous=True))
METADATA_SPECS.append(MetadataSpec('exitcode', int, 'Exitcode of the process.', generated=True))
METADATA_SPECS.append(MetadataSpec('job_handle', str, 'Identifies the job handle (internal).', generated=True, hide_when_anonymous=True))
METADATA_SPECS.append(MetadataSpec('remote', str, 'Where this job is/was run (internal).', generated=True, hide_when_anonymous=True))
# fmt: on
@classmethod
def construct(
cls, targets, command, metadata, owner_id, uuid=None, data_hash=None, state=State.CREATED
):
if not isinstance(command, str):
raise UsageError('%r is not a valid command!' % (command,))
return super(RunBundle, cls).construct(
targets, command, metadata, owner_id, uuid, data_hash, state
)
def validate(self):
super(RunBundle, self).validate()
for dep in self.dependencies:
dep.validate(require_child_path=True)
|
#!/usr/bin/env python
# encoding: utf-8
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
login_manager = LoginManager()
db = SQLAlchemy()
migrate = Migrate()
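# Illustrative wiring (not part of the original module): these extension
# instances are typically bound to the app in an application factory. The
# factory below is an assumed sketch, not code from this project.
#
#   from flask import Flask
#
#   def create_app():
#       app = Flask(__name__)
#       db.init_app(app)
#       migrate.init_app(app, db)
#       login_manager.init_app(app)
#       return app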
|
try:
    with open("myfile.txt") as fh:
        file_data = fh.read()
    print(file_data)
except FileNotFoundError:
    print("The data file is missing")
except PermissionError:
    print("Reading the file is not allowed")
except Exception as ex:  # a bare except would also swallow SystemExit/KeyboardInterrupt
    print("Some error occurred:", ex)
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import copy
# Import Salt Testing libs
from salttesting.unit import skipIf, TestCase
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import salt.config
import salt.loader
from salt.modules import boto_elasticsearch_domain
# Import standard libs
import logging
# pylint: disable=import-error,no-name-in-module
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module
# Minimum boto3 version required by salt.modules.boto_elasticsearch_domain
required_boto3_version = '1.2.1'
def _has_required_boto():
'''
Returns True/False boolean depending on if Boto is installed and correct
version.
'''
if not HAS_BOTO:
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
else:
return True
if _has_required_boto():
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
not_found_error = ClientError({
'Error': {
'Code': 'ResourceNotFoundException',
'Message': "Test-defined error"
}
}, 'msg')
domain_ret = dict(DomainName='testdomain',
ElasticsearchClusterConfig={},
EBSOptions={},
AccessPolicies={},
SnapshotOptions={},
AdvancedOptions={})
log = logging.getLogger(__name__)
opts = salt.config.DEFAULT_MINION_OPTS
context = {}
utils = salt.loader.utils(opts, whitelist=['boto3'], context=context)
boto_elasticsearch_domain.__utils__ = utils
boto_elasticsearch_domain.__init__(opts)
boto_elasticsearch_domain.__salt__ = {}
class BotoElasticsearchDomainTestCaseBase(TestCase):
conn = None
# Set up MagicMock to replace the boto3 session
def setUp(self):
context.clear()
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
mock_session = self.patcher.start()
session_instance = mock_session.return_value
self.conn = MagicMock()
session_instance.client.return_value = self.conn
class BotoElasticsearchDomainTestCaseMixin(object):
pass
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoElasticsearchDomainTestCase(BotoElasticsearchDomainTestCaseBase, BotoElasticsearchDomainTestCaseMixin):
'''
TestCase for salt.modules.boto_elasticsearch_domain module
'''
def test_that_when_checking_if_a_domain_exists_and_a_domain_exists_the_domain_exists_method_returns_true(self):
'''
Tests checking domain existence when the domain already exists
'''
result = boto_elasticsearch_domain.exists(DomainName='testdomain', **conn_parameters)
self.assertTrue(result['exists'])
def test_that_when_checking_if_a_domain_exists_and_a_domain_does_not_exist_the_domain_exists_method_returns_false(self):
'''
Tests checking domain existence when the domain does not exist
'''
self.conn.describe_elasticsearch_domain.side_effect = not_found_error
result = boto_elasticsearch_domain.exists(DomainName='mydomain', **conn_parameters)
self.assertFalse(result['exists'])
def test_that_when_checking_if_a_domain_exists_and_boto3_returns_an_error_the_domain_exists_method_returns_error(self):
'''
Tests checking domain existence when boto returns an error
'''
self.conn.describe_elasticsearch_domain.side_effect = ClientError(error_content, 'list_domains')
result = boto_elasticsearch_domain.exists(DomainName='mydomain', **conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_domains'))
def test_that_when_checking_domain_status_and_a_domain_exists_the_domain_status_method_returns_info(self):
'''
Tests checking domain existence when the domain already exists
'''
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.status(DomainName='testdomain', **conn_parameters)
self.assertTrue(result['domain'])
def test_that_when_checking_domain_status_and_boto3_returns_an_error_the_domain_status_method_returns_error(self):
'''
Tests checking domain existence when boto returns an error
'''
self.conn.describe_elasticsearch_domain.side_effect = ClientError(error_content, 'list_domains')
result = boto_elasticsearch_domain.status(DomainName='mydomain', **conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_domains'))
def test_that_when_describing_domain_it_returns_the_dict_of_properties_returns_true(self):
'''
Tests describing parameters if domain exists
'''
domainconfig = {}
        for k, v in domain_ret.items():
if k == 'DomainName':
continue
domainconfig[k] = {'Options': v}
self.conn.describe_elasticsearch_domain_config.return_value = {'DomainConfig': domainconfig}
result = boto_elasticsearch_domain.describe(DomainName=domain_ret['DomainName'], **conn_parameters)
        log.warning(result)
desired_ret = copy.copy(domain_ret)
desired_ret.pop('DomainName')
self.assertEqual(result, {'domain': desired_ret})
def test_that_when_describing_domain_on_client_error_it_returns_error(self):
'''
Tests describing parameters failure
'''
self.conn.describe_elasticsearch_domain_config.side_effect = ClientError(error_content, 'list_domains')
result = boto_elasticsearch_domain.describe(DomainName='testdomain', **conn_parameters)
self.assertTrue('error' in result)
def test_that_when_creating_a_domain_succeeds_the_create_domain_method_returns_true(self):
'''
tests True domain created.
'''
self.conn.create_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.create(**args)
self.assertTrue(result['created'])
def test_that_when_creating_a_domain_fails_the_create_domain_method_returns_error(self):
'''
tests False domain not created.
'''
self.conn.create_elasticsearch_domain.side_effect = ClientError(error_content, 'create_domain')
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.create(**args)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('create_domain'))
def test_that_when_deleting_a_domain_succeeds_the_delete_domain_method_returns_true(self):
'''
tests True domain deleted.
'''
result = boto_elasticsearch_domain.delete(DomainName='testdomain',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_a_domain_fails_the_delete_domain_method_returns_false(self):
'''
tests False domain not deleted.
'''
self.conn.delete_elasticsearch_domain.side_effect = ClientError(error_content, 'delete_domain')
result = boto_elasticsearch_domain.delete(DomainName='testdomain',
**conn_parameters)
self.assertFalse(result['deleted'])
def test_that_when_updating_a_domain_succeeds_the_update_domain_method_returns_true(self):
'''
tests True domain updated.
'''
self.conn.update_elasticsearch_domain_config.return_value = {'DomainConfig': domain_ret}
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.update(**args)
self.assertTrue(result['updated'])
def test_that_when_updating_a_domain_fails_the_update_domain_method_returns_error(self):
'''
tests False domain not updated.
'''
self.conn.update_elasticsearch_domain_config.side_effect = ClientError(error_content, 'update_domain')
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.update(**args)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('update_domain'))
def test_that_when_adding_tags_succeeds_the_add_tags_method_returns_true(self):
'''
tests True tags added.
'''
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.add_tags(DomainName='testdomain', a='b', **conn_parameters)
self.assertTrue(result['tagged'])
def test_that_when_adding_tags_fails_the_add_tags_method_returns_false(self):
'''
tests False tags not added.
'''
self.conn.add_tags.side_effect = ClientError(error_content, 'add_tags')
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.add_tags(DomainName=domain_ret['DomainName'], a='b', **conn_parameters)
self.assertFalse(result['tagged'])
def test_that_when_removing_tags_succeeds_the_remove_tags_method_returns_true(self):
'''
tests True tags removed.
'''
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.remove_tags(DomainName=domain_ret['DomainName'], TagKeys=['a'], **conn_parameters)
self.assertTrue(result['tagged'])
def test_that_when_removing_tags_fails_the_remove_tags_method_returns_false(self):
'''
tests False tags not removed.
'''
self.conn.remove_tags.side_effect = ClientError(error_content, 'remove_tags')
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.remove_tags(DomainName=domain_ret['DomainName'], TagKeys=['b'], **conn_parameters)
self.assertFalse(result['tagged'])
def test_that_when_listing_tags_succeeds_the_list_tags_method_returns_true(self):
'''
tests True tags listed.
'''
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.list_tags(DomainName=domain_ret['DomainName'], **conn_parameters)
self.assertEqual(result['tags'], {})
def test_that_when_listing_tags_fails_the_list_tags_method_returns_false(self):
'''
tests False tags not listed.
'''
self.conn.list_tags.side_effect = ClientError(error_content, 'list_tags')
self.conn.describe_elasticsearch_domain.return_value = {'DomainStatus': domain_ret}
result = boto_elasticsearch_domain.list_tags(DomainName=domain_ret['DomainName'], **conn_parameters)
self.assertTrue(result['error'])
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(BotoElasticsearchDomainTestCase, needs_daemon=False)
|
#!/usr/bin/env python3
"""Alta3 Research | RZFeeser
CHALLENGE 01 - Solution"""
def main():
user_input = input("Please enter an IPv4 IP address: ")
## the line below creates a single string that is passed to print()
# print("You told me the IPv4 address is:" + user_input)
## print() can be given a series of objects separated by a comma
print("You told me the IPv4 address is:", user_input)
# asking user for 'vendor name'
vendor = input("Please input the vendor name: ")
print(vendor)
main()
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gui_overlay_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='gui_overlay_config.proto',
package='gazebo.msgs',
serialized_pb=_b('\n\x18gui_overlay_config.proto\x12\x0bgazebo.msgs\"+\n\x10GUIOverlayConfig\x12\x17\n\x0flayout_filename\x18\x01 \x02(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GUIOVERLAYCONFIG = _descriptor.Descriptor(
name='GUIOverlayConfig',
full_name='gazebo.msgs.GUIOverlayConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='layout_filename', full_name='gazebo.msgs.GUIOverlayConfig.layout_filename', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=84,
)
DESCRIPTOR.message_types_by_name['GUIOverlayConfig'] = _GUIOVERLAYCONFIG
GUIOverlayConfig = _reflection.GeneratedProtocolMessageType('GUIOverlayConfig', (_message.Message,), dict(
DESCRIPTOR = _GUIOVERLAYCONFIG,
__module__ = 'gui_overlay_config_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.GUIOverlayConfig)
))
_sym_db.RegisterMessage(GUIOverlayConfig)
# @@protoc_insertion_point(module_scope)
|
class LargeInteger:
def __init__(self, integer_str):
if integer_str[0] == '-':
self.sign = -1
array = [int(c) for c in integer_str[1:]]
elif integer_str[0] == '+':
self.sign = 1
array = [int(c) for c in integer_str[1:]]
else:
self.sign = 1
array = [int(c) for c in integer_str]
self.array = [self.sign * i for i in reversed(array)]
@property
def integer_str(self):
array = [str(abs(i)) for i in reversed(self.array)]
string = "".join(array)
if self.sign == -1:
string = '-' + string
return string
def __len__(self):
return len(self.array)
def __repr__(self):
return self.integer_str
def __mul__(self, other):
if str(self) == '0' or str(other) == '0':
return LargeInteger('0')
n = max(len(self), len(other))
if n < 3:
return LargeInteger(str(int(self.integer_str) * int(other.integer_str)))
        else:
            # divide-and-conquer split: x/w hold the high digits, y/z the low
            # m digits (arrays are little-endian, least-significant first)
            m = n // 2
            x = LargeInteger(self.integer_str)
            x.array = x.array[m:] if m < len(x) else [0]
            y = LargeInteger(self.integer_str)
            y.array = y.array[:m]
            w = LargeInteger(other.integer_str)
            w.array = w.array[m:] if m < len(w) else [0]
            z = LargeInteger(other.integer_str)
            z.array = z.array[:m]
            part1 = x * w  # contributes x*w * 10^(2m)
            part1.array = [0] * (2*m) + part1.array
            part2 = x * z + w * y  # contributes (x*z + w*y) * 10^m
            part2.array = [0] * m + part2.array
            part3 = y * z
            return part1 + part2 + part3
@staticmethod
def _adjust_array(array):
while len(array) > 0 and array[-1] == 0:
array.pop(-1)
if len(array) == 0:
return [0], 1
array_new = []
if array[-1] > 0:
sign = 1
for i in range(len(array) - 1):
item = array[i]
if 10 > item >= 0:
array_new.append(item)
elif item >= 10:
array_new.append(item - 10)
array[i+1] += 1
else:
array_new.append(item + 10)
array[i+1] -= 1
else:
sign = -1
for i in range(len(array) - 1):
item = array[i]
if -10 < item <= 0:
array_new.append(item)
elif item <= -10:
array_new.append(item + 10)
array[i+1] -= 1
else:
array_new.append(item - 10)
array[i+1] += 1
if array[-1] != 0:
array_new.append(array[-1])
while len(array_new) > 0 and array_new[-1] == 0:
array_new.pop(-1)
array_new = [i*sign for i in array_new]
return array_new, sign
def __add__(self, other):
array = []
for a, b in zip(self.array, other.array):
array.append(a+b)
if len(self) > len(other):
array.extend(self.array[len(other):])
else:
array.extend(other.array[len(self):])
array, sign = self._adjust_array(array)
array = [str(i) for i in reversed(array)]
if sign == -1:
array.insert(0, '-')
return LargeInteger("".join(array))
def __sub__(self, other):
other = -other
return self + other
    def __neg__(self):
        # strings are immutable, so build a new string instead of assigning
        # to integer_str[0]
        integer_str = self.integer_str
        if integer_str[0] == '-':
            integer_str = '+' + integer_str[1:]
        elif integer_str[0] == '+':
            integer_str = '-' + integer_str[1:]
        else:
            integer_str = '-' + integer_str
        return LargeInteger(integer_str)
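# Illustrative sanity check (not part of the original file): the recursive
# __mul__ should agree with Python's built-in integer arithmetic.
if __name__ == "__main__":
    a, b = "31415926535897932384", "27182818284590452353"
    product = LargeInteger(a) * LargeInteger(b)
    assert product.integer_str == str(int(a) * int(b))
    print(product)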
|
# coding: utf-8
from conf import settings
print(settings.MYSQL_HOST) # noqa
print(settings.MYSQL_PASSWD) # noqa
print(settings.EXAMPLE) # noqa
print(settings.current_env) # noqa
print(settings.WORKS) # noqa
|
# -*- coding: utf-8 -*-
"""@package set_FEMM_materials
@date Created on Aug 06 17:04 2018
@author franco_i+
@todo: why is the label "Lamination_Stator_Bore" and not "Lamination_Stator"
"""
import femm
from pyleecan.Functions.FEMM.create_FEMM_bar import create_FEMM_bar
from pyleecan.Functions.FEMM.create_FEMM_circuit_material import (
create_FEMM_circuit_material,
)
from pyleecan.Functions.FEMM.create_FEMM_magnet import create_FEMM_magnet
def create_FEMM_materials(
machine,
surf_list,
Is,
Ir,
BHs,
BHr,
is_mmfs,
is_mmfr,
is_stator_linear_BH,
is_rotor_linear_BH,
is_eddies,
j_t0,
):
"""Add materials in FEMM
Parameters
----------
machine : Machine
the machine to simulate
surf_list : list
List of surface of the machine
Is : ndarray
Stator current matrix [A]
Ir : ndarray
Rotor current matrix [A]
BHs: ndarray
B(H) curve of the stator
BHr: ndarray
B(H) curve of the rotor
is_mmfs : bool
1 to compute the stator magnetomotive force/stator magnetic field
is_mmfr : bool
1 to compute the rotor magnetomotive force / rotor magnetic field
    is_stator_linear_BH: int
        0 to use the B(H) curve, 1 to use a linear B(H) curve according to
        mur_lin, 2 to enforce infinite permeability
    is_rotor_linear_BH: int
        0 to use the B(H) curve, 1 to use a linear B(H) curve according to
        mur_lin, 2 to enforce infinite permeability
    is_eddies : bool
        1 to calculate eddy currents
    j_t0 : int
        Current time step for winding calculation
    Returns
    -------
    (prop_dict, materials, circuits) : (dict, list, list)
        Dictionary of surface properties, list of material names, and list of
        the circuit names created
    """
    prop_dict = dict()  # Initialisation of the dictionary to return
rotor = machine.rotor
stator = machine.stator
materials = list()
circuits = list()
# Starting creation of properties for each surface of the machine
for surf in surf_list:
label = surf.label
if "Lamination_Stator_Bore" in label: # Stator
if is_stator_linear_BH == 2:
mu_is = 100000 # Infinite permeability
else:
mu_is = stator.mat_type.mag.mur_lin # Relative
            # Check if the property already exists in FEMM
if "Stator Iron" not in materials:
# magnetic permeability
femm.mi_addmaterial(
"Stator Iron", mu_is, mu_is, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
)
materials.append("Stator Iron")
prop_dict[label] = "Stator Iron"
elif "Lamination_Rotor_Bore" in label: # Rotor
# Initialisation from the rotor of the machine
if is_rotor_linear_BH == 2:
mu_ir = 100000 # Infinite permeability
else:
mu_ir = rotor.mat_type.mag.mur_lin # Relative
            # Check if the property already exists in FEMM
if "Rotor Iron" not in materials:
# magnetic permeability
femm.mi_addmaterial(
"Rotor Iron", mu_ir, mu_ir, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
)
materials.append("Rotor Iron")
prop_dict[label] = "Rotor Iron"
elif "Airgap" in label: # Airgap surface
if "Airgap" not in materials:
femm.mi_addmaterial("Airgap", 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0)
materials.append("Airgap")
prop_dict[label] = "Airgap"
elif "Ventilation" in label: # Ventilation
            # Check if the property already exists in FEMM
if "Air" not in materials:
femm.mi_addmaterial("Air", 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0)
materials.append("Air")
prop_dict[label] = "Air"
elif "Hole_" in label: # Hole but not HoleMagnet
            # Check if the property already exists in FEMM
if "Air" not in materials:
femm.mi_addmaterial("Air", 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0)
materials.append("Air")
prop_dict[label] = "Air"
elif "BarR" in label: # Squirrel cage
prop, materials = create_FEMM_bar(
is_mmfr, rotor.mat_type.elec.rho, materials
)
prop_dict[label] = prop
elif "WindR" in label: # Rotor Winding
prop, materials, circuits = create_FEMM_circuit_material(
circuits, label, is_eddies, rotor, Ir, is_mmfr, j_t0, materials
)
prop_dict[label] = prop
elif "WindS" in label: # Stator Winding
prop, materials, circuits = create_FEMM_circuit_material(
circuits, label, is_eddies, stator, Is, is_mmfs, j_t0, materials
)
prop_dict[label] = prop
elif "Magnet" in label and "Rotor" in label: # Rotor Magnet
prop, materials = create_FEMM_magnet(
label, is_mmfr, is_eddies, materials, rotor
)
prop_dict[label] = prop
elif "Magnet" in label and "Stator" in label: # Stator Magnet
prop, materials = create_FEMM_magnet(
label, is_mmfs, is_eddies, materials, stator
)
prop_dict[label] = prop
elif "No_mesh" in label: # Sliding band
prop_dict[label] = "<No Mesh>"
elif "Yoke" in label:
prop_dict[label] = "<No Mesh>"
# Set Rotor and Stator BH curves (if needed)
if is_stator_linear_BH == 0:
for ii in range(BHs.shape[0]):
femm.mi_addbhpoint("Stator Iron", BHs[ii][1], BHs[ii][0])
if is_rotor_linear_BH == 0:
for ii in range(BHr.shape[0]):
femm.mi_addbhpoint("Rotor Iron", BHr[ii][1], BHr[ii][0])
return prop_dict, materials, circuits
|
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks; all aspects
of the widget's appearance lie with the theme.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
from tkinter import _flatten, _join, _stringify, _splitdict
_sentinel = object()
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = tkinter.TkVersion < 8.5
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optvalue(value, script=False):
"""Internal function."""
if script:
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
value = _stringify(value)
elif isinstance(value, (list, tuple)):
value = _join(value)
return value
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
opts = []
for opt, value in optdict.items():
if not ignore or opt not in ignore:
opts.append("-%s" % opt)
if value is not None:
opts.append(_format_optvalue(value, script))
return _flatten(opts)
def _mapdict_values(items):
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
# E.g. (script=False):
# [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
# returns:
# ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
opt_val = []
for *state, val in items:
        # hacks for backward compatibility
state[0] # raise IndexError if empty
if len(state) == 1:
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or ''
else:
# group multiple states
state = ' '.join(state) # raise TypeError if not str
opt_val.append(state)
if val is not None:
opt_val.append(val)
return opt_val
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
opts = []
for opt, value in mapdict.items():
opts.extend(("-%s" % opt,
_format_optvalue(_mapdict_values(value), script)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _join(_mapdict_values(args[1:]))
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _join(_mapdict_values(args[2:]))
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (_format_optvalue(args[1], script),)
if script:
spec = '{%s}' % spec
opts = ' '.join(opts)
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
layout and ttk::style settings. Note that the layout doesn't have to
be a list necessarily.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(_format_optdict(opts, True, ("children",)))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.items():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(_format_optdict(opts['configure'], True))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(_format_mapdict(opts['map'], True))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
    statespec format accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
ltuple = tk.splitlist(ltuple)
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
def _val_or_dict(tk, options, *args):
"""Format options then call Tk command with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If an option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = tk.call(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _splitdict(tk, res, conv=_tclobj_to_py)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def _to_number(x):
if isinstance(x, str):
if '.' in x:
x = float(x)
else:
x = int(x)
return x
def _tclobj_to_py(val):
"""Return value converted from Tcl object to Python object."""
if val and hasattr(val, '__len__') and not isinstance(val, str):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = list(map(_convert_stringval, val))
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
return val
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
adict[opt] = _tclobj_to_py(val)
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if tkinter._support_default_root:
master = tkinter._default_root or tkinter.Tk()
else:
raise RuntimeError(
"No master specified and tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
result = _val_or_dict(self.tk, kw, self._name, "configure", style)
if result or query_opt:
return result
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
    or something else of your preference. A statespec is composed of
one or more states and then a value."""
if query_opt is not None:
return _list_from_statespec(self.tk.splitlist(
self.tk.call(self._name, "map", style, '-%s' % query_opt)))
return _splitdict(
self.tk,
self.tk.call(self._name, "map", style, *_format_mapdict(kw)),
conv=_tclobj_to_py)
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
layoutspec is expected to be a list or an object different than
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
        A layout can contain the value None, if it takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(self.tk,
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return tuple(n.lstrip('-') for n in self.tk.splitlist(
self.tk.call(self._name, "element", "names")))
def element_options(self, elementname):
"""Return the list of elementname's options."""
return tuple(o.lstrip('-') for o in self.tk.splitlist(
self.tk.call(self._name, "element", "options", elementname)))
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
ret = self.tk.getboolean(
self.tk.call(self._w, "instate", ' '.join(statespec)))
if ret and callback:
return callback(*args, **kw)
return ret
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self._getints(self.tk.call(self._w, "bbox", index))
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return self.tk.getboolean(self.tk.call(self._w, "validate"))
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
Entry.__init__(self, master, "ttk::combobox", **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
if newindex is None:
return self.tk.getint(self.tk.call(self._w, "current"))
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
If window is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.getint(self.tk.call(self._w, "index", tab_id))
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
# The only, and good, difference I see is about mnemonics, which works
# after calling this method. Control-Tab and Shift-Control-Tab always
# works (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.getint(self.tk.call(self._w, "sashpos", index, newpos))
PanedWindow = Panedwindow # tkinter name compatibility
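# Usage sketch (illustrative): two resizable panes separated by a sash.
#
#     pw = ttk.Panedwindow(root, orient='horizontal')
#     pw.add(ttk.Frame(pw, width=100), weight=1)
#     pw.add(ttk.Frame(pw, width=200), weight=2)
#     pw.pack(fill='both', expand=True)
#     pw.sashpos(0, 120)   # move the first sash to x=120 once the widget is mapped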
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. They can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(k in kw for k in ('from', 'from_', 'to')):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
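# Usage sketch (illustrative): a scale bound to a DoubleVar; the command
# callback receives the new value as a string.
#
#     val = tkinter.DoubleVar(root)
#     s = ttk.Scale(root, from_=0, to=100, variable=val,
#                   command=lambda v: print(float(v)))
#     s.pack(fill='x')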
class Scrollbar(Widget, tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.splitlist(
self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return self.tk.getboolean(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, str):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
separator: Space between two column headings.
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=_sentinel, items=None):
"""Returns the tuple of selected items."""
if selop is _sentinel:
selop = None
elif selop is None:
import warnings
warnings.warn(
"The selop=None argument of selection() is deprecated "
"and will be removed in Python 3.7",
DeprecationWarning, 3)
elif selop in ('set', 'add', 'remove', 'toggle'):
import warnings
warnings.warn(
"The selop argument of selection() is deprecated "
"and will be removed in Python 3.7, "
"use selection_%s() instead" % (selop,),
DeprecationWarning, 3)
else:
raise TypeError('Unsupported operation')
return self.tk.splitlist(self.tk.call(self._w, "selection", selop, items))
def _selection(self, selop, items):
if len(items) == 1 and isinstance(items[0], (tuple, list)):
items = items[0]
self.tk.call(self._w, "selection", selop, items)
def selection_set(self, *items):
"""The specified items becomes the new selection."""
self._selection("set", items)
def selection_add(self, *items):
"""Add all of the specified items to the selection."""
self._selection("add", items)
def selection_remove(self, *items):
"""Remove all of the specified items from the selection."""
self._selection("remove", items)
def selection_toggle(self, *items):
"""Toggle the selection state of each specified item."""
self._selection("toggle", items)
def set(self, item, column=None, value=None):
"""Query or set the value of given item.
With one argument, return a dictionary of column/value pairs
for the specified item. With two arguments, return the current
value of the specified column. With three arguments, set the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _splitdict(self.tk, res,
cut_minus=False, conv=_tclobj_to_py)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
if item is None:
return self.tk.splitlist(
self.tk.call(self._w, "tag", "has", tagname))
else:
return self.tk.getboolean(
self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
class LabeledScale(Frame):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct a horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
Label(self).pack(side=label_side)  # placeholder to reserve space for the label
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_ = _to_number(self.scale['from'])
to = _to_number(self.scale['to'])
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
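# Usage sketch (illustrative):
#
#     ls = ttk.LabeledScale(root, from_=0, to=10, compound='bottom')
#     ls.pack(fill='x')
#     ls.value = 5   # moves the scale; the label follows via the traced variable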
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise tkinter.TclError('unknown option -%s' % (
next(iter(kwargs.keys()))))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=tkinter._setit(self._variable, val, self._callback))
if default:
self._variable.set(default)
def destroy(self):
"""Destroy this widget and its associated variable."""
del self._variable
Menubutton.destroy(self)
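# Usage sketch (illustrative): a themed OptionMenu bound to a StringVar.
#
#     var = tkinter.StringVar(root)
#     om = ttk.OptionMenu(root, var, 'one', 'one', 'two', 'three',
#                         command=lambda value: print(value))
#     om.pack()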
|
import textwrap
from unittest.mock import Mock
import pytest
import ujson
from irrd.conf import RPKI_IRR_PSEUDO_SOURCE
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.scopefilter.validators import ScopeFilterValidator
from irrd.storage.database_handler import DatabaseHandler
from irrd.utils.test_utils import flatten_mock_calls
from ..importer import ROADataImporter, ROAParserException
@pytest.fixture()
def mock_scopefilter(monkeypatch):
mock_scopefilter = Mock(spec=ScopeFilterValidator)
monkeypatch.setattr('irrd.rpki.importer.ScopeFilterValidator',
lambda: mock_scopefilter)
mock_scopefilter.validate_rpsl_object = lambda obj: (ScopeFilterStatus.out_scope_as, '')
return mock_scopefilter
class TestROAImportProcess:
def test_valid_process(self, monkeypatch, mock_scopefilter):
# Note that this test does not mock RPSLObjectFromROA, used
# for generating the pseudo-IRR object, or the ROA class itself.
mock_dh = Mock(spec=DatabaseHandler)
rpki_data = ujson.dumps({
"roas": [{
"asn": "AS64496",
"prefix": "192.0.2.0/24",
"maxLength": 26,
"ta": "APNIC RPKI Root"
}, {
"asn": "AS64497",
"prefix": "2001:db8::/32",
"maxLength": 40,
"ta": "RIPE NCC RPKI Root"
}, {
# Filtered out by SLURM due to origin
"asn": "AS64498",
"prefix": "192.0.2.0/24",
"maxLength": 32,
"ta": "APNIC RPKI Root"
}, {
# Filtered out by SLURM due to prefix
"asn": "AS64496",
"prefix": "203.0.113.0/25",
"maxLength": 26,
"ta": "APNIC RPKI Root"
}, {
# Filtered out by SLURM due to prefix
"asn": "AS64497",
"prefix": "203.0.113.0/26",
"maxLength": 26,
"ta": "APNIC RPKI Root"
}, {
# Filtered out by SLURM due to prefix plus origin
"asn": "AS64497",
"prefix": "203.0.113.128/26",
"maxLength": 26,
"ta": "APNIC RPKI Root"
}]
})
slurm_data = ujson.dumps({
"slurmVersion": 1,
"validationOutputFilters": {
"prefixFilters": [
{
"prefix": "203.0.113.0/25",
"comment": "All VRPs encompassed by prefix",
},
{
"asn": 64498,
"comment": "All VRPs matching ASN",
},
{
"prefix": "203.0.113.128/25",
"asn": 64497,
"comment": "All VRPs encompassed by prefix, matching ASN",
},
{
# This filters out nothing, the ROA for this prefix has AS 64496
"prefix": "192.0.2.0/24",
"asn": 64497,
"comment": "All VRPs encompassed by prefix, matching ASN",
},
{
# This should not filter out the assertion for 198.51.100/24
"prefix": "198.51.100.0/24",
"asn": 64496,
"comment": "All VRPs encompassed by prefix, matching ASN",
}
],
},
"locallyAddedAssertions": {
"prefixAssertions": [
{
"asn": 64496,
"prefix": "198.51.100.0/24",
"comment": "My other important route",
},
{
"asn": 64497,
"prefix": "2001:DB8::/32",
"maxPrefixLength": 48,
"comment": "My other important de-aggregated routes",
}
],
}
})
roa_importer = ROADataImporter(rpki_data, slurm_data, mock_dh)
assert flatten_mock_calls(mock_dh, flatten_objects=True) == [
['insert_roa_object', (),
{'ip_version': 4, 'prefix_str': '192.0.2.0/24', 'asn': 64496,
'max_length': 26, 'trust_anchor': 'APNIC RPKI Root'}],
['upsert_rpsl_object',
('route/192.0.2.0/24AS64496/ML26/RPKI', 'JournalEntryOrigin.pseudo_irr'),
{'rpsl_guaranteed_no_existing': True}],
['insert_roa_object', (),
{'ip_version': 6, 'prefix_str': '2001:db8::/32', 'asn': 64497,
'max_length': 40, 'trust_anchor': 'RIPE NCC RPKI Root'}],
['upsert_rpsl_object',
('route6/2001:db8::/32AS64497/ML40/RPKI', 'JournalEntryOrigin.pseudo_irr'),
{'rpsl_guaranteed_no_existing': True}],
['insert_roa_object', (),
{'ip_version': 4, 'prefix_str': '198.51.100.0/24', 'asn': 64496,
'max_length': 24, 'trust_anchor': 'SLURM file'}],
['upsert_rpsl_object',
('route/198.51.100.0/24AS64496/ML24/RPKI', 'JournalEntryOrigin.pseudo_irr'),
{'rpsl_guaranteed_no_existing': True}],
['insert_roa_object', (),
{'ip_version': 6, 'prefix_str': '2001:db8::/32', 'asn': 64497,
'max_length': 48, 'trust_anchor': 'SLURM file'}],
['upsert_rpsl_object',
('route6/2001:db8::/32AS64497/ML48/RPKI', 'JournalEntryOrigin.pseudo_irr'),
{'rpsl_guaranteed_no_existing': True}],
]
assert roa_importer.roa_objs[0]._rpsl_object.scopefilter_status == ScopeFilterStatus.out_scope_as
assert roa_importer.roa_objs[0]._rpsl_object.source() == RPKI_IRR_PSEUDO_SOURCE
assert roa_importer.roa_objs[0]._rpsl_object.parsed_data == {
'origin': 'AS64496',
'route': '192.0.2.0/24',
'rpki_max_length': 26,
'source': 'RPKI',
}
assert roa_importer.roa_objs[0]._rpsl_object.render_rpsl_text() == textwrap.dedent("""
route: 192.0.2.0/24
descr: RPKI ROA for 192.0.2.0/24 / AS64496
remarks: This AS64496 route object represents routing data retrieved
from the RPKI. This route object is the result of an automated
RPKI-to-IRR conversion process performed by IRRd.
max-length: 26
origin: AS64496
source: RPKI # Trust Anchor: APNIC RPKI Root
""").strip() + '\n'
def test_invalid_rpki_json(self, monkeypatch, mock_scopefilter):
mock_dh = Mock(spec=DatabaseHandler)
with pytest.raises(ROAParserException) as rpe:
ROADataImporter('invalid', None, mock_dh)
assert 'Unable to parse ROA input: invalid JSON: Expected object or value' in str(rpe.value)
data = ujson.dumps({'invalid root': 42})
with pytest.raises(ROAParserException) as rpe:
ROADataImporter(data, None, mock_dh)
assert 'Unable to parse ROA input: root key "roas" not found' in str(rpe.value)
assert flatten_mock_calls(mock_dh) == []
def test_invalid_data_in_roa(self, monkeypatch, mock_scopefilter):
mock_dh = Mock(spec=DatabaseHandler)
data = ujson.dumps({
"roas": [{
"asn": "AS64496",
"prefix": "192.0.2.999/24",
"maxLength": 26,
"ta": "APNIC RPKI Root"
}]
})
with pytest.raises(ROAParserException) as rpe:
ROADataImporter(data, None, mock_dh)
assert "Invalid value in ROA or SLURM: '192.0.2.999': single byte must be 0 <= byte < 256" in str(rpe.value)
data = ujson.dumps({
"roas": [{
"asn": "ASx",
"prefix": "192.0.2.0/24",
"maxLength": 24,
"ta": "APNIC RPKI Root"
}]
})
with pytest.raises(ROAParserException) as rpe:
ROADataImporter(data, None, mock_dh)
assert 'Invalid AS number ASX: number part is not numeric' in str(rpe.value)
data = ujson.dumps({
"roas": [{
"prefix": "192.0.2.0/24",
"maxLength": 24,
"ta": "APNIC RPKI Root"
}]
})
with pytest.raises(ROAParserException) as rpe:
ROADataImporter(data, None, mock_dh)
assert "Unable to parse ROA record: missing key 'asn'" in str(rpe.value)
data = ujson.dumps({
"roas": [{
"asn": "AS64496",
"prefix": "192.0.2.0/24",
"maxLength": 22,
"ta": "APNIC RPKI Root"
}]
})
with pytest.raises(ROAParserException) as rpe:
ROADataImporter(data, None, mock_dh)
assert 'Invalid ROA: prefix size 24 is smaller than max length 22' in str(rpe.value)
assert flatten_mock_calls(mock_dh) == []
def test_invalid_slurm_version(self, monkeypatch, mock_scopefilter):
mock_dh = Mock(spec=DatabaseHandler)
with pytest.raises(ROAParserException) as rpe:
ROADataImporter('{"roas": []}', '{"slurmVersion": 2}', mock_dh)
assert 'SLURM data has invalid version: 2' in str(rpe.value)
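# Minimal end-to-end sketch (hypothetical; mirrors the fixtures above): an
# import with an empty "roas" list and no SLURM file is expected to perform
# no database writes.
#
#     mock_dh = Mock(spec=DatabaseHandler)
#     ROADataImporter('{"roas": []}', None, mock_dh)
#     assert flatten_mock_calls(mock_dh) == []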
|
# Generated by Django 3.0.3 on 2020-02-06 13:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reply', to='comment.Comment')),
],
),
]
|
import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from HiggsAnalysis.Skimming.higgsToInvisible_EventContent_cff import *
higgsToInvisibleOutputModuleRECOSIM = cms.OutputModule("PoolOutputModule",
RECOSIMEventContent,
higgsToInvisibleEventSelection,
dataset = cms.untracked.PSet(
filterName = cms.untracked.string('higgsToInvisibleRECOSIM'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('hToInvis_RECOSIM.root')
)
|
import json
import scrapy
from locations.items import GeojsonPointItem
class PaneraBread(scrapy.Spider):
name = 'panera'
item_attributes = { 'brand': "Panera Bread" }
download_delay = 1.5
allowed_domains = ["panerabread.com"]
start_urls = (
'https://locations.panerabread.com/index.html',
)
def store_hours(self, day_hours):
day_groups = []
this_day_group = {}
for day_hour in day_hours:
hours = ''
day, intervals = day_hour['day'], day_hour['intervals']
short_day = day.title()[:2]
epochs = []
for interval in intervals:
# times arrive as HHMM integers; zero-pad and keep the minutes
start, end = str(interval['start']).zfill(4), str(interval['end']).zfill(4)
hours_today = '{}:{}-{}:{}'.format(start[:2], start[2:], end[:2], end[2:])
epochs.append(hours_today)
hours = ','.join(epochs)
if not this_day_group:
this_day_group = {
'from_day': short_day,
'to_day': short_day,
'hours': hours,
}
elif hours == this_day_group['hours']:
this_day_group['to_day'] = short_day
else:
day_groups.append(this_day_group)
this_day_group = {
'from_day': short_day,
'to_day': short_day,
'hours': hours,
}
day_groups.append(this_day_group)
if not day_groups:
return None
opening_hours = ''
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse_location(self, loc):
props = {}
props['name'] = loc.xpath(
'//h1[@class="c-location-title"]/span[@class="location-name-geo"]/text()').extract_first()
props['addr_full'] = loc.xpath(
'//div[@class="nap-info-left"]//span[@class="c-address-street-1"]/text()').extract_first()
props['city'] = loc.xpath(
'//div[@class="nap-info-left"]//span[@itemprop="addressLocality"]/text()').extract_first()
props['state'] = loc.xpath(
'//div[@class="nap-info-left"]//abbr[@class="c-address-state"]/text()').extract_first()
props['postcode'] = loc.xpath(
'//div[@class="nap-info-left"]//span[@class="c-address-postal-code"]/text()').extract_first()
props['phone'] = loc.xpath(
'//div[@class="nap-info-left"]//a[@class="c-phone-number-link c-phone-main-number-link"]/text()').extract_first()
props['website'] = loc.url
props['ref'] = loc.url
hours = loc.xpath(
'//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days').extract_first()
props['lat'] = loc.xpath('//meta[@itemprop="latitude"]/@content').extract_first()
props['lon'] = loc.xpath('//meta[@itemprop="longitude"]/@content').extract_first()
props['opening_hours'] = self.store_hours(json.loads(hours))
return GeojsonPointItem(**props)
def parse_city(self, city_page):
# assumes each grid-item title wraps an anchor linking to the location page
locations = city_page.xpath('//h2[@class="c-location-grid-item-title"]/a/@href').extract()
if locations:
for loc in locations:
yield scrapy.Request(city_page.urljoin(loc), callback=self.parse_location)
else:
yield self.parse_location(city_page)
def parse_state(self, state_page):
cities = state_page.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for city in cities:
yield scrapy.Request(state_page.urljoin(city), callback=self.parse_city)
def parse(self, response):
states = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for state in states:
yield scrapy.Request(response.urljoin(state), callback=self.parse_state)
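# Quick illustration of store_hours() with made-up interval data (the real
# intervals come from the page's data-days JSON); days with identical hours
# collapse into ranges:
#
#     spider = PaneraBread()
#     sample = [
#         {'day': 'MONDAY', 'intervals': [{'start': 600, 'end': 2100}]},
#         {'day': 'TUESDAY', 'intervals': [{'start': 600, 'end': 2100}]},
#         {'day': 'WEDNESDAY', 'intervals': [{'start': 600, 'end': 2200}]},
#     ]
#     spider.store_hours(sample)   # -> 'Mo-Tu 06:00-21:00; We 06:00-22:00'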
|
from pathlib import Path
import depthai as dai
import numpy as np
import cv2
import sys
# Importing from parent folder
sys.path.insert(0, str(Path(__file__).parent.parent.parent)) # move to parent path
from utils.draw import drawROI, displayFPS
from utils.OakRunner import OakRunner
from utils.compute import to_planar
frame_width, frame_height = 300, 300
# Function called before entering inside the process loop, useful to set few arguments
def init(runner, device):
runner.custom_arguments["required_confidence"] = 0.2
# Function called inside the process loop, useful to apply any treatment
def process(runner):
for side in ["left", "right"]:
frame = runner.output_queues[side+"_cam"].get().getCvFrame()
faces_data = runner.output_queues["nn_"+side+"_faces"].get().getFirstLayerFp16()
if faces_data[2] > runner.custom_arguments["required_confidence"]:
# Get pixels instead of percentages
xmin = int(faces_data[3]*frame_width) if faces_data[3]>0 else 0
ymin = int(faces_data[4]*frame_height) if faces_data[4]>0 else 0
xmax = int(faces_data[5]*frame_width) if faces_data[5]<1 else frame_width
ymax = int(faces_data[6]*frame_height) if faces_data[6]<1 else frame_height
# Compute the face to get landmarks
land_data = dai.NNData()
planar_cropped_face = to_planar(frame[ymin:ymax, xmin:xmax], (48, 48))
land_data.setLayer("0", planar_cropped_face)
runner.input_queues["nn_"+side+"_landmarks"].send(land_data)
output = runner.output_queues["nn_"+side+"_landmarks"].get().getFirstLayerFp16()
landmarks = np.array(output).reshape(5,2)
# Draw detections
drawROI(frame, (xmin,ymin), (xmax,ymax), color=(0,200,230))
for x,y in landmarks:
cv2.circle(frame, (int(x*(xmax-xmin))+xmin,int(y*(ymax-ymin))+ymin), 2, (0,0,255))
displayFPS(frame, runner.getFPS())
cv2.imshow(side, frame)
runner = OakRunner()
for side in ["left", "right"]:
if(side == "left"):
runner.setLeftCamera(frame_width, frame_height)
face_manip = runner.getLeftCameraManip()
else:
runner.setRightCamera(frame_width, frame_height)
face_manip = runner.getRightCameraManip()
face_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)
runner.addNeuralNetworkModel(stream_name="nn_"+side+"_faces", path=str(Path(__file__).parent) + "/../../../_models/face_detection.blob", handle_mono_depth=False)
face_manip.out.link(runner.neural_networks["nn_"+side+"_faces"].input) # link transformed video stream to neural network entry
runner.addNeuralNetworkModel(stream_name="nn_"+side+"_landmarks", path=str(Path(__file__).parent) + "/../../../_models/tiny_face_landmarks.blob", handle_mono_depth=False)
runner.run(process=process, init=init)
|
from datetime import timedelta
import uuid
import logging
import email_normalize
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, AnonymousUser, PermissionsMixin
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.db import models, IntegrityError, transaction
from django.utils import timezone
from django.conf import settings
from netaddr import IPAddress, AddrFormatError, IPNetwork
from capapi.permissions import staff_level_permissions
from model_utils import FieldTracker
from rest_framework.authtoken.models import Token
logger = logging.getLogger(__name__)
class CapUserManager(BaseUserManager):
def create_user(self, email, password, **kwargs):
if not email:
raise ValueError('Email address is required')
user = self.model(email=self.normalize_email(email), **kwargs)
user.set_password(password)
user.create_nonce()
user.save(using=self._db)
return user
def create_superuser(self, email, password, **kwargs):
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_superuser', True)
kwargs.setdefault('email_verified', True)
kwargs.setdefault('total_case_allowance', settings.API_CASE_DAILY_ALLOWANCE)
kwargs.setdefault('case_allowance_remaining', settings.API_CASE_DAILY_ALLOWANCE)
if kwargs.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if kwargs.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self.create_user(email=email, password=password, **kwargs)
# This is a temporary workaround for the problem described in
# https://github.com/jazzband/django-model-utils/issues/331#issuecomment-478994563
# where django-model-utils FieldTracker breaks the setter for overridden attributes on abstract base classes
del AbstractBaseUser.is_active
class CapUser(PermissionsMixin, AbstractBaseUser):
email = models.EmailField(
max_length=254,
unique=True,
db_index=True,
error_messages={'unique': "A user with that email address already exists."}
)
normalized_email = models.CharField(max_length=255, help_text="Used to ensure that new emails are unique.")
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
total_case_allowance = models.IntegerField(null=True, blank=True, default=0)
case_allowance_remaining = models.IntegerField(null=False, blank=False, default=0)
# when we last reset the user's case count:
case_allowance_last_updated = models.DateTimeField(auto_now_add=True)
unlimited_access = models.BooleanField(default=False)
harvard_access = models.BooleanField(default=False)
unlimited_access_until = models.DateTimeField(null=True, blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
email_verified = models.BooleanField(default=False, help_text="Whether user has verified their email address")
activation_nonce = models.CharField(max_length=40, null=True, blank=True)
nonce_expires = models.DateTimeField(null=True, blank=True)
date_joined = models.DateTimeField(auto_now_add=True)
agreed_to_tos = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
objects = CapUserManager()
tracker = FieldTracker()
class Meta:
verbose_name = 'User'
def get_activation_nonce(self):
if self.nonce_expires + timedelta(hours=24) < timezone.now():
self.create_nonce()
self.save()
return self.activation_nonce
def unlimited_access_in_effect(self):
return (
(
self.unlimited_access or
(self.harvard_access and self.harvard_ip())
) and (
self.unlimited_access_until is None or
self.unlimited_access_until > timezone.now()
)
)
def harvard_ip(self):
""" Return True if X-Forwarded-For header is a Harvard IP address. """
if not hasattr(self, '_is_harvard_ip'):
try:
ip = IPAddress(self.ip_address) # set by AuthenticationMiddleware
except AddrFormatError:
self._is_harvard_ip = False
else:
self._is_harvard_ip = any(ip in IPNetwork(ip_range) for ip_range in settings.HARVARD_IP_RANGES)
return self._is_harvard_ip
def update_case_allowance(self, case_count=0, save=True):
if self.unlimited_access_in_effect():
return
if self.case_allowance_last_updated + timedelta(hours=settings.API_CASE_EXPIRE_HOURS) < timezone.now():
self.case_allowance_remaining = self.total_case_allowance
self.case_allowance_last_updated = timezone.now()
if case_count:
if self.case_allowance_remaining < case_count:
raise AttributeError("Case allowance is too low.")
self.case_allowance_remaining -= case_count
if save:
self.save(update_fields=['case_allowance_remaining', 'case_allowance_last_updated'])
def authenticate_user(self, activation_nonce):
if self.activation_nonce == activation_nonce and self.nonce_expires + timedelta(hours=24) > timezone.now():
Token.objects.create(user=self)
self.activation_nonce = ''
self.email_verified = True
self.save()
else:
raise PermissionDenied
def reset_api_key(self):
if self.get_api_key() and self.email_verified:
Token.objects.get(user=self).delete()
Token.objects.create(user=self)
self.save()
else:
raise PermissionDenied
def create_nonce(self):
self.activation_nonce = self.generate_nonce_timestamp()
self.nonce_expires = timezone.now()
self.save()
def save(self, *args, **kwargs):
if self.tracker.has_changed('email'):
self.normalized_email = self.normalize_email(self.email)
super(CapUser, self).save(*args, **kwargs)
@staticmethod
def generate_nonce_timestamp():
nonce = uuid.uuid1()
return nonce.hex
def get_api_key(self):
try:
# relying on DRF's Token model
return self.auth_token.key
except ObjectDoesNotExist:
return None
def get_short_name(self):
return self.email.split('@')[0]
def case_download_allowed(self, case_count):
if case_count > 0:
self.update_case_allowance()
return self.case_allowance_remaining >= case_count
else:
return True
def has_module_perms(self, app_label):
if app_label == 'capapi' or app_label == 'capdb':
return self.is_staff
return self.is_superuser
def has_perm(self, perm, obj=None):
if perm in staff_level_permissions:
return self.is_staff
return self.is_superuser
@staticmethod
def normalize_email(email):
"""
Return a normalized form of the email address:
- lowercase
- applying host-specific rules for domains hosted by Google, Microsoft, Yahoo, Fastmail
"""
return email_normalize.normalize(email.strip(), resolve=False)
# make AnonymousUser API conform with CapUser API
AnonymousUser.unlimited_access_until = None
AnonymousUser.unlimited_access_in_effect = lambda self: False
class ResearchRequest(models.Model):
""" Request for research access submitted by an unaffiliated user. """
user = models.ForeignKey(CapUser, on_delete=models.CASCADE, related_name='research_requests')
submitted_date = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
institution = models.CharField(max_length=255, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
area_of_interest = models.TextField(blank=True, null=True)
status = models.CharField(max_length=20, default='pending', verbose_name="research request status",
choices=(('pending', 'pending'), ('approved', 'approved'), ('denied', 'denied'), ('awaiting signature', 'awaiting signature')))
notes = models.TextField(blank=True, null=True)
class Meta:
ordering = ['-submitted_date']
class ResearchContract(models.Model):
""" Signed application for access submitted by an affiliated researcher. """
user = models.ForeignKey(CapUser, on_delete=models.CASCADE, related_name='research_contracts')
user_signature_date = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
institution = models.CharField(max_length=255)
title = models.CharField(max_length=255)
area_of_interest = models.TextField(blank=True, null=True)
contract_html = models.TextField(blank=True, null=True)
approver = models.ForeignKey(CapUser, blank=True, null=True, on_delete=models.DO_NOTHING, related_name='approved_contracts')
approver_signature_date = models.DateTimeField(blank=True, null=True)
status = models.CharField(max_length=20, default='pending', verbose_name="research contract status",
choices=(('pending', 'pending'), ('approved', 'approved'), ('denied', 'denied')))
approver_notes = models.TextField(blank=True, null=True)
notes = models.TextField(blank=True, null=True)
class Meta:
ordering = ['-user_signature_date']
class HarvardContract(models.Model):
""" Signed access contract submitted by a Harvard user. """
user = models.ForeignKey(CapUser, on_delete=models.CASCADE, related_name='harvard_contracts')
user_signature_date = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
title = models.CharField(max_length=255)
area_of_interest = models.TextField(blank=True, null=True)
contract_html = models.TextField(blank=True, null=True)
class Meta:
ordering = ['-user_signature_date']
class SiteLimits(models.Model):
"""
Singleton model to track sitewide values in a row with ID=1
"""
daily_signup_limit = models.IntegerField(default=50)
daily_signups = models.IntegerField(default=0)
daily_download_limit = models.IntegerField(default=50000)
daily_downloads = models.IntegerField(default=0)
class Meta:
verbose_name_plural = "Site limits"
@classmethod
def create(cls):
""" Create and return the ID=1 row, or fetch the existing one. """
site_limits = cls(pk=1)
try:
site_limits.save()
except IntegrityError:
return cls.objects.get(pk=1)
else:
return site_limits
@classmethod
def get(cls):
""" Get the ID=1 row, creating if necessary. """
try:
return cls.objects.get(pk=1)
except cls.DoesNotExist:
return cls.create()
@classmethod
def get_for_update(cls):
"""
Get the ID=1 row with select_for_update()
This must be run from within a transaction.
"""
try:
site_limits = cls.objects.select_for_update().get(pk=1)
except cls.DoesNotExist:
cls.create()
site_limits = cls.objects.select_for_update().get(pk=1)
return site_limits
@classmethod
def add_values(cls, **pairs):
"""
Modify existing values.
E.g., SiteLimits.add_values(daily_downloads=1) increases daily_downloads by 1.
"""
with transaction.atomic():
site_limits = cls.get_for_update()
for k, v in pairs.items():
setattr(site_limits, k, getattr(site_limits, k) + v)
site_limits.save()
return site_limits
@classmethod
def reset(cls):
""" Reset all counters to 0. """
with transaction.atomic():
site_limits = cls.get_for_update()
site_limits.daily_signups = 0
site_limits.daily_downloads = 0
site_limits.save()
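# Usage sketch (illustrative; requires a configured Django environment):
#
#     SiteLimits.add_values(daily_signups=1)            # atomic increment
#     limits = SiteLimits.get()
#     if limits.daily_signups >= limits.daily_signup_limit:
#         ...  # refuse further signups until SiteLimits.reset() runs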
|
from __future__ import absolute_import
import json
import logging
import warnings
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from pip._vendor import six
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
choices=('legacy', 'columns', 'freeze', 'json'),
help="Select the output format among: legacy (default), columns, "
"freeze or json.",
)
cmd_opts.add_option(
'--columns',
action='store_const',
const='columns',
dest='list_format',
help="Align package names and versions into vertical columns.",
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.list_format is None:
warnings.warn(
"The default format will switch to columns in the future. "
"You can use --format=legacy (or define a list_format "
"in your pip.conf) to disable this warning.",
RemovedInPip10Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_legacy(self, dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist),
dist.latest_version,
dist.latest_filetype,
)
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
else: # legacy
for dist in packages:
if options.outdated:
logger.info(self.output_legacy_latest(dist))
else:
logger.info(self.output_legacy(dist))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
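# Worked example of tabulate() (hypothetical rows): column widths come from the
# longest cell in each column, and every row is left-justified to match.
#
#     >>> tabulate([["Package", "Version"], ["pip", "9.0.1"]])
#     (['Package Version', 'pip     9.0.1  '], [7, 7])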
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if any(dist_is_editable(x) for x in pkgs):
header.append("Location")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if dist_is_editable(proj):
row.append(proj.location)
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
|
maior = 0
menor = 0
totalPessoas = 10
for pessoa in range(1, 11):
idade = int(input("Enter the age: "))
if idade >= 18:
maior += 1
else:
menor += 1
print("Quantidade de pessoas maior de idade: ", maior)
print("Quantidade de pessoas menor de idade: ", menor)
print("Porcentagem de pessoas menores de idade = ", (menor*100)/totalPessoas, "%")
print("Porcentagem de pessoas maiores de idade = ", (maior*100)/totalPessoas, "%")
|
#!/usr/bin/env python
import rospy
from inertial_sense_ros.msg import GPS
import gps_common.msg
def callback(data):
x=data.latitude
y=data.longitude
z=data.altitude
# rospy.loginfo('x: {}, y:{}, z:{},' .format(x,y, z))
pub = None  # created once in listener(), after the node is initialised
def convert_msg(data):
# Repack the inertial_sense GPS message as a gps_common GPSFix message.
new_message = gps_common.msg.GPSFix()
new_message.latitude = data.latitude
new_message.longitude = data.longitude
new_message.altitude = data.altitude
pub.publish(new_message)
# rospy.loginfo('lat: {}, lon: {}, alt: {}'.format(data.latitude, data.longitude, data.altitude))
def listener():
# In ROS, nodes are uniquely named. If two nodes with the same
# name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
global pub
rospy.init_node('gps_new_node', anonymous=True)
pub = rospy.Publisher('gps_converted', gps_common.msg.GPSFix, queue_size=10)
rospy.Subscriber("gps", GPS, convert_msg)
# spin() simply keeps python from exiting until this node is stopped;
# the node is initialised exactly once, so no restart loop is needed
rospy.spin()
if __name__ == '__main__':
listener()
|
# -*- coding: utf-8 -*-
"""Tests that disaggregations report the number of indicators assigned to them and can be archived"""
from factories import (
indicators_models as i_factories,
workflow_models as w_factories
)
from indicators.models import DisaggregationType, DisaggregationLabel, DisaggregatedValue
from django import test
class TestDisaggregationIndicatorCounts(test.TestCase):
def setUp(self):
self.country = w_factories.CountryFactory()
self.program = w_factories.RFProgramFactory()
self.program.country.set([self.country])
def test_disaggregation_no_indicators(self):
disagg = i_factories.DisaggregationTypeFactory(country=self.country)
disagg_from_db = DisaggregationType.objects.get(pk=disagg.pk)
self.assertFalse(disagg_from_db.has_indicators)
def test_disaggregation_one_indicator(self):
disagg = i_factories.DisaggregationTypeFactory(country=self.country)
indicator = i_factories.RFIndicatorFactory(program=self.program)
indicator.disaggregation.add(disagg)
indicator.save()
disagg_from_db = DisaggregationType.objects.get(pk=disagg.pk)
self.assertTrue(disagg_from_db.has_indicators)
def test_disaggregation_five_indicators(self):
disagg = i_factories.DisaggregationTypeFactory(country=self.country)
for _ in range(5):
indicator = i_factories.RFIndicatorFactory(program=self.program)
indicator.disaggregation.add(disagg)
indicator.save()
disagg_from_db = DisaggregationType.objects.get(pk=disagg.pk)
self.assertTrue(disagg_from_db.has_indicators)
class TestArchivedDisaggregationQueryset(test.TestCase):
def setUp(self):
self.country = w_factories.CountryFactory(country="TolaLand IN", code="IN")
self.program = w_factories.RFProgramFactory()
self.program.country.set([self.country])
self.country_b = w_factories.CountryFactory(country="Some other country", code="OUT")
self.program_b = w_factories.RFProgramFactory()
self.program_b.country.set([self.country_b])
def test_all_country_disaggregations_in_program(self):
created_disaggs = [i_factories.DisaggregationTypeFactory(country=self.country) for _ in range(5)]
_, disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual(len(disaggs), 1)
self.assertEqual(disaggs[0][0], "TolaLand IN")
self.assertEqual(sorted([d.pk for d in created_disaggs]), sorted([d.pk for d in disaggs[0][1]]))
def test_out_of_country_disaggregations_not_in_program(self):
created_disagg = i_factories.DisaggregationTypeFactory(country=self.country)
i_factories.DisaggregationTypeFactory(country=self.country_b)
_, disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual(len(disaggs[0][1]), 1)
self.assertEqual(disaggs[0][1][0].pk, created_disagg.pk)
def test_standard_disaggregations_in_program(self):
standard_disagg = i_factories.DisaggregationTypeFactory(country=None, standard=True)
created_disagg = i_factories.DisaggregationTypeFactory(country=self.country)
global_disaggs, country_disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual(len(global_disaggs), 1)
self.assertEqual(global_disaggs[0].pk, standard_disagg.pk)
self.assertEqual(len(country_disaggs), 1)
self.assertEqual(country_disaggs[0][0], "TolaLand IN")
self.assertEqual(country_disaggs[0][1][0].pk, created_disagg.pk)
def test_archived_in_country_disaggregations_not_in_program(self):
not_archived = i_factories.DisaggregationTypeFactory(country=self.country)
i_factories.DisaggregationTypeFactory(country=self.country, is_archived=True)
_, disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual([d.pk for d in disaggs[0][1]], [not_archived.pk])
def test_archived_standard_disaggregations_not_in_program(self):
not_archived = i_factories.DisaggregationTypeFactory(country=None, standard=True)
i_factories.DisaggregationTypeFactory(country=None, standard=True, is_archived=True)
disaggs, _ = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual([d.pk for d in disaggs], [not_archived.pk])
def test_archived_disagg_in_use_in_program(self):
archived_in_use = i_factories.DisaggregationTypeFactory(country=self.country, is_archived=True)
indicator = i_factories.RFIndicatorFactory(program=self.program)
indicator.disaggregation.add(archived_in_use)
indicator.save()
_, disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual([d.pk for d in disaggs[0][1]], [archived_in_use.pk])
def test_archived_standard_disagg_in_use_in_program(self):
archived_in_use = i_factories.DisaggregationTypeFactory(country=None, standard=True, is_archived=True)
indicator = i_factories.RFIndicatorFactory(program=self.program)
indicator.disaggregation.add(archived_in_use)
indicator.save()
disaggs, _ = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual([d.pk for d in disaggs], [archived_in_use.pk])
def test_full_scenario(self):
standard = i_factories.DisaggregationTypeFactory(country=None, standard=True)
# standard, archived:
i_factories.DisaggregationTypeFactory(country=None, standard=True, is_archived=True)
standard_archived_in_use_a = i_factories.DisaggregationTypeFactory(
country=None, standard=True, is_archived=True
)
indicator_a = i_factories.RFIndicatorFactory(program=self.program)
indicator_a.disaggregation.add(standard_archived_in_use_a)
indicator_a.save()
standard_archived_in_use_b = i_factories.DisaggregationTypeFactory(
country=None, standard=True, is_archived=True
)
indicator_b = i_factories.RFIndicatorFactory(program=self.program_b)
indicator_b.disaggregation.add(standard_archived_in_use_b)
indicator_b.save()
in_country = i_factories.DisaggregationTypeFactory(country=self.country)
# in-country, archived:
i_factories.DisaggregationTypeFactory(country=self.country, is_archived=True)
in_country_archived_in_use = i_factories.DisaggregationTypeFactory(country=self.country, is_archived=True)
indicator_c = i_factories.RFIndicatorFactory(program=self.program)
indicator_c.disaggregation.add(in_country_archived_in_use)
indicator_c.save()
# out-of-country:
i_factories.DisaggregationTypeFactory(country=self.country_b)
# out-of-country, archived:
i_factories.DisaggregationTypeFactory(country=self.country_b, is_archived=True)
out_of_country_archived_in_use = i_factories.DisaggregationTypeFactory(
country=self.country_b, is_archived=True
)
indicator_d = i_factories.RFIndicatorFactory(program=self.program_b)
indicator_d.disaggregation.add(out_of_country_archived_in_use)
indicator_d.save()
global_disaggs, country_disaggs = DisaggregationType.program_disaggregations(self.program.pk)
self.assertEqual(
sorted([d.pk for d in global_disaggs]),
sorted([
standard.pk,
standard_archived_in_use_a.pk
])
)
self.assertEqual(len(country_disaggs), 1)
self.assertEqual(
sorted([d.pk for d in country_disaggs[0][1]]),
sorted([
in_country.pk,
in_country_archived_in_use.pk
])
)
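# For context, a minimal sketch (inferred from the assertions above, NOT the
# real implementation, which lives in indicators.models) of the contract these
# queryset tests exercise: program_disaggregations(program_pk) returns
#   (standard_disaggs, [(country_name, country_disaggs), ...])
# where archived disaggregations are hidden unless an indicator on the given
# program uses them, e.g. roughly:
#   visible = Q(is_archived=False) | Q(indicator__program_id=program_pk)
#   standard = DisaggregationType.objects.filter(visible, standard=True).distinct()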
class TestDisaggregationLabelCounts(test.TestCase):
def setUp(self):
self.country = w_factories.CountryFactory(country="Testland", code="TL")
self.program = w_factories.RFProgramFactory()
self.program.country.set([self.country])
self.standard_disagg = i_factories.DisaggregationTypeFactory(
disaggregation_type="Standard 1",
standard=True,
country=None,
labels=[],
)
self.standard_disagg_archived = i_factories.DisaggregationTypeFactory(
disaggregation_type="Standard 2",
standard=True,
country=None,
is_archived=True,
labels=[],
)
self.country_disagg = i_factories.DisaggregationTypeFactory(
disaggregation_type="Country 1",
standard=False,
country=self.country,
labels=[],
)
self.country_disagg_archived = i_factories.DisaggregationTypeFactory(
disaggregation_type="Country 2",
standard=False,
country=self.country,
labels=[],
)
def get_labels(self, disagg, count=2):
def make_label(disagg, c):
label = DisaggregationLabel(
disaggregation_type=disagg,
label="label {} for disagg {}".format(c+1, disagg.disaggregation_type),
customsort=c+1
)
label.save()
return label
labels = [make_label(disagg, x) for x in range(count)]
return disagg, labels
def test_two_labels_none_in_use(self):
disagg, labels = self.get_labels(self.standard_disagg)
annotated_disagg = DisaggregationType.form_objects.get(pk=disagg.pk)
self.assertEqual(
[l.pk for l in annotated_disagg.categories],
[labels[0].pk, labels[1].pk]
)
self.assertFalse(annotated_disagg.in_use)
self.assertTrue(all(not l.in_use for l in annotated_disagg.categories))
def test_two_labels_one_in_use(self):
disagg, labels = self.get_labels(self.standard_disagg)
indicator = i_factories.RFIndicatorFactory(program=self.program)
indicator.disaggregation.add(disagg)
result = i_factories.ResultFactory(
indicator=indicator,
periodic_target=indicator.periodictargets.first(),
achieved=100
)
d_value = DisaggregatedValue(
category=labels[1],
value=100,
result=result
)
d_value.save()
annotated_disagg = DisaggregationType.form_objects.get(pk=disagg.pk)
self.assertTrue(annotated_disagg.in_use)
self.assertFalse(
annotated_disagg.categories[0].in_use
)
self.assertTrue(
annotated_disagg.categories[1].in_use
)
def test_labels_in_use_for_indicator(self):
disagg, labels = self.get_labels(self.standard_disagg)
indicator_a = i_factories.RFIndicatorFactory(program=self.program)
indicator_a.disaggregation.add(disagg)
indicator_b = i_factories.RFIndicatorFactory(program=self.program)
indicator_b.disaggregation.add(disagg)
result = i_factories.ResultFactory(
indicator=indicator_a,
periodic_target=indicator_a.periodictargets.first(),
achieved=100
)
d_value = DisaggregatedValue(
category=labels[1],
value=100,
result=result
)
d_value.save()
annotated_disagg_a = DisaggregationType.form_objects.for_indicator(indicator_a.pk).get(pk=disagg.pk)
self.assertTrue(annotated_disagg_a.in_use)
self.assertFalse(
annotated_disagg_a.categories[0].in_use,
)
self.assertTrue(
annotated_disagg_a.categories[1].in_use
)
self.assertTrue(annotated_disagg_a.has_results)
annotated_disagg_b = DisaggregationType.form_objects.for_indicator(indicator_b.pk).get(pk=disagg.pk)
self.assertTrue(annotated_disagg_b.in_use)
self.assertFalse(
annotated_disagg_b.categories[0].in_use,
)
self.assertTrue(
annotated_disagg_b.categories[1].in_use
)
self.assertFalse(annotated_disagg_b.has_results)
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
class Spark_Component(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
env.set_params(params)
self.configure(env)
start_spark_cmd = """env SPARK_PID_DIR={app_pid_dir} SPARK_LOG_DIR={app_log_dir} SPARK_MASTER_PORT={master_port} SPARK_MASTER_WEBUI_PORT={webui_port} {app_root}/sbin/start-master.sh
"""
pid_file = format("{app_pid_dir}/spark-yarn-org.apache.spark.deploy.master.Master-1.pid")
process_cmd = format(start_spark_cmd.replace("\n", " "))
print("Starting Spark master using command: "+process_cmd)
Execute(process_cmd,
logoutput=True,
wait_for_finish=False,
pid_file=pid_file,
poll_after = 10,
cwd=format("{app_root}")
)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("{app_root}/sbin/stop-master.sh --port {master_port}")
Execute(stop_cmd,
logoutput=True,
wait_for_finish=True,
cwd=format("{app_root}")
)
def status(self, env):
import params
env.set_params(params)
pid_file = format("{app_pid_dir}/spark-yarn-org.apache.spark.deploy.master.Master-1.pid")
check_process_status(pid_file)
if __name__ == "__main__":
Spark_Component().execute()
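# The {app_root}, {app_pid_dir}, {app_log_dir}, {master_port} and {webui_port}
# placeholders above are resolved by resource_management's format() from the
# `params` module registered via env.set_params. A sketch of what params is
# assumed to define (values are illustrative only; Ambari normally derives
# them from the cluster configuration):
#   app_root = '/opt/spark'
#   app_pid_dir = '/var/run/spark'
#   app_log_dir = '/var/log/spark'
#   master_port = '7077'
#   webui_port = '8080'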
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the regular expressions crafted from ABNF."""
import sys
# https://tools.ietf.org/html/rfc3986#page-13
GEN_DELIMS = GENERIC_DELIMITERS = ":/?#[]@"
GENERIC_DELIMITERS_SET = set(GENERIC_DELIMITERS)
# https://tools.ietf.org/html/rfc3986#page-13
SUB_DELIMS = SUB_DELIMITERS = "!$&'()*+,;="
SUB_DELIMITERS_SET = set(SUB_DELIMITERS)
# Escape the '*' for use in regular expressions
SUB_DELIMITERS_RE = r"!$&'()\*+,;="
RESERVED_CHARS_SET = GENERIC_DELIMITERS_SET.union(SUB_DELIMITERS_SET)
ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
DIGIT = '0123456789'
# https://tools.ietf.org/html/rfc3986#section-2.3
UNRESERVED = UNRESERVED_CHARS = ALPHA + DIGIT + r'._~-'
UNRESERVED_CHARS_SET = set(UNRESERVED_CHARS)
NON_PCT_ENCODED_SET = RESERVED_CHARS_SET.union(UNRESERVED_CHARS_SET)
# We need to escape the '-' in this case:
UNRESERVED_RE = r'A-Za-z0-9._~\-'
# Percent encoded character values
PERCENT_ENCODED = PCT_ENCODED = '%[A-Fa-f0-9]{2}'
PCHAR = '([' + UNRESERVED_RE + SUB_DELIMITERS_RE + ':@]|%s)' % PCT_ENCODED
# NOTE(sigmavirus24): We're going to use more strict regular expressions
# than appear in Appendix B for scheme. This will prevent over-eager
# consuming of items that aren't schemes.
SCHEME_RE = '[a-zA-Z][a-zA-Z0-9+.-]*'
_AUTHORITY_RE = '[^/?#]*'
_PATH_RE = '[^?#]*'
_QUERY_RE = '[^#]*'
_FRAGMENT_RE = '.*'
# Extracted from http://tools.ietf.org/html/rfc3986#appendix-B
COMPONENT_PATTERN_DICT = {
'scheme': SCHEME_RE,
'authority': _AUTHORITY_RE,
'path': _PATH_RE,
'query': _QUERY_RE,
'fragment': _FRAGMENT_RE,
}
# See http://tools.ietf.org/html/rfc3986#appendix-B
# In this case, we name each of the important matches so we can use
# SRE_Match#groupdict to parse the values out if we so choose. This is also
# modified to ignore other matches that are not important to the parsing of
# the reference so we can also simply use SRE_Match#groups.
URL_PARSING_RE = (
r'(?:(?P<scheme>{scheme}):)?(?://(?P<authority>{authority}))?'
r'(?P<path>{path})(?:\?(?P<query>{query}))?'
r'(?:#(?P<fragment>{fragment}))?'
).format(**COMPONENT_PATTERN_DICT)
# #########################
# Authority Matcher Section
# #########################
# Host patterns, see: http://tools.ietf.org/html/rfc3986#section-3.2.2
# The pattern for a regular name, e.g., www.google.com, api.github.com
REGULAR_NAME_RE = REG_NAME = '((?:{0}|[{1}])*)'.format(
'%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + UNRESERVED_RE
)
# The pattern for an IPv4 address, e.g., 192.168.255.255, 127.0.0.1,
IPv4_RE = r'([0-9]{1,3}\.){3}[0-9]{1,3}'
# Hexadecimal characters used in each piece of an IPv6 address
HEXDIG_RE = '[0-9A-Fa-f]{1,4}'
# Least-significant 32 bits of an IPv6 address
LS32_RE = '({hex}:{hex}|{ipv4})'.format(hex=HEXDIG_RE, ipv4=IPv4_RE)
# Substitutions into the following patterns for IPv6 patterns defined
# http://tools.ietf.org/html/rfc3986#page-20
_subs = {'hex': HEXDIG_RE, 'ls32': LS32_RE}
# Below: h16 = hexdig, see: https://tools.ietf.org/html/rfc5234 for details
# about ABNF (Augmented Backus-Naur Form) use in the comments
variations = [
# 6( h16 ":" ) ls32
'(%(hex)s:){6}%(ls32)s' % _subs,
# "::" 5( h16 ":" ) ls32
'::(%(hex)s:){5}%(ls32)s' % _subs,
# [ h16 ] "::" 4( h16 ":" ) ls32
'(%(hex)s)?::(%(hex)s:){4}%(ls32)s' % _subs,
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
'((%(hex)s:)?%(hex)s)?::(%(hex)s:){3}%(ls32)s' % _subs,
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
'((%(hex)s:){0,2}%(hex)s)?::(%(hex)s:){2}%(ls32)s' % _subs,
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
'((%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s' % _subs,
# [ *4( h16 ":" ) h16 ] "::" ls32
'((%(hex)s:){0,4}%(hex)s)?::%(ls32)s' % _subs,
# [ *5( h16 ":" ) h16 ] "::" h16
'((%(hex)s:){0,5}%(hex)s)?::%(hex)s' % _subs,
# [ *6( h16 ":" ) h16 ] "::"
'((%(hex)s:){0,6}%(hex)s)?::' % _subs,
]
IPv6_RE = '(({0})|({1})|({2})|({3})|({4})|({5})|({6})|({7})|({8}))'.format(
*variations
)
IPv_FUTURE_RE = r'v[0-9A-Fa-f]+\.[%s]+' % (
UNRESERVED_RE + SUB_DELIMITERS_RE + ':'
)
# RFC 6874 Zone ID ABNF
ZONE_ID = '(?:[' + UNRESERVED_RE + ']|' + PCT_ENCODED + ')+'
IPv6_ADDRZ_RFC4007_RE = IPv6_RE + '(?:(?:%25|%)' + ZONE_ID + ')?'
IPv6_ADDRZ_RE = IPv6_RE + '(?:%25' + ZONE_ID + ')?'
IP_LITERAL_RE = r'\[({0}|{1})\]'.format(
IPv6_ADDRZ_RFC4007_RE,
IPv_FUTURE_RE,
)
# Pattern for matching the host piece of the authority
HOST_RE = HOST_PATTERN = '({0}|{1}|{2})'.format(
REG_NAME,
IPv4_RE,
IP_LITERAL_RE,
)
USERINFO_RE = '^([%s%s:]|%s)+.*(?=@)' % (
UNRESERVED_RE, SUB_DELIMITERS_RE, PCT_ENCODED
)
PORT_RE = '[0-9]{1,5}'
# ####################
# Path Matcher Section
# ####################
# See http://tools.ietf.org/html/rfc3986#section-3.3 for more information
# about the path patterns defined below.
segments = {
'segment': PCHAR + '*',
# Non-zero length segment
'segment-nz': PCHAR + '+',
# Non-zero length segment without ":"
'segment-nz-nc': PCHAR.replace(':', '') + '+'
}
# Path types taken from Section 3.3 (linked above)
PATH_EMPTY = '^$'
PATH_ROOTLESS = '%(segment-nz)s(/%(segment)s)*' % segments
PATH_NOSCHEME = '%(segment-nz-nc)s(/%(segment)s)*' % segments
PATH_ABSOLUTE = '/(%s)?' % PATH_ROOTLESS
PATH_ABEMPTY = '(/%(segment)s)*' % segments
PATH_RE = '^(%s|%s|%s|%s|%s)$' % (
PATH_ABEMPTY, PATH_ABSOLUTE, PATH_NOSCHEME, PATH_ROOTLESS, PATH_EMPTY
)
FRAGMENT_RE = QUERY_RE = (
'^([/?:@' + UNRESERVED_RE + SUB_DELIMITERS_RE + ']|%s)*$' % PCT_ENCODED
)
# ##########################
# Relative reference matcher
# ##########################
# See http://tools.ietf.org/html/rfc3986#section-4.2 for details
RELATIVE_PART_RE = '(//%s%s|%s|%s|%s)' % (
COMPONENT_PATTERN_DICT['authority'],
PATH_ABEMPTY,
PATH_ABSOLUTE,
PATH_NOSCHEME,
PATH_EMPTY,
)
# See http://tools.ietf.org/html/rfc3986#section-3 for definition
HIER_PART_RE = '(//%s%s|%s|%s|%s)' % (
COMPONENT_PATTERN_DICT['authority'],
PATH_ABEMPTY,
PATH_ABSOLUTE,
PATH_ROOTLESS,
PATH_EMPTY,
)
# ###############
# IRIs / RFC 3987
# ###############
# Only wide-unicode gets the high-ranges of UCSCHAR
if sys.maxunicode > 0xFFFF: # pragma: no cover
IPRIVATE = u'\uE000-\uF8FF\U000F0000-\U000FFFFD\U00100000-\U0010FFFD'
UCSCHAR_RE = (
u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF'
u'\U00010000-\U0001FFFD\U00020000-\U0002FFFD'
u'\U00030000-\U0003FFFD\U00040000-\U0004FFFD'
u'\U00050000-\U0005FFFD\U00060000-\U0006FFFD'
u'\U00070000-\U0007FFFD\U00080000-\U0008FFFD'
u'\U00090000-\U0009FFFD\U000A0000-\U000AFFFD'
u'\U000B0000-\U000BFFFD\U000C0000-\U000CFFFD'
u'\U000D0000-\U000DFFFD\U000E1000-\U000EFFFD'
)
else: # pragma: no cover
IPRIVATE = u'\uE000-\uF8FF'
UCSCHAR_RE = (
u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF'
)
IUNRESERVED_RE = u'A-Za-z0-9\\._~\\-' + UCSCHAR_RE
IPCHAR = u'([' + IUNRESERVED_RE + SUB_DELIMITERS_RE + u':@]|%s)' % PCT_ENCODED
isegments = {
'isegment': IPCHAR + u'*',
# Non-zero length segment
'isegment-nz': IPCHAR + u'+',
# Non-zero length segment without ":"
'isegment-nz-nc': IPCHAR.replace(':', '') + u'+'
}
IPATH_ROOTLESS = u'%(isegment-nz)s(/%(isegment)s)*' % isegments
IPATH_NOSCHEME = u'%(isegment-nz-nc)s(/%(isegment)s)*' % isegments
IPATH_ABSOLUTE = u'/(?:%s)?' % IPATH_ROOTLESS
IPATH_ABEMPTY = u'(?:/%(isegment)s)*' % isegments
IPATH_RE = u'^(?:%s|%s|%s|%s|%s)$' % (
IPATH_ABEMPTY, IPATH_ABSOLUTE, IPATH_NOSCHEME, IPATH_ROOTLESS, PATH_EMPTY
)
IREGULAR_NAME_RE = IREG_NAME = u'(?:{0}|[{1}])*'.format(
u'%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + IUNRESERVED_RE
)
IHOST_RE = IHOST_PATTERN = u'({0}|{1}|{2})'.format(
IREG_NAME,
IPv4_RE,
IP_LITERAL_RE,
)
IUSERINFO_RE = u'^(?:[%s%s:]|%s)+.*(?=@)' % (
IUNRESERVED_RE, SUB_DELIMITERS_RE, PCT_ENCODED
)
IFRAGMENT_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE
+ u']|%s)*$' % PCT_ENCODED)
IQUERY_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE
+ IPRIVATE + u']|%s)*$' % PCT_ENCODED)
IRELATIVE_PART_RE = u'(//%s%s|%s|%s|%s)' % (
COMPONENT_PATTERN_DICT['authority'],
IPATH_ABEMPTY,
IPATH_ABSOLUTE,
IPATH_NOSCHEME,
PATH_EMPTY,
)
IHIER_PART_RE = u'(//%s%s|%s|%s|%s)' % (
COMPONENT_PATTERN_DICT['authority'],
IPATH_ABEMPTY,
IPATH_ABSOLUTE,
IPATH_ROOTLESS,
PATH_EMPTY,
)
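# Quick illustrative self-check of the reference-parsing pattern above:
if __name__ == '__main__':  # pragma: no cover
    import re
    match = re.match(URL_PARSING_RE,
                     'https://user@example.com:8042/over/there?name=ferret#nose')
    # {'scheme': 'https', 'authority': 'user@example.com:8042',
    #  'path': '/over/there', 'query': 'name=ferret', 'fragment': 'nose'}
    print(match.groupdict())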
|
"""Set up the objects ipython profile."""
import typing as tp
from databroker import Broker
from ophyd.sim import SynAxis, SynSignalWithRegistry, SynSignalRO
from xpdsim.movers import SimFilterBank
from .beamtime import Beamtime
from .beamtimeSetup import start_xpdacq
from .xpdacq import CustomizedRunEngine
from .xpdacq_conf import GlblYamlDict
from .xpdacq_conf import _load_beamline_config
from .xpdacq_conf import configure_device, _reload_glbl, _set_glbl
def ipysetup(
area_det: SynSignalWithRegistry,
shutter: SynAxis,
temp_controller: SynAxis,
filter_bank: SimFilterBank,
ring_current: SynSignalRO,
db: Broker,
    glbl_yaml: tp.Optional[str] = None,
    blconfig_yaml: tp.Optional[str] = None,
test: bool = False
) -> tp.Tuple[GlblYamlDict, Beamtime, CustomizedRunEngine]:
"""Set up the beamtime, run engine and global configuration.
Parameters
----------
area_det :
Area detector, like "pe1c".
shutter :
Shutter control, like "shctl1".
temp_controller :
Temperature control, like "cs700".
filter_bank :
The filter bank, like "fb".
ring_current :
The ring current reader, like "ring_current".
db :
The data broker.
glbl_yaml :
The global configuration for the beam time in a yaml file.
If None, use the 'glbl_yaml_path' specified in the `xpdacq.xpdacq_conf.glbl_dict`.
Default None.
blconfig_yaml :
        The beamline configuration yaml file. If None, use glbl["blconfig_path"].
test :
If true, use test mode (for developers).
Returns
-------
glbl :
A dictionary of the global configuration for this beam time. The variable is `~xpdacq.glbl.glbl`.
bt :
An interface to create, read and update the plans, samples and beam time information.
xrun :
A customized bluesky run engine to run the plans.
"""
# configure devices
configure_device(
area_det=area_det,
shutter=shutter,
temp_controller=temp_controller,
filter_bank=filter_bank,
ring_current=ring_current,
db=db
)
# reload glbl
from xpdacq.glbl import glbl
_glbl = _reload_glbl(glbl_yaml)
if _glbl:
_set_glbl(glbl, _glbl)
# load beamtime
bt = start_xpdacq()
if bt:
print("INFO: Reload beamtime objects:\n{}\n".format(bt))
else:
print("INFO: No Beamtime object.")
# instantiate xrun without beamtime, like bluesky setup
xrun = CustomizedRunEngine(None)
xrun.md["beamline_id"] = glbl["beamline_id"]
xrun.md["group"] = glbl["group"]
xrun.md["facility"] = glbl["facility"]
if not blconfig_yaml:
blconfig_yaml = glbl["blconfig_path"]
xrun.md["beamline_config"] = _load_beamline_config(blconfig_yaml, test=test)
# insert header to db, either simulated or real
xrun.subscribe(db.v1.insert)
if bt:
xrun.beamtime = bt
return glbl, bt, xrun
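# Illustrative usage with simulated devices (the device names below are
# assumptions; a real beamline profile passes its own ophyd objects):
#
#     from databroker import Broker
#     db = Broker.named("temp")
#     glbl, bt, xrun = ipysetup(area_det=pe1c, shutter=shctl1,
#                               temp_controller=cs700, filter_bank=fb,
#                               ring_current=ring_current, db=db, test=True)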
|
from flask import Flask
app = Flask(__name__)
app.config.from_object('config')
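# Imported at the bottom on purpose: in this common Flask layout, app/views.py
# itself imports `app`, so importing views any earlier would trigger a
# circular import.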
from app import views
|
"""
Legend
------
The :meth:`pygmt.Figure.legend` method can automatically create a legend for
symbols plotted using :meth:`pygmt.Figure.plot`. Legend entries are only
created when the ``label`` argument is used.
"""
import pygmt
fig = pygmt.Figure()
fig.basemap(projection="x1i", region=[0, 7, 3, 7], frame=True)
fig.plot(
data="@Table_5_11.txt",
style="c0.15i",
color="lightgreen",
pen="faint",
label="Apples",
)
fig.plot(data="@Table_5_11.txt", pen="1.5p,gray", label='"My lines"')
fig.plot(data="@Table_5_11.txt", style="t0.15i", color="orange", label="Oranges")
fig.legend(position="JTR+jTR+o0.2c", box=True)
fig.show()
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""BesselI0 op"""
from tbe import dsl
from te import tvm
from te.platform.fusion_manager import fusion_manager
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
bessel_i0_op_info = TBERegOp("BesselI0") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("bessel_i0.so") \
.compute_cost(10) \
.kernel_name("bessel_i0") \
.partial_flag(True) \
.op_pattern("formatAgnostic") \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(bessel_i0_op_info)
def _bessel_i0_tbe():
"""BesselI0 TBE register"""
return
A = [-1.30002500998624804212E-8, 6.04699502254191894932E-8,
-2.67079385394061173391E-7, 1.11738753912010371815E-6,
-4.41673835845875056359E-6, 1.64484480707288970893E-5,
-5.75419501008210370398E-5, 1.88502885095841655729E-4,
-5.76375574538582365885E-4, 1.63947561694133579842E-3,
-4.32430999505057594430E-3, 1.05464603945949983183E-2,
-2.37374148058994688156E-2, 4.93052842396707084878E-2,
-9.49010970480476444210E-2, 1.71620901522208775349E-1,
-3.04682672343198398683E-1, 6.76795274409476084995E-1]
B = [3.39623202570838634515E-9, 2.26666899049817806459E-8,
2.04891858946906374183E-7, 2.89137052083475648297E-6,
6.88975834691682398426E-5, 3.36911647825569408990E-3,
8.04490411014108831608E-1]
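# A and B are Chebyshev expansion coefficients taken from Cephes' i0.c:
# exp(-|x|)*I0(x) is approximated by chebevl(|x|/2 - 2, A) on [0, 8] and by
# chebevl(32/|x| - 2, B)/sqrt(|x|) on (8, inf).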
def chebevl(x, num, coef, shape, dtype):
    """Evaluate a Chebyshev series at x using the Cephes chebevl recurrence."""
    b0 = dsl.broadcast(coef[0], shape, dtype)
    b1 = dsl.broadcast(0, shape, dtype)
    b2 = None
    for i in range(1, num):
        # shift the previous two terms down, then b0 = x * b1 - b2 + coef[i]
        b2 = b1
        b1 = b0
        coef_i = dsl.broadcast(coef[i], shape, dtype)
        b0 = dsl.vsub(dsl.vadd(dsl.vmul(x, b1), coef_i), b2)
    return dsl.vmuls(dsl.vsub(b0, b2), 0.5)
@fusion_manager.register("bessel_i0")
def bessel_i0_compute(input_x, output, kernel_name="bessel_i0"):
"""bessel_i0_compute"""
dtype = input_x.dtype
shape = input_x.shape
has_improve_precision = False
if dtype != "float32":
input_x = dsl.cast_to(input_x, "float32")
dtype = "float32"
has_improve_precision = True
y = dsl.vabs(input_x)
y_le_eight_in = dsl.vmuls(y, 0.5)
y_le_eight_in = dsl.vadds(y_le_eight_in, -2.0)
y_le_eight = chebevl(y_le_eight_in, 18, A, shape, dtype)
y_gt_eight_in = dsl.vadds(dsl.vmuls(dsl.vrec(y), 32.0), -2.0)
y_gt_eight = chebevl(y_gt_eight_in, 7, B, shape, dtype)
y_gt_eight = dsl.vmul(y_gt_eight, dsl.vrsqrt(y))
res = dsl.vcmpsel(y, 8.0, 'le', y_le_eight, y_gt_eight)
res = dsl.vmul(res, dsl.vexp(y))
if has_improve_precision:
res = dsl.cast_to(res, "float16")
return res
def bessel_i0(x, output, kernel_name="bessel_i0"):
"""bessel_i0"""
data_x = tvm.placeholder(x.get("shape"), dtype=x.get("dtype"), name="data_x")
res = bessel_i0_compute(data_x, output, kernel_name)
# auto schedule
with tvm.target.cce():
schedule = dsl.auto_schedule(res)
# operator build
config = {"name": kernel_name,
"tensor_list": [data_x, res]}
dsl.build(schedule, config)
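# Illustrative invocation (the shape/dtype dicts are example values):
#   bessel_i0({"shape": (32, 128), "dtype": "float16"},
#             {"shape": (32, 128), "dtype": "float16"},
#             kernel_name="bessel_i0")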
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from .callback_game import CallbackGame
from .callback_query import CallbackQuery
from .force_reply import ForceReply
from .game_high_score import GameHighScore
from .inline_keyboard_button import InlineKeyboardButton
from .inline_keyboard_markup import InlineKeyboardMarkup
from .keyboard_button import KeyboardButton
from .reply_keyboard_markup import ReplyKeyboardMarkup
from .reply_keyboard_remove import ReplyKeyboardRemove
# added
from .bot_info import BotInfo
from .bot_command import BotCommand
__all__ = [
"CallbackGame", "CallbackQuery", "ForceReply", "GameHighScore", "InlineKeyboardButton", "InlineKeyboardMarkup",
"KeyboardButton", "ReplyKeyboardMarkup", "ReplyKeyboardRemove",
# added
"BotInfo",
"BotCommand"
]
|
import os
import h5py
import shutil
import sklearn
import tempfile
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.linear_model
import sklearn.model_selection
import matplotlib.pyplot as plt
X, y = sklearn.datasets.make_classification(
n_samples=10000, n_features=4, n_redundant=0, n_informative=2,
n_clusters_per_class=2, hypercube=False, random_state=0
)
# Split into train and test
X, Xt, y, yt = sklearn.model_selection.train_test_split(X, y)
# Write out the data to HDF5 files in a local data directory.
# This file is assumed to be caffe_root/examples/hdf5_classification.ipynb
dirname = os.path.abspath('./hdf5_data')
if not os.path.exists(dirname):
os.makedirs(dirname)
train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')
# HDF5DataLayer source should be a file containing a list of HDF5 filenames.
# To show this off, we'll list the same data file twice.
with h5py.File(train_filename, 'w') as f:
f['data'] = X
f['label'] = y.astype(np.float32)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
f.write(train_filename + '\n')
f.write(train_filename + '\n')
# HDF5 is pretty efficient, but can be further compressed.
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(test_filename, 'w') as f:
f.create_dataset('data', data=Xt, **comp_kwargs)
f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
f.write(test_filename + '\n')
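# Sanity check (illustrative): read the training file back and confirm shapes.
with h5py.File(train_filename, 'r') as f:
    assert f['data'].shape == X.shape
    assert f['label'].shape == y.shape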
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
class BotoCoreError(Exception):
"""
The base exception class for BotoCore exceptions.
:ivar msg: The descriptive message associated with the error.
"""
    fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
class DataNotFoundError(BotoCoreError):
"""
The data associated with a particular path could not be loaded.
    :ivar data_path: The data path that the user attempted to load.
"""
fmt = 'Unable to load data for: {data_path}'
class ApiVersionNotFoundError(BotoCoreError):
"""
The data associated with either that API version or a compatible one
could not be loaded.
    :ivar data_path: The data path that the user attempted to load.
    :ivar api_version: The API version that the user attempted to load.
"""
fmt = 'Unable to load data {data_path} for: {api_version}'
class NoCredentialsError(BotoCoreError):
"""
No credentials could be found
"""
fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
"""
Only partial credentials were found.
:ivar cred_var: The missing credential variable name.
"""
fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class NoRegionError(BotoCoreError):
"""
No region was specified
:ivar env_var: The name of the environment variable to use to
specify the default region.
"""
fmt = 'You must specify a region or set the {env_var} environment variable.'
class UnknownSignatureVersionError(BotoCoreError):
"""
Requested Signature Version is not known.
:ivar signature_version: The name of the requested signature version.
"""
fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
"""
The service is not available in requested region.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = 'Service {service_name} not available in region {region_name}'
class UnknownEndpointError(BotoCoreError):
"""
Could not construct an endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'Unable to construct an endpoint for '
'{service_name} in region {region_name}')
class ProfileNotFound(BotoCoreError):
"""
The specified configuration profile was not found in the
configuration file.
:ivar profile: The name of the profile the user attempted to load.
"""
fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
"""
The configuration file could not be parsed.
:ivar path: The path to the configuration file.
"""
fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
"""
The specified configuration file could not be found.
:ivar path: The path to the configuration file.
"""
fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
"""
One or more required parameters were not supplied.
:ivar object: The object that has missing parameters.
This can be an operation or a parameter (in the
case of inner params). The str() of this object
will be used so it doesn't need to implement anything
other than str().
:ivar missing: The names of the missing parameters.
"""
fmt = ('The following required parameters are missing for '
'{object_name}: {missing}')
class ValidationError(BotoCoreError):
"""
An exception occurred validating parameters.
Subclasses must accept a ``value`` and ``param``
argument in their ``__init__``.
:ivar value: The value that was being validated.
:ivar param: The parameter that failed validation.
:ivar type_name: The name of the underlying type.
"""
fmt = ("Invalid value ('{value}') for param {param} "
"of type {type_name} ")
class ParamValidationError(BotoCoreError):
fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possible validation
# error.
class UnknownKeyError(ValidationError):
"""
    Unknown key in a struct parameter.
:ivar value: The value that was being checked.
:ivar param: The name of the parameter.
:ivar choices: The valid choices the value can be.
"""
fmt = ("Unknown key '{value}' for param '{param}'. Must be one "
"of: {choices}")
class RangeError(ValidationError):
"""
A parameter value was out of the valid range.
:ivar value: The value that was being checked.
:ivar param: The parameter that failed validation.
:ivar min_value: The specified minimum value.
:ivar max_value: The specified maximum value.
"""
fmt = ('Value out of range for param {param}: '
'{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
"""
Unknown top level parameter.
:ivar name: The name of the unknown parameter.
:ivar operation: The name of the operation.
:ivar choices: The valid choices the parameter name can be.
"""
fmt = ("Unknown parameter '{name}' for operation {operation}. Must be one "
"of: {choices}")
class UnknownServiceStyle(BotoCoreError):
"""
Unknown style of service invocation.
:ivar service_style: The style requested.
"""
fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
fmt = 'Operation cannot be paginated: {operation_name}'
class EventNotFound(BotoCoreError):
"""
The specified event name is unknown to the system.
:ivar event_name: The name of the event the user attempted to use.
"""
fmt = 'The event ({event_name}) is not known'
class ChecksumError(BotoCoreError):
"""The expected checksum did not match the calculated checksum.
"""
fmt = ('Checksum {checksum_type} failed, expected checksum '
'{expected_checksum} did not match calculated checksum '
'{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
"""Need to seek a stream, but stream does not support seeking.
"""
fmt = ('Need to rewind the stream {stream_object}, but stream '
'is not seekable.')
class WaiterError(BotoCoreError):
"""Waiter failed to reach desired state."""
fmt = 'Waiter {name} failed: {reason}'
class IncompleteReadError(BotoCoreError):
"""HTTP response did not return expected number of bytes."""
fmt = ('{actual_bytes} read, but total bytes '
'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
"""Expression is either invalid or too complex."""
fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
"""Tried to insert before/after an unregistered credential type."""
fmt = 'Credential named {name} not found.'
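if __name__ == '__main__':
    # Illustrative demo of the fmt/kwargs mechanism shared by all subclasses:
    # keyword arguments passed to the constructor fill in the `fmt` template.
    try:
        raise ServiceNotInRegionError(service_name='s3', region_name='moon-east-1')
    except ServiceNotInRegionError as e:
        print(e)         # Service s3 not available in region moon-east-1
        print(e.kwargs)  # {'service_name': 's3', 'region_name': 'moon-east-1'}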
|
# Copyright (c) 2020. Lena "Teekeks" During <info@teawork.de>
"""
The Twitch API client
---------------------
This is the base of this library, it handles authentication renewal, error handling and permission management.
Look at the `Twitch API reference <https://dev.twitch.tv/docs/api/reference>`__ for a more detailed documentation on
what each endpoint does.
**************
Example Usage:
**************
.. code-block:: python
from twitchAPI.twitch import Twitch
from pprint import pprint
twitch = Twitch('my_app_key', 'my_app_secret')
pprint(twitch.get_users(logins=['your_twitch_username']))
**************
Authentication
**************
The Twitch API knows 2 different authentications: App and User Authentication.
Which one you need (or if you need one at all) depends on what calls you want to use.
It's always good to get at least App authentication even for calls where you don't need it, since the rate limits are way
better for authenticated calls.
App Authentication
==================
By default, the lib will attempt to create an App Authentication on initialization:
.. code-block:: python
from twitchAPI.twitch import Twitch
twitch = Twitch('my_app_id', 'my_app_secret')
You can set an Auth Scope like this:
.. code-block:: python
from twitchAPI.twitch import Twitch, AuthScope
twitch = Twitch('my_app_id', 'my_app_secret', target_app_auth_scope=[AuthScope.USER_EDIT])
If you want to change the AuthScope later use this:
.. code-block:: python
twitch.authenticate_app(my_new_scope)
If you don't want to use App Authentication, initialize like this:
.. code-block:: python
from twitchAPI.twitch import Twitch
twitch = Twitch('my_app_id', authenticate_app=False)
User Authentication
===================
To use only a user auth token, use this:
.. code-block:: python
from twitchAPI.twitch import Twitch
twitch = Twitch('my_app_id', authenticate_app=False)
# make sure to set the second parameter as the scope used to generate the token
twitch.set_user_authentication('token', [], 'refresh_token')
To use both App and User Authentication:
.. code-block:: python
from twitchAPI.twitch import Twitch
twitch = Twitch('my_app_id', 'my_app_secret')
# make sure to set the second parameter as the scope used to generate the token
twitch.set_user_authentication('token', [], 'refresh_token')
To get a user auth token, the user has to explicitly click "Authorize" on the twitch website. You can use various online
services to generate a token or use my built-in authenticator.
See :obj:`twitchAPI.oauth` for more info on my built-in authenticator.
Authentication refresh callback
===============================
Optionally you can set a callback for both user access token refresh and app access token refresh.
.. code-block:: python
from twitchAPI.twitch import Twitch
def user_refresh(token: str, refresh_token: str):
print(f'my new user token is: {token}')
def app_refresh(token: str):
print(f'my new app token is: {token}')
twitch = Twitch('my_app_id', 'my_app_secret')
twitch.app_auth_refresh_callback = app_refresh
twitch.user_auth_refresh_callback = user_refresh
********************
Class Documentation:
********************
"""
import requests
from typing import Union, List, Optional, Callable, Tuple
from .helper import build_url, TWITCH_API_BASE_URL, TWITCH_AUTH_BASE_URL, make_fields_datetime, build_scope, \
fields_to_enum, enum_value_or_none, datetime_to_str
from datetime import datetime
from logging import getLogger, Logger
from .types import *
class Twitch:
"""
Twitch API client
:param str app_id: Your app id
:param str app_secret: Your app secret, leave as None if you only want to use User Authentication
|default| :code:`None`
    :param bool authenticate_app: If true, auto generate an app token on startup |default| :code:`True`
:param list[~twitchAPI.types.AuthScope] target_app_auth_scope: AuthScope to use if :code:`authenticate_app` is True
|default| :code:`None`
:var bool auto_refresh_auth: If set to true, auto refresh the auth token once it expires. |default| :code:`True`
:var Callable[[str,str],None] user_auth_refresh_callback: If set, gets called whenever a user auth token gets
refreshed. Parameter: Auth Token, Refresh Token |default| :code:`None`
    :var Callable[[str],None] app_auth_refresh_callback: If set, gets called whenever an app auth token gets
refreshed. Parameter: Auth Token |default| :code:`None`
"""
app_id: Optional[str] = None
app_secret: Optional[str] = None
user_auth_refresh_callback: Optional[Callable[[str, str], None]] = None
app_auth_refresh_callback: Optional[Callable[[str], None]] = None
__app_auth_token: Optional[str] = None
__app_auth_scope: List[AuthScope] = []
__has_app_auth: bool = False
__user_auth_token: Optional[str] = None
__user_auth_refresh_token: Optional[str] = None
__user_auth_scope: List[AuthScope] = []
__has_user_auth: bool = False
__logger: Logger = None
auto_refresh_auth: bool = True
def __init__(self,
app_id: str,
app_secret: Optional[str] = None,
authenticate_app: bool = True,
target_app_auth_scope: Optional[List[AuthScope]] = None):
self.app_id = app_id
self.app_secret = app_secret
self.__logger = getLogger('twitchAPI.twitch')
if authenticate_app:
self.authenticate_app(target_app_auth_scope if target_app_auth_scope is not None else [])
def __generate_header(self, auth_type: 'AuthType', required_scope: List[AuthScope]) -> dict:
header = {"Client-ID": self.app_id}
if auth_type == AuthType.EITHER:
has_auth, target, token, scope = self.__get_used_either_auth(required_scope)
if not has_auth:
raise UnauthorizedException('No authorization with correct scope set!')
header['Authorization'] = f'Bearer {token}'
elif auth_type == AuthType.APP:
if not self.__has_app_auth:
raise UnauthorizedException('Require app authentication!')
for s in required_scope:
if s not in self.__app_auth_scope:
raise MissingScopeException('Require app auth scope ' + s.name)
header['Authorization'] = f'Bearer {self.__app_auth_token}'
elif auth_type == AuthType.USER:
if not self.__has_user_auth:
raise UnauthorizedException('require user authentication!')
for s in required_scope:
if s not in self.__user_auth_scope:
raise MissingScopeException('Require user auth scope ' + s.name)
header['Authorization'] = f'Bearer {self.__user_auth_token}'
elif auth_type == AuthType.NONE:
# set one anyway for better performance if possible but don't error if none found
has_auth, target, token, scope = self.__get_used_either_auth(required_scope)
if has_auth:
header['Authorization'] = f'Bearer {token}'
return header
    def __get_used_either_auth(self, required_scope: List[AuthScope]) -> \
            Tuple[bool, AuthType, Union[None, str], List[AuthScope]]:
if self.has_required_auth(AuthType.USER, required_scope):
return True, AuthType.USER, self.__user_auth_token, self.__user_auth_scope
if self.has_required_auth(AuthType.APP, required_scope):
return True, AuthType.APP, self.__app_auth_token, self.__app_auth_scope
return False, AuthType.NONE, None, []
def get_user_auth_scope(self) -> List[AuthScope]:
"""Returns the set User auth Scope"""
return self.__user_auth_scope
def has_required_auth(self, required_type: AuthType, required_scope: List[AuthScope]) -> bool:
if required_type == AuthType.NONE:
return True
if required_type == AuthType.EITHER:
return self.has_required_auth(AuthType.USER, required_scope) or \
self.has_required_auth(AuthType.APP, required_scope)
if required_type == AuthType.USER:
if not self.__has_user_auth:
return False
for s in required_scope:
if s not in self.__user_auth_scope:
return False
return True
if required_type == AuthType.APP:
if not self.__has_app_auth:
return False
for s in required_scope:
if s not in self.__app_auth_scope:
return False
return True
# default to false
return False
# FIXME rewrite refresh_used_token
def refresh_used_token(self):
"""Refreshes the currently used token"""
if self.__has_user_auth:
self.__logger.debug('refreshing user token')
from .oauth import refresh_access_token
self.__user_auth_token, \
self.__user_auth_refresh_token = refresh_access_token(self.__user_auth_refresh_token,
self.app_id,
self.app_secret)
if self.user_auth_refresh_callback is not None:
self.user_auth_refresh_callback(self.__user_auth_token, self.__user_auth_refresh_token)
else:
self.__generate_app_token()
if self.app_auth_refresh_callback is not None:
self.app_auth_refresh_callback(self.__app_auth_token)
def __check_request_return(self,
response: requests.Response,
retry_func: Callable,
reply_func_has_data: bool,
url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
data: Optional[dict] = None,
retries: int = 1
) -> requests.Response:
if self.auto_refresh_auth and retries > 0:
if response.status_code == 401:
# unauthorized, lets try to refresh the token once
self.__logger.debug('got 401 response -> try to refresh token')
self.refresh_used_token()
if reply_func_has_data:
return retry_func(url, auth_type, required_scope, data=data, retries=retries - 1)
else:
return retry_func(url, auth_type, required_scope, retries=retries - 1)
elif response.status_code == 503:
# service unavailable, retry exactly once as recommended by twitch documentation
self.__logger.debug('got 503 response -> retry once')
if reply_func_has_data:
return retry_func(url, auth_type, required_scope, data=data, retries=retries - 1)
else:
return retry_func(url, auth_type, required_scope, retries=retries - 1)
elif self.auto_refresh_auth and retries <= 0:
if response.status_code == 503:
raise TwitchBackendException('The Twitch API returns a server error')
if response.status_code == 401:
msg = response.json().get('message', '')
self.__logger.debug(f'got 401 response and can\'t refresh. Message: "{msg}"')
raise UnauthorizedException(msg)
if response.status_code == 500:
raise TwitchBackendException('Internal Server Error')
if response.status_code == 400:
raise TwitchAPIException('Bad Request')
return response
def __api_post_request(self,
url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
data: Optional[dict] = None,
retries: int = 1) -> requests.Response:
"""Make POST request with authorization"""
headers = self.__generate_header(auth_type, required_scope)
self.__logger.debug(f'making POST request to {url}')
if data is None:
req = requests.post(url, headers=headers)
else:
req = requests.post(url, headers=headers, json=data)
return self.__check_request_return(req,
self.__api_post_request,
True,
url,
auth_type,
required_scope,
data,
retries)
def __api_put_request(self,
url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
data: Optional[dict] = None,
retries: int = 1) -> requests.Response:
"""Make PUT request with authorization"""
headers = self.__generate_header(auth_type, required_scope)
self.__logger.debug(f'making PUT request to {url}')
if data is None:
req = requests.put(url, headers=headers)
else:
req = requests.put(url, headers=headers, json=data)
return self.__check_request_return(req,
self.__api_put_request,
True,
url,
auth_type,
required_scope,
data,
retries)
def __api_patch_request(self,
url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
data: Optional[dict] = None,
retries: int = 1) -> requests.Response:
"""Make PATCH request with authorization"""
headers = self.__generate_header(auth_type, required_scope)
self.__logger.debug(f'making PATCH request to {url}')
if data is None:
req = requests.patch(url, headers=headers)
else:
req = requests.patch(url, headers=headers, json=data)
return self.__check_request_return(req,
self.__api_patch_request,
True,
url,
auth_type,
required_scope,
data,
retries)
def __api_delete_request(self,
url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
data: Optional[dict] = None,
retries: int = 1) -> requests.Response:
"""Make DELETE request with authorization"""
headers = self.__generate_header(auth_type, required_scope)
self.__logger.debug(f'making DELETE request to {url}')
if data is None:
req = requests.delete(url, headers=headers)
else:
req = requests.delete(url, headers=headers, json=data)
return self.__check_request_return(req,
self.__api_delete_request,
True,
url,
auth_type,
required_scope,
data,
retries)
def __api_get_request(self, url: str,
auth_type: 'AuthType',
required_scope: List[AuthScope],
retries: int = 1) -> requests.Response:
"""Make GET request with authorization"""
headers = self.__generate_header(auth_type, required_scope)
self.__logger.debug(f'making GET request to {url}')
req = requests.get(url, headers=headers)
return self.__check_request_return(req,
self.__api_get_request,
False,
url,
auth_type,
required_scope,
None,
retries)
def __generate_app_token(self) -> None:
if self.app_secret is None:
raise MissingAppSecretException()
params = {
'client_id': self.app_id,
'client_secret': self.app_secret,
'grant_type': 'client_credentials',
'scope': build_scope(self.__app_auth_scope)
}
self.__logger.debug('generating fresh app token')
url = build_url(TWITCH_AUTH_BASE_URL + 'oauth2/token', params)
result = requests.post(url)
if result.status_code != 200:
raise TwitchAuthorizationException(f'Authentication failed with code {result.status_code} ({result.text})')
try:
data = result.json()
self.__app_auth_token = data['access_token']
except ValueError:
raise TwitchAuthorizationException('Authentication response did not have a valid json body')
except KeyError:
raise TwitchAuthorizationException('Authentication response did not contain access_token')
def authenticate_app(self, scope: List[AuthScope]) -> None:
"""Authenticate with a fresh generated app token
:param list[~twitchAPI.types.AuthScope] scope: List of Authorization scopes to use
:raises ~twitchAPI.types.TwitchAuthorizationException: if the authentication fails
:return: None
"""
self.__app_auth_scope = scope
self.__generate_app_token()
self.__has_app_auth = True
def set_user_authentication(self,
token: str,
scope: List[AuthScope],
refresh_token: Optional[str] = None,
validate: bool = True):
"""Set a user token to be used.
:param str token: the generated user token
:param list[~twitchAPI.types.AuthScope] scope: List of Authorization Scopes that the given user token has
:param str refresh_token: The generated refresh token, has to be provided if :attr:`auto_refresh_auth` is True
|default| :code:`None`
:param bool validate: if true, validate the set token for being a user auth token and having the required scope
|default| :code:`True`
:raises ValueError: if :attr:`auto_refresh_auth` is True but refresh_token is not set
:raises ~twitchAPI.types.MissingScopeException: if given token is missing one of the required scopes
:raises ~twitchAPI.types.InvalidTokenException: if the given token is invalid or for a different client id
"""
if refresh_token is None and self.auto_refresh_auth:
raise ValueError('refresh_token has to be provided when auto_refresh_user_auth is True')
if validate:
from .oauth import validate_token
val_result = validate_token(token)
if val_result.get('status', 200) == 401:
raise InvalidTokenException(val_result.get('message', ''))
if 'login' not in val_result or 'user_id' not in val_result:
                # this is an app token or the token is not valid
raise InvalidTokenException('not a user oauth token')
if val_result.get('client_id') != self.app_id:
raise InvalidTokenException('client id does not match')
scopes = val_result.get('scopes', [])
for s in scope:
if s not in scopes:
raise MissingScopeException(f'given token is missing scope {s.value}')
self.__user_auth_token = token
self.__user_auth_refresh_token = refresh_token
self.__user_auth_scope = scope
self.__has_user_auth = True
def get_app_token(self) -> Union[str, None]:
"""Returns the app token that the api uses or None when not authenticated.
:return: app token
:rtype: Union[str, None]
"""
return self.__app_auth_token
def get_user_auth_token(self) -> Union[str, None]:
"""Returns the current user auth token, None if no user Authentication is set
:return: current user auth token
:rtype: str or None
"""
return self.__user_auth_token
def get_used_token(self) -> Union[str, None]:
"""Returns the currently used token, can be either the app or user auth Token or None if no auth is set
:return: the currently used auth token or None if no Authentication is set
"""
# if no auth is set, self.__app_auth_token will be None
return self.__user_auth_token if self.__has_user_auth else self.__app_auth_token
# ======================================================================================================================
# API calls
# ======================================================================================================================
def get_extension_analytics(self,
after: Optional[str] = None,
extension_id: Optional[str] = None,
first: int = 20,
ended_at: Optional[datetime] = None,
started_at: Optional[datetime] = None,
report_type: Optional[AnalyticsReportType] = None) -> dict:
"""Gets a URL that extension developers can use to download analytics reports (CSV files) for their extensions.
The URL is valid for 5 minutes.\n\n
Requires User authentication with scope :py:const:`twitchAPI.types.AuthScope.ANALYTICS_READ_EXTENSION`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-extension-analytics
:param str after: cursor for forward pagination |default| :code:`None`
:param str extension_id: If this is specified, the returned URL points to an analytics report for just the
specified extension. |default| :code:`None`
:param int first: Maximum number of objects returned, range 1 to 100, |default| :code:`20`
:param ~datetime.datetime ended_at: Ending date/time for returned reports, if this is provided,
`started_at` must also be specified. |default| :code:`None`
:param ~datetime.datetime started_at: Starting date/time for returned reports, if this is provided,
`ended_at` must also be specified. |default| :code:`None`
:param ~twitchAPI.types.AnalyticsReportType report_type: Type of analytics report that is returned
|default| :code:`None`
:rtype: dict
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
        :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
            and a re authentication failed
        :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: When you only supply `started_at` or `ended_at` without the other or when first is not in
range 1 to 100
"""
if ended_at is not None or started_at is not None:
# you have to put in both:
if ended_at is None or started_at is None:
raise ValueError('you must specify both ended_at and started_at')
if started_at > ended_at:
raise ValueError('started_at must be before ended_at')
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
url_params = {
'after': after,
'ended_at': datetime_to_str(ended_at),
'extension_id': extension_id,
'first': first,
'started_at': datetime_to_str(started_at),
'type': enum_value_or_none(report_type)
}
url = build_url(TWITCH_API_BASE_URL + 'analytics/extensions',
url_params,
remove_none=True)
response = self.__api_get_request(url, AuthType.USER, required_scope=[AuthScope.ANALYTICS_READ_EXTENSION])
data = response.json()
return make_fields_datetime(data, ['started_at', 'ended_at'])
def get_game_analytics(self,
after: Optional[str] = None,
first: int = 20,
game_id: Optional[str] = None,
ended_at: Optional[datetime] = None,
started_at: Optional[datetime] = None,
report_type: Optional[AnalyticsReportType] = None) -> dict:
"""Gets a URL that game developers can use to download analytics reports (CSV files) for their games.
The URL is valid for 5 minutes.\n\n
Requires User authentication with scope :py:const:`twitchAPI.types.AuthScope.ANALYTICS_READ_GAMES`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-game-analytics
:param str after: cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects returned, range 1 to 100, |default| :code:`20`
:param str game_id: Game ID |default| :code:`None`
:param ~datetime.datetime ended_at: Ending date/time for returned reports, if this is provided,
`started_at` must also be specified. |default| :code:`None`
:param ~datetime.datetime started_at: Starting date/time for returned reports, if this is provided,
`ended_at` must also be specified. |default| :code:`None`
:param ~twitchAPI.types.AnalyticsReportType report_type: Type of analytics report that is returned.
|default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: When you only supply `started_at` or `ended_at` without the other or when first is not in
range 1 to 100
:rtype: dict
"""
if ended_at is not None or started_at is not None:
if ended_at is None or started_at is None:
raise ValueError('you must specify both ended_at and started_at')
if ended_at < started_at:
raise ValueError('ended_at must be after started_at')
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
url_params = {
'after': after,
'ended_at': datetime_to_str(ended_at),
'first': first,
'game_id': game_id,
'started_at': datetime_to_str(started_at),
'type': enum_value_or_none(report_type)
}
url = build_url(TWITCH_API_BASE_URL + 'analytics/games',
url_params,
remove_none=True)
response = self.__api_get_request(url, AuthType.USER, [AuthScope.ANALYTICS_READ_GAMES])
data = response.json()
return make_fields_datetime(data, ['ended_at', 'started_at'])
def get_bits_leaderboard(self,
count: int = 10,
period: TimePeriod = TimePeriod.ALL,
started_at: Optional[datetime] = None,
user_id: Optional[str] = None) -> dict:
"""Gets a ranked list of Bits leaderboard information for an authorized broadcaster.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.BITS_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-bits-leaderboard
:param int count: Number of results to be returned. In range 1 to 100, |default| :code:`10`
:param ~twitchAPI.types.TimePeriod period: Time period over which data is aggregated, |default|
:const:`twitchAPI.types.TimePeriod.ALL`
:param ~datetime.datetime started_at: Timestamp for the period over which the returned data is aggregated.
|default| :code:`None`
:param str user_id: ID of the user whose results are returned; i.e., the person who paid for the Bits.
|default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if count is not in range 1 to 100
:rtype: dict
"""
if count > 100 or count < 1:
raise ValueError('count must be between 1 and 100')
url_params = {
'count': count,
'period': period.value,
'started_at': datetime_to_str(started_at),
'user_id': user_id
}
url = build_url(TWITCH_API_BASE_URL + 'bits/leaderboard', url_params, remove_none=True)
response = self.__api_get_request(url, AuthType.USER, [AuthScope.BITS_READ])
data = response.json()
return make_fields_datetime(data, ['ended_at', 'started_at'])
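# Illustrative sketch (placeholder values; 'twitch' is assumed to carry user
# authentication including AuthScope.BITS_READ):
#
#   board = twitch.get_bits_leaderboard(count=5, period=TimePeriod.WEEK)
#   for entry in board['data']:
#       print(entry['rank'], entry['user_name'], entry['score'])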
def get_extension_transactions(self,
extension_id: str,
transaction_id: Optional[str] = None,
after: Optional[str] = None,
first: int = 20) -> dict:
"""Get Extension Transactions allows extension back end servers to fetch a list of transactions that have
occurred for their extension across all of Twitch.
A transaction is a record of a user exchanging Bits for an in-Extension digital good.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-extension-transactions
:param str extension_id: ID of the extension to list transactions for.
:param str transaction_id: Transaction IDs to look up. |default| :code:`None`
:param str after: cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects returned, range 1 to 100, |default| :code:`20`
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first > 100 or first < 1:
raise ValueError("first must be between 1 and 100")
url_param = {
'extension_id': extension_id,
'id': transaction_id,
'after': after,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'extensions/transactions', url_param, remove_none=True)
result = self.__api_get_request(url, AuthType.APP, [])
data = result.json()
return make_fields_datetime(data, ['timestamp'])
def create_clip(self,
broadcaster_id: str,
has_delay: bool = False) -> dict:
"""Creates a clip programmatically. This returns both an ID and an edit URL for the new clip.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CLIPS_EDIT`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#create-clip
:param str broadcaster_id: Broadcaster ID of the stream from which the clip will be made.
:param bool has_delay: If False, the clip is captured from the live stream when the API is called; otherwise,
a delay is added before the clip is captured (to account for the brief delay between the broadcaster’s
stream and the viewer’s experience of that stream). |default| :code:`False`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
param = {
'broadcaster_id': broadcaster_id,
'has_delay': has_delay
}
url = build_url(TWITCH_API_BASE_URL + 'clips', param)
result = self.__api_post_request(url, AuthType.USER, [AuthScope.CLIPS_EDIT])
return result.json()
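# Illustrative sketch (the broadcaster ID is a placeholder; 'twitch' is assumed
# to carry user authentication with AuthScope.CLIPS_EDIT):
#
#   clip = twitch.create_clip('1234', has_delay=False)
#   print(clip['data'][0]['id'], clip['data'][0]['edit_url'])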
def get_clips(self,
broadcaster_id: Optional[str] = None,
game_id: Optional[str] = None,
clip_id: Optional[List[str]] = None,
after: Optional[str] = None,
before: Optional[str] = None,
ended_at: Optional[datetime] = None,
started_at: Optional[datetime] = None,
first: int = 20) -> dict:
"""Gets clip information by clip ID (one or more), broadcaster ID (one only), or game ID (one only).
Clips are returned sorted by view count, in descending order.\n\n
Requires App or User authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-clips
:param str broadcaster_id: ID of the broadcaster for whom clips are returned. |default| :code:`None`
:param str game_id: ID of the game for which clips are returned. |default| :code:`None`
:param list[str] clip_id: ID of the clip being queried. Limit: 100. |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param ~datetime.datetime ended_at: Ending date/time for returned clips |default| :code:`None`
:param ~datetime.datetime started_at: Starting date/time for returned clips |default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if you try to query more than 100 clips in one call
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ValueError: if not exactly one of clip_id, broadcaster_id or game_id is given
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if clip_id is not None and len(clip_id) > 100:
raise ValueError('A maximum of 100 clips can be queried in one call')
if sum([clip_id is not None, broadcaster_id is not None, game_id is not None]) != 1:
raise ValueError('You need to specify exactly one of clip_id, broadcaster_id or game_id')
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
param = {
'broadcaster_id': broadcaster_id,
'game_id': game_id,
'clip_id': clip_id,
'after': after,
'before': before,
'first': first,
'ended_at': datetime_to_str(ended_at),
'started_at': datetime_to_str(started_at)
}
url = build_url(TWITCH_API_BASE_URL + 'clips', param, split_lists=True, remove_none=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
data = result.json()
return make_fields_datetime(data, ['created_at'])
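# Illustrative sketch of the exactly-one-of constraint enforced above: pass
# clip_id, broadcaster_id or game_id, never a combination ('twitch' and the
# IDs below are placeholders):
#
#   clips = twitch.get_clips(game_id='21779', first=5)
#   # twitch.get_clips(game_id='21779', broadcaster_id='1234')  # -> ValueError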
def get_code_status(self,
code: List[str],
user_id: int) -> dict:
"""Gets the status of one or more provided Bits codes.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-code-status
:param list[str] code: The code to get the status of. Maximum of 20 entries
:param int user_id: Represents the numeric Twitch user ID of the account which is going to receive the
entitlement associated with the code.
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if length of code is not in range 1 to 20
:rtype: dict
"""
if len(code) > 20 or len(code) < 1:
raise ValueError('only between 1 and 20 codes are allowed')
param = {
'code': code,
'user_id': user_id
}
url = build_url(TWITCH_API_BASE_URL + 'entitlements/codes', param, split_lists=True)
result = self.__api_get_request(url, AuthType.APP, [])
data = result.json()
return fields_to_enum(data, ['status'], CodeStatus, CodeStatus.UNKNOWN_VALUE)
def redeem_code(self,
code: List[str],
user_id: int) -> dict:
"""Redeems one or more provided Bits codes to the authenticated Twitch user.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#redeem-code
:param list[str] code: The code to redeem to the authenticated user’s account. Maximum of 20 entries
:param int user_id: Represents the numeric Twitch user ID of the account which is going to receive the
entitlement associated with the code.
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if length of code is not in range 1 to 20
:rtype: dict
"""
if len(code) > 20 or len(code) < 1:
raise ValueError('only between 1 and 20 codes are allowed')
param = {
'code': code,
'user_id': user_id
}
url = build_url(TWITCH_API_BASE_URL + 'entitlements/code', param, split_lists=True)
result = self.__api_post_request(url, AuthType.APP, [])
data = result.json()
return fields_to_enum(data, ['status'], CodeStatus, CodeStatus.UNKNOWN_VALUE)
def get_top_games(self,
after: Optional[str] = None,
before: Optional[str] = None,
first: int = 20) -> dict:
"""Gets games sorted by number of current viewers on Twitch, most popular first.\n\n
Requires App or User authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-top-games
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
param = {
'after': after,
'before': before,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'games/top', param, remove_none=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return result.json()
def get_games(self,
game_ids: Optional[List[str]] = None,
names: Optional[List[str]] = None) -> dict:
"""Gets game information by game ID or name.\n\n
Requires User or App authentication.
In total, only 100 game ids and names can be fetched at once.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-games
:param list[str] game_ids: Game ID |default| :code:`None`
:param list[str] names: Game Name |default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if neither game_ids nor names is given, or if game_ids and names combined have more than
100 entries.
:rtype: dict
"""
if game_ids is None and names is None:
raise ValueError('at least one of game_ids or names must be set')
if (len(game_ids) if game_ids is not None else 0) + (len(names) if names is not None else 0) > 100:
raise ValueError('in total, only 100 game_ids and names can be passed')
param = {
'id': game_ids,
'name': names
}
url = build_url(TWITCH_API_BASE_URL + 'games', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return result.json()
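# Illustrative sketch: ids and names may be mixed, as long as the combined
# count stays at or below 100 ('twitch' and the values are placeholders):
#
#   games = twitch.get_games(game_ids=['493057'], names=['Fortnite'])
#   for game in games['data']:
#       print(game['id'], game['name'])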
def check_automod_status(self,
broadcaster_id: str,
msg_id: str,
msg_text: str,
user_id: str) -> dict:
"""Determines whether a string message meets the channel’s AutoMod requirements.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.MODERATION_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#check-automod-status
:param str broadcaster_id: Provided broadcaster ID must match the user ID in the user auth token.
:param str msg_id: Developer-generated identifier for mapping messages to results.
:param str msg_text: Message text.
:param str user_id: User ID of the sender.
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
# TODO you can pass multiple sets in the body, account for that
url_param = {
'broadcaster_id': broadcaster_id
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/enforcements/status', url_param)
body = {
'data': [{
'msg_id': msg_id,
'msg_text': msg_text,
'user_id': user_id}
]
}
result = self.__api_post_request(url, AuthType.USER, [AuthScope.MODERATION_READ], data=body)
return result.json()
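# Illustrative sketch ('twitch' is assumed to carry user authentication with
# AuthScope.MODERATION_READ; the IDs are placeholders and msg_id is any
# developer-chosen correlation key):
#
#   result = twitch.check_automod_status(broadcaster_id='1234',
#                                        msg_id='msg-1',
#                                        msg_text='hello world',
#                                        user_id='5678')
#   print(result['data'][0]['is_permitted'])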
def get_banned_events(self,
broadcaster_id: str,
user_id: Optional[str] = None,
after: Optional[str] = None,
first: int = 20) -> dict:
"""Returns all user bans and un-bans in a channel.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.MODERATION_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-banned-events
:param str broadcaster_id: Provided broadcaster ID must match the user ID in the user auth token.
:param str user_id: Filters the results and only returns a status object for users who are banned in
this channel and have a matching user_id |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_id,
'after': after,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/banned/events', param, remove_none=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.MODERATION_READ])
data = result.json()
data = fields_to_enum(data, ['event_type'], ModerationEventType, ModerationEventType.UNKNOWN)
data = make_fields_datetime(data, ['event_timestamp', 'expires_at'])
return data
def get_banned_users(self,
broadcaster_id: str,
user_id: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = 20,
before: Optional[str] = None) -> dict:
"""Returns all banned and timed-out users in a channel.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.MODERATION_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-banned-users
:param str broadcaster_id: Provided broadcaster ID must match the user ID in the user auth token.
:param str user_id: Filters the results and only returns a status object for users who are banned in this
channel and have a matching user_id. |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_id,
'after': after,
'first': first,
'before': before
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/banned', param, remove_none=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.MODERATION_READ])
return make_fields_datetime(result.json(), ['expires_at'])
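# Illustrative pagination sketch: Helix responses carry a 'pagination' cursor
# that feeds the next call's 'after' parameter ('twitch' and the broadcaster
# ID are placeholders):
#
#   cursor = None
#   while True:
#       page = twitch.get_banned_users('1234', first=100, after=cursor)
#       for ban in page['data']:
#           print(ban['user_name'], ban['expires_at'])
#       cursor = page.get('pagination', {}).get('cursor')
#       if not cursor:
#           break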
def get_moderators(self,
broadcaster_id: str,
user_ids: Optional[List[str]] = None,
first: Optional[int] = 20,
after: Optional[str] = None) -> dict:
"""Returns all moderators in a channel.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.MODERATION_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-moderators
:param str broadcaster_id: Provided broadcaster ID must match the user ID in the user auth token.
:param list[str] user_ids: Filters the results and only returns a status object for users who are moderators in
this channel and have a matching user_id. Maximum 100 |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if user_ids has more than 100 entries
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
if user_ids is not None and len(user_ids) > 100:
raise ValueError('user_ids can only be 100 entries long')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_ids,
'first': first,
'after': after
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/moderators', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.MODERATION_READ])
return result.json()
def get_moderator_events(self,
broadcaster_id: str,
user_ids: Optional[List[str]] = None,
after: Optional[str] = None,
first: Optional[int] = 20) -> dict:
"""Returns a list of moderators or users added and removed as moderators from a channel.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.MODERATION_READ`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-moderator-events
:param str broadcaster_id: Provided broadcaster ID must match the user ID in the user auth token.
:param list[str] user_ids: Filters the results and only returns a status object for users who are moderators in
this channel and have a matching user_id. Maximum 100 |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if user_ids has more than 100 entries
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
if user_ids is not None and len(user_ids) > 100:
raise ValueError('user_ids can only be 100 entries long')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_ids,
'after': after,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/moderators/events', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.MODERATION_READ])
data = result.json()
data = fields_to_enum(data, ['event_type'], ModerationEventType, ModerationEventType.UNKNOWN)
data = make_fields_datetime(data, ['event_timestamp'])
return data
def create_stream_marker(self,
user_id: str,
description: Optional[str] = None) -> dict:
"""Creates a marker in the stream of a user specified by user ID.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#create-stream-marker
:param str user_id: ID of the broadcaster in whose live stream the marker is created.
:param str description: Description of or comments on the marker. Max length is 140 characters.
|default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if description has more than 140 characters
:rtype: dict
"""
if description is not None and len(description) > 140:
raise ValueError('max length for description is 140')
url = build_url(TWITCH_API_BASE_URL + 'streams/markers', {})
body = {'user_id': user_id}
if description is not None:
body['description'] = description
result = self.__api_post_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_BROADCAST], data=body)
data = result.json()
return make_fields_datetime(data, ['created_at'])
def get_streams(self,
after: Optional[str] = None,
before: Optional[str] = None,
first: int = 20,
game_id: Optional[List[str]] = None,
language: Optional[List[str]] = None,
user_id: Optional[List[str]] = None,
user_login: Optional[List[str]] = None) -> dict:
"""Gets information about active streams. Streams are returned sorted by number of current viewers, in
descending order. Across multiple pages of results, there may be duplicate or missing streams, as viewers join
and leave streams.\n\n
Requires App or User authentication.\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-streams
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param list[str] game_id: Returns streams broadcasting a specified game ID. You can specify up to 100 IDs.
|default| :code:`None`
:param list[str] language: Stream language. You can specify up to 100 languages. |default| :code:`None`
:param list[str] user_id: Returns streams broadcast by one or more specified user IDs. You can specify up
to 100 IDs. |default| :code:`None`
:param list[str] user_login: Returns streams broadcast by one or more specified user login names.
You can specify up to 100 names. |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or one of the following fields have more than 100 entries:
`user_id, game_id, language, user_login`
:rtype: dict
"""
if user_id is not None and len(user_id) > 100:
raise ValueError('a maximum of 100 user_id entries are allowed')
if user_login is not None and len(user_login) > 100:
raise ValueError('a maximum of 100 user_login entries are allowed')
if language is not None and len(language) > 100:
raise ValueError('a maximum of 100 languages are allowed')
if game_id is not None and len(game_id) > 100:
raise ValueError('a maximum of 100 game_id entries are allowed')
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
param = {
'after': after,
'before': before,
'first': first,
'game_id': game_id,
'language': language,
'user_id': user_id,
'user_login': user_login
}
url = build_url(TWITCH_API_BASE_URL + 'streams', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
data = result.json()
return make_fields_datetime(data, ['started_at'])
def get_stream_markers(self,
user_id: Optional[str] = None,
video_id: Optional[str] = None,
after: Optional[str] = None,
before: Optional[str] = None,
first: int = 20) -> dict:
"""Gets a list of markers for either a specified user’s most recent stream or a specified VOD/video (stream),
ordered by recency.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_READ_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-stream-markers
Only one of user_id and video_id must be specified.
:param str user_id: ID of the broadcaster from whose stream markers are returned. |default| :code:`None`
:param str video_id: ID of the VOD/video whose stream markers are returned. |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param int first: Number of values to be returned when getting videos by user or game ID. Limit: 100.
|default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or neither user_id nor video_id is provided
:rtype: dict
"""
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
if user_id is None and video_id is None:
raise ValueError('you must specify at least one of user_id and video_id')
param = {
'user_id': user_id,
'video_id': video_id,
'after': after,
'before': before,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'streams/markers', param, remove_none=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.USER_READ_BROADCAST])
return make_fields_datetime(result.json(), ['created_at'])
def get_broadcaster_subscriptions(self,
broadcaster_id: str,
user_ids: Optional[List[str]] = None,
after: Optional[str] = None,
first: Optional[int] = 20) -> dict:
"""Get all of a broadcaster’s subscriptions.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_READ_SUBSCRIPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-broadcaster-subscriptions
:param str broadcaster_id: User ID of the broadcaster. Must match the User ID in the Bearer token.
:param list[str] user_ids: Unique identifier of account to get subscription status of. Maximum 100 entries
|default| :code:`None`
:param str after: Cursor for forward pagination. |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if user_ids has more than 100 entries
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
if user_ids is not None and len(user_ids) > 100:
raise ValueError('user_ids can have a maximum of 100 entries')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_ids,
'first': first,
'after': after
}
url = build_url(TWITCH_API_BASE_URL + 'subscriptions', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_SUBSCRIPTIONS])
return result.json()
def check_user_subscription(self,
broadcaster_id: str,
user_id: str) -> dict:
"""Checks if a specific user (user_id) is subscribed to a specific channel (broadcaster_id).
Requires User or App Authorization with scope :const:`twitchAPI.types.AuthScope.USER_READ_SUBSCRIPTIONS`
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#check-user-subscription
:param str broadcaster_id: User ID of an Affiliate or Partner broadcaster.
:param str user_id: User ID of a Twitch viewer.
:rtype: dict
:raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the app or user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
"""
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_id
}
url = build_url(TWITCH_API_BASE_URL + 'subscriptions/user', param)
result = self.__api_get_request(url, AuthType.EITHER, [AuthScope.USER_READ_SUBSCRIPTIONS])
return result.json()
def get_all_stream_tags(self,
after: Optional[str] = None,
first: int = 20,
tag_ids: Optional[List[str]] = None) -> dict:
"""Gets the list of all stream tags defined by Twitch, optionally filtered by tag ID(s).\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-all-stream-tags
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param list[str] tag_ids: IDs of tags. Maximum 100 entries |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or tag_ids has more than 100 entries
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
if tag_ids is not None and len(tag_ids) > 100:
raise ValueError('tag_ids can not have more than 100 entries')
param = {
'after': after,
'first': first,
'tag_id': tag_ids
}
url = build_url(TWITCH_API_BASE_URL + 'tags/streams', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.APP, [])
return result.json()
def get_stream_tags(self,
broadcaster_id: str) -> dict:
"""Gets the list of tags for a specified stream (channel).\n\n
Requires User authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-stream-tags
:param str broadcaster_id: ID of the stream whose tags are going to be fetched
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'streams/tags', {'broadcaster_id': broadcaster_id})
result = self.__api_get_request(url, AuthType.USER, [])
return result.json()
def replace_stream_tags(self,
broadcaster_id: str,
tag_ids: List[str]) -> dict:
"""Applies specified tags to a specified stream, overwriting any existing tags applied to that stream.
If no tags are specified, all tags previously applied to the stream are removed.
Automated tags are not affected by this operation.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#replace-stream-tags
:param str broadcaster_id: ID of the stream for which tags are to be replaced.
:param list[str] tag_ids: IDs of tags to be applied to the stream. Maximum of 100 supported.
:return: {}
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if more than 100 tag_ids were provided
:rtype: dict
"""
if len(tag_ids) > 100:
raise ValueError('tag_ids can not have more than 100 entries')
url = build_url(TWITCH_API_BASE_URL + 'streams/tags', {'broadcaster_id': broadcaster_id})
self.__api_put_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_BROADCAST], data={'tag_ids': tag_ids})
# this returns nothing
return {}
def get_channel_teams(self,
broadcaster_id: str) -> dict:
"""Retrieves a list of Twitch Teams of which the specified channel/broadcaster is a member.\n\n
Requires User or App authentication.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference/#get-channel-teams
:param str broadcaster_id: User ID for a Twitch user.
:rtype: dict
:raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
"""
url = build_url(TWITCH_API_BASE_URL + 'teams/channel', {'broadcaster_id': broadcaster_id})
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['created_at', 'updated_at'])
def get_teams(self,
team_id: Optional[str] = None,
name: Optional[str] = None) -> dict:
"""Gets information for a specific Twitch Team.\n\n
Requires User or App authentication.
One of the two optional query parameters must be specified.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference/#get-teams
:param str team_id: Team ID |default| :code:`None`
:param str name: Team Name |default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if neither team_id nor name is given or if both team_id and name are given.
:rtype: dict
"""
if team_id is None and name is None:
raise ValueError('You need to specify one of the two optional parameters.')
if team_id is not None and name is not None:
raise ValueError('Only one of the two optional parameters may be specified.')
param = {
'id': team_id,
'name': name
}
url = build_url(TWITCH_API_BASE_URL + 'teams', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['created_at', 'updated_at'])
def get_users(self,
user_ids: Optional[List[str]] = None,
logins: Optional[List[str]] = None) -> dict:
"""Gets information about one or more specified Twitch users.
Users are identified by optional user IDs and/or login name.
If neither a user ID nor a login name is specified, the authenticated user is returned.\n\n
Requires App authentication if either user_ids or logins is provided, otherwise requires User authentication.
If you have user authentication and want to get your email info, you also need the authentication scope
:const:`twitchAPI.types.AuthScope.USER_READ_EMAIL`\n
If you provide user_ids and/or logins, the maximum combined entries should not exceed 100.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-users
:param list[str] user_ids: User ID. Multiple user IDs can be specified. Limit: 100. |default| :code:`None`
:param list[str] logins: User login name. Multiple login names can be specified. Limit: 100.
|default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if more than 100 combined user_ids and logins were provided
:rtype: dict
"""
if (len(user_ids) if user_ids is not None else 0) + (len(logins) if logins is not None else 0) > 100:
raise ValueError('the total number of entries in user_ids and logins can not be more than 100')
url_params = {
'id': user_ids,
'login': logins
}
url = build_url(TWITCH_API_BASE_URL + 'users', url_params, remove_none=True, split_lists=True)
response = self.__api_get_request(url,
AuthType.USER if (user_ids is None or len(user_ids) == 0) and (
logins is None or len(logins) == 0) else AuthType.EITHER,
[])
return response.json()
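# Illustrative sketch of the auth selection above: with no arguments the call
# needs user authentication and returns the authenticated user, otherwise app
# or user authentication both work ('twitch' and the values are placeholders):
#
#   me = twitch.get_users()                      # user auth required
#   users = twitch.get_users(logins=['twitch'])  # app or user auth
#   print(users['data'][0]['id'])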
def get_users_follows(self,
after: Optional[str] = None,
first: int = 20,
from_id: Optional[str] = None,
to_id: Optional[str] = None) -> dict:
"""Gets information on follow relationships between two Twitch users.
Information returned is sorted in order, most recent follow first.\n\n
Requires App authentication.\n
You have to use at least one of the following fields: from_id, to_id
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-users-follows
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param str from_id: User ID. The request returns information about users who are being followed by
the from_id user. |default| :code:`None`
:param str to_id: User ID. The request returns information about users who are following the to_id user.
|default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or neither from_id nor to_id is provided
:rtype: dict
"""
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
if from_id is None and to_id is None:
raise ValueError('at least one of from_id and to_id needs to be set')
param = {
'after': after,
'first': first,
'from_id': from_id,
'to_id': to_id
}
url = build_url(TWITCH_API_BASE_URL + 'users/follows', param, remove_none=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['followed_at'])
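# Illustrative sketch: combining from_id and to_id answers "does A follow B?"
# ('twitch' and the IDs are placeholders):
#
#   rel = twitch.get_users_follows(from_id='1234', to_id='5678')
#   if rel['total']:
#       print('follows since', rel['data'][0]['followed_at'])
#   else:
#       print('not following')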
def update_user(self,
description: str) -> dict:
"""Updates the description of the Authenticated user.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_EDIT`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#update-user
:param str description: User’s account description
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'users', {'description': description})
result = self.__api_put_request(url, AuthType.USER, [AuthScope.USER_EDIT])
return result.json()
def get_user_extensions(self) -> dict:
"""Gets a list of all extensions (both active and inactive) for the authenticated user\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_READ_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-user-extensions
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'users/extensions/list', {})
result = self.__api_get_request(url, AuthType.USER, [AuthScope.USER_READ_BROADCAST])
return result.json()
def get_user_active_extensions(self,
user_id: Optional[str] = None) -> dict:
"""Gets information about active extensions installed by a specified user, identified by a user ID or the
authenticated user.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_READ_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-user-active-extensions
:param str user_id: ID of the user whose installed extensions will be returned. |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'users/extensions', {'user_id': user_id}, remove_none=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.USER_READ_BROADCAST])
return result.json()
def update_user_extensions(self,
data: dict) -> dict:
""""Updates the activation state, extension ID, and/or version number of installed extensions
for the authenticated user.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_EDIT_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#update-user-extensions
:param dict data: The user extension data to be written
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'users/extensions', {})
result = self.__api_put_request(url,
AuthType.USER,
[AuthScope.USER_EDIT_BROADCAST],
data=data)
return result.json()
def get_videos(self,
ids: Optional[List[str]] = None,
user_id: Optional[str] = None,
game_id: Optional[str] = None,
after: Optional[str] = None,
before: Optional[str] = None,
first: Optional[int] = 20,
language: Optional[str] = None,
period: TimePeriod = TimePeriod.ALL,
sort: SortMethod = SortMethod.TIME,
video_type: VideoType = VideoType.ALL) -> dict:
"""Gets video information by video ID (one or more), user ID (one only), or game ID (one only).\n\n
Requires App authentication.\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-videos
:param list[str] ids: ID of the video being queried. Limit: 100. |default| :code:`None`
:param str user_id: ID of the user who owns the video. |default| :code:`None`
:param str game_id: ID of the game the video is of. |default| :code:`None`
:param str after: Cursor for forward pagination |default| :code:`None`
:param str before: Cursor for backward pagination |default| :code:`None`
:param int first: Number of values to be returned when getting videos by user or game ID.
Limit: 100. |default| :code:`20`
:param str language: Language of the video being queried. |default| :code:`None`
:param ~twitchAPI.types.TimePeriod period: Period during which the video was created.
|default| :code:`TimePeriod.ALL`
:param ~twitchAPI.types.SortMethod sort: Sort order of the videos.
|default| :code:`SortMethod.TIME`
:param ~twitchAPI.types.VideoType video_type: Type of video.
|default| :code:`VideoType.ALL`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100, ids has more than 100 entries or none of ids, user_id
nor game_id is provided.
:rtype: dict
"""
if ids is None and user_id is None and game_id is None:
raise ValueError('you must specify at least one of ids, user_id or game_id')
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
if ids is not None and len(ids) > 100:
raise ValueError('ids can only have a maximum of 100 entries')
param = {
'id': ids,
'user_id': user_id,
'game_id': game_id,
'after': after,
'before': before,
'first': first,
'language': language,
'period': period.value,
'sort': sort.value,
'type': video_type.value
}
url = build_url(TWITCH_API_BASE_URL + 'videos', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
data = result.json()
data = make_fields_datetime(data, ['created_at', 'published_at'])
data = fields_to_enum(data, ['type'], VideoType, VideoType.UNKNOWN)
return data
def get_webhook_subscriptions(self,
first: Optional[int] = 20,
after: Optional[str] = None) -> dict:
"""Gets the Webhook subscriptions of the authenticated user, in order of expiration.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-webhook-subscriptions
:param int first: Number of values to be returned per page. Limit: 100. |default| :code:`20`
:param str after: Cursor for forward pagination |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
url = build_url(TWITCH_API_BASE_URL + 'webhooks/subscriptions',
{'first': first, 'after': after},
remove_none=True)
response = self.__api_get_request(url, AuthType.APP, [])
return response.json()
def get_channel_information(self,
broadcaster_id: str) -> dict:
"""Gets channel information for users.\n\n
Requires App or user authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-channel-information
:param str broadcaster_id: ID of the channel to be queried
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'channels', {'broadcaster_id': broadcaster_id})
response = self.__api_get_request(url, AuthType.EITHER, [])
return response.json()
def modify_channel_information(self,
broadcaster_id: str,
game_id: Optional[str] = None,
broadcaster_language: Optional[str] = None,
title: Optional[str] = None) -> bool:
"""Modifies channel information for users.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#modify-channel-information
:param str broadcaster_id: ID of the channel to be updated
:param str game_id: The current game ID being played on the channel |default| :code:`None`
:param str broadcaster_language: The language of the channel |default| :code:`None`
:param str title: The title of the stream |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if none of the following fields are specified: `game_id, broadcaster_language, title`
:raises ValueError: if title is an empty string
:rtype: bool
"""
if game_id is None and broadcaster_language is None and title is None:
raise ValueError('You need to specify at least one of the optional parameters')
if title is not None and len(title) == 0:
raise ValueError('title can not be an empty string')
url = build_url(TWITCH_API_BASE_URL + 'channels',
{'broadcaster_id': broadcaster_id}, remove_none=True)
body = {k: v for k, v in {'game_id': game_id,
'broadcaster_language': broadcaster_language,
'title': title}.items() if v is not None}
response = self.__api_patch_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_BROADCAST], data=body)
return response.status_code == 204
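# Illustrative sketch: only the fields passed are patched, and the method
# returns True on a 204 response ('twitch' and the ID are placeholders;
# user authentication with AuthScope.CHANNEL_MANAGE_BROADCAST is assumed):
#
#   ok = twitch.modify_channel_information('1234', title='Speedrun practice')
#   print('updated' if ok else 'update failed')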
def search_channels(self,
query: str,
first: Optional[int] = 20,
after: Optional[str] = None,
live_only: Optional[bool] = False) -> dict:
"""Returns a list of channels (users who have streamed within the past 6 months) that match the query via
channel name or description either entirely or partially.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#search-channels
:param str query: search query
:param int first: Maximum number of objects to return. Maximum: 100 |default| :code:`20`
:param str after: Cursor for forward pagination |default| :code:`None`
:param bool live_only: Filter results for live streams only. |default| :code:`False`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
url = build_url(TWITCH_API_BASE_URL + 'search/channels',
{'query': query,
'first': first,
'after': after,
'live_only': live_only}, remove_none=True)
response = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(response.json(), ['started_at'])
def search_categories(self,
query: str,
first: Optional[int] = 20,
after: Optional[str] = None) -> dict:
"""Returns a list of games or categories that match the query via name either entirely or partially.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#search-categories
:param str query: search query
:param int first: Maximum number of objects to return. Maximum: 100 |default| :code:`20`
:param str after: Cursor for forward pagination |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
url = build_url(TWITCH_API_BASE_URL + 'search/categories',
{'query': query,
'first': first,
'after': after}, remove_none=True)
response = self.__api_get_request(url, AuthType.EITHER, [])
return response.json()
def get_stream_key(self,
broadcaster_id: str) -> dict:
"""Gets the channel stream key for a user.\n\n
Requires User authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_READ_STREAM_KEY`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-stream-key
:param str broadcaster_id: User ID of the broadcaster
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'streams/key', {'broadcaster_id': broadcaster_id})
response = self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_STREAM_KEY])
return response.json()
def start_commercial(self,
broadcaster_id: str,
length: int) -> dict:
"""Starts a commercial on a specified channel.\n\n
Requires User authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_EDIT_COMMERCIAL`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#start-commercial
:param str broadcaster_id: ID of the channel requesting a commercial
    :param int length: Desired length of the commercial in seconds, one of these: [30, 60, 90, 120, 150, 180]
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if length is not one of these: `30, 60, 90, 120, 150, 180`
:rtype: dict
"""
if length not in [30, 60, 90, 120, 150, 180]:
raise ValueError('length needs to be one of these: [30, 60, 90, 120, 150, 180]')
url = build_url(TWITCH_API_BASE_URL + 'channels/commercial',
{'broadcaster_id': broadcaster_id,
'length': length})
response = self.__api_post_request(url, AuthType.USER, [AuthScope.CHANNEL_EDIT_COMMERCIAL])
return response.json()
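# Usage sketch (illustrative only): `length` must be one of the six allowed
# values, otherwise the ValueError guard above fires before any request is
# sent; the broadcaster ID is made up.
#
#     twitch.start_commercial(broadcaster_id='141981764', length=60)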
def create_user_follows(self,
from_id: str,
to_id: str,
allow_notifications: Optional[bool] = False) -> bool:
"""Adds a specified user to the followers of a specified channel.\n\n
Requires User authentication with :const:`twitchAPI.types.AuthScope.USER_EDIT_FOLLOWS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#create-user-follows
:param str from_id: User ID of the follower
:param str to_id: ID of the channel to be followed by the user
:param bool allow_notifications: If true, the user gets email or push notifications (depending on the user’s
notification settings) when the channel goes live. |default| :code:`False`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: bool
"""
url = build_url(TWITCH_API_BASE_URL + 'users/follows',
{'from_id': from_id,
'to_id': to_id,
'allow_notifications': allow_notifications}, remove_none=True)
response = self.__api_post_request(url, AuthType.USER, [AuthScope.USER_EDIT_FOLLOWS])
return response.status_code == 204
def delete_user_follows(self,
from_id: str,
to_id: str) -> bool:
"""Deletes a specified user from the followers of a specified channel.\n\n
Requires User authentication with :const:`twitchAPI.types.AuthScope.USER_EDIT_FOLLOWS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#delete-user-follows
:param str from_id: User ID of the follower
:param str to_id: Channel to be unfollowed by the user
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: bool
"""
url = build_url(TWITCH_API_BASE_URL + 'users/follows',
{'from_id': from_id,
'to_id': to_id})
response = self.__api_delete_request(url, AuthType.USER, [AuthScope.USER_EDIT_FOLLOWS])
return response.status_code == 204
def get_cheermotes(self,
broadcaster_id: str) -> dict:
"""Retrieves the list of available Cheermotes, animated emotes to which viewers can assign Bits,
to cheer in chat.\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-cheermotes
:param str broadcaster_id: ID for the broadcaster who might own specialized Cheermotes.
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'bits/cheermotes',
{'broadcaster_id': broadcaster_id})
response = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(response.json(), ['last_updated'])
def get_hype_train_events(self,
broadcaster_id: str,
first: Optional[int] = 1,
id: Optional[str] = None,
cursor: Optional[str] = None) -> dict:
"""Gets the information of the most recent Hype Train of the given channel ID.
When there is currently an active Hype Train, it returns information about that Hype Train.
When there is currently no active Hype Train, it returns information about the most recent Hype Train.
After 5 days, if no Hype Train has been active, the endpoint will return an empty response.\n\n
Requires App or User authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_READ_HYPE_TRAIN`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-hype-train-events
:param str broadcaster_id: User ID of the broadcaster.
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`1`
:param str id: The id of the wanted event, if known |default| :code:`None`
:param str cursor: Cursor for forward pagination |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user or app authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
url = build_url(TWITCH_API_BASE_URL + 'hypetrain/events',
{'broadcaster_id': broadcaster_id,
'first': first,
'id': id,
'cursor': cursor}, remove_none=True)
response = self.__api_get_request(url, AuthType.EITHER, [AuthScope.CHANNEL_READ_HYPE_TRAIN])
data = make_fields_datetime(response.json(), ['event_timestamp',
'started_at',
'expires_at',
'cooldown_end_time'])
data = fields_to_enum(data, ['type'], HypeTrainContributionMethod, HypeTrainContributionMethod.UNKNOWN)
return data
def get_drops_entitlements(self,
id: Optional[str] = None,
user_id: Optional[str] = None,
game_id: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = 20) -> dict:
"""Gets a list of entitlements for a given organization that have been granted to a game, user, or both.
    The OAuth token's client ID must have ownership of the game.\n\n
Requires App or User authentication\n
See Twitch documentation for valid parameter combinations!\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-drops-entitlements
:param str id: Unique Identifier of the entitlement |default| :code:`None`
:param str user_id: A Twitch User ID |default| :code:`None`
:param str game_id: A Twitch Game ID |default| :code:`None`
:param str after: The cursor used to fetch the next page of data. |default| :code:`None`
    :param int first: Maximum number of entitlements to return. Maximum: 1000 |default| :code:`20`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 1000
:rtype: dict
"""
if first < 1 or first > 1000:
raise ValueError('first must be between 1 and 1000')
can_use, auth_type, token, scope = self.__get_used_either_auth([])
if auth_type == AuthType.USER:
if user_id is not None:
            raise ValueError('cannot use user_id when using user authentication')
url = build_url(TWITCH_API_BASE_URL + 'entitlements/drops',
{
'id': id,
'user_id': user_id,
'game_id': game_id,
'after': after,
'first': first
}, remove_none=True)
response = self.__api_get_request(url, AuthType.EITHER, [])
data = make_fields_datetime(response.json(), ['timestamp'])
return data
def create_custom_reward(self,
broadcaster_id: str,
title: str,
prompt: str,
cost: int,
is_enabled: Optional[bool] = True,
background_color: Optional[str] = None,
is_user_input_required: Optional[bool] = False,
is_max_per_stream_enabled: Optional[bool] = False,
max_per_stream: Optional[int] = None,
is_max_per_user_per_stream_enabled: Optional[bool] = False,
max_per_user_per_stream: Optional[int] = None,
is_global_cooldown_enabled: Optional[bool] = False,
global_cooldown_seconds: Optional[int] = None,
should_redemptions_skip_request_queue: Optional[bool] = False) -> dict:
"""Creates a Custom Reward on a channel.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#create-custom-rewards
:param str broadcaster_id: ID of the broadcaster, must be same as user_id of auth token
:param str title: The title of the reward
:param str prompt: The prompt for the viewer when they are redeeming the reward
:param int cost: The cost of the reward
    :param bool is_enabled: Is the reward currently enabled, if false the reward won’t show up to viewers.
        |default| :code:`true`
:param str background_color: Custom background color for the reward.
Format: Hex with # prefix. Example: :code:`#00E5CB`. |default| :code:`None`
:param bool is_user_input_required: Does the user need to enter information when redeeming the reward.
|default| :code:`false`
:param bool is_max_per_stream_enabled: Whether a maximum per stream is enabled. |default| :code:`false`
:param int max_per_stream: The maximum number per stream if enabled |default| :code:`None`
:param bool is_max_per_user_per_stream_enabled: Whether a maximum per user per stream is enabled.
|default| :code:`false`
:param int max_per_user_per_stream: The maximum number per user per stream if enabled |default| :code:`None`
:param bool is_global_cooldown_enabled: Whether a cooldown is enabled. |default| :code:`false`
:param int global_cooldown_seconds: The cooldown in seconds if enabled |default| :code:`None`
:param bool should_redemptions_skip_request_queue: Should redemptions be set to FULFILLED status immediately
when redeemed and skip the request queue instead of the normal UNFULFILLED status.
|default| :code:`false`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ValueError: if is_global_cooldown_enabled is True but global_cooldown_seconds is not specified
:raises ValueError: if is_max_per_stream_enabled is True but max_per_stream is not specified
:raises ValueError: if is_max_per_user_per_stream_enabled is True but max_per_user_per_stream is not specified
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:raises ~twitchAPI.types.TwitchAPIException: if Channel Points are not available for the broadcaster
:rtype: dict
"""
if is_global_cooldown_enabled and global_cooldown_seconds is None:
raise ValueError('please specify global_cooldown_seconds')
if is_max_per_stream_enabled and max_per_stream is None:
raise ValueError('please specify max_per_stream')
if is_max_per_user_per_stream_enabled and max_per_user_per_stream is None:
raise ValueError('please specify max_per_user_per_stream')
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards',
{'broadcaster_id': broadcaster_id})
body = {x: y for x, y in {
'title': title,
'prompt': prompt,
'cost': cost,
'is_enabled': is_enabled,
'background_color': background_color,
'is_user_input_required': is_user_input_required,
'is_max_per_stream_enabled': is_max_per_stream_enabled,
'max_per_stream': max_per_stream,
'is_max_per_user_per_stream_enabled': is_max_per_user_per_stream_enabled,
'max_per_user_per_stream': max_per_user_per_stream,
'is_global_cooldown_enabled': is_global_cooldown_enabled,
'global_cooldown_seconds': global_cooldown_seconds,
'should_redemptions_skip_request_queue': should_redemptions_skip_request_queue
}.items() if y is not None}
result = self.__api_post_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_REDEMPTIONS], body)
if result.status_code == 403:
raise TwitchAPIException('Forbidden: Channel Points are not available for the broadcaster')
data = result.json()
return make_fields_datetime(data, ['cooldown_expires_at'])
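# Usage sketch (illustrative only): creating a cooldown-limited reward. Note
# the guards above: enabling a cooldown or a cap without supplying its value
# raises ValueError before any request is made. All values are made up.
#
#     reward = twitch.create_custom_reward(
#         broadcaster_id='141981764',
#         title='Hydrate!',
#         prompt='Make the streamer drink some water',
#         cost=500,
#         is_global_cooldown_enabled=True,
#         global_cooldown_seconds=300)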
def delete_custom_reward(self,
broadcaster_id: str,
reward_id: str):
"""Deletes a Custom Reward on a channel.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#delete-custom-rewards
:param str broadcaster_id: Provided broadcaster_id must match the user_id in the auth token
:param str reward_id: ID of the Custom Reward to delete, must match a Custom Reward on broadcaster_id’s channel.
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:raises ~twitchAPI.types.NotFoundException: if the broadcaster has no custom reward with the given id
"""
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards',
{'broadcaster_id': broadcaster_id,
'id': reward_id})
result = self.__api_delete_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_REDEMPTIONS])
if result.status_code == 200:
return
if result.status_code == 404:
raise NotFoundException()
def get_custom_reward(self,
broadcaster_id: str,
reward_id: Optional[List[str]] = None,
only_manageable_rewards: Optional[bool] = False) -> dict:
"""Returns a list of Custom Reward objects for the Custom Rewards on a channel.
Developers only have access to update and delete rewards that the same/calling client_id created.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_READ_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-custom-reward
:param str broadcaster_id: Provided broadcaster_id must match the user_id in the auth token
:param list[str] reward_id: When used, this parameter filters the results and only returns reward objects
for the Custom Rewards with matching ID. Maximum: 50 |default| :code:`None`
:param bool only_manageable_rewards: When set to true, only returns custom rewards
that the calling client_id can manage. |default| :code:`false`
:rtype: dict
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user or app authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    :raises ValueError: if reward_id is longer than 50 entries
"""
if reward_id is not None and len(reward_id) > 50:
        raise ValueError('reward_id cannot contain more than 50 entries')
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards',
{
'broadcaster_id': broadcaster_id,
'id': reward_id,
'only_manageable_rewards': only_manageable_rewards
}, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_REDEMPTIONS])
return make_fields_datetime(result.json(), ['cooldown_expires_at'])
def get_custom_reward_redemption(self,
broadcaster_id: str,
reward_id: str,
id: Optional[List[str]] = None,
status: Optional[CustomRewardRedemptionStatus] = None,
sort: Optional[SortOrder] = SortOrder.OLDEST,
after: Optional[str] = None,
first: Optional[int] = 20) -> dict:
"""Returns Custom Reward Redemption objects for a Custom Reward on a channel that was created by the
same client_id.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_READ_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-custom-reward-redemption
:param str broadcaster_id: Provided broadcaster_id must match the user_id in the auth token
:param str reward_id: When ID is not provided, this parameter returns paginated Custom
Reward Redemption objects for redemptions of the Custom Reward with ID reward_id
    :param list(str) id: When used, this param filters the results and only returns
        Custom Reward Redemption objects for the redemptions with matching ID. Maximum: 50 ids
        |default| :code:`None`
:param ~twitchAPI.types.CustomRewardRedemptionStatus status: When id is not provided, this param is required
and filters the paginated Custom Reward Redemption objects for redemptions with the matching status.
|default| :code:`None`
:param ~twitchAPI.types.SortOrder sort: Sort order of redemptions returned when getting the paginated
Custom Reward Redemption objects for a reward.
|default| :code:`SortOrder.OLDEST`
:param str after: Cursor for forward pagination. |default| :code:`None`
:param int first: Number of results to be returned when getting the paginated Custom Reward
Redemption objects for a reward. Limit: 50
|default| :code:`20`
:rtype: dict
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user or app authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if id has more than 50 entries
:raises ValueError: if first is not in range 1 to 50
"""
if first is not None and (first < 1 or first > 50):
raise ValueError('first must be in range 1 to 50')
if id is not None and len(id) > 50:
raise ValueError('id can not have more than 50 entries')
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards/redemption',
{
'broadcaster_id': broadcaster_id,
'reward_id': reward_id,
'id': id,
'status': status,
'sort': sort,
'after': after,
'first': first
}, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_REDEMPTIONS])
data = make_fields_datetime(result.json(), ['redeemed_at'])
data = fields_to_enum(data,
['status'],
CustomRewardRedemptionStatus,
CustomRewardRedemptionStatus.CANCELED)
return data
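# Usage sketch (illustrative only): when `id` is omitted, Twitch requires
# `status` to be set, so a typical call filters for unfulfilled redemptions;
# `reward` is assumed to be the dict returned by create_custom_reward above.
#
#     pending = twitch.get_custom_reward_redemption(
#         broadcaster_id='141981764',
#         reward_id=reward['data'][0]['id'],
#         status=CustomRewardRedemptionStatus.UNFULFILLED)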
def update_custom_reward(self,
broadcaster_id: str,
reward_id: str,
title: str,
prompt: str,
cost: int,
is_enabled: Optional[bool] = True,
background_color: Optional[str] = None,
is_user_input_required: Optional[bool] = False,
is_max_per_stream_enabled: Optional[bool] = False,
max_per_stream: Optional[int] = None,
is_max_per_user_per_stream_enabled: Optional[bool] = False,
max_per_user_per_stream: Optional[int] = None,
is_global_cooldown_enabled: Optional[bool] = False,
global_cooldown_seconds: Optional[int] = None,
should_redemptions_skip_request_queue: Optional[bool] = False) -> dict:
"""Updates a Custom Reward created on a channel.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#update-custom-rewards
:param str broadcaster_id: ID of the broadcaster, must be same as user_id of auth token
:param str reward_id: ID of the reward that you want to update
:param str title: The title of the reward
:param str prompt: The prompt for the viewer when they are redeeming the reward
:param int cost: The cost of the reward
    :param bool is_enabled: Is the reward currently enabled, if false the reward won’t show up to viewers.
        |default| :code:`true`
    :param str background_color: Custom background color for the reward.
        Format: Hex with # prefix. Example: :code:`#00E5CB`. |default| :code:`None`
:param bool is_user_input_required: Does the user need to enter information when redeeming the reward.
|default| :code:`false`
:param bool is_max_per_stream_enabled: Whether a maximum per stream is enabled. |default| :code:`false`
:param int max_per_stream: The maximum number per stream if enabled |default| :code:`None`
:param bool is_max_per_user_per_stream_enabled: Whether a maximum per user per stream is enabled.
|default| :code:`false`
:param int max_per_user_per_stream: The maximum number per user per stream if enabled |default| :code:`None`
:param bool is_global_cooldown_enabled: Whether a cooldown is enabled. |default| :code:`false`
:param int global_cooldown_seconds: The cooldown in seconds if enabled |default| :code:`None`
:param bool should_redemptions_skip_request_queue: Should redemptions be set to FULFILLED status immediately
when redeemed and skip the request queue instead of the normal UNFULFILLED status.
|default| :code:`false`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ValueError: if is_global_cooldown_enabled is True but global_cooldown_seconds is not specified
:raises ValueError: if is_max_per_stream_enabled is True but max_per_stream is not specified
:raises ValueError: if is_max_per_user_per_stream_enabled is True but max_per_user_per_stream is not specified
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:raises ~twitchAPI.types.TwitchAPIException: if Channel Points are not available for the broadcaster or
the custom reward belongs to a different broadcaster
:raises ValueError: if the given reward_id does not match a custom reward by the given broadcaster
:rtype: dict
"""
if is_global_cooldown_enabled and global_cooldown_seconds is None:
raise ValueError('please specify global_cooldown_seconds')
if is_max_per_stream_enabled and max_per_stream is None:
raise ValueError('please specify max_per_stream')
if is_max_per_user_per_stream_enabled and max_per_user_per_stream is None:
raise ValueError('please specify max_per_user_per_stream')
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards',
{'broadcaster_id': broadcaster_id,
'id': reward_id})
body = {x: y for x, y in {
'title': title,
'prompt': prompt,
'cost': cost,
'is_enabled': is_enabled,
'background_color': background_color,
'is_user_input_required': is_user_input_required,
'is_max_per_stream_enabled': is_max_per_stream_enabled,
'max_per_stream': max_per_stream,
'is_max_per_user_per_stream_enabled': is_max_per_user_per_stream_enabled,
'max_per_user_per_stream': max_per_user_per_stream,
'is_global_cooldown_enabled': is_global_cooldown_enabled,
'global_cooldown_seconds': global_cooldown_seconds,
'should_redemptions_skip_request_queue': should_redemptions_skip_request_queue
}.items() if y is not None}
result = self.__api_patch_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_REDEMPTIONS], body)
if result.status_code == 404:
raise ValueError('Custom reward does not exist with the given reward_id for the given broadcaster')
elif result.status_code == 403:
        raise TwitchAPIException('This custom reward was created by a different broadcaster or channel points are '
                                 'not available for the broadcaster')
data = result.json()
return make_fields_datetime(data, ['cooldown_expires_at'])
def update_redemption_status(self,
broadcaster_id: str,
reward_id: str,
redemption_ids: List[str],
status: CustomRewardRedemptionStatus) -> dict:
"""Updates the status of Custom Reward Redemption objects on a channel that
are in the :code:`UNFULFILLED` status.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_REDEMPTIONS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#update-redemption-status
:param str broadcaster_id: Provided broadcaster_id must match the user_id in the auth token.
:param str reward_id: ID of the Custom Reward the redemptions to be updated are for.
:param list(str) redemption_ids: IDs of the Custom Reward Redemption to update, must match a
Custom Reward Redemption on broadcaster_id’s channel Max: 50
:param ~twitchAPI.types.CustomRewardRedemptionStatus status: The new status to set redemptions to.
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:raises ~twitchAPI.types.TwitchAPIException: if Channel Points are not available for the broadcaster or
the custom reward belongs to a different broadcaster
:raises ValueError: if redemption_ids is longer than 50 entries
    :raises ValueError: if no custom reward redemptions with status UNFULFILLED were found for the given ids
:rtype: dict
"""
if len(redemption_ids) > 50:
        raise ValueError('redemption_ids cannot have more than 50 entries')
url = build_url(TWITCH_API_BASE_URL + 'channel_points/custom_rewards/redemptions',
{
'id': redemption_ids,
'broadcaster_id': broadcaster_id,
'reward_id': reward_id
}, split_lists=True)
body = {'status': status.value}
result = self.__api_patch_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_REDEMPTIONS], data=body)
if result.status_code == 404:
        raise ValueError('no custom reward redemptions with the specified ids were found '
'with a status of UNFULFILLED')
if result.status_code == 403:
        raise TwitchAPIException('This custom reward was created by a different broadcaster or channel points are '
                                 'not available for the broadcaster')
data = make_fields_datetime(result.json(), ['redeemed_at'])
return fields_to_enum(data, ['status'], CustomRewardRedemptionStatus, CustomRewardRedemptionStatus.CANCELED)
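# Usage sketch (illustrative only): fulfilling the redemptions fetched above,
# at most 50 IDs per call as enforced by the ValueError guard; `pending` and
# `reward` are the assumed results of the earlier sketches.
#
#     ids = [r['id'] for r in pending['data']][:50]
#     twitch.update_redemption_status(
#         broadcaster_id='141981764',
#         reward_id=reward['data'][0]['id'],
#         redemption_ids=ids,
#         status=CustomRewardRedemptionStatus.FULFILLED)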
def get_channel_editors(self,
broadcaster_id: str) -> dict:
"""Gets a list of users who have editor permissions for a specific channel.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_READ_EDITORS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-channel-editors
:param str broadcaster_id: Broadcaster’s user ID associated with the channel
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'channels/editors', {'broadcaster_id': broadcaster_id})
result = self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_EDITORS])
return make_fields_datetime(result.json(), ['created_at'])
def delete_videos(self,
video_ids: List[str]) -> Union[bool, dict]:
"""Deletes one or more videos. Videos are past broadcasts, Highlights, or uploads.
    Returns False if the user was not authorized to delete at least one of the given videos.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_VIDEOS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#delete-videos
:param list(str) video_ids: ids of the videos, Limit: 5 ids
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
    :raises ValueError: if video_ids contains more than 5 entries or is an empty list
:rtype: dict or False
"""
if video_ids is None or len(video_ids) == 0 or len(video_ids) > 5:
raise ValueError('video_ids must contain between 1 and 5 entries')
url = build_url(TWITCH_API_BASE_URL + 'videos', {'id': video_ids}, split_lists=True)
result = self.__api_delete_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_VIDEOS])
if result.status_code == 200:
return result.json()
else:
return False
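# Usage sketch (illustrative only): since the return type is Union[bool, dict],
# callers should distinguish the False sentinel from the JSON payload; the
# video IDs are made up.
#
#     result = twitch.delete_videos(['1234', '5678'])
#     if result is False:
#         print('not authorized to delete at least one of the videos')
#     else:
#         print('deleted:', result.get('data'))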
def get_user_block_list(self,
broadcaster_id: str,
first: Optional[int] = 20,
after: Optional[str] = None) -> dict:
"""Gets a specified user’s block list. The list is sorted by when the block occurred in descending order
(i.e. most recent block first).
Requires User Authentication with :const:`twitchAPI.types.AuthScope.USER_READ_BLOCKED_USERS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-user-block-list
:param str broadcaster_id: User ID for a twitch user
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param str after: Cursor for forward pagination |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:raises ValueError: if first is not in range 1 to 100
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be in range 1 to 100')
url = build_url(TWITCH_API_BASE_URL + 'users/blocks',
{'broadcaster_id': broadcaster_id,
'first': first,
'after': after}, remove_none=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.USER_READ_BLOCKED_USERS])
return result.json()
def block_user(self,
target_user_id: str,
source_context: Optional[BlockSourceContext] = None,
reason: Optional[BlockReason] = None) -> dict:
"""Blocks the specified user on behalf of the authenticated user.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.USER_MANAGE_BLOCKED_USERS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#block-user
:param str target_user_id: User ID of the user to be blocked.
:param ~twitchAPI.types.BlockSourceContext source_context: Source context for blocking the user. Optional
|default| :code:`None`
:param ~twitchAPI.types.BlockReason reason: Reason for blocking the user. Optional. |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'users/blocks',
{'target_user_id': target_user_id,
'source_context': enum_value_or_none(source_context),
'reason': enum_value_or_none(reason)},
remove_none=True)
result = self.__api_put_request(url, AuthType.USER, [AuthScope.USER_MANAGE_BLOCKED_USERS])
return result.json()
def unblock_user(self,
target_user_id: str) -> bool:
"""Unblocks the specified user on behalf of the authenticated user.
Requires User Authentication with :const:`twitchAPI.types.AuthScope.USER_MANAGE_BLOCKED_USERS`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#unblock-user
:param str target_user_id: User ID of the user to be unblocked.
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
        and a re-authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ~twitchAPI.types.TwitchAPIException: if a Query Parameter is missing or invalid
:rtype: bool
"""
url = build_url(TWITCH_API_BASE_URL + 'users/blocks', {'target_user_id': target_user_id})
result = self.__api_delete_request(url, AuthType.USER, [AuthScope.USER_MANAGE_BLOCKED_USERS])
return result.status_code == 204
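# Usage sketch (illustrative only): blocking and later unblocking a user on
# behalf of the authenticated account; the enum members are assumed to come
# from twitchAPI.types, and the user ID is made up.
#
#     twitch.block_user('9876', source_context=BlockSourceContext.CHAT,
#                       reason=BlockReason.HARASSMENT)
#     twitch.unblock_user('9876')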
|
"""Author: SKHEO, KHU"""
from gym_SBR.envs import sub_phases_batchPID_fbPID as cycle
import numpy as np
#class SBR_model(object):
def run(WV, IV, t_ratio, influent, DO_control_par, x0, DO_setpoints,
        u_batch_1, u_batch_2, u_batch_3, u_batch_4, u_batch_5, u_batch_8,
        kla_memory1, kla_memory2, kla_memory3, kla_memory4, kla_memory5, kla_memory8):
# Plant Config.
    # WV: working volume [m^3]; IV: inoculum volume [m^3], taken directly from the arguments
# phase time
t_cycle = 12 / 24 # hour -> day, 12hr
t_phs1 = (t_cycle) * t_ratio[0] # around 30 min
t_phs2 = (t_cycle) * t_ratio[1]
t_phs3 = (t_cycle) * t_ratio[2]
t_phs4 = (t_cycle) * t_ratio[3]
t_phs5 = (t_cycle) * t_ratio[4]
t_phs6 = (t_cycle) * t_ratio[5]
t_phs7 = (t_cycle) * t_ratio[6]
t_phs8 = (t_cycle) * t_ratio[7]
    t_delta = 0.002 / 24  # 0.5 / 24 # min
# flowrate
Qin = (WV - IV)
qin = Qin / (t_phs1) # per day
# solver: Runge-Kutta alg., constant integration step size: 0.002 hr
# system stabilisation
# stabilization time : 100 day
# cycle : 12hr
# num. of phases: 8
# Inflowrate - constant flow
loading = influent
# V, Si, Ss, Xi, Xs, Xbh, Xba, Xp, So, Sno, Snh, Snd, Xnd, Salk
# Buffer tank
# Parameters
Spar = [0.24, 0.67, 0.08, 0.08, 0.06] # (ref. BSM1 report Tbl. 2)
# Ya Yh fp ixb ixp
Kpar = [4.0, 10.0, 0.2, 0.5, 0.3, 0.8, 0.8, 3.0, 0.1, 0.5, 1.0, 0.05, 0.4, 0.05] # (ref. BSM1 report Tbl. 3)
# muhath Ks Koh Kno bh tag etah kh Kx muhata Knh ba Koa Ka
# result plot
x1 = []
x2 = []
x3 = []
x4 = []
x5 = []
x6 = []
x7 = []
x8 = []
x9 = []
x10 = []
x11 = []
x12 = []
x13 = []
x14 = []
t = []
history_eff = np.zeros((100, 14))
history_EQ = []
history_AE = []
history_ME = []
history_PE = []
history_SP = []
history_EC = []
t_start = 0
t_end = 0
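    # Refactoring note (a sketch only, not wired in below): each phase block
    # pushes the 14 state variables into x1..x14 one element at a time; a
    # hypothetical helper like this would keep behaviour identical while
    # removing the duplication:
    #
    #     def append_states(xs, ts, x_phase, t_save):
    #         # xs is the list [x1, ..., x14]; each row of x_phase is one state vector
    #         for i in range(len(t_save)):
    #             for k in range(14):
    #                 xs[k].append(x_phase[i][k])
    #             ts.append(t_save[i])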
for cc in range(1) :
phs1 = cycle.filling(WV, qin)
DO_control_par[3] = DO_setpoints[0]
#kla0 = DO_control_par[5]
t_start = t_end
t_end = t_start + t_phs1
t_save1, x_phs1, AE_1, ME_1 , Kla_memory1, sp_memory1, So_memory1 = phs1.sim_rxn(t_start, t_end, t_delta, x0, Spar, Kpar, DO_control_par, loading, kla_memory1,u_batch_1)
for i in range(int(len(t_save1))):
x1.append((x_phs1[i])[0])
x2.append((x_phs1[i])[1])
x3.append((x_phs1[i])[2])
x4.append((x_phs1[i])[3])
x5.append((x_phs1[i])[4])
x6.append((x_phs1[i])[5])
x7.append((x_phs1[i])[6])
x8.append((x_phs1[i])[7])
x9.append((x_phs1[i])[8])
x10.append((x_phs1[i])[9])
x11.append((x_phs1[i])[10])
x12.append((x_phs1[i])[11])
x13.append((x_phs1[i])[12])
x14.append((x_phs1[i])[13])
t.append(t_save1[i])
phs2 = cycle.rxn(WV)
#DO_control_par[5] = 0
DO_control_par[3] = DO_setpoints[1]
t_start = t_end + t_delta
t_end = t_start + t_phs2
t_save2, x_phs2, AE_2, ME_2 , Kla_memory2, sp_memory2, So_memory2 = phs2.sim_rxn(t_start, t_end, t_delta, x_phs1[-1], Spar, Kpar, DO_control_par, kla_memory2,u_batch_2)
for i in range(int(len(t_save2))):
x1.append((x_phs2[i])[0])
x2.append((x_phs2[i])[1])
x3.append((x_phs2[i])[2])
x4.append((x_phs2[i])[3])
x5.append((x_phs2[i])[4])
x6.append((x_phs2[i])[5])
x7.append((x_phs2[i])[6])
x8.append((x_phs2[i])[7])
x9.append((x_phs2[i])[8])
x10.append((x_phs2[i])[9])
x11.append((x_phs2[i])[10])
x12.append((x_phs2[i])[11])
x13.append((x_phs2[i])[12])
x14.append((x_phs2[i])[13])
t.append(t_save2[i])
phs3 = cycle.rxn(WV)
#DO_control_par[5] = 240
DO_control_par[3] = DO_setpoints[2]
t_start = t_end + t_delta
t_end = t_start + t_phs3
t_save3, x_phs3, AE_3, ME_3 , Kla_memory3, sp_memory3, So_memory3 = phs3.sim_rxn(t_start, t_end, t_delta, x_phs2[-1], Spar, Kpar, DO_control_par, kla_memory3,u_batch_3)
for i in range(int(len(t_save3))):
x1.append((x_phs3[i])[0])
x2.append((x_phs3[i])[1])
x3.append((x_phs3[i])[2])
x4.append((x_phs3[i])[3])
x5.append((x_phs3[i])[4])
x6.append((x_phs3[i])[5])
x7.append((x_phs3[i])[6])
x8.append((x_phs3[i])[7])
x9.append((x_phs3[i])[8])
x10.append((x_phs3[i])[9])
x11.append((x_phs3[i])[10])
x12.append((x_phs3[i])[11])
x13.append((x_phs3[i])[12])
x14.append((x_phs3[i])[13])
t.append(t_save3[i])
phs4 = cycle.rxn(WV)
#DO_control_par[5] = 0
DO_control_par[3] = DO_setpoints[3]
t_start = t_end + t_delta
t_end = t_start + t_phs4
t_save4, x_phs4, AE_4, ME_4 , Kla_memory4, sp_memory4, So_memory4 = phs4.sim_rxn(t_start, t_end, t_delta, x_phs3[-1], Spar, Kpar, DO_control_par, kla_memory4,u_batch_4)
for i in range(int(len(t_save4))):
x1.append((x_phs4[i])[0])
x2.append((x_phs4[i])[1])
x3.append((x_phs4[i])[2])
x4.append((x_phs4[i])[3])
x5.append((x_phs4[i])[4])
x6.append((x_phs4[i])[5])
x7.append((x_phs4[i])[6])
x8.append((x_phs4[i])[7])
x9.append((x_phs4[i])[8])
x10.append((x_phs4[i])[9])
x11.append((x_phs4[i])[10])
x12.append((x_phs4[i])[11])
x13.append((x_phs4[i])[12])
x14.append((x_phs4[i])[13])
t.append(t_save4[i])
phs5 = cycle.rxn(WV)
#DO_control_par[5] = 240
DO_control_par[3] = DO_setpoints[4]
t_start = t_end + t_delta
t_end = t_start + t_phs5
t_save5, x_phs5, AE_5, ME_5 , Kla_memory5, sp_memory5, So_memory5 = phs5.sim_rxn(t_start, t_end, t_delta, x_phs4[-1], Spar, Kpar, DO_control_par, kla_memory5,u_batch_5)
for i in range(int(len(t_save5))):
x1.append((x_phs5[i])[0])
x2.append((x_phs5[i])[1])
x3.append((x_phs5[i])[2])
x4.append((x_phs5[i])[3])
x5.append((x_phs5[i])[4])
x6.append((x_phs5[i])[5])
x7.append((x_phs5[i])[6])
x8.append((x_phs5[i])[7])
x9.append((x_phs5[i])[8])
x10.append((x_phs5[i])[9])
x11.append((x_phs5[i])[10])
x12.append((x_phs5[i])[11])
x13.append((x_phs5[i])[12])
x14.append((x_phs5[i])[13])
t.append(t_save5[i])
phs6 = cycle.settling()
t_start = t_end + t_delta
t_end = t_start + t_phs6
t_save6, Xnd, sX, Xf = phs6.sim_settling(t_start,t_end,t_delta,x_phs5[-1])
"""
List of variables :
0=V, 1=Si, 2=Ss, 3=Xi, 4=Xs, 5=Xbh, 6=Xba, 7=Xp, 8=So, 9=Sno, 10=Snh, 11=Snd, 12=Xnd, 13=Salk
(ref. BSM1 report Tbl. 1)
"""
for i in range(int(len(t_save6))):
x1.append((x_phs5[-1])[0])
x2.append((x_phs5[-1])[1])
x3.append((x_phs5[-1])[2])
x4.append((x_phs5[-1])[3])
x5.append((x_phs5[-1])[4])
x6.append((x_phs5[-1])[5])
x7.append((x_phs5[-1])[6])
x8.append((x_phs5[-1])[7])
x9.append((x_phs5[-1])[8])
x10.append((x_phs5[-1])[9])
x11.append((x_phs5[-1])[10])
x12.append((x_phs5[-1])[11])
x13.append((x_phs5[-1])[12])
x14.append((x_phs5[-1])[13])
t.append(t_save6[i])
    # fractions: convert sludge to particulate conc.
f_xs = x5[-1]/Xf
f_xp = x8[-1]/Xf
f_xi = x4[-1]/Xf
f_xbh = x6[-1]/Xf
f_xba = x7[-1]/Xf
# Waste sludge conc.
w_Xs = f_xs * sX[0]
w_Xp = f_xp * sX[0]
w_Xi = f_xi * sX[0]
w_Xbh = f_xbh * sX[0]
w_Xba = f_xba * sX[0]
biomass_setpoint = 5400 #2700*(x_phs5[-1][3] + x_phs5[-1][4] + x_phs5[-1][5] + x_phs5[-1][6] + x_phs5[-1][7])/(x_phs5[-1][5] + x_phs5[-1][6])
biomass_eff = sX[-1]#*(f_xbh+f_xba)
biomass_w = sX[0]#w_Xbh + w_Xba
#Qw = ((x6[-1] + x7[-1])*WV - biomass_setpoint*(WV-qin*t_phs1) - qin*t_phs1* biomass_eff)/(biomass_w-biomass_eff)
Qw = (sum(sX)*WV/10 - biomass_setpoint*(WV-qin*t_phs1) - qin*t_phs1* biomass_eff)/(biomass_w-biomass_eff)
Qeff = qin*t_phs1 - Qw
qeff = Qeff / t_phs7
qw = Qw/ t_phs7
phs7 = cycle.drawing()
t_start = t_end + t_delta
t_end = t_start + t_phs7
t_save7, x_phs7 , PE_7, SP_7 = phs7.sim_drawing(t_start,t_end,t_delta,x_phs5[-1], sX,Xf, Qin, Qeff,Qw)
for i in range(int(len(t_save7))):
x1.append((x_phs7[0]))
x2.append((x_phs7[1]))
x3.append((x_phs7[2]))
x4.append((x_phs7[3]))
x5.append((x_phs7[4]))
x6.append((x_phs7[5]))
x7.append((x_phs7[6]))
x8.append((x_phs7[7]))
x9.append((x_phs7[8]))
x10.append((x_phs7[9]))
x11.append((x_phs7[10]))
x12.append((x_phs7[11]))
x13.append((x_phs7[12]))
x14.append((x_phs7[13]))
t.append(t_save7[i])
# effluent information
eq, eff = phs7.cal_eq(x_phs7,sX, Xf, x_phs5[-1], Spar, Qeff)
history_eff[cc] = eff
history_EQ.append(eq)
phs8 = cycle.rxn(WV)
#DO_control_par[5] = 240
DO_control_par[3] =DO_setpoints[7]
t_start = t_end + t_delta
t_end = t_start + t_phs8
t_save8, x_phs8, AE_8, ME_8, Kla_memory8 , sp_memory8, So_memory8 = phs8.sim_rxn(t_start, t_end, t_delta, x_phs7, Spar, Kpar, DO_control_par, kla_memory8,u_batch_8)
for i in range(int(len(t_save8))):
x1.append((x_phs8[i])[0])
x2.append((x_phs8[i])[1])
x3.append((x_phs8[i])[2])
x4.append((x_phs8[i])[3])
x5.append((x_phs8[i])[4])
x6.append((x_phs8[i])[5])
x7.append((x_phs8[i])[6])
x8.append((x_phs8[i])[7])
x9.append((x_phs8[i])[8])
x10.append((x_phs8[i])[9])
x11.append((x_phs8[i])[10])
x12.append((x_phs8[i])[11])
x13.append((x_phs8[i])[12])
x14.append((x_phs8[i])[13])
t.append(t_save8[i])
    x0 = [x1[-1], x2[-1], x3[-1], x4[-1], x5[-1], x6[-1], x7[-1], x8[-1], x9[-1], x10[-1], x11[-1], x12[-1], x13[-1], x14[-1]]  # final values of this cycle, used as the initial state of the next cycle
x = np.vstack([x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14])
return t,x,x0, t_save1, sp_memory1, So_memory1, t_save2, sp_memory2, So_memory2, t_save3, sp_memory3, So_memory3, \
t_save4, sp_memory4, So_memory4, t_save5, sp_memory5, So_memory5, t_save8, sp_memory8, So_memory8, \
Kla_memory1,Kla_memory2,Kla_memory3,Kla_memory4,Kla_memory5,Kla_memory8, \
Qeff,Qw
|
# Program to display the Fibonacci sequence up to n-th term
nterms = int(input("How many terms? "))
n1, n2 = 0, 1
count = 0
if nterms <= 0:
print("Please enter a positive integer")
elif nterms == 1:
print("Fibonacci sequence upto",nterms,":")
print(n1)
else:
print("Fibonacci sequence:")
while count < nterms:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
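# An equivalent, more idiomatic sketch using a generator (same meaning of
# "n-th term" as the loop above; shown for comparison only):
#
#     def fib(n):
#         a, b = 0, 1
#         for _ in range(n):
#             yield a
#             a, b = b, a + b
#
#     for term in fib(nterms):
#         print(term)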
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: anki_vector/messaging/shared.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from anki_vector.messaging import behavior_pb2 as anki__vector_dot_messaging_dot_behavior__pb2
from anki_vector.messaging import cube_pb2 as anki__vector_dot_messaging_dot_cube__pb2
from anki_vector.messaging import messages_pb2 as anki__vector_dot_messaging_dot_messages__pb2
from anki_vector.messaging import settings_pb2 as anki__vector_dot_messaging_dot_settings__pb2
from anki_vector.messaging import extensions_pb2 as anki__vector_dot_messaging_dot_extensions__pb2
from anki_vector.messaging import response_status_pb2 as anki__vector_dot_messaging_dot_response__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='anki_vector/messaging/shared.proto',
package='Anki.Vector.external_interface',
syntax='proto3',
serialized_pb=_b('\n\"anki_vector/messaging/shared.proto\x12\x1e\x41nki.Vector.external_interface\x1a$anki_vector/messaging/behavior.proto\x1a anki_vector/messaging/cube.proto\x1a$anki_vector/messaging/messages.proto\x1a$anki_vector/messaging/settings.proto\x1a&anki_vector/messaging/extensions.proto\x1a+anki_vector/messaging/response_status.proto\"J\n\x16ProtocolVersionRequest\x12\x16\n\x0e\x63lient_version\x18\x01 \x01(\x03\x12\x18\n\x10min_host_version\x18\x02 \x01(\x03\"\xa7\x01\n\x17ProtocolVersionResponse\x12N\n\x06result\x18\x01 \x01(\x0e\x32>.Anki.Vector.external_interface.ProtocolVersionResponse.Result\x12\x14\n\x0chost_version\x18\x02 \x01(\x03\"&\n\x06Result\x12\x0f\n\x0bUNSUPPORTED\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\"h\n\x12\x43onnectionResponse\x12>\n\x06status\x18\x01 \x01(\x0b\x32..Anki.Vector.external_interface.ResponseStatus\x12\x12\n\nis_primary\x18\x02 \x01(\x08\"\x86\x08\n\x05\x45vent\x12P\n\x13time_stamped_status\x18\x01 \x01(\x0b\x32\x31.Anki.Vector.external_interface.TimeStampedStatusH\x00\x12=\n\twake_word\x18\x03 \x01(\x0b\x32(.Anki.Vector.external_interface.WakeWordH\x00\x12P\n\x13robot_observed_face\x18\x05 \x01(\x0b\x32\x31.Anki.Vector.external_interface.RobotObservedFaceH\x00\x12\x64\n\x1erobot_changed_observed_face_id\x18\x06 \x01(\x0b\x32:.Anki.Vector.external_interface.RobotChangedObservedFaceIDH\x00\x12\x43\n\x0cobject_event\x18\x07 \x01(\x0b\x32+.Anki.Vector.external_interface.ObjectEventH\x00\x12K\n\x10stimulation_info\x18\x08 \x01(\x0b\x32/.Anki.Vector.external_interface.StimulationInfoH\x00\x12\x41\n\x0bphoto_taken\x18\t \x01(\x0b\x32*.Anki.Vector.external_interface.PhotoTakenH\x00\x12\x41\n\x0brobot_state\x18\n \x01(\x0b\x32*.Anki.Vector.external_interface.RobotStateH\x00\x12\x43\n\x0c\x63ube_battery\x18\x0b \x01(\x0b\x32+.Anki.Vector.external_interface.CubeBatteryH\x00\x12\x43\n\nkeep_alive\x18\x0c \x01(\x0b\x32-.Anki.Vector.external_interface.KeepAlivePingH\x00\x12Q\n\x13\x63onnection_response\x18\r \x01(\x0b\x32\x32.Anki.Vector.external_interface.ConnectionResponseH\x00\x12R\n\x14mirror_mode_disabled\x18\x10 \x01(\x0b\x32\x32.Anki.Vector.external_interface.MirrorModeDisabledH\x00\x12]\n\x1avision_modes_auto_disabled\x18\x11 \x01(\x0b\x32\x37.Anki.Vector.external_interface.VisionModesAutoDisabledH\x00\x42\x0c\n\nevent_type\"\x1a\n\nFilterList\x12\x0c\n\x04list\x18\x01 \x03(\t\"\xb6\x01\n\x0c\x45ventRequest\x12@\n\nwhite_list\x18\x01 \x01(\x0b\x32*.Anki.Vector.external_interface.FilterListH\x00\x12@\n\nblack_list\x18\x02 \x01(\x0b\x32*.Anki.Vector.external_interface.FilterListH\x00\x12\x15\n\rconnection_id\x18\x03 \x01(\tB\x0b\n\tlist_type\"\x8b\x01\n\rEventResponse\x12>\n\x06status\x18\x01 \x01(\x0b\x32..Anki.Vector.external_interface.ResponseStatus\x12\x34\n\x05\x65vent\x18\x02 \x01(\x0b\x32%.Anki.Vector.external_interface.Event:\x04\x80\xa6\x1d\x01\"I\n\x19UserAuthenticationRequest\x12\x17\n\x0fuser_session_id\x18\x01 \x01(\x0c\x12\x13\n\x0b\x63lient_name\x18\x02 \x01(\x0c\"\xf0\x01\n\x1aUserAuthenticationResponse\x12>\n\x06status\x18\x01 \x01(\x0b\x32..Anki.Vector.external_interface.ResponseStatus\x12M\n\x04\x63ode\x18\x02 \x01(\x0e\x32?.Anki.Vector.external_interface.UserAuthenticationResponse.Code\x12\x19\n\x11\x63lient_token_guid\x18\x03 \x01(\x0c\"(\n\x04\x43ode\x12\x10\n\x0cUNAUTHORIZED\x10\x00\x12\x0e\n\nAUTHORIZED\x10\x01\x62\x06proto3')
,
dependencies=[anki__vector_dot_messaging_dot_behavior__pb2.DESCRIPTOR,anki__vector_dot_messaging_dot_cube__pb2.DESCRIPTOR,anki__vector_dot_messaging_dot_messages__pb2.DESCRIPTOR,anki__vector_dot_messaging_dot_settings__pb2.DESCRIPTOR,anki__vector_dot_messaging_dot_extensions__pb2.DESCRIPTOR,anki__vector_dot_messaging_dot_response__status__pb2.DESCRIPTOR,])
_PROTOCOLVERSIONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='Anki.Vector.external_interface.ProtocolVersionResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=509,
serialized_end=547,
)
_sym_db.RegisterEnumDescriptor(_PROTOCOLVERSIONRESPONSE_RESULT)
_USERAUTHENTICATIONRESPONSE_CODE = _descriptor.EnumDescriptor(
name='Code',
full_name='Anki.Vector.external_interface.UserAuthenticationResponse.Code',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNAUTHORIZED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTHORIZED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2319,
serialized_end=2359,
)
_sym_db.RegisterEnumDescriptor(_USERAUTHENTICATIONRESPONSE_CODE)
_PROTOCOLVERSIONREQUEST = _descriptor.Descriptor(
name='ProtocolVersionRequest',
full_name='Anki.Vector.external_interface.ProtocolVersionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='client_version', full_name='Anki.Vector.external_interface.ProtocolVersionRequest.client_version', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_host_version', full_name='Anki.Vector.external_interface.ProtocolVersionRequest.min_host_version', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=303,
serialized_end=377,
)
_PROTOCOLVERSIONRESPONSE = _descriptor.Descriptor(
name='ProtocolVersionResponse',
full_name='Anki.Vector.external_interface.ProtocolVersionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='Anki.Vector.external_interface.ProtocolVersionResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='host_version', full_name='Anki.Vector.external_interface.ProtocolVersionResponse.host_version', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_PROTOCOLVERSIONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=380,
serialized_end=547,
)
_CONNECTIONRESPONSE = _descriptor.Descriptor(
name='ConnectionResponse',
full_name='Anki.Vector.external_interface.ConnectionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Anki.Vector.external_interface.ConnectionResponse.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_primary', full_name='Anki.Vector.external_interface.ConnectionResponse.is_primary', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=549,
serialized_end=653,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='Anki.Vector.external_interface.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time_stamped_status', full_name='Anki.Vector.external_interface.Event.time_stamped_status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wake_word', full_name='Anki.Vector.external_interface.Event.wake_word', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='robot_observed_face', full_name='Anki.Vector.external_interface.Event.robot_observed_face', index=2,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='robot_changed_observed_face_id', full_name='Anki.Vector.external_interface.Event.robot_changed_observed_face_id', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_event', full_name='Anki.Vector.external_interface.Event.object_event', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stimulation_info', full_name='Anki.Vector.external_interface.Event.stimulation_info', index=5,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='photo_taken', full_name='Anki.Vector.external_interface.Event.photo_taken', index=6,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='robot_state', full_name='Anki.Vector.external_interface.Event.robot_state', index=7,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cube_battery', full_name='Anki.Vector.external_interface.Event.cube_battery', index=8,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keep_alive', full_name='Anki.Vector.external_interface.Event.keep_alive', index=9,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connection_response', full_name='Anki.Vector.external_interface.Event.connection_response', index=10,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror_mode_disabled', full_name='Anki.Vector.external_interface.Event.mirror_mode_disabled', index=11,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vision_modes_auto_disabled', full_name='Anki.Vector.external_interface.Event.vision_modes_auto_disabled', index=12,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='event_type', full_name='Anki.Vector.external_interface.Event.event_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=656,
serialized_end=1686,
)
_FILTERLIST = _descriptor.Descriptor(
name='FilterList',
full_name='Anki.Vector.external_interface.FilterList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='list', full_name='Anki.Vector.external_interface.FilterList.list', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1688,
serialized_end=1714,
)
_EVENTREQUEST = _descriptor.Descriptor(
name='EventRequest',
full_name='Anki.Vector.external_interface.EventRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='white_list', full_name='Anki.Vector.external_interface.EventRequest.white_list', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='black_list', full_name='Anki.Vector.external_interface.EventRequest.black_list', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connection_id', full_name='Anki.Vector.external_interface.EventRequest.connection_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='list_type', full_name='Anki.Vector.external_interface.EventRequest.list_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=1717,
serialized_end=1899,
)
_EVENTRESPONSE = _descriptor.Descriptor(
name='EventResponse',
full_name='Anki.Vector.external_interface.EventResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Anki.Vector.external_interface.EventResponse.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='event', full_name='Anki.Vector.external_interface.EventResponse.event', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\200\246\035\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1902,
serialized_end=2041,
)
_USERAUTHENTICATIONREQUEST = _descriptor.Descriptor(
name='UserAuthenticationRequest',
full_name='Anki.Vector.external_interface.UserAuthenticationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_session_id', full_name='Anki.Vector.external_interface.UserAuthenticationRequest.user_session_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='client_name', full_name='Anki.Vector.external_interface.UserAuthenticationRequest.client_name', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2043,
serialized_end=2116,
)
_USERAUTHENTICATIONRESPONSE = _descriptor.Descriptor(
name='UserAuthenticationResponse',
full_name='Anki.Vector.external_interface.UserAuthenticationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Anki.Vector.external_interface.UserAuthenticationResponse.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='Anki.Vector.external_interface.UserAuthenticationResponse.code', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='client_token_guid', full_name='Anki.Vector.external_interface.UserAuthenticationResponse.client_token_guid', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_USERAUTHENTICATIONRESPONSE_CODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2119,
serialized_end=2359,
)
_PROTOCOLVERSIONRESPONSE.fields_by_name['result'].enum_type = _PROTOCOLVERSIONRESPONSE_RESULT
_PROTOCOLVERSIONRESPONSE_RESULT.containing_type = _PROTOCOLVERSIONRESPONSE
_CONNECTIONRESPONSE.fields_by_name['status'].message_type = anki__vector_dot_messaging_dot_response__status__pb2._RESPONSESTATUS
_EVENT.fields_by_name['time_stamped_status'].message_type = anki__vector_dot_messaging_dot_messages__pb2._TIMESTAMPEDSTATUS
_EVENT.fields_by_name['wake_word'].message_type = anki__vector_dot_messaging_dot_messages__pb2._WAKEWORD
_EVENT.fields_by_name['robot_observed_face'].message_type = anki__vector_dot_messaging_dot_messages__pb2._ROBOTOBSERVEDFACE
_EVENT.fields_by_name['robot_changed_observed_face_id'].message_type = anki__vector_dot_messaging_dot_messages__pb2._ROBOTCHANGEDOBSERVEDFACEID
_EVENT.fields_by_name['object_event'].message_type = anki__vector_dot_messaging_dot_cube__pb2._OBJECTEVENT
_EVENT.fields_by_name['stimulation_info'].message_type = anki__vector_dot_messaging_dot_messages__pb2._STIMULATIONINFO
_EVENT.fields_by_name['photo_taken'].message_type = anki__vector_dot_messaging_dot_messages__pb2._PHOTOTAKEN
_EVENT.fields_by_name['robot_state'].message_type = anki__vector_dot_messaging_dot_messages__pb2._ROBOTSTATE
_EVENT.fields_by_name['cube_battery'].message_type = anki__vector_dot_messaging_dot_messages__pb2._CUBEBATTERY
_EVENT.fields_by_name['keep_alive'].message_type = anki__vector_dot_messaging_dot_messages__pb2._KEEPALIVEPING
_EVENT.fields_by_name['connection_response'].message_type = _CONNECTIONRESPONSE
_EVENT.fields_by_name['mirror_mode_disabled'].message_type = anki__vector_dot_messaging_dot_messages__pb2._MIRRORMODEDISABLED
_EVENT.fields_by_name['vision_modes_auto_disabled'].message_type = anki__vector_dot_messaging_dot_messages__pb2._VISIONMODESAUTODISABLED
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['time_stamped_status'])
_EVENT.fields_by_name['time_stamped_status'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['wake_word'])
_EVENT.fields_by_name['wake_word'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['robot_observed_face'])
_EVENT.fields_by_name['robot_observed_face'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['robot_changed_observed_face_id'])
_EVENT.fields_by_name['robot_changed_observed_face_id'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['object_event'])
_EVENT.fields_by_name['object_event'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['stimulation_info'])
_EVENT.fields_by_name['stimulation_info'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['photo_taken'])
_EVENT.fields_by_name['photo_taken'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['robot_state'])
_EVENT.fields_by_name['robot_state'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['cube_battery'])
_EVENT.fields_by_name['cube_battery'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['keep_alive'])
_EVENT.fields_by_name['keep_alive'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['connection_response'])
_EVENT.fields_by_name['connection_response'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['mirror_mode_disabled'])
_EVENT.fields_by_name['mirror_mode_disabled'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENT.oneofs_by_name['event_type'].fields.append(
_EVENT.fields_by_name['vision_modes_auto_disabled'])
_EVENT.fields_by_name['vision_modes_auto_disabled'].containing_oneof = _EVENT.oneofs_by_name['event_type']
_EVENTREQUEST.fields_by_name['white_list'].message_type = _FILTERLIST
_EVENTREQUEST.fields_by_name['black_list'].message_type = _FILTERLIST
_EVENTREQUEST.oneofs_by_name['list_type'].fields.append(
_EVENTREQUEST.fields_by_name['white_list'])
_EVENTREQUEST.fields_by_name['white_list'].containing_oneof = _EVENTREQUEST.oneofs_by_name['list_type']
_EVENTREQUEST.oneofs_by_name['list_type'].fields.append(
_EVENTREQUEST.fields_by_name['black_list'])
_EVENTREQUEST.fields_by_name['black_list'].containing_oneof = _EVENTREQUEST.oneofs_by_name['list_type']
_EVENTRESPONSE.fields_by_name['status'].message_type = anki__vector_dot_messaging_dot_response__status__pb2._RESPONSESTATUS
_EVENTRESPONSE.fields_by_name['event'].message_type = _EVENT
_USERAUTHENTICATIONRESPONSE.fields_by_name['status'].message_type = anki__vector_dot_messaging_dot_response__status__pb2._RESPONSESTATUS
_USERAUTHENTICATIONRESPONSE.fields_by_name['code'].enum_type = _USERAUTHENTICATIONRESPONSE_CODE
_USERAUTHENTICATIONRESPONSE_CODE.containing_type = _USERAUTHENTICATIONRESPONSE
DESCRIPTOR.message_types_by_name['ProtocolVersionRequest'] = _PROTOCOLVERSIONREQUEST
DESCRIPTOR.message_types_by_name['ProtocolVersionResponse'] = _PROTOCOLVERSIONRESPONSE
DESCRIPTOR.message_types_by_name['ConnectionResponse'] = _CONNECTIONRESPONSE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['FilterList'] = _FILTERLIST
DESCRIPTOR.message_types_by_name['EventRequest'] = _EVENTREQUEST
DESCRIPTOR.message_types_by_name['EventResponse'] = _EVENTRESPONSE
DESCRIPTOR.message_types_by_name['UserAuthenticationRequest'] = _USERAUTHENTICATIONREQUEST
DESCRIPTOR.message_types_by_name['UserAuthenticationResponse'] = _USERAUTHENTICATIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProtocolVersionRequest = _reflection.GeneratedProtocolMessageType('ProtocolVersionRequest', (_message.Message,), dict(
DESCRIPTOR = _PROTOCOLVERSIONREQUEST,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.ProtocolVersionRequest)
))
_sym_db.RegisterMessage(ProtocolVersionRequest)
ProtocolVersionResponse = _reflection.GeneratedProtocolMessageType('ProtocolVersionResponse', (_message.Message,), dict(
DESCRIPTOR = _PROTOCOLVERSIONRESPONSE,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.ProtocolVersionResponse)
))
_sym_db.RegisterMessage(ProtocolVersionResponse)
ConnectionResponse = _reflection.GeneratedProtocolMessageType('ConnectionResponse', (_message.Message,), dict(
DESCRIPTOR = _CONNECTIONRESPONSE,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.ConnectionResponse)
))
_sym_db.RegisterMessage(ConnectionResponse)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.Event)
))
_sym_db.RegisterMessage(Event)
FilterList = _reflection.GeneratedProtocolMessageType('FilterList', (_message.Message,), dict(
DESCRIPTOR = _FILTERLIST,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.FilterList)
))
_sym_db.RegisterMessage(FilterList)
EventRequest = _reflection.GeneratedProtocolMessageType('EventRequest', (_message.Message,), dict(
DESCRIPTOR = _EVENTREQUEST,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.EventRequest)
))
_sym_db.RegisterMessage(EventRequest)
EventResponse = _reflection.GeneratedProtocolMessageType('EventResponse', (_message.Message,), dict(
DESCRIPTOR = _EVENTRESPONSE,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.EventResponse)
))
_sym_db.RegisterMessage(EventResponse)
UserAuthenticationRequest = _reflection.GeneratedProtocolMessageType('UserAuthenticationRequest', (_message.Message,), dict(
DESCRIPTOR = _USERAUTHENTICATIONREQUEST,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.UserAuthenticationRequest)
))
_sym_db.RegisterMessage(UserAuthenticationRequest)
UserAuthenticationResponse = _reflection.GeneratedProtocolMessageType('UserAuthenticationResponse', (_message.Message,), dict(
DESCRIPTOR = _USERAUTHENTICATIONRESPONSE,
__module__ = 'anki_vector.messaging.shared_pb2'
# @@protoc_insertion_point(class_scope:Anki.Vector.external_interface.UserAuthenticationResponse)
))
_sym_db.RegisterMessage(UserAuthenticationResponse)
_EVENTRESPONSE.has_options = True
_EVENTRESPONSE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\200\246\035\001'))
# @@protoc_insertion_point(module_scope)
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import re
from .generic_manager import GenericManager, GenericManagerProperties
from .apt_parser import AptParser
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons import shell
from resource_management.core.logger import Logger
def replace_underscores(function_to_decorate):
def wrapper(*args, **kwargs):
self = args[0]
name = args[1].replace("_", "-")
return function_to_decorate(self, name, *args[2:], **kwargs)
return wrapper
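# A minimal sketch of what @replace_underscores does (call names hypothetical):
#
#   @replace_underscores
#   def install_package(self, name, context): ...
#
#   manager.install_package("hadoop_2_6_client", ctx)
#   # -> install_package receives name="hadoop-2-6-client" (Debian-style naming)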
class AptManagerProperties(GenericManagerProperties):
"""
Class to keep all Package-manager depended properties
"""
locked_output = "Unable to lock the administration directory"
repo_error = "Failure when receiving data from the peer"
repo_manager_bin = "/usr/bin/apt-get"
repo_cache_bin = "/usr/bin/apt-cache"
pkg_manager_bin = "/usr/bin/dpkg"
repo_update_cmd = [repo_manager_bin, 'update', '-qq']
available_packages_cmd = [repo_cache_bin, "dump"]
installed_packages_cmd = [pkg_manager_bin, "-l"]
repo_definition_location = "/etc/apt/sources.list.d"
install_cmd = {
True: [repo_manager_bin, '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install'],
False: [repo_manager_bin, '-q', '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install']
}
remove_cmd = {
True: [repo_manager_bin, '-y', 'remove'],
False: [repo_manager_bin, '-y', '-q', 'remove']
}
verify_dependency_cmd = [repo_manager_bin, '-qq', 'check']
install_cmd_env = {'DEBIAN_FRONTEND': 'noninteractive'}
check_cmd = pkg_manager_bin + " --get-selections | grep -v deinstall | awk '{print $1}' | grep ^%s$"
repo_url_exclude = "ubuntu.com"
configuration_dump_cmd = [AMBARI_SUDO_BINARY, "apt-config", "dump"]
class AptManager(GenericManager):
def get_installed_package_version(self, package_name):
r = shell.subprocess_executor("dpkg -s {0} | grep Version | awk '{{print $2}}'".format(package_name))
return r.out.strip(os.linesep)
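  # For example (version string illustrative, depends on the host):
  #   manager.get_installed_package_version("openssl")  # -> "1.1.1f-1ubuntu2"
  # i.e. the "Version:" field reported by `dpkg -s`, with the trailing newline stripped.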
@property
def properties(self):
return AptManagerProperties
def installed_packages(self, pkg_names=None, repo_filter=None):
"""
Return all installed packages in the system except packages in REPO_URL_EXCLUDE
:type pkg_names list|set
:type repo_filter str|None
:return formatted list of packages
"""
packages = []
available_packages = self._available_packages_dict(pkg_names, repo_filter)
with shell.process_executor(self.properties.installed_packages_cmd, error_callback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
      for package, version in AptParser.packages_installed_reader(output):
        if package in available_packages:
          packages.append(available_packages[package])
        else:
          packages.append([package, version, "installed"])  # the package does not belong to any known repo
return packages
def _available_packages(self, pkg_names=None, repo_filter=None):
"""
    Yield the available packages, optionally filtered by name
:type pkg_names list|set
:type repo_filter str|None
"""
with shell.process_executor(self.properties.available_packages_cmd, error_callback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
for pkg_item in AptParser.packages_reader(output):
if repo_filter and repo_filter not in pkg_item[2]:
continue
if self.properties.repo_url_exclude in pkg_item[2]:
continue
if pkg_names and pkg_item[0] not in pkg_names:
continue
yield pkg_item
def _available_packages_dict(self, pkg_names=None, repo_filter=None):
"""
    Same as _available_packages, but the result is returned as a dict keyed by package name
:type pkg_names list|set
:type repo_filter str|None
"""
result = {}
for item in self._available_packages(pkg_names, repo_filter):
result[item[0]] = item
return result
def available_packages(self, pkg_names=None, repo_filter=None):
"""
    Return a list of the available packages, optionally filtered by name
:type pkg_names list|set
:type repo_filter str|None
"""
    return list(self._available_packages(pkg_names, repo_filter))
def all_packages(self, pkg_names=None, repo_filter=None):
return self.available_packages(pkg_names, repo_filter)
def get_available_packages_in_repos(self, repos):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repos resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
filtered_packages = []
packages = self.available_packages()
repo_ids = []
for repo in repos.items:
repo_ids.append(repo.base_url.replace("http://", "").replace("/", "_"))
if repos.feat.scoped:
Logger.info("Looking for matching packages in the following repositories: {0}".format(", ".join(repo_ids)))
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
return filtered_packages
else:
Logger.info("Packages will be queried using all available repositories on the system.")
      # This is the case where the hosts are marked as sysprepped, but we
      # search the on-system repos anyway. The URL specified in Ambari must
      # match the one in the list file for this to work.
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
      if filtered_packages:
        Logger.info("Found matching packages: {0}".format(filtered_packages))
return filtered_packages
else:
return [package[0] for package in packages]
def package_manager_configuration(self):
"""
Reading apt configuration
:return dict with apt properties
"""
with shell.process_executor(self.properties.configuration_dump_cmd, error_callback=self._executor_error_handler) as output:
configuration = list(AptParser.config_reader(output))
return dict(configuration)
def verify_dependencies(self):
"""
    Verify that there are no dependency issues in the package manager. Dependency issues can appear after an aborted or
    terminated package installation, or after manual modification of the package list leaves packages on the host in an
    invalid state
    :return True if no dependency issues were found, False otherwise
"""
r = shell.subprocess_executor(self.properties.verify_dependency_cmd)
pattern = re.compile("has missing dependency|E:")
if r.code or (r.out and pattern.search(r.out)):
err_msg = Logger.filter_text("Failed to verify package dependencies. Execution of '%s' returned %s. %s" % (self.properties.verify_dependency_cmd, r.code, r.out))
Logger.error(err_msg)
return False
return True
@replace_underscores
def install_package(self, name, context):
"""
Install package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
from resource_management.core import sudo
apt_sources_list_tmp_dir = None
if not name:
raise ValueError("Installation command was executed with no package name")
elif context.is_upgrade or context.use_repos or not self._check_existence(name):
cmd = self.properties.install_cmd[context.log_output]
copied_sources_files = []
is_tmp_dir_created = False
if context.use_repos:
if 'base' in context.use_repos:
use_repos = set([v for k, v in context.use_repos.items() if k != 'base'])
else:
cmd = cmd + ['-o', 'Dir::Etc::SourceList={0}'.format(self.properties.empty_file)]
use_repos = set(context.use_repos.values())
if use_repos:
is_tmp_dir_created = True
apt_sources_list_tmp_dir = tempfile.mkdtemp(suffix="-ambari-apt-sources-d")
Logger.info("Temporary sources directory was created: %s" % apt_sources_list_tmp_dir)
for repo in use_repos:
new_sources_file = os.path.join(apt_sources_list_tmp_dir, repo + '.list')
Logger.info("Temporary sources file will be copied: {0}".format(new_sources_file))
sudo.copy(os.path.join(self.properties.repo_definition_location, repo + '.list'), new_sources_file)
copied_sources_files.append(new_sources_file)
          cmd = cmd + ['-o', 'Dir::Etc::SourceParts={0}'.format(apt_sources_list_tmp_dir)]
cmd = cmd + [name]
Logger.info("Installing package {0} ('{1}')".format(name, shell.string_cmd_from_args_list(cmd)))
shell.repository_manager_executor(cmd, self.properties, context, env=self.properties.install_cmd_env)
if is_tmp_dir_created:
for temporary_sources_file in copied_sources_files:
Logger.info("Removing temporary sources file: {0}".format(temporary_sources_file))
os.remove(temporary_sources_file)
if apt_sources_list_tmp_dir:
Logger.info("Removing temporary sources directory: {0}".format(apt_sources_list_tmp_dir))
os.rmdir(apt_sources_list_tmp_dir)
else:
Logger.info("Skipping installation of existing package {0}".format(name))
@replace_underscores
def upgrade_package(self, name, context):
"""
    Upgrade package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
context.is_upgrade = True
return self.install_package(name, context)
@replace_underscores
def remove_package(self, name, context, ignore_dependencies=False):
"""
Remove package
:type name str
:type context ambari_commons.shell.RepoCallContext
:type ignore_dependencies bool
:raise ValueError if name is empty
"""
if not name:
raise ValueError("Installation command were executed with no package name passed")
elif self._check_existence(name):
cmd = self.properties.remove_cmd[context.log_output] + [name]
Logger.info("Removing package {0} ('{1}')".format(name, shell.string_cmd_from_args_list(cmd)))
shell.repository_manager_executor(cmd, self.properties, context)
else:
Logger.info("Skipping removal of non-existing package {0}".format(name))
@replace_underscores
def _check_existence(self, name):
"""
    For regexp names:
    Only part of the matching packages may be installed if installation is cancelled early.
    Let's say:
    1. We install hbase-2-3-.*
    2. Only hbase-2-3-1234 is installed so far, but hbase-2-3-1234-regionserver is not yet.
    3. We cancel the apt-get run.
    In that case this is a bug in the packages we require,
    and hbase-2-3-.*-regionserver should be added to metainfo.xml.
    Checking existence should never fail for hbase-2-3-.* in such a case, otherwise it
    would break package removal and other operations.
    Note: this method SHOULD NOT use apt-get (the check uses dpkg, not apt), because we hit a lot of
    issues when a customer has apt-get in an inconsistent state (locked, in use, or pointing at an
    invalid repo). Once packages are installed we should not rely on apt-get.
"""
r = shell.subprocess_executor(self.properties.check_cmd % name)
return not bool(r.code)
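    # For name "hbase-2-3-.*" the check above expands (roughly) to:
    #   /usr/bin/dpkg --get-selections | grep -v deinstall | awk '{print $1}' | grep ^hbase-2-3-.*$
    # so this returns True as soon as any installed package matches the pattern.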
|
# 8.56-9.14,20min
N=int(input())
nums = [int(x) for x in input().split()]
if max(nums) < 0:  # all elements negative: output 0 together with the first and last elements
print(0, nums[0], nums[-1])
exit(0)
if len(nums)==1:
print(nums[0],nums[0],nums[0])
exit(0)
# f[i]=max(f[i-1]+nums[i], nums[i])
f = [0 for _ in nums]  # f[i]: maximum subarray sum ending at index i
f[0] = nums[0]
s = [0 for __ in nums]  # s[i]: first element of the subarray achieving f[i]
s[0] = nums[0]
for i in range(1, len(nums)):
if f[i-1]<0:
f[i]=nums[i]
s[i]=nums[i]
else:
f[i]=f[i-1]+nums[i]
s[i]=s[i-1]
max_f = max(f)
for i in range(len(f)):
if f[i]==max_f:
print(max_f, s[i], nums[i])
exit(0)
# max_f = max(f)
# for i in range(len(f)):
# if f[i]==max_f:
# tmp = max_f
# for j in range(i, -1, -1):
# tmp -= nums[j]
# if tmp==0:
# print(max_f, nums[j], nums[i])
# exit(0)
"""
6
-2 11 -4 13 -5 -2
"""
|
import pybaseball.utils
from .playerid_lookup import playerid_reverse_lookup
from .playerid_lookup import playerid_lookup
from .statcast import statcast, statcast_single_game
from .statcast_pitcher import statcast_pitcher
from .statcast_batter import statcast_batter, statcast_batter_exitvelo_barrels
from .league_batting_stats import batting_stats_bref
from .league_batting_stats import batting_stats_range
from .league_batting_stats import bwar_bat
from .league_pitching_stats import pitching_stats_bref
from .league_pitching_stats import pitching_stats_range
from .league_pitching_stats import bwar_pitch
from .standings import standings
from .team_results import schedule_and_record
from .pitching_leaders import pitching_stats
from .batting_leaders import batting_stats
from .team_pitching import team_pitching
from .team_batting import team_batting
from .top_prospects import top_prospects
from .amateur_draft import amateur_draft
from .lahman import parks
from .lahman import all_star_full
from .lahman import appearances
from .lahman import awards_managers
from .lahman import awards_players
from .lahman import awards_share_managers
from .lahman import awards_share_players
from .lahman import batting
from .lahman import batting_post
from .lahman import college_playing
from .lahman import fielding
from .lahman import fielding_of
from .lahman import fielding_of_split
from .lahman import fielding_post
from .lahman import hall_of_fame
from .lahman import home_games
from .lahman import managers
from .lahman import managers_half
from .lahman import master
from .lahman import people
from .lahman import pitching
from .lahman import pitching_post
from .lahman import salaries
from .lahman import schools
from .lahman import series_post
from .lahman import teams
from .lahman import teams_franchises
from .lahman import teams_half
from .lahman import download_lahman
from .retrosheet import season_game_logs
from .retrosheet import world_series_logs
from .retrosheet import all_star_game_logs
from .retrosheet import wild_card_logs
from .retrosheet import division_series_logs
from .retrosheet import lcs_logs
from .team_game_logs import team_game_logs
|
from django.urls import path, include
from .views import apiOverview
urlpatterns = [
path('', apiOverview, name="api-overview"),
path('accounts/', include('accounts.urls')),
path('rooms/', include('rooms.urls')),
path('university/', include('universities.urls')),
path('notifications/', include('notifications.urls'))
]
|
# extdiff.py - external diff program support for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` every time. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called meld, runs meld (no need to name twice). If
# the meld executable is not available, the meld tool in [merge-tools]
# will be used, if available
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
# users should be sure to put "let g:DirDiffDynamicDiffText = 1" in
# their .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and a list of file or directory names just as with the
normal :hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
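# For example, a hypothetical [extdiff] entry combining the runtime variables
# described above (tool name and flags are illustrative, not shipped defaults):
#
#   [extdiff]
#   mydiff = mydifftool --label $plabel1 --label $clabel $parent1 $child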
from __future__ import absolute_import
import os
import re
import shutil
import tempfile
from mercurial.i18n import _
from mercurial.node import (
nullid,
short,
)
from mercurial import (
archival,
cmdutil,
error,
filemerge,
pycompat,
registrar,
scmutil,
util,
)
cmdtable = {}
command = registrar.command(cmdtable)
configtable = {}
configitem = registrar.configitem(configtable)
configitem('extdiff', r'opts\..*',
default='',
generic=True,
)
configitem('diff-tools', r'.*\.diffargs$',
default=None,
generic=True,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
def snapshot(ui, repo, files, node, tmproot, listsubrepos):
'''snapshot files as of some revision
if not using snapshot, -I/-X does not work and recursive diff
in tools like kdiff3 and meld displays too many files.'''
dirname = os.path.basename(repo.root)
if dirname == "":
dirname = "root"
if node is not None:
dirname = '%s.%s' % (dirname, short(node))
base = os.path.join(tmproot, dirname)
os.mkdir(base)
fnsandstat = []
if node is not None:
ui.note(_('making snapshot of %d files from rev %s\n') %
(len(files), short(node)))
else:
ui.note(_('making snapshot of %d files from working directory\n') %
(len(files)))
if files:
repo.ui.setconfig("ui", "archivemeta", False)
archival.archive(repo, base, node, 'files',
matchfn=scmutil.matchfiles(repo, files),
subrepos=listsubrepos)
for fn in sorted(files):
wfn = util.pconvert(fn)
ui.note(' %s\n' % wfn)
if node is None:
dest = os.path.join(base, wfn)
fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
return dirname, fnsandstat
def dodiff(ui, repo, cmdline, pats, opts):
'''Do the actual diff:
- copy to a temp structure if diffing 2 internal revisions
- copy to a temp structure if diffing working revision with
another one and more than 1 file is changed
- just invoke the diff for a single file in the working dir
'''
revs = opts.get('rev')
change = opts.get('change')
do3way = '$parent2' in cmdline
if revs and change:
msg = _('cannot specify --rev and --change at the same time')
raise error.Abort(msg)
elif change:
node2 = scmutil.revsingle(repo, change, None).node()
node1a, node1b = repo.changelog.parents(node2)
else:
node1a, node2 = scmutil.revpair(repo, revs)
if not revs:
node1b = repo.dirstate.p2()
else:
node1b = nullid
# Disable 3-way merge if there is only one parent
if do3way:
if node1b == nullid:
do3way = False
    subrepos = opts.get('subrepos')
matcher = scmutil.match(repo[node2], pats, opts)
if opts.get('patch'):
if subrepos:
raise error.Abort(_('--patch cannot be used with --subrepos'))
if node2 is None:
raise error.Abort(_('--patch requires two revisions'))
else:
mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
listsubrepos=subrepos)[:3])
if do3way:
mod_b, add_b, rem_b = map(set,
repo.status(node1b, node2, matcher,
listsubrepos=subrepos)[:3])
else:
mod_b, add_b, rem_b = set(), set(), set()
modadd = mod_a | add_a | mod_b | add_b
common = modadd | rem_a | rem_b
if not common:
return 0
tmproot = tempfile.mkdtemp(prefix='extdiff.')
try:
if not opts.get('patch'):
# Always make a copy of node1a (and node1b, if applicable)
dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
subrepos)[0]
rev1a = '@%d' % repo[node1a].rev()
if do3way:
dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
subrepos)[0]
rev1b = '@%d' % repo[node1b].rev()
else:
dir1b = None
rev1b = ''
fnsandstat = []
        # If node2 is not the wc or there is >1 change, copy it
dir2root = ''
rev2 = ''
if node2:
dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
rev2 = '@%d' % repo[node2].rev()
elif len(common) > 1:
            # we only actually need to get the files to copy back to
            # the working dir in this case (because the other cases
            # are: diffing 2 revisions or single file -- in which case
            # the file is already directly passed to the diff tool).
dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
subrepos)
else:
# This lets the diff tool open the changed file directly
dir2 = ''
dir2root = repo.root
label1a = rev1a
label1b = rev1b
label2 = rev2
# If only one change, diff the files instead of the directories
# Handle bogus modifies correctly by checking if the files exist
if len(common) == 1:
common_file = util.localpath(common.pop())
dir1a = os.path.join(tmproot, dir1a, common_file)
label1a = common_file + rev1a
if not os.path.isfile(dir1a):
dir1a = os.devnull
if do3way:
dir1b = os.path.join(tmproot, dir1b, common_file)
label1b = common_file + rev1b
if not os.path.isfile(dir1b):
dir1b = os.devnull
dir2 = os.path.join(dir2root, dir2, common_file)
label2 = common_file + rev2
else:
template = 'hg-%h.patch'
cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
fntemplate=repo.vfs.reljoin(tmproot, template),
match=matcher)
label1a = cmdutil.makefilename(repo, template, node1a)
label2 = cmdutil.makefilename(repo, template, node2)
dir1a = repo.vfs.reljoin(tmproot, label1a)
dir2 = repo.vfs.reljoin(tmproot, label2)
dir1b = None
label1b = None
fnsandstat = []
# Function to quote file/dir names in the argument string.
# When not operating in 3-way mode, an empty string is
# returned for parent2
replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
'plabel1': label1a, 'plabel2': label1b,
'clabel': label2, 'child': dir2,
'root': repo.root}
def quote(match):
pre = match.group(2)
key = match.group(3)
if not do3way and key == 'parent2':
return pre
return pre + util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
regex = (r'''(['"]?)([^\s'"$]*)'''
r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
if not do3way and not re.search(regex, cmdline):
cmdline += ' $parent1 $child'
cmdline = re.sub(regex, quote, cmdline)
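        # For illustration (paths hypothetical): with do3way=False, a cmdline of
        # "kdiff3 $parent $child" ends up as something like
        # "kdiff3 '/tmp/extdiff.XXXX/repo@1234' '/tmp/extdiff.XXXX/repo'"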
ui.debug('running %r in %s\n' % (cmdline, tmproot))
ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
for copy_fn, working_fn, st in fnsandstat:
cpstat = os.lstat(copy_fn)
# Some tools copy the file and attributes, so mtime may not detect
# all changes. A size check will detect more cases, but not all.
# The only certain way to detect every case is to diff all files,
# which could be expensive.
            # copyfile() carries over the permission, so the mode check could
            # be in an 'elif' branch, but it is kept in the 'or' chain to catch
            # the case where the file changed without affecting mtime or size.
if (cpstat.st_mtime != st.st_mtime or cpstat.st_size != st.st_size
or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
ui.debug('file changed while diffing. '
'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
util.copyfile(copy_fn, working_fn)
return 1
finally:
ui.note(_('cleaning up temp directory\n'))
shutil.rmtree(tmproot)
extdiffopts = [
('o', 'option', [],
_('pass option to comparison program'), _('OPT')),
('r', 'rev', [], _('revision'), _('REV')),
('c', 'change', '', _('change made by revision'), _('REV')),
('', 'patch', None, _('compare patches for two revisions'))
] + cmdutil.walkopts + cmdutil.subrepoopts
@command('extdiff',
[('p', 'program', '', _('comparison program to run'), _('CMD')),
] + extdiffopts,
_('hg extdiff [OPT]... [FILE]...'),
inferrepo=True)
def extdiff(ui, repo, *pats, **opts):
'''use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
an external program. The default program used is diff, with
default options "-Npru".
To select a different program, use the -p/--program option. The
program will be passed the names of two directories to compare. To
pass additional options to the program, use -o/--option. These
will be passed before the names of the directories to compare.
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
program = opts.get('program')
option = opts.get('option')
if not program:
program = 'diff'
option = option or ['-Npru']
cmdline = ' '.join(map(util.shellquote, [program] + option))
return dodiff(ui, repo, cmdline, pats, opts)
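# Typical invocations (revision names and tool availability are illustrative):
#   hg extdiff                      # GNU diff -Npru against the working directory
#   hg extdiff -r 1.0 -r 1.1        # compare two revisions
#   hg extdiff -p kdiff3 -r 1000    # compare rev 1000 with the working directory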
class savedcmd(object):
"""use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using
the following program::
%(path)s
When two revision arguments are given, then changes are shown
between those revisions. If only one revision is specified then
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.
"""
def __init__(self, path, cmdline):
# We can't pass non-ASCII through docstrings (and path is
# in an unknown encoding anyway)
docpath = util.escapestr(path)
self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
self._cmdline = cmdline
def __call__(self, ui, repo, *pats, **opts):
options = ' '.join(map(util.shellquote, opts['option']))
if options:
options = ' ' + options
return dodiff(ui, repo, self._cmdline + options, pats, opts)
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
path = util.expandpath(path)
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
path = util.findexe(cmd)
if path is None:
path = filemerge.findexternaltool(ui, cmd) or cmd
diffopts = ui.config('extdiff', 'opts.' + cmd)
cmdline = util.shellquote(path)
if diffopts:
cmdline += ' ' + diffopts
elif cmd.startswith('opts.'):
continue
else:
if path:
# case "cmd = path opts"
cmdline = path
diffopts = len(pycompat.shlexsplit(cmdline)) > 1
else:
# case "cmd ="
path = util.findexe(cmd)
if path is None:
path = filemerge.findexternaltool(ui, cmd) or cmd
cmdline = util.shellquote(path)
diffopts = False
# look for diff arguments in [diff-tools] then [merge-tools]
if not diffopts:
args = ui.config('diff-tools', cmd+'.diffargs') or \
ui.config('merge-tools', cmd+'.diffargs')
if args:
cmdline += ' ' + args
command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
inferrepo=True)(savedcmd(path, cmdline))
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [savedcmd]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
import gevent
from django.conf import settings
from fakeredis import FakeRedis
from mock import MagicMock, patch
from django.test import TestCase
from gcloud.tests.mock import MockTaskOperationTimesConfig
from gcloud.tests.mock_settings import TASK_OPERATION_TIMES_CONFIG_GET
from api.utils.thread import ThreadPool
from gcloud.utils.throttle import check_task_operation_throttle
class CheckTaskOperationThrottleTestCase(TestCase):
def setUp(self):
self.start_time_stamp = time.time()
self.times_config = MockTaskOperationTimesConfig({"times": 10, "time_unit": "m"})
setattr(settings, "redis_inst", FakeRedis())
def tearDown(self):
delattr(settings, "redis_inst")
def test__task_operation_throttle_exceed_times(self):
with patch(TASK_OPERATION_TIMES_CONFIG_GET, MagicMock(return_value=self.times_config)):
for time_num in range(100):
result = check_task_operation_throttle(project_id=1, operation="test_exceed_times")
if time_num < 10:
self.assertTrue(result)
else:
self.assertFalse(result)
def test__task_operation_throttle_within_times(self):
with patch(TASK_OPERATION_TIMES_CONFIG_GET, MagicMock(return_value=self.times_config)):
for time_num in range(100):
time_stamp = self.start_time_stamp + time_num * 7
with patch("time.time", MagicMock(return_value=time_stamp)):
result = check_task_operation_throttle(project_id=1, operation="test_within_times")
self.assertTrue(result)
def test__task_operation_throttle_concurrency(self):
with patch(TASK_OPERATION_TIMES_CONFIG_GET, MagicMock(return_value=self.times_config)):
pool = ThreadPool()
result_list = []
for time_num in range(20):
result_list.append(
pool.apply_async(
check_task_operation_throttle, kwds={"project_id": 1, "operation": "test_concurrency"}
)
)
pool.close()
pool.join()
success_num = len([result for result in result_list if result.get() is True])
self.assertEqual(success_num, self.times_config.times)
def test__task_operation_throttle_gevent(self):
with patch(TASK_OPERATION_TIMES_CONFIG_GET, MagicMock(return_value=self.times_config)):
jobs = [gevent.spawn(check_task_operation_throttle, 1, "test_gevent") for _ in range(20)]
gevent.joinall(jobs)
success_num = len([job.value for job in jobs if job.value is True])
self.assertEqual(success_num, self.times_config.times)
|
""" Learning Concurrency in Python - Chapter 01 - sequential calculation """
import time
import random
# This does all of our prime factorization on a given number 'n_v'
def calculate_prime_factors(n_v):
    """ Calculate the prime factors of n_v. """
prime_factor = []
d_v = 2
while d_v * d_v <= n_v:
while (n_v % d_v) == 0:
prime_factor.append(d_v) # supposing you want multiple factors repeated
n_v //= d_v
d_v += 1
if n_v > 1:
prime_factor.append(n_v)
return prime_factor
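# For example: calculate_prime_factors(12) -> [2, 2, 3]
# (12 = 2 * 2 * 3; repeated factors are kept, as noted in the comment above).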
def main():
""" Sequential number calculation. """
print("Starting number crunching")
t_0 = time.time()
for _ in range(10000):
rand = random.randint(20000, 100000000)
print(calculate_prime_factors(rand))
t_1 = time.time()
total_time = t_1 - t_0
print(f"Execution Time: {total_time}")
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
import unittest
from hashlib import sha1, sha224, sha256, sha384, sha512
from btclib import dsa
from btclib.curvemult import mult
from btclib.curves import (nistp192, nistp224, nistp256, nistp384, nistp521,
secp256k1)
from btclib.rfc6979 import rfc6979
class Testrfc6979(unittest.TestCase):
def test_rfc6979(self):
# source: https://bitcointalk.org/index.php?topic=285142.40
msg = sha256(b'Satoshi Nakamoto').digest()
x = 0x1
k = rfc6979(msg, x)
expected = 0x8F8A276C19F4149656B280621E358CCE24F5F52542772691EE69063B74F15D15
self.assertEqual(k, expected)
# mismatch between hf digest size and hashed message size
self.assertRaises(ValueError, rfc6979, msg[:-1], x)
#rfc6979(msg[:-1], x)
def test_rfc6979_example(self):
class _helper:
def __init__(self, n: int) -> None:
self.n = n
self.nlen = n.bit_length()
self.nsize = (self.nlen + 7) // 8
# source: https://tools.ietf.org/html/rfc6979 section A.1
fake_ec = _helper(0x4000000000000000000020108A2E0CC0D99F8A5EF)
x = 0x09A4D6792295A7F730FC3F2B49CBC0F62E862272F
msg = b'sample'
m = sha256(msg).digest()
k = 0x23AF4074C90A02B3FE61D286D5C87F425E6BDD81B
self.assertEqual(k, rfc6979(m, x, fake_ec))
def test_rfc6979_tv(self):
# source: https://tools.ietf.org/html/rfc6979 section A.2.3
ec = nistp192
x = 0x6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4
Ux = 0xAC2C77F529F91689FEA0EA5EFEC7F210D8EEA0B9E047ED56
Uy = 0x3BC723E57670BD4887EBC732C523063D0A7C957BC97C1C43
U = mult(x, ec.G, ec)
self.assertEqual((Ux, Uy), U)
ec = nistp192
hf = sha1
msg = b"sample"
m = hf(msg).digest()
k = 0x37D7CA00D2C7B0E5E412AC03BD44BA837FDD5B28CD3B0021
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x98C6BD12B23EAF5E2A2045132086BE3EB8EBD62ABF6698FF
s = 0x57A22B07DEA9530F8DE9471B1DC6624472E8E2844BC25B64
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha224
msg = b"sample"
m = hf(msg).digest()
k = 0x4381526B3FC1E7128F202E194505592F01D5FF4C5AF015D8
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xA1F00DAD97AEEC91C95585F36200C65F3C01812AA60378F5
s = 0xE07EC1304C7C6C9DEBBE980B9692668F81D4DE7922A0F97A
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha256
msg = b"sample"
m = hf(msg).digest()
k = 0x32B1B6D7D42A05CB449065727A84804FB1A3E34D8F261496
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x4B0B8CE98A92866A2820E20AA6B75B56382E0F9BFD5ECB55
s = 0xCCDB006926EA9565CBADC840829D8C384E06DE1F1E381B85
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha384
msg = b"sample"
m = hf(msg).digest()
k = 0x4730005C4FCB01834C063A7B6760096DBE284B8252EF4311
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xDA63BF0B9ABCF948FBB1E9167F136145F7A20426DCC287D5
s = 0xC3AA2C960972BD7A2003A57E1C4C77F0578F8AE95E31EC5E
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha512
msg = b"sample"
m = hf(msg).digest()
k = 0xA2AC7AB055E4F20692D49209544C203A7D1F2C0BFBC75DB1
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x4D60C5AB1996BD848343B31C00850205E2EA6922DAC2E4B8
s = 0x3F6E837448F027A1BF4B34E796E32A811CBB4050908D8F67
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha1
msg = b"test"
m = hf(msg).digest()
k = 0xD9CF9C3D3297D3260773A1DA7418DB5537AB8DD93DE7FA25
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0F2141A0EBBC44D2E1AF90A50EBCFCE5E197B3B7D4DE036D
s = 0xEB18BC9E1F3D7387500CB99CF5F7C157070A8961E38700B7
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha224
msg = b"test"
m = hf(msg).digest()
k = 0xF5DC805F76EF851800700CCE82E7B98D8911B7D510059FBE
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x6945A1C1D1B2206B8145548F633BB61CEF04891BAF26ED34
s = 0xB7FB7FDFC339C0B9BD61A9F5A8EAF9BE58FC5CBA2CB15293
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha256
msg = b"test"
m = hf(msg).digest()
k = 0x5C4CE89CF56D9E7C77C8585339B006B97B5F0680B4306C6C
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x3A718BD8B4926C3B52EE6BBE67EF79B18CB6EB62B1AD97AE
s = 0x5662E6848A4A19B1F1AE2F72ACD4B8BBE50F1EAC65D9124F
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha384
msg = b"test"
m = hf(msg).digest()
k = 0x5AFEFB5D3393261B828DB6C91FBC68C230727B030C975693
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xB234B60B4DB75A733E19280A7A6034BD6B1EE88AF5332367
s = 0x7994090B2D59BB782BE57E74A44C9A1C700413F8ABEFE77A
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp192
hf = sha512
msg = b"test"
m = hf(msg).digest()
k = 0x0758753A5254759C7CFBAD2E2D9B0792EEE44136C9480527
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xFE4F4AE86A58B6507946715934FE2D8FF9D95B6B098FE739
s = 0x74CF5605C98FBA0E1EF34D4B5A1577A7DCF59457CAE52290
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
# source: https://tools.ietf.org/html/rfc6979 section A.2.4
ec = nistp224
x = 0xF220266E1105BFE3083E03EC7A3A654651F45E37167E88600BF257C1
Ux = 0x00CF08DA5AD719E42707FA431292DEA11244D64FC51610D94B130D6C
Uy = 0xEEAB6F3DEBE455E3DBF85416F7030CBD94F34F2D6F232C69F3C1385A
U = mult(x, ec.G, ec)
self.assertEqual((Ux, Uy), U)
ec = nistp224
hf = sha1
msg = b"sample"
m = hf(msg).digest()
k = 0x7EEFADD91110D8DE6C2C470831387C50D3357F7F4D477054B8B426BC
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x22226F9D40A96E19C4A301CE5B74B115303C0F3A4FD30FC257FB57AC
s = 0x66D1CDD83E3AF75605DD6E2FEFF196D30AA7ED7A2EDF7AF475403D69
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha224
msg = b"sample"
m = hf(msg).digest()
k = 0xC1D1F2F10881088301880506805FEB4825FE09ACB6816C36991AA06D
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x1CDFE6662DDE1E4A1EC4CDEDF6A1F5A2FB7FBD9145C12113E6ABFD3E
s = 0xA6694FD7718A21053F225D3F46197CA699D45006C06F871808F43EBC
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha256
msg = b"sample"
m = hf(msg).digest()
k = 0xAD3029E0278F80643DE33917CE6908C70A8FF50A411F06E41DEDFCDC
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x61AA3DA010E8E8406C656BC477A7A7189895E7E840CDFE8FF42307BA
s = 0xBC814050DAB5D23770879494F9E0A680DC1AF7161991BDE692B10101
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha384
msg = b"sample"
m = hf(msg).digest()
k = 0x52B40F5A9D3D13040F494E83D3906C6079F29981035C7BD51E5CAC40
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0B115E5E36F0F9EC81F1325A5952878D745E19D7BB3EABFABA77E953
s = 0x830F34CCDFE826CCFDC81EB4129772E20E122348A2BBD889A1B1AF1D
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha512
msg = b"sample"
m = hf(msg).digest()
k = 0x9DB103FFEDEDF9CFDBA05184F925400C1653B8501BAB89CEA0FBEC14
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x074BD1D979D5F32BF958DDC61E4FB4872ADCAFEB2256497CDAC30397
s = 0xA4CECA196C3D5A1FF31027B33185DC8EE43F288B21AB342E5D8EB084
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha1
msg = b"test"
m = hf(msg).digest()
k = 0x2519178F82C3F0E4F87ED5883A4E114E5B7A6E374043D8EFD329C253
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xDEAA646EC2AF2EA8AD53ED66B2E2DDAA49A12EFD8356561451F3E21C
s = 0x95987796F6CF2062AB8135271DE56AE55366C045F6D9593F53787BD2
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha224
msg = b"test"
m = hf(msg).digest()
k = 0xDF8B38D40DCA3E077D0AC520BF56B6D565134D9B5F2EAE0D34900524
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xC441CE8E261DED634E4CF84910E4C5D1D22C5CF3B732BB204DBEF019
s = 0x902F42847A63BDC5F6046ADA114953120F99442D76510150F372A3F4
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha256
msg = b"test"
m = hf(msg).digest()
k = 0xFF86F57924DA248D6E44E8154EB69F0AE2AEBAEE9931D0B5A969F904
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xAD04DDE87B84747A243A631EA47A1BA6D1FAA059149AD2440DE6FBA6
s = 0x178D49B1AE90E3D8B629BE3DB5683915F4E8C99FDF6E666CF37ADCFD
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha384
msg = b"test"
m = hf(msg).digest()
k = 0x7046742B839478C1B5BD31DB2E862AD868E1A45C863585B5F22BDC2D
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x389B92682E399B26518A95506B52C03BC9379A9DADF3391A21FB0EA4
s = 0x414A718ED3249FF6DBC5B50C27F71F01F070944DA22AB1F78F559AAB
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp224
hf = sha512
msg = b"test"
m = hf(msg).digest()
k = 0xE39C2AA4EA6BE2306C72126D40ED77BF9739BB4D6EF2BBB1DCB6169D
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x049F050477C5ADD858CAC56208394B5A55BAEBBE887FDF765047C17C
s = 0x077EB13E7005929CEFA3CD0403C7CDCC077ADF4E44F3C41B2F60ECFF
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
# source: https://tools.ietf.org/html/rfc6979 section A.2.5
ec = nistp256
x = 0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721
Ux = 0x60FED4BA255A9D31C961EB74C6356D68C049B8923B61FA6CE669622E60F29FB6
Uy = 0x7903FE1008B8BC99A41AE9E95628BC64F2F1B20C2D7E9F5177A3C294D4462299
U = mult(x, ec.G, ec)
self.assertEqual((Ux, Uy), U)
ec = nistp256
hf = sha1
msg = b"sample"
m = hf(msg).digest()
k = 0x882905F1227FD620FBF2ABF21244F0BA83D0DC3A9103DBBEE43A1FB858109DB4
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x61340C88C3AAEBEB4F6D667F672CA9759A6CCAA9FA8811313039EE4A35471D32
s = 0x6D7F147DAC089441BB2E2FE8F7A3FA264B9C475098FDCF6E00D7C996E1B8B7EB
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha224
msg = b"sample"
m = hf(msg).digest()
k = 0x103F90EE9DC52E5E7FB5132B7033C63066D194321491862059967C715985D473
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x53B2FFF5D1752B2C689DF257C04C40A587FABABB3F6FC2702F1343AF7CA9AA3F
s = 0xB9AFB64FDC03DC1A131C7D2386D11E349F070AA432A4ACC918BEA988BF75C74C
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha256
msg = b"sample"
m = hf(msg).digest()
k = 0xA6E3C57DD01ABE90086538398355DD4C3B17AA873382B0F24D6129493D8AAD60
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xEFD48B2AACB6A8FD1140DD9CD45E81D69D2C877B56AAF991C34D0EA84EAF3716
s = 0xF7CB1C942D657C41D436C7A1B6E29F65F3E900DBB9AFF4064DC4AB2F843ACDA8
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha384
msg = b"sample"
m = hf(msg).digest()
k = 0x09F634B188CEFD98E7EC88B1AA9852D734D0BC272F7D2A47DECC6EBEB375AAD4
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0EAFEA039B20E9B42309FB1D89E213057CBF973DC0CFC8F129EDDDC800EF7719
s = 0x4861F0491E6998B9455193E34E7B0D284DDD7149A74B95B9261F13ABDE940954
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha512
msg = b"sample"
m = hf(msg).digest()
k = 0x5FA81C63109BADB88C1F367B47DA606DA28CAD69AA22C4FE6AD7DF73A7173AA5
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x8496A60B5E9B47C825488827E0495B0E3FA109EC4568FD3F8D1097678EB97F00
s = 0x2362AB1ADBE2B8ADF9CB9EDAB740EA6049C028114F2460F96554F61FAE3302FE
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha1
msg = b"test"
m = hf(msg).digest()
k = 0x8C9520267C55D6B980DF741E56B4ADEE114D84FBFA2E62137954164028632A2E
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0CBCC86FD6ABD1D99E703E1EC50069EE5C0B4BA4B9AC60E409E8EC5910D81A89
s = 0x01B9D7B73DFAA60D5651EC4591A0136F87653E0FD780C3B1BC872FFDEAE479B1
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha224
msg = b"test"
m = hf(msg).digest()
k = 0x669F4426F2688B8BE0DB3A6BD1989BDAEFFF84B649EEB84F3DD26080F667FAA7
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xC37EDB6F0AE79D47C3C27E962FA269BB4F441770357E114EE511F662EC34A692
s = 0xC820053A05791E521FCAAD6042D40AEA1D6B1A540138558F47D0719800E18F2D
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha256
msg = b"test"
m = hf(msg).digest()
k = 0xD16B6AE827F17175E040871A1C7EC3500192C4C92677336EC2537ACAEE0008E0
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xF1ABB023518351CD71D881567B1EA663ED3EFCF6C5132B354F28D3B0B7D38367
s = 0x019F4113742A2B14BD25926B49C649155F267E60D3814B4C0CC84250E46F0083
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha384
msg = b"test"
m = hf(msg).digest()
k = 0x16AEFFA357260B04B1DD199693960740066C1A8F3E8EDD79070AA914D361B3B8
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x83910E8B48BB0C74244EBDF7F07A1C5413D61472BD941EF3920E623FBCCEBEB6
s = 0x8DDBEC54CF8CD5874883841D712142A56A8D0F218F5003CB0296B6B509619F2C
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp256
hf = sha512
msg = b"test"
m = hf(msg).digest()
k = 0x6915D11632ACA3C40D5D51C08DAF9C555933819548784480E93499000D9F0B7F
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x461D93F31B6540894788FD206C07CFA0CC35F46FA3C91816FFF1040AD1581A04
s = 0x39AF9F15DE0DB8D97E72719C74820D304CE5226E32DEDAE67519E840D1194E55
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
# source: https://tools.ietf.org/html/rfc6979 section A.2.6
ec = nistp384
x = 0x6B9D3DAD2E1B8C1C05B19875B6659F4DE23C3B667BF297BA9AA47740787137D896D5724E4C70A825F872C9EA60D2EDF5
Ux = 0xEC3A4E415B4E19A4568618029F427FA5DA9A8BC4AE92E02E06AAE5286B300C64DEF8F0EA9055866064A254515480BC13
Uy = 0x8015D9B72D7D57244EA8EF9AC0C621896708A59367F9DFB9F54CA84B3F1C9DB1288B231C3AE0D4FE7344FD2533264720
U = mult(x, ec.G, ec)
self.assertEqual((Ux, Uy), U)
ec = nistp384
hf = sha1
msg = b"sample"
m = hf(msg).digest()
k = 0x4471EF7518BB2C7C20F62EAE1C387AD0C5E8E470995DB4ACF694466E6AB096630F29E5938D25106C3C340045A2DB01A7
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xEC748D839243D6FBEF4FC5C4859A7DFFD7F3ABDDF72014540C16D73309834FA37B9BA002899F6FDA3A4A9386790D4EB2
s = 0xA3BCFA947BEEF4732BF247AC17F71676CB31A847B9FF0CBC9C9ED4C1A5B3FACF26F49CA031D4857570CCB5CA4424A443
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha224
msg = b"sample"
m = hf(msg).digest()
k = 0xA4E4D2F0E729EB786B31FC20AD5D849E304450E0AE8E3E341134A5C1AFA03CAB8083EE4E3C45B06A5899EA56C51B5879
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x42356E76B55A6D9B4631C865445DBE54E056D3B3431766D0509244793C3F9366450F76EE3DE43F5A125333A6BE060122
s = 0x9DA0C81787064021E78DF658F2FBB0B042BF304665DB721F077A4298B095E4834C082C03D83028EFBF93A3C23940CA8D
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha256
msg = b"sample"
m = hf(msg).digest()
k = 0x180AE9F9AEC5438A44BC159A1FCB277C7BE54FA20E7CF404B490650A8ACC414E375572342863C899F9F2EDF9747A9B60
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x21B13D1E013C7FA1392D03C5F99AF8B30C570C6F98D4EA8E354B63A21D3DAA33BDE1E888E63355D92FA2B3C36D8FB2CD
s = 0xF3AA443FB107745BF4BD77CB3891674632068A10CA67E3D45DB2266FA7D1FEEBEFDC63ECCD1AC42EC0CB8668A4FA0AB0
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha384
msg = b"sample"
m = hf(msg).digest()
k = 0x94ED910D1A099DAD3254E9242AE85ABDE4BA15168EAF0CA87A555FD56D10FBCA2907E3E83BA95368623B8C4686915CF9
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x94EDBB92A5ECB8AAD4736E56C691916B3F88140666CE9FA73D64C4EA95AD133C81A648152E44ACF96E36DD1E80FABE46
s = 0x99EF4AEB15F178CEA1FE40DB2603138F130E740A19624526203B6351D0A3A94FA329C145786E679E7B82C71A38628AC8
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha512
msg = b"sample"
m = hf(msg).digest()
k = 0x92FC3C7183A883E24216D1141F1A8976C5B0DD797DFA597E3D7B32198BD35331A4E966532593A52980D0E3AAA5E10EC3
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xED0959D5880AB2D869AE7F6C2915C6D60F96507F9CB3E047C0046861DA4A799CFE30F35CC900056D7C99CD7882433709
s = 0x512C8CCEEE3890A84058CE1E22DBC2198F42323CE8ACA9135329F03C068E5112DC7CC3EF3446DEFCEB01A45C2667FDD5
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha1
msg = b"test"
m = hf(msg).digest()
k = 0x66CC2C8F4D303FC962E5FF6A27BD79F84EC812DDAE58CF5243B64A4AD8094D47EC3727F3A3C186C15054492E30698497
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x4BC35D3A50EF4E30576F58CD96CE6BF638025EE624004A1F7789A8B8E43D0678ACD9D29876DAF46638645F7F404B11C7
s = 0xD5A6326C494ED3FF614703878961C0FDE7B2C278F9A65FD8C4B7186201A2991695BA1C84541327E966FA7B50F7382282
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha224
msg = b"test"
m = hf(msg).digest()
k = 0x18FA39DB95AA5F561F30FA3591DC59C0FA3653A80DAFFA0B48D1A4C6DFCBFF6E3D33BE4DC5EB8886A8ECD093F2935726
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xE8C9D0B6EA72A0E7837FEA1D14A1A9557F29FAA45D3E7EE888FC5BF954B5E62464A9A817C47FF78B8C11066B24080E72
s = 0x07041D4A7A0379AC7232FF72E6F77B6DDB8F09B16CCE0EC3286B2BD43FA8C6141C53EA5ABEF0D8231077A04540A96B66
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha256
msg = b"test"
m = hf(msg).digest()
k = 0x0CFAC37587532347DC3389FDC98286BBA8C73807285B184C83E62E26C401C0FAA48DD070BA79921A3457ABFF2D630AD7
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x6D6DEFAC9AB64DABAFE36C6BF510352A4CC27001263638E5B16D9BB51D451559F918EEDAF2293BE5B475CC8F0188636B
s = 0x2D46F3BECBCC523D5F1A1256BF0C9B024D879BA9E838144C8BA6BAEB4B53B47D51AB373F9845C0514EEFB14024787265
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha384
msg = b"test"
m = hf(msg).digest()
k = 0x015EE46A5BF88773ED9123A5AB0807962D193719503C527B031B4C2D225092ADA71F4A459BC0DA98ADB95837DB8312EA
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x8203B63D3C853E8D77227FB377BCF7B7B772E97892A80F36AB775D509D7A5FEB0542A7F0812998DA8F1DD3CA3CF023DB
s = 0xDDD0760448D42D8A43AF45AF836FCE4DE8BE06B485E9B61B827C2F13173923E06A739F040649A667BF3B828246BAA5A5
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp384
hf = sha512
msg = b"test"
m = hf(msg).digest()
k = 0x3780C4F67CB15518B6ACAE34C9F83568D2E12E47DEAB6C50A4E4EE5319D1E8CE0E2CC8A136036DC4B9C00E6888F66B6C
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0xA0D5D090C9980FAF3C2CE57B7AE951D31977DD11C775D314AF55F76C676447D06FB6495CD21B4B6E340FC236584FB277
s = 0x976984E59B4C77B0E8E4460DCA3D9F20E07B9BB1F63BEEFAF576F6B2E8B224634A2092CD3792E0159AD9CEE37659C736
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
# source: https://tools.ietf.org/html/rfc6979 section A.2.7
ec = nistp521
x = 0x0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538
Ux = 0x1894550D0785932E00EAA23B694F213F8C3121F86DC97A04E5A7167DB4E5BCD371123D46E45DB6B5D5370A7F20FB633155D38FFA16D2BD761DCAC474B9A2F5023A4
Uy = 0x0493101C962CD4D2FDDF782285E64584139C2F91B47F87FF82354D6630F746A28A0DB25741B5B34A828008B22ACC23F924FAAFBD4D33F81EA66956DFEAA2BFDFCF5
U = mult(x, ec.G, ec)
self.assertEqual((Ux, Uy), U)
ec = nistp521
hf = sha1
msg = b"sample"
m = hf(msg).digest()
k = 0x089C071B419E1C2820962321787258469511958E80582E95D8378E0C2CCDB3CB42BEDE42F50E3FA3C71F5A76724281D31D9C89F0F91FC1BE4918DB1C03A5838D0F9
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0343B6EC45728975EA5CBA6659BBB6062A5FF89EEA58BE3C80B619F322C87910FE092F7D45BB0F8EEE01ED3F20BABEC079D202AE677B243AB40B5431D497C55D75D
s = 0x0E7B0E675A9B24413D448B8CC119D2BF7B2D2DF032741C096634D6D65D0DBE3D5694625FB9E8104D3B842C1B0E2D0B98BEA19341E8676AEF66AE4EBA3D5475D5D16
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha224
msg = b"sample"
m = hf(msg).digest()
k = 0x121415EC2CD7726330A61F7F3FA5DE14BE9436019C4DB8CB4041F3B54CF31BE0493EE3F427FB906393D895A19C9523F3A1D54BB8702BD4AA9C99DAB2597B92113F3
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x1776331CFCDF927D666E032E00CF776187BC9FDD8E69D0DABB4109FFE1B5E2A30715F4CC923A4A5E94D2503E9ACFED92857B7F31D7152E0F8C00C15FF3D87E2ED2E
s = 0x050CB5265417FE2320BBB5A122B8E1A32BD699089851128E360E620A30C7E17BA41A666AF126CE100E5799B153B60528D5300D08489CA9178FB610A2006C254B41F
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha256
msg = b"sample"
m = hf(msg).digest()
k = 0x0EDF38AFCAAECAB4383358B34D67C9F2216C8382AAEA44A3DAD5FDC9C32575761793FEF24EB0FC276DFC4F6E3EC476752F043CF01415387470BCBD8678ED2C7E1A0
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x1511BB4D675114FE266FC4372B87682BAECC01D3CC62CF2303C92B3526012659D16876E25C7C1E57648F23B73564D67F61C6F14D527D54972810421E7D87589E1A7
s = 0x04A171143A83163D6DF460AAF61522695F207A58B95C0644D87E52AA1A347916E4F7A72930B1BC06DBE22CE3F58264AFD23704CBB63B29B931F7DE6C9D949A7ECFC
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha384
msg = b"sample"
m = hf(msg).digest()
k = 0x1546A108BC23A15D6F21872F7DED661FA8431DDBD922D0DCDB77CC878C8553FFAD064C95A920A750AC9137E527390D2D92F153E66196966EA554D9ADFCB109C4211
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x1EA842A0E17D2DE4F92C15315C63DDF72685C18195C2BB95E572B9C5136CA4B4B576AD712A52BE9730627D16054BA40CC0B8D3FF035B12AE75168397F5D50C67451
s = 0x1F21A3CEE066E1961025FB048BD5FE2B7924D0CD797BABE0A83B66F1E35EEAF5FDE143FA85DC394A7DEE766523393784484BDF3E00114A1C857CDE1AA203DB65D61
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha512
msg = b"sample"
m = hf(msg).digest()
k = 0x1DAE2EA071F8110DC26882D4D5EAE0621A3256FC8847FB9022E2B7D28E6F10198B1574FDD03A9053C08A1854A168AA5A57470EC97DD5CE090124EF52A2F7ECBFFD3
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x0C328FAFCBD79DD77850370C46325D987CB525569FB63C5D3BC53950E6D4C5F174E25A1EE9017B5D450606ADD152B534931D7D4E8455CC91F9B15BF05EC36E377FA
s = 0x0617CCE7CF5064806C467F678D3B4080D6F1CC50AF26CA209417308281B68AF282623EAA63E5B5C0723D8B8C37FF0777B1A20F8CCB1DCCC43997F1EE0E44DA4A67A
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha1
msg = b"test"
m = hf(msg).digest()
k = 0x0BB9F2BF4FE1038CCF4DABD7139A56F6FD8BB1386561BD3C6A4FC818B20DF5DDBA80795A947107A1AB9D12DAA615B1ADE4F7A9DC05E8E6311150F47F5C57CE8B222
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x13BAD9F29ABE20DE37EBEB823C252CA0F63361284015A3BF430A46AAA80B87B0693F0694BD88AFE4E661FC33B094CD3B7963BED5A727ED8BD6A3A202ABE009D0367
s = 0x1E9BB81FF7944CA409AD138DBBEE228E1AFCC0C890FC78EC8604639CB0DBDC90F717A99EAD9D272855D00162EE9527567DD6A92CBD629805C0445282BBC916797FF
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha224
msg = b"test"
m = hf(msg).digest()
k = 0x040D09FCF3C8A5F62CF4FB223CBBB2B9937F6B0577C27020A99602C25A01136987E452988781484EDBBCF1C47E554E7FC901BC3085E5206D9F619CFF07E73D6F706
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x1C7ED902E123E6815546065A2C4AF977B22AA8EADDB68B2C1110E7EA44D42086BFE4A34B67DDC0E17E96536E358219B23A706C6A6E16BA77B65E1C595D43CAE17FB
s = 0x177336676304FCB343CE028B38E7B4FBA76C1C1B277DA18CAD2A8478B2A9A9F5BEC0F3BA04F35DB3E4263569EC6AADE8C92746E4C82F8299AE1B8F1739F8FD519A4
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha256
msg = b"test"
m = hf(msg).digest()
k = 0x01DE74955EFAABC4C4F17F8E84D881D1310B5392D7700275F82F145C61E843841AF09035BF7A6210F5A431A6A9E81C9323354A9E69135D44EBD2FCAA7731B909258
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x00E871C4A14F993C6C7369501900C4BC1E9C7B0B4BA44E04868B30B41D8071042EB28C4C250411D0CE08CD197E4188EA4876F279F90B3D8D74A3C76E6F1E4656AA8
s = 0x0CD52DBAA33B063C3A6CD8058A1FB0A46A4754B034FCC644766CA14DA8CA5CA9FDE00E88C1AD60CCBA759025299079D7A427EC3CC5B619BFBC828E7769BCD694E86
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha384
msg = b"test"
m = hf(msg).digest()
k = 0x1F1FC4A349A7DA9A9E116BFDD055DC08E78252FF8E23AC276AC88B1770AE0B5DCEB1ED14A4916B769A523CE1E90BA22846AF11DF8B300C38818F713DADD85DE0C88
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x14BEE21A18B6D8B3C93FAB08D43E739707953244FDBE924FA926D76669E7AC8C89DF62ED8975C2D8397A65A49DCC09F6B0AC62272741924D479354D74FF6075578C
s = 0x133330865C067A0EAF72362A65E2D7BC4E461E8C8995C3B6226A21BD1AA78F0ED94FE536A0DCA35534F0CD1510C41525D163FE9D74D134881E35141ED5E8E95B979
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
ec = nistp521
hf = sha512
msg = b"test"
m = hf(msg).digest()
k = 0x16200813020EC986863BEDFC1B121F605C1215645018AEA1A7B215A564DE9EB1B38A67AA1128B80CE391C4FB71187654AAA3431027BFC7F395766CA988C964DC56D
self.assertEqual(k, rfc6979(m, x, ec, hf))
r = 0x13E99020ABF5CEE7525D16B69B229652AB6BDF2AFFCAEF38773B4B7D08725F10CDB93482FDCC54EDCEE91ECA4166B2A7C6265EF0CE2BD7051B7CEF945BABD47EE6D
s = 0x1FBD0013C674AA79CB39849527916CE301C66EA7CE8B80682786AD60F98F7E78A19CA69EFF5C57400E3B3A0AD66CE0978214D13BAF4E9AC60752F7B155E2DE4DCE3
sig = dsa.sign(msg, x, k, ec, hf)
self.assertEqual(r, sig[0])
self.assertIn(s, (sig[1], ec.n - sig[1]))
if __name__ == "__main__":
# execute only if run as a script
unittest.main()
|
"""Utilities file. Takes a given xml document and adds an attribute ID
to each of its paragraph tags"""
import xml.etree.ElementTree
def load(filename):
et = xml.etree.ElementTree.parse(filename)
p_tags = et.iter(tag="{http://www.tei-c.org/ns/1.0}p")
id_num = 0
for tag in p_tags:
tag.set("ID",str(id_num))
id_num += 1
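# note: ElementTree serializes the TEI namespace with an auto-generated
# ns0: prefix unless it is registered via ElementTree.register_namespace.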
et.write(filename)
if __name__ == "__main__":
load("../data/oneself_as_another.xml")
|
# PROJECT : kungfucms
# TIME : 19-2-8 下午10:20
# AUTHOR : Younger Shen
# EMAIL : younger.x.shen@gmail.com
# CELL : 13811754531
# WECHAT : 13811754531
# WEB : https://punkcoder.cn
import os
from datetime import datetime
from kungfucms.utils.common import get_env, get_base_path
def get_log_path():
env = get_env()
path = env.str('LOG_DIR')
return path if path.startswith('/') else os.path.join(get_base_path(), path)
def get_log_file():
now = datetime.now()
path = get_log_path()
return os.path.join(path, now.strftime('%Y-%m-%d.log'))
|
#!/usr/bin/env python
from datetime import datetime
from collections import Counter
data = open('day04.input').read().splitlines()
parsed = {}
for line in data:
dt, msg = line.split(']')
parsed[dt] = msg
days_sorted = sorted(parsed, key=lambda day: datetime.strptime(day[1:], "%Y-%m-%d %H:%M"))
guards = {}
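# guards maps guard id -> list of minutes (0-59) spent asleep, one entry per
# asleep minute per nap; a Counter over that list gives per-minute frequency.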
current_guard = None
for i in range(len(days_sorted)):
dt = days_sorted[i]
min_a = int(dt.split(':')[1])
msg = parsed[dt]
if 'Guard' in msg:
current_guard = int(msg.split()[1][1:])
if current_guard not in guards:
guards[current_guard] = []
if 'asleep' in msg:
min_b = int(days_sorted[i+1].split(':')[1])
guards[current_guard] += range(min_a, min_b)
sleepy_guard = max(guards, key=lambda x:len(guards[x]))
m = Counter(guards[sleepy_guard])
sleepy_max = max(m, key=m.get)
print(sleepy_guard * sleepy_max)
most_mins = 0
for guard, mins in guards.items():
m = Counter(mins)
for k, v in m.items():
if v > most_mins:
most_mins = v
saved_guard = guard
saved_min = k
print(saved_guard * saved_min)
|
import numpy as np
from numpy import ma
def fill_between_steps(ax, x, y1, y2=0, step_where='pre', **kwargs):
''' Fill the region between a step plot and a second step plot or constant baseline.
Parameters
----------
ax : Axes
The axes to draw to
x : array-like
Array/vector of index values.
y1 : array-like or float
Array/vector of values to be filled under.
y2 : array-Like or float, optional
Array/vector or bottom values for filled area. Default is 0.
step_where : {'pre', 'post', 'mid'}
where the step happens, same meanings as for `step`
**kwargs will be passed to the matplotlib fill_between() function.
Returns
-------
ret : PolyCollection
The added artist
'''
if step_where not in {'pre', 'post', 'mid'}:
raise ValueError("where must be one of {{'pre', 'post', 'mid'}} "
"You passed in {wh}".format(wh=step_where))
# make sure y values are up-converted to arrays
if np.isscalar(y1):
y1 = np.ones_like(x) * y1
if np.isscalar(y2):
y2 = np.ones_like(x) * y2
# temporary array for up-converting the values to step corners
# 3 x 2N - 1 array
vertices = np.vstack((x, y1, y2))
# this logic is lifted from lines.py
# this should probably be centralized someplace
if step_where == 'pre':
steps = ma.zeros((3, 2 * len(x) - 1), float)  # np.float was removed from NumPy; use the builtin
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
elif step_where == 'post':
steps = ma.zeros((3, 2 * len(x) - 1), float)
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
elif step_where == 'mid':
steps = ma.zeros((3, 2 * len(x)), float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
else:
raise RuntimeError("should never hit end of if-elif block for validated input")
# un-pack
xx, yy1, yy2 = steps
# now to the plotting part:
return ax.fill_between(xx, yy1, y2=yy2, **kwargs)
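# A minimal usage sketch (illustrative only; assumes matplotlib is installed,
# and the sample data below is made up):
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# x = np.arange(5)
# y = np.array([1.0, 3.0, 2.0, 4.0, 3.5])
# fill_between_steps(ax, x, y, step_where='mid', alpha=0.4)
# plt.show()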
|
"""
Django settings for NLPFrontEnd project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b1dae011-9b96-4d18-8e5c-9c3d84af535b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'FrontEnd',
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'bootstrap4',
'simple_history',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
]
ROOT_URLCONF = 'NLPFrontEnd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NLPFrontEnd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Argentina/Buenos_Aires'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
BOOTSTRAP3 = {
'include_jquery': True,
}
MEDIA_URL = '/documentos/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'documentos')
# My settings
LOGIN_URL = '/login/'
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields
from azure.ai.ml._schema.core.schema_meta import PatchedSchemaMeta
from azure.ai.ml._restclient.v2021_10_01.models import UsageUnit
from marshmallow.decorators import post_load
from azure.ai.ml._schema import NestedField, UnionField, StringTransformedEnum
from azure.ai.ml._utils.utils import camel_to_snake
class UsageNameSchema(metaclass=PatchedSchemaMeta):
value = fields.Str()
localized_value = fields.Str()
@post_load
def make(self, data, **kwargs):
from azure.ai.ml.entities import UsageName
return UsageName(**data)
class UsageSchema(metaclass=PatchedSchemaMeta):
id = fields.Str()
aml_workspace_location = fields.Str()
type = fields.Str()
unit = UnionField(
[
fields.Str(),
StringTransformedEnum(
allowed_values=UsageUnit.COUNT,
casing_transform=camel_to_snake,
),
]
)
current_value = fields.Int()
limit = fields.Int()
name = NestedField(UsageNameSchema)
|
#coding: utf-8
__author__ = "Lário dos Santos Diniz"
from django.views.generic import TemplateView
class IndexView(TemplateView):
template_name = 'core/index.html'
index = IndexView.as_view()
|
# Copyright 2020 ponai Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for the model output tensors
https://github.com/Project-ponai/ponai/wiki/ponai_Design
"""
from typing import Optional, Callable
import torch
import torch.nn.functional as F
from ponai.transforms.compose import Transform
from ponai.networks import one_hot
from ponai.transforms.utils import get_largest_connected_component_mask
from ponai.utils import ensure_tuple
class SplitChannel(Transform):
"""
Split PyTorch Tensor data along the channel dim; if there is only one channel, convert to One-Hot
format first based on the class number. Users can use this transform to compute metrics on every
single class to get more details of validation/evaluation. Expected input shape:
``(batch_size, num_channels, [spatial_dim_1, spatial_dim_2, ...])``
Args:
to_onehot: whether to convert the data to One-Hot format first.
Defaults to ``False``.
num_classes: the class number used to convert to One-Hot format if `to_onehot` is True.
Defaults to ``None``.
"""
def __init__(self, to_onehot: bool = False, num_classes: Optional[int] = None):
self.to_onehot = to_onehot
self.num_classes = num_classes
def __call__(self, img, to_onehot: Optional[bool] = None, num_classes: Optional[int] = None):
"""
Args:
to_onehot: whether to convert the data to One-Hot format first.
Defaults to ``self.to_onehot``.
num_classes: the class number used to convert to One-Hot format if `to_onehot` is True.
Defaults to ``self.num_classes``.
"""
if to_onehot or self.to_onehot:
if num_classes is None:
num_classes = self.num_classes
assert isinstance(num_classes, int), "must specify class number for One-Hot."
img = one_hot(img, num_classes)
n_classes = img.shape[1]
outputs = list()
for i in range(n_classes):
outputs.append(img[:, i : i + 1])
return outputs
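# Illustrative use (made-up tensor, not from the original file): splitting a
# 3-channel prediction yields a list of three single-channel tensors.
# pred = torch.rand(2, 3, 8, 8)
# per_class = SplitChannel()(pred)  # [t0, t1, t2], each of shape (2, 1, 8, 8)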
class Activations(Transform):
"""
Add activation operations to the model output, typically `Sigmoid` or `Softmax`.
Args:
sigmoid: whether to execute sigmoid function on model output before transform.
Defaults to ``False``.
softmax: whether to execute softmax function on model output before transform.
Defaults to ``False``.
other: callable function to execute other activation layers, for example:
`other = lambda x: torch.tanh(x)`. Defaults to ``None``.
"""
def __init__(self, sigmoid: bool = False, softmax: bool = False, other: Optional[Callable] = None):
self.sigmoid = sigmoid
self.softmax = softmax
self.other = other
def __call__(
self, img, sigmoid: Optional[bool] = None, softmax: Optional[bool] = None, other: Optional[Callable] = None
):
"""
Args:
sigmoid: whether to execute sigmoid function on model output before transform.
Defaults to ``self.sigmoid``.
softmax: whether to execute softmax function on model output before transform.
Defaults to ``self.softmax``.
other: callable function to execute other activation layers, for example:
`other = lambda x: torch.tanh(x)`. Defaults to ``self.other``.
Raises:
ValueError: sigmoid=True and softmax=True are not compatible.
ValueError: act_func must be a Callable function.
"""
if sigmoid is True and softmax is True:
raise ValueError("sigmoid=True and softmax=True are not compatible.")
if sigmoid or self.sigmoid:
img = torch.sigmoid(img)
if softmax or self.softmax:
img = torch.softmax(img, dim=1)
act_func = self.other if other is None else other
if act_func is not None:
if not callable(act_func):
raise ValueError("act_func must be a Callable function.")
img = act_func(img)
return img
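# Illustrative use (made-up logits): apply softmax over the channel dim.
# act = Activations(softmax=True)
# probs = act(torch.randn(2, 3, 8, 8))  # values along dim=1 now sum to 1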
class AsDiscrete(Transform):
"""
Execute after model forward to transform model output to discrete values.
It can perform the following operations:
- execute `argmax` for input logits values.
- threshold input value to 0.0 or 1.0.
- convert input value to One-Hot format
Args:
argmax: whether to execute argmax function on input data before transform.
Defaults to ``False``.
to_onehot: whether to convert input data into the one-hot format.
Defaults to ``False``.
n_classes: the number of classes to convert to One-Hot format.
Defaults to ``None``.
threshold_values: whether to threshold the float values to 0 or 1.
Defaults to ``False``.
logit_thresh: the threshold value for the thresholding operation.
Defaults to ``0.5``.
"""
def __init__(
self,
argmax: bool = False,
to_onehot: bool = False,
n_classes: Optional[int] = None,
threshold_values: bool = False,
logit_thresh: float = 0.5,
):
self.argmax = argmax
self.to_onehot = to_onehot
self.n_classes = n_classes
self.threshold_values = threshold_values
self.logit_thresh = logit_thresh
def __call__(
self,
img,
argmax: Optional[bool] = None,
to_onehot: Optional[bool] = None,
n_classes: Optional[int] = None,
threshold_values: Optional[bool] = None,
logit_thresh: Optional[float] = None,
):
"""
Args:
argmax: whether to execute argmax function on input data before transform.
Defaults to ``self.argmax``.
to_onehot: whether to convert input data into the one-hot format.
Defaults to ``self.to_onehot``.
n_classes: the number of classes to convert to One-Hot format.
Defaults to ``self.n_classes``.
threshold_values: whether to threshold the float values to 0 or 1.
Defaults to ``self.threshold_values``.
logit_thresh: the threshold value for the thresholding operation.
Defaults to ``self.logit_thresh``.
"""
if argmax or self.argmax:
img = torch.argmax(img, dim=1, keepdim=True)
if to_onehot or self.to_onehot:
_nclasses = self.n_classes if n_classes is None else n_classes
assert isinstance(_nclasses, int), "One of self.n_classes or n_classes must be an integer"
img = one_hot(img, _nclasses)
if threshold_values or self.threshold_values:
img = img >= (self.logit_thresh if logit_thresh is None else logit_thresh)
return img.float()
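# Illustrative use (made-up logits): argmax, then one-hot re-encode.
# discrete = AsDiscrete(argmax=True, to_onehot=True, n_classes=3)
# out = discrete(torch.randn(2, 3, 8, 8))  # one-hot, shape (2, 3, 8, 8)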
class KeepLargestConnectedComponent(Transform):
"""
Keeps only the largest connected component in the image.
This transform can be used as a post-processing step to clean up over-segment areas in model output.
The input is assumed to be a PyTorch Tensor:
1) With shape (batch_size, 1, spatial_dim1[, spatial_dim2, ...]) and the values correspond to expected labels.
2) With shape (batch_size, C, spatial_dim1[, spatial_dim2, ...]) and the values are 0 or 1 in each one-hot channel.
Note:
For single channel data, 0 will be treated as background and the over-segment pixels will be set to 0.
For one-hot data, the over-segment pixels will be set to 0 in its channel.
For example:
Use KeepLargestConnectedComponent with applied_labels=[1], connectivity=1::
[1, 0, 0] [0, 0, 0]
[0, 1, 1] => [0, 1 ,1]
[0, 1, 1] [0, 1, 1]
Use KeepLargestConnectedComponent with applied_labels[1, 2], independent=False, connectivity=1::
[0, 0, 1, 0 ,0] [0, 0, 1, 0 ,0]
[0, 2, 1, 1 ,1] [0, 2, 1, 1 ,1]
[1, 2, 1, 0 ,0] => [1, 2, 1, 0 ,0]
[1, 2, 0, 1 ,0] [1, 2, 0, 0 ,0]
[2, 2, 0, 0 ,2] [2, 2, 0, 0 ,0]
Use KeepLargestConnectedComponent with applied_labels[1, 2], independent=True, connectivity=1::
[0, 0, 1, 0 ,0] [0, 0, 1, 0 ,0]
[0, 2, 1, 1 ,1] [0, 2, 1, 1 ,1]
[1, 2, 1, 0 ,0] => [0, 2, 1, 0 ,0]
[1, 2, 0, 1 ,0] [0, 2, 0, 0 ,0]
[2, 2, 0, 0 ,2] [2, 2, 0, 0 ,0]
Use KeepLargestConnectedComponent with applied_labels[1, 2], independent=False, connectivity=2::
[0, 0, 1, 0 ,0] [0, 0, 1, 0 ,0]
[0, 2, 1, 1 ,1] [0, 2, 1, 1 ,1]
[1, 2, 1, 0 ,0] => [1, 2, 1, 0 ,0]
[1, 2, 0, 1 ,0] [1, 2, 0, 1 ,0]
[2, 2, 0, 0 ,2] [2, 2, 0, 0 ,2]
"""
def __init__(self, applied_labels, independent: bool = True, connectivity: Optional[int] = None):
"""
Args:
applied_labels (int, list or tuple of int): Labels to apply the connected component analysis on.
If the data has a single channel, pixels whose values are not in this list remain unchanged.
If the data is in one-hot format, this determines which channels the analysis is applied to.
independent (bool): whether to treat the applied labels independently or as a whole; default is `True`.
Example use case: label 1 is liver and label 2 is liver tumor; to keep the tumor connected
to the liver, set "independent" to False.
connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
"""
super().__init__()
self.applied_labels = ensure_tuple(applied_labels)
self.independent = independent
self.connectivity = connectivity
def __call__(self, img):
"""
Args:
img: shape must be (batch_size, C, spatial_dim1[, spatial_dim2, ...]).
Returns:
A PyTorch Tensor with shape (batch_size, C, spatial_dim1[, spatial_dim2, ...]).
"""
channel_dim = 1
if img.shape[channel_dim] == 1:
img = torch.squeeze(img, dim=channel_dim)
if self.independent:
for i in self.applied_labels:
foreground = (img == i).type(torch.uint8)
mask = get_largest_connected_component_mask(foreground, self.connectivity)
img[foreground != mask] = 0
else:
foreground = torch.zeros_like(img)
for i in self.applied_labels:
foreground += (img == i).type(torch.uint8)
mask = get_largest_connected_component_mask(foreground, self.connectivity)
img[foreground != mask] = 0
output = torch.unsqueeze(img, dim=channel_dim)
else:
# one-hot data is assumed to have binary value in each channel
if self.independent:
for i in self.applied_labels:
foreground = img[:, i, ...].type(torch.uint8)
mask = get_largest_connected_component_mask(foreground, self.connectivity)
img[:, i, ...][foreground != mask] = 0
else:
applied_img = img[:, self.applied_labels, ...].type(torch.uint8)
foreground = torch.any(applied_img, dim=channel_dim)
mask = get_largest_connected_component_mask(foreground, self.connectivity)
background_mask = torch.unsqueeze(foreground != mask, dim=channel_dim)
background_mask = torch.repeat_interleave(background_mask, len(self.applied_labels), dim=channel_dim)
applied_img[background_mask] = 0
img[:, self.applied_labels, ...] = applied_img.type(img.type())
output = img
return output
class LabelToContour(Transform):
"""
Return the contour of binary input images that only compose of 0 and 1, with Laplace kernel
set as default for edge detection. Typical usage is to plot the edge of label or segmentation output.
Args:
kernel_type: the method applied to do edge detection, default is "Laplace".
"""
def __init__(self, kernel_type: str = "Laplace"):
if kernel_type != "Laplace":
raise NotImplementedError("currently, LabelToContour only supports Laplace kernel.")
self.kernel_type = kernel_type
def __call__(self, img):
"""
Args:
img: torch tensor data to extract the contour, with shape: [batch_size, channels, height, width[, depth]]
Returns:
A torch tensor with the same shape as img, note:
1. it's the binary classification result of whether a pixel is edge or not.
2. in order to keep the original shape of mask image, we use padding as default.
3. the edge detection is only approximate because of defects inherent to the Laplace kernel;
ideally the edges would be one pixel thin, but in practice they have some thickness.
"""
channels = img.shape[1]
if img.ndim == 4:
kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=torch.float32, device=img.device)
kernel = kernel.repeat(channels, 1, 1, 1)
contour_img = F.conv2d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
elif img.ndim == 5:
kernel = -1 * torch.ones(3, 3, 3, dtype=torch.float32, device=img.device)
kernel[1, 1, 1] = 26
kernel = kernel.repeat(channels, 1, 1, 1, 1)
contour_img = F.conv3d(img, kernel, bias=None, stride=1, padding=1, dilation=1, groups=channels)
else:
raise RuntimeError("the dimensions of img should be 4 or 5.")
torch.clamp_(contour_img, min=0.0, max=1.0)
return contour_img
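# Illustrative use (made-up binary mask): extract approximate label edges.
# mask = (torch.rand(1, 1, 16, 16) > 0.5).float()
# edges = LabelToContour()(mask)  # same shape; 1.0 marks approximate edges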
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import sys
import os.path
import re
class ResidueError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Sequence(object):
def __init__(self, fn=None, ssq=None):
self.seq_=""
self.name_=""
if fn is None and ssq is None:
self.name_ = "Unknown"
elif fn is not None and ssq is None and os.path.exists(fn):
fi = open(fn, 'r')
fl = fi.readline().rstrip()
if fl.startswith(">"):
self.name_ = fl[1:]
for ln in fi:
self.seq_ = self.seq_ + ln.rstrip()
self.seq_ = self.seq_.replace(" ", "")
else:
print(fn, "is not a FASTA format file!")
sys.exit()
elif fn is not None and ssq is not None:
self.name_ = fn
self.seq_ = ssq
self.seq_ = self.seq_.replace(" ", "")
elif ssq is None and fn is not None and not os.path.exists(fn):
print("File", fn, "does not exist!!")
sys.exit()
if not self.is_dna():
self.is_protein()
def get_name(self):
return self.name_
def get_sequence(self):
return self.seq_
def is_dna(self):
return not re.search(r"[^ATGC]",self.seq_)
def is_protein(self):
if self.is_dna():
return False
else:
for i in self.seq_:
if i not in ['U','G','A','V','L','I','P','F','Y','W','S','T','C','M','N','Q','K','R','H','D','E','X','Z','B']: #'X' a feature where the identity of the amino acid is unknown (an X is shown at this position in the sequence) and the only information concerning the modification is that the N-terminus is blocked: P80979 (Blocked amino end (Xaa))
#'Z' - Note: Pyro-Glu is often indicated in papers as ‘pGlu’ and sometimes, in one-letter code as “U”, although this is now used for selenocysteine. In figures of publications, it may be cited as Z, pQ or E
raise ResidueError("Residue '%s' cannot be identified as either a nucleotide or amino acid for sequence %s."%(i, self.name_))
return True
def write(self, fo=None):
"""Write the sequence in FASTA format, 60 residues per line, to the file handle fo, or to stdout if fo is None."""
header = ">" + self.name_
if fo is None:
print(header)
for c in range(0, len(self.seq_), 60):
print(self.seq_[c:c + 60])
else:
fo.write(header + "\n")
for c in range(0, len(self.seq_), 60):
fo.write(self.seq_[c:c + 60] + "\n")
def __len__(self):
return len(self.seq_)
def __str__(self):
sq = self.name_+","+self.seq_
return sq
def __getitem__(self,i):
return self.seq_[i]
def __gt__(self, sq):
return len(self.seq_)>len(sq)
def __lt__(self, sq):
return len(self.seq_)<len(sq)
def __ge__(self, sq):
return len(self.seq_)>=len(sq)
def __le__(self,sq):
return len(self.seq_)<=len(sq)
def x_percent(self, ch):  # percentage of residue ch in the sequence
xc = self.seq_.count(ch)
return (xc * 100) / len(self.seq_)
def get_segment(self, st, en):
ss = ""
if st <= en:
ss=self.seq_[st-1:en]
else:
raise ValueError("Start is greater than end.")
return ss
#to check whether a fusion is a pseudogene we need to blast it against a pseudogenedb and analyse the results (outputformat6)
''' def is_mapping(self, blastnpath, blastndb):
ispseudo=False
blasttop5rec=executeBlastnOutfmt6Wrapper(self, blastnpath, blastndb, 5)
if len(blasttop5rec)>0:
for brec in blasttop5rec:
if brec.pident > 99.9999 and brec.evalue < 0.00001 and brec.length > 98:
ispseudo=True
break
return ispseudo
def get_mapping_database_id(self, blastnpath, blastndb):
pseudoid=""
blasttop5rec=executeBlastnOutfmt6Wrapper(self, blastnpath, blastndb, 5)
if len(blasttop5rec)>0:
for brec in blasttop5rec:
if brec.pident > 99.9999 and brec.evalue < 0.00001 and brec.length > 98:
pseudoid=brec.sseqid
break
return pseudoid
def get_mapping_database_blast_records(self, blastnpath, blastndb):
pseudorec=[]
blasttop5rec=executeBlastnOutfmt6Wrapper(self, blastnpath, blastndb, 5)
if len(blasttop5rec)>0:
for brec in blasttop5rec:
print(brec.to_string())
if brec.pident > 98 and brec.evalue < 0.00001 and brec.mismatch <= 2 and brec.length >= 98 :
pseudorec.append(brec)
break
return pseudorec
#return a vector of pseudogene alignments - format 6
#arguments: completepath of blastN, pseudogene database
#start from here, look at the example saved in ..
# def blast_pseudogene_db(blastpath, psedogenedb):
# blastcmd="echo -e \">"+self.get_fusion_name()+"\n"+self.junctionsequence_+"\"| "+blastpath+" -db "+pseudogenedb+" -outfmt 6 -num_alignments 5"
# blastresult=subprocess.run(blastcmd, )
'''
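# A minimal usage sketch (hypothetical file name; assumes a FASTA file exists
# at that path):
# seq = Sequence("example.fasta")
# print(seq.get_name(), len(seq))
# print(seq.get_segment(1, 10))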
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
from typing import List
from triton_python_backend_utils import (
InferenceRequest,
InferenceResponse,
Tensor,
get_input_tensor_by_name,
get_output_config_by_name,
triton_string_to_numpy,
)
import nvtabular
from nvtabular.dispatch import _is_list_dtype
from nvtabular.inference.triton import _convert_tensor
from nvtabular.inference.workflow.hugectr import HugeCTRWorkflowRunner
from nvtabular.inference.workflow.pytorch import PyTorchWorkflowRunner
from nvtabular.inference.workflow.tensorflow import TensorflowWorkflowRunner
LOG = logging.getLogger("nvtabular")
class TritonPythonModel:
def initialize(self, args):
# Arg parsing
workflow_path = os.path.join(
args["model_repository"], str(args["model_version"]), "workflow"
)
model_device = args["model_instance_kind"]
# Workflow instantiation
self.workflow = nvtabular.Workflow.load(workflow_path)
# Config loading and parsing
self.model_config = json.loads(args["model_config"])
model_framework = self.model_config["parameters"]["output_model"]["string_value"]
# Dtype parsing
input_dtypes = self.workflow.input_dtypes.items()
self.input_dtypes, self.input_multihots = _parse_input_dtypes(input_dtypes)
self.output_dtypes = dict()
for name, dtype in self.workflow.output_dtypes.items():
if not _is_list_dtype(dtype):
self._set_output_dtype(name)
else:
self._set_output_dtype(name + "__nnzs")
self._set_output_dtype(name + "__values")
if model_framework == "hugectr":
runner_class = HugeCTRWorkflowRunner
elif model_framework == "pytorch":
runner_class = PyTorchWorkflowRunner
else:
runner_class = TensorflowWorkflowRunner
self.runner = runner_class(
self.workflow, self.output_dtypes, self.model_config, model_device
)
def _set_output_dtype(self, name):
conf = get_output_config_by_name(self.model_config, name)
self.output_dtypes[name] = triton_string_to_numpy(conf["data_type"])
def execute(self, requests: List[InferenceRequest]) -> List[InferenceResponse]:
"""Transforms the input batches by running through a NVTabular workflow.transform
function.
"""
responses = []
for request in requests:
# transform the triton tensors to a dict of name:numpy tensor
input_tensors = {
name: _convert_tensor(get_input_tensor_by_name(request, name))
for name in self.input_dtypes
}
# multihots are represented as a tuple of (values, offsets)
for name, dtype in self.input_multihots.items():
values = _convert_tensor(get_input_tensor_by_name(request, name + "__values"))
offsets = _convert_tensor(get_input_tensor_by_name(request, name + "__nnzs"))
input_tensors[name] = (values, offsets)
raw_tensor_tuples = self.runner.run_workflow(input_tensors)
result = [Tensor(name, data) for name, data in raw_tensor_tuples]
responses.append(InferenceResponse(result))
return responses
def _parse_input_dtypes(dtypes):
input_dtypes = {col: dtype for col, dtype in dtypes if not _is_list_dtype(dtype)}
input_multihots = {col: dtype for col, dtype in dtypes if _is_list_dtype(dtype)}
return input_dtypes, input_multihots
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright 2021 mRuggi <mRuggi@PC>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from stellar_sdk import Keypair,Server,Network,TransactionBuilder
import requests
keypair = Keypair.from_secret("YOURSECRET")
bumptohint = b'SOMESTRANGENAMESTARTINGWITHn'  # the name you need to find is hidden in a riddle, look at the resources!!
bumpto = int("".join(str(b) for b in bumptohint)) #for each byte in the buffer append it to an empty string and convert the result into an integer number
server = Server(horizon_url="https://horizon-testnet.stellar.org")
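# BumpSequence moves the source account's sequence number forward to `bumpto`;
# a value at or below the current sequence number leaves the account unchanged.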
tx = (
TransactionBuilder(
source_account = server.load_account(account_id=keypair.public_key),
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=100)
.append_bump_sequence_op(bumpto)
.build()
)
tx.sign(keypair.secret)
response = server.submit_transaction(tx)
print("\nTransaction hash: {}".format(response["hash"]))
print("Premi un tasto per continuare")
input()
|
# Create a function to return the first initial of a name
# Parameters:
# name: name of person
# force_uppercase: indicates if you always want the initial to be in uppercase: default is True
# Return value
# first letter of name passed in
def get_initial(name, force_uppercase=True):
if force_uppercase:
initial = name[0:1].upper()
else:
initial = name[0:1]
return initial
# Ask for someone's name and return the initial
first_name = input('Enter your first name: ')
# Call get_initial function to retrieve first letter of name
# not passing a value for force_uppercase so default value is used
first_name_initial = get_initial(first_name)
print('Your initial is: ' + first_name_initial)
|
# Given a 2D grid, each cell is either a wall 'W',
# an enemy 'E' or empty '0' (the number zero),
# return the maximum enemies you can kill using one bomb.
# The bomb kills all the enemies in the same row and column from
# the planted point until it hits the wall since the wall is too strong
# to be destroyed.
# Note that you can only put the bomb at an empty cell.
# Example:
# For the given grid
# 0 E 0 0
# E 0 W E
# 0 E 0 0
# return 3. (Placing a bomb at (1,1) kills 3 enemies)
def max_killed_enemies(grid):
if not grid: return 0
m, n = len(grid), len(grid[0])
max_killed = 0
row_e, col_e = 0, [0] * n
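# row_e caches the enemy count for the current row segment; col_e[j] caches it
# for the current column segment of column j. Each is recomputed only when a
# wall (or the grid edge) starts a new segment, giving O(m*n) total work.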
for i in range(m):
for j in range(n):
if j == 0 or grid[i][j-1] == 'W':
row_e = row_kills(grid, i, j)
if i == 0 or grid[i-1][j] == 'W':
col_e[j] = col_kills(grid, i, j)
if grid[i][j] == '0':
max_killed = max(max_killed, row_e + col_e[j])
return max_killed
# calculate killed enemies for row i from column j
def row_kills(grid, i, j):
num = 0
while j < len(grid[0]) and grid[i][j] != 'W':
if grid[i][j] == 'E':
num += 1
j += 1
return num
# calculate killed enemies for column j from row i
def col_kills(grid, i, j):
num = 0
while i < len(grid) and grid[i][j] != 'W':
if grid[i][j] == 'E':
num += 1
i += 1
return num
grid = [
["0", "E", "0", "E"],
["E", "E", "E", "0"],
["E", "0", "W", "E"],
["0", "E", "0", "0"]]
print(grid)
print(max_killed_enemies(grid))
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Pooled supplier molecule design table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def create_table(metadata, supplier_molecule_design_tbl,
molecule_design_set_tbl):
"Table factory."
tbl = Table('pooled_supplier_molecule_design', metadata,
Column('supplier_molecule_design_id', Integer,
ForeignKey(
supplier_molecule_design_tbl.c.supplier_molecule_design_id),
primary_key=True),
Column('molecule_design_set_id', Integer,
ForeignKey(molecule_design_set_tbl.c.molecule_design_set_id,
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False, index=True),
)
return tbl
|