| id | content |
|---|---|
11564715
|
from .rscnn_ssn_cls import RSCNN_SSN as RSCNN_SSN_Cls
from .rscnn_msn_seg import RSCNN_MSN as RSCNN_MSN_Seg
|
11564726
|
import logging
import colander
from grano.core import db, celery
from grano.lib.data import CSVImporter
from grano.model import Pipeline, File
from grano.logic import pipelines, entities, loader
from grano.logic.references import FileRef, ProjectRef
from grano.logic.validation import Invalid
log = logging.getLogger(__name__)
MODES = ['aliases', 'entities', 'relations']
class ImportBaseValidator(colander.MappingSchema):
file = colander.SchemaNode(FileRef())
project = colander.SchemaNode(ProjectRef())
source_url = colander.SchemaNode(colander.String(),
empty=None, missing=None)
mode = colander.SchemaNode(colander.String(),
validator=colander.OneOf(MODES))
def make_importer(project, account, data):
""" Create an importer pipeline to represent the data
import process which will be executed. """
validator = ImportBaseValidator()
sane = validator.deserialize(data)
# TODO: validate mapping.
config = {
'mode': sane.get('mode'),
'file': sane.get('file').id,
'source_url': sane.get('source_url'),
'mapping': data.get('mapping'),
'relation_schema': data.get('relation_schema'),
'source_schema': data.get('source_schema'),
'target_schema': data.get('target_schema'),
'entity_schema': data.get('entity_schema')
}
pipeline = pipelines.create(project, 'import',
sane.get('file').file_name, config, account)
db.session.commit()
run_importer.delay(pipeline.id)
return pipeline
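# Illustrative request payload for make_importer (a sketch; the keys follow
# ImportBaseValidator and the config assembly above, the values are
# hypothetical):
#
# data = {
#     'file': file_id,                     # resolved by FileRef
#     'project': project_slug,             # resolved by ProjectRef
#     'mode': 'entities',                  # one of MODES
#     'source_url': 'http://example.com/companies.csv',
#     'mapping': {'Name': {'object': 'entity', 'attribute': 'name'}},
#     'entity_schema': 'company'
# }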
@celery.task
def run_importer(pipeline_id):
""" Perform a raw data import with a given mode. """
pipeline = Pipeline.by_id(pipeline_id)
pipelines.start(pipeline)
mode = pipeline.config.get('mode')
file_id = pipeline.config.get('file')
file_ = File.by_id(file_id)
if file_ is None:
        pipelines.log_error(pipeline, 'File object deleted: %s' % file_id,
                            'Error', {})
elif mode == 'aliases':
import_aliases(pipeline, file_.fh)
else:
import_objects(pipeline, file_.fh)
pipelines.finish(pipeline)
def _row_source_url(pipeline, row):
""" Determine the best available source URL for the given
row of data. """
for k, v in pipeline.config.get('mapping', {}).items():
if v.get('attribute') == '_source_url':
value = row.get(k, '').strip()
if len(value):
return value
source_url = pipeline.config.get('source_url')
if source_url is not None and len(source_url.strip()):
return source_url
return None
def import_aliases(pipeline, fh):
""" Import aliases from a CSV source. This will not create
new entities, but re-name existing entities or merge two
entities if one's name is given as an alias for the other. """
importer = CSVImporter(fh)
canonical_column, alias_column = None, None
for k, v in pipeline.config.get('mapping', {}).items():
if v.get('attribute') == 'alias':
alias_column = k
elif v.get('attribute') == 'canonical':
canonical_column = k
for i, row in enumerate(importer):
source_url = _row_source_url(pipeline, row)
entities.apply_alias(pipeline.project, pipeline.author,
row.get(canonical_column),
row.get(alias_column),
source_url=source_url)
if i % 100 == 0:
percentage = int((float(i) / max(1, len(importer))) * 100)
pipeline.percent_complete = percentage
db.session.commit()
def import_objects(pipeline, fh):
""" Import objects - either individual entities or relations
and their involved entities (the target and source) - from a
CSV file. """
# Code is a bit ugly as this handles two cases at once:
# mode 'relations' where we import a source, target and relation
# mode 'entities' where we only import a single entity
config = pipeline.config
mode = config.get('mode')
mapping = config.get('mapping')
importer = CSVImporter(fh)
loader_ = loader.Loader(pipeline.project.slug, account=pipeline.author,
ignore_errors=True)
for i, row in enumerate(importer):
try:
url = _row_source_url(pipeline, row)
rel_data = {}
source = loader_.make_entity(config.get('source_schema'),
source_url=url)
target = loader_.make_entity(config.get('target_schema'),
source_url=url)
entity = loader_.make_entity(config.get('entity_schema'),
source_url=url)
# Try to assign each column to the appropriate object in this
# loader.
for column, spec in mapping.items():
attr = spec.get('attribute')
obj = spec.get('object')
value = row.get(column)
if not attr or not len(attr.strip()):
continue
if mode == 'entities':
entity.set(attr, value)
elif obj == 'relation':
rel_data[attr] = value
elif obj == 'source':
source.set(attr, value)
elif obj == 'target':
target.set(attr, value)
# Relation can only be saved once the entities are available,
# hence we're storing the relation property values and now
# making the whole thing.
if mode == 'relations':
source.save()
target.save()
rel = loader_.make_relation(config.get('relation_schema'),
source, target, source_url=url)
for k, v in rel_data.items():
rel.set(k, v)
rel.save()
else:
entity.save()
# indicate progress, and commit every now and then.
if i % 100 == 0:
percentage = int((float(i) / max(1, len(importer))) * 100)
pipeline.percent_complete = percentage
loader_.persist()
        except Invalid as inv:
pipelines.log_warn(pipeline, unicode(inv), 'Invalid data',
inv.as_dict())
        except Exception as exc:
pipelines.log_error(pipeline, unicode(exc), 'Error', {})
|
11564755
|
import logging
import sys
import time
from datetime import datetime, timezone
import simplejson as json
from termcolor import colored
class ColorfulFormatter(logging.Formatter):
converter = time.gmtime
palette = {
logging.DEBUG: "blue",
logging.INFO: "white",
logging.WARNING: "cyan",
logging.ERROR: "red",
logging.CRITICAL: "magenta",
}
is_tty = sys.stderr.isatty()
def format(self, record):
s = super().format(record)
return colored(s, color=self.palette.get(record.levelno)) if self.is_tty else s
class JsonFormatter(logging.Formatter):
converter = time.gmtime
def __init__(self, environment: str, etl_id: str):
super().__init__()
self.environment = environment
self.etl_id = etl_id
def as_utc_iso8601(self, ts) -> str:
return (
datetime.fromtimestamp(ts, timezone.utc)
.isoformat("T", timespec="milliseconds")
.replace("+00:00", "Z")
)
def format(self, record: logging.LogRecord) -> str:
"""Format log record by creating a JSON-format in a string."""
values = {
"application_name": "arthur-etl",
"environment": self.environment,
"gmtime": self.as_utc_iso8601(record.created),
"etl_id": self.etl_id,
"log_level": record.levelname,
"log_severity": record.levelno,
"logger": record.name,
"message": record.getMessage(),
"process.id": record.process,
"process.name": record.processName,
"source.filename": record.filename,
"source.function": record.funcName,
"source.line_number": record.lineno,
"source.module": record.module,
"source.pathname": record.pathname,
"thread.name": record.threadName,
"timestamp": int(record.created * 1000.0),
}
# Always add metrics if any are present.
if hasattr(record, "metrics"):
values["metrics"] = record.metrics # type: ignore
# Always add exception (value) as a field if exception info is present.
if record.exc_info is not None and isinstance(record.exc_info, tuple):
values["exception.class"] = record.exc_info[1].__class__.__name__
values["exception.message"] = str(record.exc_info[1])
# Always add formatted exception to message if exception info is present.
if record.exc_text is not None:
if values["message"] != "\n":
values["message"] += "\n" # type: ignore
values["message"] += record.exc_text # type: ignore
return json.dumps(values, default=str, separators=(",", ":"), sort_keys=True)
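# Minimal wiring sketch (an illustration, assuming this module is importable):
# emit every record as one JSON line on stderr; the "metrics" dict passed via
# ``extra`` becomes the record attribute picked up in JsonFormatter.format above.
if __name__ == "__main__":
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(JsonFormatter(environment="dev", etl_id="etl-0001"))
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)
    root_logger.info("pipeline started", extra={"metrics": {"rows": 42}})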
|
11564758
|
import FWCore.ParameterSet.Config as cms
APVPhases = cms.EDProducer('ConfigurableAPVCyclePhaseProducer',
defaultPartitionNames = cms.vstring("TI_13-JUN-2009_1",
"TO_30-JUN-2009_1",
"TP_09-JUN-2009_1",
"TM_09-JUN-2009_1"
),
defaultPhases = cms.vint32(-1,-1,-1,-1),
runPhases = cms.VPSet(
cms.PSet( runNumber = cms.int32(100967),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(100995),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(101012),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(101018),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(101043),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(101045),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(102130),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(102169),phases = cms.untracked.vint32(30),partitions = cms.untracked.vstring("TM_09-JUN-2009_1")),
cms.PSet( runNumber = cms.int32(105612),phases = cms.untracked.vint32(-1,-1,-1,-1)),
cms.PSet( runNumber = cms.int32(105755),phases = cms.untracked.vint32(30,30,30,30)),
cms.PSet( runNumber = cms.int32(105765),phases = cms.untracked.vint32(30,30,30,30)),
cms.PSet( runNumber = cms.int32(105820),phases = cms.untracked.vint32(30,30,30,30)),
cms.PSet( runNumber = cms.int32(106019),phases = cms.untracked.vint32(30,30,30,30)),
cms.PSet( runNumber = cms.int32(108219),phases = cms.untracked.vint32(30,30,30,30)),
cms.PSet( runNumber = cms.int32(108239),phases = cms.untracked.vint32(30,30,30,30))
)
)
|
11564759
|
from functools import wraps
def cachedprop(fn):
'''Decorator which creates a cached property.'''
@wraps(fn)
def get(self):
cache_name = '__' + fn.__name__ + '__cache'
try:
return self.__dict__[cache_name]
except KeyError:
ret = fn(self)
self.__dict__[cache_name] = ret
return ret
return property(get)
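# Usage sketch for cachedprop: the property body runs once per instance;
# later reads return the value cached in the instance __dict__.
#
# class Circle(object):
#     def __init__(self, r):
#         self.r = r
#     @cachedprop
#     def area(self):
#         return 3.141592653589793 * self.r ** 2
#
# c = Circle(2.0)
# c.area  # computed
# c.area  # served from the cache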
def rangecachedfn(fn):
    '''Decorator which creates a range-memoized function. It speeds up a
    function whose result depends on a numeric parameter and is constant
    over ranges of that parameter. The decorated function must take the
    numeric parameter as its second positional argument, and must return
    its result together with the lower and upper boundary of the range.
    The result is cached for all calls whose second parameter falls in the
    returned range and whose other parameters are the same. Keyword
    arguments are not supported.'''
memo = {}
@wraps(fn)
def wrapper(*args):
try:
return memo[args], None, None
except KeyError:
rv, lo, hi = fn(*args)
if hi:
for i in range(lo, hi):
newargs = list(args)
newargs[1] = i
memo[tuple(newargs)] = rv
else:
memo[args] = rv
return rv, lo, hi
return wrapper
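# Usage sketch for rangecachedfn (a hypothetical function): the decorated
# function reports the range over which its result is constant, so a single
# call populates the cache for every value of the second argument in [lo, hi).
#
# @rangecachedfn
# def bracket(year, income):
#     if income < 10000:
#         return 0.10, 0, 10000
#     return 0.25, 10000, 100000
#
# bracket(2020, 5000)   # computes and caches incomes 0..9999 for 2020
# bracket(2020, 9999)   # served from the cache as (0.10, None, None)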
def cachedfn(fn):
'''Decorator which creates a memoized function.'''
memo = {}
@wraps(fn)
def wrapper(*args):
try:
return memo[args]
except KeyError:
#print 'Calling %s(%s)' % (fn.__name__, ', '.join([str(x) for x in args]))
rv = fn(*args)
memo[args] = rv
return rv
return wrapper
def singleton(cls):
    '''Convert the given class into a singleton.
    This function is meant to be used as a decorator (which should be applied to
classes, e.g.:
@singleton
class Foo:
pass
    After this, the class can be called (as if a constructor was called), but
the call will always return the same instance, e.g.:
a = Foo()
b = Foo()
assert a is b
assert id(a) == id(b)
Implementation taken from PEP 318 examples.
'''
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
|
11564761
|
from django.core.exceptions import ValidationError
from rest_framework import status
from .views import JSONResponse
class ValidationErrorToHttpErrorMiddleware(object):
"""
Catch ValidationError exceptions and render them as JSONResponse
"""
def process_exception(self, request, exception):
if isinstance(exception, ValidationError):
content = {'error': exception.message}
return JSONResponse(content, status.HTTP_400_BAD_REQUEST)
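# Wiring sketch (hypothetical dotted path): register the middleware in the
# old-style Django settings that this process_exception hook targets.
#
# MIDDLEWARE_CLASSES = [
#     # ...
#     'myapp.middleware.ValidationErrorToHttpErrorMiddleware',
# ]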
|
11564767
|
import inspect
from tornado import gen
def coroutine(func):
"""Tornado-JSON compatible wrapper for ``tornado.gen.coroutine``
Annotates original argspec.args of ``func`` as attribute ``__argspec_args``
"""
wrapper = gen.coroutine(func)
wrapper.__argspec_args = inspect.getfullargspec(func).args
return wrapper
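# Minimal demonstration (a sketch): the wrapper behaves like a Tornado
# coroutine while exposing the original positional-argument names.
#
# @coroutine
# def get_user(self, user_id):
#     raise gen.Return({"id": user_id})
#
# assert get_user.__argspec_args == ['self', 'user_id']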
|
11564794
|
import re
def get_line(view):
"""Returns line and column where cursor is
``view`` is sublime.View.
"""
sel = view.sel()[0]
line = view.substr(view.line(sel)) # gets the line text
row, column = view.rowcol(sel.begin()) # gets the line number and column
return line, column
def get_prefix(line, column):
"""Return prefix for vars and text right side of the cursor"""
    m = re.search(r'\S*$', line[:column])
rside = line[column:]
match = m.group(0)
return {'match': match, 'rside': rside}
def get_object_from_line(line, prefix, column):
"""Returns the object name after the prefix
``line`` -- Text in the line where cursor is.
``prefix`` -- Prefix determined by sublime.
``column`` -- Index of the cursor in the line.
"""
    re_str = r'(?:\s)([^\s]+)(?:\.{0})$'.format(re.escape(prefix))
match = re.search(re_str, line[:column])
if match:
return match.group(1)
else:
return None
|
11564817
|
import numpy as np
import cv2
def compute_objects_dict(sim_data):
    objects = sim_data["semantic"]
obj_ids = [int(ele['id'].split("_")[-1]) for ele in objects]
objects_dict = dict(zip(obj_ids, objects))
return objects_dict
def semantic_seg_to_binary_map(semantic_seg_img, object_mask):
filtered_img = np.copy(semantic_seg_img)
filtered_img[np.where(~object_mask)] = 0
binary_map = (filtered_img>0).astype(np.uint8)
return binary_map
def get_valid_ids(objects_dict, blacklist_categories):
valid_ids = []
for key, val in objects_dict.items():
if val['category'].lower() not in blacklist_categories:
valid_ids.append(key)
valid_ids = np.unique(valid_ids)
return valid_ids
def change_brightness(img, value=0.5):
hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
hsv_img[...,2] = hsv_img[...,2] * value
final_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)
return final_img
def apply_attention(img, target_obj_mask, brightness_delta):
attention_img = np.copy(img[:,:,:3])
attention_img[~target_obj_mask] = change_brightness(np.copy(attention_img), value=brightness_delta)[~target_obj_mask]
return attention_img
def get_centroid(binary_map, _MIN_AREA_THRESHOLD, _MAX_AREA_THRESHOLD):
    n_labels, labels, stats, centroids = run_connected_components(binary_map)
    areas = stats[:,-1]
    good_idxs = np.where((areas>_MIN_AREA_THRESHOLD(binary_map)) & (areas<_MAX_AREA_THRESHOLD(binary_map)))
    centroids = centroids[good_idxs]
    # Filter the areas alongside the centroids so the argmax below indexes
    # the same components.
    areas = areas[good_idxs]
    if len(centroids) > 1:
        centroids = [centroids[np.argmax(areas)]]
    if len(centroids) == 0:
        centroid = None
    else:
        # Grab centroid from list
        centroid = centroids[0]
    return centroid
def run_connected_components(binary_map, connectivity=4):
# Perform the operation
# Output: num_labels, label matrix, stat matrix, centroid matrix
output = cv2.connectedComponentsWithStats(binary_map, connectivity, cv2.CV_32S)
return output
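# Usage sketch for get_centroid (illustrative thresholds): note that both
# area thresholds are callables taking the binary map, e.g. fractions of
# the total pixel count.
#
# centroid = get_centroid(
#     binary_map,
#     _MIN_AREA_THRESHOLD=lambda m: 0.001 * m.size,
#     _MAX_AREA_THRESHOLD=lambda m: 0.5 * m.size,
# )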
|
11564847
|
from typing import List
from dataclasses import dataclass
from common.spec_input_parsers import SpecInputParsers
from common.sagemaker_component_spec import SageMakerComponentSpec
from common.common_inputs import (
SageMakerComponentBaseInputs,
SageMakerComponentBaseOutputs,
SageMakerComponentInputValidator as InputValidator,
SageMakerComponentOutputValidator as OutputValidator,
SageMakerComponentInput as Input,
SageMakerComponentOutput as Output,
)
@dataclass(frozen=True)
class DummyInputs(SageMakerComponentBaseInputs):
input1: Input
input2: Input
@dataclass
class DummyOutputs(SageMakerComponentBaseOutputs):
output1: Output
output2: Output
@dataclass(frozen=True)
class AllInputTypes(SageMakerComponentBaseInputs):
inputStr: Input
inputInt: Input
inputBool: Input
inputDict: Input
inputList: Input
inputOptional: Input
inputOptionalNoDefault: Input
@dataclass
class NoOutputs(SageMakerComponentBaseOutputs):
pass
class DummySpec(SageMakerComponentSpec[DummyInputs, DummyOutputs]):
INPUTS: DummyInputs = DummyInputs(
input1=InputValidator(
input_type=str, description="The first input.", default="input1-default",
),
input2=InputValidator(
input_type=int, required=True, description="The second input.",
),
)
OUTPUTS = DummyOutputs(
output1=OutputValidator(description="The first output."),
output2=OutputValidator(description="The second output."),
)
def __init__(self, arguments: List[str]):
super().__init__(arguments, DummyInputs, DummyOutputs)
class ExtraSpec(SageMakerComponentSpec[AllInputTypes, NoOutputs]):
INPUTS: AllInputTypes = AllInputTypes(
inputStr=InputValidator(input_type=str, required=True, description="str",),
inputInt=InputValidator(input_type=int, required=True, description="int",),
inputBool=InputValidator(input_type=bool, required=True, description="bool",),
inputDict=InputValidator(
input_type=SpecInputParsers.yaml_or_json_dict,
required=True,
description="dict",
),
inputList=InputValidator(
input_type=SpecInputParsers.yaml_or_json_list,
required=True,
description="list",
),
inputOptional=InputValidator(
input_type=str,
required=False,
description="optional",
default="default-string",
),
inputOptionalNoDefault=InputValidator(
input_type=str, required=False, description="optional",
),
)
OUTPUTS = NoOutputs()
def __init__(self, arguments: List[str]):
super().__init__(arguments, AllInputTypes, NoOutputs)
|
11564860
|
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from utils.utils_metrics import evaluate
# Prevent bugs
def get_train_step_fn():
@tf.function
def train_step(imgs, targets, net, optimizer, triplet_loss):
with tf.GradientTape() as tape:
            # Compute the loss
outputs = net(imgs, training=True)
CE_loss_value = tf.reduce_mean(tf.losses.categorical_crossentropy(targets, outputs[0]))
triplet_loss_value = triplet_loss(None, outputs[1])
loss_value = CE_loss_value + triplet_loss_value
grads = tape.gradient(loss_value, net.trainable_variables)
optimizer.apply_gradients(zip(grads, net.trainable_variables))
return loss_value, triplet_loss_value, CE_loss_value
return train_step
def fit_one_epoch(net, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, triplet_loss, test_loader, lfw_eval_flag):
train_step = get_train_step_fn()
loss = 0
total_triple_loss = 0
total_CE_loss = 0
val_loss = 0
val_triple_loss = 0
val_CE_loss = 0
print('Start Train')
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
images, targets = batch[0], tf.convert_to_tensor(batch[1])
loss_value, triplet_loss_value, CE_loss_value = train_step(images, targets, net, optimizer, triplet_loss)
loss = loss + loss_value
total_triple_loss = total_triple_loss + triplet_loss_value
total_CE_loss = total_CE_loss + CE_loss_value
pbar.set_postfix(**{'total_loss' : float(loss) / (iteration + 1),
'total_triple_loss' : float(total_triple_loss) / (iteration + 1),
'total_CE_loss' : float(total_CE_loss) / (iteration + 1),
'lr' : optimizer._decayed_lr(tf.float32).numpy()})
pbar.update(1)
print('Finish Train')
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
images, targets = batch[0], tf.convert_to_tensor(batch[1])
outputs = net(images)
CE_loss_value = tf.reduce_mean(tf.losses.categorical_crossentropy(targets, outputs[0]))
triplet_loss_value = triplet_loss(None, outputs[1])
loss_value = CE_loss_value + triplet_loss_value
val_loss = val_loss + loss_value
val_triple_loss = val_triple_loss + triplet_loss_value
val_CE_loss = val_CE_loss + CE_loss_value
pbar.set_postfix(**{'val_loss' : float(val_loss) / (iteration + 1),
'val_triple_loss' : float(val_triple_loss) / (iteration + 1),
'val_CE_loss' : float(val_CE_loss) / (iteration + 1)})
pbar.update(1)
print('Finish Validation')
if lfw_eval_flag:
        print("Running evaluation on the LFW dataset")
labels, distances = [], []
for _, (data_a, data_p, label) in enumerate(test_loader.generate()):
out_a, out_p = net(data_a)[1], net(data_p)[1]
dists = np.linalg.norm(out_a - out_p, axis=1)
distances.append(dists)
labels.append(label)
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
_, _, accuracy, _, _, _, _ = evaluate(distances,labels)
print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val}
loss_history.on_epoch_end([], logs)
print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))
net.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % ((epoch + 1), loss / epoch_step ,val_loss / epoch_step_val))
|
11564863
|
from dragonfly import Grammar, CompoundRule, Integer, Choice, Repetition, Optional
heroes = ["Abathur","Alarak","Alexstrasza","Ana","Anubarak","Artanis","Arthas","Auriel","Azmodan","Blaze","Brightwing","Cassia","Chen","Cho","Chromie","D.Va","Deckard","Dehaka","Diablo","E.T.C.","Falstad","Fenix",
"Gall","Garrosh","Gazlowe","Genji","Greymane","Gul'dan","Hanzo","Illidan","Jaina","Johanna","Junkrat","Kael'thas","Kel'Thuzad","Kerrigan","Kharazim","Leoric",
"<NAME>","Li-Ming","Lt. Morales","Lucio","Lunara","Maiev","Mal'Ganis","Malfurion","Malthael","Medivh","Mephisto","Muradin","Murky","Nazeebo","Nova",
"Orphea","Probius","Ragnaros","Raynor","Rehgar","Rexxar","Samuro","Sgt. Hammer","Sonya","Stitches","Stukov","Sylvanas","Tassadar","The Butcher",
"The Lost Vikings","Thrall","Tracer","Tychus","Tyrael","Tyrande","Uther","Valeera","Valla","Varian","Whitemane","Xul","Yrel","Zagara","Zarya","Zeratul","Zul'jin"]
heroObject = {}
for hero in heroes:
heroObject[ hero ] = hero
heroObject[ "Morales"] = "Lt. Morales"
heroObject["Queen of blades"] = "Kerrigan"
heroObject["Deevah"] = "D.Va"
heroObject["Kaletas"] = "Kael'thas"
heroObject["A noob arak"] = "Anubarak"
heroObject["Let da killing begin"] = "Zul'jin"
heroObject["The Lich Lord of The Plaguelands"] = "Kel'Thuzad"
heroObject["Commander of The Dread Necropolis"] = "Kel'Thuzad"
heroObject["Founder of the Cult of The Damned"] = "Kel'Thuzad"
heroObject["Former Member of the Council of Six"] = "Kel'Thuzad"
heroObject["Creator of the Abominations"] = "Kel'Thuzad"
heroObject["Betrayer of Humanity"] = "Kel'Thuzad"
heroObject["Summoner of Archimonde"] = "Kel'Thuzad"
heroObject["Majordomo to The Lich King himself"] = "Kel'Thuzad"
heroObject["You wot mate"] = "Tracer"
heroObject["Death comes"] = "Malthael"
heroObject["Acceptable outcome"] = "Abathur"
heroObject["By fire be purged"] = "Ragnaros"
heroObject["I am malganis I am a turtle"] = "Mal'Ganis"
heroObject["Stitches want to play"] = "Stitches"
heroObject["Essence"] = "Dehaka"
heroObject["Stay a while and listen"] = "Deckard"
heroObject["Do not fail me again"] = "Alarak"
heroObject["The great outdoors"] = "Lunara"
heroObject["Ome wa mo shinderu"] = "Genji"
heroObject["Nandato"] = "Hanzo"
heroObject["My destiny is my own"] = "Illidan"
heroObject["All shall suffer"] = "Leoric"
heroObject["Here we go again"] = "Chromie"
heroObject["Stratholme deserved it"] = "Arthas"
heroObject["Theramore deserved it"] = "Garrosh"
heroObject["Teldrassil deserved it"] = "Sylvanas"
heroObject["Burn it"] = "Sylvanas"
heroObject["Salami all shall adore it"] = "Kael'thas"
heroObject["I play to win"] = "D.Va"
heroObject["Cigar boie"] = "Tychus"
heroObject["There is always hope"] = "Auriel"
heroObject["I bring life and "] = "Alexstrasza"
heroObject["Proxy stargate"] = "Probius"
heroObject["Nature calls"] = "Lunara"
heroChoice = Choice( "hero", heroObject)
class SelectHeroRule(CompoundRule):
spec = "<hero>"
extras = [heroChoice]
callback = False
def set_callback( self, callback ):
self.callback = callback
def _process_recognition(self, node, extras):
hero = extras["hero"]
if( self.callback ):
self.callback( hero )
class QueueUpRule(CompoundRule):
spec = "Play the game"
callback = False
def set_callback( self, callback ):
self.callback = callback
def _process_recognition(self, node, extras):
if( self.callback ):
self.callback()
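# Wiring sketch (hypothetical callback): dragonfly rules only fire once
# they are added to a Grammar and the grammar is loaded.
#
# def on_hero(hero):
#     print(hero)
#
# grammar = Grammar("heroes")
# select_rule = SelectHeroRule()
# select_rule.set_callback(on_hero)
# grammar.add_rule(select_rule)
# grammar.load()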
|
11564872
|
import glnext
import numpy as np
from glnext_compiler import glsl
from PIL import Image
A = np.linspace(-1.0, 1.0, 11)
B = np.ones(11)
C = np.zeros(11)
vertex_data = np.array([A, -B, C, A, B, C, -B, A, C, B, A, C]).T.astype('f4').tobytes()
vertex_count = len(vertex_data) // 12
instance = glnext.instance()
task = instance.task()
framebuffer = task.framebuffer((512, 512))
pipeline = framebuffer.render(
vertex_shader=glsl('''
#version 450
#pragma shader_stage(vertex)
layout (binding = 0) uniform Buffer {
mat4 mvp;
};
layout (location = 0) in vec3 in_vert;
void main() {
gl_Position = mvp * vec4(in_vert, 1.0);
}
'''),
fragment_shader=glsl('''
#version 450
#pragma shader_stage(fragment)
layout (location = 0) out vec4 out_color;
void main() {
out_color = vec4(0.0, 0.0, 0.0, 1.0);
}
'''),
topology='lines',
vertex_format='3f',
vertex_count=vertex_count,
bindings=[
{
'binding': 0,
'name': 'uniform_buffer',
'type': 'uniform_buffer',
'size': 64,
},
],
)
framebuffer.update(
clear_values=glnext.pack([1.0, 1.0, 1.0, 1.0]),
)
pipeline.update(
uniform_buffer=glnext.camera((3.0, 2.0, 2.0), (0.0, 0.0, 0.0)),
vertex_buffer=vertex_data,
)
task.run()
data = framebuffer.output[0].read()
Image.frombuffer('RGBA', (512, 512), data, 'raw', 'RGBA', 0, -1).show()
|
11564887
|
import requests
import threading
import json
url = 'http://localhost:8545/'
result = [{}]
def get_result(json_content):
content = json.loads(json_content)
try:
return content["result"]
except Exception as e:
print e
print json_content
class MyThread(threading.Thread):
def __init__(self, index):
threading.Thread.__init__(self)
self.data_getBalance = {'jsonrpc': '2.0',
'method': 'eth_getBalance',
'params': ["0x9bA082240DBa3F9ef90038b9357649Fa569fd763", 'latest'],
'id': 1 + index * 100}
self.filename = "contract_" + str(index) + "0000.json"
print "starting thread " + str(index)
self.balance = {}
self.index = index
self.sess = requests.Session()
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
self.sess.mount('http://', self.adapter)
def run(self):
with open(self.filename) as json_file:
c = json.load(json_file)
        i = 0
for address in c:
i += 1
if i%1000 == 0:
print 'Thread ' + str(self.filename) + ' is processing contract: ' + str(i)
try:
self.data_getBalance['params'][0] = address
                    r = self.sess.post(url, data=json.dumps(self.data_getBalance), allow_redirects=True)
self.balance[address] = int(get_result(r.content), 16)
r.close()
except Exception as e:
print str(e)
# print self.balance
result[self.index] = self.balance
list_threads = []
try:
for i in range(1, 147):
new_thread = MyThread(i)
list_threads.append(new_thread)
result.append({})
for my_thread in list_threads:
my_thread.start()
for my_thread in list_threads:
my_thread.join()
super_dict = {}
for small_dict in result:
super_dict.update(small_dict)
with open('contract_balance.json', 'w') as outfile:
json.dump(super_dict, outfile)
except Exception as e:
print e
print "Error: unable to start thread"
|
11564899
|
import os
app_dir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'A SECRET KEY'
SQLALCHEMY_TRACK_MODIFICATIONS = False
##### Flask-Mail configurations #####
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or '<EMAIL>'
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or 'password'
MAIL_DEFAULT_SENDER = MAIL_USERNAME
class DevelopmentConfig(BaseConfig):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEVELOPMENT_DATABASE_URI') or \
'mysql+pymysql://root:pass@localhost/flask_app_db'
class TestingConfig(BaseConfig):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TESTING_DATABASE_URI') or \
'mysql+pymysql://root:pass@localhost/flask_app_db'
class ProductionConfig(BaseConfig):
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('PRODUCTION_DATABASE_URI') or \
'mysql+pymysql://root:pass@localhost/flask_app_db'
|
11564932
|
from __future__ import print_function
import six
import sys
from datetime import datetime
from insights import dr, rule
from insights.util import utc
RENDERERS = {}
_FORMATTERS = {}
def get_formatter(name):
"""
    Looks up a formatter class given a prefix of its name.
The names are sorted, and the first matching class is returned.
"""
for k in sorted(_FORMATTERS):
if k.startswith(name):
return _FORMATTERS[k]
class FormatterAdapterMeta(type):
""" Automatically registers subclasses for later lookup. """
def __init__(cls, name, bases, dct):
if name not in ("FormatterAdapter", "EvaluatorFormatterAdapter"):
_FORMATTERS[dr.get_name(cls)] = cls
super(FormatterAdapterMeta, cls).__init__(name, bases, dct)
class FormatterAdapter(six.with_metaclass(FormatterAdapterMeta)):
@staticmethod
def configure(p):
""" Override to add arguments to the ArgumentParser. """
pass
def __init__(self, args=None):
""" Subclasses get passed the parsed args. """
pass
def preprocess(self, broker):
"""
Called before any components have been run. Useful for registering
observers.
"""
pass
def postprocess(self, broker):
"""
Called after all components have been run. Useful for interrogating
the broker for final state.
"""
pass
class Formatter(object):
def __init__(self, broker, stream=sys.stdout):
self.broker = broker
self.stream = stream
self.start_time = datetime.now(utc)
def __enter__(self):
self.preprocess()
return self
def __exit__(self, _type, value, tb):
self.postprocess()
def preprocess(self):
pass
def postprocess(self):
pass
class EvaluatorFormatterAdapter(FormatterAdapter):
"""
Base class for formatters that want to serialize a SingleEvaluator after
execution.
"""
Impl = None
@staticmethod
def configure(p):
p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true")
p.add_argument("-S", "--show-rules", nargs="+",
choices=["fail", "info", "pass", "none", "metadata", "fingerprint"],
metavar="TYPE",
help="Show results per rule's type: 'fail', 'info', 'pass', 'none', 'metadata', and 'fingerprint'")
        p.add_argument("-F", "--fail-only",
                       help="Show FAIL results only. Conflicts with '-m' and is dropped when they are used together. Deprecated in favor of '-S fail'.",
                       action="store_true")
def __init__(self, args=None):
if args:
hn = "insights.combiners.hostname, insights.parsers.branch_info"
args.plugins = ",".join([args.plugins, hn]) if args.plugins else hn
self.missing = args.missing
fail_only = args.fail_only
if args.missing and fail_only:
# Drops the '-F' silently when specifying '-m' and '-F' together
# --> Do NOT break the Format of the output
fail_only = None
self.show_rules = [] # Empty by default, means show ALL types (exclude "none")
if not args.show_rules and fail_only:
self.show_rules = ['rule']
elif args.show_rules:
self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules]
def preprocess(self, broker):
self.formatter = self.Impl(broker, self.missing, self.show_rules)
self.formatter.preprocess()
def postprocess(self, broker):
self.formatter.postprocess()
def get_content(obj, val):
"""
Attempts to determine a jinja2 content template for a rule's response.
"""
# does the rule define a content= kwarg?
c = dr.get_delegate(obj).content
# otherwise, does the rule module have a CONTENT attribute?
if c is None:
mod = sys.modules[obj.__module__]
c = getattr(mod, "CONTENT", None)
if c:
# is the content a dictionary?
if isinstance(c, dict):
# does it contain a make_* class as a key?
v = c.get(val.__class__)
if v is not None:
return v
# does it contain an error key?
key = val.get_key()
if key:
v = c.get(key)
# is the value a dict that contains make_* classes?
if isinstance(v, dict):
return v.get(val.__class__)
return v
else:
return c
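# Illustrative CONTENT shapes resolved by get_content (a sketch with a
# hypothetical rule module): either a plain template string, or a dict
# keyed by the response class and/or the response key.
#
# CONTENT = "Bash version: {{bash}}"
#
# CONTENT = {
#     make_fail: "Vulnerable bash: {{bash}}",
#     "BASH_BUG": {make_fail: "Vulnerable bash: {{bash}}",
#                  make_pass: "Bash {{bash}} is not affected"},
# }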
try:
from jinja2 import Template
def format_rule(comp, val):
content = get_content(comp, val)
if content and val.get("type") != "skip":
return Template(content).render(val)
return str(val)
RENDERERS[rule] = format_rule
except ImportError:
pass
def render(comp, val):
_type = dr.get_component_type(comp)
func = RENDERERS.get(_type)
return func(comp, val) if func else str(val)
def get_response_of_types(response, missing=True, show_rules=None):
# Check the "-m" option:
# - When "-m" is specified, show the "skips" rules
# - When "-m" is NOT specified, do not show the "skips" rules
if not missing and 'skips' in response:
response.pop('skips')
# Check the "-S" option:
# - When "-m" is specified but "-S" is NOT specified, show all the loaded rules
# - When neither "-m" nor "-S" is specified, show all the HIT rules (exclude the "skips")
if not show_rules:
# - Discard the "make_none" by default when no "-S"
# That means show "make_none" rules only when "none" is specified in "-S"
        response.pop('none', None)
return response
    # - Discard the "metadata" rules when it's not specified in the "-S" option
if 'metadata' not in show_rules and 'metadata' in response.get('system', {}):
response['system'].pop('metadata')
# - Discard the "make_fail" rules when it's not specified in the "-S" option
if 'rule' not in show_rules and 'reports' in response:
response.pop('reports')
# - Discard the "make_info" rules when it's not specified in the "-S" option
if 'info' not in show_rules and 'info' in response:
response.pop('info')
# - Discard the "make_pass" rules when it's not specified in the "-S" option
if 'pass' not in show_rules and 'pass' in response:
response.pop('pass')
# - Discard the "make_none" rules when it's not specified in the "-S" option
if 'none' not in show_rules and 'none' in response:
response.pop('none')
# - Discard the "fingerprint" rules when it's not specified in the "-S" option
if 'fingerprint' not in show_rules and 'fingerprints' in response:
response.pop('fingerprints')
return response
|
11564946
|
import typing
from bottle import Bottle, Request
from jinja2 import Template
from .base import Apiman as _Apiman
class Apiman(_Apiman):
"""Bottle extension
    >>> app = Bottle()
>>> apiman = Apiman(
... template="./examples/docs/dog_template.yml"
... )
>>> apiman.init_app(app)
>>> apiman.add_schema(
... "Dog",
... {
... "properties": {
... "id": {"description": "global unique", "type": "integer"},
... "name": {"type": "string"},
... "age": {"type": "integer"},
... },
... "type": "object",
... },
... )
    >>> @app.route("/dogs/", method="GET")
    ... @apiman.from_file("./examples/docs/dogs_get.yml")
    ... def list_dogs():
    ...     return DOGS
"""
def init_app(self, app: Bottle):
app.add_hook("before_request", lambda: self.load_specification(app))
if self.swagger_template and self.swagger_url:
swagger_html = Template(open(self.swagger_template).read()).render(
self.config
)
self.route(
app,
self.swagger_url,
lambda: swagger_html,
)
        if self.redoc_template and self.redoc_url:
redoc_html = Template(open(self.redoc_template).read()).render(self.config)
self.route(app, self.redoc_url, lambda: redoc_html)
if self.specification_url:
self.route(
app,
self.specification_url,
lambda: self.load_specification(app),
)
def get_request_schema(self, request: Request) -> typing.Dict:
        return self._get_path_schema(
            self._convert_path_rule(request.route.rule), request.method.lower()
        )
def get_request_data(self, request: Request, k: str) -> typing.Any:
if k == "query":
return dict(request.query)
elif k == "path":
return dict(request.url_args)
elif k == "cookie":
return dict(request.cookies)
elif k == "header":
return dict(request.headers)
elif k == "json":
return request.json
elif k == "form":
return dict(request.forms)
elif k == "xml":
return self.xmltodict(request.body)
else:
return {}
def load_specification(self, app: Bottle) -> typing.Dict:
if not self.loaded:
for route in app.routes:
func = route.callback
specification = self.parse(func)
if not specification:
continue
if (
set(specification.keys()) & self.HTTP_METHODS
): # multi method description
self.add_path(route.rule, specification)
elif route.method.lower() in self.HTTP_METHODS:
self.add_path(route.rule, specification, method=route.method)
return self._load_specification()
else:
return self.specification
def route(self, app: Bottle, url: str, func):
app.route(url)(func)
def add_path(
self, path: str, specification: typing.Dict, method: typing.Optional[str] = None
):
        return super().add_path(
            self._convert_path_rule(path), specification, method=method
        )
    def _convert_path_rule(self, path: str) -> str:
        # convert bottle-style wildcard rules, eg "/path/<id:int>", to "/path/{id}"
_subs = []
for _sub in path.split("/"):
if _sub.startswith("<") and _sub.endswith(">"):
_subs.append(f"{{{_sub[1:-1].split(':')[0]}}}")
else:
_subs.append(_sub)
return "/".join(_subs)
Extension = Apiman
|
11564951
|
import os
from fabric.api import *
from fabric.colors import *
import sys
#env.user = 'umass_nameservice'
#env.key_filename = '/home/rahul/.ssh/id_rsa_pl'
#install_dir = '/home/umass_nameservice/'
# The default install directory is the user's home directory
def_install = '/home/'+env.user+'/'
env.colorize_errors=True
def config_check(param):
if param in env.keys():
print blue("config_check passed")
else:
print red("config file is incomplete "+param+" is not set")
sys.exit(-1)
@task
def phost():
env.hosts = open('hosts.txt', 'r').readlines()
print "the hosts are " , env.hosts
@task
@parallel
def install_gns(install_dir = def_install):
config_check("gns_path")
print "the install dir is", install_dir
run_string = 'rsync -avz '+ env.gns_path +' '+ env.user+'@'+env.host_string+':'+install_dir
result = os.system(run_string)
if result != 0:
print "gns copy failed for the host" , env.host_string
else:
        print "gns copy successful for the host", env.host_string
# The following functions install the required dependencies on the remote hosts
@task
@parallel
def install_mongo(install_dir = def_install):
config_check("mongo_download")
print "Initiating install_mongo routine"
print "the install dir is",install_dir
print "the mongo_download variable is ", env.mongo_download
#print "the various hosts are " , env.hosts
with settings(warn_only=True):
        result = run("source ~/.bashrc && mongo --version")
if result.return_code == 0:
print "mongo already exists in PATH, no need to install"
else:
print "need to install"
run_string = 'scp '+' install.sh '+ env.user+'@'+env.host_string+':'+install_dir
presult = os.system(run_string) #The place where the install needs to be copied
print "presult is ", presult
run('bash ' + install_dir+'install.sh'+' mongodb http://downloads.mongodb.org/linux/mongodb-linux-i686-2.6.1.tgz '+install_dir)
@task
@parallel
def install_java(install_dir = def_install):
config_check("java_download")
print "Initiating install_java routine"
print "the install dir is",install_dir
print "java download link is",env.java_download
with settings(warn_only=True):
result = run('java -version')
if result.return_code == 0:
print "java already exists in PATH, no need to install"
else:
print "need to install"
            run_string = 'scp '+' install.sh '+ env.user+'@'+env.host_string+':'+install_dir
presult = os.system(run_string) #The place where the install needs to be copied
print "presult is ", presult
run('bash '+ install_dir+'install.sh'+' jdk http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-i586.tar.gz '+install_dir)
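# Usage sketch (assumes these tasks live in a fabfile next to hosts.txt and a
# config that sets env.gns_path, env.mongo_download and env.java_download):
#
# fab phost install_gns
# fab phost install_mongo:install_dir=/home/umass_nameservice/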
|
11564962
|
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import tree
import starboost as sb
X, y = datasets.load_breast_cancer(return_X_y=True)
X_fit, X_val, y_fit, y_val = model_selection.train_test_split(X, y, test_size=0.2, random_state=42)
def micro_f1_score(y_true, y_pred):
return metrics.f1_score(y_true, y_pred, average='micro')
model = sb.BoostingClassifier(
loss=sb.losses.LogLoss(),
base_estimator=tree.DecisionTreeRegressor(max_depth=3, presort=True),
base_estimator_is_tree=True,
n_estimators=30,
init_estimator=sb.init.PriorProbabilityEstimator(),
learning_rate=0.1,
row_sampling=0.8,
col_sampling=0.8,
eval_metric=micro_f1_score,
early_stopping_rounds=5,
random_state=42
)
model = model.fit(X_fit, y_fit, eval_set=(X_val, y_val))
y_pred = model.predict_proba(X_val)
print(metrics.roc_auc_score(y_val, y_pred[:, 1]))
|
11564979
|
from re import compile
import pytest
pytestmark = pytest.mark.page('forms_with_input_elements.html')
class TestTextAreasByValue(object):
def test_finds_textareas_by_string(self, browser):
browser.textarea(index=0).set('foo1')
browser.textarea(index=1).set('foo2')
assert [e.id for e in browser.textareas(value='foo1')] == [browser.textarea(index=0).id]
assert [e.id for e in browser.textareas(value='foo2')] == [browser.textarea(index=1).id]
def test_finds_textareas_by_regexp(self, browser):
browser.textarea(index=0).set('foo1')
browser.textarea(index=1).set('foo2')
assert browser.textareas(value=compile(r'foo'))[0].id == browser.textarea(index=0).id
assert browser.textareas(value=compile(r'foo'))[1].id == browser.textarea(index=1).id
|
11564988
|
import logging
from pprint import pformat
from threading import Thread
from autobahn.wamp import ApplicationError
from waapi.client.interface import CallbackExecutor
from waapi.wamp.interface import WampRequestType, WampRequest, WaapiRequestFailed
from waapi.wamp.ak_autobahn import AkComponent
from waapi.wamp.async_compatibility import asyncio
class WampClientAutobahn(AkComponent):
"""
Implementation class of a Waapi client using the autobahn library
"""
logger = logging.getLogger("WampClientAutobahn")
def __init__(self, decoupler, callback_executor, allow_exception):
"""
:type decoupler: AutobahnClientDecoupler
:type callback_executor: CallbackExecutor
:param allow_exception: True to allow exception, False to ignore them.
In any case they are logged to stderr.
:type allow_exception: bool
"""
super(WampClientAutobahn, self).__init__()
self._decoupler = decoupler
self._callback_executor = callback_executor
self._allow_exception = allow_exception
@classmethod
def enable_debug_log(cls):
cls.logger.setLevel(logging.DEBUG)
@classmethod
def _log(cls, msg):
cls.logger.debug("WampClientAutobahn: %s", msg)
async def stop_handler(self, request):
"""
:param request: WampRequest
"""
self._log("Received STOP, stopping and setting the result")
self._callback_executor.stop()
self.disconnect()
self._log("Disconnected")
request.future.set_result(True)
async def call_handler(self, request):
"""
:param request: WampRequest
"""
self._log("Received CALL, calling " + request.uri)
res = await self.call(request.uri, **request.kwargs)
self._log("Received response for call")
result = res.kwresults if res else {}
if request.callback:
self._log("Callback specified, calling it")
callback = _WampCallbackHandler(request.callback, self._callback_executor)
callback(result)
request.future.set_result(result)
async def subscribe_handler(self, request):
"""
:param request: WampRequest
"""
self._log("Received SUBSCRIBE, subscribing to " + request.uri)
callback = _WampCallbackHandler(request.callback, self._callback_executor)
subscription = await (self.subscribe(
callback,
topic=request.uri,
options=request.kwargs)
)
request.future.set_result(subscription)
async def unsubscribe_handler(self, request):
"""
:param request: WampRequest
"""
self._log("Received UNSUBSCRIBE, unsubscribing from " + str(request.subscription))
try:
# Successful unsubscribe returns nothing
await request.subscription.unsubscribe()
request.future.set_result(True)
except ApplicationError:
request.future.set_result(False)
except Exception as e:
self._log(str(e))
request.future.set_result(False)
async def onJoin(self, details):
self._log("Joined!")
self._decoupler.set_joined()
self._callback_executor.start()
try:
while True:
self._log("About to wait on the queue")
request = await self._decoupler.get_request()
""":type: WampRequest"""
self._log("Received something!")
try:
handler = {
WampRequestType.STOP: self.stop_handler,
WampRequestType.CALL: self.call_handler,
WampRequestType.SUBSCRIBE: self.subscribe_handler,
WampRequestType.UNSUBSCRIBE: self.unsubscribe_handler
}.get(request.request_type)
if handler:
await handler(request)
else:
self._log("Undefined WampRequestType")
except ApplicationError as e:
self.logger.error("WampClientAutobahn (ERROR): " + pformat(str(e)))
if self._allow_exception:
request.future.set_exception(WaapiRequestFailed(e))
else:
request.future.set_result(None)
self._log("Done treating request")
if request.request_type == WampRequestType.STOP:
break
except RuntimeError:
# The loop has been shut down by a disconnect
pass
def onDisconnect(self):
self._log("The client was disconnected.")
# Stop the asyncio loop, ultimately stopping the runner thread
asyncio.get_event_loop().stop()
class _WampCallbackHandler:
"""
Wrapper for a callback that unwraps a WAMP response
"""
def __init__(self, callback, executor):
assert callable(callback)
assert isinstance(executor, CallbackExecutor)
self._callback = callback
self._executor = executor
def __call__(self, *args, **kwargs):
if self._callback and callable(self._callback):
self._executor.execute(self._callback, kwargs)
|
11565006
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from transformers import Trainer, logging
logger = logging.get_logger(__name__)
class ContrastiveTrainer(Trainer):
def __init__(
self,
model = None,
args = None,
data_collator = None,
train_dataset = None,
eval_dataset = None,
tokenizer = None,
model_init = None,
compute_metrics = None,
callbacks = None,
optimizers = (None, None),
):
super().__init__(
model,
args,
data_collator,
train_dataset,
eval_dataset,
tokenizer,
model_init,
compute_metrics,
callbacks,
optimizers
)
def _compute_loss(self, model, inputs):
labels = inputs.pop("labels")
ce_pos = inputs.pop("ce_pos")
positive_contrast = inputs.pop("positive_contrast")
valid_contrast = inputs.pop("valid_contrast")
model_output = model(**inputs, use_cache=False)
logits = model_output.logits
ce_logits = logits[ce_pos]
ce_targets = labels[ce_pos]
loss = self.label_smoother((ce_logits,), ce_targets)
representation = model_output.contrast_states # bsz x seqlen x dim
ne_representation = representation.masked_fill((labels == -100).unsqueeze(-1), 0) # B x T x C
representation = ne_representation.sum(dim=1)
representation_ne_denom = (labels != -100).sum(dim=1, keepdim=True)
representation = representation / torch.max(representation_ne_denom,
1e-8 * torch.ones_like(representation_ne_denom))
representation_n = representation.norm(dim=-1, keepdim=True)
representation_norm = representation / torch.max(representation_n, 1e-8 * torch.ones_like(representation_n))
similarity = torch.matmul(representation_norm, representation_norm.transpose(0, 1)) # pos+neg x pos+neg
similarity = similarity.exp()
similarity = similarity.masked_fill(~valid_contrast, 0.)
denominator = similarity.sum(dim=-1, keepdim=True) # pos+neg
denom_similarity = similarity / torch.max(denominator, 1e-8 * torch.ones_like(denominator)) # pos+neg x pos+neg
contrast_loss = denom_similarity[positive_contrast]
contrast_loss = - contrast_loss.log()
contrast_loss_denom = positive_contrast.sum()
contrast_loss = contrast_loss.sum() / torch.max(contrast_loss_denom,
1e-8 * torch.ones_like(contrast_loss_denom))
return loss + contrast_loss, logits
def compute_loss(self, model, inputs):
loss, _ = self._compute_loss(model, inputs)
return loss
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
A tuple with the loss, logits and labels (each being optional).
"""
inputs = self._prepare_inputs(inputs)
with torch.no_grad():
# compute loss on predict data
loss, _ = self._compute_loss(model, inputs)
loss = loss.mean().detach()
return (loss, None, None)
def _pad_tensors_to_max_len(self, tensor, max_length):
# If PAD token is not defined at least EOS token has to be defined
        config = self.model.config
        pad_token_id = config.pad_token_id if config.pad_token_id is not None else config.eos_token_id
if pad_token_id is None:
raise ValueError(
f"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be padded to `max_length`={max_length}"
)
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
|
11565010
|
from .base_cube import BaseCube, retrieve_score_for_strategy
from .regularizer_cube import RegularizersModifierCube
from .controller_cube import RegularizationControllerCube
from .cube_creator import CubeCreator
from .perplexity_strategy import PerplexityStrategy
from .greedy_strategy import GreedyStrategy
from .strategy import BaseStrategy
|
11565013
|
import os
import sys
import numpy as np
def azi_ele_the_rho(top_dir):
pgm_filepath = [line for line in os.listdir(top_dir) if line.endswith('.pgm') and line.startswith('frame80')][0]
tmp = pgm_filepath.split('.pgm')[0].split('_')
azimuth_deg = float(tmp[2].split('azi')[1])
elevation_deg = float(tmp[3].split('ele')[1])
theta_deg = float(tmp[4].split('theta')[1])
rho = float(tmp[1].split('rho')[1])
print('azi %f ele %f the %f rho %f' % (azimuth_deg, elevation_deg, theta_deg, rho))
view_params = {}
view_params['azi'] = azimuth_deg
view_params['ele'] = elevation_deg
view_params['rho'] = rho
view_params['the'] = theta_deg
return view_params
def tran_rot(filepath):
rot = np.zeros((3,3))
tran = np.zeros((3,))
print("name")
print(filepath)
lines = [line.strip() for line in open(filepath)]
for idx, line in enumerate(lines):
tmp = str(line).split('(')[1].split(')')[0].split()
tmp = [float(x.split(',')[0]) for x in tmp]
if idx < 3:
rot[idx,:] = np.array(tmp[0:3])
tran[idx] = tmp[3]
return tran,rot
def model_para_list(top_dir,frame_id):
model_top_dir = '/home/linshaonju/interactive-segmentation/Data/ShapeNetCore'
model_list = [line.strip().split('_') for line in os.listdir(top_dir) if line.startswith('frame'+frame_id) and line.endswith('_matrix_wolrd.txt')]
print("model_list")
print(model_list)
cate_model_list= [(line[1],line[2]) for line in model_list]
model_path_list = [os.path.join(model_top_dir,line[0],line[1],'model.obj') for line in cate_model_list]
print(model_path_list)
rot_list = []
tran_list = []
for cate_model in cate_model_list:
cate, model = cate_model
print(model)
print("cate_model")
print(cate_model_list)
tran_rot_file = os.path.join(top_dir,'frame'+frame_id+'_'+cate+'_'+model+'_matrix_wolrd.txt')
tran, rot = tran_rot(tran_rot_file)
rot_list.append(rot)
tran_list.append(tran)
return model_path_list, tran_list, rot_list
|
11565044
|
import sys
import subprocess
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', \
'\33[97m', '\33[93m', \
'\033[1;35m', '\033[1;32m', \
'\033[0m'
def get_py_version(possible=["python", "python2.7"]):
"""
    Return the command used to invoke Python 2.7
    :param possible: Different python calls for 2.7.x
    :return: 'python' if Python 2.7.x is the only version installed,
             'python2.7' if more than one version is installed
"""
items = sys.version
version = items.split(" ")[0]
if version.startswith("2.7"):
return possible[0]
else:
return possible[1]
def exec_com(command, py_ver=get_py_version(), sudo=False):
"""
Execute a command via subprocess call
:param command: Command to be executed
:param py_ver: Python version to be called
:param sudo: Is sudo required?
    :return: Return code of the executed command
"""
command_list = command.split(" ")
if sudo is True:
command_list.insert(0, "sudo")
command_list.insert(1, py_ver)
else:
command_list.insert(0, py_ver)
return subprocess.call(' '.join(command_list), shell=True)
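# Usage sketch (hypothetical script name): runs "sudo <python> setup.py install"
# and returns the subprocess return code.
#
# rc = exec_com("setup.py install", sudo=True)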
|
11565057
|
import torch
import pyro_model.helper
import numpy as np
from pyro.ops.stats import quantile
def get_R0_sooner_lockdown(R0, lockdown_start_sooner):
R0_last = R0[..., -1][:, :, None]
R0_last_pad = R0_last.repeat(1, 1, lockdown_start_sooner)
R0_counter = torch.cat([R0[..., lockdown_start_sooner:], R0_last_pad], dim=-1)
return R0_counter
def get_R0_later_lockdown(R0, lockdown_start_later):
R0_last = R0[..., 0][:, :, None]
R0_last_pad = R0_last.repeat(1, 1, lockdown_start_later)
R0_counter = torch.cat([R0_last_pad, R0[..., :-lockdown_start_later]], dim=-1)
return R0_counter
def get_counterfactual(data_dict, forecaster, res, R0):
p_fatal = res['p_fatal']
case_import = res['case_import'] / forecaster.model.N
infectious_days = res['infect_days']
time_to_death = res['time_to_death']
d_incubation = res['d_incubation']
alpha = 1. / d_incubation
d_death = time_to_death - infectious_days
e0 = case_import
i0 = torch.zeros_like(e0)
r0 = torch.zeros_like(e0)
f0 = torch.zeros_like(e0)
s0 = 1. - e0 - i0 - r0 - f0
sigma = 1. / infectious_days
beta_t = sigma * R0
# t_init: same shape with i0
t_init = data_dict['t_init'].unsqueeze(0).repeat(i0.size(0), 1, 1)
res_ode = pyro_model.helper.eluer_seir_time(s0, e0, i0, r0, f0, beta_t, sigma, alpha, p_fatal, d_death, case_import,
t_init)
r_t = res_ode[-1]
prediction = r_t * forecaster.model.N
# ..., t, p
prediction = prediction.unsqueeze(-1).transpose(-1, -3)
mask_half = res['mask_half']
mask_full = torch.cat([mask_half, torch.zeros_like(mask_half)], dim=-1)[..., :-1]
res_list = []
for i in range(prediction.shape[0]):
pred_temp = prediction[i, ...]
mask_temp = mask_full[i, ...][0, ...].permute(1, 0, 2)
prediction_conv = pred_temp.permute(2, 0, 1)
res_inner_list = []
for j in range(len(forecaster.model.N)):
res_inner = torch.nn.functional.conv1d(prediction_conv[j:j + 1, ...], mask_temp[j:j + 1, ...],
padding=mask_half.shape[-1] - 1)
res_inner_list.append(res_inner)
res1 = torch.cat(res_inner_list, dim=0)
res1 = res1.permute(1, 2, 0)
res_list.append(res1)
prediction = torch.stack(res_list, dim=0).numpy().squeeze()
prediction = np.diff(prediction, axis=1)
prediction = quantile(torch.tensor(prediction), (0.05, 0.5, 0.95), dim=0).numpy()
return prediction
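# Shape sketch for the R0 shifts above (illustrative tensors): moving the
# lockdown seven days earlier or later keeps the (samples, regions, days)
# shape, padding with the last or first R0 value respectively.
#
# R0 = torch.rand(100, 3, 50)
# assert get_R0_sooner_lockdown(R0, 7).shape == (100, 3, 50)
# assert get_R0_later_lockdown(R0, 7).shape == (100, 3, 50)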
|
11565068
|
from os import path
from mapshader.sources import world_countries_source
from mapshader.sources import world_boundaries_source
from mapshader.sources import world_cities_source
from mapshader.sources import nybb_source
from mapshader.sources import elevation_source
HERE = path.abspath(path.dirname(__file__))
FIXTURES_DIR = path.join(HERE, 'fixtures')
DEFAULT_SOURCES_FUNCS = [world_countries_source,
world_cities_source,
nybb_source,
world_boundaries_source,
elevation_source]
|
11565105
|
from functools import lru_cache
@lru_cache(None)
def func(x, y):
"""This is a docstring"""
if x < 0:
raise ValueError()
return x + 2.0 * y
class A:
def __init__(self, x, y):
self.x = x # This is a useless comment
self.y = y
def say_hello(self):
print(f"Hello, world\nx = {self.x}\ny = {self.y}")
if __name__ == "__main__":
a = A(1.0, 2.0)
a.say_hello()
|
11565115
|
from pc import PC
from pcw import PCw
from pcew import PCew
from assignment import dynamicUnassignedVars, stL
from inverse import inv
from hornsplit import hsplit
from basesplit import bsplit
from glob import globs
from weights import ew
from counters import nodeCount
# iterative consistency
def iconsistency(ConMatrix, m = -1, n = -1):
stack = [] # initialize stack to simulate backtracking
next(nodeCount)
# check for path consistency to be used for first step
if globs['pcheuristic'] == 0: # simple path consistency
if not PC(ConMatrix, m, n):
return None
elif globs['pcheuristic'] == 2: # exact weighted path consistency
if not PCew(ConMatrix, m, n):
return None
elif globs['pcheuristic'] == 1: # van Beek weighted path consistency
if not PCw(ConMatrix, m, n):
return None
# as long as the consistency problem is not decided, process it
while 1:
# check for processing to be used
if globs['process'] == 1: # dynamic processing
res = dynamicUnassignedVars(ConMatrix)
if not res:
return ConMatrix # solution found
dummy, (i,j) = res # grab unassigned variable
elif globs['process'] == 0: # static processing
# check for splitting to be used
if globs['split'] == 0: # splitting based on set of base relations
for dummy, (i,j) in stL:
if bsplit[ConMatrix[i][j]-1][0] > 1:
break
else:
return ConMatrix # solution found
elif globs['split'] == 1: # splitting based on horn set
for dummy, (i,j) in stL:
if hsplit[ConMatrix[i][j]-1][0] > 1:
break
else:
return ConMatrix # solution found
# check for splitting to be used
if globs['split'] == 0: # splitting based on set of base relations
values = bsplit[ConMatrix[i][j]-1][1][:]
elif globs['split'] == 1: # splitting based on horn set
values = hsplit[ConMatrix[i][j]-1][1][:]
# check for value decision heuristic to be used
if globs['valheuristic'] == 0: # non heuristic
valuesw = values
valuesw.reverse()
elif globs['valheuristic'] == 1: # least constraining value heuristic
valuesw = [(-ew[a-1],a) for a in values]
valuesw.sort(reverse=True)
valuesw = [a[1] for a in valuesw]
        # as long as a consistent variable-value pair is not found, search for it
while 1:
next(nodeCount) # increment visited nodes counter
            # check if the current variable has any values left, if not backtrack to a previous variable assignment
if not valuesw:
# check if any previous variable assignments are left in the stack
while stack:
ConMatrix, (i, j), valuesw, dummy = stack.pop()
                    # check if the newly grabbed variable has any values left, if not keep backtracking
if valuesw:
break
else:
return None
value = valuesw.pop() # grab first value from variable
c = tuple([ic[:] for ic in ConMatrix]) if valuesw else () # keep copy of the constraint matrix in case an inconsistency happens
# assignment takes place
ConMatrix[i][j] = value
ConMatrix[j][i] = inv[value-1]
# check for path consistency to be used
if globs['pcheuristic'] == 0: # simple path consistency
if PC(ConMatrix, i, j):
break
elif globs['pcheuristic'] == 2: # exact weighted path consistency
if PCew(ConMatrix, i, j):
break
elif globs['pcheuristic'] == 1: # van Beek weighted path consistency
if PCw(ConMatrix, i, j):
break
            ConMatrix = c # revert constraint matrix to previous state
stack.append((c, (i, j), valuesw[:], dummy)) # save current state (function call) in a stack
    raise RuntimeError("Can't happen")
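# Illustrative usage (a sketch; assumes ConMatrix is the square matrix of
# relation indices that PC/PCw/PCew operate on): a refined, consistent
# matrix is returned on success, None on inconsistency.
#
#     refined = iconsistency(matrix)
#     if refined is None:
#         print('inconsistent network')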
|
11565131
|
import robotoc
import numpy as np
import math
path_to_urdf = "../iiwa_description/urdf/iiwa14.urdf"
robot = robotoc.Robot(path_to_urdf)
# Change the limits from the default parameters.
robot.set_joint_effort_limit(np.full(robot.dimu(), 50))
robot.set_joint_velocity_limit(np.full(robot.dimv(), 0.5*math.pi))
# Create a cost function.
cost = robotoc.CostFunction()
config_cost = robotoc.ConfigurationSpaceCost(robot)
q_ref = np.array([0, 0.5*math.pi, 0, 0.5*math.pi, 0, 0.5*math.pi, 0])
config_cost.set_q_ref(q_ref)
config_cost.set_q_weight(np.full(robot.dimv(), 10))
config_cost.set_qf_weight(np.full(robot.dimv(), 10))
config_cost.set_v_weight(np.full(robot.dimv(), 0.01))
config_cost.set_vf_weight(np.full(robot.dimv(), 0.01))
config_cost.set_a_weight(np.full(robot.dimv(), 0.01))
cost.push_back(config_cost)
# Create joint constraints.
constraints = robotoc.Constraints(barrier=1.0e-03, fraction_to_boundary_rule=0.995)
joint_position_lower = robotoc.JointPositionLowerLimit(robot)
joint_position_upper = robotoc.JointPositionUpperLimit(robot)
joint_velocity_lower = robotoc.JointVelocityLowerLimit(robot)
joint_velocity_upper = robotoc.JointVelocityUpperLimit(robot)
joint_torques_lower = robotoc.JointTorquesLowerLimit(robot)
joint_torques_upper = robotoc.JointTorquesUpperLimit(robot)
constraints.push_back(joint_position_lower)
constraints.push_back(joint_position_upper)
constraints.push_back(joint_velocity_lower)
constraints.push_back(joint_velocity_upper)
constraints.push_back(joint_torques_lower)
constraints.push_back(joint_torques_upper)
# Create the OCP solver for unconstrained rigid-body systems.
T = 3.0
N = 60
ocp = robotoc.UnconstrOCP(robot=robot, cost=cost, constraints=constraints,
T=T, N=N)
solver_options = robotoc.SolverOptions()
ocp_solver = robotoc.UnconstrOCPSolver(ocp=ocp, solver_options=solver_options,
nthreads=4)
# Initial time and initial state
t = 0.0
q = np.array([0.5*math.pi, 0, 0.5*math.pi, 0, 0.5*math.pi, 0, 0.5*math.pi])
v = np.zeros(robot.dimv())
print("----- Solves the OCP by Riccati recursion algorithm. -----")
ocp_solver.set_solution("q", q)
ocp_solver.set_solution("v", v)
ocp_solver.init_constraints()
print("Initial KKT error: ", ocp_solver.KKT_error(t, q, v))
ocp_solver.solve(t, q, v, init_solver=True)
print("KKT error after convergence: ", ocp_solver.KKT_error(t, q, v))
print(ocp_solver.get_solver_statistics())
# Solves the OCP by ParNMPC algorithm.
parnmpc = robotoc.UnconstrParNMPC(robot=robot, cost=cost, constraints=constraints,
T=T, N=N)
parnmpc_solver = robotoc.UnconstrParNMPCSolver(parnmpc=parnmpc,
solver_options=solver_options,
nthreads=8)
print("\n----- Solves the OCP by ParNMPC algorithm. -----")
parnmpc_solver.set_solution("q", q)
parnmpc_solver.set_solution("v", v)
parnmpc_solver.init_constraints()
parnmpc_solver.init_backward_correction(t)
print("Initial KKT error: ", parnmpc_solver.KKT_error(t, q, v))
parnmpc_solver.solve(t, q, v, init_solver=True)
print("KKT error after convergence: ", parnmpc_solver.KKT_error(t, q, v))
print(parnmpc_solver.get_solver_statistics())
viewer = robotoc.utils.TrajectoryViewer(path_to_urdf=path_to_urdf, viewer_type='meshcat')
viewer.set_camera_transform_meshcat(camera_tf_vec=[0.5, -3.0, 0.0], zoom=2.0)
viewer.display((T/N), ocp_solver.get_solution('q'))
|
11565146
|
from timeit import Timer
from validr import T, Compiler, Invalid
from validators import url as _validators_url
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
def validators_url(x):
return _validators_url(x) is True
_validr_url = Compiler().compile(T.url.scheme('http https'))
def validr_url(x):
try:
_validr_url(x)
except Invalid:
return False
else:
return True
_django_url = URLValidator({'http', 'https'})
def django_url(x):
try:
_django_url(x)
except ValidationError:
return False
else:
return True
url_cases = {
'valid': [
'http://127.0.0.1:8080/hello?key=中文',
'http://tool.lu/regex/',
'https://github.com/guyskk/validator',
'https://avatars3.githubusercontent.com/u/6367792?v=3&s=40',
'https://github.com',
'https://www.google.com/' + 'x' * 128,
],
'invalid': [
'<EMAIL>',
'google',
'readme.md',
'github.com',
'www.google.com',
'http://www.google.com',
'//cdn.bootcss.com/bootstrap/4.0.0-alpha.3/css/bootstrap.min.css',
]
}
def _benchmark_url_validator(fn):
for url in url_cases['valid']:
assert fn(url), f'valid url={url}'
for url in url_cases['invalid']:
if fn(url):
print(f'invalid url={url}')
def test_benchmark_url_validator():
funcs = [
('validr', validr_url),
('validators', validators_url),
('django', django_url),
]
bench = _benchmark_url_validator
for name, fn in funcs:
print(name.center(79, '-'))
bench(fn)
print('OK')
for name, fn in funcs:
print(name.center(79, '-'))
n, t = Timer(lambda: bench(fn)).autorange()
print('{:>8} loops cost {:.3f}s'.format(n, t))
if __name__ == "__main__":
test_benchmark_url_validator()
|
11565221
|
from __future__ import print_function
from enum import IntEnum
import struct
import binascii
import uuid
import threading
from .notifications import GenericClearTileNotification
from .parser import MsftBandParser
from .commands import (
SERIAL_NUMBER_REQUEST, CARGO_NOTIFICATION,
GET_TILES_NO_IMAGES, CORE_WHO_AM_I, CORE_GET_API_VERSION,
SET_THEME_COLOR, START_STRIP_SYNC_END, CORE_SDK_CHECK,
START_STRIP_SYNC_START, READ_ME_TILE_IMAGE, CORE_GET_VERSION,
WRITE_ME_TILE_IMAGE_WITH_ID,
CARGO_SYSTEM_SETTINGS_OOBE_COMPLETED_GET,
NAVIGATE_TO_SCREEN, GET_ME_TILE_IMAGE_ID,
GET_TILES, SET_TILES,
)
from .versions import BandType, DeviceVersion, FirmwareVersion
from .socket import BandSocket
from .sensors import decode_sensor_reading
from . import PUSH_SERVICE_PORT
class DummyWrapper:
def print(self, *args, **kwargs):
print(*args, **kwargs)
def send(self, signal, args):
print(signal, args)
def atexit(self, func):
import atexit
atexit.register(func)
class FirmwareApp(IntEnum):
OneBL = 1
TwoUp = 2
App = 3
UpApp = 4
class FirmwareSdkCheckPlatform(IntEnum):
WindowsPhone = 1
Windows = 2
Desktop = 3
class PushServicePacketType(IntEnum):
WakeApp = 0
RemoteSubscription = 1
Sms = 100
DismissCall = 101
VoicePacketBegin = 200
VoicePacketData = 201
VoicePacketEnd = 202
VoicePacketCancel = 203
StrappEvent = 204
StrappSyncRequest = 205
CortanaContext = 206
Keyboard = 220
KeyboardSetContent = 222
class BandDevice:
address = ""
cargo = None
push = None
tiles = None
band_language = None
band_name = None
serial_number = None
push_thread = None
services = {}
version: DeviceVersion
wrapper = DummyWrapper()
def __init__(self, address):
self.address = address
self.push = BandSocket(self, PUSH_SERVICE_PORT)
self.cargo = BandSocket(self)
self.wrapper.atexit(self.disconnect)
@property
def band_type(self):
if self.version:
return self.version.band_type
return BandType.Unknown
def connect(self):
self.cargo.connect()
# fetch device data
self.get_firmware_version()
# start push thread
self.push_thread = threading.Thread(target=self.listen_pushservice)
self.push_thread.start()
def disconnect(self):
self.push.disconnect()
self.cargo.disconnect()
def check_if_oobe_completed(self):
result, data = self.cargo.cargo_read(
CARGO_SYSTEM_SETTINGS_OOBE_COMPLETED_GET, 4)
if data:
return struct.unpack("<I", data[0])[0] != 0
return False
def get_me_tile_image_id(self):
result, data = self.cargo.cargo_read(GET_ME_TILE_IMAGE_ID, 4)
if data:
return data[0]
return 0
def get_me_tile_image(self):
"""
Sends READ_ME_TILE_IMAGE command to device and returns a bgr565
byte array with Me tile image
"""
# calculate byte count based on device type
if self.band_type == BandType.Cargo:
byte_count = 310 * 102 * 2
elif self.band_type == BandType.Envoy:
byte_count = 310 * 128 * 2
else:
byte_count = 0
# read Me Tile image
result, data = self.cargo.cargo_read(READ_ME_TILE_IMAGE, byte_count)
pixel_data = b''.join(data)
return pixel_data
def set_me_tile_image(self, pixel_data, image_id):
result, data = self.cargo.cargo_write_with_data(
WRITE_ME_TILE_IMAGE_WITH_ID,
pixel_data,
struct.pack("<I", image_id))
return result, data
def navigate_to_screen(self, screen):
"""
Tells the device to navigate to a given screen.
AFAIK works only with OOBE screens in OOBE mode
"""
return self.cargo.cargo_write_with_data(
NAVIGATE_TO_SCREEN, struct.pack("<H", screen))
def process_push(self, guid, command, message):
for service in self.services.values():
if service.guid == guid:
new_message = service.push(guid, command, message)
if new_message:
message = new_message
break
return message
def process_tile_callback(self, result):
opcode = struct.unpack("I", result[6:10])[0]
guid = uuid.UUID(bytes_le=result[10:26])
command = result[26:44]
tile_name = MsftBandParser.bytes_to_text(result[44:84])
message = {
"opcode": opcode,
"guid": str(guid),
"command": binascii.hexlify(command),
"tile_name": tile_name,
}
message = self.process_push(guid, command, message)
self.wrapper.send("PushService", message)
def process_notification_callback(self, result):
opcode = struct.unpack("I", result[2:6])[0]
guid = uuid.UUID(bytes_le=result[6:22])
command = result[22:]
message = {
"opcode": opcode,
"guid": str(guid),
"command": str(binascii.hexlify(command)),
}
message = self.process_push(guid, command, message)
self.wrapper.send("PushService", message)
def listen_pushservice(self):
self.push.connect()
while True:
try:
result = self.push.receive()
except OSError:
break
packet_type = struct.unpack("H", result[0:2])[0]
self.wrapper.print(PushServicePacketType(packet_type))
if packet_type == PushServicePacketType.RemoteSubscription:
sensor = decode_sensor_reading(result)
self.wrapper.print(sensor)
elif packet_type == PushServicePacketType.Sms:
self.process_notification_callback(result)
elif packet_type == PushServicePacketType.DismissCall:
self.process_notification_callback(result)
elif packet_type == PushServicePacketType.StrappEvent:
self.wrapper.print(binascii.hexlify(result))
self.process_tile_callback(result)
else:
self.wrapper.print(binascii.hexlify(result))
def sync(self):
for service in self.services.values():
self.wrapper.print(f'{service}'.ljust(80), end='')
try:
result = getattr(service, "sync")()
except Exception as exc:
self.wrapper.print(exc)
result = False
self.wrapper.print("[%s]" % ("OK" if result else "FAIL"))
self.wrapper.print("Sync finished")
def clear_tile(self, guid):
self.send_notification(GenericClearTileNotification(guid))
def set_theme(self, colors):
"""
Takes an array of 6 colors encoded as ints
Base, Highlight, Lowlight, SecondaryText, HighContrast, Muted
"""
self.cargo.cargo_write(START_STRIP_SYNC_START)
colors = struct.pack("I"*6, *[int(x) for x in colors])
self.cargo.cargo_write_with_data(SET_THEME_COLOR, colors)
self.cargo.cargo_write(START_STRIP_SYNC_END)
def get_tiles(self):
if not self.tiles:
self.request_tiles()
return self.tiles
def get_serial_number(self):
if not self.serial_number:
# ask nicely for serial number
result, number = self.cargo.cargo_read(SERIAL_NUMBER_REQUEST, 12)
if result:
self.serial_number = number[0].decode("utf-8")
return self.serial_number
def get_max_tile_capacity(self):
# TODO: actual logic for calculating that
return 15
def set_tiles(self):
self.cargo.cargo_write(START_STRIP_SYNC_START)
# icons = []
tiles = []
data = bytes([])
for x in self.tiles:
# icons.append(x['icon'])
tile = bytes([])
tile += x['guid'].bytes_le
tile += struct.pack("<I", x['order'])
tile += struct.pack("<I", x['theme_color'])
tile += struct.pack("<H", len(x['name']))
tile += struct.pack("<H", x['settings_mask'])
tile += MsftBandParser.serialize_text(x['name'], 30)
tiles.append(tile)
# data = b''.join(icons)
data += struct.pack("<I", len(tiles))
data += b''.join(tiles)
result = self.cargo.cargo_write_with_data(
SET_TILES, data, struct.pack("<I", len(tiles))
)
self.cargo.cargo_write(START_STRIP_SYNC_END)
return result
def request_tiles(self, icons=False):
max_tiles = self.get_max_tile_capacity()
response_size = 88 * max_tiles + 4
command = GET_TILES_NO_IMAGES
if icons:
response_size += max_tiles * 1024
command = GET_TILES
result, tiles = self.cargo.cargo_read(
command, response_size)
tile_data = b"".join(tiles)
tile_list = []
tile_icons = []
begin = 0
if icons:
for i in range(0, max_tiles):
tile_icons.append(tile_data[begin:begin+1024])
begin += 1024
# first 4 bytes are tile count
tile_count = struct.unpack("<I", tile_data[begin:begin+4])[0]
begin += 4
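        # Each tile record occupies 88 bytes: guid (16) + order (4)
        # + theme_color (4) + name_length (2) + settings_mask (2),
        # followed by the tile name (52 bytes of it are read; the
        # remaining bytes of the record are skipped).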
i = 0
# while there are tiles
while i < tile_count:
            # get guid
guid = uuid.UUID(bytes_le=tile_data[begin:begin+16])
order = struct.unpack("<I", tile_data[begin+16:begin+20])[0]
theme_color = struct.unpack("<I", tile_data[begin+20:begin+24])[0]
name_length = struct.unpack("<H", tile_data[begin+24:begin+26])[0]
settings_mask = struct.unpack(
"<H", tile_data[begin+26:begin+28]
)[0]
# get tile name
if name_length:
name = MsftBandParser.bytes_to_text(
tile_data[begin+28:begin+80])
else:
name = ''
# append tile to list
tile_list.append({
"guid": guid,
"order": order,
"theme_color": theme_color,
"name_length": name_length,
"settings_mask": settings_mask,
"name": name,
"icon": tile_icons[i] if icons else None
})
# move to next tile
begin += 88
i += 1
self.tiles = tile_list
def send_notification(self, notification):
self.cargo.cargo_write_with_data(
CARGO_NOTIFICATION, notification.serialize()
)
def get_firmware_version(self):
result, info = self.cargo.cargo_read(CORE_GET_VERSION, 19*3)
info = b''.join(info)
self.version = DeviceVersion()
offset = 0
for i in range(0, 3):
fw_version = FirmwareVersion.deserialize(info[offset:offset+19])
if fw_version.app_name == '1BL':
self.version.bootloader = fw_version
elif fw_version.app_name == '2UP':
self.version.updater = fw_version
elif fw_version.app_name == 'App':
self.version.application = fw_version
offset += 19
return self.version
def get_api_version(self):
result, info = self.cargo.cargo_read(CORE_GET_API_VERSION, 4)
version = struct.unpack('I', info[0])[0]
return version
def get_running_firmware_app(self):
"""
Returns what mode Band is running in.
- OneBL - Bootloader
- TwoUp - Updater
- App - Regular mode
- UpApp - Probably also Updater (?)
"""
result, info = self.cargo.cargo_read(CORE_WHO_AM_I, 1)
app = struct.unpack('B', info[0])[0]
return FirmwareApp(app)
def check_firmware_sdk_bit(self, platform, reserved):
arguments = struct.pack('BBH', int(platform), int(reserved), 3)
self.cargo.cargo_write_with_data(CORE_SDK_CHECK, arguments)
|
11565229
|
import os
path = "/Users/chuntinz/Downloads/XNLI-1.0"
def read_test_tsv(path, opt1, opt2):
with open(path, "r", encoding="utf-8") as fin, open(opt1, 'w', encoding='utf-8') as fout1, open(opt2, 'w', encoding='utf-8') as fout2:
for line in fin:
fields = line.strip().split('\t')
lang = fields[0]
sent1, sent2 = fields[-3], fields[-2]
label = fields[1]
promptID, pairID = fields[8], fields[9]
if lang == "zh":
fout1.write("{}\t{}\t{}\t{}\t{}\n".format(sent1, sent2, label, promptID, pairID))
if lang == "en":
fout2.write("{}\t{}\t{}\t{}\t{}\n".format(sent1, sent2, label, promptID, pairID))
def make_new_test_dev(path1, path2, out_dir, prefix):
sent1_opt = open(os.path.join(out_dir, "{}.sent1".format(prefix)), "w", encoding="utf-8")
sent2_opt = open(os.path.join(out_dir, "{}.sent2".format(prefix)), "w", encoding="utf-8")
label_opt = open(os.path.join(out_dir, "{}.label".format(prefix)), "w", encoding="utf-8")
with open(path1, "r", encoding="utf-8") as fin1, open(path2, "r", encoding="utf-8") as fin2:
for l1, l2 in zip(fin1, fin2):
zh_sent_1, zh_sent_2, label1, zh_promptID, zh_pairID = l1.strip().split('\t')
en_sent_1, en_sent_2, label2, en_promptID, en_pairID = l2.strip().split('\t')
assert label1 == label2 and zh_promptID == en_promptID and zh_pairID == en_pairID
sent1_opt.write(zh_sent_1 + "\n")
sent2_opt.write(en_sent_2 + "\n")
if label1 == "contradiction":
label_opt.write("contradictory\n")
else:
label_opt.write(label1 + "\n")
sent1_opt.close()
sent2_opt.close()
label_opt.close()
def make_train(path1, path2, out_dir, prefix):
sent1_opt = open(os.path.join(out_dir, "{}.sent1".format(prefix)), "w", encoding="utf-8")
sent2_opt = open(os.path.join(out_dir, "{}.sent2".format(prefix)), "w", encoding="utf-8")
label_opt = open(os.path.join(out_dir, "{}.label".format(prefix)), "w", encoding="utf-8")
count = 0
with open(path1, "r", encoding="utf-8") as fin1, open(path2, "r", encoding="utf-8") as fin2:
for l1, l2 in zip(fin1, fin2):
zh_sent_1, zh_sent_2, label1 = l1.strip().split("\t")
en_sent1_1, en_sent_2, label2 = l2.strip().split("\t")
if count == 0:
count += 1
continue
count += 1
assert label1 == label2
sent1_opt.write(zh_sent_1 + "\n")
sent2_opt.write(en_sent_2 + "\n")
label_opt.write(label1 + "\n")
sent1_opt.close()
sent2_opt.close()
label_opt.close()
read_test_tsv(os.path.join(path, "xnli.test.tsv"), os.path.join(path, "zh.test.tsv"), os.path.join(path, "en.test.tsv"))
read_test_tsv(os.path.join(path, "xnli.dev.tsv"), os.path.join(path, "zh.dev.tsv"), os.path.join(path, 'en.dev.tsv'))
make_new_test_dev(os.path.join(path, "zh.test.tsv"), os.path.join(path, "en.test.tsv"), os.path.join(path, "zh_en"), "test")
make_new_test_dev(os.path.join(path, "zh.dev.tsv"), os.path.join(path, "en.dev.tsv"), os.path.join(path, "zh_en"), "valid")
train_path = "/Users/chuntinz/Downloads/XNLI-MT-1.0/multinli"
make_train(os.path.join(train_path, "multinli.train.zh.tsv"), os.path.join(train_path, "multinli.train.en.tsv"), os.path.join(path, "zh_en"), "train")
|
11565266
|
from __future__ import print_function, unicode_literals, absolute_import
import codecs
import contextlib
import itertools
import os
import re
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Check if a generator has at least one element.
#
# Since we don't want to consume the element, the function returns a tuple.
# The first element is a boolean telling whether or not the generator is empty.
# The second element is a new generator where the first element has been
# put back.
def empty_iterator_wrap(iterator):
try:
first = next(iterator)
except StopIteration:
return True, None
return False, itertools.chain([first], iterator)
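# Example (illustrative): the first element is put back, so nothing is lost.
#
#     is_empty, gen = empty_iterator_wrap(iter([1, 2]))
#     assert not is_empty and list(gen) == [1, 2]
#     is_empty, gen = empty_iterator_wrap(iter([]))
#     assert is_empty and gen is None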
# compatibility function,
# not as smart as the version in the Python standard library
@contextlib.contextmanager
def suppress(*exceptions):
"""Context manager to suppress specified exceptions
with suppress(OSError):
os.remove(somefile)
"""
try:
yield
except exceptions:
pass
def re_fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
return re.match("(?:" + regex + r")\Z", string, flags=flags)
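# Example (illustrative): unlike re.match, the emulated fullmatch only
# accepts a match covering the whole string.
#
#     assert re_fullmatch('ab', 'ab')
#     assert re_fullmatch('ab', 'abc') is None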
# The issue this function tries to solve is to have a text writer where unicode
# data can be written without decoding error. It should work in the following
# conditions:
# - python 2 & 3, output to terminal
# - python 2 & 3, output to a pipe or shell redirection
# - python 2 & 3, output to a StringIO
#
# When using python 2, if the program output is redirected to a pipe or file,
# the output encoding may be set to 'ascii',
# potentially producing UnicodeEncodeError.
# Redirections do not seem to cause such issues with python 3
# but explicit utf-8 encoding seems a sensible choice to output data to be
# consumed by other programs (e.g: JSON).
def stdout_unicode_writer():
stream = sys.stdout
if isinstance(stream, StringIO):
return stream
if hasattr(stream, 'buffer'):
stream = stream.buffer
return codecs.getwriter('utf-8')(stream)
def get_friendly_path(path):
full_path = os.path.normpath(path)
try:
rel_path = os.path.relpath(full_path)
except ValueError:
# on Windows, we can get a ValueError
# if the current directory is on another drive:
# > ValueError: path is on drive D:, start on drive C:
# > -- https://github.com/Sarcasm/compdb/issues/16
return full_path
if rel_path.startswith(os.path.join(os.pardir, os.pardir)):
friendly_path = full_path
else:
friendly_path = rel_path
return friendly_path
def logical_abspath(p):
"""Same as os.path.abspath,
but use the logical current working to expand relative paths.
"""
if os.path.isabs(p):
return os.path.normpath(p)
cwd = os.getenv('PWD')
if cwd and os.path.isabs(cwd) and os.path.samefile(cwd, '.'):
return os.path.normpath(os.path.join(cwd, p))
return os.path.abspath(p)
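# Example (illustrative, with a hypothetical symlinked checkout): if
# $PWD is '/home/me/project' (a symlink to '/data/project'), then
# logical_abspath('src') yields '/home/me/project/src', whereas
# os.path.abspath('src') would yield '/data/project/src'.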
def locate_dominating_file(name, start_dir=os.curdir):
curdir = os.path.abspath(start_dir)
olddir = None
while not curdir == olddir:
if os.path.exists(os.path.join(curdir, name)):
return curdir
olddir = curdir
curdir = os.path.dirname(curdir)
return None
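# Example (illustrative): walk up from the current directory until a
# marker file is found, e.g. to locate a project root.
#
#     root = locate_dominating_file('compile_commands.json')
#     # -> closest ancestor directory containing that file, or None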
|
11565312
|
import typing
from .._common import Concept, ConceptList
from .common import iterunion
from .fcbo import fast_generate_from, fcbo_dual
from .lindig import lattice, neighbors
__all__ = ['iterunion',
'fast_generate_from', 'fcbo_dual',
           'lattice', 'neighbors',
           'iterconcepts', 'get_concepts']
def iterconcepts(context) -> typing.Iterator[Concept]:
    pairs = fast_generate_from(context)
    return map(Concept._make, pairs)
def get_concepts(context) -> typing.List[Concept]:
    pairs = fast_generate_from(context)
    return ConceptList.frompairs(pairs)
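# Illustrative usage (a sketch; assumes `context` is a formal context
# object accepted by fast_generate_from):
#
#     for concept in iterconcepts(context):
#         ...
#     concept_list = get_concepts(context)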
|
11565324
|
import FWCore.ParameterSet.Config as cms
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/normal/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/CMSCommonData/data/caloBase.xml',
'Geometry/CMSCommonData/data/cmsCalo.xml',
'Geometry/EcalCommonData/data/ecal_MTCC.xml',
'Geometry/EcalSimData/data/ecalsens_MTCC.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml'),
rootNodeName = cms.string('cms:OCMS')
)
|
11565331
|
import pytest
from eth_utils import (
decode_hex,
to_tuple,
)
from eth_keys import keys
from eth_utils import ValidationError
from eth.chains.base import MiningChain
from eth.chains.goerli import (
GOERLI_GENESIS_HEADER,
)
from eth.consensus.clique import (
CliqueApplier,
CliqueConsensus,
CliqueConsensusContext,
NONCE_AUTH,
NONCE_DROP,
VoteAction,
)
from eth.consensus.clique.constants import (
VANITY_LENGTH,
SIGNATURE_LENGTH,
)
from eth.consensus.clique._utils import (
get_block_signer,
sign_block_header,
)
from eth.constants import (
ZERO_ADDRESS
)
from eth.rlp.headers import BlockHeader
from eth.tools.factories.keys import PublicKeyFactory
from eth.tools.factories.transaction import new_transaction
from eth.vm.forks.istanbul import IstanbulVM
from eth.vm.forks.petersburg import PetersburgVM
ALICE_PK = keys.PrivateKey(
decode_hex('0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8')
)
ALICE = ALICE_PK.public_key.to_canonical_address()
ALICE_INITIAL_BALANCE = 21000000
BOB_PK = keys.PrivateKey(
decode_hex('0x15a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8')
)
BOB = BOB_PK.public_key.to_canonical_address()
BOB_INITIAL_BALANCE = 21000000
RON_PK = keys.PrivateKey(
decode_hex('0x25a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8')
)
RON = RON_PK.public_key.to_canonical_address()
# We derive a genesis header from the Görli Genesis Header with the slight
# modification that we change the initial list of signers to have Alice and
# change the state to give Alice and Bob some funds.
PARAGON_GENESIS_HEADER = sign_block_header(
GOERLI_GENESIS_HEADER.copy(
extra_data=VANITY_LENGTH * b'0' + ALICE + SIGNATURE_LENGTH * b'0',
state_root=b'\xce]\x98X"Xm\xaf\xab\xc7\xf8\x91\xc0{\xfc\x0eNKf9uu\xd8\xe2\x0e\x81@g68\x1a\xa3' # noqa: E501
),
ALICE_PK
)
# Genesis params are derived from the genesis header
# values that are not yet customizable (and will automatically be default) are commented out
PARAGON_GENESIS_PARAMS = {
# 'parent_hash': PARAGON_GENESIS_HEADER.parent_hash,
# 'uncles_hash': PARAGON_GENESIS_HEADER.uncles_hash,
'coinbase': PARAGON_GENESIS_HEADER.coinbase,
# 'transaction_root': PARAGON_GENESIS_HEADER.transaction_root,
# 'receipt_root': PARAGON_GENESIS_HEADER.receipt_root,
'difficulty': PARAGON_GENESIS_HEADER.difficulty,
# 'block_number': PARAGON_GENESIS_HEADER.block_number,
'timestamp': PARAGON_GENESIS_HEADER.timestamp,
'gas_limit': PARAGON_GENESIS_HEADER.gas_limit,
'extra_data': PARAGON_GENESIS_HEADER.extra_data,
'nonce': PARAGON_GENESIS_HEADER.nonce
}
PARAGON_GENESIS_STATE = {
ALICE: {
"balance": ALICE_INITIAL_BALANCE,
"code": b'',
"nonce": 0,
"storage": {},
},
BOB: {
"balance": BOB_INITIAL_BALANCE,
"code": b'',
"nonce": 0,
"storage": {},
},
}
GOERLI_GENESIS_HASH = decode_hex('0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a') # noqa: E501
GOERLI_GENESIS_ALLOWED_SIGNER = decode_hex('0xe0a2bd4258d2768837baa26a28fe71dc079f84c7')
DAPOWERPLAY_SIGNER = decode_hex('0xa8e8f14732658e4b51e8711931053a8a69baf2b1')
GOERLI_HEADER_ONE = BlockHeader(
difficulty=2,
block_number=1,
gas_limit=10475521,
timestamp=1548947453,
coinbase=decode_hex('0x0000000000000000000000000000000000000000'),
parent_hash=decode_hex('0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a'),
uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008'),
transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'), # noqa: E501
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'),
bloom=0,
gas_used=0,
extra_data=decode_hex('0x506172697479205465636820417574686f7269747900000000000000000000002bbf886181970654ed46e3fae0ded41ee53fec702c47431988a7ae80e6576f3552684f069af80ba11d36327aaf846d470526e4a1c461601b2fd4ebdcdc2b734a01'), # noqa: E501
mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000'),
nonce=decode_hex('0x0000000000000000'),
)
def has_vote_to(subject, votes):
return any(vote.subject == subject for vote in votes)
def has_vote_from(signer, votes):
return any(vote.signer == signer for vote in votes)
def make_next_header(chain,
previous_header,
signer_private_key,
coinbase=ZERO_ADDRESS,
nonce=NONCE_DROP,
difficulty=2):
unsigned_header = chain.create_header_from_parent(
previous_header,
coinbase=coinbase,
nonce=nonce,
timestamp=previous_header.timestamp + 1,
gas_limit=previous_header.gas_limit,
difficulty=difficulty,
# FIXME: I think our sign_block_header is wrong
extra_data=VANITY_LENGTH * b'0' + SIGNATURE_LENGTH * b'0',
)
return sign_block_header(unsigned_header, signer_private_key)
@to_tuple
def alice_nominates_bob_and_ron_then_they_kick_her(chain):
header = PARAGON_GENESIS_HEADER
header = make_next_header(chain, header, ALICE_PK)
yield header
header = make_next_header(chain, header, ALICE_PK, BOB, NONCE_AUTH)
yield header
    # At this point, we have a new signer because a single vote was enough to win
header = make_next_header(chain, header, BOB_PK, RON, NONCE_AUTH)
yield header
header = make_next_header(chain, header, ALICE_PK, RON, NONCE_AUTH)
yield header
    # But we needed two votes to get a third signer approved (> 50% of signers)
header = make_next_header(chain, header, BOB_PK, ALICE, NONCE_DROP)
yield header
header = make_next_header(chain, header, RON_PK, ALICE, NONCE_DROP)
yield header
header = make_next_header(chain, header, BOB_PK)
yield header
def validate_seal_and_get_snapshot(clique, header):
clique.validate_seal_extension(header, ())
return clique.get_snapshot(header)
@pytest.fixture
def paragon_chain(base_db):
vms = (
(0, PetersburgVM,),
(2, IstanbulVM,)
)
clique_vms = CliqueApplier().amend_vm_configuration(vms)
chain = MiningChain.configure(
vm_configuration=clique_vms,
consensus_context_class=CliqueConsensusContext,
chain_id=5,
).from_genesis(base_db, PARAGON_GENESIS_PARAMS, PARAGON_GENESIS_STATE)
return chain
def get_clique(chain, header=None):
if header:
vm = chain.get_vm(header)
else:
vm = chain.get_vm()
clique = vm._consensus
assert isinstance(clique, CliqueConsensus)
return clique
def test_can_retrieve_root_snapshot(paragon_chain):
head = paragon_chain.get_canonical_head()
snapshot = get_clique(paragon_chain, head).get_snapshot(head)
assert snapshot.get_sorted_signers() == [ALICE]
def test_raises_unknown_ancestor_error(paragon_chain):
head = paragon_chain.get_canonical_head()
next_header = make_next_header(paragon_chain, head, ALICE_PK, RON, NONCE_AUTH)
clique = get_clique(paragon_chain, head)
with pytest.raises(ValidationError, match='Unknown ancestor'):
clique.get_snapshot(next_header)
def test_validate_chain_works_across_forks(paragon_chain):
voting_chain = alice_nominates_bob_and_ron_then_they_kick_her(paragon_chain)
paragon_chain.validate_chain_extension((PARAGON_GENESIS_HEADER,) + voting_chain)
def test_import_block(paragon_chain):
vm = paragon_chain.get_vm()
tx = new_transaction(vm, ALICE, BOB, 10, ALICE_PK, gas_price=10)
assert vm.state.get_balance(ALICE) == ALICE_INITIAL_BALANCE
assert vm.state.get_balance(BOB) == BOB_INITIAL_BALANCE
assert vm.state.get_balance(vm.get_block().header.coinbase) == 0
signed_header = sign_block_header(vm.get_block().header.copy(
extra_data=VANITY_LENGTH * b'0' + SIGNATURE_LENGTH * b'0',
state_root=b'\x99\xaa\xf5CF^\x95_\xce~\xe4)\x00\xb1zr\x1dr\xd6\x00N^\xa6\xdc\xc41\x90~\xb7te\x00', # noqa: E501
transaction_root=b'\xd1\t\xc4\x150\x9f\xb0\xb4H{\xfd$?Q\x16\x90\xac\xb2L[f\x98\xdd\xc6*\xf7\n\x84f\xafg\xb3', # noqa: E501
nonce=NONCE_DROP,
gas_used=21000,
difficulty=2,
receipt_root=b'\x05k#\xfb\xbaH\x06\x96\xb6_\xe5\xa5\x9b\x8f!H\xa1)\x91\x03\xc4\xf5}\xf89#:\xf2\xcfL\xa2\xd2' # noqa: E501
), ALICE_PK)
block = vm.get_block_class()(
header=signed_header,
transactions=[tx]
)
assert get_block_signer(block.header) == ALICE
paragon_chain.import_block(block)
# Alice new balance is old balance - 10 + 21000 tx fee (she's the signer)
assert paragon_chain.get_vm().state.get_balance(ALICE) == 20999990
assert paragon_chain.get_vm().state.get_balance(BOB) == 21000010
# Nothing goes to the coinbase in Clique
assert paragon_chain.get_vm().state.get_balance(vm.get_block().header.coinbase) == 0
def test_reapplies_headers_without_snapshots(paragon_chain):
voting_chain = alice_nominates_bob_and_ron_then_they_kick_her(paragon_chain)
# We save the headers but we do not create intermediate snapshots
    # to prove that the SnapshotManager re-applies all needed headers
# on its own.
for i in range(5):
paragon_chain.chaindb.persist_header(voting_chain[i])
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[5])
assert snapshot.signers == {BOB, RON}
def test_can_persist_and_restore_snapshot_from_db(paragon_chain):
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, PARAGON_GENESIS_HEADER)
clique._snapshot_manager.persist_snapshot(snapshot)
revived = clique._snapshot_manager.get_snapshot_from_db(PARAGON_GENESIS_HEADER.hash)
assert snapshot == revived
def test_revert_previous_nominate(paragon_chain):
head = paragon_chain.get_canonical_head()
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, head)
assert len(snapshot.tallies) == 0
alice_votes_bob = make_next_header(
paragon_chain, head, ALICE_PK, coinbase=BOB, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
alice_votes_ron = make_next_header(
paragon_chain, alice_votes_bob, ALICE_PK, coinbase=RON, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_ron)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[RON].action == VoteAction.NOMINATE
assert snapshot.tallies[RON].votes == 1
alice_votes_against_ron = make_next_header(
paragon_chain, alice_votes_ron, ALICE_PK, coinbase=RON, nonce=NONCE_DROP, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_against_ron)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
    # RON doesn't have a Tally anymore because Alice simply voted against her previous nomination
# but since RON isn't a member at this point, there is no Tally to open up to count kicks.
assert RON not in snapshot.tallies
def test_revert_previous_kick(paragon_chain):
head = paragon_chain.get_canonical_head()
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, head)
assert len(snapshot.tallies) == 0
alice_votes_bob = make_next_header(
paragon_chain, head, ALICE_PK, coinbase=BOB, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
alice_kicks_bob = make_next_header(
paragon_chain, alice_votes_bob, ALICE_PK, coinbase=BOB, nonce=NONCE_DROP)
snapshot = validate_seal_and_get_snapshot(clique, alice_kicks_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[BOB].action == VoteAction.KICK
assert snapshot.tallies[BOB].votes == 1
alice_votes_bob = make_next_header(
paragon_chain, alice_kicks_bob, ALICE_PK, coinbase=BOB, nonce=NONCE_AUTH, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
    # BOB doesn't have a Tally anymore because Alice simply voted against her previous kick
    # vote. But since BOB is already a member at this point, there is no Tally to open to count
    # further nominations
assert BOB not in snapshot.tallies
def test_does_not_count_multiple_kicks(paragon_chain):
head = paragon_chain.get_canonical_head()
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, head)
assert len(snapshot.tallies) == 0
alice_votes_bob = make_next_header(
paragon_chain, head, ALICE_PK, coinbase=BOB, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
alice_kicks_bob = make_next_header(
paragon_chain, alice_votes_bob, ALICE_PK, coinbase=BOB, nonce=NONCE_DROP)
snapshot = validate_seal_and_get_snapshot(clique, alice_kicks_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[BOB].action == VoteAction.KICK
assert snapshot.tallies[BOB].votes == 1
alice_kicks_bob_again = make_next_header(
paragon_chain, alice_kicks_bob, ALICE_PK, coinbase=BOB, nonce=NONCE_DROP, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, alice_kicks_bob_again)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[BOB].action == VoteAction.KICK
assert snapshot.tallies[BOB].votes == 1
def test_does_not_count_multiple_nominates(paragon_chain):
head = paragon_chain.get_canonical_head()
clique = get_clique(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, head)
assert len(snapshot.tallies) == 0
alice_votes_bob = make_next_header(
paragon_chain, head, ALICE_PK, coinbase=BOB, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_bob)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
alice_votes_ron = make_next_header(
paragon_chain, alice_votes_bob, ALICE_PK, coinbase=RON, nonce=NONCE_AUTH)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_ron)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[RON].action == VoteAction.NOMINATE
assert snapshot.tallies[RON].votes == 1
alice_votes_ron_again = make_next_header(
paragon_chain, alice_votes_ron, ALICE_PK, coinbase=RON, nonce=NONCE_AUTH, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, alice_votes_ron_again)
assert snapshot.get_sorted_signers() == [ALICE, BOB]
assert snapshot.tallies[RON].action == VoteAction.NOMINATE
assert snapshot.tallies[RON].votes == 1
def test_alice_votes_in_bob_and_ron_then_gets_kicked(paragon_chain):
clique = get_clique(paragon_chain)
voting_chain = alice_nominates_bob_and_ron_then_they_kick_her(paragon_chain)
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[0])
assert snapshot.signers == {ALICE}
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[1])
# We have a new signer at this block height
assert snapshot.signers == {ALICE, BOB}
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[2])
assert snapshot.tallies[RON].action == VoteAction.NOMINATE
assert snapshot.tallies[RON].votes == 1
assert snapshot.signers == {ALICE, BOB}
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[3])
# And another signer got approved
assert snapshot.signers == {ALICE, BOB, RON}
# Ensure the Tally got removed as soon as Ron is approved
assert RON not in snapshot.tallies
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[4])
assert snapshot.tallies[ALICE].action == VoteAction.KICK
assert snapshot.tallies[ALICE].votes == 1
assert snapshot.signers == {ALICE, BOB, RON}
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[5])
# A signer was removed
assert snapshot.signers == {BOB, RON}
# Ensure the Tally got removed as soon as Alice is removed
assert ALICE not in snapshot.tallies
def test_removes_all_pending_votes_after_nomination(paragon_chain):
clique = get_clique(paragon_chain)
voting_chain = alice_nominates_bob_and_ron_then_they_kick_her(paragon_chain)
# Fast forward to the point where we have Alice and Bob as signers
snapshot = None
for i in range(3):
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[i])
assert snapshot.signers == {ALICE, BOB}
# We should have a vote from Bob to nominate Ron
assert has_vote_to(RON, snapshot.votes)
assert has_vote_from(BOB, snapshot.votes)
# We don't have a vote from Alice yet as it would immediately finalize the voting
assert not has_vote_from(ALICE, snapshot.votes)
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[3])
    # Alice's vote was the turning point of reaching >50% consensus
assert snapshot.signers == {ALICE, BOB, RON}
# Ensure the Tally and all pending votes are removed
assert RON not in snapshot.tallies
assert not has_vote_to(RON, snapshot.votes)
assert not has_vote_from(BOB, snapshot.votes)
assert not has_vote_from(ALICE, snapshot.votes)
def test_removes_all_pending_votes_after_kick(paragon_chain):
clique = get_clique(paragon_chain)
ALICE_FRIEND = PublicKeyFactory().to_canonical_address()
voting_chain = alice_nominates_bob_and_ron_then_they_kick_her(paragon_chain)
# Fast forward to the point where we have Alice, Bob and Ron as signers
snapshot = None
for i in range(4):
snapshot = validate_seal_and_get_snapshot(clique, voting_chain[i])
assert snapshot.signers == {ALICE, BOB, RON}
# Alice nominates a weird friend that Bob and Ron have never heard of
alices_nominates_friend = make_next_header(
paragon_chain,
voting_chain[3], ALICE_PK, coinbase=ALICE_FRIEND, nonce=NONCE_AUTH, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, alices_nominates_friend)
assert ALICE_FRIEND in snapshot.tallies
assert has_vote_to(ALICE_FRIEND, snapshot.votes)
assert has_vote_from(ALICE, snapshot.votes)
# Bob and Ron get upset and kick Alice
bob_kicks_alice = make_next_header(
paragon_chain,
alices_nominates_friend, BOB_PK, coinbase=ALICE, nonce=NONCE_DROP, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, bob_kicks_alice)
ron_kicks_alice = make_next_header(
paragon_chain, bob_kicks_alice, RON_PK, coinbase=ALICE, nonce=NONCE_DROP, difficulty=1)
snapshot = validate_seal_and_get_snapshot(clique, ron_kicks_alice)
# As Alice was kicked, her pending votes regarding her friend and his tally were removed
assert snapshot.signers == {BOB, RON}
assert not has_vote_from(ALICE, snapshot.votes)
assert not has_vote_to(ALICE_FRIEND, snapshot.votes)
assert ALICE_FRIEND not in snapshot.tallies
|
11565365
|
import os
AWS_CONFIG = {
'account_id' : os.environ['AWS_ACCOUNT_ID'],
'region' : os.environ['AWS_DEFAULT_REGION']
}
LAMBDA_CONFIG = {
'function_name':'floop-cli_test',
'src_dir':'./floop-cli_test/',
'zip_archive':'./floop-cli_test.zip',
'role':'floop-cli-test-role',
'runtime':'python3.6',
'handler':'test.lambda_handler',
'expect_failure' : False
}
|
11565381
|
import os
import time
from socket import gethostname
from balsam.service.schedulers.exceptions import (
NoQStatInformation)
import logging
logger = logging.getLogger(__name__)
class JobEnvironment:
# TODO(KGF): change keys to lowercase to be consistent with class names in
# mpi_commands.py. However, will req changes to worker.py::setup_SLURM(self)
RECOGNIZED_HOSTS = {
'BGQ': 'vesta cetus mira'.split(),
'THETA': 'theta'.split(),
'COOLEY': 'cooley cc'.split(),
'SLURM': 'bebop blues lcrc'.split(),
}
def __init__(self, scheduler):
self.scheduler_vars = scheduler.SCHEDULER_VARIABLES
self.pid = os.getpid()
self.hostname = gethostname()
self.current_scheduler_id = None
self.num_workers = 1
self.workers_str = None
self.workers_file = None
self.remaining_seconds = float("inf")
self.get_env()
try:
info = scheduler.get_status(self.current_scheduler_id)
self.remaining_seconds = info['time_remaining_sec']
except (NoQStatInformation, TypeError, KeyError):
pass
self._last_check_seconds = time.time()
def get_env(self):
'''Check for environment variables (e.g. COBALT_JOBID) indicating
currently inside a scheduled job'''
for generic_name, specific_var in self.scheduler_vars.items():
value = os.environ.get(specific_var, None)
            if value is not None and generic_name.startswith('num'):
value = int(value)
setattr(self, generic_name, value)
if self.current_scheduler_id:
self.current_scheduler_id = int(self.current_scheduler_id)
logger.debug(f"Detected scheduler ID {self.current_scheduler_id}")
def remaining_time_seconds(self):
'''Either counts down from RemainingTime obtained from scheduler, or infinity'''
now = time.time()
elapsed_time = now - self._last_check_seconds
self.remaining_seconds -= elapsed_time
self._last_check_seconds = now
return self.remaining_seconds
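# Illustrative behaviour: if the scheduler reported 3600 s remaining at
# startup, calling remaining_time_seconds() 60 s later returns roughly
# 3540; without scheduler info it stays at float("inf").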
|
11565418
|
from lib.actions import OctopusDeployAction
__all__ = [
'ListProjectsAction'
]
class ListProjectsAction(OctopusDeployAction):
def run(self):
result = self.make_get_request(action="projects/all")
return result.get('Items', None)
|
11565446
|
from webbrowser import get
from bitcoinlib.mnemonic import Mnemonic
from bitcoinlib.wallets import Wallet
from bitcoinlib.keys import HDKey
from bitcoinlib.keys import Key
from dotenv import load_dotenv
from appJar import gui
from appJar.appjar import WIDGET_NAMES
import datetime
import requests
import serial
import random
import string
import qrcode
import json
import time
import os
if os.environ.get('DISPLAY', '') == '':
    os.environ['DISPLAY'] = ':0.0'
def log(msg):
if (msg == ''): return
day = f"{datetime.datetime.now():%d}"
month = f"{datetime.datetime.now():%m}"
logDir = f'./logs/{month}/'
logFile = f'./logs/{month}/{day}.txt'
    if not os.path.isdir(logDir):
        os.makedirs(logDir, exist_ok=True)
logFileObject = open(logFile, 'a')
    logLine = f'[{datetime.datetime.now():%Y-%m-%d %I:%M:%S}] {msg} \n'
logFileObject.write(logLine)
logFileObject.close()
print(logLine)
load_dotenv()
log('Starting...')
time.sleep(10)
log('Started')
premiumRate = float(os.environ.get('PREMIUM', '0.02'))
credit = 0
credit_ = 0
apex = serial.Serial(port='/dev/ttyUSB0', baudrate=9600, timeout=1)
# Output Variables
outputBTC = ""
outputName = ""
outputKey = False
outputKeyAddress = ""
outputTXString = ""
shouldPrint = False
shouldCreateTX = False
shouldCreateWallet = False
# BTC Prices
btc = False
btcBuyPrice = False
while not (btc and btcBuyPrice):
try:
log(f'Updating BTC price')
btcValueRequest = requests.get(f'https://blockchain.info/tobtc?currency=USD&value=1')
btc = float(btcValueRequest.text)
btcBuyPriceRequest = requests.get(f'https://blockchain.info/ticker')
ticker = json.loads(btcBuyPriceRequest.text)
btcBuyPrice = ticker['USD']['buy']
log(f'$1 USD = ₿ {btc}')
log(f'₿ 1 = $USD {str(btcBuyPrice)}')
    except Exception:
print("An exception occurred while getting BTC Prices")
time.sleep(5)
# Init ATM wallet
log(f'Loading ATM wallet')
walletName = 'MAIN-'+''.join(random.choices(string.ascii_lowercase, k=10))
seed = Mnemonic().to_seed(os.environ.get('MAIN_SEED')).hex()
mainKey = HDKey().from_seed(seed)
mainWallet = Wallet.create(walletName, mainKey)
mainWallet.scan(scan_gap_limit=5)
balance = str(mainWallet.balance())
log(f'ATM wallet balance: ₿{balance}')
app = gui("ATM", os.environ.get('DISPLAY_SIZE', 'fullscreen'))
def serialLoop():
global shouldPrint
global shouldCreateTX
global shouldCreateWallet
global credit
global btc
    # AB: Very poor code, improve. I should have used electron.
if (shouldPrint):
shouldPrint = False
printWallet()
elif (shouldCreateTX):
shouldCreateTX = False
createTX()
elif (shouldCreateWallet):
shouldCreateWallet = False
createWallet()
else:
input = apex.readline().decode("utf-8").rstrip().lstrip()
log(input)
        denominations = {'$1 Credit': 1, '$5 Credit': 5, '$10 Credit': 10,
                         '$20 Credit': 20, '$50 Credit': 50, '$100 Credit': 100}
        if input in denominations:
            credit = credit + denominations[input]
            print(credit)
if (credit > 0):
app.setFont(size=70)
app.setLabel('line3', 'Premium: ' + str(premiumRate*100) + '%')
app.setLabel('line1', '$'+str(credit))
credit_ = creditSubtractPremium()
btc_ = btc*credit_
app.setLabel('line2', f'≈ ₿{btc_:.8f}')
if (input == 'PRINT'):
app.hideWidgetType(WIDGET_NAMES.Label, 'line1')
app.hideWidgetType(WIDGET_NAMES.Label, 'line3')
app.setLabel('line2', f'Creating Wallet...')
shouldCreateWallet = True
def createWallet():
global shouldCreateTX
global outputKey
global outputKeyAddress
global outputName
log(f'createWallet()')
outputKey = Key()
outputKeyAddress = outputKey.address()
outputName = ''.join(random.choices(string.ascii_lowercase, k=10))
app.setLabel('line2', f'Signing Transaction...')
shouldCreateTX = True
def createTX():
global shouldPrint
global outputTXString
global outputKeyAddress
global outputBTC
global credit
log(f'createTX()')
# Calculate outputBTC
credit_ = creditSubtractPremium()
log(f'credit: {credit}')
    log(f'creditSubtractPremium: {credit_}')
btcCreditValueRequest = requests.get(f'https://blockchain.info/tobtc?currency=USD&value={credit_}')
outputBTC = btcCreditValueRequest.text
log(f'profit: {credit - credit_}')
log(f'creditSubtractPremium as BTC: {outputBTC}')
# Make Transaction
tx = mainWallet.send_to(outputKeyAddress, str(outputBTC)+' BTC')
tx.info()
export = tx.export()
if (export[0]):
outputTXString = export[0][1]
    app.setLabel('line2', f'Printing Receipt...')
shouldPrint = True
def printWallet():
global btcBuyPrice
global credit
global outputName
global outputKey
global outputBTC
global outputKeyAddress
global outputTXString
    global shouldPrint
    global shouldCreateTX
    global shouldCreateWallet
log(f'printWallet()')
os.system(f'mkdir -p ./output/{outputName}')
os.system(f'cp ./wallet/index.html ./output/{outputName}')
os.system(f'cp ./wallet/background.jpeg ./output/{outputName}')
os.system(f'cp ./wallet/ticketing.ttf ./output/{outputName}')
privateKey = outputKey.wif()
publicKeyQR = qrcode.make(outputKeyAddress)
publicKeyQR.save(f'./output/{outputName}/public.png')
statusQR = qrcode.make(f'https://www.blockchain.com/btc/tx/{outputTXString}')
statusQR.save(f'./output/{outputName}/status.png')
privateKeyQR = qrcode.make(privateKey)
privateKeyQR.save(f'./output/{outputName}/private.png')
with open(f'./output/{outputName}/index.html', 'r') as file:
data = file.read()
data = data.replace('[BTC]', f'₿{float(outputBTC):.8f}')
data = data.replace('[CURRENCY]', 'BITCOIN')
data = data.replace('[TIME]', f"{datetime.datetime.now():%I:%M:%S}")
data = data.replace('[DATE]', f"{datetime.datetime.now():%Y-%m-%d}")
data = data.replace('[ADDRESS]', outputKeyAddress)
data = data.replace('[TX]', outputTXString)
data = data.replace('[PRIVATE_KEY]', privateKey)
with open(f'./output/{outputName}/index.html', 'w') as file:
file.write(data)
os.system(f'wkhtmltopdf -q --page-height 150 --page-width 100 -O Landscape ./output/{outputName}/index.html ./output/{outputName}/wallet.pdf')
os.system(f'lp -h localhost:631 ./output/{outputName}/wallet.pdf')
time.sleep(1)
apex.flush()
    credit = 0
apex.write(b"RESET\n")
apex.flush()
time.sleep(1)
os.system(f'rm -rf ./output/{outputName}')
# Reset UI
updateBTCBuyPriceRequest = requests.get(f'https://blockchain.info/ticker')
ticker = json.loads(updateBTCBuyPriceRequest.text)
btcBuyPrice = ticker['USD']['buy']
app.setFont(size=50)
app.showWidgetType(WIDGET_NAMES.Label, 'line1')
app.showWidgetType(WIDGET_NAMES.Label, 'line3')
app.setLabel('line1', 'Open-Source Bitcoin ATM')
app.setLabel('line2', 'Insert Cash To Begin')
app.setLabel('line3', '1 BTC = $' + str(btcBuyPrice))
# Update balance
mainWallet.scan(scan_gap_limit=5)
mainWallet.transactions_update_confirmations()
balance = str(mainWallet.balance())
log(f'ATM wallet balance: ₿{balance}')
    # Reset output variables
outputBTC = ""
outputName = ""
outputKey = False
outputKeyAddress = ""
outputTXString = ""
    shouldPrint = False
    shouldCreateTX = False
    shouldCreateWallet = False
def creditSubtractPremium():
global credit
credit_ = credit - (credit * premiumRate)
return credit_
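# Worked example (illustrative): with premiumRate = 0.02, inserting $100
# gives credit_ = 100 - (100 * 0.02) = 98.0, i.e. $98 is converted to BTC
# and $2 is retained as the premium.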
app.setFont(size=50)
app.setBg("#292D39")
app.setFg("white")
app.registerEvent(serialLoop)
app.setStretch('both')
app.setSticky('news')
app.addLabel('line1', 'Open-Source Bitcoin ATM')
app.addLabel('line2', 'Insert Cash To Begin')
app.addLabel('line3', '1 BTC = $' + str(btcBuyPrice))
app.go()
|
11565458
|
class RebarCouplerError(Enum,IComparable,IFormattable,IConvertible):
"""
Error states for the Rebar Coupler
enum RebarCouplerError,values: BarSegementsAreNotParallel (6),BarSegmentsAreNotOnSameLine (7),BarSegmentSmallerThanEngagement (13),BarsNotTouching (3),CurvesOtherThanLine (12),DifferentLayout (2),InconsistentShape (8),IncorrectEndTreatmentCoupler (5),IncorrectEndTreatmentHook (4),IncorrectInputData (1),InvalidDiameter (9),ValidationSuccessfuly (0),VaryingDistanceBetweenDistributionsBars (14)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
BarSegementsAreNotParallel=None
BarSegmentsAreNotOnSameLine=None
BarSegmentSmallerThanEngagement=None
BarsNotTouching=None
CurvesOtherThanLine=None
DifferentLayout=None
InconsistentShape=None
IncorrectEndTreatmentCoupler=None
IncorrectEndTreatmentHook=None
IncorrectInputData=None
InvalidDiameter=None
ValidationSuccessfuly=None
value__=None
VaryingDistanceBetweenDistributionsBars=None
|
11565496
|
import numpy as np
import torch
from torch.autograd import Variable
from helpers.utils import progress_bar
# Train function
def train(epoch, net, criterion, optimizer, logfile, loader, device, wmloader=False, tune_all=True):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
iteration = -1
wm_correct = 0
print_every = 5
l_lambda = 1.2
# update only the last layer
if not tune_all:
if type(net) is torch.nn.DataParallel:
net.module.freeze_hidden_layers()
else:
net.freeze_hidden_layers()
# get the watermark images
wminputs, wmtargets = [], []
if wmloader:
for wm_idx, (wminput, wmtarget) in enumerate(wmloader):
wminput, wmtarget = wminput.to(device), wmtarget.to(device)
wminputs.append(wminput)
wmtargets.append(wmtarget)
# the wm_idx to start from
wm_idx = np.random.randint(len(wminputs))
for batch_idx, (inputs, targets) in enumerate(loader):
iteration += 1
inputs, targets = inputs.to(device), targets.to(device)
# add wmimages and targets
if wmloader:
inputs = torch.cat([inputs, wminputs[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
targets = torch.cat([targets, wmtargets[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
with open(logfile, 'a') as f:
f.write('Epoch: %d\n' % epoch)
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# train function in a teacher-student fashion
def train_teacher(epoch, net, criterion, optimizer, use_cuda, logfile, loader, wmloader):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
iteration = -1
# get the watermark images
wminputs, wmtargets = [], []
if wmloader:
for wm_idx, (wminput, wmtarget) in enumerate(wmloader):
if use_cuda:
wminput, wmtarget = wminput.cuda(), wmtarget.cuda()
wminputs.append(wminput)
wmtargets.append(wmtarget)
# the wm_idx to start from
wm_idx = np.random.randint(len(wminputs))
for batch_idx, (inputs, targets) in enumerate(loader):
iteration += 1
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
if wmloader:
# add wmimages and targets
inputs = torch.cat([inputs, wminputs[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
targets = torch.cat([targets, wmtargets[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
inputs, targets = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
with open(logfile, 'a') as f:
f.write('Epoch: %d\n' % epoch)
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# Test function
def test(net, criterion, logfile, loader, device):
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))
with open(logfile, 'a') as f:
f.write('Test results:\n')
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# return the acc.
return 100. * correct / total
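# Illustrative wiring (a sketch with hypothetical net/loader objects,
# not part of this module):
#
#     for epoch in range(start_epoch, start_epoch + num_epochs):
#         train(epoch, net, criterion, optimizer, logfile,
#               trainloader, device, wmloader=wmloader)
#         acc = test(net, criterion, logfile, testloader, device)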
|
11565515
|
from redshift_connector import RedshiftProperty
from redshift_connector.config import ClientProtocolVersion
def make_redshift_property() -> RedshiftProperty:
rp: RedshiftProperty = RedshiftProperty()
rp.user_name = "<EMAIL>"
rp.password = "<PASSWORD>"
rp.db_name = "dev"
rp.cluster_identifier = "something"
rp.idp_host = "8000"
rp.duration = 100
rp.preferred_role = "analyst"
rp.ssl_insecure = False
rp.db_user = "primary"
rp.db_groups = ["employees"]
rp.force_lowercase = True
rp.auto_create = False
rp.region = "us-west-1"
rp.principal = "arn:aws:iam::123456789012:user/Development/product_1234/*"
rp.client_protocol_version = ClientProtocolVersion.BASE_SERVER
return rp
|
11565527
|
import argparse
import copy
import math
import random
import sys
import numpy as np
import scipy.sparse as sp
import networkx as nx
import dgl
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch_sparse import SparseTensor
import torch_geometric.transforms as T
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, SAGEConv
from torch_geometric.utils import (negative_sampling, add_self_loops,
                                   train_test_split_edges)
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
from sklearn.preprocessing import normalize
from logger import Logger
from PEGlayer import *
sys.path.append("../..")
from Graph_embedding import DeepWalk
class PEG(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(PEG, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(PEGconv(in_channels, hidden_channels, cached=True))
for _ in range(num_layers - 2):
self.convs.append(
PEGconv(hidden_channels, hidden_channels, cached=True))
self.convs.append(PEGconv(hidden_channels, out_channels, cached=True))
self.dropout = dropout
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, x, adj_t, embeddings):
for conv in self.convs[:-1]:
x = conv(x, adj_t, embeddings)
#x = F.relu(x)
#x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[-1](x, adj_t, embeddings)
return x
class LinkPredictor(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
dropout):
super(LinkPredictor, self).__init__()
self.lins = torch.nn.ModuleList()
self.lins.append(torch.nn.Linear(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.lins.append(torch.nn.Linear(hidden_channels, hidden_channels))
self.lins.append(torch.nn.Linear(hidden_channels, out_channels))
self.output = torch.nn.Linear(2,1)
self.dropout = dropout
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
def forward(self, x_i, x_j, pos_i, pos_j):
x = x_i * x_j
pos_encode = ((pos_i - pos_j)**2).sum(dim=-1, keepdim=True)
for lin in self.lins[:-1]:
x = lin(x)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
out = self.output(torch.cat([x, pos_encode], 1))
return torch.sigmoid(out)
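# Note: LinkPredictor scores a candidate edge from two signals: the elementwise
# (Hadamard) product of the endpoint node embeddings, passed through an MLP,
# and the squared Euclidean distance between the endpoints' positional
# encodings; the final 2 -> 1 linear layer mixes both into a single logit.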
def train(model, predictor, x, embeddings, adj_t, split_edge, optimizer, batch_size):
row, col, _ = adj_t.coo()
edge_index = torch.stack([col, row], dim=0)
model.train()
predictor.train()
pos_train_edge = split_edge.to(x.device)
total_loss = total_examples = 0
for perm in DataLoader(range(pos_train_edge.size(0)), batch_size,
shuffle=True):
optimizer.zero_grad()
h = model(x, edge_index, embeddings)
edge = pos_train_edge[perm].t()
pos_out = predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]])
pos_loss = -torch.log(pos_out + 1e-15).mean()
# Just do some trivial random sampling.
edge = torch.randint(0, x.size(0), edge.size(), dtype=torch.long,
device=h.device)
neg_out = predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]])
neg_loss = -torch.log(1 - neg_out + 1e-15).mean()
loss = pos_loss + neg_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
torch.nn.utils.clip_grad_norm_(predictor.parameters(), 1.0)
optimizer.step()
num_examples = pos_out.size(0)
total_loss += loss.item() * num_examples
total_examples += num_examples
return total_loss / total_examples
@torch.no_grad()
def test(model, predictor, x, embeddings, adj_t, test_adj_t, split_edge, evaluator, batch_size):
row, col, _ = adj_t.coo()
edge_index = torch.stack([col, row], dim=0)
model.eval()
predictor.eval()
h = model(x, edge_index, embeddings)
pos_train_edge = split_edge['train']['edge'].to(h.device)
pos_valid_edge = split_edge['valid']['edge'].to(h.device)
neg_valid_edge = split_edge['valid']['edge_neg'].to(h.device)
pos_test_edge = split_edge['test']['edge'].to(h.device)
neg_test_edge = split_edge['test']['edge_neg'].to(h.device)
pos_train_preds = []
for perm in DataLoader(range(pos_train_edge.size(0)), batch_size):
edge = pos_train_edge[perm].t()
pos_train_preds += [predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]]).squeeze().cpu()]
pos_train_pred = torch.cat(pos_train_preds, dim=0)
pos_valid_preds = []
for perm in DataLoader(range(pos_valid_edge.size(0)), batch_size):
edge = pos_valid_edge[perm].t()
pos_valid_preds += [predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]]).squeeze().cpu()]
pos_valid_pred = torch.cat(pos_valid_preds, dim=0)
neg_valid_preds = []
for perm in DataLoader(range(neg_valid_edge.size(0)), batch_size):
edge = neg_valid_edge[perm].t()
neg_valid_preds += [predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]]).squeeze().cpu()]
neg_valid_pred = torch.cat(neg_valid_preds, dim=0)
row, col, _ = test_adj_t.coo()
edge_index = torch.stack([col, row], dim=0)
h = model(x, edge_index, embeddings)
pos_test_preds = []
for perm in DataLoader(range(pos_test_edge.size(0)), batch_size):
edge = pos_test_edge[perm].t()
pos_test_preds += [predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]]).squeeze().cpu()]
pos_test_pred = torch.cat(pos_test_preds, dim=0)
neg_test_preds = []
for perm in DataLoader(range(neg_test_edge.size(0)), batch_size):
edge = neg_test_edge[perm].t()
neg_test_preds += [predictor(h[edge[0]], h[edge[1]], embeddings[edge[0]], embeddings[edge[1]]).squeeze().cpu()]
neg_test_pred = torch.cat(neg_test_preds, dim=0)
results = {}
for K in [10, 50, 100]:
evaluator.K = K
train_hits = evaluator.eval({
'y_pred_pos': pos_train_pred,
'y_pred_neg': neg_valid_pred,
})[f'hits@{K}']
valid_hits = evaluator.eval({
'y_pred_pos': pos_valid_pred,
'y_pred_neg': neg_valid_pred,
})[f'hits@{K}']
test_hits = evaluator.eval({
'y_pred_pos': pos_test_pred,
'y_pred_neg': neg_test_pred,
})[f'hits@{K}']
results[f'Hits@{K}'] = (train_hits, valid_hits, test_hits)
return results
def do_edge_split(dataset, fast_split=True, val_ratio=0.05, test_ratio=0.1, seed = 0):
data = dataset
random.seed(seed)
torch.manual_seed(seed)
if not fast_split:
data = train_test_split_edges(data, val_ratio, test_ratio)
edge_index, _ = add_self_loops(data.train_pos_edge_index)
data.train_neg_edge_index = negative_sampling(
edge_index, num_nodes=data.num_nodes,
num_neg_samples=data.train_pos_edge_index.size(1))
else:
num_nodes = data.num_nodes
row, col = data.edge_index
# Return upper triangular portion.
#mask = row < col
#row, col = row[mask], col[mask]
n_v = int(math.floor(val_ratio * row.size(0)))
n_t = int(math.floor(test_ratio * row.size(0)))
# Positive edges.
perm = torch.randperm(row.size(0))
row, col = row[perm], col[perm]
r, c = row[:n_v], col[:n_v]
data.val_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v:n_v + n_t], col[n_v:n_v + n_t]
data.test_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v + n_t:], col[n_v + n_t:]
data.train_pos_edge_index = torch.stack([r, c], dim=0)
# Negative edges (cannot guarantee (i,j) and (j,i) won't both appear)
neg_edge_index = negative_sampling(
data.edge_index, num_nodes=num_nodes,
num_neg_samples=row.size(0))
data.val_neg_edge_index = neg_edge_index[:, :n_v]
data.test_neg_edge_index = neg_edge_index[:, n_v:n_v + n_t]
data.train_neg_edge_index = neg_edge_index[:, n_v + n_t:]
split_edge = {'train': {}, 'valid': {}, 'test': {}}
split_edge['train']['edge'] = data.train_pos_edge_index.t()
split_edge['train']['edge_neg'] = data.train_neg_edge_index.t()
split_edge['valid']['edge'] = data.val_pos_edge_index.t()
split_edge['valid']['edge_neg'] = data.val_neg_edge_index.t()
split_edge['test']['edge'] = data.test_pos_edge_index.t()
split_edge['test']['edge_neg'] = data.test_neg_edge_index.t()
return split_edge
def do_pipeline(all_edge, train_edge, PE_method, PE_dim):
full_adj_t = SparseTensor.from_edge_index(torch.tensor(np.array(all_edge)).t()).t()
full_adj_t = full_adj_t.to_symmetric()
train_pos_adj = SparseTensor.from_edge_index(torch.tensor(np.array(train_edge)).t()).t()
train_pos_adj = train_pos_adj.to_symmetric()
    train_pos_adj = train_pos_adj.sparse_resize([235868, 235868])  # ogbl-collab node count
graph = np.array(full_adj_t.to_dense() - train_pos_adj.to_dense())
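    # The PE graph is built from all training edges minus the current slice, so
    # the positional encodings never see the edges being predicted in this pass.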
if PE_method == 'LE':
        G = nx.from_numpy_matrix(graph)
G = nx.to_scipy_sparse_matrix(G)
g = dgl.from_scipy(G)
embeddings = laplacian_positional_encoding(g, PE_dim)
#embeddings = normalize(np.array(embeddings), norm='l2', axis=1, copy=True, return_norm=False)
embeddings = torch.tensor(embeddings)
embeddings = embeddings.type(torch.FloatTensor)
elif PE_method == 'DW':
G = nx.from_numpy_matrix(graph)
model_emb = DeepWalk(G,walk_length=80,num_walks=10,workers=1)#init model
model_emb.train(window_size=5,iter=3, embed_size = PE_dim)# train model
emb = model_emb.get_embeddings()# get embedding vectors
embeddings = []
for i in range(len(emb)):
embeddings.append(emb[i])
embeddings = torch.tensor(np.array(embeddings))
return full_adj_t, embeddings
def laplacian_positional_encoding(g, pos_enc_dim):
"""
Graph positional encoding v/ Laplacian eigenvectors
"""
# Laplacian
A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
L = sp.eye(g.number_of_nodes()) - N * A * N
# Eigenvectors with scipy
#EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2) # for 40 PEs
EigVec = EigVec[:, EigVal.argsort()] # increasing order
out = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
return out
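# A minimal usage sketch (an illustrative assumption, not part of the original
# script; it relies on the same older DGL adjacency API used above):
def _laplacian_pe_demo():
    # 10-node bidirected ring; eigs needs k = pos_enc_dim + 1 < number of nodes.
    src = list(range(10)) + [(i + 1) % 10 for i in range(10)]
    dst = [(i + 1) % 10 for i in range(10)] + list(range(10))
    g = dgl.graph((src, dst))
    pe = laplacian_positional_encoding(g, pos_enc_dim=4)
    assert pe.shape == (10, 4)
    return pe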
def main():
parser = argparse.ArgumentParser(description='OGBL-COLLAB (GNN)')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--PE_method', type=str, default='DW')
parser.add_argument('--PE_dim', type=int, default=128)
parser.add_argument('--log_steps', type=int, default=1)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--hidden_channels', type=int, default=256)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=64 * 1024)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--eval_steps', type=int, default=1)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
print(args)
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
dataset = PygLinkPropPredDataset(name='ogbl-collab')
data = dataset[0]
edge_index = data.edge_index
data.edge_weight = data.edge_weight.view(-1).to(torch.float)
data = T.ToSparseTensor()(data)
split_edge = dataset.get_edge_split()
if args.PE_method == 'DW':
        G = nx.from_numpy_array(np.array(data.adj_t.to_dense()))
model_emb = DeepWalk(G,walk_length=80,num_walks=10,workers=1)#init model
model_emb.train(window_size=5,iter=3, embed_size = args.PE_dim)# train model
emb = model_emb.get_embeddings()# get embedding vectors
embeddings = []
for i in range(len(emb)):
embeddings.append(emb[i])
embeddings = torch.tensor(np.array(embeddings))
embeddings = embeddings.to(device)
elif args.PE_method == 'LE':
G = nx.from_edgelist(np.array(dataset[0].edge_index).T)
G = nx.to_scipy_sparse_matrix(G)
g = dgl.from_scipy(G)
embeddings = laplacian_positional_encoding(g, args.PE_dim)
#embeddings = normalize(np.array(embeddings), norm='l2', axis=1, copy=True, return_norm=False)
embeddings = torch.tensor(embeddings)
embeddings = embeddings.to(device)
data = data.to(device)
adj_t = data.adj_t.to(device)
test_adj = data.adj_t.to(device)
x = data.x.to(device)
model = PEG(data.num_features, args.hidden_channels,
args.hidden_channels, args.num_layers,
args.dropout).to(device)
predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1,
args.num_layers, args.dropout).to(device)
evaluator = Evaluator(name='ogbl-collab')
loggers = {
'Hits@10': Logger(args.runs, args),
'Hits@50': Logger(args.runs, args),
'Hits@100': Logger(args.runs, args),
}
train_edge_list = np.array(split_edge['train']['edge']).tolist()
random.shuffle(train_edge_list)
slice_num = int(len(train_edge_list)/10)
positive_train = [train_edge_list[i:i+slice_num] for i in range(0,len(train_edge_list),slice_num)]
pipe_train_embeddings_list = []
pipe_train_adj_t_list = []
pipe_train_edge_list = []
print("step 1 done!")
for j in range(10):
id_train_pos = positive_train[j]
pipe_train_edge_list.append(torch.tensor(np.array(id_train_pos)))
full_adj_t, train_embeddings = do_pipeline(train_edge_list, id_train_pos, args.PE_method, args.PE_dim)
full_adj_t = full_adj_t.to(device)
train_embeddings = train_embeddings.to(device)
pipe_train_adj_t_list.append(full_adj_t)
pipe_train_embeddings_list.append(train_embeddings)
#pipe_train_embeddings = torch.cat(pipe_train_embeddings_list, dim=0)
#pipe_train_adj_t = torch.cat(pipe_train_adj_t_list, dim=0)
#pipe_train_split_edge = torch.cat(pipe_train_split_edge_list, dim=0)
#pipe_train_adj_t = pipe_train_adj_t.cuda(device)
#pipe_train_embeddings = pipe_train_embeddings.cuda(device)
#x = torch.cat((embeddings, emb.weight), 1)
small_epoch_list = []
for i in range(10):
small_epoch_list.append(i)
for run in range(args.runs):
model.reset_parameters()
predictor.reset_parameters()
optimizer = torch.optim.Adam(
list(model.parameters()) + list(predictor.parameters()),
lr=args.lr)
for epoch in range(1, 1 + args.epochs):
random.shuffle(small_epoch_list)
for j in range(10):
loss = train(model, predictor, x, pipe_train_embeddings_list[small_epoch_list[j]], pipe_train_adj_t_list[small_epoch_list[j]], pipe_train_edge_list[small_epoch_list[j]],
optimizer, args.batch_size)
if epoch % args.eval_steps == 0:
results = test(model, predictor, x, embeddings, adj_t, test_adj, split_edge, evaluator,
args.batch_size)
for key, result in results.items():
loggers[key].add_result(run, result)
if epoch % args.log_steps == 0:
for key, result in results.items():
train_hits, valid_hits, test_hits = result
print(key)
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Train: {100 * train_hits:.2f}%, '
f'Valid: {100 * valid_hits:.2f}%, '
f'Test: {100 * test_hits:.2f}%')
print('---')
for key in loggers.keys():
print(key)
loggers[key].print_statistics(run)
for key in loggers.keys():
print(key)
loggers[key].print_statistics()
if __name__ == "__main__":
main()
|
11565584
|
import pickle
import pytest
from redun import File, Scheduler, task
from redun.tests.utils import use_tempdir
from redun.utils import pickle_dumps
from redun.value import (
FileCache,
Function,
InvalidValueError,
get_type_registry,
is_unknown_function,
)
class Data:
"""
Custom datatype.
"""
def __init__(self, data):
self.data = data
class DataType(FileCache):
"""
Register Data to use file-based caching.
"""
type = Data
base_path = "tmp"
def _serialize(self):
# User defined serialization.
return pickle_dumps(self.instance.data)
@classmethod
def _deserialize(cls, bytes):
# User defined deserialization.
return Data(pickle.loads(bytes))
@use_tempdir
def test_value_serialization() -> None:
task_calls = []
@task()
def task1(data):
task_calls.append("task1")
return data
@task()
def main():
data = Data("hello")
return task1(data)
scheduler = Scheduler()
assert scheduler.run(main()).data == "hello"
assert task_calls == ["task1"]
# Value is cached to filesystem.
assert File("tmp/773babcb7e0ba318c7981cb5595a5cbe640ab156").exists()
assert scheduler.run(main()).data == "hello"
assert task_calls == ["task1"]
# Delete file.
File("tmp/773babcb7e0ba318c7981cb5595a5cbe640ab156").remove()
# Task will safely run again.
assert scheduler.run(main()).data == "hello"
assert task_calls == ["task1", "task1"]
def hello():
# Example plain python function that we use below as an argument and return value in workflows.
return "hello"
def test_function(scheduler: Scheduler) -> None:
"""
Plain Python functions should be usable as arguments and return values.
"""
@task
def return_func():
return hello
@task
def take_func(func):
return func()
@task
def main():
func = return_func()
return take_func(func)
assert scheduler.run(return_func()) == hello
assert scheduler.run(take_func(hello)) == "hello"
assert scheduler.run(main()) == "hello"
registry = get_type_registry()
assert registry.get_hash(hello) == "b29153942a771e3a5d327a81a9ce5eca21aa48db"
def test_function_parse_arg() -> None:
"""
Functions should be parsable from the command line.
"""
registry = get_type_registry()
assert registry.parse_arg(type(hello), "redun.tests.test_value.hello") == hello
with pytest.raises(ValueError, match="Unexpected format for function name: hello"):
registry.parse_arg(type(hello), "hello")
with pytest.raises(ValueError, match="Function not found: bad_module.hello"):
registry.parse_arg(type(hello), "bad_module.hello")
with pytest.raises(ValueError, match="Function not found: redun.tests.test_value.bad_func"):
registry.parse_arg(type(hello), "redun.tests.test_value.bad_func")
def test_local_function(scheduler: Scheduler) -> None:
"""
Local functions should be detected and rejected.
"""
def hello2():
return "hello"
@task
def return_func():
return hello2
@task
def return_lambda():
return lambda: 10
with pytest.raises(InvalidValueError):
scheduler.run(return_func())
with pytest.raises(InvalidValueError):
scheduler.run(return_lambda())
def test_deleted_function(scheduler: Scheduler) -> None:
"""
Deleted functions should be safely detected.
"""
value = Function(hello)
data = value.serialize()
# Alter the function name to something that does not exist.
data2 = data.replace(b"hello", b"xxxxx")
# Try to deserialize the function.
func = Function.deserialize("builtin.function", data2)
assert is_unknown_function(func)
# Value should not be valid.
assert not get_type_registry().is_valid(func)
# Calling the function should trigger the stub function and fail.
with pytest.raises(
ValueError, match="Function 'redun.tests.test_value.xxxxx' cannot be found"
):
func()
|
11565602
|
import asyncio
import logging
import os
import re
from typing import Dict, Any
import pytest
from autobahn.wamp import ApplicationError
from autobahn.wamp.types import Challenge, PublishOptions, CallOptions
from asphalt.core import executor, Context, qualified_name
from asphalt.exceptions import ExtrasProvider
from asphalt.exceptions.api import ExceptionReporter
from asphalt.wamp.client import WAMPClient, AsphaltSession, ConnectionError
from asphalt.wamp.events import SessionJoinEvent, SessionLeaveEvent
from asphalt.wamp.extras_providers import WAMPExtrasProvider
class TestAsphaltSession:
@pytest.fixture
def session(self, request):
return AsphaltSession('default', request.param, 'foo', 'bar')
@pytest.mark.parametrize('session', ['ticket'], indirect=['session'])
def test_challenge_mismatch(self, session):
challenge = Challenge('wampcra')
exc = pytest.raises(ConnectionError, session.onChallenge, challenge)
assert exc.match('expected authentication method "ticket" but received a "wampcra" '
'challenge instead')
@pytest.mark.parametrize('session', ['ticket'], indirect=['session'])
def test_ticket_challenge(self, session):
challenge = Challenge('ticket')
assert session.onChallenge(challenge) == 'bar'
@pytest.mark.parametrize('session', ['wampcra'], indirect=['session'])
def test_wampcra_challenge(self, session):
challenge = Challenge('wampcra', {'challenge': b'\xff\x00345jfsdf'})
retval = session.onChallenge(challenge)
assert isinstance(retval, bytes)
@pytest.mark.parametrize('session', ['wampcra'], indirect=['session'])
def test_wampcra_salted_challenge(self, session):
challenge = Challenge('wampcra', {'challenge': b'\xff\x00345jfsdf', 'salt': '5ihod',
'iterations': 5, 'keylen': 32})
retval = session.onChallenge(challenge)
assert isinstance(retval, bytes)
class TestWAMPClient:
@pytest.fixture
def otherclient(self, request, event_loop, context):
kwargs = getattr(request, 'param', {})
kwargs.setdefault('host', os.getenv('CROSSBAR_HOST', 'localhost'))
kwargs.setdefault('max_reconnection_attempts', 0)
client = WAMPClient(**kwargs)
event_loop.run_until_complete(client.start(context))
yield client
event_loop.run_until_complete(client.stop())
@pytest.mark.asyncio
async def test_client_events(self, wampclient: WAMPClient):
def listener(event):
events.append(event)
events = []
wampclient.realm_joined.connect(listener)
wampclient.realm_left.connect(listener)
await wampclient.connect()
await wampclient.stop()
assert len(events) == 2
assert isinstance(events[0], SessionJoinEvent)
assert isinstance(events[1], SessionLeaveEvent)
@pytest.mark.parametrize('connect_first', [False, True])
@pytest.mark.asyncio
async def test_call(self, wampclient: WAMPClient, connect_first):
if connect_first:
await wampclient.connect()
result = await wampclient.call('wamp.session.count')
assert result == 1
@pytest.mark.asyncio
async def test_register_call_progress(self, wampclient: WAMPClient):
async def progressive_procedure(ctx, start, end):
for value in range(start, end):
ctx.progress(value)
return end
progress_values = []
await wampclient.register(progressive_procedure, 'test.progressive')
result = await wampclient.call('test.progressive', 2, 6,
options=CallOptions(on_progress=progress_values.append))
assert progress_values == [2, 3, 4, 5]
assert result == 6
@pytest.mark.asyncio
async def test_register_call_blocking(self, wampclient: WAMPClient):
@executor
def add(ctx, x, y):
return x + y
await wampclient.register(add, 'test.add')
result = await wampclient.call('test.add', 2, 3)
assert result == 5
@pytest.mark.asyncio
async def test_register_call_plain(self, wampclient: WAMPClient):
def add(ctx, x, y):
return x + y
await wampclient.register(add, 'test.add')
result = await wampclient.call('test.add', 2, 3)
assert result == 5
@pytest.mark.parametrize('wampclient', [
{'auth_method': 'wampcra', 'auth_id': 'testuser', 'auth_secret': 'testpass'}
], indirect=True)
@pytest.mark.asyncio
async def test_auth_wampcra(self, wampclient: WAMPClient):
await wampclient.connect()
result = await wampclient.call('wamp.session.get', wampclient.session_id)
assert result['authid'] == wampclient.details.authid == 'testuser'
@pytest.mark.parametrize('wampclient', [
{'auth_method': 'ticket', 'auth_id': 'device1', 'auth_secret': 'abc123'}
], indirect=True)
@pytest.mark.asyncio
async def test_auth_ticket(self, wampclient: WAMPClient):
await wampclient.connect()
result = await wampclient.call('wamp.session.get', wampclient.session_id)
assert result['authid'] == wampclient.details.authid == 'device1'
@pytest.mark.parametrize('wampclient', [
{'auth_method': 'ticket', 'auth_id': 'device1', 'auth_secret': 'abc124'}
], indirect=True)
@pytest.mark.asyncio
async def test_auth_failure(self, wampclient: WAMPClient):
with pytest.raises(ConnectionError) as exc:
await wampclient.connect()
assert exc.match('ticket in static WAMP-Ticket authentication is invalid')
@pytest.mark.asyncio
async def test_publish_autoconnect(self, wampclient: WAMPClient):
result = await wampclient.publish('test.topic', options=PublishOptions(acknowledge=True))
assert result
@pytest.mark.parametrize('connect_first', [False, True])
@pytest.mark.asyncio
async def test_publish_subscribe(self, wampclient: WAMPClient, connect_first):
async def subscriber(ctx, *args):
await q.put(args)
raise Exception()
q = asyncio.Queue()
if connect_first:
await wampclient.connect()
await wampclient.subscribe(subscriber, 'test.topic')
publication_id = await wampclient.publish(
'test.topic', 2, 3, options=PublishOptions(exclude_me=False, acknowledge=True))
assert isinstance(publication_id, int)
event = await asyncio.wait_for(q.get(), 2)
assert event == (2, 3)
@pytest.mark.parametrize('connect_first', [False, True])
@pytest.mark.asyncio
async def test_map_exception(self, wampclient: WAMPClient, connect_first):
class TestException(Exception):
pass
async def error(ctx):
raise TestException
if connect_first:
await wampclient.connect()
wampclient.map_exception(TestException, 'test.exception')
await wampclient.register(error, 'test.error')
with pytest.raises(TestException):
await wampclient.call('test.error')
@pytest.mark.asyncio
async def test_connect_procedure_registration_failure(self, wampclient: WAMPClient,
otherclient: WAMPClient):
"""
Test that a failure in registering the registry's procedures causes the connection attempt
to fail.
"""
await otherclient.register(lambda ctx: None, 'blah')
with pytest.raises(ApplicationError):
await wampclient.register(lambda ctx: None, 'blah')
assert wampclient.session_id is None
@pytest.mark.parametrize('wampclient', [
{'port': 8081, 'max_reconnection_attempts': 1, 'reconnect_delay': 0.3}], indirect=True)
@pytest.mark.asyncio
async def test_connect_retry(self, wampclient: WAMPClient, caplog):
"""Test that if the client can't connect, it will retry after a delay."""
with pytest.raises(ConnectionRefusedError):
await wampclient.connect()
messages = [record.message for record in caplog.records
if record.name == 'asphalt.wamp.client' and
record.message.startswith('Connection failed')]
assert len(messages) == 1
assert re.fullmatch("Connection failed \(attempt 1\): ConnectionRefusedError\(.+?\); "
"reconnecting in 0.3 seconds", messages[0])
@pytest.mark.asyncio
async def test_close_wait_handlers(self, event_loop, wampclient: WAMPClient,
otherclient: WAMPClient, caplog):
"""
Test that WAMPClient.close() waits for any running handler tasks to finish before
disconnecting from the router.
"""
async def sleep_subscriber(ctx):
nonlocal close_task
close_task = event_loop.create_task(wampclient.stop())
await asyncio.sleep(0.3)
async def sleep_sum(ctx, x, y):
await asyncio.sleep(0.3)
return x + y
caplog.set_level(logging.INFO)
close_task = None
await wampclient.register(sleep_sum)
await wampclient.subscribe(sleep_subscriber, 'testtopic')
await otherclient.publish('testtopic', options=PublishOptions(acknowledge=True))
result = await otherclient.call('sleep_sum', 1, 2)
assert result == 3
await close_task
messages = [record.message for record in caplog.records
if record.name == 'asphalt.wamp.client' and
record.message.startswith('Waiting for')]
assert messages == ['Waiting for 2 WAMP subscription/procedure handler tasks to finish']
@pytest.mark.asyncio
async def test_connect_twice(self, wampclient: WAMPClient):
"""
Test that when connect() is called while connected, it just returns a Future that resolves
immediately.
"""
retval = wampclient.connect()
assert isinstance(retval, asyncio.Task)
await retval
retval = wampclient.connect()
assert isinstance(retval, asyncio.Future)
await retval
def test_session_id_not_connected(self, wampclient: WAMPClient):
assert wampclient.session_id is None
def test_session_details_not_connected(self, wampclient: WAMPClient):
assert wampclient.details is None
@pytest.mark.parametrize('custom_exception', [False, True])
@pytest.mark.asyncio
async def test_report_applicationerror(self, wampclient: WAMPClient, context: Context,
custom_exception):
class DummyReporter(ExceptionReporter):
def report_exception(self, ctx: Context, exception: BaseException, message: str,
extra: Dict[str, Any]) -> None:
errors.append((exception, message, extra))
class CustomError(Exception):
pass
def handler(ctx):
if custom_exception:
raise CustomError
else:
raise ApplicationError('dummy.error')
errors = []
context.add_resource(DummyReporter(), types=[ExceptionReporter])
wampclient.map_exception(CustomError, 'dummy.error')
await wampclient.register(handler, 'dummyprocedure')
with pytest.raises(CustomError):
await wampclient.call('dummyprocedure')
assert not errors
@pytest.mark.parametrize('wampclient', [
{'auth_method': 'ticket', 'auth_id': 'device1', 'auth_secret': 'abc123'}
], indirect=True)
@pytest.mark.asyncio
async def test_sentry_extras_provider_procedure(self, wampclient: WAMPClient,
context: Context, monkeypatch):
class DummyReporter(ExceptionReporter):
def report_exception(self, ctx: Context, exception: BaseException, message: str,
extra: Dict[str, Any]) -> None:
errors.append((exception, message, extra))
def handler(ctx):
raise Exception('foo')
errors = []
context.add_resource(DummyReporter(), types=[ExceptionReporter])
context.add_resource(WAMPExtrasProvider(), types=[ExtrasProvider])
await wampclient.register(handler, 'dummyprocedure')
monkeypatch.setattr('asphalt.wamp.extras_providers.SENTRY_CLASS_NAME',
qualified_name(DummyReporter))
with pytest.raises(ApplicationError):
await wampclient.call('dummyprocedure')
assert len(errors) == 1
exc, message, extra = errors[0]
assert type(exc) is Exception
assert str(exc) == 'foo'
assert message == "Error running handler for procedure 'dummyprocedure'"
assert extra == {'extra': {'procedure': 'dummyprocedure'},
'user_context': {'auth_role': 'authorized_users',
'id': 'device1',
'session_id': wampclient.session_id}
}
@pytest.mark.parametrize('wampclient', [
{'auth_method': 'ticket', 'auth_id': 'device1', 'auth_secret': 'abc123'}
], indirect=True)
@pytest.mark.asyncio
async def test_sentry_extras_provider_subscriber(self, wampclient: WAMPClient,
context: Context, monkeypatch):
class DummyReporter(ExceptionReporter):
def report_exception(self, ctx: Context, exception: BaseException, message: str,
extra: Dict[str, Any]) -> None:
errors.append((exception, message, extra))
def handler(ctx):
ctx.loop.call_soon(event.set)
raise Exception('foo')
event = asyncio.Event()
errors = []
context.add_resource(DummyReporter(), types=[ExceptionReporter])
context.add_resource(WAMPExtrasProvider(), types=[ExtrasProvider])
await wampclient.subscribe(handler, 'dummytopic')
monkeypatch.setattr('asphalt.wamp.extras_providers.SENTRY_CLASS_NAME',
qualified_name(DummyReporter))
await wampclient.publish('dummytopic', options=dict(acknowledge=True, exclude_me=False))
await event.wait()
assert len(errors) == 1
exc, message, extra = errors[0]
assert type(exc) is Exception
assert str(exc) == 'foo'
assert message == "Error running subscription handler for topic 'dummytopic'"
assert extra == {'extra': {'topic': 'dummytopic'},
'user_context': {'auth_role': 'authorized_users',
'id': 'device1',
'session_id': wampclient.session_id}
}
|
11565621
|
import typing
import asyncio
import discord
from discord.ext import commands
import datetime
from nerdlandbot.translations.Translations import get_text as translate
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.helpers.constants import NOTIFY_EMBED_COLOR, MAX_REMINDER_TIME, MIN_REMINDER_TIME
class Reminder(commands.Cog, name="Simple Reminder"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="reminder", aliases=["remind_me"], brief="reminder_brief", usage="reminder_usage", help="reminder_help")
async def reminder(self, ctx: commands.Context, time: typing.Optional[str] = None, *, message: typing.Optional[str] = None):
        # If the user doesn't enter a valid time, let them know
        msg = translate("reminder_err_no_int", await culture(ctx))
        title = translate("reminder_err_title", await culture(ctx))
        embed = discord.Embed(
            title=title,
            description=msg,
            color=NOTIFY_EMBED_COLOR
        )
if time is not None and time.isnumeric():
time_int = int(time)
else:
return await ctx.send(embed=embed)
# If the time is longer than a day
if time_int >= MAX_REMINDER_TIME:
msg = translate("reminder_err_too_long", await culture(ctx))
title = translate("reminder_err_title", await culture(ctx))
embed = discord.Embed(
title=title,
description=msg,
color=NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
# If the time is shorter than 5 minutes
if time_int <= MIN_REMINDER_TIME:
msg = translate("reminder_err_too_short", await culture(ctx))
title = translate("reminder_err_title", await culture(ctx))
embed = discord.Embed(
title=title,
description=msg,
color=NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
# Logic to determine the time the user wants to be reminded
current_time = datetime.datetime.now()
remind_time = current_time + datetime.timedelta(minutes=time_int)
remind_hour = remind_time.hour
remind_minute = remind_time.minute
# Fixing notation for first 10 min of the hour
remind_minute_string = str(remind_minute).rjust(2,"0")
# If there is a message, include it
if message:
reminder_set = translate("reminder_set_with_message", await culture(ctx)).format(remind_hour, remind_minute_string, message)
reminder = translate("reminder_with_message", await culture(ctx)).format(ctx.message.author, remind_hour, remind_minute_string, message)
else:
reminder_set = translate("reminder_set_no_message", await culture(ctx)).format(remind_hour, remind_minute_string)
reminder = translate("reminder_no_message", await culture(ctx)).format(ctx.message.author, remind_hour, remind_minute_string)
reminder_embed_title = translate("reminder_embed_title", await culture(ctx))
# Setting the embeds up
embed_reminder_set = discord.Embed(
title=reminder_embed_title,
description=reminder_set,
color=NOTIFY_EMBED_COLOR
)
embed_reminder = discord.Embed(
title=reminder_embed_title,
description=reminder,
color=NOTIFY_EMBED_COLOR
)
# Sending confirmation that the reminder has been set
await ctx.send(embed=embed_reminder_set)
# If it's not the specified time, wait for it.
while not (datetime.datetime.now().hour == remind_hour and datetime.datetime.now().minute == remind_minute):
await asyncio.sleep(60)
# Tag the author and send the message!
author = f"<@{ctx.message.author.id}>"
await ctx.send(author)
return await ctx.send(embed=embed_reminder)
def setup(bot: commands.Bot):
bot.add_cog(Reminder(bot))
|
11565623
|
from setuptools import setup, find_packages
from Cython.Build import cythonize
setup(
name='Triangulum3D',
version='0.0.1',
packages=find_packages('src'),
package_dir={'': 'src'},
setup_requires=[
'setuptools>=18.0',
'Cython>=0.24',
],
install_requires=[
'numpy>=1.9.1',
'PyOpenGL>=3.1.0',
'PyYAML>=3.11',
'pyopencl>=2015.2.3',
'Pillow>=3.0.0',
'Cython>=0.24',
'b2ac>=0.2.1',
],
    # TODO: update when pip supports direct dependencies as an alternative (see https://github.com/pypa/pip/issues/2023)
dependency_links=['https://github.com/PolarNick239/b2ac/tarball/master#egg=b2ac-0.2.1'],
ext_modules=cythonize("**/*.pyx"),
tests_require=[
'testfixtures>=4.1.2',
'nose>=1.3.4'
],
test_suite='nose.collector',
)
|
11565653
|
import pytest
def test_example_1():
"""
If you just want a 'tcrdistances' using pre-set default setting.
You can access distance matrices:
tr.pw_alpha - alpha chain pairwise distance matrix
tr.pw_beta - alpha chain pairwise distance matrix
tr.pw_cdr3_a_aa - cdr3 alpha chain distance matrix
tr.pw_cdr3_b_aa - cdr3 beta chain distance matrix
"""
import pandas as pd
from tcrdist.repertoire import TCRrep
df = pd.read_csv("dash.csv")
tr = TCRrep(cell_df = df,
organism = 'mouse',
chains = ['alpha','beta'],
db_file = 'alphabeta_gammadelta_db.tsv')
tr.pw_alpha
tr.pw_beta
tr.pw_cdr3_a_aa
tr.pw_cdr3_b_aa
|
11565661
|
import os
import subprocess
from subprocess import call
import sys
import errno
import urllib
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def call_with_err_code(cmd):
err_code = call(cmd, shell=True)
    # Error code 137 is thrown by the timeout command when it times out; used in RPi building
if (err_code != 0 and err_code != 137):
print("")
print("")
sys.stderr.write('call \'' + cmd + '\' exited with error code ' + str(err_code) + ' \n')
print("")
exit(err_code)
def call_retry_on_fail(cmd):
err_code = call(cmd, shell=True)
    # Error code 137 is thrown by the timeout command when it times out; used in RPi building
if (err_code != 0 and err_code != 137):
print("")
print("")
sys.stderr.write('call \'' + cmd + '\' failed with error code ' + str(err_code) + '. RETRYING...\n')
print("")
call_retry_on_fail(cmd)
def install_packages_debian(packages_to_install):
call_with_err_code('sudo apt-get update')
if len(packages_to_install) > 0:
call_with_err_code('sudo apt-get -y install ' + " ".join(packages_to_install))
def install_packages_osx(packages_to_install):
    # Homebrew refuses to run under sudo, and `brew install` has no -y flag.
    call_with_err_code('brew update')
    if len(packages_to_install) > 0:
        call_with_err_code('brew install ' + " ".join(packages_to_install))
def setup_travis_or_gh_actions_env_vars():
if os.environ.get('TRAVIS_BUILD_DIR') is not None:
# Travis Detected
print("Travis CI Detected. Setting Up Environment Variables.")
os.environ['BUILD_DIR'] = os.environ.get('TRAVIS_BUILD_DIR')
os.environ['BRANCH'] = os.environ.get('TRAVIS_BRANCH')
os.environ['COMMIT'] = os.environ.get('TRAVIS_COMMIT')
elif os.environ.get('GITHUB_ACTIONS') is not None:
# GitHub Actions Detected
print("GitHub Actions Detected. Setting Up Environment Variables.")
os.environ['BUILD_DIR'] = os.environ['GITHUB_WORKSPACE']
os.environ['BRANCH'] = os.environ['GITHUB_REF'].rsplit('/', 1)[1]
os.environ['COMMIT'] = os.environ.get('GITHUB_SHA')
os.environ['CCACHE_DIR'] = os.path.join(os.environ['GITHUB_WORKSPACE'],'.ccache')
        # Test without ccache compression now that GitHub Actions compresses caches automatically
os.environ['CCACHE_NOCOMPRESS'] = "1"
os.environ['CCACHE_MAXSIZE'] = "750M"
os.environ['CPATH'] = '/Applications/Xcode_11.2.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include'
else:
print("Neither Travis CI nor GitHub Actions Detected. Assuming Local Run...")
os.environ['BUILD_DIR'] = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8')
os.environ['BRANCH'] = subprocess.Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8')
os.environ['COMMIT'] = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).communicate()[0].rstrip().decode('utf-8')
print("BUILD_DIR: " + os.environ['BUILD_DIR'])
print("BRANCH: " + os.environ['BRANCH'])
print("COMMIT: " + os.environ['COMMIT'])
|
11565673
|
from setuptools import setup
import os
from os.path import exists, expanduser
from shutil import copyfile
ROOT = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(expanduser('~') + '/.pysession'):
os.makedirs(expanduser('~') + '/.pysession')
copyfile(ROOT + '/pysession.py', expanduser('~') + '/.pysession/pysession.py')
setup(
name='pysession',
version='0.2',
description='Automatically save python interpreter session code to a file or secret Gist',
author='Fallible',
author_email='<EMAIL>',
url='https://github.com/FallibleInc/pysession',
download_url='https://github.com/FallibleInc/pysession/tarball/0.2',
py_modules=['pysession'],
install_requires=[],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: IPython",
"Framework :: IDLE",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Utilities",
],
)
|
11565729
|
from .notification import Notification
from .connection_confirmed_notification import ConnectionConfirmedNotification
from .connection_request_notification import ConnectionRequestNotification
from .follow_notification import FollowNotification
from .post_comment_notification import PostCommentNotification
from .post_comment_reply_notification import PostCommentReplyNotification
from .post_reaction_notification import PostReactionNotification
from .community_invite_notification import CommunityInviteNotification
from .post_comment_reaction_notification import PostCommentReactionNotification
from .post_comment_user_mention_notification import PostCommentUserMentionNotification
from .post_user_mention_notification import PostUserMentionNotification
from .community_new_post_notification import CommunityNewPostNotification
from .user_new_post_notification import UserNewPostNotification
from .follow_request_approved_notification import FollowRequestApprovedNotification
from .follow_request_notification import FollowRequestNotification
|
11565749
|
import jax.numpy as np
from jax import grad, jit, jacfwd
class RiskConcentrationFunction:
def __init__(self, portfolio):
self.portfolio = portfolio
def evaluate(self):
return np.sum(np.square(self.risk_concentration_vector(self.portfolio.weights)))
# the vector g in Feng & Palomar 2015
def risk_concentration_vector(self, portfolio_weights):
raise NotImplementedError(
"this method should be implemented in the child class"
)
# jacobian of the vector function risk_concentration_vector with respect to weights
def jacobian_risk_concentration_vector(self):
return jit(jacfwd(self.risk_concentration_vector))
class RiskContribOverBudgetDoubleIndex(RiskConcentrationFunction):
def risk_concentration_vector(self, portfolio_weights):
N = len(portfolio_weights)
marginal_risk = portfolio_weights * (
self.portfolio.covariance @ portfolio_weights
)
normalized_marginal_risk = marginal_risk / self.portfolio.budget
return np.tile(normalized_marginal_risk, N) - np.repeat(
normalized_marginal_risk, N
)
class RiskContribOverVarianceMinusBudget(RiskConcentrationFunction):
def risk_concentration_vector(self, portfolio_weights):
marginal_risk = portfolio_weights * (
self.portfolio.covariance @ portfolio_weights
)
return marginal_risk / np.sum(marginal_risk) - self.portfolio.budget
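# A minimal usage sketch (an illustrative assumption, not part of the original
# module): any object exposing `weights`, `covariance`, and `budget` attributes
# works, since those are the only fields the classes above read.
class _ToyPortfolio:
    def __init__(self, weights, covariance, budget):
        self.weights = weights
        self.covariance = covariance
        self.budget = budget
def _risk_parity_demo():
    w = np.array([0.5, 0.3, 0.2])
    cov = np.array([[0.04, 0.01, 0.00],
                    [0.01, 0.09, 0.02],
                    [0.00, 0.02, 0.16]])
    budget = np.array([1 / 3, 1 / 3, 1 / 3])
    rcf = RiskContribOverVarianceMinusBudget(_ToyPortfolio(w, cov, budget))
    # evaluate() is 0 exactly when each asset contributes its risk budget.
    return rcf.evaluate()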
|
11565789
|
import io
import os
import select
import socket
import time
import utils
utils.new_ns()
port = 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.bind(('127.0.0.1', port))
s.listen(16)
tcpdump = utils.tcpdump_start(port)
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
c.connect(('127.0.0.1', port))
# user timeout on one socket for 3s
c.setsockopt(socket.IPPROTO_TCP, socket.TCP_USER_TIMEOUT, 3*1000)
# drop packets
utils.drop_start(dport=port)
utils.drop_start(sport=port)
utils.ss(port)
time.sleep(6)
utils.ss(port)
# the point: user-timeout doesn't kick in
c.send(b'hello world')
time.sleep(1)
utils.ss(port)
# utils.drop_stop(dport=port)
# utils.drop_stop(sport=port)
# time.sleep(1)
# utils.ss(port)
poll = select.poll()
poll.register(c, select.POLLIN)
t0 = time.time()
poll.poll()
utils.ss(port)
e = c.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
print("[ ] SO_ERROR = %s" % (e,))
t1 = time.time()
print("[ ] took: %f seconds" % (t1-t0,))
|
11565799
|
import os
from bage_utils.base_util import is_my_pc, get_username
from bage_utils.cmd_util import CmdUtil
class TelegramUtil(object):
"""
- Telegram client with `telegram-cli`
"""
@staticmethod
def send_telegram(msg, username='길동_홍'):
telegram_cli_path = '%s/tg/bin/telegram-cli' % os.getenv('HOME')
server_pub_path = '%s/tg/tg-server.pub' % os.getenv('HOME')
        if is_my_pc():  # when running on my own machine
telegram_cli_path = '%s/tg/bin/telegram-cli' % os.getenv('HOME')
server_pub_path = '%s/tg/tg-server.pub' % os.getenv('HOME')
cmd = r'''%s -k %s -W -e "msg %s %s" ''' % (telegram_cli_path, server_pub_path, username, msg)
print('cmd:', cmd)
CmdUtil().run(cmd)
# exit_code = os.system(cmd.encode(encoding='utf8')) # hang
# subprocess.call(cmd.encode(encoding='utf8'), shell=False, timeout=2)
# if exit_code == 0:
# return True
# else:
# return False
if __name__ == '__main__':
    TelegramUtil.send_telegram('Telegram test...', username='길동_홍')
|
11565877
|
import time
import torch
from lib.utils import AverageMeter, get_train_labels, accuracy
def NN(net, lemniscate, trainloader, testloader, recompute_memory=0):
net.eval()
net_time = AverageMeter()
cls_time = AverageMeter()
correct = 0.
total = 0
testsize = testloader.dataset.__len__()
train_features = lemniscate.memory.t()
if hasattr(trainloader.dataset, 'imgs'):
train_labels = torch.LongTensor(
[y for (p, y) in trainloader.dataset.imgs]).cuda()
else:
train_labels = get_train_labels(trainloader)
if recompute_memory:
transform_bak = trainloader.dataset.transform
trainloader.dataset.transform = testloader.dataset.transform
temploader = torch.utils.data.DataLoader(
trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
batch_size = inputs.size(0)
features = net(inputs)
train_features[:, batch_idx * batch_size:batch_idx *
batch_size + batch_size] = features.data.t()
train_labels = get_train_labels(trainloader)
trainloader.dataset.transform = transform_bak
end = time.time()
with torch.no_grad():
for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
targets = targets.cuda(non_blocking=True)
batch_size = inputs.size(0)
features = net(inputs)
net_time.update(time.time() - end)
end = time.time()
dist = torch.mm(features, train_features)
yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
candidates = train_labels.view(1, -1).expand(batch_size, -1)
retrieval = torch.gather(candidates, 1, yi)
retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
total += targets.size(0)
correct += retrieval.eq(targets.data).sum().item()
cls_time.update(time.time() - end)
end = time.time()
print(f'Test [{total}/{testsize}]\t'
f'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
f'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
f'Top1: {correct * 100. / total:.2f}')
return correct / total
def kNN(net, lemniscate, trainloader, testloader, K, sigma, recompute_memory=0):
net.eval()
net_time = AverageMeter()
cls_time = AverageMeter()
total = 0
testsize = testloader.dataset.__len__()
train_features = lemniscate.memory.t()
if hasattr(trainloader.dataset, 'imgs'):
train_labels = torch.LongTensor(
[y for (p, y) in trainloader.dataset.imgs]).cuda()
else:
train_labels = get_train_labels(trainloader)
C = train_labels.max() + 1
if recompute_memory:
transform_bak = trainloader.dataset.transform
trainloader.dataset.transform = testloader.dataset.transform
temploader = torch.utils.data.DataLoader(
trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
bs = inputs.size(0)
features = net(inputs)
train_features[:, batch_idx * bs:batch_idx *
bs + bs] = features.data.t()
train_labels = get_train_labels(trainloader)
trainloader.dataset.transform = transform_bak
top1 = 0.
top5 = 0.
with torch.no_grad():
retrieval_one_hot = torch.zeros(K, C).cuda()
for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
end = time.time()
targets = targets.cuda(non_blocking=True)
bs = inputs.size(0)
features = net(inputs)
net_time.update(time.time() - end)
end = time.time()
dist = torch.mm(features, train_features)
yd, yi = dist.topk(K, dim=1, largest=True, sorted=True)
candidates = train_labels.view(1, -1).expand(bs, -1)
retrieval = torch.gather(candidates, 1, yi)
retrieval_one_hot.resize_(bs * K, C).zero_()
retrieval_one_hot.scatter_(1, retrieval.view(-1, 1), 1)
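            # Weighted k-NN vote: each retrieved neighbor's one-hot label is
            # weighted by exp(similarity / sigma) and summed over the K
            # neighbors, yielding per-class scores of shape (bs, C).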
yd_transform = yd.clone().div_(sigma).exp_()
probs = torch.sum(torch.mul(retrieval_one_hot.view(
bs, -1, C), yd_transform.view(bs, -1, 1)), 1)
_, predictions = probs.sort(1, True)
# Find which predictions match the target
correct = predictions.eq(targets.data.view(-1, 1))
cls_time.update(time.time() - end)
top1 = top1 + correct.narrow(1, 0, 1).sum().item()
            top5 = top5 + correct.narrow(1, 0, 5).sum().item()
total += targets.size(0)
if batch_idx % 100 == 0:
print(f'Test [{total}/{testsize}]\t'
f'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
f'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
f'Top1: {top1 * 100. / total:.2f} top5: {top5 * 100. / total:.2f}')
print(top1 * 100. / total)
return top1 / total
def validate(val_loader, model, criterion, device='cpu', print_freq=100):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (data, target) in enumerate(val_loader):
data, target = data.to(device), target.to(device)
# compute output
output = model(data)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), data.size(0))
top1.update(prec1[0], data.size(0))
top5.update(prec5[0], data.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
                print(f'Test: [{i}/{len(val_loader)}] '
                      f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                      f'Loss {losses.val:.4f} ({losses.avg:.4f}) '
                      f'Prec@1 {top1.val:.3f} ({top1.avg:.3f}) '
                      f'Prec@5 {top5.val:.3f} ({top5.avg:.3f})')
print(f' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}')
return top1.avg
|
11565940
|
from flask_security import SQLAlchemySessionUserDatastore
from flask_security.utils import hash_password
import pytest
@pytest.fixture(scope="module", autouse=True)
def setup_api_test_data(db, setup_roles_users, add_market_prices, add_battery_assets):
"""
Set up data for API v2.0 tests.
"""
print("Setting up data for API v2.0 tests on %s" % db.engine)
# Add battery asset
battery = add_battery_assets["Test battery"]
battery.owner = setup_roles_users["Test Prosumer User 2"]
@pytest.fixture(scope="module")
def setup_inactive_user(db, setup_accounts, setup_roles_users):
"""
Set up one inactive user.
"""
from flexmeasures.data.models.user import User, Role
user_datastore = SQLAlchemySessionUserDatastore(db.session, User, Role)
user_datastore.create_user(
username="inactive test user",
email="<EMAIL>",
password=hash_password("<PASSWORD>"),
account_id=setup_accounts["Prosumer"].id,
active=False,
)
|
11565948
|
import logging
import argparse
import json
import numpy as np
from tap2.bottom_machine.confidence_reestimation import validate_score, fraction_full_recall, sf_stats
from util.args_help import file_list
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(filename)s:%(lineno)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
def make_sid2score(scored_sfs):
sids = list(set([t[0] for t in scored_sfs]))
sid2score = dict()
for sid in sids:
merge_score = np.array([t[1] for t in scored_sfs if t[0] == sid]).mean() # TODO: try max
sid2score[sid] = merge_score
# TODO: also support rank bonuses
return sid2score
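# A tiny worked sketch (illustrative values, not from the data): two prediction
# files both score sentence 'doc:0' for one question; the merger averages them.
#   make_sid2score([('doc:0', 0.9), ('doc:0', 0.7), ('doc:1', 0.2)])
#   -> {'doc:0': ~0.8, 'doc:1': 0.2}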
def make_qid2sid2score(prediction_file_list):
    """
    First gather qid2sf_scores, where each supporting fact can have multiple scores,
    then a final merge condenses the scores into qid2sid2score,
    then select a threshold to maximize F1.
    :param prediction_file_list:
    :return:
    """
qid2sf_scores = dict()
for pfile in prediction_file_list:
with open(pfile, 'r') as fp:
for line in fp:
parts = line.split('\t')
if len(parts) != 3:
raise ValueError('bad line: '+line)
qid = parts[0]
sid = parts[1]
score = float(parts[2])
qid2sf_scores.setdefault(qid, []).append((sid, score))
qid2sid2score = dict()
for qid, sf_scores in qid2sf_scores.items():
qid2sid2score[qid] = make_sid2score(sf_scores)
return qid2sid2score
def main():
"""
python tune_sf_thresholds.py \
--predictions TAP/bm/large_out/plain_predictions.tsv,TAP/bm/large_out/sspt_predictions.tsv \
--data hotpot_dev_distractor_v1.json \
--qid2sid2bonus TAP/qid2sid2bonus.json
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("--data", default=None, type=str, required=True,
help="HotpotQA json dataset")
parser.add_argument("--predictions", default=None, type=str, required=True,
help="From bottom_machine predictions.tsv, comma separated")
parser.add_argument("--qid2sid2bonus", default=None, type=str, required=False,
help="From boost_helpful_sfs")
parser.add_argument("--threshold_plot", default=None, type=str, required=False,
help="Write tsv performance vs. threshold")
args = parser.parse_args()
qid2sid2score = make_qid2sid2score(file_list(args.predictions))
if args.qid2sid2bonus:
with open(args.qid2sid2bonus, 'r') as fp:
qid2sid2bonus = json.load(fp)
for qid, sid2bonus in qid2sid2bonus.items():
sid2score = qid2sid2score[qid]
for sid, bonus in sid2bonus.items():
# this has a tiny impact (maybe a tenth of a percent)
sid2score[sid] = sid2score[sid] + bonus
qid2sfs = dict()
with open(args.data, 'r') as fp:
jdata = json.load(fp)
for jex in jdata:
qid = jex['_id']
supporting_facts = [sp[0] + ':' + str(sp[1]) for sp in jex['supporting_facts']]
qid2sfs[qid] = supporting_facts
max_f1, max_em, best_thresh, scores = validate_score(qid2sfs, qid2sid2score)
    logger.info(f'best threshold {best_thresh}: F1 = {max_f1}, EM = {max_em}')
    logger.info('other thresholds:')
scores.sort(key=lambda t: t[2], reverse=True) # sort by descending recall
for f1, p, r, em, thresh in scores:
        logger.info(f'  F1 = {f1}, P = {p}, R = {r}, EM = {em}, Threshold = {thresh}')
# plot f, p, r, em as function of thresh
scores.sort(key=lambda t: t[4])
if args.threshold_plot:
with open(args.threshold_plot, 'w') as f:
for f1, p, r, em, thresh in scores:
f.write(f'{thresh}\t{f1}\t{p}\t{r}\t{em}\n')
fraction_full_recall(qid2sfs, qid2sid2score)
sf_stats(qid2sid2score)
if __name__ == "__main__":
main()
|
11565991
|
import argparse
import sys
import unittest
from util import *
class CreateFile(unittest.TestCase):
def test_create_file(self):
cids = []
for i in range(MAX_NUMBER):
path, cid = create_random_file_parallel(FILE_SIZE, FILE_DIR)
cids.append(cid)
failed_cids = wait_for_cmdlets(cids)
self.assertTrue(len(failed_cids) == 0, "Failed to create test files!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-size', default='1MB')
parser.add_argument('-num', default='10')
default_dir = TEST_DIR + random_string() + "/"
parser.add_argument('-path', default=default_dir)
parser.add_argument('unittest_args', nargs='*')
args, unknown_args = parser.parse_known_args()
sys.argv[1:] = unknown_args
FILE_SIZE = convert_to_byte(args.size)
print "The file size for test is {}.".format(FILE_SIZE)
MAX_NUMBER = int(args.num)
print "The file number for test is {}.".format(MAX_NUMBER)
if not args.path.endswith("/"):
args.path = args.path + "/"
FILE_DIR = args.path
print "The test path is {}.".format(FILE_DIR)
unittest.main()
|
11566009
|
import csv
from opensanctions import settings
from opensanctions.core import Context
from opensanctions.wikidata import get_entity, entity_to_ftm
CURRENT = "https://raw.githubusercontent.com/every-politician-scrapers/%s/main/html/current.csv"
def crawl_country(context: Context, country: str, url: str):
res = context.http.get(url, stream=True)
lines = (line.decode("utf-8") for line in res.iter_lines())
context.log.info("Importing current leaders", country=country)
for row in csv.DictReader(lines):
data = get_entity(row.get("personID"))
if data is not None:
entity_to_ftm(
context,
data,
position=data.get("position"),
topics="role.pep",
country=country,
)
def crawl(context: Context):
params = {"_": settings.RUN_DATE}
res = context.http.get(context.dataset.data.url, params=params, stream=True)
lines = (line.decode("utf-8") for line in res.iter_lines())
for row in csv.DictReader(lines):
url = CURRENT % row.get("repo")
crawl_country(context, row.get("country"), url)
|
11566037
|
import tensorflow as tf
from ..ctc import loss
def forwardsum_loss(attn_logprob, in_lens, out_lens, blank_logprob=0,
parallel_iterations=10, swap_memory=False):
"""
attn_logprob: B x 1 x T1 x T2
in_lens: B, batch of text length
out_lens: B, batch of mel length
"""
key_lens = in_lens
query_lens = out_lens
attn_logprob_padded = tf.pad(attn_logprob, ((0, 0), (0, 0), (0, 0), (1, 0)),
constant_values=blank_logprob)
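    # A leading "blank" column is padded onto the key axis so the CTC alignment
    # may emit "no text token" for a frame; real keys therefore shift to
    # indices 1..T2 and index 0 is the blank (see target_seq below).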
batch = tf.constant(0, dtype=tf.int32)
total_loss = tf.constant(0.0, dtype=tf.float32)
batch_size = tf.shape(attn_logprob)[0]
def condition(bid, total_loss):
return tf.less(bid, batch_size)
def body(bid, total_loss):
target_seq = tf.expand_dims(tf.range(1, key_lens[bid] + 1), 0)
curr_logprob = tf.transpose(attn_logprob_padded[bid], [1, 0, 2])[: query_lens[bid], :, : key_lens[bid] + 1]
l, _, _ = loss.ctc_loss(
tf.transpose(curr_logprob, [1, 0, 2]), target_seq, query_lens[bid: bid + 1]
)
total_loss += l
return bid + 1, total_loss
_, total_loss = tf.while_loop(
condition,
body,
loop_vars=(batch, total_loss),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
back_prop=True,
)
return total_loss / tf.cast(batch_size, tf.float32)
def bin_loss(attn_hard, attn_soft):
where = tf.boolean_mask(attn_soft, tf.equal(attn_hard, tf.ones_like(attn_hard)))
log_sum = tf.math.log(tf.clip_by_value(where, 1e-12, tf.reduce_max(where)))
log_sum = tf.reduce_sum(log_sum)
return -log_sum / tf.reduce_sum(attn_hard)
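# A minimal sketch for bin_loss (an illustrative assumption, not part of the
# original module): attn_hard is a 0/1 alignment mask and attn_soft assigns
# probability mass over the same support; the loss is the mean negative
# log-probability of the soft attention at the hard-aligned positions.
def _bin_loss_demo():
    attn_hard = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    attn_soft = tf.constant([[0.9, 0.1], [0.2, 0.8]])
    # -(log(0.9) + log(0.8)) / 2 ~= 0.164
    return bin_loss(attn_hard, attn_soft)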
|
11566161
|
import numpy as np
import scipy.linalg as spl
from typing import List, Optional, Tuple, Union
from .Transform import Transform
class LeastSq(Transform):
"""Intance of Transform. Applies least squares to generate matrices"""
def calculate(
self,
ordinates: Tuple[List[float], List[float], List[float]],
absolutes: Tuple[List[float], List[float], List[float]],
weights: Optional[List[float]] = None,
    ) -> np.ndarray:
"""Calculates matrix with least squares and accompanying methods
Defaults to least squares calculation with no constraints
"""
abs_stacked, ord_stacked = self.get_stacked_values(
absolutes, ordinates, weights
)
ord_stacked = self.get_weighted_values(ord_stacked, weights)
abs_stacked = self.get_weighted_values(abs_stacked, weights)
# regression matrix M that minimizes L2 norm
matrix, res, rank, sigma = spl.lstsq(ord_stacked.T, abs_stacked.T)
if self.valid(rank):
return self.get_matrix(matrix, absolutes, ordinates, weights)
print("Poorly conditioned or singular matrix, returning NaNs")
return np.nan * np.ones((4, 4))
def get_matrix(
self,
matrix: List[List[float]],
absolutes: Optional[Tuple[List[float], List[float], List[float]]] = None,
ordinates: Optional[Tuple[List[float], List[float], List[float]]] = None,
weights: Optional[List[float]] = None,
    ) -> np.ndarray:
"""Returns matrix formatted for no constraints
NOTE: absolutes, ordinates, and weights are only used by QRFactorization's child function
"""
return np.array(
[
[matrix[0], matrix[1], matrix[2], matrix[3]],
[matrix[4], matrix[5], matrix[6], matrix[7]],
[matrix[8], matrix[9], matrix[10], matrix[11]],
[0.0, 0.0, 0.0, 1.0],
]
)
def get_stacked_absolutes(
self, absolutes: Tuple[List[float], List[float], List[float]]
) -> List[float]:
"""Formats absolutes for least squares method
Attributes
----------
absolutes: Rotated X, Y, and Z absolutes
Output
------
X, Y and Z absolutes placed end to end and transposed
"""
return np.vstack([absolutes[0], absolutes[1], absolutes[2]]).T.ravel()
def get_stacked_ordinates(
self, ordinates: Tuple[List[float], List[float], List[float]]
) -> List[List[float]]:
"""Formats ordinates for least squares method"""
# (reduces degrees of freedom by 4:
# - 4 for the last row of zeros and a one)
ord_stacked = np.zeros((12, len(ordinates[0]) * 3))
ord_stacked[0, 0::3] = ordinates[0]
ord_stacked[1, 0::3] = ordinates[1]
ord_stacked[2, 0::3] = ordinates[2]
ord_stacked[3, 0::3] = 1.0
ord_stacked[4, 1::3] = ordinates[0]
ord_stacked[5, 1::3] = ordinates[1]
ord_stacked[6, 1::3] = ordinates[2]
ord_stacked[7, 1::3] = 1.0
ord_stacked[8, 2::3] = ordinates[0]
ord_stacked[9, 2::3] = ordinates[1]
ord_stacked[10, 2::3] = ordinates[2]
ord_stacked[11, 2::3] = 1.0
return ord_stacked
def get_stacked_values(
self,
absolutes: Tuple[List[float], List[float], List[float]],
ordinates: Tuple[List[float], List[float], List[float]],
weights: Optional[List[float]] = None,
) -> Tuple[List[float], List[List[float]]]:
"""Gathers stacked stacked absolutes/ordinates
NOTE: weights are only used in QRFactorization's child function
"""
# LHS, or dependent variables
# [A[0,0], A[1,0], A[2,0], A[0,1], A[1,1], A[2,1], ...]
abs_stacked = self.get_stacked_absolutes(absolutes)
# RHS, or independent variables
# [
# [o[0,0], 0, 0, o[0,1], 0, 0, ...],
# [0, o[1,0], 0, 0, o[1,1], 0, ...],
# [0, 0, o[2,0], 0, 0, o[2,1], ...],
# ...
# ]
ord_stacked = self.get_stacked_ordinates(ordinates)
return abs_stacked, ord_stacked
def get_weighted_values(
self,
values: Tuple[List[float], List[float], List[float]],
weights: Optional[List[float]] = None,
) -> Union[List[float], List[List[float]]]:
"""Application of weights for least squares methods, which calls for square roots
Attributes
----------
values: absolutes or ordinates
Outputs
-------
tuple of weights applied to each element of values
"""
if weights is None:
return values
weights = np.sqrt(weights)
weights = np.vstack((weights, weights, weights)).T.ravel()
return values * weights
def valid(self, rank: float) -> bool:
"""validates whether or not a matrix can reliably transform the method's number of dimensions"""
        return rank >= self.ndims
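# A minimal usage sketch with synthetic data. Assumptions: the Transform base
# class can be instantiated without arguments and provides `ndims` for valid();
# the relative import above means this runs as part of its package
# (python -m ...), not as a standalone script.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    ordinates = tuple(rng.normal(size=10) for _ in range(3))
    absolutes = tuple(rng.normal(size=10) for _ in range(3))
    print(LeastSq().calculate(ordinates, absolutes))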
|
11566166
|
import json
from e2e.Classes.Merit.Merit import Blockchain, Merit
from e2e.Vectors.Generation.PrototypeChain import PrototypeBlock, PrototypeChain
protoRoot: PrototypeChain = PrototypeChain(1, False)
protoRoot.add(1)
root: Blockchain = protoRoot.finish()
main: Merit = Merit.fromJSON(root.toJSON())
alt: Merit = Merit.fromJSON(root.toJSON())
main.add(
PrototypeBlock(main.blockchain.blocks[-1].header.time + 1200).finish(0, main)
)
#Create the competing Block for the second miner.
#Since the difficulty is fixed at the start, they're guaranteed to have the same amount of work.
#Because of that, we can't just mine the Block; we need to mine it until it has a lower hash than the above Block.
#Calculate a custom difficulty guaranteed to beat the above Block.
hashAsInt: int = int.from_bytes(main.blockchain.blocks[-1].header.hash, "little")
timeOffset: int = 1201
alt.blockchain.difficulties[-1] = 0
while int.from_bytes(
PrototypeBlock(
alt.blockchain.blocks[-1].header.time + timeOffset,
minerID=1
).finish(0, alt).header.hash,
"little"
) > hashAsInt:
timeOffset += 1
alt.add(
PrototypeBlock(alt.blockchain.blocks[-1].header.time + timeOffset, minerID=1).finish(0, alt)
)
with open("e2e/Vectors/Merit/Reorganizations/DepthOne.json", "w") as vectors:
vectors.write(json.dumps({
"main": main.toJSON(),
"alt": alt.toJSON()
}))
|
11566168
|
from django.utils import timezone
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
print('Start')
from apps.config.models import WeChatConfig
WeChatConfig.environ()
print('finish!!!!')
|
11566178
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from logging import getLogger
from libcity.model import loss
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
class CRANN(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self._scaler = self.data_feature.get('scaler')
self.adj_mx = self.data_feature.get('adj_mx')
self.num_nodes = self.data_feature.get('num_nodes', 1)
self.feature_dim = self.data_feature.get('feature_dim', 1)
self.ext_dim = self.data_feature.get('ext_dim', 0)
self.output_dim = self.data_feature.get('output_dim', 1)
self._logger = getLogger()
# ---- spatial module
self.dim_x = config.get('dim_x', 5)
self.dim_y = config.get('dim_y', 6)
# ---- temporal module
self.n_hidden_tem = config.get('n_hidden_tem', 100)
self.n_layers_tem = config.get('n_layers_tem', 1)
# ---- dense module
self.n_hidden_dns = config.get('n_hidden_dns', 0)
self.n_layers_dns = config.get('n_layers_dns', 1)
self.n_ar = config.get('n_ar', 4)
self.device = config.get('device', torch.device('cpu'))
self.input_window = config.get('input_window', 24)
self.output_window = config.get('output_window', 24)
self.len_inputs = self.output_window * (self.num_nodes + self.ext_dim + 1)
self.len_outputs = self.output_window * self.num_nodes
self.spatial_model = AttentionCNN(in_channels=self.input_window, out_channels=self.output_window,
dim_x=self.dim_x, dim_y=self.dim_y)
self.temporal_encoder = EncoderLSTM(self.feature_dim, self.n_hidden_tem, device=self.device)
self.temporal_decoder = BahdanauDecoder(self.n_hidden_tem, self.output_dim)
self.mlp = MLP(n_inputs=self.len_inputs + self.n_ar * self.num_nodes,
n_outputs=self.len_outputs,
n_layers=self.n_layers_dns, n_hidden=self.n_hidden_dns)
def evaluate_temp_att(self, encoder, decoder, batch, n_pred, device):
output = torch.Tensor().to(device)
h = encoder.init_hidden(batch.size(0))
encoder_output, h = encoder(batch, h)
decoder_hidden = h
decoder_input = torch.zeros(batch.size(0), 1, device=device)
for k in range(n_pred):
decoder_output, decoder_hidden, attn_weights = decoder(decoder_input, decoder_hidden, encoder_output)
decoder_input = decoder_output
output = torch.cat((output, decoder_output), 1)
return output
def forward(self, batch):
x_time = batch['x_time']
x_space = batch['x_space']
x_ext = batch['x_ext']
y_time = self.evaluate_temp_att(self.temporal_encoder, self.temporal_decoder,
x_time, self.output_window, self.device)
y_space = self.spatial_model(x_space)[0]
x = torch.cat((y_time.unsqueeze(2), y_space.squeeze().view(-1, self.output_window, self.num_nodes),
x_ext), dim=2).view(-1, self.len_inputs)
x = torch.cat((x, x_space[:, -self.n_ar:].view(-1, self.n_ar * self.num_nodes)), dim=1)
y_pred = self.mlp(x).view(-1, self.output_window, self.dim_x, self.dim_y)
return y_pred
def calculate_loss(self, batch):
y_true = batch['y']
y_predicted = self.predict(batch)
# print('y_true', y_true.shape)
# print('y_predicted', y_predicted.shape)
y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
return loss.masked_mae_torch(y_predicted, y_true)
def predict(self, batch):
return self.forward(batch)
class AttentionCNN(nn.Module):
"""
---------------
| Description |
---------------
Spatial module with spatio-temporal attention
--------------
| Attributes |
--------------
in_channels : int
Number of input timesteps
out_channels : int
Number of output timesteps
dim_x : int
Dimension of x-axis for input images
dim_y : int
Dimension of y-axis for input images
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, in_channels, out_channels, dim_x, dim_y):
super(AttentionCNN, self).__init__()
# Variables
self.out_channels = out_channels
self.dim_x = dim_x
self.dim_y = dim_y
# Conv blocks
self.conv_block1 = ConvBlock(in_channels, 64, 5)
# Attention
        self.att1 = AttentionBlock(dim_x, dim_y, out_channels, method='hadamard')  # attend over the output timesteps
# Output
self.regressor = nn.Conv2d(in_channels=64, out_channels=out_channels, kernel_size=3, padding=1, bias=True)
def forward(self, x):
out = self.conv_block1(x)
out = self.regressor(out)
out, att = self.att1(out)
return out, att
class ConvBlock(nn.Module):
"""
---------------
| Description |
---------------
Convolutional blocks of num_conv convolutions with out_features channels
--------------
| Attributes |
--------------
in_features : int
Number of input channels
out_features : int
Number of middle and output channels
num_conv : int
Number of convolutions
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, in_features, out_features, num_conv):
super(ConvBlock, self).__init__()
features = [in_features] + [out_features for i in range(num_conv)]
layers = []
for i in range(len(features) - 1):
layers.append(
nn.Conv2d(in_channels=features[i], out_channels=features[i + 1], kernel_size=3, padding=1, bias=True))
layers.append(nn.BatchNorm2d(num_features=features[i + 1], affine=True, track_running_stats=True))
layers.append(nn.ReLU())
self.op = nn.Sequential(*layers)
def forward(self, x):
return self.op(x)
class AttentionBlock(nn.Module):
"""
---------------
| Description |
---------------
Attentional block for spatio-temporal attention mechanism
--------------
| Attributes |
--------------
dim_x : int
Dimension of x-axis for input images
dim_y : int
Dimension of y-axis for input images
timesteps : int
Number of input timesteps
method : str
Attentional function to calculate attention weights
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, dim_x, dim_y, timesteps, method='hadamard'):
super(AttentionBlock, self).__init__()
# Variables
self.method = method
self.weight = nn.Parameter(torch.FloatTensor(timesteps, dim_x * dim_y, dim_x * dim_y))
torch.nn.init.xavier_uniform_(self.weight)
if method == 'general':
self.fc = nn.Linear(timesteps * (dim_x * dim_y) ** 2, timesteps * (dim_x * dim_y) ** 2, bias=False)
elif method == 'concat':
self.fc = nn.Linear(timesteps * (dim_x * dim_y) ** 2, timesteps * (dim_x * dim_y) ** 2, bias=False)
def forward(self, x, y=0):
N, T, W, H = x.size()
if self.method == 'hadamard':
xp = x.view(N, T, -1).repeat(1, 1, W * H).view(N, T, W * H, W * H)
wp = self.weight.expand_as(xp)
alig_scores = wp.mul(xp)
elif self.method == 'general':
xp = x.view(N, T, -1).repeat(1, 1, W * H).view(N, T, W * H, W * H)
wp = self.weight.expand_as(xp)
alig_scores = self.fc((wp.mul(xp)).view(N, -1))
elif self.method == 'concat':
xp = x.view(N, T, -1).repeat(1, 1, W * H).view(N, T, W * H, W * H)
wp = self.weight.expand_as(xp)
alig_scores = torch.tanh(self.fc((wp + xp).view(N, -1)))
elif self.method == 'dot':
xp = x.view(N, T, -1).repeat(1, 1, W * H).view(N, T, W * H, W * H)
alig_scores = self.weight.matmul(xp)
att_weights = F.softmax(alig_scores.view(N, T, W * H, W * H), dim=3)
out = att_weights.matmul(x.view(N, T, -1).unsqueeze(3))
return out.view(N, T, W, H), att_weights
class EncoderLSTM(nn.Module):
"""
---------------
| Description |
---------------
Encoder for temporal module
--------------
| Attributes |
--------------
input_size : int
Number of input features
hidden_size : int
Dimension of hidden space
n_layers : int
Number of layers for the encoder
drop_prob : float
Dropout for the encoder
device : int/str
Device in which hiddens are stored
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, input_size, hidden_size, n_layers=1, drop_prob=0, device='cuda'):
super(EncoderLSTM, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.device = device
self.lstm = nn.LSTM(input_size, hidden_size, n_layers, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
output, hidden = self.lstm(inputs, hidden)
return output, hidden
def init_hidden(self, batch_size):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size, device=self.device),
torch.zeros(self.n_layers, batch_size, self.hidden_size, device=self.device))
class BahdanauDecoder(nn.Module):
"""
---------------
| Description |
---------------
    Decoder with an attention mechanism for the temporal module
--------------
| Attributes |
--------------
hidden_size : int
Dimension of hidden space
output_size : int
Number of output features
n_layers : int
Number of layers for the encoder
drop_prob : float
Dropout for the encoder
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, hidden_size, output_size, n_layers=1, drop_prob=0.1):
super(BahdanauDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
self.fc_hidden = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.fc_encoder = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.weight = nn.Parameter(torch.FloatTensor(1, hidden_size))
torch.nn.init.xavier_uniform_(self.weight)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.lstm = nn.LSTM(self.hidden_size + self.output_size, self.hidden_size, batch_first=True)
self.fc_prediction = nn.Linear(self.hidden_size, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
encoder_outputs = encoder_outputs.squeeze()
# Calculating Alignment Scores
x = torch.tanh(self.fc_hidden(hidden[0].view(-1, 1, self.hidden_size)) +
self.fc_encoder(encoder_outputs))
alignment_scores = x.matmul(self.weight.unsqueeze(2))
# Softmaxing alignment scores to get Attention weights
attn_weights = F.softmax(alignment_scores.view(inputs.size(0), -1), dim=1)
# Multiplying the Attention weights with encoder outputs to get the context vector
self.context_vector = torch.matmul(attn_weights.unsqueeze(1), encoder_outputs)
# Concatenating context vector with embedded input word
output = torch.cat((inputs, self.context_vector.squeeze(1)), 1).unsqueeze(1)
# Passing the concatenated vector as input to the LSTM cell
output, hidden = self.lstm(output, hidden)
output = self.fc_prediction(output).squeeze(2)
return output, hidden, attn_weights
class MLP(nn.Module):
"""
---------------
| Description |
---------------
Dense module
--------------
| Attributes |
--------------
n_inputs : int
Number of input features
n_outputs : int
Number of output features
n_layers : int
Number of layers
n_hidden : int
Dimension of hidden layers
-----------
| Methods |
-----------
forward(x)
Forward pass of the network
"""
def __init__(self, n_inputs, n_outputs, n_layers=1, n_hidden=0, dropout=0):
super(MLP, self).__init__()
if n_layers < 1:
raise ValueError('Number of layers needs to be at least 1.')
elif n_layers == 1:
self.module = nn.Linear(n_inputs, n_outputs)
else:
modules = [nn.Linear(n_inputs, n_hidden), nn.ReLU(), nn.Dropout(dropout)]
n_layers -= 1
while n_layers > 1:
modules += [nn.Linear(n_hidden, n_hidden), nn.ReLU(), nn.Dropout(dropout)]
n_layers -= 1
modules.append(nn.Linear(n_hidden, n_outputs))
self.module = nn.Sequential(*modules)
def forward(self, x):
return self.module(x)
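# A minimal usage sketch for the dense module above (hypothetical sizes; this
# only exercises MLP, so nothing beyond the imports at the top of this file
# resolving is required).
if __name__ == '__main__':
    mlp = MLP(n_inputs=8, n_outputs=3, n_layers=2, n_hidden=16)
    print(mlp(torch.randn(4, 8)).shape)  # expected: torch.Size([4, 3])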
|
11566182
|
import torch
from torch.optim.lr_scheduler import _LRScheduler
class ManualScheduler(_LRScheduler):
'''
Example:
config = {
# epoch: learning rate
0: 1e-3,
10: 5e-4,
20: 1e-4
}
'''
def __init__(self, optimizer, config, verbose=False, **kwargs):
self.config = config
self.verbose = verbose
super().__init__(optimizer, **kwargs)
def get_lr(self):
        if self.last_epoch not in self.config:
return [group['lr'] for group in self.optimizer.param_groups]
else:
new_lr = [
self.config[self.last_epoch] for group in self.optimizer.param_groups]
if self.verbose:
print(f'learning rate -> {new_lr}')
return new_lr
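# A minimal usage sketch (hypothetical model and optimizer; epochs absent from
# the config dict keep the previous learning rate).
if __name__ == '__main__':
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    scheduler = ManualScheduler(optimizer, {0: 1e-3, 10: 5e-4, 20: 1e-4}, verbose=True)
    for epoch in range(25):
        optimizer.step()  # dummy optimizer step so the scheduler is used as intended
        scheduler.step()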
|
11566207
|
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, nested_logit, optimal_strategy
from quetzal.engine.pathfinder import PublicPathFinder
from quetzal.engine.road_pathfinder import RoadPathFinder
from quetzal.model import preparationmodel
from syspy.assignment import raw as raw_assignment
from syspy.skims import skims
from tqdm import tqdm
class OptimalModel(preparationmodel.PreparationModel):
def get_optimal_strategy_edges(
self,
boarding_time=0,
alighting_time=0,
alpha=0.5,
target=None,
inf=1e9,
walk_on_road=False,
):
links = self.links.copy()
links['index'] = links.index
if walk_on_road:
road_links = self.road_links.copy()
road_links['time'] = road_links['walk_time']
footpaths = pd.concat([road_links, self.road_to_transit])
access = self.zone_to_road.copy()
else:
access = self.zone_to_transit.copy()
footpaths = self.footpaths.copy()
# transit edges
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
links['f'] = inf
links['c'] = links['time']
transit_edges = links[['i', 'j', 'f', 'c']].reset_index().values.tolist()
# boarding edges
links.index = 'boarding_' + links['index'].astype(str)
links['f'] = 1 / links['headway'] / alpha
if 'boarding_stochastic_utility' in links.columns:
links['f'] *= np.exp(links['boarding_stochastic_utility'])
links['c'] = boarding_time
boarding_edges = links[['a', 'i', 'f', 'c']].reset_index().values.tolist()
# alighting edges
links.index = 'alighting_' + links['index'].astype(str)
links['f'] = inf
links['c'] = alighting_time
alighting_edges = links[['j', 'b', 'f', 'c']].reset_index().values.tolist()
# access edges
if target is not None:
# we do not want to egress to a destination that is not the target
access = access.loc[(access['direction'] == 'access') | (access['b'] == target)]
access['f'] = inf
access['c'] = access['time']
access_edges = access[['a', 'b', 'f', 'c']].reset_index().values.tolist()
# footpaths
footpaths['f'] = inf
footpaths['c'] = footpaths['time']
footpaths_edges = footpaths[['a', 'b', 'f', 'c']].reset_index().values.tolist()
edges = access_edges + boarding_edges + transit_edges + alighting_edges + footpaths_edges
edges = [tuple(e) for e in edges]
return edges
def step_strategy_finder(self, *args, **kwargs):
s_dict = {}
node_df_list = []
all_edges = self.get_optimal_strategy_edges(*args, **kwargs)
for destination in tqdm(self.zones.index):
forbidden = set(self.zones.index) - {destination}
edges = [e for e in all_edges if e[2] not in forbidden]
strategy, u, f = optimal_strategy.find_optimal_strategy(edges, destination)
s_dict[destination] = strategy
node_df = pd.DataFrame({'f': pd.Series(f), 'u': pd.Series(u)})
node_df['destination'] = destination
node_df_list.append(node_df)
optimal_strategy_nodes = pd.concat(node_df_list)
edges = self.get_optimal_strategy_edges(*args, **kwargs)
optimal_strategy_sets = pd.Series(s_dict).apply(list)
optimal_strategy_edges = pd.DataFrame(
edges, columns=['ix', 'i', 'j', 'f', 'c']).set_index('ix')
assert optimal_strategy_edges.index.is_unique
self.optimal_strategy_edges = optimal_strategy_edges
self.optimal_strategy_sets = optimal_strategy_sets
self.optimal_strategy_nodes = optimal_strategy_nodes
nodes = optimal_strategy_nodes.copy()
nodes.index.name = 'origin'
nodes.set_index('destination', append=True, inplace=True)
pt_los = nodes.loc[self.zones.index]['u'].reset_index().rename(columns={'u': 'gtime'})
pt_los['pathfinder_session'] = 'optimal_strategy'
self.pt_los = pt_los
def step_strategy_assignment(self, volume_column, road=False):
dvol = self.volumes.groupby('destination')[volume_column].sum()
destinations = list(dvol.loc[dvol > 0].index)
destination_indexed_volumes = self.volumes.set_index(['destination', 'origin'])[volume_column]
destination_indexed_nodes = self.optimal_strategy_nodes.set_index(
'destination', append=True).swaplevel()
destination_indexed_strategies = self.optimal_strategy_sets
indexed_edges = self.optimal_strategy_edges[['i', 'j', 'f', 'c']]
node_volume = {}
edge_volume = {}
for destination in tqdm(destinations) if len(destinations) > 1 else destinations:
try:
sources = destination_indexed_volumes.loc[destination]
subset = destination_indexed_strategies.loc[destination]
edges = indexed_edges.loc[subset].reset_index().values.tolist()
f = destination_indexed_nodes.loc[destination]['f'].to_dict()
u = destination_indexed_nodes.loc[destination]['u'].to_dict()
except KeyError:
continue
node_v, edge_v = optimal_strategy.assign_optimal_strategy(sources, edges, u, f)
for k, v in node_v.items():
node_volume[k] = node_volume.get(k, 0) + v
for k, v in edge_v.items():
edge_volume[k] = edge_volume.get(k, 0) + v
loaded_edges = self.optimal_strategy_edges
loaded_edges.drop(volume_column, axis=1, errors='ignore', inplace=True)
loaded_edges[volume_column] = pd.Series(edge_volume)
df = loaded_edges[['i', 'j', volume_column]].dropna(subset=[volume_column])
self.links.drop(volume_column, axis=1, errors='ignore', inplace=True)
links = self.links.copy()
links['index'] = links.index
# transit edges
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
transit = pd.merge(links, df, on=['i', 'j'])
boardings = pd.merge(links, df, left_on=['a', 'i'], right_on=['i', 'j'])
alightings = pd.merge(links, df, left_on=['j', 'b'], right_on=['i', 'j'])
loaded_links = self.links.copy()
loaded_links[volume_column] = transit.set_index('index')[volume_column]
loaded_links['boardings'] = boardings.set_index('index')[volume_column]
loaded_links['alightings'] = alightings.set_index('index')[volume_column]
loaded_nodes = self.nodes.copy()
loaded_nodes.drop('boardings', axis=1, errors='ignore', inplace=True)
loaded_nodes.drop('alightings', axis=1, errors='ignore', inplace=True)
loaded_nodes['boardings'] = boardings.groupby('a')[volume_column].sum()
loaded_nodes['alightings'] = alightings.groupby('b')[volume_column].sum()
self.loaded_edges = loaded_edges
self.nodes = loaded_nodes
self.links = loaded_links
if road:
self.road_links[volume_column] = raw_assignment.assign(
volume_array=list(self.links[volume_column]),
paths=list(self.links['road_link_list'])
)
# todo remove 'load' from analysis module:
self.road_links['load'] = self.road_links[volume_column]
def analysis_strategy_time(self, boarding_time=0, alighting_time=0, inf=1e9, walk_on_road=True):
        assert walk_on_road  # TODO implement for ACF
zero = 1 / inf
# add a column for each type of time to the os edges
edges = self.optimal_strategy_edges
edges['rtt_time'] = self.road_to_transit['time']
edges['ztr_time'] = self.zone_to_road['time']
edges['in_vehicle_time'] = self.links['time']
edges.loc[['boarding_' in i for i in edges.index], 'boarding_time'] = boarding_time
edges.loc[['alighting_' in i for i in edges.index], 'alighting_time'] = alighting_time
if walk_on_road:
edges['road_time'] = self.road_links['walk_time']
edges.fillna(0, inplace=True)
edges['walk_time'] = edges['road_time'] + edges['rtt_time'] + edges['ztr_time']
self.optimal_strategy_edges = edges
        # sum the various types of times over the edges of a strategy
od_cost = []
columns = ['in_vehicle_time', 'boarding_time', 'walk_time']
indexed_edges = self.optimal_strategy_edges[['i', 'j', 'f', 'c']]
edges = indexed_edges.reset_index().values.tolist()
nodes = set.union(*[{i, j} for ix, i, j, f, c in edges])
edge_data = {ix: (i, j, fa, ca) for ix, i, j, fa, ca in edges}
cost_dict = {
key: self.optimal_strategy_edges[key].to_dict()
for key in columns
}
origins = destinations = list(self.zones.index)
for destination in tqdm(destinations):
u = {
key:{node:0 for node in nodes}
for key in columns
}
f = {node:0 for node in nodes} # here 0 * inf = 0 because inf = 1e9
F = {node: zero for node in nodes} # here zero * inf = 1
U = {node: inf for node in nodes}
U[destination] = 0
for ix in self.optimal_strategy_sets[destination]:
i, j, fa, _ = edge_data[ix]
for key in columns:
ca = cost_dict[key][ix]
u[key][i] = (f[i] * u[key][i] + fa * (u[key][j] + ca)) / (f[i] + fa)
U[i] = (F[i] * U[i] + fa * (U[j])) / (F[i] + fa)
F[i] = F[i] + fa
f[i] = f[i] + fa
u['waiting_time'] = U
time_columns = columns + ['waiting_time']
        for key in time_columns:
for origin in origins:
od_cost.append([key, origin, destination, u[key][origin]])
data = pd.DataFrame(od_cost, columns=['key', 'origin', 'destination', 'cost'])
right = data.set_index(['key', 'origin', 'destination'])['cost'].unstack('key').reset_index()
self.pt_los.drop(time_columns, axis=1, inplace=True, errors='ignore')
self.pt_los = pd.merge(self.pt_los, right, on=['origin', 'destination'])
self.pt_los['time'] = self.pt_los[time_columns].sum(axis=1)
def get_aggregated_edges(self, origin, destination, irrelevant_nodes=None):
        # restriction to the destination's edges
edges = self.optimal_strategy_edges[['i', 'j', 'f', 'c']].copy()
edges = edges.loc[self.optimal_strategy_sets.loc[destination]]
edges['ix'] = edges.index
        # removing the edges that are not relevant (p < 1e-6)
f_total = edges.groupby('i')[['f']].sum()
edges = pd.merge(edges, f_total, left_on='i', right_index=True, suffixes=['', '_total'])
edges['p'] = np.round(edges['f'] / edges['f_total'], 6)
edges = edges.loc[edges['p'] > 0]
# restriction to the origin
g = nx.DiGraph()
for e in edges.to_dict(orient='records'):
g.add_edge(e['i'], e['j'])
paths = list(nx.all_simple_paths(g, source=origin, target=destination))
nodes = set.union(*[set(p) for p in paths])
ode = edges.loc[edges['i'].isin(nodes) & edges['j'].isin(nodes)]
# transform node -> (node, trip_id) to node -> trip_id
links = self.links.copy()
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
transit = pd.merge(links, ode[['i', 'j', 'ix']], on=['i', 'j'])
boardings = pd.merge(links[['a', 'i', 'trip_id']], ode[['i', 'j', 'ix']], left_on=['a', 'i'], right_on=['i', 'j'])
alightings = pd.merge(links[['j', 'b', 'trip_id']], ode[['i', 'j', 'ix']], left_on=['j', 'b'], right_on=['i', 'j'])
inlegs = set(transit['ix']).union(boardings['ix']).union(alightings['ix'])
remaining = ode.drop(list(inlegs))
boardings = boardings.set_index('ix')
boardings['i'] = boardings['a']
boardings['j'] = boardings['trip_id']
boardings['f'] = ode['f']
boardings['p'] = ode['p']
alightings = alightings.set_index('ix')
alightings['j'] = alightings['b']
alightings['i'] = alightings['trip_id']
alightings['f'] = ode['f']
alightings['p'] = 1
c = ['i', 'j', 'f', 'p']
a = pd.concat([boardings[c], alightings[c], remaining[c]])
a = a.dropna(subset=['i', 'j'])
# replace a -> irrelevant -> irrelevant -> b by a -> b
if irrelevant_nodes is not None:
def get_relevant_node(irrelevant_node, irrelevant_nodes, g):
node = irrelevant_node
irrelevant = True
while irrelevant:
node = list(g.neighbors(node))[0]
irrelevant = node in irrelevant_nodes
return node
a = a.loc[~a['i'].isin(irrelevant_nodes)]
loc = a.loc[a['j'].isin(irrelevant_nodes), 'j']
a.loc[a['j'].isin(irrelevant_nodes), 'j'] = loc.apply(
lambda j: get_relevant_node(j, irrelevant_nodes, g))
return a
|
11566232
|
import sys
from g_python.gextension import Extension
from g_python_bot import ChatBot
extension_info = {
"title": "ConsoleBot",
"description": "Make extension ez without ui",
"version": "1.0",
"author": "denio4321"
}
ext = Extension(extension_info, sys.argv)
ext.start()
def pong():
console_bot.send_message("pong")
def ping():
console_bot.send_message("ping")
# Creates the object
console_bot = ChatBot(ext, botname="ConsoleBot-Example")
# Defines the command and the call functions
console_bot.on_command(':ping', pong)
console_bot.on_command(':pong', ping)
# Runs the bot
console_bot.start()
# Sends a welcome message once connected
console_bot.send_message("Hi! Nice to meet you! This is a example bot.")
|
11566249
|
from abc import ABC, abstractmethod
class PropertyExtractor(ABC):
    """
    This class should be used as a base class for any property extractor. Please refer to the
    existing extractors (such as DepthExtractor, IlluminationExtractor, SaliencyExtractor) as
    working examples.
    """
    THRESHOLD_MISSING_FRAMES = 50
@abstractmethod
def extract_from_folder(self, frames_path: str, output_path: str):
"""
        This method should be responsible for generating the property maps for a given folder
        which contains a list of frames.
        :param frames_path: the folder containing the frames
        :param output_path: the folder where the generated maps will be stored
"""
raise NotImplementedError("You should override the method extract_from_folder")
@abstractmethod
def get_property_alias(self) -> str:
"""
This method should return the alias of the property, for identification purposes.
:return: the alias of the property (as a string)
"""
raise NotImplementedError("You should override the method get_property_alias")
@abstractmethod
def get_frame_extension(self) -> str:
"""
        This method should return the file extension of the generated property frames.
:return: the extension (as a string)
"""
raise NotImplementedError("You should override the method get_frame_extension")
|
11566312
|
import numpy as np
ACCEPT, REJECT = 1, 0
P_FREE = 0.06
R_REJ = 0
class AccessControlQueuingTask:
def __init__(self, n_serv, que_size):
self.get_moves()
self.n_serv = n_serv
self.queue = np.random.randn(que_size)
self.reset()
def get_rank(self):
return len(np.flatnonzero(self.queue < self.queue[0]))
def get_priority(self):
return 2 ** self.get_rank()
def encode_state(self, rank, n_serv, n_free_serv):
return rank * (n_serv + 1) + n_free_serv
def get_state(self):
return self.encode_state(self.get_rank(), self.n_serv,
self.n_free_serv)
def get_moves(self):
self.moves = [REJECT, ACCEPT]
def update_queue(self):
self.queue[:-1] = self.queue[1:]
self.queue[-1] = np.random.randn()
def update_servers(self):
for i in range(self.n_serv - self.n_free_serv):
self.n_free_serv += (np.random.random() < P_FREE)
def step(self, a):
self.update_servers()
if a == REJECT or self.n_free_serv == 0:
self.update_queue()
state = self.get_state()
return state, R_REJ, False, {}
else:
prio = self.get_priority()
self.update_queue()
self.n_free_serv -= 1
state = self.get_state()
return state, prio, False, {}
def seed(self, seed):
np.random.seed(seed)
def reset(self):
self.n_free_serv = self.n_serv
self.state = self.get_state()
return self.state
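# A minimal rollout sketch (hypothetical parameters: 10 servers and a queue of
# 5 customers; each accepted customer pays its priority 2**rank as reward).
if __name__ == "__main__":
    task = AccessControlQueuingTask(n_serv=10, que_size=5)
    task.seed(0)
    state = task.reset()
    for _ in range(5):
        state, reward, done, info = task.step(ACCEPT)
        print(state, reward)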
|
11566330
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from .utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
)
class SpatialGroupEnhance(nn.Module):
def __init__(self, groups = 64):
super(SpatialGroupEnhance, self).__init__()
self.groups = groups
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.weight = Parameter(torch.zeros(1, groups, 1, 1))
self.bias = Parameter(torch.ones(1, groups, 1, 1))
self.sig = nn.Sigmoid()
def forward(self, x): # (b, c, h, w)
b, c, h, w = x.size()
x = x.view(b * self.groups, -1, h, w)
xn = x * self.avg_pool(x)
xn = xn.sum(dim=1, keepdim=True)
t = xn.view(b * self.groups, -1)
t = t - t.mean(dim=1, keepdim=True)
std = t.std(dim=1, keepdim=True) + 1e-5
t = t / std
t = t.view(b, self.groups, h, w)
t = t * self.weight + self.bias
t = t.view(b * self.groups, 1, h, w)
x = x * self.sig(t)
x = x.view(b, c, h, w)
return x
class ContextBlock(nn.Module):
    def __init__(self, inplanes, ratio, pooling_type='att',
                 fusion_types=('channel_add', )):
super(ContextBlock, self).__init__()
valid_fusion_types = ['channel_add', 'channel_mul']
assert pooling_type in ['avg', 'att']
assert isinstance(fusion_types, (list, tuple))
assert all([f in valid_fusion_types for f in fusion_types])
assert len(fusion_types) > 0, 'at least one fusion should be used'
self.inplanes = inplanes
self.ratio = ratio
self.planes = int(inplanes * ratio)
self.pooling_type = pooling_type
self.fusion_types = fusion_types
if pooling_type == 'att':
self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
else:
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if 'channel_add' in fusion_types:
self.channel_add_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True), # yapf: disable
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
else:
self.channel_add_conv = None
if 'channel_mul' in fusion_types:
self.channel_mul_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True), # yapf: disable
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
else:
self.channel_mul_conv = None
def spatial_pool(self, x):
batch, channel, height, width = x.size()
if self.pooling_type == 'att':
input_x = x
# [N, C, H * W]
input_x = input_x.view(batch, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(batch, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
# [N, 1, H * W, 1]
context_mask = context_mask.unsqueeze(-1)
# [N, 1, C, 1]
context = torch.matmul(input_x, context_mask)
# [N, C, 1, 1]
context = context.view(batch, channel, 1, 1)
else:
# [N, C, 1, 1]
context = self.avg_pool(x)
return context
def forward(self, x):
# [N, C, 1, 1]
context = self.spatial_pool(x)
out = x
if self.channel_mul_conv is not None:
# [N, C, 1, 1]
channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
out = out * channel_mul_term
if self.channel_add_conv is not None:
# [N, C, 1, 1]
channel_add_term = self.channel_add_conv(context)
out = out + channel_add_term
return out
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = self._swish(self._bn0(self._expand_conv(inputs)))
x = self._swish(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class cbam_EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
self.ca1 = ChannelAttention(out_channels)
self.sa1 = SpatialAttention()
# self.sge = SpatialGroupEnhance(out_channels)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# self.ca2 = ChannelAttention(out_channels)
# self.sa2 = SpatialAttention()
# Final linear layer
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export)"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
x = self.ca1(x) * x
x = self.sa1(x) * x
# x = self.sge(x)
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = self._swish(self._bn1(self._conv_head(x)))
# x = self.ca2(x) * x
# x = self.sa2(x) * x
return x
def forward(self, inputs):
""" Calls extract_features to extract features, applies final linear layer, and returns logits. """
bs = inputs.size(0)
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = self._avg_pooling(x)
x = x.view(bs, -1)
x = self._dropout(x)
# x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return cls(blocks_args, global_params)
@classmethod
def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
model = cls.from_name(model_name, override_params={'num_classes': num_classes})
load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
if in_channels != 3:
Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)
out_channels = round_filters(32, model._global_params)
model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name):
""" Validates model name. """
valid_models = ['efficientnet-b'+str(i) for i in range(9)]
if model_name not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
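# A minimal usage sketch (assumes 'efficientnet-b0' is handled by
# get_model_params; forward() returns pooled feature vectors here because the
# final fc layer is commented out above). The relative imports mean this file
# runs as part of its package (python -m ...), not as a standalone script.
if __name__ == '__main__':
    model = cbam_EfficientNet.from_name('efficientnet-b0')
    feats = model(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # expected: torch.Size([1, 1280]) for b0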
|
11566344
|
import unittest
from copy import deepcopy
from random import random, seed, randint
from lightly.api.bitmask import BitMask
N = 10
class TestBitMask(unittest.TestCase):
def setup(self, psuccess=1.):
pass
def test_get_and_set(self):
mask = BitMask.from_bin("0b11110000")
self.assertFalse(mask.get_kth_bit(2))
mask.set_kth_bit(2)
self.assertTrue(mask.get_kth_bit(2))
self.assertTrue(mask.get_kth_bit(4))
mask.unset_kth_bit(4)
self.assertFalse(mask.get_kth_bit(4))
def test_large_bitmasks(self):
bitstring = "0b" + "1" * 5678
mask = BitMask.from_bin(bitstring)
mask_as_bitstring = mask.to_bin()
self.assertEqual(mask_as_bitstring, bitstring)
def test_bitmask_from_length(self):
length = 4
mask = BitMask.from_length(length)
self.assertEqual(mask.to_bin(), "0b1111")
def test_get_and_set_outside_of_range(self):
mask = BitMask.from_bin("0b11110000")
self.assertFalse(mask.get_kth_bit(100))
mask.set_kth_bit(100)
self.assertTrue(mask.get_kth_bit(100))
def test_inverse(self):
# TODO: proper implementation
return
x = int("0b11110000", 2)
y = int("0b00001111", 2)
mask = BitMask(x)
mask.invert()
self.assertEqual(mask.x, y)
x = int("0b010101010101010101", 2)
y = int("0b101010101010101010", 2)
mask = BitMask(x)
mask.invert()
self.assertEqual(mask.x, y)
def test_store_and_retrieve(self):
x = int("0b01010100100100100100100010010100100100101001001010101010", 2)
mask = BitMask(x)
mask.set_kth_bit(11)
mask.set_kth_bit(22)
mask.set_kth_bit(33)
mask.set_kth_bit(44)
mask.set_kth_bit(55)
mask.set_kth_bit(66)
mask.set_kth_bit(77)
mask.set_kth_bit(88)
mask.set_kth_bit(99)
somewhere = mask.to_hex()
somewhere_else = mask.to_bin()
mask_somewhere = BitMask.from_hex(somewhere)
mask_somewhere_else = BitMask.from_bin(somewhere_else)
self.assertEqual(mask.x, mask_somewhere.x)
self.assertEqual(mask.x, mask_somewhere_else.x)
def test_union(self):
mask_a = BitMask.from_bin("0b001")
mask_b = BitMask.from_bin("0b100")
mask_a.union(mask_b)
self.assertEqual(mask_a.x, int("0b101", 2))
def test_intersection(self):
mask_a = BitMask.from_bin("0b101")
mask_b = BitMask.from_bin("0b100")
mask_a.intersection(mask_b)
self.assertEqual(mask_a.x, int("0b100", 2))
    def assert_difference(self, bitstring_1: str, bitstring_2: str, target: str):
        mask_a = BitMask.from_bin(bitstring_1)
mask_b = BitMask.from_bin(bitstring_2)
mask_a.difference(mask_b)
self.assertEqual(mask_a.x, int(target, 2))
def test_differences(self):
self.assert_difference("0b101", "0b001", "0b100")
self.assert_difference("0b0111", "0b1100", "0b0011")
self.assert_difference("0b10111", "0b01100", "0b10011")
    def random_bitstring(self, length: int):
        bitstring = '0b'
        for i in range(length):
            bitstring += str(randint(0, 1))
        return bitstring
def test_difference_random(self):
seed(42)
for rep in range(10):
for string_length in range(1, 100, 10):
bitstring_1 = self.random_bitstring(string_length)
bitstring_2 = self.random_bitstring(string_length)
target = '0b'
for bit_1, bit_2 in zip(bitstring_1[2:], bitstring_2[2:]):
if bit_1 == '1' and bit_2 == '0':
target += '1'
else:
target += '0'
self.assert_difference(bitstring_1, bitstring_2, target)
def test_operator_minus(self):
mask_a = BitMask.from_bin("0b10111")
mask_a_old = deepcopy(mask_a)
mask_b = BitMask.from_bin("0b01100")
mask_target = BitMask.from_bin("0b10011")
diff = mask_a - mask_b
self.assertEqual(diff, mask_target)
self.assertEqual(mask_a_old, mask_a) # make sure the original mask is unchanged.
def test_equal(self):
mask_a = BitMask.from_bin("0b101")
mask_b = BitMask.from_bin("0b101")
self.assertEqual(mask_a, mask_b)
def test_masked_select_from_list(self):
n = 1000
list_ = [randint(0, 1) for _ in range(n - 2)] + [0, 1]
mask = BitMask.from_length(n)
for index, item_ in enumerate(list_):
if item_ == 0:
mask.unset_kth_bit(index)
else:
mask.set_kth_bit(index)
all_ones = mask.masked_select_from_list(list_)
mask.invert(n)
all_zeros = mask.masked_select_from_list(list_)
self.assertGreater(len(all_ones), 0)
self.assertGreater(len(all_zeros), 0)
self.assertTrue(all([item_ > 0 for item_ in all_ones]))
self.assertTrue(all([item_ == 0 for item_ in all_zeros]))
def test_masked_select_from_list_example(self):
list_ = [1, 2, 3, 4, 5, 6]
mask = BitMask.from_bin('0b001101') # expected result is [1, 3, 4]
selected = mask.masked_select_from_list(list_)
self.assertListEqual(selected, [1, 3, 4])
def test_invert(self):
# get random bitstring
length = 10
        bitstring = self.random_bitstring(length)
#get inverse
mask = BitMask.from_bin(bitstring)
mask.invert(length)
inverted = mask.to_bin()
# remove 0b
inverted = inverted[2:]
bitstring = bitstring[2:]
for i in range(min(len(bitstring), len(inverted))):
if bitstring[-i - 1] == '0':
self.assertEqual(inverted[-i - 1], '1')
else:
self.assertEqual(inverted[-i - 1], '0')
def test_nonzero_bits(self):
mask = BitMask.from_bin("0b0")
indices = [100, 1000, 10_000, 100_000]
self.assertEqual(mask.x, 0)
for index in indices:
mask.set_kth_bit(index)
self.assertGreaterEqual(mask.x, 0)
also_indices = mask.to_indices()
for i, j in zip(indices, also_indices):
self.assertEqual(i, j)
|
11566404
|
from huobi.constant import *
class Account:
"""
The account information for spot account, margin account etc.
:member
        id: The unique account id.
        type: The type of this account, possible value: spot, margin, otc, point.
        state: The account state, possible value: working, lock.
        subtype: The sub-type of this account.
"""
def __init__(self):
self.id = 0
self.type = AccountType.INVALID
self.state = AccountState.INVALID
self.subtype = ""
def print_object(self, format_data=""):
from huobi.utils.print_mix_object import PrintBasic
PrintBasic.print_basic(self.id, format_data + "ID")
PrintBasic.print_basic(self.type, format_data + "Account Type")
PrintBasic.print_basic(self.state, format_data + "Account State")
PrintBasic.print_basic(self.subtype, format_data + "Subtype")
|
11566421
|
from __future__ import unicode_literals
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.core.serializers.python import Serializer
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Manager, Model
from django.forms.models import modelform_factory
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import View
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
            # We return the newly created object's details and a Location header with its url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# Based on Django's built-in JSON serializer: only simplejson >= 2.1.3 supports `use_decimal`
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
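# A minimal usage sketch (the `Book` model and field names below are
# hypothetical; the view base class itself is defined earlier in this module):
#
#     class BookView(BackboneAPIView):
#         model = Book
#         display_fields = ('title', 'author')
#
# GET  /books/       -> JSON list built by `serialize`
# POST /books/       -> `add_object` validates via a ModelForm, returns 201 + Location
# PUT  /books/<id>/  -> `update_object` re-validates and returns the detail JSON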
|
11566428
|
from __future__ import division
import collections
import numpy as np
from scipy import sparse
from scipy.sparse import linalg
from scipy.sparse import dia_matrix
np.random.seed(seed=1)
# Supporting functions
gaussian = lambda z, height, position, hwhm: height * np.exp(-np.log(2) * ((z - position)/hwhm)**2)
H = lambda z: 0.5 * (1 - np.sign(z))
TH = lambda x, sigma, mu: np.where( x>(mu-sigma), 1, 0) * np.where(x<(mu+sigma), 1, 0)
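# `gaussian` is a peak with the given height and half-width at half-maximum,
# `H` is a left-sided Heaviside step (1 for z < 0, 0 for z > 0), and `TH` is a
# top-hat of width 2*sigma centred on mu, e.g. TH(x, 0.4, 0) is 1 on
# (-0.4, 0.4) and 0 elsewhere.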
def check_index_within_bounds(i, min_i, max_i):
"""Checks that the index specified (can be number or an iterable) is within the given range."""
success = np.all((i>=min_i)*(i<=max_i))
if success:
return True
if isinstance(i, collections.abc.Iterable):
    # The index is array-like
    print("Index is out of bounds.\ni=%s" % i[np.where(np.logical_not((i>=min_i)*(i<=max_i)))])
else:
    # The index is a number
    print("Index is out of bounds.\ni=%s" % i)
return False
class Mesh(object):
"""A 1D cell centered mesh defined by faces for the finite volume method."""
def __init__(self, faces):
super(Mesh, self).__init__()
# Check for duplicated points
if len(faces) != len(set(faces)):
raise ValueError("The faces array contains duplicated positions. No cell can have zero volume so please update with unique face positions.")
self.faces = np.array(faces)
self.cells = 0.5 * (self.faces[0:-1] + self.faces[1:])
self.J = len(self.cells)
self.cell_widths = (self.faces[1:] - self.faces[0:-1])
def h(self, i):
"""Returns the width of the cell at the specified index."""
return self.cell_widths[i]
def hm(self, i):
"""Distance between centroids in the backwards direction."""
if not check_index_within_bounds(i,1,self.J-1):
raise ValueError("hm index runs out of bounds")
return (self.cells[i] - self.cells[i-1])
def hp(self, i):
"""Distance between centroids in the forward direction."""
if not check_index_within_bounds(i,0,self.J-2):
raise ValueError("hp index runs out of bounds")
return (self.cells[i+1] - self.cells[i])
class CellVariable(np.ndarray):
"""Representation of a variable defined at the cell centers. Provides interpolation functions to calculate the value at cell faces."""
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
def __new__(cls, input_array, mesh=None):
# If `input_array` is actually just a constant,
# broadcast it to an array with one entry per cell.
try:
    len(input_array)
except TypeError:
    input_array = input_array*np.ones(len(mesh.cells))
obj = np.asarray(input_array).view(cls)
obj.mesh = mesh
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.mesh = getattr(obj, 'mesh', None)
self.__get_items__ = getattr(obj, '__get_items__', None)
def m(self, i):
"""Linear interpolation of the cell value at the right hand face i.e. along the _m_inus direction."""
return self.mesh.h(i)/(2*self.mesh.hm(i))*self[i-1] + self.mesh.h(i-1)/(2*self.mesh.hm(i))*self[i]
def p(self, i):
"""Linear interpolation of the cell value at the right hand face i.e. along the _p_lus direction."""
return self.mesh.h(i+1)/(2*self.mesh.hp(i))*self[i] + self.mesh.h(i)/(2*self.mesh.hp(i))*self[i+1]
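# A short worked sketch of the face interpolation on a non-uniform mesh
# (illustrative values, not those used in the run at the bottom of this file):
#
#     mesh = Mesh(np.array([0.0, 1.0, 3.0]))     # cells at 0.5 and 2.0
#     phi = CellVariable(np.array([1.0, 2.0]), mesh=mesh)
#     phi.p(0)   # distance-weighted value at the shared face x = 1.0
#
# With h(0)=1, h(1)=2 and hp(0)=1.5 this gives 2/3*1.0 + 1/3*2.0 = 4/3, i.e.
# the nearer cell centre gets the larger weight.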
class AdvectionDiffusionModel(object):
"""A model for the advection-diffusion equation"""
def __init__(self, faces, a, d, k, discretisation="central"):
super(AdvectionDiffusionModel, self).__init__()
self.mesh = Mesh(faces)
self.a = CellVariable(a, mesh=self.mesh)
self.d = CellVariable(d, mesh=self.mesh)
self.k = k
self.discretisation = discretisation
# Check Peclet number
import warnings
mu = self.peclet_number()
if np.max(np.abs(mu)) >= 1.5 and np.max(np.abs(mu)) < 2.0:
    warnings.warn("\n\nThe Peclet number is %g; this is getting close to the limit of mod 2." % (np.max(np.abs(mu)),))
elif np.max(np.abs(mu)) > 2:
    warnings.warn("\n\nThe Peclet number (%g) has exceeded the maximum value of mod 2 for the central discretisation scheme." % (np.max(np.abs(mu)),))
# Check CFL condition
CFL = self.CFL_condition()
if np.max(np.abs(CFL)) > 0.5 and np.max(np.abs(CFL)) < 1.0:
warnings.warn("\n\nThe CFL condition value is %g, it is getting close to the upper limit." % (np.max(CFL),) )
elif np.max(np.abs(CFL)) > 1:
warnings.warn("\n\nThe CFL condition value is %g, and has gone above the upper limit." % (np.max(CFL),) )
if discretisation == "exponential":
self.kappa = (np.exp(mu) + 1)/(np.exp(mu) - 1) - 2/mu
self.kappa[np.where(mu==0.0)] = 0
self.kappa[np.where(np.isposinf(mu))] = 1
self.kappa[np.where(np.isneginf(mu))] = -1
elif discretisation == "upwind":
kappa_neg = np.where(self.a<0,-1,0)
kappa_pos = np.where(self.a>0,1,0)
self.kappa = kappa_neg + kappa_pos
elif discretisation == "central":
self.kappa = np.zeros(self.mesh.J)
else:
print "Please set `discretisation` to one of the following: `upwind`, `central` or `exponential`."
# Artificially modify the diffusion coefficient to introduce adaptive discretisation
self.d = self.d + 0.5 * self.a * self.mesh.cell_widths * self.kappa
print("Using kappa", np.min(self.kappa), np.max(self.kappa))
print(self.kappa)
def peclet_number(self):
return self.a * self.mesh.cell_widths / self.d
def CFL_condition(self):
return self.a * self.k / self.mesh.cell_widths
def set_boundary_conditions(self, left_flux=None, right_flux=None, left_value=None, right_value=None ):
"""Make sure this function is used sensibly otherwise the matrix will be ill posed."""
self.left_flux = left_flux
self.right_flux = right_flux
self.left_value = left_value
self.right_value = right_value
def _interior_matrix_elements(self, i):
# Interior coefficients for matrix equation
ra = lambda i, a, d, m: 1./m.h(i)*(a.m(i)*m.h(i)/(2*m.hm(i)) + d.m(i)/m.hm(i))
rb = lambda i, a, d, m: 1./m.h(i)*(a.m(i)*m.h(i-1)/(2*m.hm(i)) - a.p(i)*m.h(i+1)/(2*m.hp(i)) - d.m(i)/m.hm(i) - d.p(i)/m.hp(i))
rc = lambda i, a, d, m: 1./m.h(i)*(-a.p(i)*m.h(i)/(2*m.hp(i)) + d.p(i)/m.hp(i))
return ra(i, self.a, self.d, self.mesh), rb(i, self.a, self.d, self.mesh), rc(i,self.a, self.d, self.mesh)
def _robin_boundary_condition_matrix_elements_left(self):
# Left hand side Robin boundary coefficients for matrix equation
b1 = lambda a, d, m: 1./m.h(0)*(-a.p(0)*m.h(1)/(2*m.hp(0)) - d.p(0)/m.hp(0) )
c1 = lambda a, d, m: 1./m.h(0)*(-a.p(0)*m.h(0)/(2*m.hp(0)) + d.p(0)/m.hp(0) )
# Index and element value
locations = [(0,0), (0,1)]
values = ( b1(self.a, self.d, self.mesh ),
c1(self.a, self.d, self.mesh ) )
return tuple([list(x) for x in zip(locations, values)])
def _robin_boundary_condition_matrix_elements_right(self, matrix=None):
# Right hand side Robin boundary coefficients for matrix equation
aJ = lambda a, d, m: 1./m.h(m.J-1)*( a.m(m.J-1)*m.h(m.J-1)/(2*m.hm(m.J-1)) + d.m(m.J-1)/m.hm(m.J-1) )
bJ = lambda a, d, m: 1./m.h(m.J-1)*( a.m(m.J-1)*m.h(m.J-2)/(2*m.hm(m.J-1)) - d.m(m.J-1)/m.hm(m.J-1) )
J = self.mesh.J
# Index and element value
locations = [(J-1,J-2), (J-1,J-1)]
values = ( aJ(self.a, self.d, self.mesh ),
bJ(self.a, self.d, self.mesh ) )
return tuple([list(x) for x in zip(locations, values)])
def _robin_boundary_condition_vector_elements_left(self):
# Index and boundary condition vector elements for Robin conditions
location = [0]
value = [self.left_flux/self.mesh.h(0)]
return tuple([list(x) for x in zip(location, value)])
def _robin_boundary_condition_vector_elements_right(self):
# Index and boundary condition vector elements for Robin conditions
location = [self.mesh.J-1]
value = [-self.right_flux/self.mesh.h(self.mesh.J-1)]
return tuple([list(x) for x in zip(location, value)])
def _dirichlet_boundary_condition_matrix_elements_left(self):
# Left hand side Robin boundary coefficients for matrix equation
rb = lambda i, a, d, m: 1./m.h(i)*(a.m(i)*m.h(i-1)/(2*m.hm(i)) - a.p(i)*m.h(i+1)/(2*m.hp(i)) - d.m(i)/m.hm(i) - d.p(i)/m.hp(i))
rc = lambda i, a, d, m: 1./m.h(i)*(-a.p(i)*m.h(i)/(2*m.hp(i)) + d.p(i)/m.hp(i))
# Index and element value
locations = [(0,0), (0,1)]
# values = ( rb(0, self.a, self.d, self.mesh ),
# rc(0, self.a, self.d, self.mesh ) )
values = ( 0,
1 )
return tuple([list(x) for x in zip(locations, values)])
def _dirichlet_boundary_condition_matrix_elements_right(self):
# Right hand side Robin boundary coefficients for matrix equation
ra = lambda i, a, d, m: 1./m.h(i)*(a.m(i)*m.h(i)/(2*m.hm(i)) + d.m(i)/m.hm(i))
rb = lambda i, a, d, m: 1./m.h(i)*(a.m(i)*m.h(i-1)/(2*m.hm(i)) - a.p(i)*m.h(i+1)/(2*m.hp(i)) - d.m(i)/m.hm(i) - d.p(i)/m.hp(i))
J = self.mesh.J
# Index and element value
locations = [(J-1,J-2), (J-1,J-1)]
# values = ( ra(self.J-1, self.a, self.d, self.mesh ),
# rb(self.J-1, self.a, self.d, self.mesh ) )
values = ( 0,
1 )
return tuple([list(x) for x in zip(locations, values)])
def _dirichlet_boundary_condition_vector_elements_left(self):
# Index and boundary condition vector elements for Dirichlet conditions
# NB these are always zero, unless BCs are time varying
location = [0]
value = [0]
return tuple([list(x) for x in zip(location, value)])
def _dirichlet_boundary_condition_vector_elements_right(self):
# Index and boundary condition vector elements for Dirichlet conditions
# NB these are always zero, unless BCs are time varying
location = [self.mesh.J-1]
value = [0]
return tuple([list(x) for x in zip(location, value)])
def alpha_matrix(self):
"""The alpha matrix is used to mask boundary conditions values for Dirichlet
conditions. Otherwise for a fully Neumann (or Robin) system it is equal to
the identity matrix."""
a1 = 0 if self.left_flux is None else 1
aJ = 0 if self.right_flux is None else 1
diagonals = np.ones(self.mesh.J)
diagonals[0] = a1
diagonals[-1] = aJ
return sparse.diags(diagonals, 0)
def beta_vector(self):
"""Returns the robin boundary condition vector."""
b = np.zeros(self.mesh.J)
if self.left_flux is not None:
left_bc_elements = self._robin_boundary_condition_vector_elements_left()
if self.right_flux is not None:
right_bc_elements = self._robin_boundary_condition_vector_elements_right()
if self.left_value is not None:
left_bc_elements = self._dirichlet_boundary_condition_vector_elements_left()
if self.right_value is not None:
right_bc_elements = self._dirichlet_boundary_condition_vector_elements_right()
bcs = left_bc_elements + right_bc_elements
for inx, value in bcs:
b[inx] = value
return b
def coefficient_matrix(self):
"""Returns the coefficient matrix which appears on the left hand side."""
J = self.mesh.J
k = self.k
m = self.mesh
a = self.a
d = self.d
padding = np.array([0])  # An element which is pushed off the edge of the matrix by the spdiags function
zero = padding  # Yes, it's the same. But this element is included in the matrix (semantic difference).
one = np.array([1])
if self.left_flux is not None:
left_bc_elements = self._robin_boundary_condition_matrix_elements_left()
if self.right_flux is not None:
right_bc_elements = self._robin_boundary_condition_matrix_elements_right()
if self.left_value is not None:
left_bc_elements = self._dirichlet_boundary_condition_matrix_elements_left()
if self.right_value is not None:
right_bc_elements = self._dirichlet_boundary_condition_matrix_elements_right()
# Use the functions to layout the matrix Note that the boundary
# condition elements are set to zero, they are filled in as
# the next step.
inx = np.array(range(1,J-1))
ra, rb, rc = self._interior_matrix_elements(inx)
# c1
upper = np.concatenate([padding, zero, rc ])
# b1 bJ
central = np.concatenate([zero, rb, zero ])
# aJ
lower = np.concatenate([ra, zero , padding])
A = sparse.spdiags([lower, central, upper], [-1,0,1], J, J).todok()
# Apply boundary conditions elements
bcs = left_bc_elements + right_bc_elements
for inx, value in bcs:
print(inx, value)
A[inx] = value
return dia_matrix(A)
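# Together, `coefficient_matrix`, `alpha_matrix` and `beta_vector` feed the
# theta-scheme update assembled in the __main__ block below:
#     (I - k*theta*alpha*M) w_new = (I + k*(1-theta)*alpha*M) w_old + beta
# with theta=1 giving backward Euler and theta=0.5 Crank-Nicolson.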
if __name__ == '__main__':
def geo_series(n, r, min_spacing=0.01):
total = 0
series = []
for i in range(n):
if i == 0:
total = 1
else:
total = total - total*r
series.append(total)
series = np.array(series)
norm = series / (np.max(series) - np.min(series))
series = norm - np.min(norm)
series = np.abs(series - 1)
series_diff = np.gradient(series)
inx = np.where(series_diff > min_spacing)
print(inx)
series_diff[inx] = min_spacing
series_reconstruct = np.cumsum(series_diff)
if np.min(series_reconstruct) != 0.0:
series_reconstruct = np.array([0] + series_reconstruct.tolist())
if np.max(series_reconstruct) != 1.0:
series_reconstruct = np.array(series_reconstruct.tolist() + [1])
return series_reconstruct
#faces = geo_series(200, 0.15)
#print faces.shape, faces
#faces = np.concatenate((np.array([-0.5]), np.sort(np.random.uniform(-0.5, 1, 50)), np.array([1])))
#faces = np.linspace(0, 1, 50)
faces = np.concatenate([np.linspace(0, 0.99, 50), np.logspace(np.log10(0.991), np.log10(1.0), 100)])
mesh = Mesh(faces)
a = CellVariable(1, mesh=mesh) # Advection velocity
d = CellVariable(1e-3, mesh=mesh) # Diffusion coefficient
k = 0.01 # Time step
theta = 1.0
left_value = 1.0
#left_flux = 0.0
right_flux = 0.0
# Initial conditions
w_init = 0.5*TH(mesh.cells, 0.4, 0)
w_init = np.sin(np.pi*mesh.cells)**100
w_init[0] = left_value
#w_init[0] = left_flux
# Source term
#s[int(np.median(range(mesh.J)))] = 0.0
model = AdvectionDiffusionModel(faces, a, d, k, discretisation="exponential")
model.set_boundary_conditions(left_value=1., right_value=0.)
#model.set_boundary_conditions(left_flux=left_flux, right_flux=left_flux)
M = model.coefficient_matrix()
alpha = model.alpha_matrix()
beta = model.beta_vector()
I = sparse.identity(model.mesh.J)
# Construct linear system from discretised matrices, A.x = d
A = I - k*theta*alpha*M
d = (I + k*(1-theta)*alpha*M)*w_init + beta
print "Peclet number", np.min(model.peclet_number()), np.max(model.peclet_number())
print "CFL condition", np.min(model.CFL_condition()), np.max(model.CFL_condition())
# matplotlib for movie export
# see, http://matplotlib.org/examples/animation/moviewriter.html
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
print(manimation.writers.__dict__)
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')
writer = FFMpegWriter(fps=15, metadata=metadata)
fig = plt.figure()
l0, = plt.plot([],[], 'r-', lw=1)
l1, = plt.plot([],[], 'k-o', markersize=4)
plt.xlim(np.min(faces), np.max(faces))
plt.ylim(0,1.2)
l1.set_data(mesh.cells,w_init)
# Analytical solution for Dirichlet boundary conditions
analytical_x = np.concatenate([np.array([np.min(faces)]), mesh.cells, np.array([np.max(faces)])])
analytical_solution = np.concatenate([np.array([model.left_value]), (np.exp(a/d) - np.exp(mesh.cells*a/d))/(np.exp(a/d)-1), np.array([model.right_value]) ])
#analytical_solution2 = np.concatenate([np.array([model.left_value]), (np.exp(a/model.d) - np.exp(mesh.cells*a/model.d))/(np.exp(a/model.d)-1), np.array([model.right_value]) ])
w = w_init
with writer.saving(fig, "fvm_advection_diffusion_1.mp4", 300):
for i in range(201):
#w = linalg.spsolve(A.tocsc(), M * w + s)
d = (I + k*(1-theta)*alpha*M)*w + beta
w = linalg.spsolve(A, d)
if i == 0:
l1.set_data(mesh.cells,w_init)
writer.grab_frame()
if i % 1 == 0 or i == 0:
l1.set_data(mesh.cells,w)
#l0.set_data(analytical_x, analytical_solution)
area = np.sum(w * mesh.cell_widths)
print "#%d; t=%g; area=%g:" % (i, i*k,area)
writer.grab_frame()
|
11566461
|
from unittest.mock import patch
from ..base import BaseTest
from ...models.Secret import SecretModel
from ...lib.Encryption import Encryption
from ...modules.carry import global_scope
class Test(BaseTest):
def setUp(self):
# Set secret vars
self.name = 'Vault'
self.url = 'https://github.com/gabfl/vault'
self.login = 'gab'
self.password = '<PASSWORD>'
self.notes = 'some notes'
# Create a secret
secret = SecretModel(name=self.name,
url=self.url,
login=self.login,
password=self.password,
notes=self.notes)
self.session.add(secret)
self.session.commit()
def test_get_by_name(self):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertEqual(secret.name, self.name)
self.assertEqual(secret.url, self.url)
def test_repr(self):
secret = self.session.query(SecretModel).get(1)
print(secret) # Required for codecov
self.assertIsInstance(secret, object)
def test_get_enc(self):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertIsInstance(secret.get_enc(), Encryption)
def test_get_enc_2(self):
with patch.dict(global_scope, {'enc': None}):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertRaises(RuntimeError, secret.get_enc)
def test_getter_salt(self):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertIsInstance(secret.salt, bytes)
def test_getter_password(self):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertEqual(secret.password, self.password)
def test_getter_notes(self):
secret = self.session.query(
SecretModel).filter_by(name=self.name).first()
self.assertEqual(secret.notes, self.notes)
|
11566465
|
from functools import wraps
from hashlib import sha256
from flask import render_template, url_for, flash, redirect, request, abort
from flask_login import login_user, current_user, logout_user, login_required
from application import app, bcrypt, serializer, mail
from application.models.general import *
from application.forms.general import *
from application.settings_secrets import *
# Create a decorator function
def abort_not_confirmed(f):
# When this function is used as a decorator, the view defined below it is
# passed in as the parameter "f"; @wraps preserves that view's name and
# docstring, and any positional and keyword arguments are forwarded to the
# original function unchanged
@wraps(f)
def decorator(*args, **kwargs):
if current_user.is_authenticated and not current_user.confirm:
return redirect(url_for('confirm_account'))
return f(*args, **kwargs)
# Return the wrapper that will be invoked in place of the decorated view
return decorator
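# Intended usage (the route below is hypothetical; any view decorated this way
# bounces unconfirmed users to the confirm-account page first):
#
#     @app.route('/dashboard')
#     @login_required
#     @abort_not_confirmed
#     def dashboard():
#         return render_template('dashboard.html')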
@app.context_processor
def send_sha_function():
return {'sha256': sha256, 'serializer': serializer}
# Log the user out
@app.route('/logout')
def logout():
# If the user is not logged in or has not confirmed their email, don't log them out
if not current_user.is_authenticated:
abort(404)
if not current_user.confirm:
abort(404)
logout_user()
return redirect(url_for('home'))
# Registration page
@app.route('/register', methods=['GET', 'POST'])
def register():
# If the user is already logged in, redirect to the dashboard
if current_user.is_authenticated:
return redirect(url_for('home'))
# Create form using Flask-WTF
form = RegistrationForm()
# If form was submitted successfully, create a user and redirect to confirm account page
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(
    form.password.data).decode('utf-8')
user = User(name=form.name.data, email=form.email.data,
            password=hashed_password)
db.session.add(user)
db.session.commit()
login_user(user, remember=True)
# Resend confirmation email, if there was an error, say so
try:
send_confirmation_email()
except Exception:
flash('There was an error sending a confirmation email.', 'danger')
return redirect(url_for('login'))
return render_template('account/register.html', form=form, page_title='Register')
# Login page
@app.route('/login', methods=['GET', 'POST'])
def login():
# If the user is already logged in, redirect to the dashboard
if current_user.is_authenticated:
return redirect(url_for('home'))
# Create form using Flask-WTF
form = LoginForm()
# If the form has been successfully submitted
if form.validate_on_submit():
# Check if the user exists and whether the bcrypt hash of the input and the user's hash match
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
# Log the user in and redirect to the dashboard if there is no "?next=" parameter in the URL
login_user(user, remember=form.remember.data)
return redirect(url_for('home')) if not request.args.get('next') else redirect(
request.args.get('next'))
# Flash that the login was unsuccessful (flash() is a built-in
# Flask helper that queues messages for retrieval in the template)
else:
flash('Login Unsuccessful. Please check your email and password', 'danger')
return render_template('account/login.html', form=form, page_title='Login')
# A route to send a password reset email in case the user forgets their password
@app.route('/forgot-password', methods=['GET', 'POST'])
def forgot_password():
# If the user is logged in, abort with 404 code
if current_user.is_authenticated:
abort(404)
form = ForgotPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = serializer.dumps(user.email, salt=SECRET_KEY + 'reset')
mail.send_message(subject='Reset your password.',
body=f'Click on the below link to reset your password\n{url_for("forgot_password_token", token=token, _external=True)}',
recipients=[user.email])
flash('An email has been sent to reset your password if the user exists.', 'info')
return redirect(url_for('forgot_password'))
return render_template('account/forgot-password.html', form=form, page_title='Forgot password')
# A route to change a user's password based on the token that was sent to their email
@app.route('/forgot-password/<token>', methods=['GET', 'POST'])
def forgot_password_token(token):
# If the user is logged in, abort with 404 code
if current_user.is_authenticated:
abort(404)
# Get the user's email based on the serializer's value
try:
user = User.query.filter_by(
email=serializer.loads(token, salt=SECRET_KEY + 'reset', max_age=7200)).first()
# If there was an issue, the token was invalid or expired, so abort with 404
except Exception:
abort(404)
# Initialize the form
form = ChangePasswordForm()
# If the form validated, then generate a password hash,
# change the user's password, then let the user know
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
flash('Your password has been changed.', 'success')
return redirect(url_for('login'))
# Show the HTML page
return render_template('account/change-password.html', form=form, page_title='Change your password')
# A route to confirm the user's account
@app.route('/confirm-account', methods=['GET', 'POST'])
def confirm_account():
# If the user is not logged in or the user has already confirmed, then return
if not current_user.is_authenticated:
abort(404)
if current_user.confirm:
return redirect(url_for('home'))
# Create the form which allows resending confirmation emails
form = ConfirmAccountForm()
# If the form was validated, generate a timed token, then send the message and let the user know
if form.validate_on_submit():
# Resend confirmation email, if there was an error, say so
try:
send_confirmation_email()
except Exception:
flash('There was an error sending a confirmation email.', 'danger')
return redirect(url_for('confirm_account'))
flash('The email has been sent to you.', 'success')
return redirect(url_for('confirm_account'))
return render_template('account/confirm-account.html', form=form, page_title='Confirm Account')
# Route to check a user's token
@app.route('/token/<token>')
def token(token):
# If the user has already confirmed, abort with
# 404, then if the user is logged in, log them out
if current_user.is_authenticated:
if current_user.confirm:
abort(404)
logout_user()
# Load the token, then check if the emails match and set that the user has confirmed
try:
email = serializer.loads(token, salt=SECRET_KEY, max_age=7200)
# Get the user from the token
user = User.query.filter_by(email=email).first_or_404()
# Log the user in
login_user(user)
# If the user has confirmed, abort with 404
if user.confirm:
abort(404)
# Set the user's confirm attribute to True, then commit
current_user.confirm = True
db.session.commit()
# Let the user know they have been confirmed
flash('Your email has been confirmed.', 'success')
return redirect(url_for('home'))
# If there was an error while loading the token, report it as expired/invalid
except Exception:
return render_template('errors/token_expired.html'), 403
# Delete a user's account
@app.route('/delete-account/')
def delete_account():
if not current_user.is_authenticated:
abort(404)
# Hash the same properties as were passed from the class page
sha_hash_contents = sha256(
f'{current_user.id}{current_user.email}{current_user.password}'.encode('utf-8')).hexdigest()
# if the hashes don't match, don't delete the account
if sha_hash_contents != request.args.get('hash'):
return render_template('errors/token_expired.html'), 403
# Get the user and delete the account
user = User.query.filter_by(id=current_user.id).first()
logout_user()
db.session.delete(user)
db.session.commit()
flash('Your account has been deleted.', 'success')
return redirect(url_for('register'))
def send_confirmation_email():
token = serializer.dumps(current_user.email, salt=SECRET_KEY)
mail.send_message(subject='Your Confirmation Email',
body=f'Click on the below link to confirm your account\n{url_for("token", token=token, _external=True)}',
recipients=[current_user.email])
|
11566473
|
import logging
import math
from random import random
def full_activation():
return 100
def jump_threshold():
return 55.0
def points_at(links, other):
"""Whether any of the links points at the other"""
return any(_.points_at(other) for _ in links)
class Slipnode(object):
# pylint: disable=too-many-instance-attributes
def __init__(self, name, depth, length=0.0):
self.conceptual_depth = depth
self.usual_conceptual_depth = depth
self.name = name
self.intrinsic_link_length = length
self.shrunk_link_length = length * 0.4
self.activation = 0.0
self.buffer = 0.0
self.clamped = False
self.bond_facet_factor = 0.0
self.category_links = []
self.instance_links = []
self.property_links = []
self.lateral_slip_links = []
self.lateral_non_slip_links = []
self.incoming_links = []
self.outgoing_links = []
self.codelets = []
self.clamp_bond_degree_of_association = False
def __str__(self):
return str(self.name)
def __repr__(self):
return f"<{self.__class__.__name__}: {self}>"
def reset(self):
self.buffer = 0.0
self.activation = 0.0
def clamp_high(self):
self.clamped = True
self.activation = 100.0
def unclamp(self):
self.clamped = False
def unclamped(self):
return not self.clamped
def set_conceptual_depth(self, depth):
logging.info(f"set depth to {depth} for {self}")
self.conceptual_depth = depth
def category(self):
if not len(self.category_links):
return None
link = self.category_links[0]
return link.destination
def fully_active(self):
"""Whether this node has full activation"""
float_margin = 0.00001
return self.activation > full_activation() - float_margin
def activate_fully(self):
"""Make this node fully active"""
self.activation = full_activation()
def bond_degree_of_association(self):
link_length = self.intrinsic_link_length
if (not self.clamp_bond_degree_of_association) and self.fully_active():
link_length = self.shrunk_link_length
result = math.sqrt(100 - link_length) * 11.0
return min(100.0, result)
def degree_of_association(self):
link_length = self.intrinsic_link_length
if self.fully_active():
link_length = self.shrunk_link_length
return 100.0 - link_length
def update(self):
act = self.activation
self.old_activation = act
self.buffer -= self.activation * (100.0 - self.conceptual_depth) / 100.0
def linked(self, other):
"""Whether the other is among the outgoing links"""
return points_at(self.outgoing_links, other)
def slip_linked(self, other):
"""Whether the other is among the lateral links"""
return points_at(self.lateral_slip_links, other)
def related(self, other):
"""Same or linked"""
return self == other or self.linked(other)
def apply_slippages(self, slippages):
for slippage in slippages:
if self == slippage.initial_descriptor:
return slippage.target_descriptor
return self
def get_related_node(self, relation):
"""Return the node that is linked to this node via this relation.
If no linked node is found, return None
"""
from .slipnet import slipnet
if relation == slipnet.identity:
return self
destinations = [
_.destination for _ in self.outgoing_links if _.label == relation
]
if destinations:
return destinations[0]
return None
def get_bond_category(self, destination):
"""Return the label of the link between these nodes if it exists.
If it does not exist return None
"""
from .slipnet import slipnet
result = None
if self == destination:
result = slipnet.identity
else:
for link in self.outgoing_links:
if link.destination == destination:
result = link.label
break
if result:
logging.info(f"Got bond: {result.name}")
else:
logging.info("Got no bond")
return result
def spread_activation(self):
if self.fully_active():
_ = [link.spread_activation() for link in self.outgoing_links]
def add_buffer(self):
if self.unclamped():
self.activation += self.buffer
self.activation = max(min(self.activation, 100), 0)
def can_jump(self):
if self.activation <= jump_threshold():
return False
if self.clamped:
return False
value = (self.activation / 100.0) ** 3
return random() < value
def jump(self):
if self.can_jump():
self.activate_fully()
def get_name(self):
if len(self.name) == 1:
return self.name.upper()
return self.name
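# Worked example of the association measures above: a node with
# intrinsic_link_length 40 shrinks to 40 * 0.4 = 16 when fully active, so
# degree_of_association() = 100 - 16 = 84 and
# bond_degree_of_association() = min(100, sqrt(100 - 16) * 11) = 100.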
|
11566493
|
import skia
import pytest
@pytest.fixture
def colorspace():
return skia.ColorSpace.MakeSRGB()
def test_ColorSpace_toProfile(colorspace):
assert isinstance(colorspace.toProfile(), skia.cms.ICCProfile)
def test_ColorSpace_gammaCloseToSRGB(colorspace):
assert isinstance(colorspace.gammaCloseToSRGB(), bool)
def test_ColorSpace_gammaIsLinear(colorspace):
assert isinstance(colorspace.gammaIsLinear(), bool)
def test_ColorSpace_isNumericalTransferFn(colorspace):
transfer_fn = skia.cms.TransferFunction([1, 0, 0, 0, 0, 0, 0.5])
assert isinstance(colorspace.isNumericalTransferFn(transfer_fn), bool)
def test_ColorSpace_toXYZD50(colorspace):
m = skia.cms.Matrix3x3([1, 0, 0, 0, 1, 0, 0, 0, 1])
assert isinstance(colorspace.toXYZD50(m), bool)
def test_ColorSpace_toXYZD50Hash(colorspace):
assert isinstance(colorspace.toXYZD50Hash(), int)
def test_ColorSpace_makeLinearGamma(colorspace):
assert isinstance(colorspace.makeLinearGamma(), skia.ColorSpace)
def test_ColorSpace_makeSRGBGamma(colorspace):
assert isinstance(colorspace.makeSRGBGamma(), skia.ColorSpace)
def test_ColorSpace_makeColorSpin(colorspace):
assert isinstance(colorspace.makeColorSpin(), skia.ColorSpace)
def test_ColorSpace_isSRGB(colorspace):
assert isinstance(colorspace.isSRGB(), bool)
|
11566529
|
apiAttachAvailable = u'API je k dispozici'
apiAttachNotAvailable = u'Nedostupn\xfd'
apiAttachPendingAuthorization = u'Nevyr\xedzen\xe1 autorizace'
apiAttachRefused = u'Odm\xedtnuto'
apiAttachSuccess = u'\xdaspech'
apiAttachUnknown = u'Nezn\xe1m\xfd'
budDeletedFriend = u'Odstranen ze seznamu pr\xe1tel'
budFriend = u'Pr\xedtel'
budNeverBeenFriend = u'Nikdy nebyl v seznamu pr\xe1tel'
budPendingAuthorization = u'Nevyr\xedzen\xe1 autorizace'
budUnknown = u'Nezn\xe1m\xfd'
cfrBlockedByRecipient = u'Hovor je blokov\xe1n pr\xedjemcem.'
cfrMiscError = u'Jin\xe1 chyba'
cfrNoCommonCodec = u'Neobvykl\xfd kodek'
cfrNoProxyFound = u'Server proxy nebyl nalezen.'
cfrNotAuthorizedByRecipient = u'Aktu\xe1ln\xed u\u017eivatel nen\xed pr\xedjemcem autorizov\xe1n.'
cfrRecipientNotFriend = u'Pr\xedjemce nen\xed pr\xedtel.'
cfrRemoteDeviceError = u'Chyba zvukov\xe9ho zar\xedzen\xed volan\xe9ho'
cfrSessionTerminated = u'Hovor ukoncen'
cfrSoundIOError = u'Chyba zvukov\xe9ho V/V'
cfrSoundRecordingError = u'Chyba nahr\xe1v\xe1n\xed zvuku'
cfrUnknown = u'Nezn\xe1m\xfd'
cfrUserDoesNotExist = u'U\u017eivatel/telefonn\xed c\xedslo neexistuje.'
cfrUserIsOffline = u'On nebo Ona je Offline'
chsAllCalls = u'Dialog ve star\xe9m stylu'
chsDialog = u'Dialog'
chsIncomingCalls = u'S v\xedce \xfacastn\xedky, je treba prijet\xed'
chsLegacyDialog = u'Dialog ve star\xe9m stylu'
chsMissedCalls = u'Dialog'
chsMultiNeedAccept = u'S v\xedce \xfacastn\xedky, je treba prijet\xed'
chsMultiSubscribed = u'S v\xedce \xfacastn\xedky'
chsOutgoingCalls = u'S v\xedce \xfacastn\xedky'
chsUnknown = u'Nezn\xe1m\xfd'
chsUnsubscribed = u'Odebran\xfd ze seznamu'
clsBusy = u'Obsazeno'
clsCancelled = u'Zru\u0161eno'
clsEarlyMedia = u'Prehr\xe1v\xe1n\xed m\xe9di\xed pred prijet\xedm hovoru'
clsFailed = u'Bohu\u017eel, ne\xfaspe\u0161n\xe9 vol\xe1n\xed!'
clsFinished = u'Ukonceno'
clsInProgress = u'Prob\xedh\xe1 hovor'
clsLocalHold = u'Pridr\u017eeno m\xedstne'
clsMissed = u'Zme\u0161kan\xfd hovor'
clsOnHold = u'Pridr\u017een'
clsRefused = u'Odm\xedtnuto'
clsRemoteHold = u'Pridr\u017eeno vzd\xe1len\xfdm u\u017eivatelem'
clsRinging = u'vol\xe1te'
clsRouting = u'Smerov\xe1n\xed'
clsTransferred = u'Nezn\xe1m\xfd'
clsTransferring = u'Nezn\xe1m\xfd'
clsUnknown = u'Nezn\xe1m\xfd'
clsUnplaced = u'Nekonal se'
clsVoicemailBufferingGreeting = u'Ukl\xe1d\xe1n\xed pozdravu do vyrovn\xe1vac\xed pameti'
clsVoicemailCancelled = u'Hlasov\xe1 zpr\xe1va byla zru\u0161ena'
clsVoicemailFailed = u'Hlasov\xe1 zpr\xe1va ne\xfaspe\u0161n\xe1'
clsVoicemailPlayingGreeting = u'Prehr\xe1v\xe1n\xed pozdravu'
clsVoicemailRecording = u'Nahr\xe1v\xe1n\xed hlasov\xe9 zpr\xe1vy'
clsVoicemailSent = u'Hlasov\xe1 zpr\xe1va byla odesl\xe1na'
clsVoicemailUploading = u'Odes\xedl\xe1n\xed hlasov\xe9 zpr\xe1vy'
cltIncomingP2P = u'Pr\xedchoz\xed hovor v s\xedti P2P'
cltIncomingPSTN = u'Pr\xedchoz\xed telefonn\xed hovor'
cltOutgoingP2P = u'Odchoz\xed hovor v s\xedti P2P'
cltOutgoingPSTN = u'Odchoz\xed telefonn\xed hovor'
cltUnknown = u'Nezn\xe1m\xfd'
cmeAddedMembers = u'Byli prizv\xe1ni clenov\xe9.'
cmeCreatedChatWith = u'Byl vytvoren chat s v\xedce \xfacastn\xedky.'
cmeEmoted = u'Nezn\xe1m\xfd'
cmeLeft = u'Nekdo opustil chat nebo nebyl pribr\xe1n.'
cmeSaid = u'Rekl(a)'
cmeSawMembers = u'\xdacastn\xedk chatu videl ostatn\xed.'
cmeSetTopic = u'Zmena t\xe9matu'
cmeUnknown = u'Nezn\xe1m\xfd'
cmsRead = u'Precteno'
cmsReceived = u'Prijato'
cmsSending = u'Odes\xedl\xe1m...'
cmsSent = u'Odesl\xe1no'
cmsUnknown = u'Nezn\xe1m\xfd'
conConnecting = u'Spojuji'
conOffline = u'Offline'
conOnline = u'Online'
conPausing = u'Pozastavov\xe1n\xed'
conUnknown = u'Nezn\xe1m\xfd'
cusAway = u'Nepr\xedtomn\xfd'
cusDoNotDisturb = u'Neru\u0161it'
cusInvisible = u'Neviditeln\xfd'
cusLoggedOut = u'Offline'
cusNotAvailable = u'Nedostupn\xfd'
cusOffline = u'Offline'
cusOnline = u'Online'
cusSkypeMe = u'Skype Me'
cusUnknown = u'Nezn\xe1m\xfd'
cvsBothEnabled = u'Odes\xedl\xe1n\xed a pr\xedjem videa'
cvsNone = u'Bez videa'
cvsReceiveEnabled = u'Pr\xedjem videa'
cvsSendEnabled = u'Odes\xedl\xe1n\xed videa'
cvsUnknown = u''
grpAllFriends = u'V\u0161ichni pr\xe1tel\xe9'
grpAllUsers = u'V\u0161ichni u\u017eivatel\xe9'
grpCustomGroup = u'Vlastn\xed'
grpOnlineFriends = u'Pr\xe1tel\xe9 online'
grpPendingAuthorizationFriends = u'Nevyr\xedzen\xe1 autorizace'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'Ned\xe1vno kontaktovan\xed u\u017eivatel\xe9'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Pr\xe1tel\xe9 pou\u017e\xedvaj\xedc\xed Skype'
grpSkypeOutFriends = u'Pr\xe1tel\xe9 pou\u017e\xedvaj\xedc\xed SkypeOut'
grpUngroupedFriends = u'Nezarazen\xed pr\xe1tel\xe9'
grpUnknown = u'Nezn\xe1m\xfd'
grpUsersAuthorizedByMe = u'Autorizovan\xed'
grpUsersBlockedByMe = u'Blokovan\xed'
grpUsersWaitingMyAuthorization = u'Cekaj\xedc\xed na autorizaci'
leaAddDeclined = u'Prid\xe1n\xed bylo odm\xedtnuto.'
leaAddedNotAuthorized = u'Prid\xe1van\xfd mus\xed b\xfdt autorizov\xe1n.'
leaAdderNotFriend = u'Prid\xe1vaj\xedc\xed mus\xed b\xfdt pr\xedtel.'
leaUnknown = u'Nezn\xe1m\xfd'
leaUnsubscribe = u'Odebran\xfd ze seznamu'
leaUserIncapable = u'U\u017eivatel je nezpusobil\xfd.'
leaUserNotFound = u'U\u017eivatel nebyl nalezen.'
olsAway = u'Nepr\xedtomn\xfd'
olsDoNotDisturb = u'Neru\u0161it'
olsNotAvailable = u'Nedostupn\xfd'
olsOffline = u'Offline'
olsOnline = u'Online'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Nezn\xe1m\xfd'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'\u017dena'
usexMale = u'Mu\u017e'
usexUnknown = u'Nezn\xe1m\xfd'
vmrConnectError = u'Chyba pripojen\xed'
vmrFileReadError = u'Chyba cten\xed souboru'
vmrFileWriteError = u'Chyba z\xe1pisu souboru'
vmrMiscError = u'Jin\xe1 chyba'
vmrNoError = u'Bez chyby'
vmrNoPrivilege = u'Nem\xe1te opr\xe1vnen\xed k hlasov\xe9 schr\xe1nce.'
vmrNoVoicemail = u'Takov\xe1 hlasov\xe1 schr\xe1nka neexistuje.'
vmrPlaybackError = u'Chyba prehr\xe1v\xe1n\xed'
vmrRecordingError = u'Chyba nahr\xe1v\xe1n\xed'
vmrUnknown = u'Nezn\xe1m\xfd'
vmsBlank = u'Pr\xe1zdn\xe9'
vmsBuffering = u'Nac\xedt\xe1n\xed'
vmsDeleting = u'Odstranov\xe1n\xed'
vmsDownloading = u'Stahov\xe1n\xed'
vmsFailed = u'Ne\xfaspe\u0161n\xe9'
vmsNotDownloaded = u'Nesta\u017eeno.'
vmsPlayed = u'Prehr\xe1no'
vmsPlaying = u'Prehr\xe1v\xe1n\xed'
vmsRecorded = u'Nahr\xe1no'
vmsRecording = u'Nahr\xe1v\xe1n\xed hlasov\xe9 zpr\xe1vy'
vmsUnknown = u'Nezn\xe1m\xfd'
vmsUnplayed = u'Neprehr\xe1no'
vmsUploaded = u'Odesl\xe1no'
vmsUploading = u'Odes\xedl\xe1n\xed'
vmtCustomGreeting = u'Vlastn\xed pozdrav'
vmtDefaultGreeting = u'V\xfdchoz\xed pozdrav'
vmtIncoming = u'pr\xedchoz\xed hlasov\xe1 zpr\xe1va'
vmtOutgoing = u'Odchoz\xed'
vmtUnknown = u'Nezn\xe1m\xfd'
vssAvailable = u'Dostupn\xfd'
vssNotAvailable = u'Nedostupn\xfd'
vssPaused = u'Pozastaveno'
vssRejected = u'Odm\xedtnuto'
vssRunning = u'Prob\xedh\xe1'
vssStarting = u'Start'
vssStopping = u'Ukoncov\xe1n\xed'
vssUnknown = u'Nezn\xe1m\xfd'
|
11566566
|
import sys
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
sender = sys.argv[1]
password = sys.argv[2]
receiver = sys.argv[3]
message = sys.argv[4]
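# Example invocation (hypothetical script name and values):
#   python send_alert.py sender@gmail.com app-password moderator@example.com '<b>Someone joined</b>'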
port = 465
smtp_server = 'smtp.gmail.com'
try:
email = MIMEMultipart('alternative')
email['Subject'] = 'Jitsi Party Alert'
email['From'] = sender
email['To'] = receiver
email.attach(MIMEText(message, 'html'))
server = smtplib.SMTP_SSL(smtp_server, port)
server.login(sender, password)
server.sendmail(sender, receiver, email.as_string())
except Exception as exc:
    # `email` may not exist if construction failed, so report the exception itself
    print('Something went wrong sending a message to the moderator:', exc)
|
11566605
|
import copy
###################################################################################################
# VoicelabDataModel: Abstracts the data management for the system from control and view logic.
# currently this data is stored only temporarily in a set of dictionaries (making it almost the same
# as the data controller), but this can be changed to more persistent storage without needing to
# change the front end.
# TODO: This can be simplified down to basic CRUD (create, read, update, delete) operations with
# everything else being handled by the controller
###################################################################################################
class VoicelabDataModel:
def __init__(self):
# a set of default values for each of the configurable settings
self.default_settings = {}
# a set of default functions that are automatically checked when you start the program
self.default_functions = {}
# the set of functions that are available
self.loaded_functions = {}
# the set of voices that are available
self.loaded_voices = {}
# the set of voices that will be worked on
self.active_voices = {}
# the set of functions that will be run on the active voices
self.active_functions = {}
# the settings as they will be passed to each of the functions
self.active_settings = {}
# the settings as they have been changed within the functions
self.active_computed_settings = {}
# the set of results for the last run set of functions
self.active_results = {}
#
self.active_results_output = {}
self.active_settings_output = {}
###############################################################################################
# load_function:
# + fn_name: name of the function we are loading
# + fn_node: WARIO compatible node containing the functionality
# + default: boolean value whether this is a default function or not
###############################################################################################
def load_function(self, fn_name, fn_node, default=False):
"""
Args:
fn_name:
fn_node:
default:
"""
self.loaded_functions[fn_name] = fn_node
self.default_settings[fn_name] = {}
self.active_settings[fn_name] = {}
for setting in fn_node.args:
self.default_settings[fn_name][setting] = fn_node.args[setting]
self.active_settings[fn_name][setting] = fn_node.args[setting]
if default:
self.default_functions[fn_name] = self.loaded_functions[fn_name]
self.active_functions[fn_name] = self.loaded_functions[fn_name]
return self.loaded_functions
###############################################################################################
# activate_function: activated functions will run when processing
# + fn_name: Name of the function to activate
###############################################################################################
def activate_function(self, fn_name):
"""
Args:
fn_name:
"""
self.active_functions[fn_name] = self.loaded_functions[fn_name]
###############################################################################################
# deactivate_function: deactivated functions will not run when processing
# + fn_name: Name of the function to deactivate
###############################################################################################
def deactivate_function(self, fn_name):
"""
Args:
fn_name:
"""
if fn_name in self.active_functions:
del self.active_functions[fn_name]
###############################################################################################
# load_voice: makes the voice available to the system
# + voice: parselmouth Sound object
# + file_path: path to the file in the filesystem. Used for indexing
###############################################################################################
def load_voice(self, voice, file_path):
"""
Args:
voice:
file_path:
"""
self.loaded_voices[file_path] = voice
return self.loaded_voices
###############################################################################################
# unload_voice: removes a voice from the system
# + file_path: path to the file in the filesystem. Used for indexing
###############################################################################################
def unload_voice(self, file_path):
"""
Args:
file_path:
"""
if file_path in self.loaded_voices:
del self.loaded_voices[file_path]
return self.loaded_voices
###############################################################################################
# activate_voices: activated voices will be processed using active functions
# + file_path: path to the file in the filesystem. Used for indexing
###############################################################################################
def activate_voices(self, file_paths):
"""
Args:
file_paths:
"""
self.active_voices = file_paths
return self.active_voices
###############################################################################################
# deactivate_voice: inactive voices will not be processed
# + file_path: path to the file in the filesystem. Used for indexing
###############################################################################################
def deactivate_voice(self, file_path):
"""
Args:
file_path:
"""
if file_path in self.active_voices:
del self.active_voices[file_path]
return self.active_voices
###############################################################################################
# set_setting: configure a setting to a value
# + fn_name: name of the function this setting is a part of
# setting_name: name of the setting to configure
# value: name of the value to set the setting to
###############################################################################################
def set_setting(self, fn_name, setting_name, value):
# store the new value in the active settings for this function
"""
Args:
fn_name:
setting_name:
value:
"""
self.active_settings[fn_name][setting_name] = value
return self.active_settings
def set_computed_setting(self, fn_name, setting_name, value):
"""
Args:
fn_name:
setting_name:
value:
"""
if fn_name not in self.active_computed_settings:
self.active_computed_settings[fn_name] = {}
self.active_computed_settings[fn_name][setting_name] = value
###############################################################################################
# swap_active_settings: used to avoid iterating over all settings when changing many
# + settings: replaces all settings with those contained in this dictionary
###############################################################################################
def swap_active_settings(self, settings):
"""
Args:
settings:
"""
self.active_settings = settings
return self.active_settings
###############################################################################################
# swap_active_functions: used to avoid iterating over all functions when changing many
# + functions: replaces all functions with those contained in this dictionary
###############################################################################################
def swap_active_functions(self, functions):
"""
Args:
functions:
"""
self.active_functions = functions
return self.active_functions
###############################################################################################
# reset_setting: resets the specified setting to its default value
# + fn_name: name of the function this setting is a part of
# + setting_name: name of the setting to configure
###############################################################################################
def reset_setting(self, fn_name, setting_name):
"""
Args:
fn_name:
setting_name:
"""
self.active_settings[fn_name][setting_name] = self.default_settings[fn_name][
setting_name
]
return self.active_settings[fn_name][setting_name]
###############################################################################################
# reset_setting: remove the specified setting
# + fn_name: name of the function this setting is a part of
# + setting_name: name of the setting to configure
###############################################################################################
def remove_setting(self, fn_name, setting):
"""
Args:
fn_name:
setting:
"""
del self.default_settings[fn_name][setting]
del self.active_settings[fn_name][setting]
return self.default_settings
###############################################################################################
# load_result: store the results of running the processing pipeline
# + file_path: path to the voice file that was processed
# + fn: name of the function that was processed
# + results: values for the results from this function
# TODO: + settings: values for the settings used with this function
###############################################################################################
def load_result(self, file_path, fn, results):
"""
Args:
file_path:
fn:
results:
"""
if file_path not in self.active_results:
self.active_results[file_path] = {}
self.active_results[file_path][fn] = results
return self.active_results
###############################################################################################
# reset_results: empty the results and makes it ready for the next run
###############################################################################################
def reset_results(self):
self.active_results = {}
###############################################################################################
# Resets all functions and settings to their default values
###############################################################################################
def reset_all_defaults(self):
self.active_functions = copy.copy(self.default_functions)
# use deepcopy so later edits to active settings don't mutate the stored defaults
self.active_settings = copy.deepcopy(self.default_settings)
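# A minimal usage sketch. `StubNode` is a hypothetical stand-in for a WARIO
# node; the only interface this model relies on is the `args` dict of defaults.
if __name__ == "__main__":
    class StubNode:
        args = {"pitch_floor": 50}

    model = VoicelabDataModel()
    model.load_function("Measure Pitch", StubNode(), default=True)
    model.set_setting("Measure Pitch", "pitch_floor", 75)
    model.reset_setting("Measure Pitch", "pitch_floor")
    assert model.active_settings["Measure Pitch"]["pitch_floor"] == 50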
|
11566612
|
from django.contrib.admin.sites import site
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase
from ralph.lib.permissions.tests._base import PermissionsTestMixin
from ralph.lib.permissions.tests.models import Article
class PermissionPerFieldAdminMixinTestCase(PermissionsTestMixin, TestCase):
def setUp(self):
self._create_users_and_articles()
self.admin = site._registry[Article]
self.request_factory = RequestFactory()
self._all_fields = self.admin.fieldsets[0][1]['fields'][:]
self.maxDiff = None
def _get_list_display(self, user):
request = self.request_factory.get('/')
request.user = user
return self.admin.get_list_display(request)
def _get_fieldsets(self, user, obj=None):
request = self.request_factory.get('/')
request.user = user
return self.admin.get_fieldsets(request, obj)
def test_admin_list_display_for_superuser(self):
list_display = self._get_list_display(self.superuser)
self.assertEqual(list_display, self.admin.list_display)
def test_admin_list_display_without_model_field(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_title_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_title_field'
))
list_display = self._get_list_display(self.user2)
user_list_display = self.admin.list_display[:]
user_list_display.remove('title')
self.assertEqual(list_display, user_list_display)
def test_admin_list_display_without_model_callable(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_content_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_content_field'
))
list_display = self._get_list_display(self.user2)
user_list_display = self.admin.list_display[:]
# permission to sample_non_model_field_with_permissions is set based
# on content field
user_list_display.remove('content')
user_list_display.remove('sample_non_model_field_with_permissions')
self.assertEqual(list_display, user_list_display)
def test_admin_list_display_without_admin_field(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_custom_field_1_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_custom_field_1_field'
))
list_display = self._get_list_display(self.user2)
user_list_display = self.admin.list_display[:]
# permission to sample_admin_field_with_permissions is set based
# on custom_field_1 field
user_list_display.remove('custom_field_1')
user_list_display.remove('sample_admin_field_with_permissions')
self.assertEqual(list_display, user_list_display)
def test_admin_get_fieldsets_for_superuser(self):
fieldsets = self._get_fieldsets(self.superuser)[0][1]['fields']
self.assertEqual(fieldsets, self._all_fields)
def test_admin_get_fieldsets_without_model_field(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_title_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_title_field'
))
fieldsets = self._get_fieldsets(self.user2)[0][1]['fields']
self._all_fields.remove('title')
self.assertEqual(fieldsets, self._all_fields)
def test_admin_get_fieldsets_without_model_callable(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_content_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_content_field'
))
fieldsets = self._get_fieldsets(self.user2)[0][1]['fields']
# permission to sample_non_model_field_with_permissions is set based
# on content field
self._all_fields.remove('content')
self._all_fields.remove('sample_non_model_field_with_permissions')
self.assertEqual(fieldsets, self._all_fields)
def test_admin_get_fieldsets_without_admin_field(self):
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='view_article_custom_field_1_field'
))
self.user2.user_permissions.remove(Permission.objects.get(
content_type=ContentType.objects.get_for_model(Article),
codename='change_article_custom_field_1_field'
))
fieldsets = self._get_fieldsets(self.user2)[0][1]['fields']
# permission to sample_admin_field_with_permissions is set based
# on custom_field_1 field
self._all_fields.remove('custom_field_1')
self._all_fields.remove('sample_admin_field_with_permissions')
self.assertEqual(fieldsets, self._all_fields)
|
11566630
|
import collections
from typing import Callable, Optional, Tuple
import numpy as np
import torch
from torch.utils.data import Dataset, Subset, random_split
from torchvision.datasets import CIFAR10, CIFAR100, MNIST
class NoisyMNIST(MNIST):
"""Extends `torchvision.datasets.MNIST
<https://pytorch.org/docs/stable/torchvision/datasets.html#mnist>`_
class by corrupting the labels with a fixed probability
"""
num_classes = 10
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
corrupt_prob: float = 0.0,
noise_seed: Optional[int] = None,
) -> None:
super().__init__(
root=root,
train=train,
transform=transform,
target_transform=target_transform,
download=download,
)
self.corrupt_prob = corrupt_prob
self.noise_seed = noise_seed
self._add_label_noise()
def _add_label_noise(self) -> None:
if self.corrupt_prob < 0 or self.corrupt_prob > 1:
raise ValueError(f"Invalid noise probability: {self.corrupt_prob}")
if self.corrupt_prob == 0:
return
if self.noise_seed is not None:
np.random.seed(self.noise_seed)
p = np.ones((len(self.targets), self.num_classes))
p = p * (self.corrupt_prob / (self.num_classes - 1))
p[np.arange(len(self.targets)), self.targets] = 1 - self.corrupt_prob
for i in range(len(self.targets)):
self.targets[i] = np.random.choice(self.num_classes, p=p[i])
class NoisyCIFAR10(CIFAR10):
"""Extends `torchvision.datasets.CIFAR10
<https://pytorch.org/docs/stable/torchvision/datasets.html#cifar>`_
class by corrupting the labels with a fixed probability
"""
num_classes = 10
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
corrupt_prob: float = 0.0,
noise_seed: Optional[int] = None,
) -> None:
super().__init__(
root=root,
train=train,
transform=transform,
target_transform=target_transform,
download=download,
)
self.corrupt_prob = corrupt_prob
self.noise_seed = noise_seed
self._add_label_noise()
def _add_label_noise(self) -> None:
if self.corrupt_prob < 0 or self.corrupt_prob > 1:
raise ValueError(f"Invalid noise probability: {self.corrupt_prob}")
if self.corrupt_prob == 0:
return
if self.noise_seed is not None:
np.random.seed(self.noise_seed)
p = np.ones((len(self.targets), self.num_classes))
p = p * (self.corrupt_prob / (self.num_classes - 1))
p[np.arange(len(self.targets)), self.targets] = 1 - self.corrupt_prob
for i in range(len(self.targets)):
self.targets[i] = np.random.choice(self.num_classes, p=p[i])
class NoisyCIFAR100(CIFAR100):
"""Extends `torchvision.datasets.CIFAR100
<https://pytorch.org/docs/stable/torchvision/datasets.html#cifar>`_
class by corrupting the labels with a fixed probability
"""
num_classes = 100
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
corrupt_prob: float = 0.0,
noise_seed: Optional[int] = None,
) -> None:
super().__init__(
root=root,
train=train,
transform=transform,
target_transform=target_transform,
download=download,
)
self.corrupt_prob = corrupt_prob
self.noise_seed = noise_seed
self._add_label_noise()
def _add_label_noise(self) -> None:
if self.corrupt_prob < 0 or self.corrupt_prob > 1:
raise ValueError(f"Invalid noise probability: {self.corrupt_prob}")
if self.corrupt_prob == 0:
return
if self.noise_seed is not None:
np.random.seed(self.noise_seed)
p = np.ones((len(self.targets), self.num_classes))
p = p * (self.corrupt_prob / (self.num_classes - 1))
p[np.arange(len(self.targets)), self.targets] = 1 - self.corrupt_prob
for i in range(len(self.targets)):
self.targets[i] = np.random.choice(self.num_classes, p=p[i])
def split_dataset(dataset: Dataset, split: float, seed: int) -> Tuple[Subset, Subset]:
"""Splits dataset into a train / val set based on a split value and seed
Args:
dataset: dataset to split
split: The proportion of the dataset to include in the validation split,
must be between 0 and 1.
seed: Seed used to generate the split
Returns:
Subsets of the input dataset
"""
# Verify that the dataset is Sized
if not isinstance(dataset, collections.abc.Sized):
raise ValueError("Dataset is not Sized!")
if not (0 <= split <= 1):
raise ValueError(f"Split value must be between 0 and 1. Value: {split}")
val_length = int(len(dataset) * split)
train_length = len(dataset) - val_length
splits = random_split(
dataset,
[train_length, val_length],
generator=torch.Generator().manual_seed(seed),
)
return splits
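# --- Usage sketch (not part of the module): building a label-noised CIFAR-10
# train set and carving out a validation subset. The corrupt_prob, seed, and
# 90/10 split below are illustrative assumptions, not prescribed values.
if __name__ == "__main__":
    noisy_train = NoisyCIFAR10(
        root="./data", train=True, download=True,
        corrupt_prob=0.1, noise_seed=42,
    )
    train_set, val_set = split_dataset(noisy_train, split=0.1, seed=42)
    print(len(train_set), len(val_set))  # 45000 5000 for the 50k train split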
|
11566690
|
import json as json_handler
from balebot.models.base_models.jsonable import Jsonable
from balebot.models.constants.errors import Error
class Peer(Jsonable):
def __init__(self, peer_type, peer_id, access_hash):
self.type = str(peer_type)
self.peer_id = str(peer_id)
self.access_hash = str(access_hash)
def get_json_object(self):
data = {
"$type": self.type,
"id": self.peer_id,
"accessHash": self.access_hash,
}
return data
def get_json_str(self):
return json_handler.dumps(self.get_json_object())
@classmethod
def load_from_json(cls, json):
if isinstance(json, dict):
json_dict = json
elif isinstance(json, str):
json_dict = json_handler.loads(json)
else:
raise ValueError(Error.unacceptable_json)
peer_type = json_dict.get('$type', None)
peer_id = json_dict.get('id', None)
access_hash = json_dict.get('accessHash', None)
if (not peer_type) or (not peer_id) or (not access_hash):
raise ValueError(Error.none_or_invalid_attribute)
return cls(peer_type=peer_type, peer_id=peer_id, access_hash=access_hash)
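# --- Usage sketch (illustrative values only): round-tripping a Peer through
# its JSON string representation.
if __name__ == "__main__":
    peer = Peer(peer_type="User", peer_id=1234, access_hash="somehash")
    restored = Peer.load_from_json(peer.get_json_str())
    assert restored.peer_id == peer.peer_id
    assert restored.access_hash == peer.access_hash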
|
11566691
|
from seamless.highlevel import Context, Cell, Transformer
import inspect
def hhelp(obj):
# Similar to doing obj? in IPython
print("*" * 80)
print(inspect.getdoc(obj))
print("*" * 80)
print("###", 1)
ctx = Context()
ctx.help = "This is an example help"
hhelp(ctx)
print(ctx.help.value)
ctx.compute()
hhelp(ctx)
print(ctx.help.value)
print()
print("###", 2)
ctx.help.ctx.a = "A markdown doc to document the main ctx"
ctx.help.ctx.a.mimetype = "markdown"
ctx.help.ctx.b = "A HTML doc to document the main ctx"
ctx.help.ctx.b.mimetype = "html"
print(ctx.help.ctx.a.value)
print(ctx.help.ctx.b.value)
ctx.compute()
print(ctx.help.ctx.a.value)
print(ctx.help.ctx.b.value)
print()
print("###", 3)
ctx.help.mount("/tmp/help-example.txt", authority="cell", persistent=False)
ctx.help.ctx.a.mount("/tmp/help-a.md", authority="cell", persistent=False)
ctx.help.ctx.b.mount("/tmp/help-b.html", authority="cell", persistent=False)
ctx.compute()
print()
print("###", 4)
ctx.help.share("help-example.txt")
ctx.help.ctx.a.share("help-a.md")
ctx.help.ctx.b.share("help-b.html")
ctx.compute()
print()
print("###", 5)
ctx.subctx = Context()
ctx.subctx.help = "This is documentation for a subcontext"
ctx.subctx.help.ctx.doc1 = "More documentation for a subcontext"
ctx.mycell = 123
ctx.mycell.help = "This is documentation for my cell"
ctx.mycell.help.ctx.doc1 = "More documentation for my cell"
ctx.tf = Transformer()
ctx.tf.help = "This is documentation for my transformer"
ctx.tf.help.ctx.doc1 = "More documentation for my transformer"
ctx.compute()
print(ctx.subctx.help.value)
print(ctx.subctx.help.ctx.doc1.value)
print(ctx.mycell.help.value)
print(ctx.mycell.help.ctx.doc1.value)
print(ctx.tf.help.value)
print(ctx.tf.help.ctx.doc1.value)
print()
print("###", 6)
import numpy as np
from matplotlib import pyplot as plt
plt.scatter([0, 1, 2, 3], [12, 7, 5, 6])
from io import BytesIO
f = BytesIO()
plt.savefig(f)
png = f.getvalue()
ctx.help.ctx.pictures = Context()
pic1 = Cell(celltype="bytes")
ctx.help.ctx.pictures.pic1 = pic1
ctx.help.ctx.pictures.pic1_txt = "Description of picture 1"
pic1.set(png)
pic1.mount("/tmp/pic1.png", authority="cell", persistent=False)
ctx.compute()
print()
print("###", 7)
ctx.help.ctx.pictures.pic1.share("help/pic1.png")
ctx.help.ctx.pictures.pic1.mimetype = "png"
ctx.help.ctx.pictures.pic1_html = """
<title>Picture 1</title>
<h3>Picture 1</h3>
<div>
<img src="./pic1.png"></img>
</div>
<div>
This is picture 1
</div>
"""
ctx.help.ctx.pictures.pic1_html.mimetype = "html"
ctx.help.ctx.pictures.pic1_html.share("help/pic1.html")
ctx.compute()
print()
print("###", 8)
def calc_help(help_language):
if help_language == "English":
return "Help in English"
elif help_language == "French":
return "Aide en Français"
ctx.calc_help = calc_help
ctx.calc_help.help_language = "English"
ctx.help.ctx.multi_lingual = Cell()
ctx.help.ctx.multi_lingual.mimetype = "html"
ctx.help.ctx.multi_lingual.connect_from(ctx.calc_help)
ctx.compute()
print(ctx.help.ctx.multi_lingual.value)
ctx.calc_help.help_language = "French"
ctx.compute()
print(ctx.help.ctx.multi_lingual.value)
ctx.help.ctx.multi_lingual.share("help/multi-lingual.html")
ctx.compute()
|
11566700
|
from setuptools import setup, find_packages
setup(
name="MyPackage",
version="0.0.1",
url="https://github.com/myorg/mypackage.git",
author="<NAME>",
author_email="<EMAIL>",
description="What my package does",
packages=["mypackage"],
install_requires=["pytest >= 6.1.2", "black >= 20.8b1"],
)
|
11566708
|
from roboschool.scene_abstract import SingleRobotEmptyScene
from roboschool.gym_mujoco_xml_env import RoboschoolMujocoXmlEnv
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import os, sys
class RoboschoolInvertedDoublePendulum(RoboschoolMujocoXmlEnv):
def __init__(self):
RoboschoolMujocoXmlEnv.__init__(self, 'inverted_double_pendulum.xml', 'cart', action_dim=1, obs_dim=9)
def create_single_player_scene(self):
return SingleRobotEmptyScene(gravity=9.8, timestep=0.0165, frame_skip=1)
def robot_specific_reset(self):
self.pole2 = self.parts["pole2"]
self.slider = self.jdict["slider"]
self.j1 = self.jdict["hinge"]
self.j2 = self.jdict["hinge2"]
u = self.np_random.uniform(low=-.1, high=.1, size=[2])
self.j1.reset_current_position(float(u[0]), 0)
self.j2.reset_current_position(float(u[1]), 0)
self.j1.set_motor_torque(0)
self.j2.set_motor_torque(0)
def apply_action(self, a):
assert( np.isfinite(a).all() )
self.slider.set_motor_torque( 200*float(np.clip(a[0], -1, +1)) )
def calc_state(self):
theta, theta_dot = self.j1.current_position()
gamma, gamma_dot = self.j2.current_position()
x, vx = self.slider.current_position()
self.pos_x, _, self.pos_y = self.pole2.pose().xyz()
assert( np.isfinite(x) )
return np.array([
x, vx,
self.pos_x,
np.cos(theta), np.sin(theta), theta_dot,
np.cos(gamma), np.sin(gamma), gamma_dot,
])
def _step(self, a):
self.apply_action(a)
self.scene.global_step()
state = self.calc_state() # sets self.pos_x self.pos_y
# upright position: 0.6 (one pole) + 0.6 (second pole) * 0.5 (middle of second pole) = 0.9
# using <site> tag in original xml, upright position is 0.6 + 0.6 = 1.2, difference +0.3
dist_penalty = 0.01 * self.pos_x ** 2 + (self.pos_y + 0.3 - 2) ** 2
# v1, v2 = self.model.data.qvel[1:3] TODO when this fixed https://github.com/bulletphysics/bullet3/issues/1040
#vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2
vel_penalty = 0
alive_bonus = 10
done = self.pos_y + 0.3 <= 1
self.rewards = [float(alive_bonus), float(-dist_penalty), float(-vel_penalty)]
self.frame += 1
self.done += done # 2 == 1+True
self.reward += sum(self.rewards)
self.HUD(state, a, done)
return state, sum(self.rewards), done, {}
def camera_adjust(self):
self.camera.move_and_look_at(0,1.2,1.2, 0,0,0.5)
class RoboschoolInvertedPendulum(RoboschoolMujocoXmlEnv):
swingup = False
def __init__(self):
RoboschoolMujocoXmlEnv.__init__(self, 'inverted_pendulum.xml', 'cart', action_dim=1, obs_dim=5)
def create_single_player_scene(self):
return SingleRobotEmptyScene(gravity=9.8, timestep=0.0165, frame_skip=1)
def robot_specific_reset(self):
self.pole = self.parts["pole"]
self.slider = self.jdict["slider"]
self.j1 = self.jdict["hinge"]
u = self.np_random.uniform(low=-.1, high=.1)
self.j1.reset_current_position( u if not self.swingup else 3.1415+u , 0)
self.j1.set_motor_torque(0)
def apply_action(self, a):
assert( np.isfinite(a).all() )
self.slider.set_motor_torque( 100*float(np.clip(a[0], -1, +1)) )
def calc_state(self):
self.theta, theta_dot = self.j1.current_position()
x, vx = self.slider.current_position()
assert( np.isfinite(x) )
return np.array([
x, vx,
np.cos(self.theta), np.sin(self.theta), theta_dot
])
def _step(self, a):
self.apply_action(a)
self.scene.global_step()
state = self.calc_state() # sets self.pos_x self.pos_y
vel_penalty = 0
if self.swingup:
reward = np.cos(self.theta)
done = False
else:
reward = 1.0
done = np.abs(self.theta) > .2
self.rewards = [float(reward)]
self.frame += 1
self.done += done # 2 == 1+True
self.reward += sum(self.rewards)
self.HUD(state, a, done)
return state, sum(self.rewards), done, {}
def camera_adjust(self):
self.camera.move_and_look_at(0,1.2,1.0, 0,0,0.5)
class RoboschoolInvertedPendulumSwingup(RoboschoolInvertedPendulum):
swingup = True
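# --- Usage sketch, hedged: roboschool normally registers these environments
# with gym; the exact id string (e.g. 'RoboschoolInvertedPendulum-v1') depends
# on the installed roboschool version, so this is left as comments.
# import gym, roboschool
# env = gym.make('RoboschoolInvertedPendulum-v1')
# obs = env.reset()
# obs, reward, done, info = env.step(env.action_space.sample())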
|
11566726
|
import torch
import torch.nn as nn
class Baselayer:
    def __init__(self, layername: str, id: int, input: list, statedict: list):
        self.layername = layername
        self.inputlayer = input
        self.layerid = id
        self.inputchannel = 0
        self.outputchannel = 0
        # keep only real weight tensors: zero-dimensional entries
        # (e.g. BN's num_batches_tracked) are filtered out
        self.statedict = [s for s in statedict if len(s.shape) != 0]
        self.prunemask = None
        self.outmask = None
        self.bnscale = None
    def clone2module(self, module: nn.Module, inputmask, keepoutput: bool):
        raise NotImplementedError
    def _cloneBN(self, bn, statedict, mask):
        assert isinstance(bn, nn.BatchNorm2d)
        bn.weight.data = statedict[0][mask.tolist()].clone()
        bn.bias.data = statedict[1][mask.tolist()].clone()
        bn.running_mean = statedict[2][mask.tolist()].clone()
        bn.running_var = statedict[3][mask.tolist()].clone()
    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "name={}, ".format(self.layername)
        s += "id={}, ".format(self.layerid)
        s += "numweights={},".format(len(self.statedict))
        s += "inchannel={},".format(self.inputchannel)
        s += "outchannel={})".format(self.outputchannel)
        return s
class CB(Baselayer):
def __init__(self, layername: str, id: int, input:list, statedict: list):
super().__init__(layername, id, input, statedict)
# 'conv.weight', 'bn.weight', 'bn.bias', 'bn.running_mean', 'bn.running_var'
self.inputchannel = self.statedict[0].shape[1]
self.outputchannel = self.statedict[-1].shape[0]
self.bnscale=self.statedict[1].abs().clone()
# self.bnscale = self.statedict[1]
def clone2module(self, module: nn.Module, inputmask,keepoutput=False):
if self.bnscale is None:
keepoutput=True
modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)]
temp = self.statedict[0][:, inputmask.tolist(), :, :]
if keepoutput:
modulelayers[0].weight.data = temp.clone()
self._cloneBN(modulelayers[1],self.statedict[1:5],torch.arange(self.statedict[1].shape[0]))
self.outmask=torch.arange(self.statedict[1].shape[0])
else:
modulelayers[0].weight.data = temp[self.prunemask.tolist(), :, :, :].clone()
self._cloneBN(modulelayers[1], self.statedict[1:5], self.prunemask)
self.outmask=self.prunemask
class DCB(Baselayer):
def __init__(self, layername: str, id: int, input:list, statedict: list):
super().__init__(layername, id, input, statedict)
# 'sepconv.weight', 'sepbn.weight', 'sepbn.bias', 'sepbn.running_mean', 'sepbn.running_var'
# 'pointconv.weight', 'pointbm.weight', 'pointbm.bias', 'pointbm.running_mean', 'pointbm.running_var'
self.inputchannel = self.statedict[0].shape[0]
self.outputchannel = self.statedict[-1].shape[0]
self.bnscale=self.statedict[6].abs().clone()
# self.bnscale = self.statedict[6]
def clone2module(self, module: nn.Module, inputmask,keepoutput=False):
modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)]
temp = self.statedict[0][inputmask.tolist(),:, :, :]
modulelayers[0].weight.data = temp.clone()
modulelayers[0].groups = inputmask.shape[0]
self._cloneBN(modulelayers[1], self.statedict[1:5], inputmask)
if keepoutput:
modulelayers[2].weight.data = self.statedict[5].clone()
self._cloneBN(modulelayers[3],self.statedict[6:10],torch.arange(self.statedict[6].shape[0]))
self.outmask=torch.arange(self.statedict[6].shape[0])
else:
temp = self.statedict[5][:, inputmask.tolist(), :, :]
modulelayers[2].weight.data = temp[self.prunemask.tolist(),:,:,:].clone()
self._cloneBN(modulelayers[3], self.statedict[6:10],self.prunemask)
self.outmask=self.prunemask
class InverRes(Baselayer):
def __init__(self, layername: str, id: int, input:list, statedict: list):
super().__init__(layername, id, input, statedict)
self.inputchannel = self.statedict[0].shape[1]
self.outputchannel = self.statedict[-1].shape[0]
self.numlayer = len(self.statedict) // 5
if self.numlayer==3:
self.bnscale=self.statedict[1].abs().clone()
# self.bnscale=self.statedict[1]
else:
self.bnscale=None
self.inputmask=None
def clone2module(self, module: nn.Module, inputmask,keepoutput=False):
if self.inputmask is not None:
inputmask=self.inputmask
modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)]
if self.numlayer == 2:
modulelayers[0].weight.data = self.statedict[0][inputmask.tolist(), :, :, :].clone()
modulelayers[0].groups=inputmask.shape[0]
self._cloneBN(modulelayers[1],self.statedict[1:5],inputmask)
modulelayers[2].weight.data = self.statedict[5][:,inputmask.tolist(),:,:].clone()
self._cloneBN(modulelayers[3], self.statedict[6:10], torch.arange(self.statedict[6].shape[0]))
self.outmask=torch.arange(self.statedict[6].shape[0])
if self.numlayer == 3:
temp = self.statedict[0][:, inputmask.tolist(), :, :]
modulelayers[0].weight.data = temp[self.prunemask.tolist(), :, :, :].clone()
self._cloneBN(modulelayers[1],self.statedict[1:5],self.prunemask)
modulelayers[2].weight.data = self.statedict[5][self.prunemask.tolist(),:,:,:]
modulelayers[2].groups=self.prunemask.shape[0]
self._cloneBN(modulelayers[3],self.statedict[6:10],self.prunemask)
modulelayers[4].weight.data = self.statedict[10][:, self.prunemask.tolist(), :, :]
self._cloneBN(modulelayers[5], self.statedict[11:15], torch.arange(self.statedict[11].shape[0]))
self.outmask = torch.arange(self.statedict[11].shape[0])
#TODO check right?
# if not module.use_res_connect:
# modulelayers[4].weight.data = self.statedict[10][:, self.prunemask.tolist(), :, :]
# self._cloneBN(modulelayers[5], self.statedict[11:15], torch.arange(self.statedict[11].shape[0]))
# self.outmask=torch.arange(self.statedict[11].shape[0])
# else:
# temp=self.statedict[10][:,self.prunemask.tolist(), :, :]
# modulelayers[4].weight.data = temp[inputmask.tolist(),:,:,:]
# self._cloneBN(modulelayers[5], self.statedict[11:15],inputmask)
# self.outmask = inputmask
class FC(Baselayer):
def __init__(self, layername: str, id: int, input:list, statedict: list):
super().__init__(layername, id, input, statedict)
self.inputchannel = self.statedict[0].shape[1]
self.outputchannel = self.statedict[0].shape[0]
def clone2module(self, module: nn.Module,inputmask=None,keepoutput=False):
modulelayers = [m for m in module.modules() if isinstance(m, nn.Linear)]
modulelayers[0].weight.data=self.statedict[0].clone()
modulelayers[0].bias.data=self.statedict[1].clone()
class Conv(Baselayer):
def __init__(self, layername: str, id: int, input:list, statedict: list):
super().__init__(layername, id, input, statedict)
self.inputchannel = self.statedict[0].shape[1]
self.outputchannel = self.statedict[0].shape[0]
def clone2module(self, module: nn.Module,inputmask=None,keepoutput=False):
modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d)]
modulelayers[0].weight.data = self.statedict[0][:, inputmask.tolist(), :, :].clone()
modulelayers[0].bias.data = self.statedict[1].clone()
# modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d)]
# modulelayers[0].weight.data=self.statedict[0].clone()
# modulelayers[0].bias.data=self.statedict[1].clone()
class DarkBlock(Baselayer):
def __init__(self, layername: str, id: int, input: list, statedict: list):
super().__init__(layername, id, input, statedict)
self.inputchannel = self.statedict[0].shape[1]
self.outputchannel = self.statedict[-1].shape[0]
self.bnscale = self.statedict[1].abs().clone()
def clone2module(self, module: nn.Module, inputmask, keepoutput=False):
modulelayers = [m for m in module.modules() if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)]
temp = self.statedict[0][:, inputmask.tolist(), :, :]
modulelayers[0].weight.data = temp[self.prunemask.tolist(), :, :, :].clone()
self._cloneBN(modulelayers[1], self.statedict[1:5], self.prunemask)
modulelayers[2].weight.data = self.statedict[5][:, self.prunemask.tolist(), :, :]
self._cloneBN(modulelayers[3], self.statedict[6:10], torch.arange(self.statedict[6].shape[0]))
self.outmask = torch.arange(self.statedict[6].shape[0])
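# --- Usage sketch (hedged; real state-dict layouts are model-specific):
# wrapping a conv+BN parameter list in a CB descriptor. The zero-dim filter in
# Baselayer.__init__ would drop entries such as BN's num_batches_tracked.
if __name__ == "__main__":
    conv_w = torch.randn(16, 3, 3, 3)
    bn_w, bn_b = torch.randn(16), torch.randn(16)
    bn_mean, bn_var = torch.zeros(16), torch.ones(16)
    cb = CB('layer0', 0, input=[], statedict=[conv_w, bn_w, bn_b, bn_mean, bn_var])
    print(cb)  # CB(name=layer0, id=0, numweights=5,inchannel=3,outchannel=16)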
|
11566731
|
import pytest
from kymatio import Scattering1D
import os
import numpy as np
import io
backends = []
from kymatio.scattering1d.backend.tensorflow_backend import backend
backends.append(backend)
class TestScattering1DTensorFlow:
@pytest.mark.parametrize('backend', backends)
def test_Scattering1D(self, backend):
"""
Applies scattering on a stored signal to make sure its output agrees with
a previously calculated version.
"""
test_data_dir = os.path.dirname(__file__)
with open(os.path.join(test_data_dir, 'test_data_1d.npz'), 'rb') as f:
buffer = io.BytesIO(f.read())
data = np.load(buffer)
x = data['x']
J = data['J']
Q = data['Q']
Sx0 = data['Sx']
T = x.shape[-1]
scattering = Scattering1D(J, T, Q, backend=backend, frontend='tensorflow')
Sx = scattering(x)
        assert np.allclose(Sx, Sx0, atol=1e-6, rtol=1e-7)
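# --- Usage sketch (outside the test, illustrative parameters): running the
# TensorFlow frontend on a random signal; J=6 and Q=8 are arbitrary choices.
# import numpy as np
# from kymatio import Scattering1D
# x = np.random.randn(2 ** 13).astype(np.float32)
# scattering = Scattering1D(6, x.shape[-1], 8, frontend='tensorflow')
# Sx = scattering(x)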
|
11566738
|
from typing import Tuple
from pydantic import BaseSettings
class GameWindowSettings(BaseSettings):
# Window
CELL_WIDTH: int = 9
CELL_HEIGHT: int = 9
CELL_MARGIN: int = 1
FONT: str = "Arial"
FONT_SIZE: int = 15
CAPTION: str = "Conway's Game of Life - Cellular Automaton"
class Color:
BLACK: Tuple[int, int, int] = (0, 0, 0)
GRAY: Tuple[int, int, int] = (50, 50, 50)
WHITE: Tuple[int, int, int] = (255, 255, 255)
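# --- Usage sketch: as a pydantic BaseSettings subclass, values resolve from
# matching environment variables when set and otherwise fall back to the
# defaults declared above.
if __name__ == "__main__":
    settings = GameWindowSettings()
    print(settings.CAPTION)
    print(settings.CELL_WIDTH + settings.CELL_MARGIN, Color.BLACK)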
|
11566792
|
import torch
from torchvision import datasets, transforms
from lib.transform import AddUniformNoise, ToTensor, HorizontalFlip, Transpose, Resize
def dataloader(dataset, batch_size, cuda, conditionnal=False):
if dataset == 'CIFAR10':
data = datasets.CIFAR10('./CIFAR10', train=True, download=True,
transform=transforms.Compose([
AddUniformNoise(0.05),
Transpose(),
ToTensor()
]))
data_hflip = datasets.CIFAR10('./CIFAR10', train=True, download=True,
transform=transforms.Compose([
HorizontalFlip(),
AddUniformNoise(0.05),
Transpose(),
ToTensor()
]))
data = torch.utils.data.ConcatDataset([data, data_hflip])
train_data, valid_data = torch.utils.data.random_split(data, [90000, 10000])
test_data = datasets.CIFAR10('./CIFAR10', train=False, download=True,
transform=transforms.Compose([
AddUniformNoise(0.05),
Transpose(),
ToTensor()
]))
elif dataset == 'MNIST':
data = datasets.MNIST('./MNIST', train=True, download=True,
transform=transforms.Compose([
AddUniformNoise(),
ToTensor()
]))
train_data, valid_data = torch.utils.data.random_split(data, [50000, 10000])
test_data = datasets.MNIST('./MNIST', train=False, download=True,
transform=transforms.Compose([
AddUniformNoise(),
ToTensor()
]))
elif len(dataset) == 6 and dataset[:5] == 'MNIST':
data = datasets.MNIST('./MNIST', train=True, download=True,
transform=transforms.Compose([
AddUniformNoise(),
ToTensor()
]))
label = int(dataset[5])
idx = data.train_labels == label
data.train_labels = data.train_labels[idx]
data.train_data = data.train_data[idx]
        train_data, valid_data = torch.utils.data.random_split(data, [5000, int(idx.sum()) - 5000])
test_data = datasets.MNIST('./MNIST', train=False, download=True,
transform=transforms.Compose([
AddUniformNoise(),
ToTensor()
]))
idx = test_data.test_labels == label
test_data.test_labels = test_data.test_labels[idx]
test_data.test_data = test_data.test_data[idx]
elif dataset == 'MNIST32':
data = datasets.MNIST('./MNIST', train=True, download=True,
transform=transforms.Compose([
Resize(),
AddUniformNoise(),
ToTensor()
]))
train_data, valid_data = torch.utils.data.random_split(data, [50000, 10000])
test_data = datasets.MNIST('./MNIST', train=False, download=True,
transform=transforms.Compose([
Resize(),
AddUniformNoise(),
ToTensor()
]))
elif len(dataset) == 8 and dataset[:7] == 'MNIST32':
data = datasets.MNIST('./MNIST', train=True, download=True,
transform=transforms.Compose([
Resize(),
AddUniformNoise(),
ToTensor()
]))
label = int(dataset[7])
idx = data.train_labels == label
data.train_labels = data.train_labels[idx]
data.train_data = data.train_data[idx]
        train_data, valid_data = torch.utils.data.random_split(data, [5000, int(idx.sum()) - 5000])
test_data = datasets.MNIST('./MNIST', train=False, download=True,
transform=transforms.Compose([
Resize(),
AddUniformNoise(),
ToTensor()
]))
idx = test_data.test_labels == label
test_data.test_labels = test_data.test_labels[idx]
test_data.test_data = test_data.test_data[idx]
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    # load data
kwargs = {'num_workers': 0, 'pin_memory': True} if cuda > -1 else {}
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size, shuffle=True, **kwargs)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=batch_size, shuffle=True, **kwargs)
return train_loader, valid_loader, test_loader
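# --- Usage sketch (kept as comments since lib.transform is project-local):
# building MNIST loaders on CPU; batch size 128 is an arbitrary example.
# train_loader, valid_loader, test_loader = dataloader('MNIST', batch_size=128, cuda=-1)
# images, labels = next(iter(train_loader))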
|
11566814
|
import django
VERSION = ('2', '2', '1')
if django.VERSION < (3, 2):
default_app_config = 'pagedown.apps.PagedownConfig'
|
11566819
|
from .brawlstars import BrawlStars
async def setup(bot):
cog = BrawlStars(bot)
await cog.api_init()
bot.add_cog(cog)
|
11566839
|
import logging
import os
from os import path as os_path
from pathlib import Path
from bio_embeddings.utilities.filemanagers.FileManagerInterface import FileManagerInterface
logger = logging.getLogger(__name__)
class FileSystemFileManager(FileManagerInterface):
def __init__(self):
super().__init__()
def exists(self, prefix, stage=None, file_name=None, extension=None) -> bool:
path = Path(prefix)
if stage:
path /= stage
if file_name:
path /= file_name + (extension or "")
return os_path.exists(path)
def get_file(self, prefix, stage, file_name, extension=None) -> str:
path = Path(prefix)
if stage:
path /= stage
if file_name:
path /= file_name + (extension or "")
return str(path)
def create_file(self, prefix, stage, file_name, extension=None) -> str:
path = Path(prefix)
if stage:
path /= stage
path /= file_name + (extension or "")
try:
with open(path, 'w'):
os.utime(path, None)
except OSError as e:
logger.error("Failed to create file %s" % path)
raise e
else:
logger.info("Created the file %s" % path)
return str(path)
def create_directory(self, prefix, stage, directory_name) -> str:
path = Path(prefix)
if stage:
path /= stage
path /= directory_name
try:
os.mkdir(path)
except FileExistsError:
logger.info("Directory %s already exists." % path)
except OSError as e:
logger.error("Failed to create directory %s" % path)
raise e
else:
logger.info("Created the directory %s" % path)
return str(path)
def create_stage(self, prefix, stage) -> str:
path = Path(prefix) / stage
try:
os.mkdir(path)
except FileExistsError:
logger.info("Stage directory %s already exists." % path)
except OSError as e:
logger.error("Failed to create stage directory %s" % path)
raise e
else:
logger.info("Created the stage directory %s" % path)
return str(path)
def create_prefix(self, prefix) -> str:
path = Path(prefix)
try:
os.mkdir(path)
except FileExistsError:
logger.info("Prefix directory %s already exists." % path)
except OSError as e:
logger.error("Failed to create prefix directory %s" % path)
raise e
else:
logger.info("Created the prefix directory %s" % path)
return str(path)
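# --- Usage sketch (illustrative paths): laying out a prefix/stage/file
# hierarchy on the local filesystem.
# manager = FileSystemFileManager()
# manager.create_prefix('/tmp/embeddings')
# manager.create_stage('/tmp/embeddings', 'stage_1')
# csv_path = manager.create_file('/tmp/embeddings', 'stage_1', 'result', extension='.csv')
# assert manager.exists('/tmp/embeddings', stage='stage_1', file_name='result', extension='.csv')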
|
11566846
|
import torch
import torch.nn as nn
##########################################################################
def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
layer = nn.Conv2d(in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias, stride=stride)
return layer
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
##########################################################################
## Supervised Attention Module (SAM)
class SAM(nn.Module):
def __init__(self, n_feat, kernel_size, bias):
super(SAM, self).__init__()
self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
def forward(self, x, x_img):
x1 = self.conv1(x)
img = self.conv2(x) + x_img
x2 = torch.sigmoid(self.conv3(img))
x1 = x1 * x2
x1 = x1 + x
return x1, img
##########################################################################
## Spatial Attention
class SALayer(nn.Module):
def __init__(self, kernel_size=7):
super(SALayer, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
y = torch.cat([avg_out, max_out], dim=1)
y = self.conv1(y)
y = self.sigmoid(y)
return x * y
# Spatial Attention Block (SAB)
class SAB(nn.Module):
def __init__(self, n_feat, kernel_size, reduction, bias, act):
super(SAB, self).__init__()
modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
self.body = nn.Sequential(*modules_body)
self.SA = SALayer(kernel_size=7)
def forward(self, x):
res = self.body(x)
res = self.SA(res)
res += x
return res
##########################################################################
## Pixel Attention
class PALayer(nn.Module):
def __init__(self, channel, reduction=16, bias=False):
super(PALayer, self).__init__()
self.pa = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias), # channel <-> 1
nn.Sigmoid()
)
def forward(self, x):
y = self.pa(x)
return x * y
## Pixel Attention Block (PAB)
class PAB(nn.Module):
def __init__(self, n_feat, kernel_size, reduction, bias, act):
super(PAB, self).__init__()
modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
self.PA = PALayer(n_feat, reduction, bias=bias)
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res = self.PA(res)
res += x
return res
##########################################################################
## Channel Attention Layer
class CALayer(nn.Module):
def __init__(self, channel, reduction=16, bias=False):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
## Channel Attention Block (CAB)
class CAB(nn.Module):
def __init__(self, n_feat, kernel_size, reduction, bias, act):
super(CAB, self).__init__()
modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
self.CA = CALayer(n_feat, reduction, bias=bias)
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res = self.CA(res)
res += x
return res
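# --- Usage sketch: shape checks for the blocks above; n_feat=64 and the PReLU
# activation are arbitrary example choices.
if __name__ == "__main__":
    x = torch.randn(1, 64, 32, 32)
    act = nn.PReLU()
    cab = CAB(n_feat=64, kernel_size=3, reduction=16, bias=False, act=act)
    print(cab(x).shape)  # torch.Size([1, 64, 32, 32])
    sam = SAM(n_feat=64, kernel_size=3, bias=False)
    feats, img = sam(x, torch.randn(1, 3, 32, 32))
    print(feats.shape, img.shape)  # (1, 64, 32, 32) and (1, 3, 32, 32)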
|
11566878
|
import time
import unittest
from datetime import timedelta
from nokia import NokiaSleepSeries
class TestNokiaSleepSeries(unittest.TestCase):
def test_attributes(self):
data = {
"startdate": 1387243618,
"state": 3,
"enddate": 1387265218
}
series = NokiaSleepSeries(data)
self.assertEqual(type(series), NokiaSleepSeries)
self.assertEqual(series.startdate.timestamp, data['startdate'])
self.assertEqual(series.state, data['state'])
self.assertEqual(series.enddate.timestamp, data['enddate'])
self.assertEqual(series.timedelta, timedelta(seconds=21600))
|
11566891
|
import textwrap
import logging
from benchmarkstt.cli import create_parser, args_help, args_common, args_complete
from benchmarkstt.cli import CustomHelpFormatter, before_parseargs
from benchmarkstt.modules import Modules
logger = logging.getLogger(__name__)
def argparser():
name = 'benchmarkstt-tools'
desc = 'Some additional helpful tools'
parser = create_parser(prog=name, description=desc)
subparsers = parser.add_subparsers(dest='subcommand')
for module, cli in Modules('cli'):
kwargs = dict()
if hasattr(cli, 'Formatter'):
kwargs['formatter_class'] = cli.Formatter
else:
kwargs['formatter_class'] = CustomHelpFormatter
if cli.__doc__ is not None:
docs = cli.__doc__
else:
docs = '?'
logger.warning('Missing __doc__ for benchmarkstt.%s._cli', module)
kwargs['description'] = textwrap.dedent(docs)
subparser = subparsers.add_parser(module, add_help=False, allow_abbrev=False, **kwargs)
cli.argparser(subparser)
args_common(subparser)
args_help(subparser)
args_help(parser)
return parser
def run():
before_parseargs()
parser = argparser()
args_complete(parser)
args = parser.parse_args()
if not args.subcommand:
parser.error("expects at least 1 argument")
Modules('cli')[args.subcommand].run(parser, args)
exit(0)
if __name__ == '__main__': # pragma: nocover
run()
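# --- Usage sketch: installed as a console entry point, typical invocations
# would look like the following; available subcommands come from whatever is
# registered under Modules('cli').
#   benchmarkstt-tools --help
#   benchmarkstt-tools <subcommand> --help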
|
11566897
|
import FWCore.ParameterSet.Config as cms
def customiseCommon(process):
#####################################################################################################
####
#### Top level replaces for handling strange scenarios of early collisions
####
## TRACKING:
process.newSeedFromTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.newSeedFromPairs.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
process.secTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.thTripletsA.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.thTripletsB.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
process.fourthPLSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
process.fifthSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
###### FIXES TRIPLETS FOR LARGE BS DISPLACEMENT ######
### prevent bias in pixel vertex
process.pixelVertices.useBeamConstraint = False
###
### end of top level replacements
###
###############################################################################################
return (process)
##############################################################################
def customisePPData(process):
process= customiseCommon(process)
## particle flow HF cleaning
process.particleFlowRecHitHCAL.LongShortFibre_Cut = 30.
process.particleFlowRecHitHCAL.ApplyPulseDPG = True
## HF cleaning for data only
process.hcalRecAlgos.SeverityLevels[3].RecHitFlags.remove("HFDigiTime")
process.hcalRecAlgos.SeverityLevels[4].RecHitFlags.append("HFDigiTime")
##beam-halo-id for data only
process.CSCHaloData.ExpectedBX = cms.int32(3)
## hcal hit flagging
process.hfreco.PETstat.flagsToSkip = 2
process.hfreco.S8S1stat.flagsToSkip = 18
process.hfreco.S9S1stat.flagsToSkip = 26
return process
##############################################################################
def customisePPMC(process):
process=customiseCommon(process)
return process
##############################################################################
def customiseCosmicData(process):
return process
##############################################################################
def customiseCosmicMC(process):
return process
##############################################################################
def customiseVALSKIM(process):
process= customisePPData(process)
process.reconstruction.remove(process.lumiProducer)
return process
##############################################################################
def customiseExpress(process):
process= customisePPData(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
def customisePrompt(process):
process= customisePPData(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
##############################################################################
def customiseCommonHI(process):
###############################################################################################
####
#### Top level replaces for handling strange scenarios of early HI collisions
####
## Offline Silicon Tracker Zero Suppression
process.siStripZeroSuppression.Algorithms.PedestalSubtractionFedMode = cms.bool(False)
process.siStripZeroSuppression.Algorithms.CommonModeNoiseSubtractionMode = cms.string("IteratedMedian")
process.siStripZeroSuppression.doAPVRestore = cms.bool(True)
process.siStripZeroSuppression.storeCM = cms.bool(True)
## Fixes to protect against large BS displacements
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.originRadius = 0.2
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.fixedError = 0.5
process.hiSelectedProtoTracks.maxD0Significance = 100
process.hiPixelAdaptiveVertex.TkFilterParameters.maxD0Significance = 100
process.hiPixelAdaptiveVertex.useBeamConstraint = False
process.hiPixelAdaptiveVertex.PVSelParameters.maxDistanceToBeam = 1.0
###
### end of top level replacements
###
###############################################################################################
return process
##############################################################################
def customiseExpressHI(process):
process= customiseCommonHI(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
# keep some debugging content for zero suppression
process.siStripZeroSuppression.produceRawDigis = cms.bool(True)
process.siStripZeroSuppression.produceCalculatedBaseline = cms.bool(True)
return process
##############################################################################
def customisePromptHI(process):
process= customiseCommonHI(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
return process
##############################################################################
def customiseAlcaOnlyPromptHI(process):
process= customiseCommonHI(process)
import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltCentralityVeto = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
HLTPaths = cms.vstring('HLT_HICentralityVeto'),
throw = cms.bool(False)
)
for path in process.paths:
getattr(process,path)._seq = process.hltCentralityVeto * getattr(process,path)._seq
return process
##############################################################################
|