id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
161534
|
from office365.runtime.client_value import ClientValue
class QueryAutoCompletionResults(ClientValue):
    """Marker payload type for SharePoint Search query auto-completion results.

    Declares no fields of its own; all (de)serialization behavior is
    inherited from ClientValue.
    """
    pass
|
161560
|
from typing import Optional
from torch.nn import Module
from tha2.nn.base.init_function import create_init_function
from tha2.nn.base.module_factory import ModuleFactory
from tha2.nn.base.nonlinearity_factory import resolve_nonlinearity_factory
from tha2.nn.base.normalization import NormalizationLayerFactory
from tha2.nn.base.spectral_norm import apply_spectral_norm
def wrap_conv_or_linear_module(module: Module, initialization_method: str, use_spectral_norm: bool):
    """Initialize *module*'s weights and optionally wrap it in spectral normalization.

    Args:
        module: The conv or linear layer to prepare.
        initialization_method: Name understood by create_init_function (e.g. 'he').
        use_spectral_norm: When True, the initialized module is spectral-normalized.
    """
    initializer = create_init_function(initialization_method)
    initialized_module = initializer(module)
    return apply_spectral_norm(initialized_module, use_spectral_norm)
class ImageArgs:
    """Bundle describing an image tensor: spatial size and channel count."""

    def __init__(self, size: int = 64, num_channels: int = 3):
        # Stored in declaration order for readability; attribute names are
        # part of the public interface and unchanged.
        self.size = size
        self.num_channels = num_channels
class BlockArgs:
    """Configuration bundle shared by conv/linear building blocks.

    Collects the weight-initialization scheme, spectral-norm flag, and the
    factories used for normalization and nonlinearity layers.
    """

    def __init__(self,
                 initialization_method: str = 'he',
                 use_spectral_norm: bool = False,
                 normalization_layer_factory: Optional[NormalizationLayerFactory] = None,
                 nonlinearity_factory: Optional[ModuleFactory] = None):
        self.initialization_method = initialization_method
        self.use_spectral_norm = use_spectral_norm
        self.normalization_layer_factory = normalization_layer_factory
        # A None nonlinearity is resolved to the project default factory.
        self.nonlinearity_factory = resolve_nonlinearity_factory(nonlinearity_factory)

    def wrap_module(self, module: Module) -> Module:
        """Apply this configuration's initialization and spectral-norm wrapping."""
        return wrap_conv_or_linear_module(
            module, self.initialization_method, self.use_spectral_norm)
|
161575
|
from django.test import TestCase
from django.test.client import Client
from django.urls import reverse
from model_mommy import mommy
from users.models import Department
from users.models import DepartmentUser
from users.models import Lageruser
class LageruserTests(TestCase):
    """Model and view smoke tests for the Lageruser user model."""

    def setUp(self):
        # All view tests below require an authenticated superuser session.
        self.client = Client()
        Lageruser.objects.create_superuser('test', "<EMAIL>", "test")
        self.client.login(username="test", password="<PASSWORD>")

    def test_lageruser_creation(self):
        named_user = mommy.make(Lageruser, first_name="a", last_name="a")
        nameless_user = mommy.make(Lageruser, first_name="", last_name="a")
        # str() falls back to the username when the first name is empty.
        self.assertEqual(str(named_user), "{0} {1}".format(named_user.first_name, named_user.last_name))
        self.assertEqual(str(nameless_user), nameless_user.username)
        self.assertEqual(named_user.get_absolute_url(), reverse('userprofile', kwargs={'pk': named_user.pk}))
        named_user.clean()
        self.assertEqual(named_user.expiration_date, None)

    def test_list_view(self):
        self.assertEqual(self.client.get('/users/').status_code, 200)

    def test_detail_view(self):
        someone = mommy.make(Lageruser)
        self.assertEqual(self.client.get('/users/%i/' % someone.pk).status_code, 200)

    def test_profile_view(self):
        self.assertEqual(self.client.get('/profile/').status_code, 200)

    def test_settings_view(self):
        self.assertEqual(self.client.get('/settings/').status_code, 200)
class DepartmentTests(TestCase):
    """Model and view smoke tests for Department and DepartmentUser."""

    def setUp(self):
        # All view tests below require an authenticated superuser session.
        self.client = Client()
        Lageruser.objects.create_superuser('test', "<EMAIL>", "test")
        self.client.login(username="test", password="<PASSWORD>")

    def test_department_creation(self):
        dept = mommy.make(Department)
        self.assertEqual(str(dept), dept.name)

    def test_list_view(self):
        self.assertEqual(self.client.get('/departments/').status_code, 200)

    def test_create_view(self):
        self.assertEqual(self.client.get('/departments/add/').status_code, 200)

    def test_detail_view(self):
        dept = mommy.make(Department)
        self.assertEqual(self.client.get('/departments/%i/' % dept.pk).status_code, 200)

    def test_update_view(self):
        dept = mommy.make(Department)
        self.assertEqual(self.client.get('/departments/%i/edit/' % dept.pk).status_code, 200)

    def test_delete_view(self):
        dept = mommy.make(Department)
        self.assertEqual(self.client.get('/departments/%i/delete/' % dept.pk).status_code, 200)

    def test_adduser_view(self):
        dept = mommy.make(Department)
        self.assertEqual(self.client.get('/departments/%i/adduser/' % dept.pk).status_code, 200)

    def test_removeuser_view(self):
        membership = mommy.make(DepartmentUser)
        self.assertEqual(self.client.get('/departments/%i/removeuser/' % membership.pk).status_code, 200)
|
161655
|
from beneath.connection import Connection
class _ResourceBase:
    """Base class for API resources bound to a Beneath connection.

    Supports a "dry run" mode in which any mutation attempt is refused.
    """

    def __init__(self, conn: Connection, dry=False):
        self.conn = conn
        self.dry = dry

    def _before_mutation(self):
        """Guard hook invoked before every mutation; raises in dry-run mode."""
        if not self.dry:
            return
        raise Exception("Cannot run mutation on a client where dry=True")
|
161671
|
import logging
from abc import ABCMeta
from typing import Optional, Sequence
from wyze_sdk.errors import WyzeClientConfigurationError
from wyze_sdk.service import (ApiServiceClient, EarthServiceClient,
GeneralApiServiceClient, PlatformServiceClient,
ScaleServiceClient, VenusServiceClient,
WyzeResponse)
class BaseClient(object, metaclass=ABCMeta):
    """Abstract base for Wyze API clients.

    Stores the access token, user id and optional base URL, and provides
    factory helpers that build pre-configured per-service clients.
    """

    def __init__(
        self,
        token: Optional[str] = None,
        user_id: Optional[str] = None,
        base_url: Optional[str] = None,
        logger: logging.Logger = logging.getLogger(__name__),
        **kwargs,
    ):
        # A client is unusable without a token, so fail fast.
        if token is None:
            raise WyzeClientConfigurationError("client is not logged in")
        self._token = token.strip()
        self._user_id = user_id
        self._base_url = base_url
        self._logger = logger

    def _platform_client(self) -> PlatformServiceClient:
        return BaseClient._service_client(PlatformServiceClient, token=self._token, base_url=self._base_url)

    def _api_client(self) -> ApiServiceClient:
        return BaseClient._service_client(ApiServiceClient, token=self._token, base_url=self._base_url)

    @staticmethod
    def _service_client(cls, *, token: str, base_url: Optional[str] = None, **kwargs) -> "BaseClient":
        """Create a service client to execute the API call to Wyze.
        Args:
            cls (class): The target service client.
                e.g. 'PlatformServiceClient'
            token (str): The access token.
            base_url (Optional[str]): The base url of the service.
        Returns:
            (BaseClient)
                A new pre-configured client for interacting
                with the target service.
        """
        # BUG FIX: the original expanded **{{'base_url': base_url}, kwargs} --
        # a *set literal* containing two dicts, which raises
        # "TypeError: unhashable type: 'dict'" at runtime. Merge base_url
        # into kwargs explicitly instead (explicit kwargs win).
        if base_url is not None:
            kwargs = {'base_url': base_url, **kwargs}
        return cls(token=token, **kwargs)

    def _earth_client(self) -> EarthServiceClient:
        return BaseClient._service_client(EarthServiceClient, token=self._token, base_url=self._base_url)

    def _general_api_client(self) -> GeneralApiServiceClient:
        # GeneralApiServiceClient additionally needs the user id, so it is
        # constructed directly rather than via _service_client.
        return GeneralApiServiceClient(
            token=self._token,
            **{'base_url': self._base_url} if self._base_url else {},
            user_id=self._user_id,
        )

    def _scale_client(self) -> ScaleServiceClient:
        return BaseClient._service_client(ScaleServiceClient, token=self._token, base_url=self._base_url)

    def _venus_client(self) -> VenusServiceClient:
        return BaseClient._service_client(VenusServiceClient, token=self._token, base_url=self._base_url)

    def _create_user_event(self, pid: str, event_id: str, event_type: int) -> WyzeResponse:
        # BUG FIX: the response was computed but silently discarded even
        # though the annotation promises a WyzeResponse; return it.
        return self._general_api_client().post_user_event(pid=pid, event_id=event_id, event_type=event_type)

    def _list_devices(self, **kwargs) -> Sequence[dict]:
        return self._api_client().get_object_list()["data"]["device_list"]
|
161696
|
import os
from chazutsu.datasets.framework.xtqdm import xtqdm
from chazutsu.datasets.framework.dataset import Dataset
from chazutsu.datasets.framework.resource import Resource
class MovieReview(Dataset):
    """Cornell Movie Review datasets (Pang & Lee): polarity, rating, subjectivity.

    The ``kind`` argument selects which of the four released archives is
    downloaded and how it is converted into a single tab-separated file of
    ``label\treview`` lines.
    """

    def __init__(self, kind="polarity"):
        # NOTE(review): "Moview" appears to be a typo, but root_name (and so
        # the on-disk directory names) derive from it -- kept as-is.
        super().__init__(
            name="Moview Review Data",
            site_url="http://www.cs.cornell.edu/people/pabo/movie-review-data/",  # noqa
            download_url="",
            description="movie review data is annotated by 3 kinds of label" \
                        " (polarity, subjective rating, subjectivity)."
            )
        # One download archive per supported kind.
        urls = {
            "polarity": "http://www.cs.cornell.edu/people/pabo/movie-review-data/review_polarity.tar.gz",  # noqa
            "polarity_v1": "http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz",  # noqa
            "rating": "http://www.cs.cornell.edu/people/pabo/movie-review-data/scale_data.tar.gz",  # noqa
            "subjectivity": "http://www.cs.cornell.edu/people/pabo/movie-review-data/rotten_imdb.tar.gz"  # noqa
            }
        if kind not in urls:
            keys = ",".join(urls.keys())
            raise Exception("You have to choose kind from {}".format(keys))
        self.kind = kind
        self.download_url = urls[self.kind]

    # Convenience alternate constructors, one per dataset kind.
    @classmethod
    def polarity(cls):
        return MovieReview("polarity")

    @classmethod
    def polarity_v1(cls):
        return MovieReview("polarity_v1")

    @classmethod
    def rating(cls):
        return MovieReview("rating")

    @classmethod
    def subjectivity(cls):
        return MovieReview("subjectivity")

    @property
    def root_name(self):
        # e.g. "moview_review_data_polarity"; used for the storage directory.
        return self.name.lower().replace(" ", "_") + "_" + self.kind

    @property
    def extract_targets(self):
        # Archive members to extract; an empty tuple presumably means
        # "extract everything" (handled by the Dataset base class -- confirm).
        if self.kind == "polarity_v1":
            return ["rt-polaritydata/rt-polarity.neg",
                    "rt-polaritydata/rt-polarity.pos"]
        elif self.kind == "subjectivity":
            return ["plot.tok.gt9.5000",
                    "quote.tok.gt9.5000"]
        return ()

    def prepare(self, dataset_root, extracted_path):
        """Dispatch to the kind-specific conversion; returns the output path."""
        if self.kind == "polarity":
            return self._prepare_polarity(dataset_root, extracted_path)
        elif self.kind == "polarity_v1":
            return self._prepare_polarity_v1(dataset_root, extracted_path)
        elif self.kind == "rating":
            return self._prepare_rating(dataset_root, extracted_path)
        elif self.kind == "subjectivity":
            return self._prepare_subjectivity(dataset_root, extracted_path)
        else:
            raise Exception(
                "{} is not supported in extraction process.".format(self.kind))

    def make_resource(self, data_root):
        """Build a Resource describing the converted file's columns and target."""
        if self.kind in ["polarity", "polarity_v1"]:
            return Resource(data_root,
                            columns=["polarity", "review"], target="polarity")
        elif self.kind == "rating":
            return Resource(data_root,
                            columns=["rating", "review"], target="rating")
        elif self.kind == "subjectivity":
            return Resource(data_root,
                            columns=["subjectivity", "review"],
                            target="subjectivity")
        else:
            return Resource(data_root)

    def _prepare_polarity(self, dataset_root, extracted_path):
        # One review per file, grouped in neg/pos directories; join each
        # file's lines into a single tab-free review string.
        polarity_file_path = os.path.join(dataset_root, "review_polarity.txt")
        negative_path = os.path.join(extracted_path, "txt_sentoken/neg")
        positive_path = os.path.join(extracted_path, "txt_sentoken/pos")
        with open(polarity_file_path, mode="w", encoding="utf-8") as f:
            for i, p in enumerate([negative_path, positive_path]):
                label = i  # negative = 0, positive = 1
                label_name = "negative" if label == 0 else "positive"
                self.logger.info("Extracting {} data.".format(label_name))
                for txt in xtqdm(os.listdir(p)):
                    with open(os.path.join(p, txt), encoding="utf-8") as tf:
                        lines = [ln.strip().replace("\t", " ") for ln in tf.readlines()]
                        review = " ".join(lines)
                        f.write("\t".join([str(label), review]) + "\n")
        return polarity_file_path

    def _prepare_polarity_v1(self, dataset_root, extracted_path):
        # One review per line, one file per label (.neg / .pos suffix).
        polarity_file = os.path.join(dataset_root, "review_polarity_v1.txt")
        with open(polarity_file, mode="w", encoding="utf-8") as f:
            for e in self.extract_targets:
                p = os.path.join(extracted_path, os.path.basename(e))
                label = 0 if e.endswith(".neg") else 1
                label_name = "negative" if label == 0 else "positive"
                self.logger.info("Extracting {} data.".format(label_name))
                total = self.get_line_count(p)
                # NOTE: `p` is rebound from the path string to the file object.
                with open(p, mode="r", errors="replace", encoding="utf-8") as p:
                    for ln in xtqdm(p, total=total):
                        review = ln.strip().replace("\t", " ")
                        f.write("\t".join([str(label), review]) + "\n")
        return polarity_file

    def _prepare_rating(self, dataset_root, extracted_path):
        # scaledata/<user>/subj.<user> and rating.<user> are parallel files:
        # the i-th review line pairs with the i-th rating line.
        rating_file_path = os.path.join(dataset_root, "review_rating.txt")
        rating_dir = os.path.join(extracted_path, "scaledata")
        rating_file = open(rating_file_path, "w", encoding="utf-8")
        for user in os.listdir(rating_dir):
            user_dir = os.path.join(rating_dir, user)
            if not os.path.isdir(user_dir):
                continue
            sub_in_review_file = os.path.join(user_dir, "subj." + user)
            user_rating_file = os.path.join(user_dir, "rating." + user)
            total = self.get_line_count(sub_in_review_file)
            self.logger.info("Extracting user {}'s rating data.".format(user))
            with open(sub_in_review_file, "r", encoding="utf-8") as sr:
                with open(user_rating_file, "r", encoding="utf-8") as ur:
                    for review, rating in xtqdm(zip(sr, ur), total=total):
                        _rv = review.strip().replace("\t", " ")
                        _r = rating.strip()
                        rating_file.write("\t".join([_r, _rv]) + "\n")
        rating_file.close()
        return rating_file_path

    def _prepare_subjectivity(self, dataset_root, extracted_path):
        # plot.* lines are subjective (label 1), quote.* lines objective (0).
        subjectivity_file = os.path.join(dataset_root, "subjectivity.txt")
        with open(subjectivity_file, mode="w", encoding="utf-8") as f:
            for e in self.extract_targets:
                # subjective(plot) = 1
                label = 1 if e.startswith("plot.") else 0
                label_name = "subjective" if label == 1 else "objective"
                self.logger.info("Extracting {} data.".format(label_name))
                p = os.path.join(extracted_path, os.path.basename(e))
                total = self.get_line_count(p)
                with open(p, mode="r", errors="replace", encoding="utf-8") as sb:
                    for ln in xtqdm(sb, total=total):
                        review = ln.strip().replace("\t", " ")
                        f.write("\t".join([str(label), review]) + "\n")
        return subjectivity_file
|
161738
|
import numpy as np
import scipy.linalg as la
# As 'measured' by Juan
'''
R_pyramic = np.array([
[-1.756492069, -3.042333507, 1.139761726], # 0
[-2.91789798, -5.053947553, 4.396223799],
[-4.079303892, -7.0655616, 7.652685873],
[-4.543866256, -7.870207219, 8.955270702],
[-4.776147439, -8.272530028, 9.606563117],
[-5.240709803, -9.077175647, 10.90914795],
[-6.402115715, -11.08878969, 14.16561002],
[-7.563521626, -13.10040374, 17.42207209], # 7
[-6.4, -10, 21.3], # 8
[-6.4, -6, 21.3],
[-6.4, -2, 21.3],
[-6.4, -0.4, 21.3],
[-6.4, 0.4, 21.3],
[-6.4, 2, 21.3],
[-6.4, 6, 21.3],
[-6.4, 10, 21.3], # 15
[3.512984138, 0, 1.139761726], # 16
[5.835795961, 0, 4.396223799],
[8.158607784, 0, 7.652685873],
[9.087732513, 0, 8.955270702],
[9.552294877, 0, 9.606563117],
[10.48141961, 0, 10.90914795],
[12.80423143, 0, 14.16561002],
[15.12704325, 0, 17.42207209], # 23
[13.70043101, -2.5, 21.3], # 24
[10.2363294, -4.5, 21.3],
[6.772227782, -6.5, 21.3],
[5.386587136, -7.3, 21.3],
[4.693766812, -7.7, 21.3],
[3.308126166, -8.5, 21.3],
[-0.155975449, -10.5, 21.3],
[-3.620077064, -12.5, 21.3], # 31
[-1.756492069, 3.042333507, 1.139761726], # 32
[-2.91789798, 5.053947553, 4.396223799],
[-4.079303892, 7.0655616, 7.652685873],
[-4.543866256, 7.870207219, 8.955270702],
[-4.776147439, 8.272530028, 9.606563117],
[-5.240709803, 9.077175647, 10.90914795],
[-6.402115715, 11.08878969, 14.16561002],
[-7.563521626, 13.10040374, 17.42207209], # 39
[-3.620077064, 12.5, 21.3], # 40
[-0.155975449, 10.5, 21.3],
[3.308126166, 8.5, 21.3],
[4.693766812, 7.7, 21.3],
[5.386587136, 7.3, 21.3],
[6.772227782, 6.5, 21.3],
[10.2363294, 4.5, 21.3],
[13.70043101, 2.5, 21.3], # 47
]).T / 100.
'''
# Geometry of the Pyramic tetrahedral microphone array.
x = 0.27 + 2*0.015  # length of one side
# Tetrahedron trigonometry constants.
c1 = 1./np.sqrt(3.)
c2 = np.sqrt(2./3.)
c3 = np.sqrt(3.)/6.
c4 = 0.5
# Columns are the four corner points of the tetrahedron (apex at the origin).
corners = np.array( [
    [ 0, x*c1, -x*c3, -x*c3,],
    [ 0, 0., x*c4, -x*c4,],
    [ 0, x*c2, x*c2, x*c2,],
    ])
# relative placement of microphones on one pcb
pcb = np.array([-0.100, -0.060, -0.020, -0.004, 0.004, 0.020, 0.060, 0.100])
def line(p1, p2, dist):
    """Place points along the p1 -> p2 line at the signed distances *dist*,
    measured from the midpoint of the segment."""
    midpoint = (p1 + p2) / 2.
    direction = (p2 - p1) / la.norm(p2 - p1)
    return [midpoint + d * direction for d in dist]
# Stack the six PCB edges of the tetrahedron into a 3 x 48 coordinate matrix
# (one column per microphone).
R_pyramic = np.array(
    line(corners[:,0], corners[:,3], pcb) +
    line(corners[:,3], corners[:,2], pcb) +
    line(corners[:,0], corners[:,1], pcb) +
    line(corners[:,1], corners[:,3], pcb) +
    line(corners[:,0], corners[:,2], pcb) +
    line(corners[:,2], corners[:,1], pcb)
    ).T
# Reference point is 1cm below zero'th mic
R_pyramic[2,:] += 0.01 - R_pyramic[2,0]
if __name__ == "__main__":
    from experiment import PointCloud
    pyramic = PointCloud(X=R_pyramic)
    D = np.sqrt(pyramic.EDM())
    # BUG FIX: `print D[0,16]` was a Python 2 print statement -- a
    # SyntaxError under Python 3; use the print() function.
    print(D[0, 16])
    pyramic.plot()
|
161772
|
import sys
import nuclio_sdk
import nuclio_sdk.test
import functions.api_serving
import functions.face_prediction
import logging
def chain_call_function_mock(name, event, node=None, timeout=None, service_name_override=None):
    """Test double for nuclio's call_function: runs 'face_prediction' locally.

    Any other function name is an error in the test wiring and raises.
    """
    logger = nuclio_sdk.Logger(level=logging.DEBUG)
    logger.set_handler('default', sys.stdout, nuclio_sdk.logger.HumanReadableFormatter())
    logger.debug_with("Call function mock called", name=name, service_name_override=service_name_override)
    # Guard clause: only the face_prediction chain is supported here.
    if name != "face_prediction":
        raise RuntimeError('Call function called with unexpected function name: {0}'.format(name))
    platform = nuclio_sdk.test.Platform()
    return platform.call_handler(functions.face_prediction.handler, event)
def test_api_serving():
    """End-to-end test: feed an image line through the api_serving handler,
    with downstream function calls mocked by chain_call_function_mock."""
    # FIX: use a context manager so the file is closed even if readline()
    # raises (the original open/readline/close leaked on error).
    with open("image.txt", "r") as f:
        image = f.readline()
    nuclio_plat = nuclio_sdk.test.Platform()
    nuclio_plat._call_function_mock.side_effect = chain_call_function_mock
    event = nuclio_sdk.Event(body=image)
    nuclio_plat.call_handler(functions.api_serving.handler, event)
|
161786
|
from banal import ensure_list
from rdflib import Namespace, ConjunctiveGraph, URIRef, Literal
from rdflib.namespace import FOAF, DC, RDF, RDFS, SKOS
from followthemoney import model
from pprint import pprint
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan
# Elasticsearch connection and index names for the Aleph instance.
es = Elasticsearch()
collection_index = "aleph-collection-v1"
entity_index = "aleph-entity-v1"
# RDF namespaces used when exporting entities.
HOST = "https://data.occrp.org"
ALEPH = Namespace("%s/#/ns/" % HOST)   # Aleph-specific predicates
FTM = Namespace("urn:ftm:")            # follow-the-money schema terms
SCHEMA = Namespace("%s/#/ftm/" % HOST)
def export_entity(ctx, entity):
    """Serialize one Aleph entity (or document) as N-Quads on stdout.

    Args:
        ctx: URIRef of the collection named-graph the triples belong to.
        entity: dict from the entity search index (``_source`` with "id" added).
    """
    g = ConjunctiveGraph()
    if "Document" in entity["schemata"]:
        uri = URIRef("%s/documents/%s" % (HOST, entity["id"]))
        if entity.get("title"):
            g.add((uri, DC.title, Literal(entity.get("title")), ctx))
        g.add((uri, ALEPH.fileName, Literal(entity.get("file_name")), ctx))
        g.add((uri, ALEPH.mimeType, Literal(entity.get("mime_type")), ctx))
        # TODO DC dates, author etc.
        # parent
    else:
        uri = URIRef("%s/entities/%s" % (HOST, entity["id"]))
        if entity.get("name"):
            g.add((uri, SKOS.prefLabel, Literal(entity.get("name")), ctx))
    for name in entity.get("names", []):
        g.add((uri, RDFS.label, Literal(name), ctx))
    for schema in entity["schemata"]:
        g.add((uri, RDF.type, FTM[schema], ctx))
    for country in entity.get("countries", []):
        # Only two-letter ISO codes are emitted.
        if len(country) != 2:
            continue
        country = URIRef("iso-3166-1:%s" % country)
        g.add((uri, ALEPH.country, country, ctx))
    for phone in entity.get("phones", []):
        phone = URIRef("tel:%s" % phone)
        g.add((uri, ALEPH.phone, phone, ctx))
    for email in entity.get("emails", []):
        email = URIRef("mailto:%s" % email)
        g.add((uri, ALEPH.email, email, ctx))
    schema = model[entity["schema"]]
    properties = entity.get("properties", {})
    for name, values in properties.items():
        prop = schema.get(name)
        if prop is None:
            # ROBUSTNESS FIX: schema.get() returns None for properties not
            # declared on the schema; skip instead of crashing on prop.schema.
            continue
        pred = "%s#%s" % (prop.schema.name, name)
        pred = FTM[pred]
        for value in ensure_list(values):
            g.add((uri, pred, Literal(value), ctx))
    # BUG FIX: was a Python 2 print statement (SyntaxError in Python 3).
    print(g.serialize(format="nquads"))
def export_collection(collection):
    """Emit RDF for one collection, then export every entity it contains."""
    g = ConjunctiveGraph()
    domain = URIRef(HOST)
    # The collection URI doubles as the named-graph context for its entities.
    ctx = URIRef("%s/collections/%s" % (HOST, collection["id"]))
    g.add((ctx, RDFS.label, Literal(collection["label"]), domain))
    g.add((ctx, ALEPH.foreignId, Literal(collection["foreign_id"]), domain))
    # print g.serialize(format='nquads')
    # pprint(collection)
    q = {
        "query": {"term": {"collection_id": collection["id"]}},
        # Exclude the (potentially huge) extracted-text field from hits.
        "_source": {"exclude": ["text"]},
    }
    for row in scan(es, index=entity_index, query=q):
        entity = row["_source"]
        entity["id"] = row["_id"]
        export_entity(ctx, entity)
def export_collections():
    """Iterate over every collection in the index and export each one."""
    body = {"query": {"match_all": {}}, "size": 9999}
    hits = es.search(index=collection_index, body=body)["hits"]["hits"]
    for hit in hits:
        record = hit["_source"]
        record["id"] = hit["_id"]
        export_collection(record)
# Script entry point: dump the whole Aleph instance as RDF.
if __name__ == "__main__":
    export_collections()
|
161840
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import numpy as np
import os
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
import sys
sys.path.append(root_dir)
from adversarial_robustness.cnns import *
from adversarial_robustness.datasets.svhn import SVHN
from adversarial_robustness.datasets.notmnist import notMNIST
from adversarial_robustness.datasets.mnist import MNIST
import pickle
import argparse
# Command-line flags for the adversarial-robustness training experiment.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--savedir", type=str,
    help="Place to save model")
parser.add_argument(
    "--name", type=str, default="",
    help="Model name")
parser.add_argument(
    "--dataset", type=str, default="",
    help="Dataset")
parser.add_argument(
    "--l2cs", type=float, default=0.0,
    help="L2 certainty sensitivity penalty")
parser.add_argument(
    "--l2dbl", type=float, default=0.0,
    help="L2 double backprop penalty")
parser.add_argument(
    "--lr", type=float, default=0.0002,
    help="learning rate")
parser.add_argument(
    "--adameps", type=float, default=1e-04,
    help="adam epsilon")
parser.add_argument(
    "--advtraineps", type=float, default=0.0,
    help="adversarial training epsilon")
parser.add_argument(
    "--distilltemp", type=float, default=1.0,
    help="temperature for distillation")
parser.add_argument(
    "--batchsize", type=int, default=256,
    help="batch size")
parser.add_argument(
    "--nbatches", type=int, default=15000,
    help="number of batches")
FLAGS = parser.parse_args()
name = FLAGS.name
model_dir = FLAGS.savedir
adv_X_dir = root_dir + '/cached/fgsm'
# Select the dataset, its CNN architecture, and the matching cached FGSM
# perturbation file.
if FLAGS.dataset == 'mnist':
    dataset = MNIST()
    CNN = MNIST_CNN
    fgsm_file = adv_X_dir + '/mnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'notmnist':
    dataset = notMNIST()
    CNN = MNIST_CNN
    fgsm_file = adv_X_dir + '/notmnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'svhn':
    dataset = SVHN()
    CNN = SVHN_CNN
    fgsm_file = adv_X_dir + '/svhn-normal-fgsm-perturbation.npy'
else:
    # ROBUSTNESS FIX: an unrecognized --dataset previously fell through and
    # crashed later with a confusing NameError on `dataset`; fail fast.
    raise ValueError('Unknown --dataset: {!r}'.format(FLAGS.dataset))
X = dataset.X
y = dataset.onehot_y
# Fixed evaluation subset (first 1024 test examples).
Xt = dataset.Xt[:1024]
yt = dataset.onehot_yt[:1024]
clip_min = dataset.X.min()
clip_max = dataset.X.max()
# Cached FGSM perturbation directions (signs) for the evaluation subset.
dX = np.sign(np.load(fgsm_file))[:1024]
def _fgsm(eps):
    # Adversarial examples at strength eps, clipped to the valid pixel range.
    return np.clip(Xt[:len(dX)] + eps * dX, clip_min, clip_max)
fgsm = { 0.1: _fgsm(0.1), 0.2: _fgsm(0.2), 0.3: _fgsm(0.3) }
epses = [0.1, 0.2, 0.3]
scores = {}
# Per-metric histories recorded during training (see curve_interval below).
train_curves = {}
train_curves['batch_number'] = []
train_curves['batch_accuracy'] = []
train_curves['cross_entropy'] = []
train_curves['l2_grad_logp_true'] = []
train_curves['l2_grad_logp_rest'] = []
train_curves['l2_grad_logp_all'] = []
train_curves['l2_param_grads'] = []
train_curves['adv_accuracy'] = []
train_curves['test_accuracy'] = []
batch_size = FLAGS.batchsize
num_batches = FLAGS.nbatches
# Epochs needed to cover the requested number of minibatches.
num_epochs = int(np.ceil(num_batches / (len(X) / batch_size)))
print(num_epochs)
# Defensive distillation (optional): train a teacher network at a high
# softmax temperature, then replace the hard labels with its soft outputs.
if FLAGS.distilltemp > 1.01:
    print('distillation')
    num_batches2 = min(FLAGS.nbatches, 10000)
    num_epochs2 = int(np.ceil(num_batches2 / (len(X) / batch_size)))
    cnn2 = CNN()
    cnn2.fit(X, y, softmax_temperature=FLAGS.distilltemp, learning_rate=FLAGS.lr, epsilon=FLAGS.adameps, num_epochs=num_epochs2, batch_size=batch_size)
    # Soft labels = teacher softmax at the distillation temperature.
    yhat = tf.nn.softmax(cnn2.logits/FLAGS.distilltemp)
    with tf.Session() as sess:
        cnn2.init(sess)
        # Predict in chunks of 1000 examples to bound memory use.
        ysmooth = yhat.eval(feed_dict={ cnn2.X: X[:1000] })
        for i in range(1000, len(X), 1000):
            ysmooth = np.vstack((ysmooth, yhat.eval(feed_dict={ cnn2.X: X[i:i+1000] })))
    y = ysmooth
    # Drop the teacher's graph before building the student network.
    tf.reset_default_graph()
# Build the student network plus gradient-norm diagnostics.
cnn = CNN()
# Sensitivity of the log-probabilities w.r.t. the input: all classes, the
# true class only, and all non-true classes.
cnn.l2_grad_logp_all = tf.nn.l2_loss(tf.gradients(cnn.logps, cnn.X)[0])
cnn.l2_grad_logp_true = tf.nn.l2_loss(tf.gradients(cnn.logps * cnn.y, cnn.X)[0])
cnn.l2_grad_logp_rest = tf.nn.l2_loss(tf.gradients(cnn.logps * (1-cnn.y), cnn.X)[0])
optimizer = tf.train.AdamOptimizer(
    learning_rate=FLAGS.lr,
    epsilon=FLAGS.adameps)
# Loss with optional gradient-regularization penalties (see flags).
loss_fn = cnn.loss_function(
    softmax_temperature=FLAGS.distilltemp,
    l2_certainty_sensitivity=FLAGS.l2cs,
    l2_double_backprop=FLAGS.l2dbl)
if FLAGS.advtraineps > 1e-06:
    print('adversarial training')
    # Average the clean and adversarial losses.
    adv_loss = cnn.adversarial_training_loss(FLAGS.advtraineps, clip_min, clip_max)
    loss_fn = (loss_fn + adv_loss) / 2.0
gradients, variables = zip(*optimizer.compute_gradients(loss_fn))
# Total L2 norm of all parameter gradients (recorded as a training curve).
cnn.l2_param_grads = tf.add_n([tf.nn.l2_loss(g) for g in gradients])
cnn.train_op = optimizer.apply_gradients(zip(gradients, variables))
batches = cnn.minibatches({ 'X': X, 'y': y }, batch_size=batch_size, num_epochs=num_epochs)
t = time.time()
i = 0
# How often (in minibatches) to checkpoint, log, and record curves.
checkpoint_interval = 2500
print_interval = 500
curve_interval = 100
filenames = []
# Main training loop: run minibatches, periodically checkpointing weights
# and recording diagnostic curves.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for batch in batches:
        batch[cnn.is_train] = True
        _, loss = sess.run([cnn.train_op, loss_fn], feed_dict=batch)
        # Periodic checkpoint of weights and the curves collected so far.
        if i % checkpoint_interval == 0:
            cnn.vals = [v.eval() for v in cnn.vars]
            filename = model_dir+'/{}-batch{}-cnn.pkl'.format(name, i)
            cnn.save(filename)
            filenames.append(filename)
            with open(model_dir+'/{}-batch{}-train-curves.pkl'.format(name,i), 'wb') as f:
                pickle.dump(train_curves, f)
        if i % print_interval == 0:
            print('Batch {}, loss {}, {}s'.format(i, loss, time.time() - t))
        # Record training-curve diagnostics on the current batch plus fixed
        # adversarial/clean evaluation subsets.
        if i % curve_interval == 0:
            values = sess.run([
                cnn.accuracy,
                cnn.l2_grad_logp_true,
                cnn.l2_grad_logp_rest,
                cnn.l2_grad_logp_all,
                cnn.l2_param_grads,
                cnn.cross_entropy,
                ], feed_dict=batch)
            train_curves['batch_number'].append(i)
            train_curves['batch_accuracy'].append(values[0])
            train_curves['l2_grad_logp_true'].append(values[1])
            train_curves['l2_grad_logp_rest'].append(values[2])
            train_curves['l2_grad_logp_all'].append(values[3])
            train_curves['l2_param_grads'].append(values[4])
            train_curves['cross_entropy'].append(values[5])
            # Accuracy on eps=0.2 FGSM examples and on clean test examples.
            train_curves['adv_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: fgsm[epses[1]][:512], cnn.y: yt[:512] }))
            train_curves['test_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: Xt[:512], cnn.y: yt[:512] }))
        i += 1
    # Final weight snapshot after the last batch (still inside the session).
    cnn.vals = [v.eval() for v in cnn.vars]
filename = model_dir+'/{}-cnn.pkl'.format(name)
cnn.save(filename)
filenames.append(filename)
# Round-trip each checkpoint through load/save.
# NOTE(review): presumably normalizes the pickle format -- confirm intent.
for filename in filenames:
    cnn2 = CNN()
    cnn2.load(filename)
    cnn2.save(filename)
with open(model_dir+'/{}-train-curves.pkl'.format(name), 'wb') as f:
    pickle.dump(train_curves, f)
# Plot every recorded curve against the minibatch number.
for key, values in train_curves.items():
    if key == 'batch_number':
        continue
    fig = plt.figure()
    plt.plot(train_curves['batch_number'], values, marker='o', lw=2)
    plt.title(key)
    plt.xlabel('Minibatch')
    plt.ylabel(key)
    if 'grad' in key:
        plt.yscale('log')
    plt.savefig(model_dir+'/{}-traincurves-{}.png'.format(name,key))
    plt.close(fig)
# Final clean and per-epsilon adversarial accuracies.
# NOTE(review): assumes cnn.score manages its own session after training --
# confirm against the CNN implementation.
scores[(name, 'norm')] = cnn.score(Xt, yt).accuracy
for eps in epses:
    scores[(name, eps)] = cnn.score(fgsm[eps], yt[:len(fgsm[eps])]).accuracy
print(scores)
with open(model_dir+'/{}-scores.pkl'.format(name), 'wb') as f:
    pickle.dump(scores, f)
with open(model_dir+'/{}-flags.pkl'.format(name), 'wb') as f:
    pickle.dump(vars(FLAGS), f)
|
161845
|
import os, sys
import numpy as np
import time
import argparse
import traceback
import glob
import trimesh
import math
import shutil
import json
import open3d as o3d
from tqdm import tqdm
import ctypes
import logging
from contextlib import closing
import multiprocessing as mp
from multiprocessing import Pool
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import config as cfg
from utils.pcd_utils import BBox
info = mp.get_logger().info
def compute_global_bbox():
    """Compute a dataset-wide cubic bounding box over all sample meshes.

    A worker pool folds each sample's mesh bounds into a shared (N, M)
    min/max array; the result is cubified, padded by ``bbox_displacement``,
    dumped to ``dataset_bbox_json`` and returned.

    Relies on module-level globals: N, M, n_jobs, sample_dirs,
    bbox_displacement, dataset_bbox_json.
    """
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    # create shared array for bbox
    shared_bbox = mp.Array(ctypes.c_double, N*M)
    bbox = to_numpy_array(shared_bbox)
    # By updating bbox, we update shared_bbox as well, since they share memory
    bbox = bbox.reshape((N, M))
    # Row 0 = running minimum, row 1 = running maximum.
    bbox[:, :] = np.array([[math.inf, math.inf, math.inf], [-math.inf, -math.inf, -math.inf]])
    #####################################################################################
    # Go over all animations
    #####################################################################################
    with closing(mp.Pool(processes=n_jobs, initializer=init, initargs=(shared_bbox,))) as p:
        # many processes access the same slice
        p.map_async(update_bbox, sample_dirs)
        # BUG FIX: Pool.join() must be called *after* close(); the original
        # called join() first, which raises ValueError on a running pool.
        p.close()
        p.join()
    print("done")
    final_bbox = to_numpy_array(shared_bbox)
    final_bbox = final_bbox.reshape((N, M))
    # assert np.all(np.isfinite(final_bbox)), final_bbox
    # Compute current extent
    p_min, p_max = final_bbox[0], final_bbox[1]
    non_cube_extent = p_max - p_min
    # Convert bbox to cube (equal extent on all axes)
    cube_extent = np.max(non_cube_extent) * np.ones_like(non_cube_extent)
    delta = cube_extent - non_cube_extent
    half_delta = delta / 2.0
    # Cube params: recenter so the cube shares the original bbox center
    p_min = p_min - half_delta
    extent = cube_extent
    # Enlarge bbox by a safety margin on every side
    p_min = p_min - bbox_displacement
    extent = extent + 2.0 * bbox_displacement
    # Update bbox
    final_bbox[0] = p_min
    final_bbox[1] = p_min + extent
    # assert np.all(np.isfinite(final_bbox)), final_bbox
    # Store bbox
    print("Dumping into json file:", dataset_bbox_json)
    with open(dataset_bbox_json, 'w') as f:
        json.dump(final_bbox.tolist(), f, indent=4)
    return final_bbox
def init(shared_bbox_):
    """Pool initializer: expose the shared bbox array as a worker global."""
    global shared_bbox
    shared_bbox = shared_bbox_  # must be inherited, not passed as an argument
def to_numpy_array(mp_arr):
    """View a multiprocessing.Array's buffer as a flat float64 numpy array (no copy)."""
    raw = mp_arr.get_obj()
    return np.frombuffer(raw)
def update_bbox(sample_dir):
    # Worker task: fold one sample's mesh bounds into the shared global bbox.
    print(sample_dir)
    """synchronized."""
    with shared_bbox.get_lock():  # synchronize access
        info(f"start {sample_dir}")
        mesh_raw_path = os.path.join(sample_dir, "mesh_raw.ply")
        assert os.path.exists(mesh_raw_path), f"update_bbox: {mesh_raw_path}"
        # Load meshes
        mesh = trimesh.load_mesh(mesh_raw_path, process=False, maintain_order=True)
        # Compute bbox of current mesh
        bbox_bounds = mesh.bounds
        bbox_min = bbox_bounds[0]
        bbox_max = bbox_bounds[1]
        # print(bbox_bounds)
        assert np.all(np.isfinite(bbox_bounds)), bbox_bounds
        # Update the total bbox after having taken into account the alignment to the origin
        bbox = to_numpy_array(shared_bbox)  # zero-copy view of the shared array
        bbox = bbox.reshape((N, M))  # N, M are module-level globals
        # Elementwise running min (row 0) and max (row 1).
        bbox[0] = np.minimum(bbox[0], bbox_min)
        bbox[1] = np.maximum(bbox[1], bbox_max)
        info(f"end {sample_dir}")
################################################################################################
################################################################################################
################################################################################################
def normalize_mesh(mesh):
    """Rescale a mesh's vertices in place using module-level normalization params.

    When ``compute_bbox`` is set, maps the global bbox (p_min, extent) onto
    the [-0.5, 0.5] cube; otherwise applies the precomputed trans/scale
    transform. Returns the (mutated) mesh.
    """
    if compute_bbox:
        # Map into the unit cube, then recenter around the origin.
        unit_coords = (mesh.vertices - p_min) / extent
        new_vertices = unit_coords - 0.5
    else:
        new_vertices = scale * (mesh.vertices - trans)
    mesh.vertices = new_vertices
    return mesh
def normalize_meshes(sample_dir):
    """Normalize all meshes found under `sample_dir` and write them back to disk.

    Handles the main mesh ("mesh_raw.ply"), an optional real scan
    ("mesh_real_scan.ply", overwritten in place) and an optional body mesh
    ("mesh_body_raw.ply"). Errors are printed but never abort the worker,
    so one bad sample cannot kill the multiprocessing pool.
    """
    try:
        # Normal mesh
        mesh_raw_path = os.path.join(sample_dir, "mesh_raw.ply")
        if os.path.exists(mesh_raw_path):
            normalized_mesh_path = os.path.join(sample_dir, "mesh_normalized.ply")
            if OVERWRITE or not os.path.isfile(normalized_mesh_path):
                mesh = trimesh.load_mesh(mesh_raw_path, process=False, maintain_order=True)
                mesh = normalize_mesh(mesh)
                trimesh.Trimesh.export(mesh, normalized_mesh_path, 'ply')
                print("\tWriting mesh into:", normalized_mesh_path)
                if VIZ:
                    mesh_o3d = o3d.io.read_triangle_mesh(normalized_mesh_path)
                    mesh_o3d.compute_vertex_normals()
                    o3d.visualization.draw_geometries([world_frame, unit_bbox, mesh_o3d])
        ###################################################################################
        # Real scan if exists (normalized in place — there is no separate output name)
        real_scan_path = os.path.join(sample_dir, "mesh_real_scan.ply")
        if os.path.isfile(real_scan_path):
            mesh = trimesh.load_mesh(real_scan_path, process=False, maintain_order=True)
            mesh = normalize_mesh(mesh)
            trimesh.Trimesh.export(mesh, real_scan_path, 'ply')
            print("\t\tWriting real scan into:", real_scan_path)
        ###################################################################################
        # Body mesh if exists
        body_mesh_raw_path = os.path.join(sample_dir, "mesh_body_raw.ply")
        if os.path.isfile(body_mesh_raw_path):
            body_mesh_normalized_path = os.path.join(sample_dir, "mesh_body_normalized.ply")
            if OVERWRITE_BODY or not os.path.isfile(body_mesh_normalized_path):
                mesh = trimesh.load_mesh(body_mesh_raw_path, process=False, maintain_order=True)
                mesh = normalize_mesh(mesh)
                trimesh.Trimesh.export(mesh, body_mesh_normalized_path, 'ply')
                print("\t\tWriting body mesh into:", body_mesh_normalized_path)
    except Exception:
        # `except Exception` instead of a bare except: keep the best-effort
        # behavior but let KeyboardInterrupt/SystemExit propagate to the pool.
        print('\t------------ Error with {}: {}'.format(sample_dir, traceback.format_exc()))
if __name__ == '__main__':
    # Worker count: honor the SLURM allocation when running on a cluster.
    try:
        n_jobs = int(os.environ['SLURM_CPUS_ON_NODE'])
    except:
        n_jobs = 4
    print()
    print(f"Using {n_jobs} jobs")
    mp.freeze_support()
    # Canonical target cube: meshes are normalized into [-0.5, 0.5]^3.
    p_min = -0.5
    p_max = 0.5
    # Flag to visualize meshes for debugging
    VIZ = False
    if VIZ:
        unit_bbox = BBox.compute_bbox_from_min_point_and_max_point(
            np.array([p_min]*3), np.array([p_max]*3)
        )
        world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=0.5, origin=[0, 0, 0]
        )
    #####################################################################################
    #####################################################################################
    dataset = 'cape'
    #####################################################################################
    #####################################################################################
    OVERWRITE_BBOX_COMPUTATION = False
    OVERWRITE = False  # for the general mesh
    OVERWRITE_BODY = False  # for the body mesh, in case the dataset has such meshes
    #####################################################################################
    compute_bbox = False  # Keep it to False, unless you wanna play with the normalization etc.
    #####################################################################################
    if not compute_bbox:
        input("Using predefined bbox and scale to normalize - Recommened!")
    else:
        input("Computing dataset-specific bbox to normalize")
    target_animations = []
    if compute_bbox:
        # bbox array dimensions ([[bbox_min], [bbox_max]])
        N, M = 2, 3
        # bbox displacement
        bbox_displacement = 0.01
    else:
        # Predefined per-dataset scale/translation used by normalize_mesh().
        # scale
        scale = 1.0
        trans = 0.0
        if 'mano' in dataset:
            scale = 0.75
            bbox_displacement = 0.0
        elif 'cape' in dataset:
            scale = 0.4
            bbox_displacement = 0.0
            # Load our precomputed bbox to normalize the dataset to reside within a unit cube
            predefined_bbox_json_path = os.path.join("bbox.json")
            assert os.path.isfile(predefined_bbox_json_path)
            with open(predefined_bbox_json_path, 'r') as f:
                predefined_bbox = json.loads(f.read())
            predefined_bbox = np.array(predefined_bbox)
            # Translate by the bbox center so meshes end up origin-centered.
            trans = (predefined_bbox[0] + predefined_bbox[1]) / 2.
        else:
            print("dataset is not implemented")
            exit()
    ####################
    dataset_dir = os.path.join(cfg.ROOT, "datasets", dataset)
    assert os.path.isdir(dataset_dir), dataset_dir
    print("dataset_dir:", dataset_dir)
    # Prepare the list of all sample dirs
    sample_dirs = sorted(glob.glob(dataset_dir + "/*/*/*"))
    ########################################################################################################
    # 1. Compute global bbox
    ########################################################################################################
    if compute_bbox:
        dataset_bbox_json = os.path.join(dataset_dir, "bbox.json")
        if OVERWRITE_BBOX_COMPUTATION or not os.path.isfile(dataset_bbox_json):
            print()
            input("Need to compute bbox. Do I go ahead?")
            bbox = compute_global_bbox()
        else:
            print()
            input("Already had bbox - Load it?")
            with open(dataset_bbox_json, 'r') as f:
                bbox = json.loads(f.read())
            bbox = np.array(bbox)
        print("bbox ready!")
        print(bbox)
    ########################################################################################################
    # 2. Normalize meshes to lie within a common bbox
    ########################################################################################################
    print()
    print("#"*60)
    print(f"Will normalize {len(sample_dirs)} meshes!")
    print("#"*60)
    input("Continue?")
    if compute_bbox:
        # Expose the computed bbox through the globals normalize_mesh() reads.
        p_min = bbox[0]
        p_max = bbox[1]
        extent = p_max - p_min
    p_norm = Pool(n_jobs)
    p_norm.map(normalize_meshes, sample_dirs)
    print("Done!")
|
161851
|
from common import *
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# resnet18 : BasicBlock, [2, 2, 2, 2]
# resnet34 : BasicBlock, [3, 4, 6, 3]
# resnet50 : Bottleneck [3, 4, 6, 3]
#
# https://medium.com/neuromation-io-blog/deepglobe-challenge-three-papers-from-neuromation-accepted-fe09a1a7fa53
# https://github.com/ternaus/TernausNetV2
# https://github.com/neptune-ml/open-solution-salt-detection
# https://github.com/lyakaap/Kaggle-Carvana-3rd-Place-Solution
##############################################################3
# https://github.com/neptune-ml/open-solution-salt-detection/blob/master/src/unet_models.py
# https://pytorch.org/docs/stable/torchvision/models.html
import torchvision
class ConvBn2d(nn.Module):
    """A bias-free Conv2d immediately followed by BatchNorm2d."""

    def __init__(self, in_channels, out_channels, kernel_size=(3,3), stride=(1,1), padding=(1,1)):
        super(ConvBn2d, self).__init__()
        # bias=False: the BatchNorm affine shift makes a conv bias redundant.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, z):
        return self.bn(self.conv(z))
class SEScale(nn.Module):
    """Squeeze-and-Excitation gate: global average pool -> bottleneck FC
    (as 1x1 convs) -> sigmoid. Returns a per-channel scale in (0, 1) of
    shape (B, C, 1, 1) to be multiplied into the feature map."""

    def __init__(self, channel, reduction=16):
        super(SEScale, self).__init__()
        # 1x1 convolutions act as the squeeze/excite fully-connected layers.
        self.fc1 = nn.Conv2d(channel, reduction, kernel_size=1, padding=0)
        self.fc2 = nn.Conv2d(reduction, channel, kernel_size=1, padding=0)

    def forward(self, x):
        x = F.adaptive_avg_pool2d(x, 1)
        x = self.fc1(x)
        x = F.relu(x, inplace=True)
        x = self.fc2(x)
        # torch.sigmoid replaces the deprecated F.sigmoid (same numerics).
        x = torch.sigmoid(x)
        return x
class SEBottleneck(nn.Module):
    """ResNet-style bottleneck (1x1 -> 3x3 -> 1x1 ConvBn) with an SE channel
    gate and a residual connection (identity or strided 1x1 projection)."""

    def __init__(self, in_planes, planes, out_planes, reduction, is_downsample=False, stride=1):
        super(SEBottleneck, self).__init__()
        self.is_downsample = is_downsample
        self.conv_bn1 = ConvBn2d(in_planes, planes, kernel_size=1, padding=0, stride=1)
        self.conv_bn2 = ConvBn2d(planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv_bn3 = ConvBn2d(planes, out_planes, kernel_size=1, padding=0, stride=1)
        self.scale = SEScale(out_planes, reduction)
        if is_downsample:
            # Project the input so it matches the main branch's shape.
            self.downsample = ConvBn2d(in_planes, out_planes, kernel_size=1, padding=0, stride=stride)

    def forward(self, x):
        out = F.relu(self.conv_bn1(x), inplace=True)
        out = F.relu(self.conv_bn2(out), inplace=True)
        out = self.conv_bn3(out)
        out = out * self.scale(out)
        residual = self.downsample(x) if self.is_downsample else x
        return F.relu(out + residual, inplace=True)
# layers ##---------------------------------------------------------------------
def make_layer(in_planes, planes, out_planes, reduction, num_blocks, stride):
    """Stack `num_blocks` SE bottlenecks; only the first block strides and
    projects the residual (is_downsample=True)."""
    blocks = [SEBottleneck(in_planes, planes, out_planes, reduction,
                           is_downsample=True, stride=stride)]
    blocks.extend(
        SEBottleneck(out_planes, planes, out_planes, reduction)
        for _ in range(1, num_blocks)
    )
    return nn.Sequential(*blocks)
class Decoder(nn.Module):
    """U-Net decoder stage: 2x bilinear upsample followed by two ConvBn+ReLU."""

    def __init__(self, in_channels, channels, out_channels):
        super(Decoder, self).__init__()
        self.conv1 = ConvBn2d(in_channels, channels, kernel_size=3, padding=1)
        self.conv2 = ConvBn2d(channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        # F.interpolate replaces the deprecated F.upsample; identical behavior.
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)  # False
        x = F.relu(self.conv1(x), inplace=True)
        x = F.relu(self.conv2(x), inplace=True)
        return x
###########################################################################################3
class UNetSEResNet50(nn.Module):
    """U-Net with an SE-ResNet50-style encoder for binary mask prediction.

    Input is a single-channel image in [0, 1]; forward() replicates it to
    three mean-subtracted channels. Output is a 1-channel logit map.
    """

    def __init__(self ):
        super().__init__()
        # Stem: 7x7 stride-2 conv; the accompanying max-pool is applied in forward().
        self.conv1 = nn.Sequential(
            ConvBn2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(inplace=True),
        )
        self.encoder2 = make_layer ( 64, 64, 256, reduction= 16, num_blocks=3, stride=1) #out = 64*4 = 256
        self.encoder3 = make_layer ( 256, 128, 512, reduction= 32, num_blocks=4, stride=2) #out = 128*4 = 512
        self.encoder4 = make_layer ( 512, 256, 1024, reduction= 64, num_blocks=6, stride=2) #out = 256*4 = 1024
        self.encoder5 = make_layer (1024, 512, 2048, reduction=128, num_blocks=3, stride=2) #out = 512*4 = 2048
        self.center = nn.Sequential(
            ConvBn2d(2048, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            ConvBn2d(512, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        # Decoder in_channels = skip-connection channels + previous stage output.
        self.decoder5 = Decoder(2048+256, 512, 256)
        self.decoder4 = Decoder(1024+256, 512, 256)
        self.decoder3 = Decoder( 512+256, 256, 64)
        self.decoder2 = Decoder( 256+ 64, 128, 128)
        self.decoder1 = Decoder( 128 , 128, 32)
        self.logit = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 1, kernel_size=1, padding=0),
        )

    def forward(self, x):
        #batch_size,C,H,W = x.shape
        # Replicate the grey channel to 3 channels, subtracting per-channel
        # means and rescaling to roughly [0, 255].
        mean=[0.40784314, 0.45882353, 0.48235294]
        x = torch.cat([
            (x-mean[0])*255,
            (x-mean[1])*255,
            (x-mean[2])*255,
        ],1)
        x = self.conv1(x)
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        e2 = self.encoder2( x) #; print('e2',e2.size())
        e3 = self.encoder3(e2) #; print('e3',e3.size())
        e4 = self.encoder4(e3) #; print('e4',e4.size())
        e5 = self.encoder5(e4) #; print('e5',e5.size())
        #f = F.max_pool2d(e5, kernel_size=2, stride=2 ) #; print(f.size())
        #f = F.upsample(f, scale_factor=2, mode='bilinear', align_corners=True)#False
        #f = self.center(f) #; print('center',f.size())
        f = self.center(e5)
        f = self.decoder5(torch.cat([f, e5], 1)) #; print('d5',f.size())
        f = self.decoder4(torch.cat([f, e4], 1)) #; print('d4',f.size())
        f = self.decoder3(torch.cat([f, e3], 1)) #; print('d3',f.size())
        f = self.decoder2(torch.cat([f, e2], 1)) #; print('d2',f.size())
        f = self.decoder1(f) # ; print('d1',f.size())
        #f = F.dropout2d(f, p=0.20)
        logit = self.logit(f) #; print('logit',logit.size())
        return logit

    ##-----------------------------------------------------------------
    def criterion(self, logit, truth ):
        # Focal loss on sigmoid outputs; commented alternatives kept for experiments.
        #loss = PseudoBCELoss2d()(logit, truth)
        loss = FocalLoss2d()(logit, truth, type='sigmoid')
        #loss = RobustFocalLoss2d()(logit, truth, type='sigmoid')
        return loss

    # def criterion(self,logit, truth):
    #
    #     loss = F.binary_cross_entropy_with_logits(logit, truth)
    #     return loss

    def metric(self, logit, truth, threshold=0.5 ):
        # Thresholded pixel accuracy (named `dice` by callers).
        prob = F.sigmoid(logit)
        dice = accuracy(prob, truth, threshold=threshold, is_average=True)
        return dice

    def set_mode(self, mode ):
        # Record the requested mode and toggle train/eval accordingly.
        self.mode = mode
        if mode in ['eval', 'valid', 'test']:
            self.eval()
        elif mode in ['train']:
            self.train()
        else:
            raise NotImplementedError
SaltNet = UNetSEResNet50  # public alias: the architecture the training scripts import
### run ##############################################################################
def run_check_net():
    """Smoke-test SaltNet on random data: one forward/backward pass plus a
    short SGD loop to check the loss can be driven down (requires CUDA)."""
    batch_size = 8
    C, H, W = 1, 128, 128
    inp = torch.from_numpy(
        np.random.uniform(0, 1, (batch_size, C, H, W)).astype(np.float32)
    ).float().cuda()
    tgt = torch.from_numpy(
        np.random.choice(2, (batch_size, C, H, W)).astype(np.float32)
    ).float().cuda()

    net = SaltNet().cuda()
    net.set_mode('train')

    logit = net(inp)
    loss = net.criterion(logit, tgt)
    dice = net.metric(logit, tgt)
    print('loss : %0.8f'%loss.item())
    print('dice : %0.8f'%dice.item())
    print('')

    # dummy sgd to see if it can converge ...
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=0.1, momentum=0.9, weight_decay=0.0001)
    optimizer.zero_grad()
    for step in range(501):
        logit = net(inp)
        loss = net.criterion(logit, tgt)
        dice = net.metric(logit, tgt)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if step % 20 == 0:
            print('[%05d] loss, dice : %0.5f,%0.5f'%(step, loss.item(),dice.item()))
########################################################################################
if __name__ == '__main__':
    # Entry point: run the random-data smoke test for the network.
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    run_check_net()
    print( 'sucessful!')
|
161869
|
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
    """Empire module wrapper for Invoke-Rubeus (C# Kerberos abuse toolkit)."""

    def __init__(self, mainMenu, params=None):
        """Store module metadata/options and apply any [name, value] params.

        `params` now defaults to None instead of a shared mutable list
        (mutable-default-argument pitfall); behavior is unchanged.
        """
        # Metadata info about the module, not modified during runtime
        self.info = {
            # Name for the module that will appear in module menus
            'Name': 'Invoke-Rubeus',

            # List of one or more authors for the module
            'Author': ['@harmj0y', '@S3cur3Th1sSh1t'],

            # More verbose multi-line description of the module
            'Description': ("Rubeus is a C# toolset for raw Kerberos interaction and abuses. "
                            "It is heavily adapted from <NAME>'s Kekeo project (CC BY-NC-SA 4.0 license) "
                            "and <NAME>'s MakeMeEnterpriseAdmin project (GPL v3.0 license). Full credit goes "
                            "to Benjamin and Vincent for working out the hard components of weaponization- without "
                            "their prior work this project would not exist."),
            'Software': '',
            'Techniques': ['T1208', 'T1097'],
            # True if the module needs to run in the background
            'Background': False,
            # File extension to save the file as
            'OutputExtension': None,
            # True if the module needs admin rights to run
            'NeedsAdmin': False,
            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe': True,
            # The language for this module
            'Language': 'powershell',
            # The minimum PowerShell version needed for the module to run
            'MinLanguageVersion': '2',
            # List of any references/other comments
            'Comments': [
                'https://github.com/GhostPack/Rubeus',
                'https://github.com/S3cur3Th1sSh1t/PowerSharpPack'
            ]
        }

        # Any options needed by the module, settable during runtime
        self.options = {
            # Format:
            #   value_name : {description, required, default_value}
            'Agent': {
                # The 'Agent' option is the only one that MUST be in a module
                'Description': 'Agent to run on.',
                'Required': True,
                'Value': ''
            },
            'Command': {
                'Description': 'Use available Rubeus commands as a one-liner. ',
                'Required': False,
                'Value': '',
            },
        }

        # Save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply any [option_name, value] pairs passed at instantiation;
        # unknown option names are silently ignored.
        if params:
            for option, value in params:
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the final PowerShell payload: module source + Invoke-Rubeus call.

        Returns the script text, or "" if the module source cannot be read.
        """
        # First method: Read in the source script from module_source
        module_source = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Rubeus.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=module_source, obfuscationCommand=obfuscationCommand)
            module_source = module_source.replace("module_source", "obfuscated_module_source")
        try:
            # Context manager closes the handle even on read errors (the
            # original leaked it); `except Exception` instead of a bare except
            # so SystemExit/KeyboardInterrupt still propagate.
            with open(module_source, 'r') as f:
                module_code = f.read()
        except Exception:
            print(helpers.color("[!] Could not read module source path at: " + str(module_source)))
            return ""

        script = module_code
        script_end = 'Invoke-Rubeus -Command "'

        # Add any arguments to the end execution of the script
        if self.options['Command']['Value']:
            script_end += " " + str(self.options['Command']['Value'])
            # Collapse the space after the opening quote: '"  cmd' -> '"cmd'
            script_end = script_end.replace('" ', '"')
        script_end += '"'

        if obfuscate:
            script_end = helpers.obfuscate(psScript=script_end, installPath=self.mainMenu.installPath,
                                           obfuscationCommand=obfuscationCommand)
        script += script_end
        script = helpers.keyword_obfuscation(script)
        return script
|
161880
|
from django.urls import path
from . import views
from qa.views import UserAnswerList, UserQuestionList
# URL namespace for reversing, e.g. "user_profile:profile".
app_name = "user_profile"
urlpatterns = [
    # Email-verification link sent on signup (uid + token from the email).
    path("activate/<uidb64>/<token>/", views.EmailVerify.as_view(), name="activate"),
    # Public profile page.
    path("<int:id>/<str:username>/", views.profile, name="profile"),
    path(
        "<int:user_id>/<str:user_name>/edit", views.ProfileEdit.as_view(), name="edit"
    ),
    # AJAX avatar upload endpoint.
    path("avatar/upload/", views.ProfileImageUplade.as_view(), name="avatar_upload"),
    # Per-user Q&A listings, served by the qa app's views.
    path(
        "<int:user_id>/<str:user_name>/questions",
        UserQuestionList.as_view(),
        name="user_questions_list",
    ),
    path(
        "<int:user_id>/<str:user_name>/answers",
        UserAnswerList.as_view(),
        name="user_answers_list",
    ),
    path("list/", views.UsersList.as_view(), name="user_list"),
]
|
161885
|
def count_and_say(palavra):
    """Return a string listing how often each character occurs in `palavra`.

    Spaces are ignored; characters are reported in first-appearance order as
    "<count> <char>" pairs separated by single spaces.

    >>> count_and_say('hello')
    '1 h 1 e 2 l 1 o'
    """
    counts = {}
    for letra in palavra.replace(' ', ''):
        # dict.get avoids the double lookup of `in d.keys()` + indexing.
        counts[letra] = counts.get(letra, 0) + 1
    # str.join instead of quadratic += concatenation; no trailing space to strip.
    return ' '.join('%d %s' % (qtd, letra) for letra, qtd in counts.items())
|
161927
|
from utils.dataset_utils import *
from utils.iterator_utils import *
from utils.vocab_utils import *
from utils.metrics_utils import *
from utils.image_utils import *
from utils.params_utils import *
|
161997
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
import utils
import algorithms.modules as m
from algorithms.sac import SAC
class RAD(SAC):
    """RAD agent: identical to the SAC base agent in this class.

    NOTE(review): data augmentation is presumably applied by the training
    pipeline / replay buffer rather than here — confirm against the trainer.
    """
    def __init__(self, obs_shape, action_shape, args):
        super().__init__(obs_shape, action_shape, args)
161998
|
import glob
import multiprocessing as mp
import os
import gdown
import tqdm
NUM_THREADS = 8
BASE_DIR = "downloads"
def make_download_url(drive_url):
    """Convert a Google Drive share link ("...open?id=<ID>") into a direct
    download URL understood by gdown."""
    file_id = drive_url.split("?id=")[1]
    return "https://drive.google.com/uc?id=" + file_id
def download_and_extract(func_args):
    """Download one Drive tar archive and unpack it into its output directory.

    func_args: (drive share URL, filename stem, output directory). Skips the
    download if a matching file already exists under out_dir. The archive is
    removed after successful extraction.
    """
    import subprocess  # local import: keeps the module's import surface unchanged
    drive_url, filename, out_dir = func_args
    os.makedirs(out_dir, exist_ok=True)
    if len(glob.glob("%s/%s*" % (out_dir, filename))):
        # already downloaded
        return
    url = make_download_url(drive_url)
    download_path = "%s/%s.tar" % (out_dir, filename)
    gdown.download(url, output=download_path)
    if os.path.exists(download_path):
        # Argument-list subprocess call instead of os.system: immune to spaces
        # and shell metacharacters in the paths.
        subprocess.run(["tar", "xf", download_path, "-C", out_dir])
        os.remove(download_path)
if __name__ == "__main__":
    os.makedirs(BASE_DIR, exist_ok=True)
    os.chdir(BASE_DIR)
    # Validation set ships as a single archive.
    val_url = "https://drive.google.com/open?id=1ixet4jFn1zXRUG5kfwoczFPXjpf7EXFi"
    download_and_extract((val_url, "val", "."))
    # Train set: one archive per line of the info file, fetched in parallel.
    # Use the module-level NUM_THREADS constant instead of a hard-coded 8 so
    # the two stay consistent.
    pool = mp.Pool(NUM_THREADS)
    drive_links = [
        line.strip().split(" ")
        for line in open(
            os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "info_files", "r2v2_drive_urls.txt")
        )
    ]
    filenames, drive_urls = list(zip(*drive_links))
    arg_list = list(zip(drive_urls, filenames, ["train"] * len(filenames)))
    # tqdm wraps the imap iterator to show progress; list() drains it.
    list(tqdm.tqdm(pool.imap(download_and_extract, arg_list), total=len(filenames)))
|
162005
|
from ..registry import DETECTORS
from .single_stage import SingleStageDetector
from mmdet.core import bbox2result
@DETECTORS.register_module
class SipMask(SingleStageDetector):
    """SipMask single-stage detector: standard SingleStageDetector wiring plus
    a simple_test that also returns per-image segmentation output."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(SipMask, self).__init__(backbone, neck, bbox_head, train_cfg,
                                      test_cfg, pretrained)

    def simple_test(self, img, img_metas, rescale=False):
        # Single-image (no TTA) inference. Returns (bbox_results, segm_results)
        # for the first image of the batch.
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        bbox_inputs = outs + (img_metas, self.test_cfg, rescale)
        bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
        # bbox_results = [
        #     bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
        #     for det_bboxes, det_labels in bbox_list
        # ]
        # Each bbox_list entry is a 3-tuple; the third element (`aa`) is
        # presumably the per-detection mask output of get_bboxes — TODO confirm
        # against this bbox_head's implementation.
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for det_bboxes, det_labels, aa in bbox_list
        ]
        segm_results = [
            aa
            for det_bboxes, det_labels, aa in bbox_list
        ]
        # aa= bbox_list[0][0][:,-1]>0.5
        return bbox_results[0], segm_results[0]
|
162009
|
from pyqode.core.api import encodings
def test_convert_to_code_key():
    """An uppercase/hyphenated encoding name maps to its codec key."""
    result = encodings.convert_to_codec_key('UTF-8')
    assert result == 'utf_8'
|
162013
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from copy import copy
from .layers import ConvexQuadratic, View, WeightTransformedLinear
class GradNN(nn.Module):
    """Base class for scalar potential networks whose *gradient* defines a map.

    Subclasses implement forward() returning one scalar per input row;
    push() then computes x -> grad_x f(x) via autograd.
    """

    def __init__(self, batch_size=1024):
        super(GradNN, self).__init__()
        # Maximum chunk size used when pushing large batches.
        self.batch_size = batch_size

    def forward(self, input):
        # Abstract: subclasses must return a per-row scalar potential.
        pass

    def push(self, input, create_graph=True, retain_graph=True):
        '''
        Pushes input by using the gradient of the network. By default preserves the computational graph.
        # Apply to small batches.
        '''
        if len(input) <= self.batch_size:
            output = autograd.grad(
                outputs=self.forward(input), inputs=input,
                create_graph=create_graph, retain_graph=retain_graph,
                only_inputs=True,
                grad_outputs=torch.ones_like(input[:, :1], requires_grad=False)
            )[0]
            return output
        # Batch too large for a single pass: recurse over chunks.
        output = torch.zeros_like(input, requires_grad=False)
        for j in range(0, input.size(0), self.batch_size):
            output[j: j + self.batch_size] = self.push(
                input[j: j + self.batch_size],
                create_graph=create_graph, retain_graph=retain_graph)
        return output

    def push_nograd(self, input):
        '''
        Pushes input by using the gradient of the network. Does not preserve the computational graph.
        Use for pushing large batches (the function uses minibatches).
        '''
        output = torch.zeros_like(input, requires_grad=False)
        for i in range(0, len(input), self.batch_size):
            # Fix: actually use the sliced batch (the original computed
            # `input_batch` and then left it unused, re-slicing instead).
            input_batch = input[i:i + self.batch_size]
            output.data[i:i + self.batch_size] = self.push(
                input_batch,
                create_graph=False, retain_graph=False
            ).data
        return output

    def hessian(self, input):
        """Per-sample Hessians of the potential, shape (batch, dim, dim).

        NOTE: relies on the subclass defining self.dim. The original's unused
        pre-allocated `hessian` tensor (immediately overwritten by torch.cat)
        has been removed.
        """
        gradient = self.push(input)
        hessian = torch.cat(
            [
                torch.autograd.grad(
                    outputs=gradient[:, d], inputs=input,
                    create_graph=True, retain_graph=True,
                    only_inputs=True,
                    grad_outputs=torch.ones(input.size()[0]).float().to(input)
                )[0][:, None, :]
                for d in range(self.dim)
            ],
            dim=1
        )
        return hessian
class LinDenseICNN(GradNN):
    '''
    Fully Connected ICNN which follows the [Makkuva et.al.] article:
    (https://arxiv.org/pdf/1908.10962.pdf)

    NOTE: __init__ raises immediately, so this class is intentionally
    disabled; everything after the raise is unreachable until it is fixed.
    '''
    def __init__(
        self, in_dim,
        hidden_layer_sizes=[32, 32],
        activation=torch.celu,
        strong_convexity=1e-6,
        batch_size=1024,
        device='cuda'):
        raise Exception("Not working yet!")
        # --- unreachable below (kept for the eventual fix) ---
        super().__init__(batch_size)
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.in_dim = in_dim
        self.device = device
        self.strong_convexity = strong_convexity
        _hidden = copy(self.hidden_layer_sizes)
        # W layers: hidden-to-hidden; must stay nonnegative for convexity.
        w_sizes = zip(_hidden[:-1], _hidden[1:])
        self.W_layers = nn.ModuleList([
            nn.Linear(in_dim, out_dim)
            for in_dim, out_dim in w_sizes
        ])
        # A layers: input skip connections into every hidden layer.
        self.A_layers = nn.ModuleList([
            nn.Linear(self.in_dim, out_dim)
            for out_dim in _hidden
        ])
        self.final_layer = nn.Linear(self.hidden_layer_sizes[-1], 1, bias=False)
        self.to(self.device)

    def forward(self, input):
        # Standard ICNN recursion: z_{k+1} = act(A_k(input) + W_k(z_k)).
        z = self.activation(self.A_layers[0](input))
        for a_layer, w_layer in zip(self.A_layers[1:], self.W_layers[:]):
            z = self.activation(a_layer(input) + w_layer(z))
        # Quadratic term enforces strong convexity of the potential.
        return self.final_layer(z) + .5 * self.strong_convexity * (input ** 2).sum(dim=1).reshape(-1, 1)

    def convexify(self):
        # Project W weights onto the convexity constraint (w >= 0).
        for layer in self.W_layers:
            assert isinstance(layer, nn.Linear)
            layer.weight.data.clamp_(0)
        self.final_layer.weight.data.clamp_(0)

    def relaxed_convexity_regularization(self):
        # Penalize negative W weights instead of hard clamping.
        regularizer = 0.
        for layer in self.W_layers:
            assert isinstance(layer, nn.Linear)
            regularizer += layer.weight.clamp(max=0.).pow(2).sum()
        regularizer += self.final_layer.weight.clamp(max=0.).pow(2).sum()
        return regularizer
class DenseICNN(GradNN):
    '''Fully Connected ICNN with input-quadratic skip connections.'''
    def __init__(
        self, dim,
        hidden_layer_sizes=[32, 32, 32],
        rank=1, activation='celu',
        strong_convexity=1e-6,
        batch_size=1024,
        conv_layers_w_trf=lambda x: x,
        forse_w_positive=True
    ):
        # dim: input dimensionality; rank: rank of the quadratic skip maps.
        # conv_layers_w_trf: transform applied to convex-layer weights.
        # forse_w_positive [sic]: clamp convex-layer weights to >= 0 in convexify().
        super(DenseICNN, self).__init__(batch_size)
        self.dim = dim
        self.strong_convexity = strong_convexity
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.rank = rank
        self.conv_layers_w_trf = conv_layers_w_trf
        self.forse_w_positive = forse_w_positive
        # Input-quadratic skip connection into every hidden layer.
        self.quadratic_layers = nn.ModuleList([
            ConvexQuadratic(dim, out_features, rank=rank, bias=True)
            for out_features in hidden_layer_sizes
        ])
        # Hidden-to-hidden layers; weights must stay nonnegative for convexity.
        sizes = zip(hidden_layer_sizes[:-1], hidden_layer_sizes[1:])
        self.convex_layers = nn.ModuleList([
            WeightTransformedLinear(
                in_features, out_features, bias=False, w_transform=self.conv_layers_w_trf)
            for (in_features, out_features) in sizes
        ])
        self.final_layer = WeightTransformedLinear(
            hidden_layer_sizes[-1], 1, bias=False, w_transform=self.conv_layers_w_trf)

    def forward(self, input):
        '''Evaluation of the discriminator value. Preserves the computational graph.'''
        output = self.quadratic_layers[0](input)
        for quadratic_layer, convex_layer in zip(self.quadratic_layers[1:], self.convex_layers):
            output = convex_layer(output) + quadratic_layer(input)
            if self.activation == 'celu':
                output = torch.celu(output)
            elif self.activation == 'softplus':
                output = F.softplus(output)
            elif self.activation == 'relu':
                output = F.relu(output)
            else:
                raise Exception('Activation is not specified or unknown.')
        # Quadratic term makes the potential strongly convex.
        return self.final_layer(output) + .5 * self.strong_convexity * (input ** 2).sum(dim=1).reshape(-1, 1)

    def convexify(self):
        # Project convex-layer weights back onto the constraint (w >= 0).
        if self.forse_w_positive:
            for layer in self.convex_layers:
                if (isinstance(layer, nn.Linear)):
                    layer.weight.data.clamp_(0)
            self.final_layer.weight.data.clamp_(0)
|
162042
|
from gbdxtools import Interface
gbdx = None
def go():
    """Print the available GBDX tasks and the HelloGBDX task definition."""
    registry = gbdx.task_registry
    print(registry.list())
    print(registry.get_definition('HelloGBDX'))
if __name__ == "__main__":
    # Authenticate against GBDX (reads local credentials) before querying.
    gbdx = Interface()
    go()
|
162045
|
import os
from subaligner.predictor import Predictor
from subaligner.subtitle import Subtitle
if __name__ == "__main__":
    # Resolve test media paths relative to this examples directory.
    examples_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(examples_dir, "tmp")
    os.makedirs(output_dir, exist_ok=True)
    video_file_path = os.path.join(examples_dir, "..", "tests/subaligner/resource/test.mp4")
    srt_file_path = os.path.join(examples_dir, "..", "tests/subaligner/resource/test.srt")
    predictor = Predictor()
    # Single-pass alignment.
    subs, audio_file_path, voice_probabilities, frame_rate = predictor.predict_single_pass(video_file_path, srt_file_path)
    aligned_subtitle_path = os.path.join(output_dir, "test_aligned_1.srt")
    Subtitle.export_subtitle(srt_file_path, subs, aligned_subtitle_path)
    print("Aligned subtitle saved to: {}".format(aligned_subtitle_path))
    log_loss = predictor.get_log_loss(voice_probabilities, subs)
    print("Alignment finished with overall loss: {}".format(log_loss))
    # Dual-pass alignment with stretching disabled.
    subs_list, subs, voice_probabilities, frame_rate = predictor.predict_dual_pass(video_file_path, srt_file_path, stretch=False)
    aligned_subtitle_path = os.path.join(output_dir, "test_aligned_2.srt")
    Subtitle.export_subtitle(srt_file_path, subs_list, aligned_subtitle_path)
    print("Aligned subtitle saved to: {}".format(aligned_subtitle_path))
    log_loss = predictor.get_log_loss(voice_probabilities, subs)
    print("Alignment finished with overall loss: {}".format(log_loss))
|
162049
|
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.spatial.transform import Rotation
from tadataka.matrix import motion_matrix
from tadataka.rigid_transform import (inv_transform_all, transform_all,
transform_each, Transform, transform_se3)
def test_transform_each():
    """transform_each applies the i-th rotation+translation to the i-th point."""
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    expected = np.array([
        [2, -3, 5],  # [ 1, -5, 2] + [ 1, 2, 3]
        [1, 3, 10]   # [ -3, -2, 4] + [ 4, 5, 6]
    ])
    assert_array_equal(
        transform_each(rotations, translations, points),
        expected
    )
def test_transform_all():
    """transform_all applies every (R, t) pair to every point: output is (n_poses, n_points, 3)."""
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
        [0, 0, 6]
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    expected = np.array([
        [[2, -3, 5],   # [ 1, -5, 2] + [ 1, 2, 3]
         [5, -1, 1],   # [ 4, -3, -2] + [ 1, 2, 3]
         [1, -4, 3]],  # [ 0, -6, 0] + [ 1, 2, 3]
        [[-1, 7, 7],   # [-5, 2, 1] + [ 4, 5, 6]
         [1, 3, 10],   # [-3, -2, 4] + [ 4, 5, 6]
         [-2, 5, 6]]   # [-6, 0, 0] + [ 4, 5, 6]
    ])
    assert_array_equal(transform_all(rotations, translations, points),
                       expected)
def test_inv_transform_all():
    """inv_transform_all applies the inverse transform R.T @ (p - t) for every pose/point pair."""
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
        [0, 0, 6]
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    # [R.T for R in rotations]
    # [[1, 0, 0],
    #  [0, 0, 1],
    #  [0, -1, 0]],
    # [[0, 0, 1],
    #  [0, 1, 0],
    #  [-1, 0, 0]]
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    # p - t
    # [[0, 0, 2],
    #  [3, -4, 0],
    #  [-1, -2, 3]],
    # [[-3, -3, -1],
    #  [0, -7, -3],
    #  [-4, -5, 0]]
    # np.dot(R.T, p-t)
    expected = np.array([
        [[0, 2, 0],
         [3, 0, 4],
         [-1, 3, 2]],
        [[-1, -3, 3],
         [-3, -7, 0],
         [0, -5, 4]]
    ])
    assert_array_equal(inv_transform_all(rotations, translations, points),
                       expected)
def test_transform_class():
    """Transform(R, t, s) maps P to s * (R @ p) + t; s=1.0 is a pure rigid transform."""
    P = np.array([
        [1, 2, 5],
        [4, -2, 3],
    ])
    R = np.array([
        [1, 0, 0],
        [0, 0, -1],
        [0, 1, 0]
    ])
    t = np.array([1, 2, 3])
    assert_array_equal(
        Transform(R, t, s=1.0)(P),
        [[2, -3, 5],  # [ 1 -5 2] + [ 1 2 3]
         [5, -1, 1]]  # [ 4 -3 -2] + [ 1 2 3]
    )
    # Non-unit scale s=0.1 shrinks the rotated points before translating.
    assert_array_equal(
        Transform(R, t, s=0.1)(P),
        [[1.1, 1.5, 3.2],  # [ 0.1 -0.5 0.2] + [ 1 2 3]
         [1.4, 1.7, 2.8]]  # [ 0.4 -0.3 -0.2] + [ 1 2 3]
    )
def test_transform_se3():
    """transform_se3 on a 4x4 motion matrix matches the explicit R @ p + t formula."""
    R_10 = np.random.random((3, 3))
    t_10 = np.random.random(3)
    T_10 = motion_matrix(R_10, t_10)
    P0 = np.random.uniform(-10, 10, (10, 3))
    P1 = transform_se3(T_10, P0)
    assert_array_almost_equal(P1, np.dot(R_10, P0.T).T + t_10)
|
162068
|
from autograd import numpy as npy
from functools import reduce
from scipy.optimize import minimize
from autograd import grad
def generate_Givens_rotation(i, j, theta, size):
    """Return the size x size Givens rotation by `theta` in the (i, j) plane.

    The matrix is assembled as identity plus scaled indicator matrices
    (rather than writing cos/sin directly into `g`) so that autograd can
    differentiate through `theta`: item-assignment of theta-dependent values
    would break the autograd trace.
    """
    g = npy.eye(size)
    c = npy.cos(theta)
    s = npy.sin(theta)
    # Zero the four entries that the rotation terms below will replace.
    g[i, i] = 0
    g[j, j] = 0
    g[j, i] = 0
    g[i, j] = 0
    ii_mat = npy.zeros_like(g)
    ii_mat[i, i] = 1
    jj_mat = npy.zeros_like(g)
    jj_mat[j, j] = 1
    ji_mat = npy.zeros_like(g)
    ji_mat[j, i] = 1
    ij_mat = npy.zeros_like(g)
    ij_mat[i, j] = 1
    # Result: g[i,i] = g[j,j] = cos(theta), g[j,i] = sin(theta), g[i,j] = -sin(theta).
    return g + c * ii_mat + c * jj_mat + s * ji_mat - s * ij_mat
def generate_U_list(ij_list, theta_list, size):
    """Build one Givens rotation matrix per ((i, j), theta) pair."""
    return [
        generate_Givens_rotation(i, j, theta, size)
        for (i, j), theta in zip(ij_list, theta_list)
    ]
def get_rotation_matrix(X, C):
    """Find a C x C rotation of X minimizing the row-max-normalized cost.

    The rotation is parametrized as a product of C*(C-1)/2 Givens rotations;
    the angles are optimized with conjugate gradients, using autograd for the
    exact Jacobian. Returns (final cost, rotation matrix).
    """
    ij_list = [(i, j) for i in range(C) for j in range(C) if i < j]
    def cost(theta_list):
        # Compose the rotation from the Givens factors.
        U_list = generate_U_list(ij_list, theta_list, C)
        R = reduce(npy.dot, U_list, npy.eye(C))
        Z = X.dot(R)
        # Normalize each row by its max entry before the squared sum.
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)
    # Start from the identity rotation (all angles zero).
    theta_list_init = npy.array([0.0] * int(C * (C - 1) / 2))
    opt = minimize(cost,
                   x0=theta_list_init,
                   method='CG',
                   jac=grad(cost),
                   options={'disp': False})
    return opt.fun, reduce(npy.dot, generate_U_list(ij_list, opt.x, C), npy.eye(C))
|
162143
|
import boto3
import random
import string
import uuid
import httplib
import urlparse
import json
import base64
import hashlib
"""
If included in a Cloudformation build as a CustomResource, generate a random string of length
given by the 'length' parameter.
By default the character set used is upper and lowercase ascii letters plus digits.
If the 'punctuation' parameter is specified this also includes punctuation.
If you specify a KMS key ID then it will be encrypted, too
"""
s3_client = boto3.client('s3')
def send_response(request, response, status=None, reason=None):
    """Fill in Status/Reason on the CloudFormation response dict and, when the
    request carries a non-empty ResponseURL, PUT the JSON body there.
    Returns the (mutated) response dict."""
    if status is not None:
        response['Status'] = status
    if reason is not None:
        response['Reason'] = reason
    response_url = request.get('ResponseURL')
    if response_url:
        parsed = urlparse.urlparse(response_url)
        body = json.dumps(response)
        print ('body', body)
        conn = httplib.HTTPSConnection(parsed.hostname)
        conn.request('PUT', parsed.path + '?' + parsed.query, body)
    return response
def lambda_handler(event, context):
    """CloudFormation custom-resource entry point.

    Builds a deterministic pseudo-random string from the stack name plus a
    SHA-224 digest of the StackId, and sends it back to CloudFormation.

    :param event: the CloudFormation custom-resource request dict.
    :param context: Lambda context (unused).
    :return: the response dict that was sent back.
    """
    response = {
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        'Status': 'SUCCESS'
    }
    # Local-testing path: with no ResponseURL, write the result to S3 via a
    # pre-signed URL instead of answering CloudFormation.
    if event['ResponseURL'] == '':
        s3params = {"Bucket": 'gillemi-gillemi', "Key": 'result.json'}
        event["ResponseURL"] = s3_client.generate_presigned_url('put_object', s3params)
        print('The URL is', event["ResponseURL"])
    if 'PhysicalResourceId' in event:
        response['PhysicalResourceId'] = event['PhysicalResourceId']
    else:
        response['PhysicalResourceId'] = str(uuid.uuid4())
    # Nothing to clean up on Delete; just acknowledge so the stack can proceed.
    if event['RequestType'] == 'Delete':
        return send_response(event, response)
    length = 8
    try:
        length = int(event['ResourceProperties']['Length'])
    except (KeyError, TypeError, ValueError):
        # BUG FIX: was a bare `except:`; only fall back to the default length
        # when the property is absent or not a valid integer.
        pass
    # encode() keeps this working on Python 3 too, where hashlib requires
    # bytes (StackId is an ASCII ARN, so utf-8 is safe on Python 2 as well).
    digest = hashlib.sha224(event['StackId'].encode('utf-8')).hexdigest()
    random_string = event['ResourceProperties']['StackName'][:12] + '-' + digest[:length]
    response['Data'] = { 'RandomString': random_string }
    response['Reason'] = 'Successfully generated a random string'
    return send_response(event, response)
|
162172
|
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Context, RequestContext
from django import forms
from django.db.models import Count
try:
# py3
from urllib.parse import urlencode
except ImportError:
# py2
from urllib import urlencode
from astrometry.net.models import *
from astrometry.net.util import get_page, get_session_form
class ImageSearchForm(forms.Form):
    """Search form for filtering user images by tag text."""
    # Free-text query matched case-insensitively against image tags.
    query = forms.CharField(widget=forms.TextInput(attrs={'autocomplete':'off'}),
                            required=False)
def images(req):
    """List visible user images, optionally filtered by a tag query.

    GET parameters: 'query' (tag substring) and 'page' (page number).

    :param req: the HTTP request.
    :return: rendered 'search/user_images.html' response.
    """
    images = UserImage.objects.all_visible()
    form = ImageSearchForm(req.GET)
    # BUG FIX: query_string was previously assigned only inside the
    # form.is_valid() branch, raising NameError when building the context
    # whenever the form failed validation.
    query_string = ''
    if form.is_valid():
        query_string = urlencode(form.cleaned_data)
        query = form.cleaned_data.get('query')
        if query:
            images = images.filter(tags__text__icontains=query)
    # distinct() is required because the tag join can duplicate rows.
    images = images.order_by('submission__submitted_on').distinct()
    page_number = req.GET.get('page', 1)
    image_page = get_page(images, 3 * 10, page_number)
    context = {
        'image_search_form': form,
        'image_page': image_page,
        'query_string': query_string,
    }
    return render(req, 'search/user_images.html', context)
class UserSearchForm(forms.Form):
    """Search form for filtering users by display name."""
    # Free-text query matched case-insensitively against profile display names.
    query = forms.CharField(widget=forms.TextInput(attrs={'autocomplete':'off'}),
                            required=False)
def users(req):
    """List users, optionally filtered by display name and sorted.

    GET parameters:
      query -- case-insensitive substring of the profile display name.
      sort  -- one of 'name' (default), 'date', 'images'.
      page  -- page number.

    :param req: the HTTP request.
    :return: rendered 'search/users.html' response.
    """
    users = User.objects.all()
    form = UserSearchForm(req.GET)
    # BUG FIX: query_string was previously unbound (NameError) whenever the
    # form failed validation.
    query_string = ''
    if form.is_valid():
        query_string = urlencode(form.cleaned_data)
        query = form.cleaned_data.get('query')
        if query:
            users = users.filter(profile__display_name__icontains=query)
    sort = req.GET.get('sort', 'name')
    order = 'profile__display_name'
    if sort == 'name':
        order = 'profile__display_name'
    elif sort == 'date':
        order = 'date_joined'
    elif sort == 'images':
        # Annotate each user with their image count so we can sort by it.
        users = users.annotate(Count('user_images'))
        order = '-user_images__count'
    users = users.order_by(order)
    page_number = req.GET.get('page', 1)
    user_page = get_page(users, 20, page_number)
    context = {
        'user_search_form': form,
        'user_page': user_page,
        'query_string': query_string,
    }
    return render(req, 'search/users.html', context)
|
162205
|
from flask import Flask
from datetime import datetime
from flask import current_app
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World! " + str(datetime.now())
@app.route("/applicationpath")
def applicationPath():
return current_app.root_path
|
162237
|
import logging
import sys
import time
import requests
from transifex.native.consts import (KEY_CHARACTER_LIMIT,
KEY_DEVELOPER_COMMENT, KEY_OCCURRENCES,
KEY_TAGS)
TRANSIFEX_CDS_HOST = 'https://cds.svc.transifex.net'
TRANSIFEX_CDS_URLS = {
'FETCH_AVAILABLE_LANGUAGES': '/languages',
'FETCH_TRANSLATIONS_FOR_LANGUAGE': '/content/{language_code}',
'PUSH_SOURCE_STRINGS': '/content/',
'INVALIDATE_CACHE': '/invalidate',
'PURGE_CACHE': '/purge',
}
logger = logging.getLogger('transifex.native.cds')
logger.addHandler(logging.StreamHandler(sys.stderr))
# A mapping of meta keys
# (interface_key: cds_key)
# Only contains meta keys that are different between the two
MAPPING = {
KEY_DEVELOPER_COMMENT: 'developer_comment',
KEY_CHARACTER_LIMIT: 'character_limit',
KEY_TAGS: 'tags',
KEY_OCCURRENCES: 'occurrences',
}
# Number of times to retry connecting to CDS before bailing out
MAX_RETRIES = 3
RETRY_DELAY_SEC = 2
class EtagStore(object):
    """In-memory ETag cache.

    NOTE(review): entries are kept forever; a duration/expiry policy is
    probably needed at some point (see original author's note).
    """

    def __init__(self):
        # key -> last-seen ETag value
        self._mem = {}

    def set(self, key, value):
        """Remember `value` as the ETag for `key`."""
        self._mem[key] = value

    def get(self, key):
        """Return the stored ETag for `key`, or '' when none is known."""
        if key in self._mem:
            return self._mem[key]
        return ''
class CDSHandler(object):
    """Handles communication with the Content Delivery Service."""

    def __init__(self, configured_languages, token, secret=None,
                 host=TRANSIFEX_CDS_HOST):
        """Constructor.

        :param list configured_languages: a list of language codes for the
            configured languages in the application
        :param str token: the API token to use for connecting to the CDS
        :param str secret: the API secret; required for push/invalidate calls
        :param str host: the host of the Content Delivery Service
        """
        self.configured_language_codes = configured_languages
        self.token = token
        self.secret = secret
        self.host = host or TRANSIFEX_CDS_HOST
        self.etags = EtagStore()

    def fetch_languages(self):
        """Fetch the languages defined in the CDS for the specific project.

        Contains the source language and all target languages.

        :return: a list of language information dicts (empty on any error)
        :rtype: list
        """
        cds_url = TRANSIFEX_CDS_URLS['FETCH_AVAILABLE_LANGUAGES']
        languages = []
        try:
            response = self.retry_get_request(
                self.host + cds_url,
                headers=self._get_headers(),
            )
            if not response.ok:
                logger.error(
                    'Error retrieving languages from CDS: `{}`'.format(
                        response.reason
                    )
                )
                response.raise_for_status()
            json_content = response.json()
            languages = json_content['data']
        except (KeyError, ValueError):
            # Compatibility with python2.7 where `JSONDecodeError` doesn't
            # exist
            logger.error(
                'Error retrieving languages from CDS: Malformed response')
        except requests.ConnectionError:
            logger.error(
                'Error retrieving languages from CDS: ConnectionError')
        except Exception as e:
            logger.error('Error retrieving languages from CDS: UnknownError '
                         '(`{}`)'.format(str(e)))
        return languages

    def fetch_translations(self, language_code=None):
        """Fetch all translations for the given organization/project/(resource)
        associated with the current token.

        Returns a tuple of refresh flag and a dictionary of the fetched
        translations per language. Refresh flag is going to be True whenever
        fresh data has been acquired, False otherwise.

        :return: a dictionary of (refresh_flag, translations) tuples
        :rtype: dict
        """
        cds_url = TRANSIFEX_CDS_URLS['FETCH_TRANSLATIONS_FOR_LANGUAGE']
        translations = {}
        if not language_code:
            languages = [lang['code'] for lang in self.fetch_languages()]
        else:
            languages = [language_code]
        # Only fetch languages both available on CDS and configured locally.
        for language_code in set(languages) & \
                set(self.configured_language_codes):
            try:
                response = self.retry_get_request(
                    (self.host + cds_url.format(language_code=language_code)),
                    headers=self._get_headers(
                        etag=self.etags.get(language_code)
                    )
                )
                if not response.ok:
                    logger.error(
                        'Error retrieving translations from CDS: `{}`'.format(
                            response.reason
                        )
                    )
                    response.raise_for_status()
                # etags indicate that no translation have been updated
                if response.status_code == 304:
                    translations[language_code] = (False, {})
                else:
                    self.etags.set(
                        language_code, response.headers.get('ETag', ''))
                    json_content = response.json()
                    translations[language_code] = (
                        True, json_content['data']
                    )
            except (KeyError, ValueError):
                # Compatibility with python2.7 where `JSONDecodeError` doesn't
                # exist
                logger.error('Error retrieving translations from CDS: '
                             'Malformed response')  # pragma no cover
                translations[language_code] = (False, {})  # pragma no cover
            except requests.ConnectionError:
                logger.error(
                    'Error retrieving translations from CDS: ConnectionError')
                translations[language_code] = (False, {})
            except Exception as e:
                logger.error(
                    'Error retrieving translations from CDS: UnknownError '
                    '(`{}`)'.format(str(e))
                )  # pragma no cover
                translations[language_code] = (False, {})
        return translations

    def push_source_strings(self, strings, purge=False):
        """Push source strings to CDS.

        :param list(SourceString) strings: a list of `SourceString` objects
            holding source strings
        :param bool purge: True deletes destination source content not included
            in pushed content. False appends the pushed content to destination
            source content.
        :return: the HTTP response object, or None if the request could not
            be made at all
        :rtype: requests.Response or None
        """
        if not self.secret:
            raise Exception('You need to use `TRANSIFEX_SECRET` when pushing '
                            'source content')
        cds_url = TRANSIFEX_CDS_URLS['PUSH_SOURCE_STRINGS']
        data = {k: v for k, v in [self._serialize(item) for item in strings]}
        # BUG FIX: `response` was unbound (UnboundLocalError on return) when
        # requests.post raised before assignment; None now signals failure.
        response = None
        try:
            response = requests.post(
                self.host + cds_url,
                headers=self._get_headers(use_secret=True),
                json={
                    'data': data,
                    'meta': {'purge': purge},
                }
            )
            response.raise_for_status()
        except requests.ConnectionError:
            logger.error(
                'Error pushing source strings to CDS: ConnectionError')
        except Exception as e:
            logger.error('Error pushing source strings to CDS: UnknownError '
                         '(`{}`)'.format(str(e)))
        return response

    def get_push_status(self, job_path):
        """Get source string push job status.

        :param str job_path: Job url path
        :return: the HTTP response object, or None if the request could not
            be made at all
        :rtype: requests.Response or None
        """
        if not self.secret:
            raise Exception('You need to use `TRANSIFEX_SECRET` when polling '
                            'source content push')
        # BUG FIX: `response` was unbound when requests.get raised before
        # assignment; None now signals failure.
        response = None
        try:
            response = requests.get(
                self.host + job_path,
                headers=self._get_headers(use_secret=True),
            )
            response.raise_for_status()
        except requests.ConnectionError:
            logger.error(
                'Error polling source strings push to CDS: ConnectionError')
        except Exception as e:
            logger.error(
                'Error polling source strings push to CDS: UnknownError '
                '(`{}`)'.format(str(e)))
        return response

    def invalidate_cache(self, purge=False):
        """Invalidate CDS cache.

        :param bool purge: True deletes CDS cache entirely instead of
            triggering a job to re-cache content.
        :return: the HTTP response object, or None if the request could not
            be made at all
        :rtype: requests.Response or None
        """
        if not self.secret:
            raise Exception('You need to use `TRANSIFEX_SECRET` when '
                            'invalidating cache')
        cds_url = TRANSIFEX_CDS_URLS['PURGE_CACHE'] if purge else \
            TRANSIFEX_CDS_URLS['INVALIDATE_CACHE']
        # BUG FIX: `response` was unbound when requests.post raised before
        # assignment; None now signals failure.
        response = None
        try:
            response = requests.post(
                self.host + cds_url,
                headers=self._get_headers(use_secret=True),
                json={}
            )
            response.raise_for_status()
        except requests.ConnectionError:
            logger.error(
                'Error invalidating CDS: ConnectionError')
        except Exception as e:
            logger.error('Error invalidating CDS: UnknownError '
                         '(`{}`)'.format(str(e)))
        return response

    def _serialize(self, source_string):
        """Serialize the given source string to a format suitable for the CDS.

        :param transifex.native.parsing.SourceString source_string: the object
            to serialize
        :return: a tuple that contains ths string key and its data,
            as (key, data)
        :rtype: tuple
        """
        data = {
            'string': source_string.string,
            'meta': {
                # Rename meta keys that differ between SDK and CDS.
                MAPPING.get(k, k): v
                for k, v in source_string.meta.items()
            },
        }
        if source_string.context:
            data['meta']['context'] = source_string.context
        return source_string.key, data

    def _get_headers(self, use_secret=False, etag=None):
        """Return the headers to use when making requests.

        :param bool use_secret: if True, the Bearer authorization header
            will also include the secret, otherwise it will only use the token
        :param str etag: an optional etag to include
        :return: a dictionary with all headers
        :rtype: dict
        """
        headers = {
            'Authorization': 'Bearer {token}{secret}'.format(
                token=self.token,
                secret=(':' + self.secret if use_secret else '')
            ),
            'Accept-Encoding': 'gzip',
            'Accept-Version': 'v2',
            'X-NATIVE-SDK': 'python',
        }
        if etag:
            headers['If-None-Match'] = etag
        return headers

    def retry_get_request(self, *args, **kwargs):
        """ Resilient function for GET requests """
        retries, last_response_status = 0, 202
        # Keep polling while the CDS reports "still processing" (202), and
        # retry server errors (5xx) at most MAX_RETRIES times with a linear
        # back-off.  Parentheses added for clarity only: `and` already bound
        # tighter than `or`, so behavior is unchanged.
        while (last_response_status == 202 or
               (500 <= last_response_status < 600 and
                retries < MAX_RETRIES)):
            if 500 <= last_response_status < 600:
                retries += 1
                time.sleep(retries * RETRY_DELAY_SEC)
            response = requests.get(*args, **kwargs)
            last_response_status = response.status_code
        return response
|
162339
|
from abc import ABCMeta, abstractmethod
from liteflow.core.builders import WorkflowBuilder
class Workflow(metaclass=ABCMeta):
    """Abstract base class for workflow definitions.

    Subclasses identify themselves via `id`/`version` and describe their
    steps by implementing `build`.
    """

    @property
    @abstractmethod
    def id(self):
        # Unique identifier of the workflow; subclasses must override.
        return None

    @property
    @abstractmethod
    def version(self):
        # Version number of the workflow definition; defaults to 1.
        return 1

    @abstractmethod
    def build(self, builder: WorkflowBuilder):
        """Describe the workflow's steps on the given builder."""
        raise NotImplementedError
|
162346
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer monitoring the HLT_DiCentralPFJet55_PFMET110 SUSY trigger path.
SUSY_HLT_DiJet_MET = DQMEDAnalyzer('SUSY_HLT_DiJet_MET',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'),
    pfMETCollection = cms.InputTag("pfMet"),
    caloMETCollection = cms.InputTag("caloMet"),
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'),
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_DiCentralPFJet55_PFMET110_v'),
    # Auxiliary (orthogonal) muon path used as the denominator for
    # hadronic-trigger efficiency measurements.
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_v'),
    TriggerFilter = cms.InputTag('hltPFMET110Filter','','HLT'), #the last filter in the path
    TriggerJetFilter = cms.InputTag('hltDiCentralPFJet55','','HLT'), #the last filter in the path
    # Kinematic thresholds: *Trig values select trigger-level objects,
    # the others select offline objects.
    PtThrJetTrig = cms.untracked.double(55.0),
    EtaThrJetTrig = cms.untracked.double(2.6),
    PtThrJet = cms.untracked.double(30.0),
    EtaThrJet = cms.untracked.double(2.4),
    OfflineMetCut = cms.untracked.double(250.0),
)
# Harvesting step: turns the numerator/denominator histograms filled above
# into efficiency plots.
SUSYoHLToDiJetMEToPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
    subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_DiCentralPFJet55_PFMET110_v"),
    verbose = cms.untracked.uint32(2), # Set to 2 for all messages
    resolution = cms.vstring(""),
    efficiency = cms.vstring(
        "pfMetTurnOn_eff 'Turn-on vs MET; PFMET (GeV); #epsilon' pfMetTurnOn_num pfMetTurnOn_den",
        "pfJet2PtTurnOn_eff 'Efficiency vs Jet2 p_{T}, NCentralPFJets >= 2, PFMET > 250 GeV; Second leading jet pT (GeV); #epsilon' pfJet2PtTurnOn_num pfJet2PtTurnOn_den",
    )
)
|
162434
|
from setuptools import setup, find_packages
from pathlib import Path
import os
if __name__ == "__main__":
with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
long_description = file.read()
import os
def package_files(directory):
paths = []
for (path, _, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join("..", path, filename))
return paths
extra_files = package_files("front/build")
setup(
name="clip_retrieval",
packages=find_packages(),
package_data={"": extra_files},
include_package_data=True,
version="2.21.0",
license="MIT",
description="Easily computing clip embeddings and building a clip retrieval system with them",
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["clip-retrieval = clip_retrieval.cli:main"]},
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/rom1504/clip-retrieval",
data_files=[(".", ["README.md"]),],
keywords=["machine learning", "computer vision", "download", "image", "dataset"],
install_requires=[
"img2dataset",
"clip-anytorch",
"tqdm",
"fire",
"torch",
"torchvision",
"numpy",
"faiss-cpu",
"flask",
"flask_restful",
"flask_cors",
"pandas",
"pyarrow",
"autofaiss",
"pyyaml",
"webdataset",
"h5py",
"prometheus-client",
"fsspec",
"sentence-transformers",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
162453
|
from ..error_types import ValidationError
from .selections import cursor, DOCUMENT_BEGIN, select_comments, select_element, select_key, selection
from ..constants import (
BEGIN,
END,
DOCUMENT,
FIELD,
FIELDSET_ENTRY,
FIELD_OR_FIELDSET_OR_LIST,
LIST_ITEM,
MULTILINE_FIELD_BEGIN
)
class Validation:
    """Factory of ValidationError instances.

    Each helper builds a ValidationError from three parts: a human-readable
    message (usually produced by context.messages), a printable snippet from
    the reporter, and a selection (cursor range) pointing at the offending
    part of the parsed document.
    """

    @staticmethod
    def comment_error(context, message, element):
        """Error attached to an element's comment lines."""
        return ValidationError(
            context.messages.comment_error(message),
            context.reporter(context).report_comments(element).snippet(),
            select_comments(element)
        )

    @staticmethod
    def element_error(context, message, element):
        """Error covering a whole element; `message` is used verbatim."""
        return ValidationError(
            message,
            context.reporter(context).report_element(element).snippet(),
            select_element(element)
        )

    @staticmethod
    def key_error(context, message, element):
        """Error pointing at an element's key."""
        return ValidationError(
            context.messages.key_error(message),
            context.reporter(context).report_line(element).snippet(),
            select_key(element)
        )

    @staticmethod
    def missing_comment(context, element):
        """Error for an element that is required to carry a comment."""
        return ValidationError(
            context.messages.missing_comment,
            context.reporter(context).report_line(element).snippet(), # TODO: Question-tag an empty line before an element with missing comment
            selection(element, 'line', BEGIN)
        )

    @staticmethod
    def missing_element(context, key, parent, message):
        """Error for a required child element absent from `parent`.

        `message` names an attribute on context.messages; when a key is
        given, the '<message>_with_key' variant is looked up and called
        with the key.
        """
        message = getattr(context.messages, message if key is None else message + '_with_key')
        return ValidationError(
            message if key is None else message(key),
            context.reporter(context).report_missing_element(parent).snippet(),
            DOCUMENT_BEGIN if parent['type'] == DOCUMENT else selection(parent, 'line', END)
        )

    @staticmethod
    def missing_value(context, element):
        """Error for a field, fieldset entry or list item without a value.

        The selection starts right after the element's operator (or
        template) and extends past the last continuation line, so the
        whole empty span is highlighted.
        """
        selection_data = {}
        if (element['type'] == FIELD or
            element['type'] == FIELD_OR_FIELDSET_OR_LIST or
            element['type'] == MULTILINE_FIELD_BEGIN):
            message = context.messages.missing_field_value(element['key'])
            if 'template' in element['ranges']:
                selection_data['from'] = cursor(element, 'template', END)
            elif 'element_operator' in element['ranges']:
                selection_data['from'] = cursor(element, 'element_operator', END)
            else:
                selection_data['from'] = cursor(element, 'line', END)
        elif element['type'] == FIELDSET_ENTRY:
            message = context.messages.missing_fieldset_entry_value(element['key'])
            selection_data['from'] = cursor(element, 'entry_operator', END)
        elif element['type'] == LIST_ITEM:
            message = context.messages.missing_list_item_value(element['parent']['key'])
            selection_data['from'] = cursor(element, 'item_operator', END)
        snippet = context.reporter(context).report_element(element).snippet()
        if element['type'] == FIELD and 'continuations' in element:
            selection_data['to'] = cursor(element['continuations'][-1], 'line', END)
        else:
            selection_data['to'] = cursor(element, 'line', END)
        return ValidationError(message, snippet, selection_data)

    @staticmethod
    def unexpected_element(context, message, element):
        """Error for an element that should not appear here; falls back to
        the generic unexpected_element message when none is given."""
        return ValidationError(
            message or context.messages.unexpected_element,
            context.reporter(context).report_element(element).snippet(),
            select_element(element)
        )

    @staticmethod
    def unexpected_multiple_elements(context, key, elements, message):
        """Error when several elements were found where one was expected;
        the selection points at the first of them."""
        message = getattr(context.messages, message if key is None else message + '_with_key')
        return ValidationError(
            message if key is None else message(key),
            context.reporter(context).report_elements(elements).snippet(),
            select_element(elements[0])
        )

    @staticmethod
    def unexpected_element_type(context, key, section, message):
        """Error when an element exists but has the wrong type."""
        message = getattr(context.messages, message if key is None else message + '_with_key')
        return ValidationError(
            message if key is None else message(key),
            context.reporter(context).report_element(section).snippet(),
            select_element(section)
        )

    @staticmethod
    def value_error(context, message, element):
        """Error pointing at an element's value.

        The selection is computed per element kind: mirrored elements point
        at the key, multiline fields span their content lines, and regular
        elements span from just after the operator/value start to the end
        of the value or last continuation line.
        """
        if 'mirror' in element:
            snippet = context.reporter(context).report_line(element).snippet()
            select = select_key(element)
        elif element['type'] == MULTILINE_FIELD_BEGIN:
            if 'lines' in element:
                snippet = context.reporter(context).report_multiline_value(element).snippet()
                select = selection(element['lines'][0], 'line', BEGIN, element['lines'][-1], 'line', END)
            else:
                snippet = context.reporter(context).report_element(element).snippet()
                select = selection(element, 'line', END)
        else:
            snippet = context.reporter(context).report_element(element).snippet()
            select = {}
            if 'value' in element['ranges']:
                select['from'] = cursor(element, 'value', BEGIN)
            elif 'element_operator' in element['ranges']:
                select['from'] = cursor(element, 'element_operator', END)
            elif 'entry_operator' in element['ranges']:
                select['from'] = cursor(element, 'entry_operator', END)
            elif element['type'] == LIST_ITEM:
                select['from'] = cursor(element, 'item_operator', END)
            else:
                # TODO: Possibly never reached - think through state permutations
                select['from'] = cursor(element, 'line', END)
            if 'continuations' in element:
                select['to'] = cursor(element['continuations'][-1], 'line', END)
            elif 'value' in element['ranges']:
                select['to'] = cursor(element, 'value', END)
            else:
                select['to'] = cursor(element, 'line', END)
        return ValidationError(context.messages.value_error(message), snippet, select)
|
162465
|
import ast
import json
import cowait
from cowait import Task
from .code_builder import CodeBuilder
class NotebookRunner(Task):
    """Cowait task that executes a Jupyter notebook's code cells.

    All code cells are concatenated into a generated async function which is
    exec'd and awaited, so top-level `await` inside cells works.
    """

    async def run(self, path: str, **inputs):
        """Load the notebook at `path`, build runnable code and execute it.

        :param path: notebook path; '.ipynb' is appended when missing.
        :param inputs: accepted for the Task interface; not used directly here.
        :return: the return value of the generated notebook function.
        """
        if not path.endswith('.ipynb'):
            path += '.ipynb'
        cells = file_to_json(path)['cells']
        code = CodeBuilder()
        code.append(0, 'async def _notebook_runner():')
        code.appendBlock(4, cells_to_code(cells))
        # Expose cowait to the notebook code; exec defines `_notebook_runner`
        # in local_scope, which we then retrieve and await.
        global_scope = {'cowait': cowait, 'NotebookRunner': self.__class__}
        local_scope = {}
        exec(str(code), global_scope, local_scope)
        handle = local_scope['_notebook_runner']
        return await handle()
def cells_to_code(cells):
    """Convert notebook cells into a CodeBuilder holding all code-cell code.

    Markdown and other non-code cells are skipped; each code cell is
    followed by a blank line.

    :param cells: list of notebook cell dicts (as parsed from .ipynb JSON).
    :return: a CodeBuilder with the concatenated, transformed cell code.
    """
    code = CodeBuilder()
    for cell in cells:
        if cell['cell_type'] != 'code':
            continue
        # BUG FIX: `row[-1]` raised IndexError for empty source rows; only
        # strip a single trailing newline when one is actually present.
        source_rows = [row[:-1] if row.endswith('\n') else row
                       for row in cell['source']]
        if len(source_rows) > 0:
            code.appendBlock(0, code_from_source(source_rows))
            code.append(0, '')
    return code
def code_from_source(source_rows):
    """Transform one cell's source rows into runnable code.

    Handles `%%` cell magics and `%` line magics, rewrites `global` to
    `nonlocal` (cells run inside a generated function, so their "globals"
    are that function's locals), and syntax-checks the result.

    :param source_rows: the cell's source lines, without trailing newlines.
    :return: a CodeBuilder with the transformed cell code.
    :raises ValueError: if the cell uses an unsupported magic command.
    :raises SyntaxError: if the transformed cell does not parse.
    """
    code = CodeBuilder()
    _, first_line = strip_indentation(source_rows[0])
    if first_line.startswith('%%'):
        # A cell magic governs the whole cell body.
        command, *args = first_line[2:].split(' ', 1)
        new_code = transform_cell_magic(source_rows[1:], command, args)
        if new_code:
            code.appendBlock(0, new_code)
    else:
        for row in source_rows:
            indentation, line = strip_indentation(row)
            if line.startswith('global '):
                new_line = 'nonlocal ' + line[7:]
                code.append(indentation, new_line)
                print(f"Warning: Replaced '{line}' with '{new_line}'")
            elif line.startswith('%'): # Not supported: a = %ls
                command, *args = line[1:].split(' ', 1)
                new_code = transform_line_magic(command, args)
                if new_code:
                    code.appendBlock(indentation, new_code)
            else:
                code.append(indentation, line)
    # If there is a syntax error it will be found here
    try:
        ast.parse(str(code), filename='<notebook cell>')
    except SyntaxError as e:
        # Re-raise with the transformed cell attached, since the original
        # notebook line numbers no longer match.
        syntax_error = SyntaxError(f"{e.msg}\nThe error is located somewhere in this cell:\n\n{str(code)}", e.args[1])
        raise syntax_error from None
    return code
def strip_indentation(row):
    """Split `row` into (indentation width, content without leading spaces)."""
    stripped = row.lstrip(' ')
    indent = len(row) - len(stripped)
    return indent, stripped
def transform_line_magic(command: str, args: str):
    """Translate a `%magic` line into replacement code.

    Returns None for magics that can simply be dropped.

    :raises ValueError: for any magic command that is not supported.
    """
    if command in ('lsmagic', 'matplotlib'):
        return None
    raise ValueError(f"Magic command %{command} is not supported")
def transform_cell_magic(rows: list, command: str, args: str):
    """Translate a `%%magic` cell into replacement code.

    Returns None for display-only magics whose cells can be dropped.

    :raises ValueError: for any cell magic that is not supported.
    """
    if command in ('html', 'HTML', 'markdown', 'latex'):
        return None
    raise ValueError(f"Magic command %%{command} is not supported")
def file_to_json(path):
    """Parse the JSON file at `path` and return the resulting object."""
    with open(path, 'r') as handle:
        return json.load(handle)
|
162496
|
import pytest
from django.test import Client
pytestmark = pytest.mark.django_db
client = Client()
def test_news_page_gets_created(news_page):
    """Test that we have a news page created by the fixture"""
    assert news_page is not None
def test_news_200(news_page):
    """Test that the news page URL responds with HTTP 200"""
    rv = client.get(news_page.url)
    assert rv.status_code == 200
def test_news_shows_tags(news_page):
    """Test that we can see a news item's tags"""
    news_page.tags.add("This is a tag")
    # Publishing a revision is required for the tag change to be visible
    # on the live page.
    news_page.save_revision().publish()
    rv = client.get(news_page.url)
    assert "This is a tag" in str(rv.content)
|
162556
|
import sys
import pytest
import ochrona.eval.eval as e
from ochrona.model.dependency_set import DependencySet
class MockLogger:
    """Test double that records log messages per severity level."""

    def __init__(self):
        # One bucket per severity; tests may inspect these directly.
        self._debug, self._info, self._warn, self._error = [], [], [], []

    def debug(self, msg):
        self._debug.append(msg)

    def info(self, msg):
        self._info.append(msg)

    def warn(self, msg):
        self._warn.append(msg)

    def error(self, msg):
        self._error.append(msg)
class TestEval:
    """
    Component tests for eval.eval module.
    """
    def test_resolve(self):
        # No input at all: everything should resolve to empty collections.
        res = e.resolve(logger=MockLogger())
        assert isinstance(res, DependencySet)
        assert res.dependencies == []
        assert res.flat_list == []
        assert res.confirmed_vulnerabilities == []
        assert res.policy_violations == []
    def test_resolve_no_vulns(self):
        # A made-up package yields no confirmed vulnerabilities.
        res = e.resolve(dependencies=[{"version": "fake=9.9.9"}], logger=MockLogger())
        assert isinstance(res, DependencySet)
        assert len(res.dependencies) == 1
        assert res.flat_list == ["fake=9.9.9"]
        assert res.confirmed_vulnerabilities == []
        assert res.policy_violations == []
    def test_resolve_vuln_found(self):
        # A known-vulnerable pin is flagged with its CVE.
        res = e.resolve(dependencies=[{"version": "requests==2.19.0"}], logger=MockLogger())
        assert isinstance(res, DependencySet)
        assert len(res.dependencies) == 1
        assert res.flat_list == ["requests==2.19.0"]
        assert len(res.confirmed_vulnerabilities) == 1
        assert res.confirmed_vulnerabilities[0].cve_id == "CVE-2018-18074"
        assert res.policy_violations == []
    def test_resolve_policy_violation_legacy(self):
        # Legacy dict-style policy: allow-list by package name.
        res = e.resolve(dependencies=[{"version": "fake=9.9.9"}], policies=[{"policy_type": "package_name", "allow_list": "urllib3"}], logger=MockLogger())
        assert isinstance(res, DependencySet)
        assert len(res.dependencies) == 1
        assert res.flat_list == ["fake=9.9.9"]
        assert res.confirmed_vulnerabilities == []
        assert len(res.policy_violations) == 1
        assert res.policy_violations[0].message == "'fake' not in list of allowed packages. (from fake=9.9.9)"
    def test_resolve_policy_violation(self):
        # Current string-DSL policy syntax.
        res = e.resolve(dependencies=[{"version": "fake=9.9.9"}], policies=["name IN requests,click,pytest"], logger=MockLogger())
        assert isinstance(res, DependencySet)
        assert len(res.dependencies) == 1
        assert res.flat_list == ["fake=9.9.9"]
        assert res.confirmed_vulnerabilities == []
        assert len(res.policy_violations) == 1
        assert res.policy_violations[0].message == "Policy violated by fake=9.9.9"
|
162637
|
from functools import wraps
import re
import math
from PySide2.QtGui import QValidator
from PySide2.QtWidgets import QDoubleSpinBox
#
# Derived from https://gist.github.com/jdreaver/0be2e44981159d0854f5
#
FLOAT_REGEX = re.compile(r'(([+-]?\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?)')
# "i" and "in" are both interpreted as "inf"
INFINITE_REGEX = re.compile(r'^([+-]?)(i(?:n|nf)?)$')
# "n" and "na" are both interpreted as "nan"
NAN_REGEX = re.compile(r'^(n(?:a|an)?)$')
class FloatValidator(QValidator):
    """Qt validator accepting floats in scientific notation plus inf/nan."""

    @staticmethod
    def valid_float_string(string):
        """Return True if `string` is a complete float, infinity, or NaN."""
        match = FLOAT_REGEX.search(string)
        if match:
            # The regex may match a substring; only a full match counts.
            return match.group(0) == string
        special_regexes = (INFINITE_REGEX, NAN_REGEX)
        return any(x.search(string) is not None for x in special_regexes)

    def validate(self, string, position):
        """Classify `string` as Acceptable, Intermediate, or Invalid."""
        if FloatValidator.valid_float_string(string):
            return self.State.Acceptable
        # Allow partial input that could still become a valid float
        # (the character just typed is an exponent marker, dot, or sign).
        if string == "" or string[position-1] in 'e.-+':
            return self.State.Intermediate
        return self.State.Invalid

    def fixup(self, text):
        """Coerce `text` to the nearest valid float string ('' if hopeless)."""
        match = FLOAT_REGEX.search(text)
        if match:
            return match.group(0)
        # "i"/"in" (optionally signed) normalize to '[sign]inf'.
        if match := INFINITE_REGEX.search(text):
            return match.group(1) + 'inf'
        if NAN_REGEX.search(text):
            return 'nan'
        return ''
def clean_text(func):
    """Clean text for ScientificDoubleSpinBox functions.

    Strips the spin box's prefix, suffix, and surrounding whitespace from
    the `text` argument before delegating to the wrapped function.
    """
    @wraps(func)
    def wrapped(self, text, *args, **kwargs):
        cleaned = remove_prefix(text, self.prefix())
        cleaned = remove_suffix(cleaned, self.suffix())
        return func(self, cleaned.strip(), *args, **kwargs)
    return wrapped
class ScientificDoubleSpinBox(QDoubleSpinBox):
    """QDoubleSpinBox accepting scientific notation, infinities, and NaN."""

    @staticmethod
    def format_float(value):
        """Modified form of the 'g' format specifier."""
        # Drop the '+' after 'e' and strip leading zeros from the exponent.
        string = '{:.10g}'.format(value).replace('e+', 'e')
        string = re.sub(r'e(-?)0*(\d+)', r'e\1\2', string)
        return string

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.validator = FloatValidator()
        # Effectively unlimited decimals so no precision is lost on display.
        self.setDecimals(1000)
        self.reset_range()

    def reset_range(self):
        # Allow any finite (or infinite) value.
        self.setRange(-math.inf, math.inf)

    @clean_text
    def validate(self, text, position):
        """Delegate validation to the FloatValidator (on cleaned text)."""
        return self.validator.validate(text, position)

    @clean_text
    def fixup(self, original_text):
        """Normalize user input, tracking transitions in/out of NaN state."""
        text = self.validator.fixup(original_text)
        if text == 'nan':
            self.is_nan = True
            # Don't auto-fill the text
            self.lineEdit().setText(original_text)
        elif self.is_nan and text:
            self.is_nan = False
            # Don't auto-fill the text
            self.lineEdit().setText(original_text)
        return text

    @clean_text
    def valueFromText(self, text):
        """Convert (cleaned, fixed-up) display text into a float value."""
        return float(self.fixup(text))

    def textFromValue(self, value):
        """Render a value using the compact scientific format."""
        return ScientificDoubleSpinBox.format_float(value)

    def stepBy(self, steps):
        """Step the value; no-op when the current text is inf or nan."""
        text = self.cleanText()
        if any(x.search(text) for x in (INFINITE_REGEX, NAN_REGEX)):
            # We cannot step
            return
        new_value = self.value() + self.singleStep() * steps
        self.setValue(new_value)
        # Select the text just like a regular spin box would...
        self.selectAll()

    def setValue(self, v):
        # Keep the NaN flag (and hence the widget's range) in sync.
        self.is_nan = math.isnan(v)
        super().setValue(v)

    @property
    def is_nan(self):
        # Derived from the current value rather than stored separately.
        return math.isnan(self.value())

    @is_nan.setter
    def is_nan(self, b):
        if self.is_nan == b:
            # Unchanged
            return
        if b:
            # Setting the min or max to nan forces the min, max, and
            # value to all be nan.
            self.setMaximum(math.nan)
        else:
            # Reset the range so we can have values that are not nan
            self.reset_range()
def remove_prefix(text, prefix):
    """Return `text` without a leading `prefix` (no-op if absent or empty)."""
    # This can be replaced with str.removeprefix() in python >=3.9
    if not prefix or not text.startswith(prefix):
        return text
    return text[len(prefix):]
def remove_suffix(text, suffix):
    """Return `text` without a trailing `suffix` (no-op if absent or empty)."""
    # This can be replaced with str.removesuffix() in python >=3.9
    if not suffix or not text.endswith(suffix):
        return text
    return text[:-len(suffix)]
|
162650
|
import gl
from importlib import resources
from . import shaders
from .. import Common
class SfaMapProgram(gl.Program):
    """Base for Programs used by this renderer."""
    # Built as a separable program so its stages can be mixed in a pipeline.
    separable = True

    def __init__(self, ctx):
        super().__init__(ctx)
        # Initial camera transform; the commented line is an older preset.
        #self._translate = [-1, -2, -300]
        self._translate = [-0.1, -0.3, 20]
        self._rotate = [0, 0, 0]
        # Keep copies of the starting transform so it can be restored later.
        self._initT = self._translate.copy()
        self._initR = self._rotate.copy()

    def _getShaderCodeFromFile(self, path):
        # this lets us specify where to #include from.
        # Look in this renderer's `shaders` package first, then fall back to
        # the shared Common package.
        try: return resources.read_text(shaders, path)
        except FileNotFoundError: return resources.read_text(Common, path)

    def setMtxs(self, projection, modelView):
        # used by geometry shaders
        """Upload projection and modelview matrices to the program uniforms."""
        with self:
            self.uniforms['matProjection'].value = projection
            self.uniforms['matModelview'].value = modelView
162667
|
from entityfx.string_manipulation_base import StringManipulationBase
class StringManipulation(StringManipulationBase):
    """Benchmark: repeat the base string manipulation a fixed number of times."""

    def benchImplementation(self) -> str:
        """Run the manipulation `self._iterrations` times; return the last result."""
        source = "the quick brown fox jumps over the lazy dog"
        result = ""
        for _ in range(self._iterrations):
            result = StringManipulationBase._doStringManipilation(source)
        return result
|
162702
|
import urlparse
import httplib
def solveRedirect(url, depth=0):
    """
    Facebook API provides a URL for acquiring profile pictures.
    That URL contains a person's username, which is not wanted.
    However, the URL redirects to an encrypted link, which can then be used.
    This function gets the encrypted address.
    ^NOTE: this is very slow, and I probably won't use it anymore.

    NOTE(review): uses `urlparse`/`httplib`, so this module is Python 2 only.

    :param url: the URL to resolve.
    :param depth: current recursion depth; gives up after 2 redirects.
    :return: the final (non-redirecting) URL.
    :raises Exception: when more than 2 redirects are followed.
    """
    if depth > 2:
        raise Exception("redirected %i times, giving up." % depth)
    o = urlparse.urlparse(url, allow_fragments=True)
    # HEAD request only -- we just want the Location header, not the body.
    conn = httplib.HTTPConnection(o.netloc)
    path = o.path
    if o.query:
        path += '?' + o.query
    conn.request("HEAD", path)
    res = conn.getresponse()
    headers = dict(res.getheaders())
    # Follow the redirect unless it points back at the same URL.
    if 'location' in headers and headers['location'] != url:
        return solveRedirect(headers['location'], depth+1)
    return url
|
162712
|
# Placeholder Autopilot settings; replace each value with your own AWS
# region, S3 bucket, and IAM role ARN before use.
DEFAULT_AUTOPILOT_CONFIG = {
    'region_name': 'YOUR_REGION',
    's3_bucket': 'YOUR_S3_BUCKET',
    'role_arn': 'YOUR_ROLE_ARN',
}
|
162742
|
import tensorflow as tf
class VGG(tf.keras.layers.Layer):
    """VGG-16 style convolutional backbone.

    ``call`` returns two feature maps: ``o1`` (output of the third 512-channel
    block, before pool4) and ``o2`` (output of the dilated 1024-channel convs
    replacing the fully connected layers). The dilation-6 conv14 matches the
    SSD-style fc6/fc7 conversion -- presumably used by a detector head;
    confirm with the caller.
    """
    def __init__(self, use_bn=False):
        # use_bn: insert BatchNormalization after each conv in the blocks.
        super(VGG, self).__init__()
        # Block 1: two 64-channel convs + 2x2/2 pooling.
        self.conv1 = VGG._make_conv_block(64, 3, 1, "same", use_bn)
        self.conv2 = VGG._make_conv_block(64, 3, 1, "same", use_bn)
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")
        # Block 2: two 128-channel convs + pooling.
        self.conv3 = VGG._make_conv_block(128, 3, 1, "same", use_bn)
        self.conv4 = VGG._make_conv_block(128, 3, 1, "same", use_bn)
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")
        # Block 3: three 256-channel convs + pooling.
        self.conv5 = VGG._make_conv_block(256, 3, 1, "same", use_bn)
        self.conv6 = VGG._make_conv_block(256, 3, 1, "same", use_bn)
        self.conv7 = VGG._make_conv_block(256, 3, 1, "same", use_bn)
        self.pool3 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")
        # Block 4: three 512-channel convs; its output is returned as o1.
        self.conv8 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.conv9 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.conv10 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.pool4 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding="same")
        # Block 5: three 512-channel convs; note pool5 is 3x3 with stride 1,
        # so it does NOT downsample (unlike the classification VGG).
        self.conv11 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.conv12 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.conv13 = VGG._make_conv_block(512, 3, 1, "same", use_bn)
        self.pool5 = tf.keras.layers.MaxPool2D(pool_size=3, strides=1, padding="same")
        # Dilated (rate 6) + 1x1 convs replacing the FC layers; no BN here.
        self.conv14 = tf.keras.layers.Conv2D(filters=1024, kernel_size=3, strides=1, padding="same", dilation_rate=6)
        self.conv15 = tf.keras.layers.Conv2D(filters=1024, kernel_size=1, strides=1, padding="same")
    @staticmethod
    def _make_conv_block(out_channels, kernel_size, strides, padding, use_bn=True):
        # One Conv2D, optionally followed by BatchNorm, then ReLU.
        if use_bn:
            return tf.keras.Sequential([
                tf.keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=strides, padding=padding),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.ReLU()
            ])
        else:
            return tf.keras.Sequential([
                tf.keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=strides, padding=padding),
                tf.keras.layers.ReLU()
            ])
    def call(self, inputs, training=None, *args, **kwargs):
        """Forward pass; returns (o1, o2) feature maps (see class docstring)."""
        x = self.conv1(inputs, training=training)
        x = self.conv2(x, training=training)
        x = self.pool1(x)
        x = self.conv3(x, training=training)
        x = self.conv4(x, training=training)
        x = self.pool2(x)
        x = self.conv5(x, training=training)
        x = self.conv6(x, training=training)
        x = self.conv7(x, training=training)
        x = self.pool3(x)
        x = self.conv8(x, training=training)
        x = self.conv9(x, training=training)
        x = self.conv10(x, training=training)
        o1 = x  # first returned feature map (pre-pool4)
        x = self.pool4(x)
        x = self.conv11(x, training=training)
        x = self.conv12(x, training=training)
        x = self.conv13(x, training=training)
        x = self.pool5(x)
        # conv14/conv15 have no BN, so ReLU is applied explicitly.
        x = self.conv14(x)
        x = tf.nn.relu(x)
        x = self.conv15(x)
        x = tf.nn.relu(x)
        o2 = x  # second returned feature map
        return o1, o2
|
162761
|
from __future__ import annotations # noqa: F401
import re
import warnings
import numpy as np
import pandas as pd
import xarray as xr
from .config import config
from .grid import _wrf_grid_from_dataset
def _decode_times(ds: xr.Dataset) -> xr.Dataset:
    """
    Decode the time variable to datetime64.

    Tries the classic WRF ``Times`` layout (``YYYY-MM-DD_HH:MM:SS``) first and
    falls back to an ISO-like variant with a ``T`` separator and fractional
    seconds. The parsed times are attached as the ``Time`` coordinate.
    """
    try:
        _time = pd.to_datetime(
            ds.Times.data.astype('str'), errors='raise', format='%Y-%m-%d_%H:%M:%S'
        )
    except ValueError:
        # Some files write Times with a "T" separator and fractional seconds.
        _time = pd.to_datetime(
            ds.Times.data.astype('str'), errors='raise', format='%Y-%m-%dT%H:%M:%S.%f'
        )
    ds = ds.assign_coords({'Time': _time})
    ds.Time.attrs = {'long_name': 'Time', 'standard_name': 'time'}
    # make XTIME be consistent with its description: if it was decoded to
    # absolute datetimes, subtract the epoch named in its "minutes since ..."
    # description so it becomes an elapsed-time offset again.
    if 'XTIME' in ds.variables and np.issubdtype(ds.XTIME.dtype, np.datetime64):
        ds['XTIME'].data = (
            ds.XTIME.data
            - pd.to_datetime(
                ds['XTIME'].description, format='minutes since %Y-%m-%d %H:%M:%S'
            ).to_datetime64()
        )
    return ds
def _clean_brackets_from_units(ds: xr.Dataset) -> xr.Dataset:
    """Strip configured bracket characters from every ``units`` attribute."""
    # Build a character class from the configured bracket characters,
    # backslash-escaping each one so regex metacharacters are literal.
    bracket_chars = config.get('brackets_to_clean_from_units')
    pattern = re.compile('[' + '\\'.join(bracket_chars) + ']')
    for name in ds.variables:
        if 'units' in ds[name].attrs:
            ds[name].attrs['units'] = pattern.sub('', ds[name].attrs['units'])
    return ds
def _make_units_pint_friendly(ds: xr.Dataset) -> xr.Dataset:
    """Harmonize awkward WRF units into pint-friendly ones."""
    ds = _clean_brackets_from_units(ds)
    # Invert the configured "harmonized -> [wrf variants]" mapping into a
    # direct "wrf unit -> harmonized unit" lookup table.
    wrf_to_harmonized = {}
    for harmonized, wrf_variants in config.get('unit_harmonization_map').items():
        for wrf_unit in wrf_variants:
            wrf_to_harmonized[wrf_unit] = harmonized
    for name in ds.data_vars:
        raw_unit = ds[name].attrs.get('units')
        if raw_unit in wrf_to_harmonized:
            harmonized = wrf_to_harmonized[raw_unit]
            if harmonized == 'invalid':
                # No sensible unit exists; drop the attribute entirely.
                ds[name].attrs.pop('units', None)
            else:
                ds[name].attrs['units'] = harmonized
    return ds
def _modify_attrs_to_cf(ds: xr.Dataset) -> xr.Dataset:
    """Modify the attributes of the dataset to comply with CF conventions."""
    # Universal updates: every mapped variable that exists in the dataset.
    universal = set(config.get('cf_attribute_map')) & set(ds.data_vars)
    for name in universal:
        ds[name].attrs.update(config.get(f'cf_attribute_map.{name}'))
    # Conditional updates (right now just the vertical coordinate type,
    # keyed on whether the file uses hybrid vertical coordinates).
    condition = 'HYBRID_OPT==0' if getattr(ds, 'HYBRID_OPT', 0) == 0 else 'HYBRID_OPT!=0'
    conditional = set(config.get(f'conditional_cf_attribute_map.{condition}')) & set(ds.data_vars)
    for name in conditional:
        ds[name].attrs.update(
            config.get(f'conditional_cf_attribute_map.{condition}.{name}')
        )
    return ds
def _collapse_time_dim(ds: xr.Dataset) -> xr.Dataset:
    """Drop the redundant leading Time axis from static coordinate arrays.

    WRF writes lat/lon (3D) and vertical (2D) coordinate variables with a
    Time dimension even when every time step is identical; keep only the
    first slice so they become proper time-invariant coordinates.
    """
    # This "time dimension collapsing" assumption is wrong with moving nests
    # and should be applied to static, nested domains.
    lat_lon_coords = set(config.get('latitude_coords') + config.get('longitude_coords'))
    vertical_coords = set(config.get('vertical_coords'))
    coords = set(ds.variables).intersection(lat_lon_coords.union(vertical_coords))
    ds = ds.set_coords(coords)
    for coord in ds.coords:
        data_to_reassign = None
        if coord in lat_lon_coords and ds[coord].ndim == 3:
            data_to_reassign = ds[coord].data[0, :, :]
        elif coord in vertical_coords and ds[coord].ndim == 2:
            data_to_reassign = ds[coord].data[0, :]
        if data_to_reassign is not None:
            # Preserve attrs/encoding -- assign_coords would otherwise drop them.
            attrs, encoding = ds[coord].attrs, ds[coord].encoding
            ds = ds.assign_coords({coord: (ds[coord].dims[1:], data_to_reassign)})
            ds[coord].attrs = attrs
            ds[coord].encoding = encoding
    return ds
def _include_projection_coordinates(ds: xr.Dataset) -> xr.Dataset:
    """Introduce projection dimension coordinate values and CRS.

    Adds 1D coordinate values for the horizontal dims, a ``wrf_projection``
    scalar variable holding the CRS, and ``grid_mapping`` attributes linking
    each horizontal data variable to it (per CF conventions).
    """
    try:
        grid_components = _wrf_grid_from_dataset(ds)
    except KeyError:
        # Best-effort: without the needed dims/projection attrs, return as-is.
        warnings.warn(
            'Unable to create coordinate values and CRS due to insufficient dimensions or '
            'projection metadata.'
        )
        return ds
    horizontal_dims = set(config.get('horizontal_dims')).intersection(set(ds.dims))
    # Include dimension coordinates
    for dim in horizontal_dims:
        ds[dim] = (dim, grid_components[dim], config.get(f'cf_attribute_map.{dim}'))
    # Include CRS as a dimensionless variable carrying the CF grid-mapping attrs
    ds['wrf_projection'] = (tuple(), grid_components['crs'], grid_components['crs'].to_cf())
    for varname in ds.data_vars:
        if any(dim in ds[varname].dims for dim in horizontal_dims):
            ds[varname].attrs['grid_mapping'] = 'wrf_projection'
    return ds
def _assign_coord_to_dim_of_different_name(ds: xr.Dataset) -> xr.Dataset:
    """Copy configured variables onto dims of a different name, then drop them."""
    for source_name, target_dim in config.get('assign_coord_to_dim_map').items():
        try:
            ds[target_dim] = ds[source_name]
        except KeyError:
            # Variable absent from this dataset; nothing to assign.
            continue
        del ds[source_name]
    return ds
def _rename_dims(ds: xr.Dataset) -> xr.Dataset:
    """Rename dims for more consistent semantics."""
    mapping = {}
    # Only rename dims actually present; xarray raises on unknown names.
    for old_name, new_name in config.get('rename_dim_map').items():
        if old_name in ds.dims:
            mapping[old_name] = new_name
    return ds.rename(mapping)
def _calc_base_diagnostics(ds: xr.Dataset, drop: bool = True) -> xr.Dataset:
    """Calculate the four basic fields that WRF does not have in physically meaningful form.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset representing WRF data opened via normal backend, with chunking.
    drop : bool
        Decide whether to drop the components of origin after creating the diagnostic fields from
        them.

    Notes
    -----
    This operation should be called before destaggering.
    """
    # Potential temperature: WRF's T is the perturbation from a 300 K base state.
    if 'T' in ds.data_vars:
        ds['air_potential_temperature'] = ds['T'] + 300
        ds['air_potential_temperature'].attrs = {
            'units': 'K',
            'standard_name': 'air_potential_temperature',
        }
        if drop:
            del ds['T']
    # Pressure: perturbation (P) plus base-state (PB) components.
    if 'P' in ds.data_vars and 'PB' in ds.data_vars:
        ds['air_pressure'] = ds['P'] + ds['PB']
        ds['air_pressure'].attrs = {
            'units': ds['P'].attrs.get('units', 'Pa'),
            'standard_name': 'air_pressure',
        }
        if drop:
            del ds['P'], ds['PB']
    # Geopotential (PH + PHB, perturbation plus base) and geopotential height.
    if 'PH' in ds.data_vars and 'PHB' in ds.data_vars:
        ds['geopotential'] = ds['PH'] + ds['PHB']
        ds['geopotential'].attrs = {
            'units': 'm**2 s**-2',
            'standard_name': 'geopotential',
            'stagger': ds['PH'].attrs.get('stagger', 'Z'),
        }
        # 9.81 m s**-2: gravitational acceleration used to convert to height.
        ds['geopotential_height'] = ds['geopotential'] / 9.81
        ds['geopotential_height'].attrs = {
            'units': 'm',
            'standard_name': 'geopotential_height',
            'stagger': ds['PH'].attrs.get('stagger', 'Z'),
        }
        if drop:
            del ds['PH'], ds['PHB']
    return ds
|
162762
|
import unittest
import cellpylib as cpl
import numpy as np
import os
# Directory of this test module; used to locate the `resources` fixtures.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHopfieldNet(unittest.TestCase):
    """Regression test: train a Hopfield net on digit patterns and check that
    both the learned weights and the recall dynamics match stored fixtures."""
    def test_hopfield_net(self):
        np.random.seed(0)  # HopfieldNet evolution is randomized; fix the seed
        # patterns for training: 5x7 binary glyphs for the digits 0, 1, 2
        zero = [
            0, 1, 1, 1, 0,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            1, 0, 0, 0, 1,
            0, 1, 1, 1, 0,
            0, 0, 0, 0, 0]
        one = [
            0, 1, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 1, 0, 0,
            0, 0, 0, 0, 0]
        two = [
            1, 1, 1, 0, 0,
            0, 0, 0, 1, 0,
            0, 0, 0, 1, 0,
            0, 1, 1, 0, 0,
            1, 0, 0, 0, 0,
            1, 1, 1, 1, 1,
            0, 0, 0, 0, 0]
        # replace the zeroes with -1 to make these vectors bipolar instead of binary
        one = [-1 if x == 0 else x for x in one]
        two = [-1 if x == 0 else x for x in two]
        zero = [-1 if x == 0 else x for x in zero]
        P = [zero, one, two]
        hopfield_net = cpl.HopfieldNet(num_cells=35)
        hopfield_net.train(P)
        # Learned weight matrix must match the stored fixture exactly.
        expected_weights = self._convert_to_ndarray("hopfield_net_weights.txt")
        np.testing.assert_equal(expected_weights, hopfield_net.W)
        expected_activities = self._convert_to_ndarray("hopfield_net.ca")
        # A corrupted "2" (top half blanked) to be recalled by the network.
        half_two = [
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
            0, 1, 1, 0, 0,
            1, 0, 0, 0, 0,
            1, 1, 1, 1, 1,
            0, 0, 0, 0, 0]
        half_two = [-1 if x == 0 else x for x in half_two]
        cellular_automaton = np.array([half_two])
        # Evolve for 155 steps; the full activity history must match the fixture.
        cellular_automaton = cpl.evolve(cellular_automaton, timesteps=155,
                                        apply_rule=hopfield_net.apply_rule, r=hopfield_net.r)
        np.testing.assert_equal(expected_activities, cellular_automaton)
    def _convert_to_ndarray(self, filename, dtype=int):
        # Parse a fixture that was written via numpy's repr-like "[[...]]"
        # format back into a 2D ndarray.
        with open(os.path.join(THIS_DIR, 'resources', filename), 'r') as content_file:
            content = content_file.read()
        content = content.replace('[[', '')
        content = content.replace(']]', '')
        content = content.replace('[', '')
        content = content.replace('],', ';')
        content = [[dtype(i) for i in x.split(',')] for x in content.split(';')]
        return np.array(content)
|
162783
|
import pytest
from stock_indicators import indicators
class TestHMA:
    """Behavioral tests for the Hull Moving Average indicator wrapper."""

    def test_standard(self, quotes):
        results = indicators.get_hma(quotes, 20)
        assert len(results) == 502
        populated = [x for x in results if x.hma is not None]
        assert len(populated) == 480
        assert round(float(results[149].hma), 4) == 236.0835
        assert round(float(results[501].hma), 4) == 235.6972

    def test_bad_data(self, bad_quotes):
        results = indicators.get_hma(bad_quotes, 15)
        assert len(results) == 502

    def test_no_quotes(self, quotes):
        assert len(indicators.get_hma([], 5)) == 0
        assert len(indicators.get_hma(quotes[:1], 5)) == 1

    def test_removed(self, quotes):
        results = indicators.get_hma(quotes, 20).remove_warmup_periods()
        assert len(results) == 480
        final = results.pop()
        assert round(float(final.hma), 4) == 235.6972

    def test_exceptions(self, quotes):
        from System import ArgumentOutOfRangeException
        with pytest.raises(ArgumentOutOfRangeException):
            indicators.get_hma(quotes, 1)
|
162872
|
class Solution:
    """LeetCode 41 "First Missing Positive" via in-place sign marking."""

    def dropNegatives(self, A):
        """Return a new list with only the strictly positive elements of A."""
        # Cleanup: removed the unused counter `j`; use a comprehension.
        return [value for value in A if value > 0]

    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer missing from nums.

        After filtering to positives (length n), every value v <= n marks
        index v-1 negative; the first index left positive is the answer.
        """
        positives = self.dropNegatives(nums)
        if not positives:
            return 1
        if len(positives) == 1:
            return 1 if positives[0] >= 2 else 2
        n = len(positives)
        for i in range(n):
            value = abs(positives[i])
            if value <= n:
                # Flip the sign at index value-1 to record "value is present".
                positives[value - 1] = -abs(positives[value - 1])
        for i in range(n):
            if positives[i] > 0:
                return i + 1
        # All of 1..n present, so the answer is n+1.
        return n + 1
|
162892
|
import os
import warnings
from typing import TYPE_CHECKING, Optional, Union
from .model import OME
try:
from qtpy.QtCore import QMimeData, Qt
from qtpy.QtWidgets import QTreeWidget, QTreeWidgetItem
except ImportError:
raise ImportError(
"qtpy and a Qt backend (pyside or pyqt) is required to use the OME widget:\n"
"pip install qtpy pyqt5"
)
if TYPE_CHECKING:
import napari
class OMETree(QTreeWidget):
    """A tree widget that displays OME XML metadata.

    Accepts OME objects, ``.xml`` files, or OME-TIFF paths (via ``update`` or
    drag & drop), and can optionally follow a napari viewer's active layer.
    """
    def __init__(
        self, ome_dict: dict = None, viewer: "napari.viewer.Viewer" = None, parent=None
    ) -> None:
        super().__init__(parent=parent)
        self._viewer = viewer
        self.setAcceptDrops(True)
        self.setDropIndicatorShown(True)
        self.setIndentation(15)
        # Bold the (single-column) header font.
        item = self.headerItem()
        font = item.font(0)
        font.setBold(True)
        item.setFont(0, font)
        self.clear()
        # Path of the file currently shown; None when nothing is loaded.
        self._current_path: Optional[str] = None
        if ome_dict:
            # NOTE(review): `update` accepts OME or str; a plain dict would
            # raise TypeError there -- confirm the intended argument type.
            self.update(ome_dict)
        if viewer is not None:
            # Track the viewer's active-layer selection.
            viewer.layers.selection.events.active.connect(
                lambda e: self._try_load_layer(e.value)
            )
            self._try_load_layer(viewer.layers.selection.active)
    def clear(self):
        # Reset the header placeholder text along with the tree contents.
        self.headerItem().setText(0, "drag/drop file...")
        super().clear()
    def _try_load_layer(self, layer: "napari.layers.Layer"):
        """Handle napari viewer behavior: extract OME metadata from a layer,
        falling back to re-parsing the layer's source TIFF path."""
        from ._napari_plugin import METADATA_KEY
        if layer is not None:
            path = str(layer.source.path)
            # deprecated... don't do this ... it should be a dict
            if callable(layer.metadata):
                ome_meta = layer.metadata()
            elif isinstance(layer.metadata, OME):
                ome_meta = layer.metadata
            else:
                ome_meta = layer.metadata.get(METADATA_KEY)
                if callable(ome_meta):
                    ome_meta = ome_meta()
            ome = None
            if isinstance(ome_meta, OME):
                ome = ome_meta
            elif path.endswith((".tiff", ".tif")) and path != self._current_path:
                try:
                    ome = OME.from_tiff(path)
                except Exception:
                    # Not an OME-TIFF (or unreadable); leave the tree as-is.
                    return
            if isinstance(ome, OME):
                self._current_path = path
                self.update(ome)
                self.headerItem().setText(0, os.path.basename(path))
        else:
            self._current_path = None
            self.clear()
    def update(self, ome: Union[OME, str]):
        """Populate the tree from an OME object or a .xml/.tiff file path.

        NOTE(review): shadows QTreeWidget.update(); presumably intentional.
        """
        if not ome:
            return
        if isinstance(ome, OME):
            _ome = ome
        elif isinstance(ome, str):
            if ome == self._current_path:
                return  # already displayed; avoid re-parsing
            try:
                if ome.endswith(".xml"):
                    _ome = OME.from_xml(ome)
                elif ome.lower().endswith((".tif", ".tiff")):
                    _ome = OME.from_tiff(ome)
                else:
                    warnings.warn(f"Unrecognized file type: {ome}")
                    return
            except Exception as e:
                warnings.warn(f"Could not parse OME metadata from {ome}: {e}")
                return
            self.headerItem().setText(0, os.path.basename(ome))
            self._current_path = ome
        else:
            raise TypeError("must be OME object or string")
        self._fill_item(_ome.dict(exclude_unset=True))
    def _fill_item(self, obj, item: QTreeWidgetItem = None):
        # Recursively mirror dicts/lists as child items; scalars are appended
        # to their parent item's label as "key: value".
        if item is None:
            self.clear()
            item = self.invisibleRootItem()
        if isinstance(obj, dict):
            for key, val in sorted(obj.items()):
                child = QTreeWidgetItem([key])
                item.addChild(child)
                self._fill_item(val, child)
        elif isinstance(obj, (list, tuple)):
            for n, val in enumerate(obj):
                # Label list entries by their "id" field when available.
                text = val.get("id", n) if hasattr(val, "get") else n
                child = QTreeWidgetItem([str(text)])
                item.addChild(child)
                self._fill_item(val, child)
        else:
            t = getattr(obj, "value", str(obj))
            item.setText(0, f"{item.text(0)}: {t}")
    def dropMimeData(
        self, parent: QTreeWidgetItem, index: int, data: QMimeData, a
    ) -> bool:
        # `a` is the (unused) Qt drop action. Load the first recognized file.
        if data.hasUrls():
            for url in data.urls():
                lf = url.toLocalFile()
                if lf.endswith((".xml", ".tiff", ".tif")):
                    self.update(lf)
                    return True
        return False
    def mimeTypes(self):
        # Only accept file-URL drops.
        return ["text/uri-list"]
    def supportedDropActions(self):
        return Qt.CopyAction
if __name__ == "__main__":
    # Manual smoke test: show an empty OMETree window.
    from qtpy.QtWidgets import QApplication
    app = QApplication([])
    widget = OMETree()
    widget.show()
    app.exec()
|
162902
|
from datetime import datetime, timedelta
from typing import Callable
from unittest.mock import MagicMock, patch
import pandas as pd
import pytest
from _pytest.monkeypatch import MonkeyPatch
from pybaseball import cache
# Canonical payload: a tiny one-column frame used as the cached/returned data.
@pytest.fixture(name="mock_data_1")
def _mock_data_1() -> pd.DataFrame:
    return pd.DataFrame([1, 2], columns=['a'])
# load_df stub simulating a cache miss (returns None).
@pytest.fixture(name='empty_load_mock')
def _empty_load_mock(monkeypatch: MonkeyPatch) -> MagicMock:
    load_mock = MagicMock(return_value=None)
    monkeypatch.setattr(cache.dataframe_utils, 'load_df', load_mock)
    return load_mock
# load_df stub simulating a cache hit (returns mock_data_1).
@pytest.fixture(name='load_mock')
def _load_mock(monkeypatch: MonkeyPatch, mock_data_1: pd.DataFrame) -> MagicMock:
    load_mock = MagicMock(return_value=mock_data_1)
    monkeypatch.setattr(cache.dataframe_utils, 'load_df', load_mock)
    return load_mock
# Stub for writing the JSON cache-record sidecar file.
@pytest.fixture(name='save_json_mock')
def _save_json_mock(monkeypatch: MonkeyPatch) -> MagicMock:
    save_mock = MagicMock()
    monkeypatch.setattr(cache.file_utils, 'safe_jsonify', save_mock)
    return save_mock
# Stub for writing the cached dataframe itself.
@pytest.fixture(name='save_mock')
def _save_mock(monkeypatch: MonkeyPatch) -> MagicMock:
    save_mock = MagicMock()
    monkeypatch.setattr(cache.dataframe_utils, 'save_df', save_mock)
    return save_mock
# cache.enable() should delegate to config.enable(True).
def test_cache_enable() -> None:
    enable_mock = MagicMock()
    with patch('pybaseball.cache.config.enable', enable_mock):
        cache.enable()
    enable_mock.assert_called_once_with(True)
# cache.disable() should delegate to config.enable(False).
def test_cache_disable() -> None:
    enable_mock = MagicMock()
    with patch('pybaseball.cache.config.enable', enable_mock):
        cache.disable()
    enable_mock.assert_called_once_with(False)
# With caching disabled, the wrapper calls through and touches no cache I/O.
@patch('pybaseball.cache.config.enabled', False)
def test_call_cache_disabled(load_mock: MagicMock, save_mock: MagicMock) -> None:
    df_func = MagicMock(return_value=pd.DataFrame([1, 2], columns=['a']))
    df_func.__name__ = "df_func"
    df_cache = cache.df_cache()
    assert not df_cache.cache_config.enabled
    wrapper = df_cache.__call__(df_func)
    wrapper(*(1, 2), **{'val1': 'a'})
    df_func.assert_called_once_with(1, 2, val1='a')
    load_mock.assert_not_called()
    save_mock.assert_not_called()
# A matching, unexpired cache record exists: the wrapper must return the
# cached frame and never call the wrapped function or save anything.
@patch('pybaseball.cache.config.enabled', True)
@patch('glob.glob', MagicMock(return_value=['1.cache_record.json']))
@patch('pybaseball.cache.file_utils.load_json', MagicMock(
    return_value={
        'expires': '3000-01-01',
        'func': 'df_func',
        'args': [1, 2],
        'kwargs': {'val1': 'a'},
        'dataframe': 'cachefile.csv'
    }
))
def test_call_cache_enabled_loads_cache(
        mock_data_1: pd.DataFrame,
        load_mock: MagicMock, save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    df_func = MagicMock()
    df_func.__name__ = "df_func"
    df_cache = cache.df_cache()
    assert df_cache.cache_config.enabled
    wrapper = df_cache.__call__(df_func)
    result = wrapper(*(1, 2), **{'val1': 'a'})
    load_mock.assert_called_once()
    df_func.assert_not_called()
    save_mock.assert_not_called()
    assert isinstance(result, pd.DataFrame)
    pd.testing.assert_frame_equal(result, mock_data_1)
# An expired record must be ignored: call through and re-save fresh data.
@patch('pybaseball.cache.config.enabled', True)
@patch('glob.glob', MagicMock(return_value=['1.cache_record.json']))
@patch('pybaseball.cache.file_utils.load_json', MagicMock(
    return_value={'expires': '2020-01-01', 'filename': 'old_file.csv'}
))
def test_call_cache_ignores_expired(
        mock_data_1: pd.DataFrame, load_mock: MagicMock,
        save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    df_func = MagicMock(return_value=mock_data_1)
    df_func.__name__ = "df_func"
    df_cache = cache.df_cache()
    assert df_cache.cache_config.enabled
    wrapper = df_cache.__call__(df_func)
    wrapper(*(1, 2), **{'val1': 'a'})
    df_func.assert_called_once_with(1, 2, val1='a')
    load_mock.assert_not_called()
    save_mock.assert_called_once()
    # The frame passed to save_df must be the wrapped function's output.
    pd.testing.assert_frame_equal(mock_data_1, save_mock.call_args[0][0])
# No cache record at all: call through and save the fresh result.
@patch('pybaseball.cache.config.enabled', True)
@patch('glob.glob', MagicMock(return_value=[]))
@patch('os.path.exists', MagicMock(return_value=False))
def test_call_cache_gets_uncached_data(
        mock_data_1: pd.DataFrame, load_mock: MagicMock,
        save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    df_func = MagicMock(return_value=mock_data_1)
    df_func.__name__ = "df_func"  # type: ignore
    df_cache = cache.df_cache()
    assert df_cache.cache_config.enabled
    wrapper = df_cache.__call__(df_func)
    wrapper(*(1, 2), **{'val1': 'a'})
    df_func.assert_called_once_with(1, 2, val1='a')
    load_mock.assert_not_called()
    save_mock.assert_called_once()
    pd.testing.assert_frame_equal(mock_data_1, save_mock.call_args[0][0])
# `thrower` is a fixture (presumably defined in conftest.py) whose call raises;
# it is patched over internals to prove caching failures never break the call.
# NOTE(review): these tests use cache.cache.df_cache() while earlier ones use
# cache.df_cache() -- presumably both resolve to the same class; confirm.
@patch('pybaseball.cache.config.enabled', True)
def test_call_cache_get_func_data_fails_silently(
        mock_data_1: pd.DataFrame, thrower: Callable,
        load_mock: MagicMock, save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    assert cache.config.enabled
    df_func = MagicMock(return_value=mock_data_1)
    df_func.__name__ = "df_func"
    df_cache = cache.cache.df_cache()
    assert df_cache.cache_config.enabled
    # Failure resolving the function name: fall back to calling through.
    with patch('pybaseball.cache.func_utils.get_func_name', thrower):
        wrapper = df_cache.__call__(df_func)
        result = wrapper(*(1, 2), **{'val1': 'a'})
        assert isinstance(result, pd.DataFrame)
        pd.testing.assert_frame_equal(result, mock_data_1)
        load_mock.assert_not_called()
        save_mock.assert_not_called()
@patch('pybaseball.cache.config.enabled', True)
def test_call_cache_load_fails_silently(
        mock_data_1: pd.DataFrame, thrower: Callable,
        load_mock: MagicMock, save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    assert cache.config.enabled
    df_func = MagicMock(return_value=mock_data_1)
    df_func.__name__ = "df_func"
    df_cache = cache.cache.df_cache()
    assert df_cache.cache_config.enabled
    # Failure while scanning for cache records: call through, then still save.
    with patch('glob.glob', thrower):
        wrapper = df_cache.__call__(df_func)
        result = wrapper(*(1, 2), **{'val1': 'a'})
        assert isinstance(result, pd.DataFrame)
        pd.testing.assert_frame_equal(result, mock_data_1)
        load_mock.assert_not_called()
        save_mock.assert_called_once()
# A valid record exists but its dataframe fails to load (empty_load_mock) and
# CacheRecord.save raises: the result must still come back, nothing saved.
@patch('pybaseball.cache.config.enabled', True)
@patch('glob.glob', MagicMock(return_value=['1.cache_record.json']))
@patch('pybaseball.cache.file_utils.load_json', MagicMock(
    return_value={
        'expires': '3000-01-01',
        'func': 'df_func',
        'args': [1, 2],
        'kwargs': {'val1': 'a'},
        'dataframe': 'cachefile.csv'
    }
))
def test_call_cache_save_fails_silently(
        mock_data_1: pd.DataFrame, thrower: Callable,
        empty_load_mock: MagicMock, save_mock: MagicMock) -> None:
    assert cache.config.enabled
    df_func = MagicMock(return_value=mock_data_1)
    df_func.__name__ = "df_func"
    df_cache = cache.cache.df_cache()
    assert df_cache.cache_config.enabled
    with patch.object(cache.cache_record.CacheRecord, 'save', thrower):
        wrapper = df_cache.__call__(df_func)
        result = wrapper(*(1, 2), **{'val1': 'a'})
        assert isinstance(result, pd.DataFrame)
        pd.testing.assert_frame_equal(result, mock_data_1)
        empty_load_mock.assert_called_once()
        save_mock.assert_not_called()
def test_purge(remove: MagicMock) -> None:
    """purge() should delete every cache record and its data file.

    `remove` is a fixture (presumably patching os.remove in conftest.py).
    """
    glob_result = ['1.cache_record.json', '2.cache_record.json']
    glob_mock = MagicMock(return_value=glob_result)
    mock_cache_record = {'expires': '3000-01-01', 'filename': 'df_cache.parquet'}
    mock_load_json = MagicMock(return_value=mock_cache_record)
    with patch('glob.glob', glob_mock):
        with patch('pybaseball.cache.file_utils.load_json', mock_load_json):
            cache.purge()
    # Bug fix: `assert mock.called_once()` is always truthy (it just creates a
    # child mock and never checks anything). Use the real assertion API.
    glob_mock.assert_called_once()
    assert mock_load_json.call_count == len(glob_result)
    assert remove.call_count == len(glob_result)
def test_flush(remove: MagicMock) -> None:
    """flush() should delete only the expired cache records."""
    glob_result = ['1.cache_record.json', '2.cache_record.json']
    glob_mock = MagicMock(return_value=glob_result)
    # One expired record (deleted) and one still-valid record (kept).
    mock_cache_records = [
        {'expires': '2000-01-01', 'filename': 'df_cache.parquet'},
        {'expires': '3000-01-01', 'filename': 'df_cache2.parquet'},
    ]
    mock_load_json = MagicMock(side_effect=mock_cache_records)
    with patch('glob.glob', glob_mock):
        with patch('pybaseball.cache.file_utils.load_json', mock_load_json):
            cache.flush()
    glob_mock.assert_called_once()  # bug fix: was always-true `called_once()`
    assert mock_load_json.call_count == len(glob_result)
    remove.assert_called_once()
|
162921
|
seed_csv = """
id,name,some_date
1,Easton,1981-05-20T06:46:51
2,Lillian,1978-09-03T18:10:33
3,Jeremiah,1982-03-11T03:59:51
4,Nolan,1976-05-06T20:21:35
""".lstrip()
model_sql = """
select * from {{ ref('seed') }}
"""
profile_yml = """
version: 2
models:
- name: materialization
columns:
- name: id
tests:
- unique
- not_null
- name: name
tests:
- not_null
"""
|
162923
|
import numpy as np
from rlscore.learner import LeaveOneOutRLS
from rlscore.measure import sqerror
from housing_data import load_housing
def train_rls():
    """Grid-search Gaussian-kernel gamma and regparam via leave-one-out CV,
    then report the selected parameters and the held-out test error."""
    X_train, Y_train, X_test, Y_test = load_housing()
    # Log-spaced grid 2^-15 .. 2^15, shared by both hyperparameters.
    regparams = [2. ** exponent for exponent in range(-15, 16)]
    gammas = regparams
    best_regparam = None
    best_gamma = None
    best_error = float("inf")
    best_learner = None
    for gamma in gammas:
        # A fresh learner per kernel parameter; it internally evaluates
        # leave-one-out performance for every regparam at once.
        learner = LeaveOneOutRLS(X_train, Y_train, kernel="GaussianKernel",
                                 gamma=gamma, regparams=regparams)
        error = np.min(learner.cv_performances)
        if error < best_error:
            best_error = error
            best_regparam = learner.regparam
            best_gamma = gamma
            best_learner = learner
    P_test = best_learner.predict(X_test)
    print("best parameters gamma %f regparam %f" % (best_gamma, best_regparam))
    print("best leave-one-out error %f" % best_error)
    print("test error %f" % sqerror(Y_test, P_test))


if __name__ == "__main__":
    train_rls()
|
162949
|
from typing import List
import fv3gfs.util
from .. import _wrapper
def open_restart(
    dirname: str,
    communicator: fv3gfs.util.CubedSphereCommunicator,
    label: str = "",
    only_names: List[str] = None,
) -> dict:
    """Load restart files output by the Fortran model into a state dictionary.

    Thin wrapper around :py:func:`fv3gfs.util.open_restart` that supplies the
    tracer metadata from the live Fortran wrapper, so restart tracers are
    interpreted consistently with the running model.

    See :py:func:`fv3gfs.set_state` if you would like to load the resulting state into
    the Fortran model.

    Args:
        dirname: location of restart files, can be local or remote
        communicator: communication object for the cubed sphere
        label: prepended string on the restart files to load
        only_names (optional): list of standard names to load (None presumably
            loads everything -- behavior inherited from fv3gfs.util.open_restart)

    Returns:
        state: model state dictionary
    """
    tracer_properties = _wrapper.get_tracer_metadata()
    return fv3gfs.util.open_restart(
        dirname,
        communicator,
        label=label,
        only_names=only_names,
        tracer_properties=tracer_properties,
    )
|
162963
|
from typing import List, Tuple
def simple_line_diff(
    old_lines: List[str],
    new_lines: List[str],
    search_from_start_offset: int = 0,
    search_from_end_offset: int = 0,
) -> Tuple[bool, int, int]:
    """
    Re-implementation of `dotfiles.lsp.utils.simple_line_diff` from the Lua side.

    Returns (changed, lines_common_from_start, lines_common_from_end); the
    common counts exclude the lines skipped by the respective offsets.
    """
    shorter = min(len(old_lines), len(new_lines))
    prefix = 0
    suffix = 0
    # Count matching lines from the top, starting at the given offset.
    index = search_from_start_offset
    while index < shorter and old_lines[index] == new_lines[index]:
        prefix += 1
        index += 1
    # Same length and everything matched: no change at all.
    if len(old_lines) == len(new_lines) and prefix == len(old_lines):
        return False, prefix, suffix
    # Count matching lines from the bottom, never overlapping the prefix.
    index = search_from_end_offset
    limit = shorter - prefix
    while index < limit and old_lines[-index - 1] == new_lines[-index - 1]:
        suffix += 1
        index += 1
    assert len(old_lines) >= prefix + suffix, 'sanity check'
    assert len(new_lines) >= prefix + suffix, 'sanity check'
    return True, prefix, suffix
|
163013
|
from urllib.parse import urlparse, urlunparse
class Link:
    """Dynamic link: `url` is split into components when set and rebuilt
    from the (possibly mutated) components when read."""

    def __init__(self, url: str, content: str = None):
        self.url = url
        self.content = content

    @property
    def url(self):
        """Reassemble the URL from the current components."""
        return urlunparse(
            (
                self.scheme,
                self.netloc,
                self.path,
                self.params,
                self.query,
                self.fragment,
            )
        )

    @url.setter
    def url(self, url):
        # Keep the raw string around, then expose every parsed component
        # as a mutable attribute.
        self._url = url
        parsed_url = urlparse(url)
        self.scheme = parsed_url.scheme
        self.netloc = parsed_url.netloc
        self.path = parsed_url.path
        self.params = parsed_url.params
        self.query = parsed_url.query
        self.fragment = parsed_url.fragment
        self.port = parsed_url.port

    def copy(self):
        # Bug fix: rebuild from the current components (self.url), not the
        # original string (self._url), so component mutations survive the
        # copy; also carry `content` over instead of dropping it.
        return Link(self.url, self.content)

    @property
    def may_file(self):
        """Heuristic: the path contains a '.' suggesting a file extension."""
        if not self.path:
            return False
        return "." in self.path
|
163014
|
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from collections import Counter
import numpy as np
import sklearn.metrics
SAVE_PLOT_STR = "Saved figure for {} at {}"
# Bug fix: this template ended with "{}}" -- the stray unmatched brace made
# str.format raise "Single '}' encountered" instead of printing the error.
ERR_PLOT_STR = "ERROR! Failed to save figure for {} at {}. Exception {}"


def plot_losses(args):
    """Plot each per-epoch series in args.epoch_details and save it as a PNG.

    Args:
        args: namespace with `epochs` (int), `epoch_details` (mapping of
            series name -> per-epoch values) and `results_dir` (output path
            prefix; files are saved as "<results_dir>.<key>.png").
    """
    epochs = range(args.epochs)
    losses = args.epoch_details
    for key in losses:
        save_path = "{}.{}.png".format(args.results_dir, key)
        loss = losses[key]
        try:
            plt.plot(epochs, loss)
            plt.ylabel(key)
            plt.xlabel('epoch')
            plt.savefig(save_path)
            plt.close()
            print(SAVE_PLOT_STR.format(key, save_path))
        except Exception as e:  # bug fix: was py2-only "except Exception, e"
            print(ERR_PLOT_STR.format(key, save_path, e))
def plot_pred_stats(golds_percent, pred_percent, save_path):
    """Save a grouped bar chart comparing model vs. human class percentages.

    Args:
        golds_percent: mapping of class -> human percentage (assumed to be
            ordered to match `classes` below -- TODO confirm with caller).
        pred_percent: mapping of class -> model percentage.
        save_path: output image path.
    """
    # Cleanup: removed the redundant local `import matplotlib` / pyplot
    # re-import; the module-level import (with the Agg backend) covers it.
    classes = ['Fatty', 'Scattered', 'Hetrogenous', 'Dense']
    width = 0.35
    ind = np.arange(len(pred_percent))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    rec1 = ax.bar(ind - width / 2, pred_percent.values(), width, align='center', label='Model')
    rec2 = ax.bar(ind + width / 2, golds_percent.values(), width, align='center', label='Human')
    ax.legend()
    tick_marks = np.arange(len(classes))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(classes, rotation=45)

    def autolabel(rects):
        # Annotate each bar with its height, slightly above the bar top.
        for rect in rects:
            h = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * h, '%.1f' % h,
                    ha='center', va='bottom')

    autolabel(rec1)
    autolabel(rec2)
    # Leave headroom so the topmost annotation is not clipped.
    ymin, ymax = plt.ylim()
    plt.ylim((ymin, ymax + 5))
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()
def plot_roc_curve(fr, tr, class_name, save_path):
    """Plot one ROC curve (AUC in the legend) for `class_name` and save it."""
    roc_auc = sklearn.metrics.auc(fr, tr)
    plt.figure()
    line_width = 2
    curve_label = '{} ROC curve (area = {:.2f})'.format(class_name, roc_auc)
    plt.plot(fr, tr, color='darkorange', lw=line_width, label=curve_label)
    # Chance-level diagonal for visual reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False {} Rate'.format(class_name))
    plt.ylabel('True {} Rate'.format(class_name))
    plt.legend(loc="lower right")
    plt.savefig(save_path)
    plt.close()
|
163017
|
from typing import Optional
import numpy as np
import skimage.draw as skdraw
from gdsfactory.component import Component
from gdsfactory.types import Floats, Layers
def to_np(
    component: Component,
    nm_per_pixel: int = 20,
    layers: Layers = ((1, 0),),
    values: Optional[Floats] = None,
    pad_width: int = 1,
) -> np.ndarray:
    """Returns a pixelated numpy array from Component polygons.

    Args:
        component: Component to rasterize.
        nm_per_pixel: resolution; you can go from 20 (coarse) to 4 (fine).
        layers: to convert. Order matters (latter overwrite former).
        values: pixel value associated to each layer (defaults to 1).
        pad_width: padding pixels around the image.
    """
    pixels_per_um = (1 / nm_per_pixel) * 1e3
    (xmin, ymin), (xmax, ymax) = component.bbox
    shape = (
        int(np.ceil(xmax - xmin) * pixels_per_um),
        int(np.ceil(ymax - ymin) * pixels_per_um),
    )
    canvas = np.zeros(shape, dtype=float)
    polygons_by_layer = component.get_polygons(by_spec=True, depth=None)
    values = values or [1] * len(layers)
    for layer, value in zip(layers, values):
        for polygon in polygons_by_layer.get(layer, []):
            # Shift the polygon to the bbox origin, scale to pixels, and
            # fill the enclosed pixels with this layer's value.
            rows = polygon[:, 0] - xmin
            cols = polygon[:, 1] - ymin
            rr, cc = skdraw.polygon(
                rows * pixels_per_um, cols * pixels_per_um, shape=shape
            )
            canvas[rr, cc] = value
    return np.pad(canvas, pad_width=pad_width)
if __name__ == "__main__":
    # Manual check: rasterize a demo component and display the result.
    import matplotlib.pyplot as plt
    import gdsfactory as gf
    c = gf.c.straight()
    c = gf.c.bend_circular(layers_cladding=[gf.LAYER.WGCLAD], cladding_offset=3.0)
    # i = to_np(c, nm_per_pixel=250)
    i = to_np(c, nm_per_pixel=20)
    c.show()
    # Transpose + origin="lower" so axes match the layout orientation.
    plt.imshow(i.transpose(), origin="lower")
    plt.colorbar()
    plt.show()
163022
|
class WrongInputDataType(Exception):
    """Raised when input data is not a pandas.Series."""
    def __init__(self, message="Input data must be a pandas.Series."):
        self.message = message
        super().__init__(self.message)


class NotFittedError(Exception):
    """Raised when detect() is called before fit().

    Bug fix: joining message and an empty tip used to leave a trailing space;
    empty parts are now skipped.
    """
    def __init__(self, message="Please call fit() before detect().", tip=""):
        self.message = " ".join(part for part in (message, tip) if part)
        super().__init__(self.message)


class NoRangeDefinedError(NotFittedError):
    """Raised when neither fit() was called nor a min/max range was given."""
    def __init__(self, message="Or specify min/max range when instantiating detector object."):
        # Bug fix: the extra hint belongs in `tip`, so the base "call fit()"
        # message is kept and the hint appended (it previously replaced it,
        # producing a sentence starting with "Or ...").
        super().__init__(tip=message)


class InvalidArgument(Exception):
    """Generic invalid-argument error: "<name> must be <requirement>."."""
    def __init__(self, argument_name, requirement):
        self.message = f"{argument_name} must be {requirement}."
        super().__init__(self.message)


class NotInteger(InvalidArgument):
    """Raised when an argument that must be an integer is not."""
    def __init__(self, argument_name):
        super().__init__(argument_name, "an integer")


class NonUniqueTimeStamps(Exception):
    """Raised when multiple values share the same time stamp."""
    def __init__(self, message="Found multiple values at the same time stamp."):
        self.message = message
        super().__init__(self.message)
|
163030
|
import unittest
from sqlalchemy.orm import sessionmaker
#from nesta.core.orms.uk_geography_lookup_orm import UkGeographyLookup
from nesta.core.orms.uk_geography_lookup_orm import Base
from nesta.core.orms.orm_utils import get_mysql_engine
class TestUkGeographyLookup(unittest.TestCase):
    """Check that the UkGeographyLookup ORM tables build and drop cleanly."""

    # Shared engine/session factory for all tests in this case.
    engine = get_mysql_engine("MYSQLDBCONF", "mysqldb")
    Session = sessionmaker(engine)

    def setUp(self):
        """Create the temporary tables before each test."""
        Base.metadata.create_all(self.engine)

    def tearDown(self):
        """Drop the temporary tables after each test."""
        Base.metadata.drop_all(self.engine)

    def test_build(self):
        # Building the schema in setUp without errors is the test itself.
        pass
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
163052
|
import mock
from nose.tools import assert_equal, assert_in, raises, assert_is, assert_is_instance, assert_false, assert_true
from .. import metrics as mm, exceptions, histogram, simple_metrics as simple, meter
class TestMetricsModule(object):
    """Unit tests for the ``appmetrics.metrics`` module-level API.

    The tests mutate the module-global ``REGISTRY`` and ``TAGS``
    dictionaries directly (often replacing them wholesale with dicts of
    mocks), so set-up snapshots both and tear-down restores them to keep
    tests isolated from each other.
    """
    def setUp(self):
        # Snapshot module-global state, then start each test from a clean slate.
        self.original_registy = mm.REGISTRY.copy()
        self.original_tags = mm.TAGS.copy()
        mm.REGISTRY.clear()
        mm.TAGS.clear()
    def tearDown(self):
        # Restore the globals captured in setUp.
        mm.REGISTRY.clear()
        mm.REGISTRY.update(self.original_registy)
        mm.TAGS.clear()
        mm.TAGS.update(self.original_tags)
    # --- registry basics: new_metric / metric / metrics / get / notify ---
    def test_new_metric(self):
        Cls = mock.Mock()
        args = [mock.Mock(), mock.Mock()]
        kwargs = dict(other=mock.Mock())
        res = mm.new_metric("test", Cls, *args, **kwargs)
        assert_in("test", mm.REGISTRY)
        item = mm.REGISTRY["test"]
        assert_equal(
            Cls.call_args_list,
            [mock.call(*args, **kwargs)]
        )
        assert_equal(item, Cls())
        assert_equal(item, res)
    @raises(exceptions.DuplicateMetricError)
    def test_new_metric_duplicated(self):
        Cls = mock.Mock()
        mm.new_metric("test", Cls)
        mm.new_metric("test", Cls)
    @raises(exceptions.InvalidMetricError)
    def test_metric_not_found(self):
        mm.metric("test")
    def test_metric(self):
        expected = mm.REGISTRY["test"] = mock.Mock()
        assert_equal(mm.metric("test"), expected)
    def test_metrics(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        expected = ["test1", "test2"]
        assert_equal(mm.metrics(), expected)
    def test_get(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        assert_equal(mm.get("test1"), mm.REGISTRY["test1"].get.return_value)
    @raises(exceptions.InvalidMetricError)
    def test_get_not_existing(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.get("test3")
    def test_notify(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.notify("test1", 123)
        assert_equal(
            mm.REGISTRY["test1"].notify.call_args_list,
            [mock.call(123)]
        )
    @raises(exceptions.InvalidMetricError)
    def test_notify_not_existing(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock())
        mm.notify("test3", 123)
    # --- metric deletion, with and without tags ---
    def test_delete_metric(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2)
        assert_equal(mm.delete_metric("test1"), m1)
        assert_equal(mm.REGISTRY, dict(test2=m2))
    def test_delete_metric_not_found(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2)
        assert_equal(mm.delete_metric("test3"), None)
        assert_equal(mm.REGISTRY, dict(test1=m1, test2=m2))
    def test_delete_metric_with_tags(self):
        # Deleting a metric must also drop it from any tag sets.
        mm.TAGS = {"test": {"test1", "test3"}}
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = dict(test1=m1, test2=m2, test3=m3)
        assert_equal(mm.delete_metric("test1"), m1)
        assert_equal(mm.REGISTRY, dict(test2=m2, test3=m3))
        assert_equal(mm.TAGS["test"], {"test3"})
    # --- typed factory helpers ---
    def test_new_histogram_default(self):
        metric = mm.new_histogram("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.UniformReservoir)
        assert_equal(metric.reservoir.size, histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
    def test_new_histogram(self):
        metric = mm.new_histogram("test", histogram.UniformReservoir(10))
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.UniformReservoir)
        assert_equal(metric.reservoir.size, 10)
    def test_new_counter(self):
        metric = mm.new_counter("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, simple.Counter)
    def test_new_gauge(self):
        metric = mm.new_gauge("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, simple.Gauge)
    def test_new_meter(self):
        metric = mm.new_meter("test")
        assert_is(metric, mm.metric("test"))
        assert_is_instance(metric, meter.Meter)
    @raises(exceptions.InvalidMetricError)
    def test_new_reservoir_bad_type(self):
        mm.new_reservoir('xxx')
    @raises(TypeError)
    def test_new_reservoir_bad_args(self):
        mm.new_reservoir('uniform', xxx='yyy')
    def test_new_reservoir_with_defaults(self):
        reservoir = mm.new_reservoir()
        assert_is_instance(reservoir, histogram.UniformReservoir)
        assert_equal(reservoir.size, histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
    def test_new_reservoir(self):
        reservoir = mm.new_reservoir('sliding_window', 5)
        assert_is_instance(reservoir, histogram.SlidingWindowReservoir)
        assert_equal(reservoir.size, 5)
    def test_new_histogram_with_implicit_reservoir(self):
        metric = mm.new_histogram_with_implicit_reservoir('test', 'sliding_window', 5)
        assert_is_instance(metric, histogram.Histogram)
        assert_is_instance(metric.reservoir, histogram.SlidingWindowReservoir)
        assert_equal(metric.reservoir.size, 5)
    # --- @with_histogram decorator ---
    @mock.patch('appmetrics.metrics.time')
    def test_with_histogram(self, time):
        # emulate the time spent in the function by patching time.time() and returning
        # two known values.
        times = [5, 3.4]
        time.time.side_effect = times.pop
        # decorated function
        @mm.with_histogram("test")
        def fun(v1, v2):
            """a docstring"""
            return v1+v2
        assert_equal(fun.__doc__, "a docstring")
        res = fun(1, 2)
        assert_equal(res, 3)
        assert_equal(mm.metric("test").raw_data(), [1.6])
    @mock.patch('appmetrics.metrics.time')
    def test_with_histogram_with_method(self, time):
        # emulate the time spent in the function by patching time.time() and returning
        # two known values.
        times = [5, 3.4]
        time.time.side_effect = times.pop
        # decorated method
        class MyClass(object):
            def __init__(self, v1):
                self.v1 = v1
            @mm.with_histogram("test")
            def method(self, v2):
                """a docstring"""
                return self.v1+v2
        assert_equal(MyClass.method.__doc__, "a docstring")
        obj = MyClass(1)
        assert_equal(obj.method.__doc__, "a docstring")
        res = obj.method(2)
        assert_equal(res, 3)
        assert_equal(mm.metric("test").raw_data(), [1.6])
    def test_with_histogram_multiple(self):
        # Two functions may share one histogram when the arguments match.
        @mm.with_histogram("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_histogram("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
        assert_equal(f1.__doc__, "a docstring")
        assert_equal(f2.__doc__, "another docstring")
        res = f1(1, 2)
        assert_equal(res, 3)
        res = f2(2, 3)
        assert_equal(res, 6)
        assert_equal(len(mm.metric("test").raw_data()), 2)
    @raises(exceptions.InvalidMetricError)
    def test_with_histogram_bad_reservoir_type(self):
        # decorated function
        @mm.with_histogram("test", "xxx")
        def fun(v1, v2):
            """a docstring"""
            return v1+v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_histogram_multiple_and_arguments(self):
        @mm.with_histogram("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_histogram("test", size=100)
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_histogram_multiple_different_type(self):
        mm.new_gauge("test")
        @mm.with_histogram("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    # --- @with_meter decorator ---
    def test_with_meter(self):
        @mm.with_meter("test")
        def fun(v):
            """a docstring"""
            return v*2
        assert_equal(fun.__doc__, "a docstring")
        res = [fun(i) for i in range(6)]
        assert_equal(res, [0, 2, 4, 6, 8, 10])
        assert_equal(mm.metric("test").raw_data(), 6)
    def test_with_meter_with_method(self):
        class MyClass(object):
            def __init__(self, v):
                self.v = v
            @mm.with_meter("test")
            def m1(self, v):
                """a docstring"""
                return v*self.v
            @mm.with_meter("test")
            def m2(self, v):
                """another docstring"""
                return v+self.v
        assert_equal(MyClass.m1.__doc__, "a docstring")
        assert_equal(MyClass.m2.__doc__, "another docstring")
        obj = MyClass(2)
        res = [obj.m1(i) for i in range(3)]
        assert_equal(res, [0, 2, 4])
        res = [obj.m2(i) for i in range(3)]
        assert_equal(res, [2, 3, 4])
        assert_equal(mm.metric("test").raw_data(), 6)
    def test_with_meter_multiple(self):
        @mm.with_meter("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_meter("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
        assert_equal(f1.__doc__, "a docstring")
        assert_equal(f2.__doc__, "another docstring")
        res = f1(1, 2)
        assert_equal(res, 3)
        res = f2(2, 3)
        assert_equal(res, 6)
        assert_equal(mm.metric("test").raw_data(), 2)
    @raises(exceptions.DuplicateMetricError)
    def test_with_meter_multiple_and_arguments(self):
        @mm.with_meter("test")
        def f1(v1, v2):
            """a docstring"""
            return v1+v2
        @mm.with_meter("test", tick_interval=100)
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    @raises(exceptions.DuplicateMetricError)
    def test_with_meter_multiple_different_type(self):
        mm.new_gauge("test")
        @mm.with_meter("test")
        def f2(v1, v2):
            """another docstring"""
            return v1*v2
    # --- timer context manager ---
    @mock.patch('appmetrics.histogram.Histogram.notify')
    def test_timer(self, notify):
        with mm.timer("test"):
            pass
        assert_equal(notify.call_count, 1)
    @mock.patch('appmetrics.histogram.Histogram.notify')
    def test_timer_multiple(self, notify):
        with mm.timer("test"):
            pass
        with mm.timer("test"):
            pass
        assert_equal(notify.call_count, 2)
    @raises(exceptions.DuplicateMetricError)
    def test_timer_multiple_different_reservoir(self):
        with mm.timer("test", reservoir_type="sliding_window"):
            pass
        with mm.timer("test"):
            pass
    @raises(exceptions.DuplicateMetricError)
    def test_timer_multiple_different_type(self):
        mm.new_gauge("test")
        with mm.timer("test"):
            pass
    # --- tagging API ---
    @raises(exceptions.InvalidMetricError)
    def test_tag_invalid_name(self):
        mm.tag("test", "test")
    def test_tag(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.tag("test1", "1")
        mm.tag("test3", "1")
        mm.tag("test2", "2")
        assert_equal(mm.TAGS, {"1": {"test1", "test3"}, "2": {"test2"}})
    def test_tags(self):
        # tags() must return a copy, not the live global dict.
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_equal(mm.tags(), mm.TAGS)
        assert_false(mm.tags() is mm.TAGS)
    def test_untag_bad_tag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_false(mm.untag("test1", "xxx"))
    def test_untag_bad_metric(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_false(mm.untag("xxx", "1"))
    def test_untag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_true(mm.untag("test1", "1"))
        assert_equal(mm.TAGS, {"1": {"test3"}, "2": {"test2"}})
    def test_untag_last_group(self):
        # Removing the last member of a tag removes the tag entirely.
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_true(mm.untag("test1", "1"))
        assert_true(mm.untag("test3", "1"))
        assert_equal(mm.TAGS, {"2": {"test2"}})
    def test_metrics_by_tag_invalid_tag(self):
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test2"}}
        assert_equal(mm.metrics_by_tag("test"), {})
    def test_metrics_by_tag(self):
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.TAGS = {"1": {"test1", "test3"}, "2": {"test3"}}
        assert_equal(mm.metrics_by_tag("1"), {"test1": m1.get(), "test3": m3.get()})
    def test_metrics_by_tag_deletion_while_looping(self):
        # A metric vanishing mid-iteration must be skipped, not crash.
        m1 = mock.Mock()
        m2 = mock.Mock()
        m3 = mock.Mock()
        m2.get.side_effect = exceptions.InvalidMetricError
        mm.REGISTRY = {"test1": m1, "test2": m2, "test3": m3}
        mm.TAGS = {"1": {"test1", "test2", "test3"}, "2": {"test2"}}
        assert_equal(mm.metrics_by_tag("1"), {"test1": m1.get(), "test3": m3.get()})
    def test_metrics_by_name_list(self):
        mm.REGISTRY = dict(test1=mock.Mock(), test2=mock.Mock(), test3=mock.Mock())
        out = mm.metrics_by_name_list(["test1", "test3"])
        expected = {'test1': mm.REGISTRY["test1"].get.return_value,
                    'test3': mm.REGISTRY["test3"].get.return_value}
        assert_equal(out, expected)
|
163061
|
import pypsa, os
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Solve the AC-DC example network with LOPF, plot it, and verify the power
# balance at every bus.
net = pypsa.Network()
folder_name = "ac-dc-data"
net.import_from_csv_folder(folder_name)
net.lopf(net.snapshots)
fig, ax = plt.subplots(subplot_kw={'projection': ccrs.EqualEarth()},
                       figsize=(5, 5))
line_colors = net.lines.bus0.map(net.buses.carrier).replace(
    {'AC': 'indianred', 'DC': 'limegreen'})
net.plot(bus_colors='grey', ax=ax,
         margin=0.5, line_widths={'Line': 2.0, 'Link': 0},
         line_colors=line_colors,
         geomap='10m', title='Mixed AC-DC (red - green) network',
         # flow='mean',
         color_geomap=True)
fig.canvas.draw()
fig.tight_layout()
fig.savefig('ac_dc_meshed.png')
for sn in net.sub_networks.obj:
    print(sn, net.sub_networks.at[sn.name, "carrier"],
          len(sn.buses()), len(sn.branches()))
print("\nControllable branches:")
print(net.links)
now = net.snapshots[5]
print("\nCheck power balance at each bus:")
for bus in net.buses.index:
    print("\n" * 3 + bus)
    generators = sum(net.generators_t.p.loc[now, net.generators.bus == bus])
    loads = sum(net.loads_t.p.loc[now, net.loads.bus == bus])
    print("Generators:", generators)
    print("Loads:", loads)
    print("Total:", generators - loads)
    p0 = 0.
    p1 = 0.
    for c in net.iterate_components(net.branch_components):
        at_bus0 = (c.df.bus0 == bus)
        if at_bus0.any():
            print(c, "\n", c.pnl.p0.loc[now, at_bus0])
            p0 += c.pnl.p0.loc[now, at_bus0].sum()
        at_bus1 = (c.df.bus1 == bus)
        if at_bus1.any():
            print(c, "\n", c.pnl.p1.loc[now, at_bus1])
            p1 += c.pnl.p1.loc[now, at_bus1].sum()
    print("Branches", p0 + p1)
    # Shift by 1 so a zero balance still compares with a sane tolerance.
    np.testing.assert_allclose(generators - loads + 1., p0 + p1 + 1.)
    print("")
print(sum(net.generators_t.p.loc[now]))
print(sum(net.loads_t.p.loc[now]))
results_folder_name = os.path.join(folder_name, "results-lopf")
if True:
    net.export_to_csv_folder(results_folder_name)
|
163062
|
from itertools import combinations
# Probability that a random k-combination of the given letters contains 'a'.
num_letters = int(input())
letters = input().split()
k = int(input())
all_combos = list(combinations(letters, k))
hits = sum(1 for combo in all_combos if 'a' in combo)
print(hits / len(all_combos))
|
163066
|
import magma as m
from magma.testing import check_files_equal
import os
def test_inline_2d_array_interface():
    """Compile a generator that inlines verilog over a 2D array port,
    compare against the gold file, and lint the result with verilator."""
    class Main(m.Generator):
        @staticmethod
        def generate(width, depth):
            class MonitorWrapper(m.Circuit):
                io = m.IO(arr=m.In(m.Array[depth, m.Bits[width]]))
                m.inline_verilog("""
monitor #(.WIDTH({width}), .DEPTH({depth})) monitor_inst(.arr({arr}));
""", width=width, depth=depth, arr=io.arr)
            return MonitorWrapper
    circuit = Main.generate(8, 64)
    m.compile("build/test_inline_2d_array_interface", circuit)
    assert check_files_equal(
        __file__,
        "build/test_inline_2d_array_interface.v",
        "gold/test_inline_2d_array_interface.v",
    )
    file_dir = os.path.abspath(os.path.dirname(__file__))
    lint_cmd = ("verilator --lint-only "
                f"{file_dir}/build/test_inline_2d_array_interface.v "
                f"{file_dir}/vsrc/2d_array_interface.v "
                "--top-module MonitorWrapper")
    # os.system returns 0 on success, so assert the lint passed.
    assert not os.system(lint_cmd)
|
163075
|
import numpy as np
from web.evaluate import calculate_purity, evaluate_categorization
from web.embedding import Embedding
from web.datasets.utils import _fetch_file
from web.datasets.categorization import fetch_ESSLI_2c
def test_purity():
    """Purity of a known clustering against ground truth should be 0.6."""
    labels_true = np.array([1, 1, 2, 2, 3])
    labels_pred = np.array([2, 2, 2, 2, 1])
    assert abs(calculate_purity(labels_true, labels_pred) - 0.6) < 1e-10
def test_categorization():
    """End-to-end categorization benchmark on ESSLI_2c with downloaded vectors."""
    data = fetch_ESSLI_2c()
    url = "https://www.dropbox.com/s/5occ4p7k28gvxfj/ganalogy-sg-wiki-en-400.bin?dl=1"
    local_path = _fetch_file(url, "test")
    embedding = Embedding.from_word2vec(local_path, binary=True)
    score = evaluate_categorization(embedding, data.X, data.y, seed=777, method="all")
    assert score >= 0.2
|
163076
|
import argparse
import numpy as np
from squeezenet import SqueezeNet
import os
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
SIZE = 227
def main():
    """Load a SqueezeNet checkpoint and print the predicted class per image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint-path', required=True)
    parser.add_argument('--image', nargs='+', required=True)
    parser.add_argument('--num-classes', type=int, required=True)
    args = parser.parse_args()
    model = SqueezeNet(weights=None, classes=args.num_classes)
    model.load_weights(args.checkpoint_path)
    # Load every image resized to the network's input size, batch them,
    # then apply the standard imagenet preprocessing.
    batch = np.array([
        image.img_to_array(image.load_img(path, target_size=(SIZE, SIZE)))
        for path in args.image
    ])
    batch = preprocess_input(batch)
    probs = model.predict(batch)
    print('')
    for i, path in enumerate(args.image):
        print('%s' % path)
        print('  Prediction: %s' % np.argmax(probs[i]))
if __name__ == '__main__':
    # Script entry point.
    main()
|
163078
|
class Solution:
    """
    @param grid: a 2D array
    @return: the maximum area of an island in the given 2D array
    """
    def maxAreaOfIsland(self, grid):
        """Return the area of the largest 4-connected group of 1-cells.

        Uses a destructive DFS: visited land cells are zeroed in place so
        each cell is counted exactly once. Runs in O(m*n) time.
        Note: *grid* is mutated (all 1s become 0s).
        """
        # Fix: guard against an empty grid (or an empty first row), which
        # previously raised IndexError on len(grid[0]).
        if not grid or not grid[0]:
            return 0
        maxArea = 0
        m = len(grid)
        n = len(grid[0])

        def dfs(r, c):
            # Area of the island containing (r, c); zeroes cells as it goes.
            if grid[r][c] == 0:
                return 0
            total = 1
            grid[r][c] = 0  # mark visited
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < m and 0 <= nc < n:
                    total += dfs(nr, nc)
            return total

        for i, row in enumerate(grid):
            for j, val in enumerate(row):
                if val == 1:
                    maxArea = max(maxArea, dfs(i, j))
        return maxArea
|
163093
|
import requests
from django.conf import settings
from constance import config
class Canvas:
    """Thin client for the Canvas LMS REST and GraphQL APIs."""

    base_url = "https://canvas.instructure.com/api/v1/"
    graphql_url = "https://canvas.instructure.com/api/graphql/"

    def __init__(self):
        # Token is read from constance's live configuration.
        self.api_token = config.CANVAS_API_TOKEN

    def _get(self, url):
        """GET a REST endpoint relative to ``base_url`` and return parsed JSON."""
        response = requests.get(
            self.base_url + url,
            headers={"Authorization": "Bearer " + self.api_token},
        )
        return response.json()

    def _query_graph(self, query):
        """POST a GraphQL query payload and return parsed JSON."""
        response = requests.post(
            self.graphql_url,
            headers={"Authorization": "Bearer " + self.api_token},
            data=query,
        )
        return response.json()

    def get_course_details(self, course_id):
        """Fetch enrollment emails and final scores for a course via GraphQL."""
        query = {
            "query": "query MyQuery {course(id: "
            + str(course_id)
            + ") {enrollmentsConnection {nodes {user {email}grades {finalScore}}}}}",
        }
        return self._query_graph(query)

    def get_course_scores(self, course_id):
        """Map each enrolled student's email to their final score.

        Returns an empty dict when the API response carries no data.
        """
        payload = self.get_course_details(course_id).get("data")
        if not payload:
            return {}
        nodes = payload["course"]["enrollmentsConnection"]["nodes"]
        return {
            node.get("user").get("email"): node.get("grades").get("finalScore")
            for node in nodes
        }

    def get_student_score_for_course(self, course_id, email):
        """Return the final score for *email* in the course, or None."""
        return self.get_course_scores(course_id).get(email)
|
163136
|
import streamlit as st
from pydantic import BaseModel
import streamlit_pydantic as sp
class ExampleModel(BaseModel):
    # Pydantic model rendered as a Streamlit input form below.
    some_text: str  # required free-text field
    some_number: int = 10  # optional, defaults to 10
    some_boolean: bool = True  # optional, defaults to True
# Render the model as a Streamlit form and capture the submit action.
with st.form(key="pydantic_form"):
    sp.pydantic_input(key="my_input_model", model=ExampleModel)
    submit_button = st.form_submit_button(label="Submit")
|
163139
|
import random
import time
# Seed before the ltron imports so the perturbation below is reproducible.
random.seed(1234)
from ltron.bricks.brick_scene import BrickScene
from ltron.experts.reassembly import ReassemblyExpert
from ltron.matching import match_configurations

# Load a large OMR model, randomly displace a few bricks, then time the
# configuration-matching step against the unperturbed target.
scene = BrickScene(renderable=True, track_snaps=True)
scene.import_ldraw(
    #'~/.cache/ltron/collections/omr/ldraw/8661-1 - Carbon Star.mpd'
    #'~/.cache/ltron/collections/omr/ldraw/7657-1 - AT-ST.mpd'
    '~/.cache/ltron/collections/omr/ldraw/10030-1 - Imperial Star Destroyer - UCS.mpd'
)
print('loaded')
shape_ids = {
    str(brick_shape): i
    for i, brick_shape in enumerate(scene.shape_library.values(), start=1)
}
color_ids = {
    str(color): i
    for i, color in enumerate(scene.color_library.values(), start=0)
}
target_config = scene.get_configuration(shape_ids, color_ids)
#scene.remove_instance(23)
num_instances = len(scene.instances)
remove = set(random.randint(1, num_instances) for _ in range(10))
for instance_id in remove:
    transform = scene.instances[instance_id].transform.copy()
    transform[0, 3] += random.randint(-5, 5) * 20
    transform[1, 3] += random.randint(-5, 5) * 8
    transform[2, 3] += random.randint(-5, 5) * 20
    scene.move_instance(instance_id, transform)
workspace_config = scene.get_configuration(shape_ids, color_ids)
start = time.time()
matches = match_configurations(workspace_config, target_config)
stop = time.time()
print('t: %.06f' % (stop - start))
'''
expert = ReassemblyExpert(1, shape_ids)
target_config = scene.get_configuration(shape_ids, color_ids)
scene.remove_instance(23)
workspace_config = scene.get_configuration(shape_ids, color_ids)
expert.add_remove_brick(workspace_config, target_config)
'''
|
163141
|
import re
from random import randint
from typing import Any
from typing import Callable
from typing import Dict
from typing import Match
from typing import Optional
from retrying import retry
import apysc as ap
from apysc._event.custom_event_type import CustomEventType
from apysc._expression import expression_data_util
from apysc._expression import var_names
from tests.testing_helper import assert_attrs
class TestTimer:
    """Unit tests for ``ap.Timer``: construction, start/stop/reset, the
    handler wrapper, and the JavaScript expressions it emits.

    Tests are retried because expression state is process-global and can
    collide across parallel runs.
    """
    def on_timer(self, e: ap.TimerEvent, options: Dict[str, Any]) -> None:
        """
        The handler for the timer event.
        Parameters
        ----------
        e : TimerEvent
            Event instance.
        options : dict
            Optional arguments dictionary.
        """
    def on_timer_complete(
            self, e: ap.TimerEvent, options: Dict[str, Any]) -> None:
        """
        The handler for the timer complete event.
        Parameters
        ----------
        e : TimerEvent
            Event instance.
        options : dict
            Optional arguments dictionary.
        """
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test___init__(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer,
            delay=33.3,
            repeat_count=10)
        assert_attrs(
            expected_attrs={
                '_delay': ap.Number(33.3),
                '_repeat_count': ap.Int(10),
            },
            any_obj=timer)
        assert callable(timer._handler)
        assert callable(timer._handler_data['handler'])
        assert timer._handler_data['options'] == {}
        assert timer.variable_name.startswith(f'{var_names.TIMER}_')
        assert 'on_timer' in timer._handler_name
        assert isinstance(timer._delay, ap.Number)
        assert isinstance(timer._repeat_count, ap.Int)
        expression = expression_data_util.get_current_expression()
        assert f'var {timer.variable_name};' in expression
    # Properties must return copies (new variables), not the internals.
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_delay(self) -> None:
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer,
            delay=33.3)
        assert timer.delay == 33.3
        assert isinstance(timer.delay, ap.Number)
        assert timer._delay.variable_name != timer.delay.variable_name
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_repeat_count(self) -> None:
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer,
            delay=33.3, repeat_count=3)
        assert timer.repeat_count == 3
        assert isinstance(timer.repeat_count, ap.Int)
        assert timer._repeat_count.variable_name \
            != timer.repeat_count.variable_name
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_running(self) -> None:
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        assert not timer.running
        assert isinstance(timer.running, ap.Boolean)
        assert timer._running.variable_name != timer.running.variable_name
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_start(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        timer.start()
        assert timer.running
        # start() should emit a guarded setInterval call...
        expression: str = expression_data_util.get_current_expression()
        pattern: str = (
            rf'if \(_\.isUndefined\({timer.variable_name}\)\) {{'
            rf'\n  {timer.variable_name} = setInterval\('
            rf'{timer._handler_name}, {var_names.NUMBER}_.+?\);'
            r'\n}'
        )
        match: Optional[Match] = re.search(
            pattern=pattern,
            string=expression,
            flags=re.MULTILINE | re.DOTALL,
        )
        assert match is not None
        # ...and register the handler function in the handler scope.
        expression = \
            expression_data_util.get_current_event_handler_scope_expression()
        match = re.search(
            pattern=(
                r'function .*on_timer.*\('
            ),
            string=expression,
            flags=re.MULTILINE,
        )
        assert match is not None
        # When a circular-calling record exists, start() must reuse the
        # previous handler/variable instead of emitting new ones.
        expression_data_util.empty_expression()
        timer = ap.Timer(handler=self.on_timer, delay=33.3)
        table_name: str = expression_data_util.TableName.\
            CIRCULAR_CALLING_HANDLER_NAME.value
        query: str = (
            f'INSERT INTO {table_name}'
            '(handler_name, prev_handler_name, prev_variable_name) '
            f"VALUES('{timer._handler_name}', 'test_prev_handler', "
            f"'test_prev_variable');"
        )
        expression_data_util.exec_query(sql=query)
        timer.start()
        expression = expression_data_util.get_current_expression()
        assert f'_.isUndefined({timer.variable_name})' not in expression
        assert '_.isUndefined(test_prev_variable)' in expression
        assert f'setInterval({timer._handler_name}' not in expression
        assert 'test_prev_variable = setInterval(test_prev_handler' \
            in expression
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_current_count(self) -> None:
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        assert timer.current_count == 0
        assert isinstance(timer.current_count, ap.Int)
        assert timer._current_count.variable_name \
            != timer.current_count.variable_name
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__wrap_handler(self) -> None:
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        wrapped: Callable[[ap.TimerEvent, Any], None] = \
            timer._wrap_handler(handler=self.on_timer)
        e: ap.TimerEvent = ap.TimerEvent(this=timer)
        wrapped(e, {})
        assert timer.current_count == 0
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_stop(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        timer.start()
        timer.stop()
        expression: str = expression_data_util.get_current_expression()
        expected: str = (
            f'if (!_.isUndefined({timer.variable_name})) {{'
            f'\n  clearInterval({timer.variable_name});'
            f'\n  {timer.variable_name} = undefined;'
            '\n}'
        )
        assert expected in expression
        assert not timer.running
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__get_stop_expression(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33.3)
        expression: str = timer._get_stop_expression(indent_num=1)
        expected: str = (
            f'  if (!_.isUndefined({timer.variable_name})) {{'
            f'\n    clearInterval({timer.variable_name});'
            f'\n    {timer.variable_name} = undefined;'
            '\n  }'
        )
        assert expected in expression
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_count_branch_expression(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer, delay=33.3, repeat_count=5)
        timer.start()
        expression: str = \
            expression_data_util.get_current_event_handler_scope_expression()
        match: Optional[Match] = re.search(
            pattern=(
                rf'  if \({var_names.INT}_.+? !== 0 && '
                rf'{var_names.INT}_.+? === {var_names.INT}_.+?\) {{'
                r'\n    if \(.*?'
                r'\n  }'
            ),
            string=expression,
            flags=re.MULTILINE | re.DOTALL)
        assert match is not None
        event_type: str = CustomEventType.TIMER_COMPLETE.value
        match = re.search(
            pattern=(
                rf'\$\({timer.blank_object_variable_name}\)'
                rf'\.trigger\("{event_type}"\);'
            ),
            string=expression,
            flags=re.MULTILINE,
        )
        assert match is not None
        assert timer._current_count == 0
        expected: str = (
            f'{timer._current_count.variable_name} = 0;'
        )
        assert expected in expression
        expected = (
            f'$({timer.blank_object_variable_name}).off('
            f'"{event_type}");'
        )
        assert expected in expression
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__convert_delay_to_number(self) -> None:
        # Accepts a plain float, an ap.Number, or an ap.FPS member.
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer, delay=33.3)
        assert timer.delay == 33.3
        assert isinstance(timer.delay, ap.Number)
        timer = ap.Timer(
            handler=self.on_timer, delay=ap.Number(33.3))
        assert timer.delay == 33.3
        assert isinstance(timer.delay, ap.Number)
        timer = ap.Timer(
            handler=self.on_timer, delay=ap.FPS.FPS_60)
        assert timer.delay == 16.6666667
        assert isinstance(timer.delay, ap.Number)
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_timer_complete(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer, delay=33.3)
        name: str = timer.timer_complete(handler=self.on_timer_complete)
        assert isinstance(
            timer._custom_event_handlers[
                CustomEventType.TIMER_COMPLETE.value][name],
            dict)
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_reset(self) -> None:
        expression_data_util.empty_expression()
        timer: ap.Timer = ap.Timer(
            handler=self.on_timer, delay=33.3)
        timer.start()
        timer._current_count._value = 10
        timer.reset()
        assert timer.current_count == 0
        assert not timer.running
        expression: str = expression_data_util.get_current_expression()
        expected: str = (
            f'{timer._current_count.variable_name} = 0;'
        )
        assert expected in expression
|
163152
|
import sys
from .danger import Danger, fail, markdown, message, warn
class DangerPlugin:
    """Base class for danger-python plugins.

    Subclassing has a side effect: every public method of the subclass is
    bound to a singleton instance and exported as a module-level function
    of the subclass's top-level package.
    """

    def __init__(self):
        self._danger = None  # lazily created Danger instance (see `danger`)
        self.message = message
        self.markdown = markdown
        self.warn = warn
        self.fail = fail

    def __init_subclass__(cls, **kwargs):
        # Fix: propagate to cooperative base classes; without this call,
        # class keyword arguments and mixin __init_subclass__ hooks are
        # silently dropped.
        super().__init_subclass__(**kwargs)
        parent_module = cls.__module__.split(".")[0]
        module = sys.modules[parent_module]
        instance = cls()
        for method_name in dir(cls):
            # Skip dunders and everything inherited from the base plugin API.
            if method_name.startswith("__") or method_name in set(dir(DangerPlugin)):
                continue
            method = getattr(instance, method_name)
            setattr(module, method_name, method)

    @property
    def danger(self):
        """Lazily construct and cache the shared Danger object."""
        if not self._danger:
            self._danger = Danger()
        return self._danger
|
163164
|
import argparse
import re
import os
import json
import numpy as np
import pickle as pkl
"""
for extracting word embedding yourself, please download pretrained model from one of the following links.
"""
url = {'glove': 'http://nlp.stanford.edu/data/glove.6B.zip',
'google': 'https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing',
'fasttext': 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.en.zip'}
data_dir = '../data/'
feat_len = 300
def embed_text_file(text_file, word_vectors, get_vector, save_file):
    """Embed every class-name entry of *text_file* and pickle the matrix.

    Each entry may contain comma-separated synonyms; the embeddings of
    the synonyms that are found are averaged and then L2-normalized.
    Entries with no known words yield a zero vector and are reported.
    """
    with open(text_file) as fp:
        text_list = json.load(fp)
    all_feats = []
    has = 0
    cnt_missed = 0
    missed_list = []
    for idx, raw_name in enumerate(text_list):
        class_name = raw_name.lower()
        if idx % 500 == 0:
            # Periodic progress report.
            print('%d / %d : %s' % (idx, len(text_list), class_name))
        feat = np.zeros(feat_len)
        cnt_word = 0
        for option in class_name.split(','):
            now_feat = get_embedding(option.strip(), word_vectors, get_vector)
            # A zero-sum vector means the synonym was not found.
            if np.abs(now_feat.sum()) > 0:
                cnt_word += 1
                feat += now_feat
        if cnt_word > 0:
            feat = feat / cnt_word
        if np.abs(feat.sum()) == 0:
            print('cannot find word ' + class_name)
            cnt_missed = cnt_missed + 1
            missed_list.append(class_name)
        else:
            has += 1
            feat = feat / (np.linalg.norm(feat) + 1e-6)
        all_feats.append(feat)
    all_feats = np.array(all_feats)
    for each in missed_list:
        print(each)
    print('does not have semantic embedding: ', cnt_missed, 'has: ', has)
    if not os.path.exists(os.path.dirname(save_file)):
        os.makedirs(os.path.dirname(save_file))
        print('## Make Directory: %s' % save_file)
    with open(save_file, 'wb') as fp:
        pkl.dump(all_feats, fp)
    print('save to : %s' % save_file)
def get_embedding(entity_str, word_vectors, get_vector):
    """Return the embedding for *entity_str*, averaging over sub-tokens.

    First tries a direct lookup via *get_vector*. If that raises, the
    name is split on spaces/hyphens/underscores and the embeddings of the
    tokens that are found are averaged. Returns a zero vector of length
    ``feat_len`` when nothing is found.
    """
    try:
        return get_vector(word_vectors, entity_str)
    except Exception:
        # Fall back to averaging the embeddings of the name's tokens.
        feat = np.zeros(feat_len)
        # Bug fix: in Python 3 filter() returns a lazy iterator, so the
        # original len(str_set)/str_set[i] raised TypeError. Materialize
        # the non-empty tokens as a list instead.
        tokens = [tok for tok in re.split("[ \-_]+", entity_str) if tok]
        cnt_word = 0
        for token in tokens:
            try:
                feat = feat + get_vector(word_vectors, token)
                cnt_word = cnt_word + 1
            except Exception:
                # Token not in the vocabulary; skip it.
                continue
        if cnt_word > 0:
            feat = feat / cnt_word
        return feat
def get_glove_dict(txt_dir):
    """Parse ``glove.6B.300d.txt`` into a dict of word -> np.ndarray(feat_len).

    Bug fix: the original reused a single buffer array across lines — the
    array stored in the dict for one word was then mutated in place while
    parsing the next line, so every word except the last ended up mapped
    to a later word's vector. A fresh array is now allocated per line.
    """
    print('load glove word embedding')
    txt_file = os.path.join(txt_dir, 'glove.6B.300d.txt')
    word_dict = {}
    with open(txt_file) as fp:
        for line in fp:
            words = line.split()
            # Each line is "<word> v1 v2 ... v300".
            assert len(words) - 1 == feat_len
            word_dict[words[0]] = np.array([float(w) for w in words[1:]])
    print('loaded to dict!')
    return word_dict
def glove_google(word_vectors, word):
    """Direct mapping-style lookup used for GloVe and Google word2vec tables."""
    vector = word_vectors[word]
    return vector
def fasttext(word_vectors, word):
    """Lookup through the fastText model's ``get_word_vector`` API."""
    lookup = word_vectors.get_word_vector
    return lookup(word)
def parse_arg():
    """Parse command-line options: embedding type (--wv) and model path (--path)."""
    parser = argparse.ArgumentParser(description='word embeddign type')
    parser.add_argument('--wv', type=str, default='glove',
                        help='word embedding type: [glove, google, fasttext]')
    parser.add_argument('--path', type=str, default='',
                        help='path to pretrained word embedding model')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_arg()
    text_file = os.path.join(data_dir, 'list', 'invdict_wordntext.json')
    model_path = args.path
    # Pick the output path and the loader for the chosen embedding type.
    # NOTE: word_vectors is only bound when the output file does not yet
    # exist; the final branch below only uses it in that same case.
    if args.wv == 'glove':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'glove_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            word_vectors = get_glove_dict(model_path)
        get_vector = glove_google
    elif args.wv == 'google':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'google_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            # Import lazily so gensim is only required for this mode.
            from gensim.models.keyedvectors import KeyedVectors
            word_vectors = KeyedVectors.load_word2vec_format(model_path, binary=True)
        get_vector = glove_google
    elif args.wv == 'fasttext':
        save_file = os.path.join(data_dir, 'word_embedding_model', 'fasttext_word2vec_wordnet.pkl')
        if not os.path.exists(save_file):
            # Import lazily so fastText is only required for this mode.
            from fastText import load_model
            word_vectors = load_model(os.path.join(model_path, 'wiki.en.bin'))
        get_vector = fasttext
    else:
        raise NotImplementedError
    # Skip the (expensive) embedding step when the output already exists.
    if not os.path.exists(save_file):
        print('obtain semantic word embeddig', save_file)
        embed_text_file(text_file, word_vectors, get_vector, save_file)
    else:
        print('Embedding existed :', save_file, 'Skip!!!')
|
163187
|
CAMPAIGN_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "5",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:01:07.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.99,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.85,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_2_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "4",
"name": "Verify your example account 798",
"occurred": "2021-11-21T16:00:00.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.97,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.86,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
OLD_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "<EMAIL>",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"<EMAIL>"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
# Sentinel context for an incident that does not belong to any campaign.
NEW_EMPTY_CAMPAIGN = {}
# Maps an incident id to the campaign context fixture used for that incident
# in the tests above.
INCIDENTS_BY_ID = {'0': CAMPAIGN_INCIDENT_CONTEXT, '1': NEW_EMPTY_CAMPAIGN, '3': OLD_INCIDENT_CONTEXT,
                   '4': NEW_INCIDENT_2_CONTEXT, '5': NEW_INCIDENT_CONTEXT}
|
163206
|
import re
import config
from helper.DatabaseHelper import Player
from nonebot import on_command, CommandSession,permission
__plugin_name__ = '解除绑定'
__plugin_usage__ = r"""解除绑定(仅管理及群主可用)
例:#解绑 @一个人
或 #unbind 艾特一个人"""
@on_command('unbind', aliases='解绑', only_to_me=False, permission=permission.SUPERUSER | permission.GROUP_OWNER | permission.GROUP_ADMIN)
async def Bind(session: CommandSession):
    """Unbind the @-mentioned QQ user from their registered game name.

    Only reachable for superusers / group owners / group admins (see the
    decorator). Replies with a usage hint when no user is @-mentioned.
    """
    SenderQQNumber = session.ctx['user_id']
    SenderAtQQNumber = session.ctx['message']
    SenderGroupNumber = session.ctx['group_id']
    # Only act for groups NOT listed in config.SendGroup (original behavior
    # preserved -- the `in` case was a no-op).
    if str(SenderGroupNumber) not in config.SendGroup:
        try:
            # Extract the first @-mentioned QQ number from the CQ code.
            AtQQNumber = re.findall(r"(?<=\[CQ:at,qq=).*?(?=\])", str(SenderAtQQNumber))[0]
            try:
                SqlGamerName = Player(QQNumber=AtQQNumber).GamerName()
                Player(AtQQNumber).remove()
                await session.send(f'[CQ:at,qq={SenderQQNumber}] 解除{AtQQNumber}与{SqlGamerName}的绑定!!!')
            except Player.PlayerNotFoundException:
                await session.send(f'[CQ:at,qq={SenderQQNumber}] 该用户没有绑定呢!!!')
        except IndexError:
            # No @-mention in the message. The previous bare `except:` also
            # swallowed database/send errors and misreported them as a usage
            # error; only the missing-mention case is handled here now.
            await session.send(f'[CQ:at,qq={SenderQQNumber}] #unbind后面艾特一个用户... 例如 #bind @user')
|
163246
|
import pytest
from loan_calculator.grossup.iof import IofGrossup
def test_trivial_iof_grossup(loan):
    """With all IOF and service-fee aliquots set to zero, the grossup must
    be a no-op: the grossed-up principal equals the original principal and
    the IRR equals the base loan's daily interest rate.

    ``loan`` is a pytest fixture providing a base Loan object.
    """
    iof_grossup = IofGrossup(
        loan,
        loan.start_date,
        daily_iof_aliquot=0.0,
        complementary_iof_aliquot=0.0,
        service_fee_aliquot=0.0,
    )
    assert iof_grossup.grossed_up_principal == pytest.approx(loan.principal, rel=0.01)  # noqa
    assert iof_grossup.irr == pytest.approx(iof_grossup.base_loan.daily_interest_rate, rel=0.01)  # noqa
|
163251
|
import json
import mock
'''
Mock Request and Response objects needed for many tests.
'''
class MockRequest(object):
    '''
    Minimal stand-in for a Request. It carries only a ``url`` attribute,
    which is the sole attribute the tests access; when no url is supplied
    it defaults to ``http://foo.foo``.
    '''
    def __init__(self, url=None):
        self.url = 'http://foo.foo' if url is None else url
class MockResponse(object):
    '''
    Mocked Response object, usable in place of the result of any
    ``requests.get`` / ``requests.put`` / ``requests.delete`` call.

    Holds a request, an HTTP status code and JSON content. Values are
    resolved in three stages: a predefined scenario flag (``success``,
    ``notfound``, ``empty``, ``wascreated`` -- first one set wins), then
    explicit ``status_code`` / ``content`` / ``request`` overrides, then
    generic defaults for anything still unset.
    '''
    def __init__(self, status_code=None, content=None, request=None, success=False, notfound=False, empty=False, wascreated=False):
        self.content = None
        self.status_code = None
        self.request = None
        # Stage 1: predefined scenarios.
        if success:
            self.status_code, self.content = 200, '{"responseCode":1, "handle":"my/testhandle"}'
        elif notfound:
            self.status_code, self.content = 404, '{"responseCode":100}'
        elif empty:
            self.status_code, self.content = 200, '{"responseCode":200}'
        elif wascreated:
            self.status_code, self.content = 201, '{"responseCode":1, "handle":"my/testhandle"}'
        # Stage 2: explicit values override any scenario.
        if content is not None:
            self.content = content
        if status_code is not None:
            self.status_code = status_code
        if request is not None:
            self.request = request
        # Stage 3: fill remaining gaps with defaults.
        if self.content is None:
            self.content = '{"responseCode":1}'
        if self.status_code is None:
            self.status_code = 200
        if self.request is None:
            self.request = MockRequest()
class MockSearchResponse(object):
    '''
    This is a mocked Response object for search servlet.

    Scenario flags (success, wrong_url, undefined_field, auth_fail,
    handle_url, empty) pick a canned status/content; explicit
    status_code/content/request arguments override them, and generic
    defaults fill whatever is still unset.
    '''
    def __init__(self, status_code=None, content=None, request=None, success=False, wrong_url=False, undefined_field=False, auth_fail=False, handle_url=False, empty=False, prefix=None):
        self.content = None
        self.status_code = None
        self.request = None
        # Hard-coded backend selection for the mocked server behavior.
        beta2=True # which HSv8 version should be mocked?
        beta1=False
        solr=False # Is a solr queried or a database?
        # Some predefined cases:
        if success:
            self.status_code = 200
            self.content = json.dumps(["prefix/suffix", "prefix2/suffix2", "prefix2/suffix2b"])
        elif empty:
            self.status_code = 200
            self.content = json.dumps([])
        elif undefined_field:
            # NOTE(review): with solr hard-coded to False this branch sets
            # nothing, so undefined_field falls through to the defaults
            # below -- confirm whether that is intended.
            if solr:
                self.status_code = 9999999 # TODO
                self.content = 'RemoteSolrException: Error from server at .+: undefined field FooBar' # TODO
        elif auth_fail:
            # Only the status is set; content falls back to the default '[]'.
            self.status_code = 401
        elif wrong_url:
            self.request = MockRequest()
            self.status_code = 404
            self.content = '<!DOCTYPE html>...'
        elif handle_url:
            self.request = MockRequest()
            self.status_code = 404
            if beta1:
                self.content = 'The handle you requested.+cannot be found'
            if beta2:
                # NOTE(review): content is a dict here, not a JSON string
                # like every other branch -- confirm callers expect that.
                self.content = {"responseCode":102, "message":"Empty handle invalid"}
        # User-defined overrides predefined cases:
        if content is not None:
            self.content = content
        if status_code is not None:
            self.status_code = status_code
        if request is not None:
            self.request = request
        # Defaults (they do not override):
        if self.content is None:
            self.content = '[]'
        if self.status_code is None:
            self.status_code = 200
        if self.request is None:
            self.request = MockRequest()
class MockCredentials(object):
    '''
    This is a mocked credentials object.

    Stores user/password/url/handleowner/key material with sensible
    defaults, aggregates everything into ``all_config``, and exposes the
    same ``get_*`` accessors as the real credentials class as MagicMocks
    returning the stored values.
    '''
    def __init__(self,
                 config=None,
                 user=None,
                 password=None,
                 url=None,
                 restapi=None,
                 handleowner=None,
                 private_key=None,
                 certificate_and_key=None,
                 certificate_only=None,
                 prefix=None
                 ):
        self.config = config
        if restapi is not None:
            self.config = {}
            self.config['REST_API_url_extension'] = restapi
        self.user = '100:my/testhandle'
        if user is not None:
            self.user = user
        self.key = private_key
        self.cert = certificate_only
        self.cert_and_key = certificate_and_key
        self.password = '<PASSWORD>'
        if password is not None:
            self.password = password
        self.url = 'http://some/url'
        if url is not None:
            self.url = url
        # Handle owner defaults to the user itself.
        if handleowner is not None:
            self.handleowner = handleowner
        else:
            self.handleowner = self.user
        self.prefix = prefix
        self.all_config = {}
        if self.config is not None:
            # Bug fix: self.config is None when neither `config` nor
            # `restapi` was passed, and dict.update(None) raises TypeError.
            self.all_config.update(self.config)
        self.all_config['username'] = self.user
        self.all_config['password'] = self.password
        self.all_config['handleowner'] = self.handleowner
        self.all_config['handle_server_url'] = self.url
        self.all_config['private_key'] = self.key
        self.all_config['certificate_only'] = self.cert
        self.all_config['certificate_and_key'] = self.cert_and_key
        self.all_config['prefix'] = self.prefix
        # Accessors mirror the real credentials API.
        self.get_config = mock.MagicMock(return_value=self.config)
        self.get_username = mock.MagicMock(return_value=self.user)
        self.get_password = mock.MagicMock(return_value=self.password)
        self.get_server_URL = mock.MagicMock(return_value=self.url)
        self.get_handleowner = mock.MagicMock(return_value=self.handleowner)
        self.get_path_to_private_key = mock.MagicMock(return_value=self.key)
        self.get_path_to_file_certificate_only = mock.MagicMock(return_value=self.cert)
        self.get_path_to_file_certificate_and_key = mock.MagicMock(return_value=self.cert_and_key)
        self.get_all_args = mock.MagicMock(return_value=self.all_config)
|
163268
|
import logging
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.db.models import Max
from laws.models import GovProposal
from simple.parsers import parse_laws
logger = logging.getLogger("open-knesset.parse_laws")
def scrape_gov_proposals(use_last_booklet, specific_booklet_to_use=None):
    """Scrape government bill proposals starting from a booklet number.

    :param bool use_last_booklet: when True (and no specific booklet is
        given), resume from the highest booklet_number already stored.
    :param int specific_booklet_to_use: explicit booklet to start from.
    """
    booklet = 0
    if specific_booklet_to_use:
        booklet = specific_booklet_to_use
    elif use_last_booklet:
        # aggregate() returns {'booklet_number__max': value}; index by the
        # documented key instead of dict.values()[0], which raises
        # TypeError on Python 3 (dict views are not indexable). Fall back
        # to 0 when the table is empty (max is None).
        booklet = GovProposal.objects.aggregate(
            Max('booklet_number'))['booklet_number__max'] or 0
    parser = parse_laws.ParseGovLaws(booklet)
    parser.parse_gov_laws()
class Command(NoArgsCommand):
    # Management command: update government bills (PDFs).
    # NOTE(review): NoArgsCommand/optparse is the legacy (pre-Django-1.8)
    # management-command API -- confirm target Django version before porting
    # to argparse-based add_arguments().
    option_list = NoArgsCommand.option_list + (
        make_option('--forceupdate', action='store_true', dest='forceupdate',
                    help="forced update for gov bills, will download all pdfs and update Bills"),
        make_option('--pdf', action='store', dest='pdf', default=None,
                    help="Download and parse a specific bill"),
        make_option('--booklet', action='store', dest='booklet', default=None, type='int',
                    help="specific booklet to fetch, on min booklet depends on context")
    )
    help = "Give information on government bills (pdfs)"
    def handle_noargs(self, **options):
        """Entry point: update one bill (--pdf) or scrape booklets."""
        forceupdate = options.get('forceupdate', False)
        pdf = options.get('pdf')
        booklet = options.get('booklet', None)
        if pdf:
            # Parse a single bill from its PDF URL, then log the local URL
            # of the resulting proposal object.
            parse_laws.ParseGovLaws(min_booklet=0).update_single_bill(pdf, booklet=booklet)
            proposal_url_oknesset = GovProposal.objects.filter(source_url=pdf)[0].get_absolute_url()
            logger.info("updated: %s" % proposal_url_oknesset)
        else:
            # Without --forceupdate, resume from the last stored booklet.
            scrape_gov_proposals(use_last_booklet=not forceupdate, specific_booklet_to_use=booklet)
|
163396
|
import numpy as np
import os
import pandas as pd
import micro_dl.utils.tile_utils as tile_utils
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.image_utils as image_utils
import micro_dl.utils.mp_utils as mp_utils
class ImageTilerUniform:
"""Tiles all images in a dataset"""
def __init__(self,
input_dir,
output_dir,
tile_size=[256, 256],
step_size=[64, 64],
depths=1,
time_ids=-1,
channel_ids=-1,
normalize_channels=-1,
slice_ids=-1,
pos_ids=-1,
hist_clip_limits=None,
flat_field_dir=None,
image_format='zyx',
num_workers=4,
int2str_len=3,
tile_3d=False):
"""
Tiles images.
If tile_dir already exist, it will check which channels are already
tiled, get indices from them and tile from indices only on the channels
not already present.
:param str input_dir: Directory with frames to be tiled
:param str output_dir: Base output directory
:param list tile_size: size of the blocks to be cropped
from the image
:param list step_size: size of the window shift. In case
of no overlap, the step size is tile_size. If overlap, step_size <
tile_size
:param int/list depths: The z depth for generating stack training data
Default 1 assumes 2D data for all channels to be tiled.
For cases where input and target shapes are not the same (e.g. stack
to 2D) you should specify depths for each channel in tile.channels.
:param list/int time_ids: Tile given timepoint indices
:param list/int channel_ids: Tile images in the given channel indices
default=-1, tile all channels.
:param list/int normalize_channels: list of booleans matching channel_ids
indicating if channel should be normalized or not.
:param int slice_ids: Index of which focal plane acquisition to
use (for 2D). default=-1 for the whole z-stack
:param int pos_ids: Position (FOV) indices to use
:param list hist_clip_limits: lower and upper percentiles used for
histogram clipping.
:param str flat_field_dir: Flatfield directory. None if no flatfield
correction
:param str image_format: zyx (preferred) or xyz
:param int num_workers: number of workers for multiprocessing
:param int int2str_len: number of characters for each idx to be used
in file names
:param bool tile_3d: Whether tiling is 3D or 2D
"""
self.input_dir = input_dir
self.output_dir = output_dir
self.normalize_channels = normalize_channels
self.depths = depths
self.tile_size = tile_size
self.step_size = step_size
self.hist_clip_limits = hist_clip_limits
self.image_format = image_format
assert self.image_format in {'zyx', 'xyz'}, \
'Data format must be zyx or xyz'
self.num_workers = num_workers
self.int2str_len = int2str_len
self.tile_3d = tile_3d
self.str_tile_step = 'tiles_{}_step_{}'.format(
'-'.join([str(val) for val in tile_size]),
'-'.join([str(val) for val in step_size]),
)
self.tile_dir = os.path.join(
output_dir,
self.str_tile_step,
)
# If tile dir already exist, only tile channels not already present
self.tiles_exist = False
# If tile dir already exist, things could get messy because we don't
# have any checks in place for how to add to existing tiles
try:
os.makedirs(self.tile_dir, exist_ok=False)
# make dir for saving indiv meta per image, could be used for
# tracking job success / fail
os.makedirs(os.path.join(self.tile_dir, 'meta_dir'),
exist_ok=False)
except FileExistsError as e:
print("Tile dir exists. Only add untiled channels.")
self.tiles_exist = True
# make dir for saving individual meta per image, could be used for
# tracking job success / fail
os.makedirs(os.path.join(self.tile_dir, 'meta_dir'),
exist_ok=True)
self.flat_field_dir = flat_field_dir
self.frames_metadata = aux_utils.read_meta(self.input_dir)
# Get metadata indices
metadata_ids, _ = aux_utils.validate_metadata_indices(
frames_metadata=self.frames_metadata,
time_ids=time_ids,
channel_ids=channel_ids,
slice_ids=slice_ids,
pos_ids=pos_ids,
uniform_structure=True
)
self.channel_ids = metadata_ids['channel_ids']
self.normalize_channels = normalize_channels
self.time_ids = metadata_ids['time_ids']
self.slice_ids = metadata_ids['slice_ids']
self.pos_ids = metadata_ids['pos_ids']
self.normalize_channels = normalize_channels
# Determine which channels should be normalized in tiling
if self.normalize_channels == -1:
self.normalize_channels = [True] * len(self.channel_ids)
else:
assert len(self.normalize_channels) == len(self.channel_ids),\
"Channel ids {} and normalization list {} mismatch".format(
self.channel_ids,
self.normalize_channels,
)
# If more than one depth is specified, length must match channel ids
if isinstance(self.depths, list):
assert len(self.depths) == len(self.channel_ids),\
"depths ({}) and channels ({}) length mismatch".format(
self.depths, self.channel_ids,
)
# Get max of all specified depths
max_depth = max(self.depths)
# Convert channels + depths to dict for lookup
self.channel_depth = dict(zip(self.channel_ids, self.depths))
else:
# If depth is scalar, make depth the same for all channels
max_depth = self.depths
self.channel_depth = dict(zip(
self.channel_ids,
[self.depths] * len(self.channel_ids)),
)
# Adjust slice margins
self.slice_ids = aux_utils.adjust_slice_margins(
slice_ids=self.slice_ids,
depth=max_depth,
)
def get_tile_dir(self):
"""
Return directory containing tiles
:return str tile_dir: Directory with tiles
"""
return self.tile_dir
def _get_dataframe(self):
"""
Creates an empty dataframe with metadata column names for tiles. It's
the same names as for frames, but with channel_name removed and with
the addition of row_start and col_start.
TODO: Should I also save row_end and col_end while I'm at it?
Might be useful if we want to recreate tiles from a previous preprocessing
with mask run... Or just retrieve tile_size from preprocessing_info...
This is one of the functions that will have to be adapted once tested on
3D data.
:return dataframe tiled_metadata
"""
return pd.DataFrame(columns=[
"channel_idx",
"slice_idx",
"time_idx",
"file_name",
"pos_idx",
"row_start",
"col_start"])
def _get_flat_field(self, channel_idx):
"""
Get flat field image for a given channel index
:param int channel_idx: Channel index
:return np.array flat_field_im: flat field image for channel
"""
flat_field_im = None
if self.flat_field_dir is not None:
flat_field_im = np.load(
os.path.join(
self.flat_field_dir,
'flat-field_channel-{}.npy'.format(channel_idx),
)
)
return flat_field_im
    def _get_tile_indices(self, tiled_meta,
                          time_idx,
                          channel_idx,
                          pos_idx,
                          slice_idx):
        """Get the tile indices from saved meta data

        :param pd.DataFrame tiled_meta: DF with image level meta info
        :param int time_idx: time index for current image
        :param int channel_idx: channel index for current image
        :param int pos_idx: position / sample index for current image
        :param int slice_idx: slice index of current image
        :return list tile_indices: list of tile indices, one per tile:
            [row_start, row_end, col_start, col_end] (2D) or
            [row_start, row_end, col_start, col_end, sl_start, sl_end] (3D)
        """
        # Get tile indices from one channel only
        c = tiled_meta['channel_idx'] == channel_idx
        z = tiled_meta['slice_idx'] == slice_idx
        p = tiled_meta['pos_idx'] == pos_idx
        t = tiled_meta['time_idx'] == time_idx
        channel_meta = tiled_meta[c & z & p & t]
        # Get tile_indices: end indices are derived by adding tile_size to
        # the stored start indices (only starts are saved in the metadata).
        if self.tile_3d:
            tile_indices = pd.concat([
                channel_meta['row_start'],
                channel_meta['row_start'].add(self.tile_size[0]),
                channel_meta['col_start'],
                channel_meta['col_start'].add(self.tile_size[1]),
                channel_meta['slice_start'],
                channel_meta['slice_start'].add(self.tile_size[2])
            ], axis=1)
        else:
            tile_indices = pd.concat([
                channel_meta['row_start'],
                channel_meta['row_start'].add(self.tile_size[0]),
                channel_meta['col_start'],
                channel_meta['col_start'].add(self.tile_size[1]),
            ], axis=1)
        # Match list format similar to tile_image
        tile_indices = tile_indices.values.tolist()
        return tile_indices
def _get_tiled_data(self):
"""
If tile directory already exists, check which channels have been
processed and only tile new channels.
:return dataframe tiled_meta: Metadata with previously tiled channels
:return list of lists tile_indices: Nbr tiles x 4 indices with row
start + stop and column start + stop indices
"""
if self.tiles_exist:
tiled_meta = aux_utils.read_meta(self.tile_dir)
# Find untiled channels
tiled_channels = np.unique(tiled_meta['channel_idx'])
new_channels = list(set(self.channel_ids) -
set(tiled_channels))
if len(new_channels) == 0:
print('All channels in config have already been tiled')
return
self.channel_ids = new_channels
tile_indices = self._get_tile_indices(
tiled_meta=tiled_meta,
time_idx=self.time_ids[0],
channel_idx=tiled_channels[0],
pos_idx=self.pos_ids[0],
slice_idx=self.slice_ids[0]
)
else:
tiled_meta = self._get_dataframe()
tile_indices = None
return tiled_meta, tile_indices
def _get_input_fnames(self,
time_idx,
channel_idx,
slice_idx,
pos_idx,
mask_dir=None):
"""Get input_fnames
:param int time_idx: Time index
:param int channel_idx: Channel index
:param int slice_idx: Slice (z) index
:param int pos_idx: Position (FOV) index
:param str mask_dir: Directory containing masks
:return: list of input fnames
"""
if mask_dir is None:
depth = self.channel_depth[channel_idx]
else:
depth = self.mask_depth
margin = 0 if depth == 1 else depth // 2
im_fnames = []
for z in range(slice_idx - margin, slice_idx + margin + 1):
if mask_dir is not None:
mask_meta = aux_utils.read_meta(mask_dir)
meta_idx = aux_utils.get_meta_idx(
mask_meta,
time_idx,
channel_idx,
z,
pos_idx,
)
file_path = os.path.join(
mask_dir,
mask_meta.loc[meta_idx, 'file_name'],
)
else:
meta_idx = aux_utils.get_meta_idx(
self.frames_metadata,
time_idx,
channel_idx,
z,
pos_idx,
)
file_path = os.path.join(
self.input_dir,
self.frames_metadata.loc[meta_idx, 'file_name'],
)
# check if file_path exists
im_fnames.append(file_path)
return im_fnames
    def get_crop_tile_args(self,
                           channel_idx,
                           time_idx,
                           slice_idx,
                           pos_idx,
                           task_type,
                           tile_indices=None,
                           mask_dir=None,
                           min_fraction=None,
                           normalize_im=False):
        """Gather arguments for cropping or tiling

        :param int channel_idx: channel index for current image
        :param int time_idx: time index for current image
        :param int slice_idx: slice index for current image
        :param int pos_idx: position / sample index for current image
        :param str task_type: crop or tile
        :param list tile_indices: list of tile indices
        :param str mask_dir: dir containing image level masks
        :param float min_fraction: min foreground volume fraction for use tile
        :param bool normalize_im: indicator to normalize image based on z-score or not
        :return list cur_args: tuple of arguments for tiling
         list tile_indices: tile indices for current image
        """
        input_fnames = self._get_input_fnames(
            time_idx=time_idx,
            channel_idx=channel_idx,
            slice_idx=slice_idx,
            pos_idx=pos_idx,
            mask_dir=mask_dir
        )
        # no flat field correction for mask
        flat_field_fname = None
        hist_clip_limits = None
        is_mask = False
        if mask_dir is None:
            if self.flat_field_dir is not None:
                flat_field_fname = os.path.join(
                    self.flat_field_dir,
                    'flat-field_channel-{}.npy'.format(channel_idx)
                )
            # no hist_clipping for mask as mask is bool
            if self.hist_clip_limits is not None:
                hist_clip_limits = tuple(
                    self.hist_clip_limits
                )
        else:
            # Using masks, need to make sure they're bool
            is_mask = True
        # NOTE(review): the positional order of these tuples is an implicit
        # contract with the mp_utils crop/tile workers; the 'crop' tuple
        # includes self.tile_3d while the 'tile' tuple does not -- confirm
        # against mp_utils before changing either.
        if task_type == 'crop':
            cur_args = (tuple(input_fnames),
                        flat_field_fname,
                        hist_clip_limits,
                        time_idx,
                        channel_idx,
                        pos_idx,
                        slice_idx,
                        tuple(tile_indices),
                        self.image_format,
                        self.tile_dir,
                        self.int2str_len,
                        is_mask,
                        self.tile_3d,
                        normalize_im)
        elif task_type == 'tile':
            cur_args = (tuple(input_fnames),
                        flat_field_fname,
                        hist_clip_limits,
                        time_idx,
                        channel_idx,
                        pos_idx,
                        slice_idx,
                        self.tile_size,
                        self.step_size,
                        min_fraction,
                        self.image_format,
                        self.tile_dir,
                        self.int2str_len,
                        is_mask,
                        normalize_im)
        return cur_args
    def tile_stack(self):
        """
        Tiles images in the specified channels.

        https://research.wmz.ninja/articles/2018/03/
        on-sharing-large-arrays-when-using-pythons-multiprocessing.html

        Saves a csv with columns
        ['time_idx', 'channel_idx', 'pos_idx','slice_idx', 'file_name']
        for all the tiles
        """
        # Get or create tiled metadata and tile indices (expects a
        # (meta, indices) tuple from _get_tiled_data).
        prev_tiled_metadata, tile_indices = self._get_tiled_data()
        tiled_meta0 = None
        fn_args = []
        for channel_idx in self.channel_ids:
            # Find channel index position in channel_ids list
            list_idx = self.channel_ids.index(channel_idx)
            # Perform flatfield correction if flatfield dir is specified
            flat_field_im = self._get_flat_field(channel_idx=channel_idx)
            for slice_idx in self.slice_ids:
                for time_idx in self.time_ids:
                    for pos_idx in self.pos_ids:
                        if tile_indices is None:
                            # tile and save first image
                            # get meta data and tile_indices; all later
                            # images are cropped with these same indices
                            im = image_utils.preprocess_imstack(
                                frames_metadata=self.frames_metadata,
                                input_dir=self.input_dir,
                                depth=self.channel_depth[channel_idx],
                                time_idx=time_idx,
                                channel_idx=channel_idx,
                                slice_idx=slice_idx,
                                pos_idx=pos_idx,
                                flat_field_im=flat_field_im,
                                hist_clip_limits=self.hist_clip_limits,
                                normalize_im=self.normalize_channels[list_idx],
                            )
                            save_dict = {'time_idx': time_idx,
                                         'channel_idx': channel_idx,
                                         'pos_idx': pos_idx,
                                         'slice_idx': slice_idx,
                                         'save_dir': self.tile_dir,
                                         'image_format': self.image_format,
                                         'int2str_len': self.int2str_len}
                            tiled_meta0, tile_indices = \
                                tile_utils.tile_image(
                                    input_image=im,
                                    tile_size=self.tile_size,
                                    step_size=self.step_size,
                                    return_index=True,
                                    save_dict=save_dict,
                                )
                        else:
                            # Reuse the indices from the first tiled image;
                            # actual cropping happens in the worker pool.
                            cur_args = self.get_crop_tile_args(
                                channel_idx,
                                time_idx,
                                slice_idx,
                                pos_idx,
                                task_type='crop',
                                tile_indices=tile_indices,
                                normalize_im=self.normalize_channels[list_idx],
                            )
                            fn_args.append(cur_args)
        # Crop and save all queued images in parallel.
        tiled_meta_df_list = mp_utils.mp_crop_save(
            fn_args,
            workers=self.num_workers,
        )
        if tiled_meta0 is not None:
            tiled_meta_df_list.append(tiled_meta0)
        tiled_metadata = pd.concat(tiled_meta_df_list, ignore_index=True)
        if self.tiles_exist:
            # Append to metadata from a previous tiling run.
            tiled_metadata.reset_index(drop=True, inplace=True)
            prev_tiled_metadata.reset_index(drop=True, inplace=True)
            tiled_metadata = pd.concat(
                [prev_tiled_metadata, tiled_metadata],
                ignore_index=True,
            )
        # Finally, save all the metadata
        tiled_metadata = tiled_metadata.sort_values(by=['file_name'])
        tiled_metadata.to_csv(
            os.path.join(self.tile_dir, "frames_meta.csv"),
            sep=",",
        )
def tile_mask_stack(self,
mask_dir,
mask_channel,
min_fraction,
mask_depth=1):
"""
Tiles images in the specified channels assuming there are masks
already created in mask_dir. Only tiles above a certain fraction
of foreground in mask tile will be saved and added to metadata.
Saves a csv with columns ['time_idx', 'channel_idx', 'pos_idx',
'slice_idx', 'file_name'] for all the tiles
:param str mask_dir: Directory containing masks
:param int mask_channel: Channel number assigned to mask
:param float min_fraction: Minimum fraction of foreground in tiled masks
:param int mask_depth: Depth for mask channel
"""
# mask depth has to match input or ouput channel depth
assert mask_depth <= max(self.channel_depth.values())
self.mask_depth = mask_depth
# tile and save masks
# if mask channel is already tiled
if self.tiles_exist and mask_channel in self.channel_ids:
mask_meta_df = pd.read_csv(
os.path.join(self.tile_dir, 'frames_meta.csv')
)
else:
# TODO: different masks across timepoints (but MaskProcessor
# generates mask for tp=0 only)
mask_fn_args = []
for slice_idx in self.slice_ids:
for time_idx in self.time_ids:
for pos_idx in self.pos_ids:
# Evaluate mask, then channels.The masks will influence
# tiling indices, so it's not allowed to add masks to
# existing tiled data sets (indices will be retrieved
# from existing meta)
cur_args = self.get_crop_tile_args(
channel_idx=mask_channel,
time_idx=time_idx,
slice_idx=slice_idx,
pos_idx=pos_idx,
task_type='tile',
mask_dir=mask_dir,
min_fraction=min_fraction,
normalize_im=False,
)
mask_fn_args.append(cur_args)
# tile_image uses min_fraction assuming input_image is a bool
mask_meta_df_list = mp_utils.mp_tile_save(
mask_fn_args,
workers=self.num_workers,
)
mask_meta_df = pd.concat(mask_meta_df_list, ignore_index=True)
# Finally, save all the metadata
mask_meta_df = mask_meta_df.sort_values(by=['file_name'])
mask_meta_df.to_csv(
os.path.join(self.tile_dir, 'frames_meta.csv'),
sep=',',
)
# remove mask_channel from self.channel_ids if included
_ = [self.channel_ids.pop(idx)
for idx, val in enumerate(self.channel_ids)
if val == mask_channel]
_ = [self.normalize_channels.pop(idx)
for idx, val in enumerate(self.channel_ids)
if val == mask_channel]
fn_args = []
for slice_idx in self.slice_ids:
for time_idx in self.time_ids:
for pos_idx in np.unique(self.frames_metadata["pos_idx"]):
# Loop through all channels and tile from indices
cur_tile_indices = self._get_tile_indices(
tiled_meta=mask_meta_df,
time_idx=time_idx,
channel_idx=mask_channel,
pos_idx=pos_idx,
slice_idx=slice_idx
)
if np.any(cur_tile_indices):
for i, channel_idx in enumerate(self.channel_ids):
cur_args = self.get_crop_tile_args(
channel_idx,
time_idx,
slice_idx,
pos_idx,
task_type='crop',
tile_indices=cur_tile_indices,
normalize_im=self.normalize_channels[i],
)
fn_args.append(cur_args)
tiled_meta_df_list = mp_utils.mp_crop_save(
fn_args,
workers=self.num_workers,
)
tiled_metadata = pd.concat(tiled_meta_df_list, ignore_index=True)
# If there's been tiling done already, add to existing metadata
prev_tiled_metadata = aux_utils.read_meta(self.tile_dir)
tiled_metadata = pd.concat(
[prev_tiled_metadata.reset_index(drop=True),
tiled_metadata.reset_index(drop=True)],
axis=0,
ignore_index=True,
)
# Finally, save all the metadata
tiled_metadata = tiled_metadata.sort_values(by=['file_name'])
tiled_metadata.to_csv(
os.path.join(self.tile_dir, "frames_meta.csv"),
sep=',',
)
|
163425
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import unittest
from collections import Counter
from weightedDict import WeightedDict
class TestWeightedDict(unittest.TestCase):
    """Tests for WeightedDict: a mapping whose values are weights and which
    supports weighted random sampling of its keys."""

    # Usage example
    def test_main(self):
        """Smoke test: build a dict, mutate weights, sample, and print
        observed tallies next to the expected sampling frequencies."""
        random.seed(42)
        wdict = WeightedDict()
        wdict['dog'] = 38.2
        wdict['cat'] = 201.7
        wdict['cow'] = 222.3
        wdict['ostrich'] = 0.
        wdict['cow'] = 31.5 # Change the weight for cow
        wdict['unicorn'] = 0.01
        wdict['wolf'] = 128.1
        wdict['bear'] = 12.1
        wdict['aardvark'] = 9.1
        print(wdict['dog'])
        print(wdict.sample())
        print(wdict.keys())
        wdict.pop('cat') # Remove the cat
        dasum = 0.
        tallies = {}
        num_samples = 100000
        for i in wdict:
            tallies[i] = 0
        for _ in range(num_samples):
            tallies[wdict.sample()] += 1
        for i in wdict:
            dasum += wdict[i]
        for i in wdict:
            # Observed count vs. expected count (weight / total * samples).
            print(i, tallies[i], '%.2f' % (num_samples * wdict[i] / dasum))
        print(wdict)

    # A more rigorous test
    def test_big(self):
        """Statistical check of sample() plus 30k randomized insert/delete
        rounds validated against a plain dict reference."""
        random.seed(42)
        dstr = 'bcdefghijklmnopqrstuvwxyz'
        # Weight each letter by its 1-based position in dstr.
        data = {i: j for i, j in zip(dstr, [x + 1 for x in range(len(dstr))])}
        foo = WeightedDict()
        for i in dstr:
            foo[i] = data[i]
        # Check the sampling
        bar = Counter()
        dnum = 10000
        for _ in range(dnum):
            bar[foo.sample()] += 1
        den = sum(data.values())
        vals = {i: int(dnum * (j / den)) for i, j in data.items()}
        self.assertEqual(set(vals.keys()), set(bar.keys()))
        dsum = 0
        for i in sorted(vals):
            dif = abs(vals[i] - bar[i])
            dsum += dif
            print(i, vals[i], bar[i])
        print('Total percent from max: ' + str(100 * float(dsum) / dnum) + '%')
        # Total absolute deviation must stay below 10% of the sample count.
        self.assertLess((100 * float(dsum) / dnum), 10)
        # Check insert and deletion consistency.
        data2 = data.copy()
        for ii in range(30000):
            foo.check_tree()
            toggle = random.choice(dstr)
            print(ii, toggle, dstr)
            if toggle not in data2:
                data2[toggle] = data[toggle]
                foo[toggle] = data[toggle]
            else:
                data2.pop(toggle)
                foo.pop(toggle)
            self.assertEqual(tuple(foo.keys()), tuple(sorted(data2.keys())))
            for i, j in data2.items():
                self.assertLess(abs(foo[i] - j), .000000001)
            # Test emptying the tree
            if ii % 10000 == 0:
                # NOTE(review): random.shuffle needs a mutable sequence, so
                # WeightedDict.keys() presumably returns a list — confirm.
                dkeys = foo.keys()
                random.shuffle(dkeys)
                for toggle in dkeys:
                    foo.check_tree()
                    data2.pop(toggle)
                    foo.pop(toggle)
                    self.assertEqual(
                        tuple(foo.keys()), tuple(sorted(data2.keys())))
                for i, j in data2.items():
                    self.assertLess(abs(foo[i] - j), .000000001)
        print(foo)
        print('Success. Yay!')
# Note that the test output isn't identical across Python versions (2
# & 3) because random.seed has changed. We could use version=1, but
# that's not compatible with Python 2.
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
|
163454
|
import tensorflow as tf
# Input-pipeline configuration. The 28x28 single-channel images and the
# 55000/5000/10000 split match the standard MNIST dataset — confirm.
input_image_size = 28      # source image height/width in pixels
output_image_size = 24     # height/width after crop/pad for the network
input_image_channels = 1   # grayscale
num_labels = 10            # number of classes (one-hot label length)
valid_records = 5000
test_records = 10000
train_records = 55000
batch_size = 100
def read_path_file(image_file):
    """Parse a listing file of "label,image_path" lines.

    Args:
        image_file: Path to a text file with one "label,image_path" pair
            per line.

    Returns:
        Tuple (paths, labels): paths is a list of image path strings and
        labels is a list of int labels, in file order.
    """
    paths = []
    labels = []
    # Use a context manager so the handle is always closed (the original
    # leaked it), and strip only the trailing newline so the final line is
    # parsed correctly even when the file lacks a newline terminator
    # (line[:-1] chopped the last character in that case).
    with open(image_file, 'r') as f:
        for line in f:
            label, path = line.rstrip('\n').split(',')
            paths.append(path)
            labels.append(int(label))
    return paths, labels
def distorted_inputs(image_file, num_threads):
    """Build the training input pipeline: decode, crop/pad, apply random
    brightness/contrast distortions, whiten, and batch.

    Args:
        image_file: Path to a "label,image_path" listing file.
        num_threads: Number of threads used for batching.

    Returns:
        (images, labels) batch tensors of size batch_size.
    """
    image_list, label_list = read_path_file(image_file)
    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    labels = tf.convert_to_tensor(label_list, dtype=tf.int32)
    # shuffle=False keeps the path and label slices aligned pairwise.
    filename_queue = tf.train.slice_input_producer([images, labels], shuffle=False)
    result = read_image(filename_queue)
    distorted_image = tf.image.resize_image_with_crop_or_pad(result.image, output_image_size,
                                                             output_image_size)
    # Random photometric distortions for data augmentation.
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # NOTE(review): per_image_whitening is the pre-1.0 TF name; newer TF
    # renamed it to per_image_standardization — confirm the pinned version.
    white_image = tf.image.per_image_whitening(distorted_image)
    return generate_batches(white_image, result.label, num_threads)
def inputs(image_file, num_threads):
    """Build the evaluation input pipeline: decode, center crop/pad, and
    whiten — same as distorted_inputs but without random augmentation.

    Args:
        image_file: Path to a "label,image_path" listing file.
        num_threads: Number of threads used for batching.

    Returns:
        (images, labels) batch tensors of size batch_size.
    """
    path_list, label_list = read_path_file(image_file)
    path_tensor = tf.convert_to_tensor(path_list, dtype=tf.string)
    label_tensor = tf.convert_to_tensor(label_list, dtype=tf.int32)
    # shuffle=False keeps the path and label slices aligned pairwise.
    input_queue = tf.train.slice_input_producer([path_tensor, label_tensor], shuffle=False)
    record = read_image(input_queue)
    cropped = tf.image.resize_image_with_crop_or_pad(
        record.image, output_image_size, output_image_size)
    standardized = tf.image.per_image_whitening(cropped)
    return generate_batches(standardized, record.label, num_threads)
def read_image(filename_queue):
    """Decode one example from a (path, label) queue slice.

    Returns an object with `.image` (float32 [size, size, channels] tensor)
    and `.label` (dense one-hot float vector of length num_labels).
    """
    class _Record(object):
        pass

    record = _Record()
    raw_bytes = tf.read_file(filename_queue[0])
    decoded = tf.image.decode_png(raw_bytes, channels=input_image_channels)
    decoded = tf.cast(decoded, tf.float32)
    record.image = tf.reshape(
        decoded, [input_image_size, input_image_size, input_image_channels])
    label_index = tf.cast(filename_queue[1], tf.int32)
    # Expand the scalar class index into a dense one-hot vector.
    record.label = tf.sparse_to_dense(label_index, [num_labels], 1.0, 0.0)
    return record
def generate_batches(image, label, num_threads):
    """Group single (image, label) examples into fixed-size batches."""
    batched_images, batched_labels = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_threads,
    )
    return batched_images, batched_labels
|
163460
|
import ast
import tokenize
from typing import List
import asttokens
import pytest
from flake8.defaults import MAX_LINE_LENGTH # type: ignore
from flake8.processor import FileProcessor # type: ignore
class FakeOptions:
    """Minimal stand-in for Flake8's parsed options namespace.

    Only the attributes that FileProcessor reads are declared here; values
    are assigned by the ``fake_options`` fixture.
    """

    hang_closing: bool
    indent_size: int
    max_line_length: int
    max_doc_length: int
    verbose: bool
@pytest.fixture
def fake_options() -> FakeOptions:
    """Provide a FakeOptions instance populated with Flake8's defaults."""
    opts = FakeOptions()
    opts.verbose = False
    opts.hang_closing = False
    opts.indent_size = 4
    opts.max_line_length = MAX_LINE_LENGTH
    opts.max_doc_length = MAX_LINE_LENGTH
    return opts
@pytest.fixture
def file_tokens(code_str: str, tmpdir, fake_options: FakeOptions) -> List[tokenize.TokenInfo]:
    """
    Args:
        code_str: Code to be tokenized.
    Returns:
        Tokens for code to be checked. This emulates the behaviour of Flake8's
        ``FileProcessor`` which is using ``tokenize.generate_tokens``.
    """
    # Write the snippet to a real file because FileProcessor reads from disk.
    code_file = tmpdir.join('code_file.py')
    code_file.write(code_str)
    file_processor = FileProcessor(str(code_file), options=fake_options)
    tokens = file_processor.generate_tokens()
    return list(tokens)
@pytest.fixture
def first_token(file_tokens: List[tokenize.TokenInfo]) -> tokenize.TokenInfo:
    """
    Returns:
        First token of the tokenized ``code_str`` fixture.
    """
    return file_tokens[0]
@pytest.fixture
def tree(code_str: str) -> ast.Module:
    """Parse the ``code_str`` fixture into an AST module."""
    parsed_module = ast.parse(code_str)
    return parsed_module
@pytest.fixture
def asttok(code_str: str, tree: ast.Module) -> asttokens.ASTTokens:
    """Annotate the parsed ``tree`` with token positions via asttokens."""
    return asttokens.ASTTokens(code_str, tree=tree)
@pytest.fixture
def first_node_with_tokens(code_str: str, tree: ast.Module, asttok: asttokens.ASTTokens):
    """
    Given ``code_str`` fixture, parse that string with ``ast.parse`` and then
    augment it with ``asttokens.ASTTokens`` (the ``asttok`` fixture annotates
    ``tree`` in place as a side effect of being requested here).
    Returns:
        ast.node: First node in parsed tree.
    """
    return tree.body[0]
@pytest.fixture
def tokens(asttok, first_node_with_tokens) -> List[tokenize.TokenInfo]:
    """All tokens of the first node; include_extra=True also keeps tokens
    not attached to AST nodes, such as comments and non-logical newlines."""
    return list(asttok.get_tokens(
        first_node_with_tokens,
        include_extra=True,
    ))
@pytest.fixture
def lines(code_str) -> List[str]:
    """
    Given ``code_str`` chop it into lines as Flake8 would pass to a plugin -
    each line includes its newline terminator.
    Returns:
        list
    """
    return code_str.splitlines(keepends=True)
|
163466
|
import click
import pytest
from pytest_mock import MockFixture
from opta.exceptions import UserErrors
from opta.utils import alternate_yaml_extension, check_opta_file_exists, exp_backoff
def test_exp_backoff(mocker: MockFixture) -> None:
    """exp_backoff should sleep with growing delays between retries and
    never sleep when the loop body exits on the first pass."""
    # Sleep should be exponential for each iteration
    mocked_sleep = mocker.patch("opta.utils.sleep")
    retries = 3
    for _ in exp_backoff(num_tries=retries):
        pass
    raw_call_args = mocked_sleep.call_args_list
    sleep_param_history = [arg[0][0] for arg in raw_call_args]
    # NOTE(review): observed schedule is 2, 4, 16 (not 2, 4, 8) — presumably
    # the implementation squares the previous delay; confirm in opta.utils.
    assert sleep_param_history == [2, 4, 16]
    # Sleep should not be called if body succeeded and exited.
    mocked_sleep = mocker.patch("opta.utils.sleep")
    for _ in exp_backoff(num_tries=retries):
        break
    assert mocked_sleep.call_count == 0
def test_check_opta_file_exists_file_exists(mocker: MockFixture) -> None:
    """When the given config path exists, it is returned unchanged and the
    user is never prompted nor the process exited."""
    mock_config_path = "mock_config_path"
    mock_os_path_exists = mocker.patch("opta.utils.os.path.exists", return_value=True)
    mock_click_prompt = mocker.patch("opta.utils.click.prompt")
    mock_system_exit = mocker.patch("opta.utils.sys.exit")
    config_path = check_opta_file_exists(mock_config_path)
    assert config_path == mock_config_path
    mock_os_path_exists.assert_called_once_with(mock_config_path)
    mock_click_prompt.assert_not_called()
    mock_system_exit.assert_not_called()
def test_check_opta_file_exists_file_does_not_exists_user_input(
    mocker: MockFixture,
) -> None:
    """When the config path is missing, the user is prompted once and the
    path they enter (which does exist) is returned."""
    mock_config_path = "mock_config_path"
    mock_user_config_path = "mock_user_config_path"
    # side_effect order: original path missing (False), user path found (True).
    mock_os_path_exists = mocker.patch(
        "opta.utils.os.path.exists", side_effect=[False, True]
    )
    mock_click_prompt = mocker.patch(
        "opta.utils.click.prompt", return_value=mock_user_config_path
    )
    mock_system_exit = mocker.patch("opta.utils.sys.exit")
    config_path = check_opta_file_exists(mock_config_path)
    assert config_path == mock_user_config_path
    mock_os_path_exists.assert_has_calls(
        [mocker.call(mock_config_path), mocker.call(mock_user_config_path)]
    )
    mock_click_prompt.assert_called_once_with(
        "Enter a Configuration Path (Empty String will exit)",
        default="",
        type=click.STRING,
        show_default=False,
    )
    mock_system_exit.assert_not_called()
def test_check_opta_file_exists_file_does_not_exists_no_user_input(
    mocker: MockFixture,
) -> None:
    """When the config path is missing and the user submits an empty string
    at the prompt, the process exits cleanly (exit code 0)."""
    mock_config_path = "mock_config_path"
    mock_no_user_config_path = ""
    mock_os_path_exists = mocker.patch(
        "opta.utils.os.path.exists", side_effect=[False, False]
    )
    mock_click_prompt = mocker.patch(
        "opta.utils.click.prompt", return_value=mock_no_user_config_path
    )
    mock_system_exit = mocker.patch("opta.utils.sys.exit")
    config_path = check_opta_file_exists(mock_config_path)
    # sys.exit is mocked, so execution continues and the empty path returns.
    assert config_path == mock_no_user_config_path
    mock_os_path_exists.assert_called_once_with(mock_config_path)
    mock_click_prompt.assert_called_once_with(
        "Enter a Configuration Path (Empty String will exit)",
        default="",
        type=click.STRING,
        show_default=False,
    )
    mock_system_exit.assert_called_once_with(0)
def test_check_opta_file_exists_file_does_not_exists_invalid_user_input(
    mocker: MockFixture,
) -> None:
    """When the config path is missing and the path the user enters also does
    not exist, a UserErrors exception is raised."""
    mock_config_path = "mock_config_path"
    mock_invalid_user_config_path = "mock_invalid_user_config_path"
    # side_effect order: original path missing, user-supplied path missing.
    mock_os_path_exists = mocker.patch(
        "opta.utils.os.path.exists", side_effect=[False, False]
    )
    mock_click_prompt = mocker.patch(
        "opta.utils.click.prompt", return_value=mock_invalid_user_config_path
    )
    mock_system_exit = mocker.patch("opta.utils.sys.exit")
    with pytest.raises(UserErrors):
        _ = check_opta_file_exists(mock_config_path)
    mock_os_path_exists.assert_has_calls(
        [mocker.call(mock_config_path), mocker.call(mock_invalid_user_config_path)]
    )
    mock_click_prompt.assert_called_once_with(
        "Enter a Configuration Path (Empty String will exit)",
        default="",
        type=click.STRING,
        show_default=False,
    )
    mock_system_exit.assert_not_called()
def test_alternate_yaml_extension() -> None:
    """alternate_yaml_extension swaps .yaml <-> .yml (case-insensitively,
    preserving directories) and reports whether a swap happened."""
    cases = [
        ("opta.yaml", ("opta.yml", True)),
        ("opta.yml", ("opta.yaml", True)),
        ("opta.YML", ("opta.yaml", True)),
        ("path/opta.yml", ("path/opta.yaml", True)),
        ("path/config", ("path/config", False)),
    ]
    for given, expected in cases:
        assert alternate_yaml_extension(given) == expected
|
163473
|
import logging
from enum import Enum
from typing import List, Dict, Any
from fidesops.service.pagination.pagination_strategy import PaginationStrategy
from fidesops.service.pagination.pagination_strategy_cursor import (
CursorPaginationStrategy,
)
from fidesops.service.pagination.pagination_strategy_link import LinkPaginationStrategy
from fidesops.service.pagination.pagination_strategy_offset import (
OffsetPaginationStrategy,
)
from pydantic import ValidationError
from fidesops.common_exceptions import (
NoSuchStrategyException,
ValidationError as FidesopsValidationError,
)
from fidesops.schemas.saas.strategy_configuration import StrategyConfiguration
logger = logging.getLogger(__name__)
class SupportedPaginationStrategies(Enum):
    """
    The supported methods by which Fidesops can paginate SaaS connector
    requests. Each member's value is the strategy class itself.
    """

    offset = OffsetPaginationStrategy
    link = LinkPaginationStrategy
    cursor = CursorPaginationStrategy
def get_strategy(
    strategy_name: str,
    configuration: Dict[str, Any],
) -> PaginationStrategy:
    """
    Look up and instantiate the pagination strategy named ``strategy_name``.

    The raw ``configuration`` dict is validated against the strategy's own
    configuration model before the strategy is constructed.

    Raises:
        NoSuchStrategyException: if ``strategy_name`` is not registered.
        FidesopsValidationError: if the configuration fails validation.
    """
    if strategy_name not in SupportedPaginationStrategies.__members__:
        valid_strategies = ", ".join(member.name for member in SupportedPaginationStrategies)
        raise NoSuchStrategyException(
            f"Strategy '{strategy_name}' does not exist. Valid strategies are [{valid_strategies}]"
        )
    strategy_cls = SupportedPaginationStrategies[strategy_name].value
    try:
        validated_config: StrategyConfiguration = strategy_cls.get_configuration_model()(
            **configuration
        )
        return strategy_cls(configuration=validated_config)
    except ValidationError as e:
        raise FidesopsValidationError(message=str(e))
def get_strategies() -> List[PaginationStrategy]:
    """Returns all supported pagination strategies"""
    return [member.value for member in SupportedPaginationStrategies]
|
163494
|
from django.contrib.auth.models import Group as AbstractGroup
from django.core.validators import RegexValidator
from django.db import models
from openwisp_users.base.models import (
AbstractUser,
BaseGroup,
BaseOrganization,
BaseOrganizationOwner,
BaseOrganizationUser,
)
from organizations.abstract import (
AbstractOrganization,
AbstractOrganizationOwner,
AbstractOrganizationUser,
)
class User(AbstractUser):
    """Concrete user model extending openwisp's AbstractUser with an
    optional US-formatted social security number."""

    # Optional; when provided must match NNN-NN-NNNN (11 chars incl. dashes).
    social_security_number = models.CharField(
        max_length=11,
        null=True,
        blank=True,
        validators=[RegexValidator(r'^\d\d\d-\d\d-\d\d\d\d$')],
    )

    class Meta(AbstractUser.Meta):
        # The parent Meta is abstract; make this model concrete.
        abstract = False
class Organization(BaseOrganization, AbstractOrganization):
    """Concrete organization model combining the openwisp base with
    django-organizations' abstract implementation."""
    pass
class OrganizationUser(BaseOrganizationUser, AbstractOrganizationUser):
    """Concrete membership model linking a User to an Organization."""
    pass
class OrganizationOwner(BaseOrganizationOwner, AbstractOrganizationOwner):
    """Concrete model marking one OrganizationUser as the owner."""
    pass
class Group(BaseGroup, AbstractGroup):
    """Concrete permission group based on django.contrib.auth's Group."""
    pass
|
163508
|
# NICOS setup: QMesyDAQ test detector reached via a local Tango server.
description = 'testing qmesydaq'
group = 'optional'
excludes = []
sysconfig = dict(
    datasinks = ['LiveViewSink'],
)
# Base URI of the Tango server exporting the QMesyDAQ channels.
tango_base = 'tango://localhost:10000/test/qmesydaq/'
devices = dict(
    # RAWFileSaver = device('nicos.devices.datasinks.RawImageSink',
    #     description = 'Saves image data in RAW format',
    #     filenametemplate = [
    #         '%(proposal)s_%(pointcounter)06d.raw',
    #         '%(proposal)s_%(scancounter)s_'
    #         '%(pointcounter)s_%(pointnumber)s.raw'],
    #     subdir = 'QMesyDAQ2',
    #     lowlevel = True,
    #     ),
    LiveViewSink = device('nicos.devices.datasinks.LiveViewSink',
        description = 'Sends image data to LiveViewWidget',
    ),
    # Low-level counter/monitor channels; aggregated by the 'mesytec'
    # detector below, hence lowlevel = True.
    qm_ctr0 = device('nicos.devices.entangle.CounterChannel',
        description = 'QMesyDAQ Counter0',
        tangodevice = tango_base + 'counter0',
        type = 'counter',
        lowlevel = True,
    ),
    qm_ctr1 = device('nicos.devices.entangle.CounterChannel',
        description = 'QMesyDAQ Counter1',
        tangodevice = tango_base + 'counter1',
        type = 'counter',
        lowlevel = True,
    ),
    qm_ctr2 = device('nicos.devices.entangle.CounterChannel',
        description = 'QMesyDAQ Counter2',
        tangodevice = tango_base + 'counter2',
        type = 'monitor',
        lowlevel = True,
    ),
    qm_ctr3 = device('nicos.devices.entangle.CounterChannel',
        description = 'QMesyDAQ Counter3',
        tangodevice = tango_base + 'counter3',
        type = 'monitor',
        lowlevel = True,
    ),
    qm_ev = device('nicos.devices.entangle.CounterChannel',
        description = 'QMesyDAQ Events channel',
        tangodevice = tango_base + 'events',
        type = 'counter',
        lowlevel = True,
    ),
    qm_timer = device('nicos.devices.entangle.TimerChannel',
        description = 'QMesyDAQ Timer',
        tangodevice = tango_base + 'timer',
    ),
    # Aggregate detector combining the timer and event channels.
    mesytec = device('nicos.devices.generic.Detector',
        description = 'QMesyDAQ Image type Detector',
        timers = ['qm_timer'],
        counters = [
            'qm_ev',
            # 'qm_ctr0', 'qm_ctr2'
        ],
        monitors = [
            #'qm_ctr1', 'qm_ctr3'
        ],
        others = [],
    ),
)
# Executed after loading this setup.
startupcode = '''
SetDetectors(mesytec)
'''
|
163550
|
from __future__ import absolute_import
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
import sys
from djipsum import __VERSION__
from djipsum.fields import DjipsumFields
class Command(BaseCommand):
    """Management command that fills a model with generated lorem-ipsum
    rows, either via the built-in field faker or via a user-supplied
    generator function (dotted path)."""

    help = 'To generate awesome lorem ipsum for your model!'

    def add_arguments(self, parser):
        """Register the command-line options on the argparse parser."""
        parser.add_argument(
            '-dv',
            '--djipsum_version',
            action='store_true',
            help='Show djipsum version number and exit.'
        )
        parser.add_argument(
            '-auto',
            '--auto_gen',
            action='store_true',
            help='Automatic generate lorem ipsum from custom generator class.'
        )
        parser.add_argument(
            '-cg',
            '--custom_generator',
            help='Custom a function generator (full path) for auto-gen.'
        )
        parser.add_argument(
            '--app',
            help='The app name.'
        )
        parser.add_argument(
            '--model',
            help='The model class name.'
        )
        parser.add_argument(
            '--max',
            type=int,
            default=10,
            help='Maximum generate lorem ipsum.'
        )

    def handle(self, *args, **options):
        """Dispatch on the parsed options: run a custom generator, print
        the version, show help, or generate rows for the given app/model.

        Raises:
            CommandError: if the custom generator cannot be imported/run or
                the app/model lookup fails.
        """
        auto_gen = options['auto_gen']
        custom_generator = options['custom_generator']
        app = options['app']
        model = options['model']
        maximum = options['max']
        if auto_gen and custom_generator:
            # Resolve the dotted path "pkg.module.func" into a callable.
            components = custom_generator.split('.')
            func_name = components[-1]
            try:
                mod = __import__('.'.join(components[:-1]), fromlist=[func_name])
                generate_cst_faker = getattr(mod, func_name)
                self.stdout.write(
                    self.style.SUCCESS(
                        generate_cst_faker(maximum=maximum)
                    )
                )
                # SystemExit is not an Exception, so it is not re-wrapped below.
                sys.exit()
            except Exception as e:
                raise CommandError(e) from e
        elif options['djipsum_version']:
            return __VERSION__
        elif app is None:  # PEP 8 (E711): compare to None with `is`, not `==`
            return self.print_help('djipsum', '-h')
        try:
            model_class = apps.get_model(app_label=app, model_name=model)
        except Exception as e:
            raise CommandError(e) from e
        exclude = ['pk', 'id', 'objects']
        # removing `model_class._meta.fields` to `model_class._meta.get_fields()
        # to get all fields.`
        # Ref: http://stackoverflow.com/a/3106314/6396981
        fields = [
            {'field_type': f.__class__.__name__, 'field_name': f.name}
            for f in model_class._meta.get_fields() if f.name not in exclude
        ]
        validated_model_fields = DjipsumFields(
            model_class,
            fields,
            maximum
        ).create_validated_fields()

        def lorem_info():
            # Success banner shown after generation completes.
            return """\n[+] Successfully generate the lorem ipsum for `{0}`\n\n{1}\n""".format(
                model_class,
                validated_model_fields
            )
        self.stdout.write(
            self.style.SUCCESS(
                lorem_info()
            )
        )
|
163551
|
from rest_framework import serializers
from rest_framework.serializers import Serializer, SerializerMethodField
class SearchSerializer(Serializer):
    """Read-only serializer for topic search results.

    Expects search-index documents exposing at least the keys 'django_id'
    (stringified primary key) and 'topic_name' (iterable of names).
    """

    id = SerializerMethodField()
    topic_source_id = serializers.CharField()
    topic_issuer_id = serializers.IntegerField()
    topic_type_id = serializers.IntegerField()
    topic_names = SerializerMethodField()
    topic_inactive = serializers.BooleanField()
    topic_revoked = serializers.BooleanField()

    @staticmethod
    def get_id(obj):
        # The index stores the primary key as the string field 'django_id'.
        return int(obj['django_id'])

    @staticmethod
    def get_topic_names(obj):
        # list() replaces the redundant identity comprehension.
        return list(obj['topic_name'])
|
163553
|
from .preposition_extract import in_complex_preposition, complex_preposition_child, get_children
class ArgumentExtractor:
    """Extracts argument token indices for a predicate from a UD-style
    dependency parse (parallel lists of postags/morphs/lemmas plus a
    syntax tree of tokens carrying .link_name and .parent)."""

    # POS tags a token must carry to qualify as an argument.
    ARGUMENT_POSTAGS = {
        'NOUN',
        'PRON',
        'ADJ',
        'PROPN'
    }
    LN_HOMOGENEOUS = 'conj'  # coordinated (homogeneous) parts
    LN_AGENT = 'nsubj'       # grammatical subject
    LN_DIRECTION = 'nmod'
    # Complex-adverb exclusion pattern: lemma -> [its Case, next lemma, next Case]
    COMPLEX_ADVERBS = {
        'то': ['Ins', 'самый', 'Dat']
    }
    # Russian patronymic suffixes, filtered out of 'flat' name links.
    PATRONYMICS = ['вна', 'чна', 'вич', 'ьич', 'тич']

    def __call__(self, pred_number, postags, morphs, lemmas, syntax_dep_tree):
        """ Return list of arguments for predicate in the sentence """
        arguments = self._get_own_args(pred_number, postags, morphs, lemmas, syntax_dep_tree)
        expanded_args = self._get_conj_args(pred_number, postags, morphs, lemmas, syntax_dep_tree)
        result = list(set(arguments + expanded_args))
        possible_subject = self._get_subject(arguments, postags, morphs, lemmas, syntax_dep_tree)
        if not possible_subject:
            # No direct subject: look through adverbial/relative clause heads.
            advcl = self._get_direct_link(pred_number, syntax_dep_tree, 'advcl')
            if not advcl:
                advcl = self._get_direct_link(pred_number, syntax_dep_tree, 'acl')
            if advcl:
                possible_subject = self._get_subject(get_children(advcl, syntax_dep_tree),
                                                     postags, morphs, lemmas, syntax_dep_tree)
            if not possible_subject:
                possible_subject = self._get_adv_cascade_subject(pred_number, postags, morphs, lemmas, syntax_dep_tree)
        if possible_subject and possible_subject[1]:
            # Replace the subject index with its attached proper name.
            for i, n in enumerate(result):
                if n == possible_subject[0]:
                    result[i] = possible_subject[1]
        elif possible_subject and possible_subject[0] not in result:
            result.append(possible_subject[0])
        possible_object = self._get_object(pred_number, syntax_dep_tree)
        if possible_object:
            result.append(possible_object)
        # possible_modifier = self._get_modifier(pred_number, syntax_dep_tree)
        # if possible_modifier:
        #     result.append(possible_modifier)
        parataxial_subject = self._get_adv_cascade_subject(pred_number, postags, morphs, lemmas, syntax_dep_tree)
        if parataxial_subject and parataxial_subject[1]:
            for i, n in enumerate(result):
                if n == parataxial_subject[0]:
                    result[i] = parataxial_subject[1]
        elif parataxial_subject and parataxial_subject[0] not in result:
            result.append(parataxial_subject[0])
        result = self._clean_up_adverbs(list(set(result)), morphs, lemmas)
        return result

    def _get_own_args(self, pred_number, postags, morphs, lemmas, syntax_dep_tree):
        """Collect direct children of the predicate that qualify as arguments,
        unwrapping complex prepositions and nested 'obl' case constructions."""
        arguments = []
        children = get_children(pred_number, syntax_dep_tree)
        for child_number in children:
            prep = in_complex_preposition(child_number, postags, morphs, lemmas, syntax_dep_tree)
            if prep:
                arg_num = complex_preposition_child(prep, syntax_dep_tree)
                if arg_num is None:
                    continue
                if postags[arg_num] not in self.ARGUMENT_POSTAGS:
                    continue
                arguments.append(arg_num)
            elif postags[child_number] in self.ARGUMENT_POSTAGS:
                if syntax_dep_tree[child_number].link_name == 'obl' and postags[child_number] == 'NOUN':
                    # "case"-linked noun under an oblique: take the inner noun.
                    complex_subject = False
                    for grandchild in get_children(child_number, syntax_dep_tree):
                        if syntax_dep_tree[grandchild].link_name == 'case' and postags[grandchild] == 'NOUN':
                            complex_subject = True; break
                    if complex_subject:
                        arguments.append(grandchild)
                    else:
                        arguments.append(child_number)
                else:
                    arguments.append(child_number)
        possible_cause = self._get_cause(pred_number, syntax_dep_tree, postags)
        if possible_cause:
            arguments.append(possible_cause)
        return arguments

    def _get_conj_args(self, pred_number, postags, morphs, lemmas, syntax_dep_tree):
        """Borrow subject arguments from coordinated ('conj') predicates when
        this predicate lacks its own."""
        def is_homogen_pair(a, b):
            synt_b = syntax_dep_tree[b]
            return synt_b.link_name == self.LN_HOMOGENEOUS and synt_b.parent == a

        def find_linkname(predicate, linkname):
            return [token for token in syntax_dep_tree if token.link_name == linkname and token.parent == predicate]

        def expand_linkname(arguments, linkname):
            if not find_linkname(pred_number, linkname):
                first_argument = [arg for arg in arguments if
                                  syntax_dep_tree[arg].link_name == linkname]
                if first_argument:
                    first_argument = first_argument[0]
                    return [first_argument] + [argument for argument, arg_synt in enumerate(syntax_dep_tree) if
                                               is_homogen_pair(first_argument, argument)]
                else:
                    # NOTE(review): first_argument is [] here, so no token's
                    # parent can equal it and this always yields [] — confirm
                    # whether that is the intended fallback.
                    return [argument for argument, arg_synt in enumerate(syntax_dep_tree) if
                            is_homogen_pair(first_argument, argument)]
            return []

        conj_predicates = [number_c for number_c, synt_c in enumerate(syntax_dep_tree)
                           if is_homogen_pair(number_c, pred_number)]
        result = []
        for predicate in conj_predicates:
            arguments = self._get_own_args(predicate, postags, morphs, lemmas, syntax_dep_tree)
            result += expand_linkname(arguments, self.LN_AGENT)
            #result += expand_linkname(arguments, self.LN_OBJ)
        return result

    def _get_subject(self, arguments, postags, morphs, lemmas, syntax_dep_tree):
        """Find the subject among the arguments; returns (subject, name)
        where name is an attached proper-name token (or None), or []."""
        def _find_subject_name(subject):
            if postags[subject] != 'NOUN':
                return None
            result = [i for i, token in enumerate(syntax_dep_tree) if
                      token.parent == subject and token.link_name == 'name']
            result += [i for i, token in enumerate(syntax_dep_tree) if
                       token.parent == subject and token.link_name == 'appos' and morphs[i].get('Case') not in ['Ins', 'Gen']]
            result += [i for i, token in enumerate(syntax_dep_tree) if
                       token.parent == subject and token.link_name == 'iobj' and morphs[i].get('Case') == 'Nom']
            result += [i for i, token in enumerate(syntax_dep_tree) if
                       token.parent == subject and token.link_name == 'flat' and morphs[i].get('Case') == 'Nom' \
                       and lemmas[i][-3:] not in self.PATRONYMICS]
            if not result:
                return None
            return result[0]

        for argument in arguments:
            # NOTE(review): `in self.LN_AGENT` is a substring test on 'nsubj',
            # not an equality test — confirm that is intentional.
            if syntax_dep_tree[argument].link_name in self.LN_AGENT:
                subject = argument
                name = _find_subject_name(subject)
                if name:
                    second_name = _find_subject_name(name)
                    if second_name:
                        name = second_name
                return subject, name
        return []

    def _get_first_part(self, pred_number, syntax_dep_tree):
        """ Return the first verb of quasi-complex predicates """
        composition_links = {'xcomp', 'ccomp', 'parataxis'}
        if syntax_dep_tree[pred_number].link_name in composition_links:
            return syntax_dep_tree[pred_number].parent
        return None

    def _get_adv_cascade_subject(self, pred_number, postags, morphs, lemmas, syntax_dep_tree):
        """ Return a subject for participle phrases """
        possible_subject = None
        first_part = self._get_first_part(pred_number, syntax_dep_tree)
        if first_part:
            # Climb up to two composition links, falling back to coordinated
            # predicates and adverbial clauses at each level.
            first_first_part = self._get_first_part(first_part, syntax_dep_tree)
            if first_first_part:
                possible_subject = self._get_subject(
                    self._get_own_args(first_first_part, postags, morphs, lemmas, syntax_dep_tree),
                    postags, morphs, lemmas, syntax_dep_tree)
                if not possible_subject:
                    conjunct = self._get_direct_link(first_first_part, syntax_dep_tree, self.LN_HOMOGENEOUS)
                    if conjunct:
                        possible_subject = self._get_subject(
                            self._get_own_args(conjunct, postags, morphs, lemmas, syntax_dep_tree),
                            postags, morphs, lemmas, syntax_dep_tree)
                        if not possible_subject:
                            advcl = self._get_direct_link(conjunct, syntax_dep_tree, 'advcl')
                            if advcl:
                                possible_subject = self._get_subject(
                                    self._get_own_args(advcl, postags, morphs, lemmas, syntax_dep_tree),
                                    postags, morphs, lemmas, syntax_dep_tree)
                                if not possible_subject:
                                    first_part = self._get_first_part(advcl, syntax_dep_tree)
                                    if first_part:
                                        possible_subject = self._get_subject(
                                            self._get_own_args(first_part, postags, morphs, lemmas, syntax_dep_tree),
                                            postags, morphs, lemmas, syntax_dep_tree)
            else:
                possible_subject = self._get_subject(
                    self._get_own_args(first_part, postags, morphs, lemmas, syntax_dep_tree),
                    postags, morphs, lemmas, syntax_dep_tree)
                if not possible_subject:
                    conjunct = self._get_direct_link(first_part, syntax_dep_tree, self.LN_HOMOGENEOUS)
                    if conjunct:
                        possible_subject = self._get_subject(
                            self._get_own_args(conjunct, postags, morphs, lemmas, syntax_dep_tree),
                            postags, morphs, lemmas, syntax_dep_tree)
                        if not possible_subject:
                            advcl = self._get_direct_link(conjunct, syntax_dep_tree, 'advcl')
                            if advcl:
                                possible_subject = self._get_subject(
                                    self._get_own_args(advcl, postags, morphs, lemmas, syntax_dep_tree),
                                    postags, morphs, lemmas, syntax_dep_tree)
                                if not possible_subject:
                                    first_part = self._get_first_part(advcl, syntax_dep_tree)
                                    if first_part:
                                        possible_subject = self._get_subject(
                                            self._get_own_args(first_part, postags, morphs, lemmas, syntax_dep_tree),
                                            postags, morphs, lemmas, syntax_dep_tree)
        return possible_subject

    def _get_direct_link(self, pred_number, syntax_dep_tree, linkname):
        """Return the parent of pred_number if it is attached via linkname."""
        if syntax_dep_tree[pred_number].link_name != linkname:
            return None
        return syntax_dep_tree[pred_number].parent

    def _get_cause(self, pred_number, syntax_dep_tree, postags):
        """Find an 'nmod' noun hanging off a PART subject of the predicate."""
        for i, possible_cause in enumerate(syntax_dep_tree):
            if possible_cause.link_name == 'nmod' and postags[i] == 'NOUN':
                if syntax_dep_tree[possible_cause.parent].link_name == 'nsubj' \
                        and syntax_dep_tree[possible_cause.parent].parent == pred_number \
                        and postags[possible_cause.parent] == 'PART':
                    return i
        return None

    def _get_object(self, pred_number, syntax_dep_tree):
        """Find an 'obj' token whose parent is attached via 'acl'."""
        for i, possible_obj in enumerate(syntax_dep_tree):
            if possible_obj.link_name == 'obj':
                if syntax_dep_tree[possible_obj.parent].link_name == 'acl':
                    return i
        return None

    def _get_modifier(self, pred_number, syntax_dep_tree):
        """Find an 'nmod' token whose parent is attached via 'obl'
        (currently unused by __call__)."""
        for i, possible_obj in enumerate(syntax_dep_tree):
            if possible_obj.link_name == 'nmod':
                if syntax_dep_tree[possible_obj.parent].link_name == 'obl':
                    return i
        return None

    def _clean_up_adverbs(self, arguments, morphs, lemmas):
        """Drop arguments that are the head of a complex-adverb pattern
        (e.g. 'то самое' in the configured case sequence)."""
        result = []
        for arg in arguments:
            candidate_to_exclude = self.COMPLEX_ADVERBS.get(lemmas[arg])
            if not candidate_to_exclude:
                result.append(arg)
            else:
                # Bug fix: guard must ensure arg + 1 is a valid index; the old
                # check `len(lemmas) > arg` allowed lemmas[arg + 1] to raise
                # IndexError when the adverb lemma was the last token.
                if morphs[arg].get('Case') == candidate_to_exclude[0] \
                        and arg + 1 < len(lemmas) and lemmas[arg + 1] == candidate_to_exclude[1] \
                        and morphs[arg + 1].get('Case') == candidate_to_exclude[2]:
                    continue
                else:
                    result.append(arg)
        return result
|
163556
|
import abc
from typing import Dict, List, Deque
import datetime
import numpy
from agnes.algos.base import _BaseAlgo
from agnes.common import logger
from agnes.common.schedules import Saver
from agnes.nns.initializer import _BaseChooser
from agnes.common.envs_prep import DummyVecEnv
class BaseRunner(abc.ABC):
    """Base class for RL runners: couples an algorithm (trainer/worker pair)
    with a vectorized environment and handles logging and checkpointing."""

    # Class-level defaults; replaced per-instance by log()/save_every().
    logger = logger.ListLogger()
    saver: Saver = Saver()
    workers_num = 1
    # Populated by subclasses before run() is used.
    trainer: _BaseAlgo
    worker: _BaseAlgo
    env: DummyVecEnv
    state: numpy.ndarray
    done: numpy.ndarray

    def __init__(self, env, algo, nn: _BaseChooser, config: Dict):
        """
        Args:
            env: dict with keys "env", "env_type", "env_num", "env_name".
            algo: algorithm factory providing get_config(env_type).
            nn: network chooser; its .meta names the NN type for logging.
            config: optional explicit config overriding the algorithm default.
        """
        self.env = env["env"]
        self.nn_name = nn.meta
        self.cnfg, self.env_type = algo.get_config(env["env_type"])
        if config is not None:
            self.cnfg = config
        self.timesteps = self.cnfg['timesteps']
        self.nsteps = self.cnfg['nsteps']
        self.vec_num = env["env_num"]
        self.env_id = env["env_name"]

    def is_trainer(self) -> bool:
        # Subclasses may override to return False for worker-only processes.
        return True

    def load(self, filename) -> None:
        """Restore trainer (and worker, if present) state from a checkpoint."""
        if self.is_trainer():
            self.trainer.load(filename)
            if hasattr(self, "worker"):
                self.worker.load(filename)

    def log(self, *args) -> None:
        """Initialize the logger with the given sinks and emit run metadata."""
        if self.is_trainer():
            self.logger = logger.ListLogger(*args)
            self.logger.info({
                "envs_num": self.vec_num * self.workers_num,
                "device": self.trainer.device_info(),
                "env_type": self.env_type,
                "NN type": self.nn_name,
                "algo": self.trainer.meta,
                "env_name": self.env_id,
                "config": self.cnfg
            })

    def run(self, log_interval: int = 1):
        """Main training loop; implemented by subclasses."""
        pass

    def save_every(self, filename: str, frames_period: int) -> None:
        """Configure periodic checkpointing every frames_period frames."""
        if self.is_trainer():
            self.saver = Saver(filename, frames_period)

    def save(self, filename: str) -> None:
        """Save the trainer state to a checkpoint file immediately."""
        if self.is_trainer():
            self.trainer.save(filename)

    def _one_log(self,
                 lr_things: List[dict],
                 epinfobuf: Deque[dict],
                 nbatch: int,
                 tfirststart: float,
                 tstart: float, tnow: float, nupdates: int,
                 stepping_to_learning: float = None,
                 print_out: bool = True):
        """Aggregate one update's training metrics and buffered episode
        infos into a single record sent to the logger."""
        # Mean of every metric key across the per-minibatch result dicts.
        train_dict = {k: logger.safemean([dic[k] for dic in lr_things]) for k in lr_things[0]}
        # Estimated remaining wall-clock time from progress so far.
        etc = (tnow - tfirststart) * (self.timesteps / (self.nsteps * nupdates) - 1.)
        kvpairs = {
            "eplenmean": logger.safemean(numpy.asarray([epinfo['l'] for epinfo in epinfobuf]).reshape(-1)),
            "eprewmean": logger.safemean(numpy.asarray([epinfo['r'] for epinfo in epinfobuf]).reshape(-1)),
            "fps": int(nbatch / (tnow - tstart + 1e-20)),
            "misc/nupdates": nupdates,
            "misc/serial_timesteps": self.nsteps * nupdates,
            "misc/time_elapsed": str(datetime.timedelta(seconds=round(tnow - tfirststart))),
            "misc/total_timesteps": self.nsteps * nupdates * self.workers_num * self.vec_num,
            "misc/etc": str(datetime.timedelta(seconds=round(etc)))
        }
        kvpairs.update(train_dict)
        if stepping_to_learning is not None:
            kvpairs['misc/stepping_to_learning'] = stepping_to_learning
        self.logger(kvpairs, nupdates, print_out=print_out)

    def _one_run(self):
        """Collect nsteps of experience from the environment.

        Returns:
            (data, epinfos): the worker's experience() result for the final
            transition and the episode-info dicts seen during the rollout.
        """
        data = None
        epinfos = []
        for step in range(self.nsteps):
            action, pred_action, out = self.worker(self.state, self.done)
            nstate, reward, done, infos = self.env.step(action)
            self.done = done
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo:
                    epinfos.append(maybeepinfo)
            transition = {
                "state": self.state,
                "action": pred_action,
                "new_state": nstate,
                "reward": reward,
                "done": done
            }
            transition.update(out)
            data = self.worker.experience(transition)
            self.state = nstate
        return data, epinfos

    def __del__(self):
        # Close the vectorized environment when the runner is collected.
        self.env.close()
        del self.env
|
163638
|
from iotbot.decorators import equal_content
from iotbot.sugar import Text
# The three function names below must not be changed, otherwise they will not be invoked.
# All of them are optional; it is recommended to delete the unused ones to save resources.
@equal_content('middleware')
def receive_group_msg(ctx):
    """Debug handler for group messages whose content equals 'middleware':
    dumps the context object and sends ctx.master back as text."""
    print('------------------')
    print(dir(ctx))
    if hasattr(ctx, 'master'):
        print(ctx.master)
    print(type(ctx))
    # NOTE(review): ctx.master is accessed unconditionally here even though
    # the hasattr guard above suggests it can be absent — confirm the
    # middleware always sets it.
    Text(ctx.master)
|
163661
|
from iotool import io_factory
from model import build
from trainval import trainval
import ops
from flags import DGCNN_FLAGS
|
163731
|
import os
import json
import time
from copy import deepcopy
from datetime import date, datetime
from decimal import Decimal
from random import random, randint, choice
import stdnet
from stdnet.utils import test, zip, to_string, unichr, ispy3k, range
from stdnet.utils import date2timestamp
from stdnet.utils.populate import populate
from examples.models import Statistics, Statistics3, Role
class make_random(object):
    """Generator of (key, value) pairs forming a random nested dict, used
    to populate JSON fields in tests."""

    # Value-type distribution: 'dict' is three times as likely as the others.
    rtype = ['number','list',None] + ['dict']*3

    def __init__(self):
        self.count = 0

    def make(self, size = 5, maxsize = 10, nesting = 1, level = 0):
        """Yield (key, value) pairs; recurse into nested dicts until the
        `nesting` budget is exhausted. level 0 always produces dicts."""
        keys = populate(size = size)
        if level:
            # Include an empty-string key at nested levels.
            keys.append('')
        for key in keys:
            t = choice(self.rtype) if level else 'dict'
            if nesting and t == 'dict':
                yield key,dict(self.make(size = randint(0,maxsize),
                                         maxsize = maxsize,
                                         nesting = nesting - 1,
                                         level = level + 1))
            else:
                if t == 'list':
                    v = [random() for i in range(10)]
                elif t == 'number':
                    v = random()
                elif t == 'dict':
                    # nesting budget exhausted: fall back to a plain number.
                    v = random()
                else:
                    v = t
                yield key,v
class TestJsonField(test.TestCase):
models = [Statistics, Role]
def test_default(self):
models = self.mapper
a = Statistics(dt=date.today())
self.assertEqual(a.data, {})
yield models.add(a)
self.assertEqual(a.data, {})
a = yield models.statistics.get(id=a.id)
self.assertEqual(a.data, {})
def testMetaData(self):
field = Statistics._meta.dfields['data']
self.assertEqual(field.type,'json object')
self.assertEqual(field.index,False)
self.assertEqual(field.as_string,True)
def testCreate(self):
models = self.mapper
mean = Decimal('56.4')
started = date(2010,1,1)
timestamp = datetime.now()
a = yield models.statistics.new(dt=date.today(),
data={'mean': mean,
'std': 5.78,
'started': started,
'timestamp':timestamp})
self.assertEqual(a.data['mean'], mean)
a = yield models.statistics.get(id=a.id)
self.assertEqual(len(a.data), 4)
self.assertEqual(a.data['mean'], mean)
self.assertEqual(a.data['started'], started)
self.assertAlmostEqual(date2timestamp(a.data['timestamp']),
date2timestamp(timestamp), 5)
def testCreateFromString(self):
models = self.mapper
mean = 'mean'
timestamp = time.time()
data = {'mean': mean,
'std': 5.78,
'timestamp': timestamp}
datas = json.dumps(data)
a = yield models.statistics.new(dt=date.today(), data=datas)
a = yield models.statistics.get(id=a.id)
self.assertEqual(a.data['mean'], mean)
a = yield models.statistics.get(id=a.id)
self.assertEqual(len(a.data),3)
self.assertEqual(a.data['mean'],mean)
self.assertAlmostEqual(a.data['timestamp'], timestamp)
def test_default(self):
models = self.mapper
a = Statistics(dt=date.today())
self.assertEqual(a.data, {})
yield models.add(a)
self.assertEqual(a.data, {})
a = yield models.statistics.get(id=a.id)
self.assertEqual(a.data, {})
def testValueError(self):
models = self.mapper
a = models.statistics(dt=date.today(), data={'mean': self})
yield self.async.assertRaises(stdnet.FieldValueError, models.session().add, a)
self.assertTrue('data' in a._dbdata['errors'])
def testDefaultValue(self):
models = self.mapper
role = models.role(name='test')
self.assertEqual(role.permissions, [])
role.permissions.append('ciao')
role.permissions.append(4)
yield models.session().add(role)
self.assertTrue(role.id)
role = yield models.role.get(id=role.id)
self.assertEqual(role.permissions, ['ciao', 4])
class TestJsonFieldAsData(test.TestCase):
'''Test a model with a JSONField which expand as instance fields.
The `as_string` atttribute is set to ``False``.'''
model = Statistics3
def_data = {'mean': 1.0,
'std': 5.78,
'pv': 3.2,
'name': 'bla',
'dt': date.today()}
def_baddata = {'': 3.2,
'ts': {'a':[1,2,3,4,5,6,7],
'b':[10,11,12]},
'mean': {'1y':1.0,'2y':1.1},
'std': {'1y':4.0,'2y':5.1},
'dt': datetime.now()}
def_data2 = {'pv': {'':3.2,
'ts': {'a':[1,2,3,4,5,6,7],
'b':[10,11,12]},
'mean': {'1y':1.0,'2y':1.1},
'std': {'1y':4.0,'2y':5.1}},
'dt': datetime.now()}
def make(self, data=None, name=None):
data = data or self.def_data
name = name or self.data.random_string()
return self.model(name=name, data=data)
def testMeta(self):
field = self.model._meta.dfields['data']
self.assertFalse(field.as_string)
def testMake(self):
m = self.make()
self.assertTrue(m.is_valid())
data = m._dbdata['cleaned_data']
data.pop('data')
self.assertEqual(len(data), 6)
self.assertEqual(float(data['data__mean']), 1.0)
self.assertEqual(float(data['data__std']), 5.78)
self.assertEqual(float(data['data__pv']), 3.2)
def testGet(self):
models = self.mapper
session = models.session()
m = yield session.add(self.make())
m = yield models.statistics3.get(id=m.id)
self.assertEqual(m.data['mean'], 1.0)
self.assertEqual(m.data['std'], 5.78)
self.assertEqual(m.data['pv'], 3.2)
self.assertEqual(m.data['dt'], date.today())
self.assertEqual(m.data['name'], 'bla')
def testmakeEmptyError(self):
'''Here we test when we have a key which is empty.'''
models = self.mapper
session = models.session()
m = self.make(self.def_baddata)
self.assertFalse(m.is_valid())
yield self.async.assertRaises(stdnet.FieldValueError, session.add, m)
def testmakeEmpty(self):
models = self.mapper
session = models.session()
m = self.make(self.def_data2)
self.assertTrue(m.is_valid())
cdata = m._dbdata['cleaned_data']
self.assertEqual(len(cdata),10)
self.assertTrue('data' in cdata)
self.assertEqual(cdata['data__pv__mean__1y'],'1.0')
obj = yield session.add(m)
obj = yield models.statistics3.get(id=obj.id)
self.assertEqual(obj.data['dt'].date(), date.today())
self.assertEqual(obj.data__dt.date(), date.today())
self.assertEqual(obj.data['pv']['mean']['1y'], 1.0)
self.assertEqual(obj.data__pv__mean__1y, 1.0)
self.assertEqual(obj.data__dt.date(), date.today())
def testmakeEmpty2(self):
models = self.mapper
session = models.session()
m = self.make({'ts': [1,2,3,4]})
obj = yield models.add(m)
obj = yield models.statistics3.get(id=obj.id)
self.assertEqual(obj.data, {'ts': [1, 2, 3, 4]})
def __testFuzzySmall(self):
#TODO: This does not pass in pypy
models = self.mapper
session = models.session()
r = make_random()
data = dict(r.make(nesting = 0))
m = self.make(data)
self.assertTrue(m.is_valid())
cdata = m._dbdata['cleaned_data']
cdata.pop('data')
for k in cdata:
if k is not 'name':
self.assertTrue(k.startswith('data__'))
obj = yield session.add(m)
obj = yield models.statistics3.get(id=obj.id)
self.assertEqualDict(data, obj.data)
def __testFuzzyMedium(self):
#TODO: This does not pass in pypy
models = self.mapper
session = models.session()
r = make_random()
data = dict(r.make(nesting = 1))
m = self.make(data)
self.assertTrue(m.is_valid())
cdata = m._dbdata['cleaned_data']
cdata.pop('data')
for k in cdata:
if k is not 'name':
self.assertTrue(k.startswith('data__'))
obj = yield session.add(m)
#obj = self.model.objects.get(id=obj.id)
#self.assertEqualDict(data,obj.data)
def __testFuzzy(self):
#TODO: This does not pass in pypy
models = self.mapper
session = models.session()
r = make_random()
data = dict(r.make(nesting = 3))
m = self.make(deepcopy(data))
self.assertTrue(m.is_valid())
cdata = m._dbdata['cleaned_data']
cdata.pop('data')
for k in cdata:
if k is not 'name':
self.assertTrue(k.startswith('data__'))
obj = yield session.add(m)
#obj = self.model.objects.get(id=obj.id)
#self.assertEqualDict(data,obj.data)
def testEmptyDict(self):
models = self.mapper
session = models.session()
r = yield session.add(self.model(name='bla', data = {'bla':'ciao'}))
self.assertEqual(r.data, {'bla':'ciao'})
r.data = None
yield session.add(r)
r = yield models.statistics3.get(id=r.id)
self.assertEqual(r.data, {})
def testFromEmpty(self):
'''Test the change of a data jsonfield from empty to populated.'''
models = self.mapper
session = models.session()
r = yield models.statistics3.new(name = 'bla')
self.assertEqual(r.data, {})
r.data = {'bla':'ciao'}
yield session.add(r)
r = yield models.statistics3.get(id=r.id)
self.assertEqual(r.data, {'bla':'ciao'})
def assertEqualDict(self,data1,data2):
for k in list(data1):
v1 = data1.pop(k)
v2 = data2.pop(k,{})
if isinstance(v1,dict):
self.assertEqualDict(v1,v2)
else:
self.assertAlmostEqual(v1,v2)
self.assertFalse(data1)
self.assertFalse(data2)
|
163788
|
from itertools import chain
from typing import NamedTuple, Dict, Sequence, List, Set, Type, Union, TYPE_CHECKING, Tuple
from lightbus.exceptions import TransportNotFound, TransportsNotInstalled
from lightbus.transports.pool import TransportPool
from lightbus.utilities.importing import load_entrypoint_classes
# Sentinel default for the ``default=`` parameters below, so callers can still
# pass ``None`` as an explicit default value.  NOTE(review): this binds the
# NamedTuple *class* itself (not an instance); comparisons use ``==`` against
# the class object. It works, but a plain ``object()`` sentinel would be
# clearer — confirm before changing.
empty = NamedTuple("Empty")

if TYPE_CHECKING:
    # pylint: disable=unused-import,cyclic-import
    from lightbus.config import Config
    from lightbus.transports import (
        RpcTransport,
        ResultTransport,
        EventTransport,
        SchemaTransport,
        Transport,
    )

# Type aliases for pools of each transport flavour. The string generic
# arguments keep these lines valid at runtime, when the transport classes
# above are not actually imported.
EventTransportPoolType = TransportPool["EventTransport"]
RpcTransportPoolType = TransportPool["RpcTransport"]
ResultTransportPoolType = TransportPool["ResultTransport"]
SchemaTransportPoolType = TransportPool["SchemaTransport"]
AnyTransportPoolType = Union[
    EventTransportPoolType, RpcTransportPoolType, ResultTransportPoolType, SchemaTransportPoolType
]
class TransportRegistry:
    """ Manages access to transports

    It is possible for different APIs within lightbus to use different transports.

    This registry handles the logic of loading the transports for a given
    configuration. Thereafter, it provides access to these transports based on
    a given API.

    The 'default' API is a special case as it is fallback transport for
    any APIs that do not have their own specific transports configured.
    """

    class _RegistryEntry(NamedTuple):
        # Transport pools registered for a single API; unset slots are None.
        rpc: RpcTransportPoolType = None
        result: ResultTransportPoolType = None
        event: EventTransportPoolType = None
        schema_transport: TransportPool = None

    def __init__(self):
        self._registry: Dict[str, TransportRegistry._RegistryEntry] = {}
        # Bug fix: previously ``schema_transport`` was only ever assigned in
        # load_config()/set_schema_transport(), so reading it before either
        # ran raised AttributeError instead of the intended TransportNotFound
        # (see get_schema_transport) and broke has_schema_transport().
        self.schema_transport: TransportPool = None

    def load_config(self, config: "Config") -> "TransportRegistry":
        """Instantiate and register transport pools for every API in *config*."""
        # For every configured API...
        for api_name, api_config in config.apis().items():
            # ...and for each type of transport...
            for transport_type in ("event", "rpc", "result"):
                # ...get the transport config...
                transport_selector = getattr(api_config, f"{transport_type}_transport")
                transport_config = self._get_transport_config(transport_selector)
                # ... and use it to create the transport.
                if transport_config:
                    transport_name, transport_config = transport_config
                    transport_class = get_transport(type_=transport_type, name=transport_name)
                    self._set_transport(
                        api_name, transport_class, transport_type, transport_config, config
                    )
        # Schema transport
        transport_config = self._get_transport_config(config.bus().schema.transport)
        if transport_config:
            transport_name, transport_config = transport_config
            transport_class = get_transport(type_="schema", name=transport_name)
            self.schema_transport = self._instantiate_transport_pool(
                transport_class, transport_config, config
            )
        return self

    def _get_transport_config(self, transport_selector):
        """Return the first (name, config) pair set on *transport_selector*, if any."""
        if transport_selector:
            for transport_name in transport_selector._fields:
                transport_config = getattr(transport_selector, transport_name)
                if transport_config is not None:
                    return transport_name, transport_config

    def _instantiate_transport_pool(
        self, transport_class: Type["Transport"], transport_config: NamedTuple, config: "Config"
    ):
        """Wrap *transport_class* in a lazily-instantiating TransportPool."""
        transport_pool = TransportPool(
            transport_class=transport_class, transport_config=transport_config, config=config
        )
        return transport_pool

    def _set_transport(
        self,
        api_name: str,
        transport_class: Type["Transport"],
        transport_type: str,
        transport_config: NamedTuple,
        config: "Config",
    ):
        """Set the transport pool for a specific API"""
        # Imported here to avoid a circular import at module load time.
        from lightbus.transports import Transport

        assert issubclass(
            transport_class, Transport
        ), f"Must be a subclass for Transport, was {transport_class}"
        self._registry.setdefault(api_name, self._RegistryEntry())
        transport_pool = self._instantiate_transport_pool(transport_class, transport_config, config)
        self._registry[api_name] = self._registry[api_name]._replace(
            **{transport_type: transport_pool}
        )

    def _get_transport_pool(
        self, api_name: str, transport_type: str, default=empty
    ) -> AnyTransportPoolType:
        """Resolve the pool for *api_name*, falling back to the 'default' API.

        Raises TransportNotFound when nothing matches and no *default* was given.
        """
        # Get the registry entry for this API (if any)
        registry_entry = self._registry.get(api_name)
        api_transport = None
        # If we have a registry entry for this API, then get the transport for it
        if registry_entry:
            api_transport = getattr(registry_entry, transport_type)
        # Otherwise get the transport for the default API (which is always our fallback)
        # (but don't bother if they have explicity asked for the default_api, as if they
        # have then we've already failed to get that in the previous step)
        if not api_transport and api_name != "default":
            try:
                api_transport = self._get_transport_pool("default", transport_type)
            except TransportNotFound:
                pass
        # If we STILL don't have a transport then show a sensible error
        if not api_transport and default == empty:
            raise TransportNotFound(
                f"No {transport_type} transport found for API '{api_name}'. Neither was a default "
                f"API transport found. Either specify a {transport_type} transport for this specific API, "
                f"or specify a default {transport_type} transport. In most cases setting a default transport "
                f"is the best course of action."
            )
        else:
            return api_transport

    def _get_transport_pools(
        self, api_names: Sequence[str], transport_type: str
    ) -> Dict[AnyTransportPoolType, List[str]]:
        """Group *api_names* by the pool that serves each of them."""
        apis_by_transport: Dict[AnyTransportPoolType, List[str]] = {}
        for api_name in api_names:
            transport = self._get_transport_pool(api_name, transport_type)
            apis_by_transport.setdefault(transport, [])
            apis_by_transport[transport].append(api_name)
        return apis_by_transport

    def _has_transport(self, api_name: str, transport_type: str) -> bool:
        """True when a pool (specific or default) exists for this API."""
        try:
            self._get_transport_pool(api_name, transport_type)
        except TransportNotFound:
            return False
        else:
            return True

    def set_rpc_transport(
        self,
        api_name: str,
        transport_class: Type["RpcTransport"],
        transport_config: NamedTuple,
        config: "Config",
    ):
        self._set_transport(api_name, transport_class, "rpc", transport_config, config)

    def set_result_transport(
        self,
        api_name: str,
        transport_class: Type["ResultTransport"],
        transport_config: NamedTuple,
        config: "Config",
    ):
        self._set_transport(api_name, transport_class, "result", transport_config, config)

    def set_event_transport(
        self,
        api_name: str,
        transport_class: Type["EventTransport"],
        transport_config: NamedTuple,
        config: "Config",
    ):
        self._set_transport(api_name, transport_class, "event", transport_config, config)

    def set_schema_transport(
        self,
        transport_class: Type["SchemaTransport"],
        transport_config: NamedTuple,
        config: "Config",
    ):
        self.schema_transport = self._instantiate_transport_pool(
            transport_class, transport_config, config
        )

    def get_rpc_transport(self, api_name: str, default=empty) -> RpcTransportPoolType:
        return self._get_transport_pool(api_name, "rpc", default=default)

    def get_result_transport(self, api_name: str, default=empty) -> ResultTransportPoolType:
        return self._get_transport_pool(api_name, "result", default=default)

    def get_event_transport(self, api_name: str, default=empty) -> EventTransportPoolType:
        return self._get_transport_pool(api_name, "event", default=default)

    def get_all_rpc_transports(self) -> Set[RpcTransportPoolType]:
        return {t.rpc for t in self._registry.values() if t.rpc}

    def get_all_result_transports(self) -> Set[ResultTransportPoolType]:
        return {t.result for t in self._registry.values() if t.result}

    def get_all_event_transports(self) -> Set[EventTransportPoolType]:
        return {t.event for t in self._registry.values() if t.event}

    def get_schema_transport(self, default=empty) -> SchemaTransportPoolType:
        if self.schema_transport or default != empty:
            return self.schema_transport or default
        else:
            # TODO: Link to docs
            raise TransportNotFound(
                "No schema transport is configured for this bus. Check your schema transport "
                "configuration is setup correctly (config section: bus.schema.transport)."
            )

    def has_rpc_transport(self, api_name: str) -> bool:
        return self._has_transport(api_name, "rpc")

    def has_result_transport(self, api_name: str) -> bool:
        return self._has_transport(api_name, "result")

    def has_event_transport(self, api_name: str) -> bool:
        return self._has_transport(api_name, "event")

    def has_schema_transport(self) -> bool:
        return bool(self.schema_transport)

    def get_rpc_transports(self, api_names: Sequence[str]) -> Dict[RpcTransportPoolType, List[str]]:
        """Get a mapping of transports to lists of APIs

        This is useful when multiple APIs can be served by a single transport
        """
        return self._get_transport_pools(api_names, "rpc")

    def get_event_transports(
        self, api_names: Sequence[str]
    ) -> Dict[EventTransportPoolType, List[str]]:
        """Get a mapping of transports to lists of APIs

        This is useful when multiple APIs can be served by a single transport
        """
        return self._get_transport_pools(api_names, "event")

    def get_all_transports(self) -> Set[AnyTransportPoolType]:
        """Get a set of all transports irrespective of type"""
        all_transports = chain(*[entry._asdict().values() for entry in self._registry.values()])
        return set([t for t in all_transports if t is not None])
def get_available_transports(type_):
    """Return a ``{name: class}`` mapping of installed transports for *type_*.

    Raises TransportsNotInstalled when no entrypoints are registered, which
    normally indicates a broken (development) install of lightbus itself.
    """
    entries = load_entrypoint_classes(f"lightbus_{type_}_transports")
    if entries:
        return {name: class_ for module_name, name, class_ in entries}
    raise TransportsNotInstalled(
        f"No {type_} transports are available, which means lightbus has not been "
        f"installed correctly. This is likely because you are working on Lightbus itself. "
        f"In which case, within your local lightbus repo you should run "
        f"something like 'pip install .' or 'python setup.py develop'.\n\n"
        f"This will install the entrypoints (defined in setup.py) which point Lightbus "
        f"to it's bundled transports."
    )
def get_transport(type_, name):
    """Return the transport class registered as *name* for transport *type_*.

    Raises TransportNotFound when no installed transport matches.
    """
    # Direct dict lookup instead of scanning every (name, class) pair.
    transport_class = get_available_transports(type_).get(name)
    if transport_class is not None:
        return transport_class
    raise TransportNotFound(
        f"No '{type_}' transport found named '{name}'. Check the transport is installed and "
        f"has the relevant entrypoints setup in it's setup.py file. Or perhaps "
        f"you have a typo in your config file."
    )
def get_transport_name(cls: Type[AnyTransportPoolType]):
    """Return the entrypoint name under which transport class *cls* is registered.

    Raises TransportNotFound when *cls* appears in no entrypoint group.
    """
    for kind in ("rpc", "result", "event"):
        entries = load_entrypoint_classes(f"lightbus_{kind}_transports")
        for *_, entry_name, entry_class in entries:
            if entry_class == cls:
                return entry_name
    raise TransportNotFound(
        f"Transport class {cls.__module__}.{cls.__name__} is not specified in any entrypoint."
    )
|
163815
|
from django.test import TestCase
from zentral.core.events.utils import decode_args, encode_args
class EventUtilsTestCase(TestCase):
    """Round-trip tests for the pipe-delimited argument encoding helpers."""

    def test_decode_args(self):
        cases = (
            ("", [""]),
            (r"\\a", [r"\a"]),
            (r"|1|2|3||", ["", "1", "2", "3", "", ""]),
            (r"a\|bc|d\\e", ["a|bc", r"d\e"]),
        )
        for encoded, decoded in cases:
            self.assertEqual(decode_args(encoded), decoded)

    def test_encode_args(self):
        # Note: non-string items (1, 3) must be stringified by encode_args.
        cases = (
            ("", [""]),
            (r"\\a", [r"\a"]),
            (r"|1|2|3||", ["", 1, "2", 3, "", ""]),
            (r"a\|bc|d\\e", ["a|bc", r"d\e"]),
        )
        for encoded, decoded in cases:
            self.assertEqual(encode_args(decoded), encoded)
|
163834
|
import asyncio
from datetime import timedelta
from typing import List
from protoactor.actor.actor_context import AbstractContext, GlobalRootContext
from protoactor.actor.event_stream import GlobalEventStream
from protoactor.actor.props import Props
from protoactor.mailbox.dispatcher import Dispatchers
from protoactor.remote.remote import Remote
from protoactor.remote.serialization import Serialization
from protoactor.cluster.member_status import AbstractMemberStatusValue, AbstractMemberStatusValueSerializer, \
MemberStatus
from protoactor.cluster.member_status_events import ClusterTopologyEvent
from protoactor.cluster.providers.abstract_cluster_provider import AbstractClusterProvider
from protoactor.cluster.providers.single_remote_instance.protos_pb2 import GetKinds, GetKindsResponse, DESCRIPTOR
class SingleRemoteInstanceProvider(AbstractClusterProvider):
    """Cluster provider for a topology with exactly one remote server node.

    The server instance answers ``GetKinds`` requests via a well-known actor
    (``remote_kinds_responder``); every node periodically polls the server and
    publishes the resulting ``ClusterTopologyEvent`` on the global event stream.
    """
    def __init__(self, server_host: str, server_port: int):
        self._kinds_responder = 'remote_kinds_responder'
        self._timeout = timedelta(seconds=10)
        self._server_host = server_host
        self._server_port = server_port
        self._server_address = '%s:%s' % (server_host, str(server_port))
        self._kinds = []
        self._ok_status = None
        self._ko_status = None
        self._is_server = None
        # None until shutdown() is called; the monitor loop treats any
        # truthy value as a stop signal.
        self._shutdown = None

        # Actor that answers GetKinds requests with this node's kind list.
        async def fn(ctx: AbstractContext):
            if isinstance(ctx.message, GetKinds) and ctx.sender is not None:
                await ctx.respond(GetKindsResponse(kinds=self._kinds))

        props = Props.from_func(fn)
        Serialization().register_file_descriptor(DESCRIPTOR)
        Remote().register_known_kind(self._kinds_responder, props)

    async def register_member_async(self, cluster_name: str, host: str, port: int, kinds: List[str],
                                    status_value: AbstractMemberStatusValue,
                                    serializer: AbstractMemberStatusValueSerializer) -> None:
        # Record our kinds and pre-build the two status payloads; a node is
        # "the server" when its host:port matches the configured server.
        self._kinds = kinds
        self._ok_status = serializer.from_value_bytes('Ok!'.encode())
        self._ko_status = serializer.from_value_bytes('Ko!'.encode())
        self._is_server = host == self._server_host and port == self._server_port

    async def deregister_member_async(self) -> None:
        # Nothing to clean up: there is no external membership store.
        pass

    def monitor_member_status_changes(self) -> None:
        # Poll the server in a background task until shutdown() is called.
        async def fn():
            while not self._shutdown:
                await self.__notify_statuses()

        Dispatchers().default_dispatcher.schedule(fn)

    async def update_member_status_value_async(self, status_value: AbstractMemberStatusValue) -> None:
        pass

    def shutdown(self) -> None:
        self._shutdown = True

    async def __notify_statuses(self):
        """Query the server's kinds (or report our own, if we are the server)
        and publish a ClusterTopologyEvent; repeats roughly every 60 seconds.
        """
        status = None
        if self._is_server:
            status = MemberStatus(self._server_address, self._server_host, self._server_port, self._kinds, True,
                                  self._ok_status)
        else:
            # Spawn (or reuse) the responder on the server and ask its kinds.
            responder = await Remote().spawn_named_async(self._server_address, self._kinds_responder,
                                                         self._kinds_responder, self._timeout)
            if responder.pid is not None:
                try:
                    response = await GlobalRootContext.request_future(responder.pid, GetKinds(), self._timeout)
                    status = MemberStatus(self._server_address, self._server_host, self._server_port, response.kinds,
                                          True, self._ok_status)
                except TimeoutError:
                    # Server reachable but unresponsive: alive with KO status.
                    status = MemberStatus(self._server_address, self._server_host, self._server_port, [], True,
                                          self._ko_status)
            else:
                # Could not spawn the responder: report the server as down.
                status = MemberStatus(self._server_address, self._server_host, self._server_port, [], False,
                                      self._ko_status)

        event = ClusterTopologyEvent([status])
        await GlobalEventStream.publish(event)
        await asyncio.sleep(60)
|
163844
|
import tensorflow
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import RMSprop
# Load the raw MNIST digits (60k train / 10k test, 28x28 grayscale).
(mnist_train_images, mnist_train_labels), (mnist_test_images, mnist_test_labels) = mnist.load_data()
from tensorflow.keras import backend as K
# Keras backends differ on whether the channel axis comes first or last;
# reshape the images to the layout the active backend expects.
if K.image_data_format() == 'channels_first':
    train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 1, 28, 28)
    test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 1, 28, 28)
    input_shape = (1, 28, 28)
else:
    train_images = mnist_train_images.reshape(mnist_train_images.shape[0], 28, 28, 1)
    test_images = mnist_test_images.reshape(mnist_test_images.shape[0], 28, 28, 1)
    input_shape = (28, 28, 1)
# Scale pixel values from [0, 255] into [0, 1].
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
train_images /= 255
test_images /= 255
# One-hot encode the 0-9 digit labels.
train_labels = tensorflow.keras.utils.to_categorical(mnist_train_labels, 10)
test_labels = tensorflow.keras.utils.to_categorical(mnist_test_labels, 10)
import matplotlib.pyplot as plt
def display_sample(num):
    """Show training sample *num*: its one-hot label, integer label and image."""
    #Print the one-hot array of this sample's label
    print(train_labels[num])
    #Print the label converted back to a number
    label = train_labels[num].argmax(axis=0)
    #Reshape the 784 values to a 28x28 image (original comment said 768)
    image = train_images[num].reshape([28,28])
    plt.title('Sample: %d Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()
# Sanity-check the data pipeline by displaying one sample.
display_sample(1234)
# Small convnet: two conv layers, max-pooling, dropout, dense classifier.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
# 64 3x3 kernels
model.add(Conv2D(64, (3, 3), activation='relu'))
# Reduce by taking the max of each 2x2 block
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout to avoid overfitting
model.add(Dropout(0.25))
# Flatten the results to one dimension for passing into our final layer
model.add(Flatten())
# A hidden layer to learn with
model.add(Dense(128, activation='relu'))
# Another dropout
model.add(Dropout(0.5))
# Final categorization from 0-9 with softmax
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
"""This could take hours to run, and your computer's CPU will be maxed out
during that time! Don't run the next block unless you can tie up your computer
for a long time. It will print progress as each epoch is run, but each epoch '
can take around 20 minutes."""
history = model.fit(train_images, train_labels,
                    batch_size=32,
                    epochs=10,
                    verbose=2,
                    validation_data=(test_images, test_labels))
# Final evaluation on the held-out test set.
score = model.evaluate(test_images, test_labels, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
163872
|
import re
import subprocess
import sys
from typing import Optional, List
from platypush.message.response.ping import PingResponse
from platypush.plugins import Plugin, action
PING_MATCHER = re.compile(
r"(?P<min>\d+.\d+)/(?P<avg>\d+.\d+)/(?P<max>\d+.\d+)/(?P<mdev>\d+.\d+)"
)
PING_MATCHER_BUSYBOX = re.compile(
r"(?P<min>\d+.\d+)/(?P<avg>\d+.\d+)/(?P<max>\d+.\d+)"
)
WIN32_PING_MATCHER = re.compile(r"(?P<min>\d+)ms.+(?P<max>\d+)ms.+(?P<avg>\d+)ms")
class PingPlugin(Plugin):
    """
    Perform ICMP network ping on remote hosts.
    """

    def __init__(self, executable: str = 'ping', count: int = 1, timeout: float = 5.0, **kwargs):
        """
        :param executable: Path to the ``ping`` executable. Default: the first ``ping`` executable found in PATH.
        :param count: Default number of packets that should be sent (default: 1).
        :param timeout: Default timeout before failing a ping request (default: 5 seconds).
        """
        super().__init__(**kwargs)
        self.executable = executable
        self.count = count
        self.timeout = timeout

    def _get_ping_cmd(self, host: str, count: int, timeout: float) -> List[str]:
        """Build the platform-specific ``ping`` command line for *host*."""
        if sys.platform == 'win32':
            return [
                self.executable,
                '-n',
                str(count or self.count),
                # Bug fix: Windows ``ping -w`` expects an integer number of
                # milliseconds; the original passed a float string such as
                # "5000.0", which ping rejects.
                '-w',
                str(int((timeout or self.timeout) * 1000)),
                host,
            ]
        # POSIX: -n numeric output, -q quiet summary.
        return [
            self.executable,
            '-n',
            '-q',
            '-c',
            str(count or self.count),
            '-W',
            str(timeout or self.timeout),
            host,
        ]

    @action
    def ping(self, host: str, count: Optional[int] = None, timeout: Optional[float] = None) -> PingResponse:
        """
        Ping a remote host.

        :param host: Remote host IP or name
        :param count: Number of packets that should be sent (default: 1).
        :param timeout: Timeout before failing a ping request (default: 5 seconds).
        """
        count = count or self.count
        timeout = timeout or self.timeout
        pinger = subprocess.Popen(
            self._get_ping_cmd(host, count=count, timeout=timeout),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        try:
            # Bug fix: ``communicate`` returns a (stdout, stderr) tuple of
            # bytes. The original stringified the whole tuple, so the
            # "\n" split operated on escaped "\n" sequences in the bytes
            # repr rather than real newlines. Decode stdout and search the
            # full output instead.
            stdout, _ = pinger.communicate()
            out = stdout.decode(errors='ignore')
            if sys.platform == "win32":
                match = WIN32_PING_MATCHER.search(out)
                min_val, avg_val, max_val = match.groups()
                mdev_val = None
            elif "max/" not in out:
                # BusyBox ping does not report mdev.
                match = PING_MATCHER_BUSYBOX.search(out)
                min_val, avg_val, max_val = match.groups()
                mdev_val = None
            else:
                match = PING_MATCHER.search(out)
                min_val, avg_val, max_val, mdev_val = match.groups()
            return PingResponse(host=host, success=True, min=min_val, max=max_val, avg=avg_val, mdev=mdev_val)
        except (subprocess.CalledProcessError, AttributeError):
            # AttributeError: one of the regexes found no match (host down
            # or unexpected output format).
            return PingResponse(host=host, success=False)
# vim:sw=4:ts=4:et:
|
163878
|
from goco import Goco
# Connect to the Google Drive API (read-only scope) using the OAuth client
# secret, then list and print the files visible to the account.
GoogleApi = Goco('client_secret.json')
MyDrive = GoogleApi.connect(scope='drive.readonly', service_name='drive', version='v3')
Files = MyDrive.files().list()
print(Files.execute())
# Keep the console window open until the user presses Enter.
input()
|
163933
|
from rest_framework import status
from rest_framework.reverse import reverse
from resource_tracker.models import ResourceGroupTextAttributeDefinition
from tests.test_resource_tracker.test_api.base_test_api import BaseTestAPI
class TestTextAttributeDefinitionDelete(BaseTestAPI):
    """API tests for deleting a resource-group text attribute definition."""

    def setUp(self):
        super(TestTextAttributeDefinitionDelete, self).setUp()
        self.to_be_deleted_id = self.rg_physical_servers_description.id
        self.url = reverse(
            'api_text_attribute_definition_retrieve_update_delete',
            args=[self.rg_physical_servers.id, self.rg_physical_servers_description.id],
        )

    def _definition_exists(self):
        # Shared existence query used by both assertions below.
        return ResourceGroupTextAttributeDefinition.objects.filter(id=self.to_be_deleted_id).exists()

    def test_text_attribute_definition_delete(self):
        self.assertTrue(self._definition_exists())
        resp = self.client.delete(self.url, format='json')
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        self.assertFalse(self._definition_exists())

    def test_cannot_delete_text_attribute_definition_when_wrong_rg(self):
        # Deleting through a URL scoped to a different resource group is a 404.
        wrong_url = reverse(
            'api_text_attribute_definition_retrieve_update_delete',
            args=[self.rg_ocp_projects.id, self.rg_physical_servers_description.id],
        )
        resp = self.client.delete(wrong_url, format='json')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
|
163939
|
import unittest
from fluentcheck.classes import Check
from fluentcheck.exceptions import CheckError
class TestSequencesAssertions(unittest.TestCase):
    """Tests for fluentcheck's sequence-related assertions.

    Bug fix: ``test_is_triplet`` asserted ``res`` twice; the second
    assertion now checks ``res2`` as intended.
    """

    def test_is_empty(self):
        res = Check('').is_empty()
        self.assertIsInstance(res, Check)
        res2 = Check([]).is_empty()
        self.assertIsInstance(res2, Check)
        res3 = Check(()).is_empty()
        self.assertIsInstance(res3, Check)
        with self.assertRaises(CheckError):
            Check('abc').is_empty()

    def test_is_not_empty(self):
        res = Check('123').is_not_empty()
        self.assertIsInstance(res, Check)
        with self.assertRaises(CheckError):
            Check([]).is_not_empty()

    def test_is_iterable(self):
        res = Check(range(6)).is_iterable()
        self.assertIsInstance(res, Check)
        res2 = Check([1, 2, 3]).is_iterable()
        self.assertIsInstance(res2, Check)
        with self.assertRaises(CheckError):
            Check(8).is_iterable()

    def test_is_not_iterable(self):
        res = Check(1).is_not_iterable()
        self.assertIsInstance(res, Check)
        with self.assertRaises(CheckError):
            Check([1, 2, 3]).is_not_iterable()

    def test_is_couple(self):
        res = Check([1, 2]).is_couple()
        self.assertIsInstance(res, Check)
        res2 = Check(('1', '2')).is_couple()
        self.assertIsInstance(res2, Check)
        with self.assertRaises(CheckError):
            Check([1, 2, 3]).is_couple()

    def test_is_triplet(self):
        res = Check([1, 2, 3]).is_triplet()
        self.assertIsInstance(res, Check)
        res2 = Check({'a': 1, 'b': 2, 'c': 3}).is_triplet()
        # Fixed copy-paste bug: this previously re-asserted ``res``.
        self.assertIsInstance(res2, Check)
        with self.assertRaises(CheckError):
            Check([1, 2]).is_triplet()

    def test_is_nuple(self):
        obj = 1, 2, 3, 4, 5
        res = Check(obj).is_nuple(5)
        self.assertIsInstance(res, Check)
        with self.assertRaises(CheckError):
            Check((1, 2)).is_nuple(4)

    def test_has_dimensionality(self):
        obj = [[1, 2], [3, 4]]
        res = Check(obj).has_dimensionality(2)
        self.assertIsInstance(res, Check)
        obj = [1, 2, 3]
        with self.assertRaises(CheckError):
            Check(obj).has_dimensionality(3)

    def test_is_tuple(self):
        res = Check(('a', 'b', 'c')).is_tuple()
        self.assertIsInstance(res, Check)
        res2 = Check((1, (1, 2), 2)).is_tuple()
        self.assertIsInstance(res2, Check)
        with self.assertRaises(CheckError):
            Check([]).is_tuple()

    def test_is_list(self):
        res = Check([10, 9, 8]).is_list()
        self.assertIsInstance(res, Check)
        res2 = Check([]).is_list()
        self.assertIsInstance(res2, Check)
        with self.assertRaises(CheckError):
            Check((1, 2)).is_list()
|
163954
|
from .misc_metrics import (
compute_num_spikes,
compute_firing_rate,
compute_presence_ratio,
compute_snrs,
compute_isi_violations,
compute_amplitudes_cutoff,
)
from .pca_metrics import calculate_pc_metrics, _possible_pc_metric_names
# PCA-based metrics ("isolation_distance", "l_ratio", "d_prime",
# "nn_hit_rate", "nn_miss_rate") are implemented in pca_metrics.py;
# the metrics mapped below are implemented in misc_metrics.py.
# Map each quality-metric name to the function that computes it.
_metric_name_to_func = {
    "num_spikes": compute_num_spikes,
    "firing_rate": compute_firing_rate,
    "presence_ratio": compute_presence_ratio,
    "snr": compute_snrs,
    "isi_violation": compute_isi_violations,
    "amplitude_cutoff": compute_amplitudes_cutoff,
}
# TODO
# @Cole @ Alessio
# "silhouette_score",
# "noise_overlap",
# "max_drift",
# "cumulative_drift",
|
163956
|
class DeviceSamples(object):
    """Canned scanner event payloads used as test fixtures.

    Each attribute mirrors the document a given scan program emits
    (gpsd, geoip, gsm_modem, kalibrate), so tests can exercise the
    processing pipeline with realistic events and no hardware.

    Fix: the original used Python 2 leading-zero integer literals
    (``05``, ``00``, ``0000``), which are SyntaxErrors in Python 3.
    They are normalized to ``5`` / ``0`` — identical values in
    Python 2, now also valid Python 3.
    """

    # gpsd fix at location A (San Francisco coordinates).
    gps_device_loc_a = {"scan_program": "gpsd",
                        "event_type": "gps_scan",
                        "site_name": "test_site",
                        "sensor_id": "test_sensor_id",
                        "sensor_name": "test_sensor",
                        "type": "Feature",
                        "sat_time": "2017-03-25T00:30:48.000Z",
                        "time_drift": 2,
                        "sys_time": "2017-03-25T00:32:48.416592",
                        "event_timestamp": "2016-05-07 04:10:35",
                        "location": {
                            "type": "Point",
                            "coordinates": [-122.431297, 37.773972]}}

    # Same gpsd payload, but at a different location (B).
    gps_device_loc_b = {"scan_program": "gpsd",
                        "event_type": "gps_scan",
                        "site_name": "test_site",
                        "sensor_id": "test_sensor_id",
                        "sensor_name": "test_sensor",
                        "type": "Feature",
                        "sat_time": "2017-03-25T00:30:48.000Z",
                        "time_drift": 2,
                        "sys_time": "2017-03-25T00:32:48.416592",
                        "event_timestamp": "2016-05-07 04:10:35",
                        "location": {
                            "type": "Point",
                            "coordinates": [-100.431297, 32.773972]}}

    # GeoIP lookup result matching gps_device_loc_a's coordinates.
    geoip_loc_a = {"scan_program": "geoip",
                   "event_type": "geoip_scan",
                   "event_timestamp": "2016-05-07 04:10:35",
                   "type": "Feature",
                   "location": {
                       "type": "Point",
                       "coordinates": [-122.431297, 37.773972]}}

    # GeoIP lookup result matching gps_device_loc_b's coordinates.
    geoip_loc_b = {"scan_program": "geoip",
                   "event_type": "geoip_scan",
                   "event_timestamp": "2016-05-07 04:10:35",
                   "type": "Feature",
                   "location": {
                       "type": "Point",
                       "coordinates": [-100.431297, 32.773972]}}

    # GSM modem scan: serving cell (cell 0) plus six neighbor slots.
    # NOTE(review): 'lac' appears as int 0 in some entries but string
    # '0000' in others — presumably raw modem output; confirm with the
    # parser before normalizing the types.
    gsm_modem_1 = {"platform": "PLATFORM-NAME",
                   "event_type": "gsm_modem_scan",
                   "scan_results": [
                       {'bsic': '12', 'mcc': '310', 'rla': 0, 'lac': '178d',
                        'mnc': '411', 'txp': 5, 'rxl': 33, 'cell': 0,
                        'rxq': 0, 'ta': 255, 'cellid': '000f', 'arfcn': 154},
                       {'cell': 1, 'rxl': 20, 'lac': '178d', 'bsic': '30',
                        'mnc': '411', 'mcc': '310', 'cellid': '0010',
                        'arfcn': 128},
                       {'cell': 2, 'rxl': 10, 'lac': '178d', 'bsic': '00',
                        'mnc': '411', 'mcc': '310', 'cellid': '76e2',
                        'arfcn': 179},
                       {'cell': 3, 'rxl': 10, 'lac': '178d', 'bsic': '51',
                        'mnc': '411', 'mcc': '310', 'cellid': '1208',
                        'arfcn': 181},
                       {'cell': 4, 'rxl': 31, 'lac': 0, 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
                       {'cell': 5, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
                       {'cell': 6, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 236}
                   ],
                   "scan_start": "",
                   "scan_finish": "2016-05-07 02:36:50",
                   "event_timestamp": '2016-05-07 04:10:35',
                   "scan_program": "gsm_modem",
                   "site_name": "test_site",
                   "sensor_id": "test_sensor_id",
                   "sensor_name": "test_sensor",
                   "scanner_public_ip": "172.16.17.32",
                   "band": "GSM850_MODE"}

    # This one triggers a no-neighbor alert: every neighbor slot is the
    # empty/placeholder cell ('ffff' cellid, blank mcc/mnc).
    gsm_modem_2 = {"platform": "PLATFORM-NAME",
                   "event_type": "gsm_modem_scan",
                   "scan_results": [
                       {'bsic': '12', 'mcc': '310', 'rla': 0, 'lac': '178d',
                        'mnc': '411', 'txp': 5, 'rxl': 33, 'cell': 0,
                        'rxq': 0, 'ta': 255, 'cellid': '000f', 'arfcn': 154},
                       {'cell': 1, 'rxl': 31, 'lac': 0, 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
                       {'cell': 2, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
                       {'cell': 3, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 181},
                       {'cell': 4, 'rxl': 31, 'lac': 0, 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
                       {'cell': 5, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
                       {'cell': 6, 'rxl': 23, 'lac': '0000', 'bsic': '00',
                        'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 236}
                   ],
                   "scan_start": "",
                   "scan_finish": "2016-05-07 02:36:50",
                   "event_timestamp": '2016-05-07 04:10:35',
                   "scan_program": "gsm_modem",
                   "site_name": "test_site",
                   "sensor_id": "test_sensor_id",
                   "sensor_name": "test_sensor",
                   "scanner_public_ip": "172.16.17.32",
                   "band": "GSM850_MODE"}

    # Kalibrate RF channel scan over the GSM-850 band.
    kal_scan_1 = {'platform': 'PLATFORM-NAME',
                  'event_type': 'kalibrate_scan',
                  'scan_finish': '2016-05-07 04:14:30',
                  'site_name': 'SITE_NAME',
                  'scanner_public_ip': '0.0.0.0',
                  'sensor_name': 'SENSOR_NAME',
                  'sensor_id': 'SENSOR_ID',
                  'scan_results': [
                      {'channel_detect_threshold': '279392.605625',
                       'power': '5909624.47', 'final_freq': '869176168',
                       'mod_freq': 23832.0, 'band': 'GSM-850',
                       'sample_rate': '270833.002142', 'gain': '80.0',
                       'base_freq': 869200000.0, 'device':
                       '0: Generic RTL2832U OEM', 'modifier': '-',
                       'channel': '12'},  # This should not be in the feed DB
                      {'channel_detect_threshold': '279392.605625',
                       'power': '5909624.47', 'final_freq': '869176168',
                       'mod_freq': 23832.0, 'band': 'GSM-850',
                       'sample_rate': '270833.002142', 'gain': '80.0',
                       'base_freq': 869200000.0, 'device':
                       '0: Generic RTL2832U OEM', 'modifier': '-',
                       'channel': '128'},
                      {'channel_detect_threshold': '279392.605625',
                       'power': '400160.02', 'final_freq': '874376406',
                       'mod_freq': 23594.0, 'band': 'GSM-850',
                       'sample_rate': '270833.002142', 'gain': '80.0',
                       'base_freq': 874400000.0, 'device':
                       '0: Generic RTL2832U OEM', 'modifier': '-',
                       'channel': '154'},
                      {'channel_detect_threshold': '279392.605625',
                       'power': '401880.05', 'final_freq': '889829992',
                       'mod_freq': 29992.0, 'band': 'GSM-850',
                       'sample_rate': '270833.002142', 'gain': '80.0',
                       'base_freq': 889800000.0, 'device':
                       '0: Generic RTL2832U OEM', 'modifier': '+',
                       'channel': '231'},
                      {'channel_detect_threshold': '279392.605625',
                       'power': '397347.54', 'final_freq': '891996814',
                       'mod_freq': 3186.0, 'band': 'GSM-850',
                       'sample_rate': '270833.002142', 'gain': '80.0',
                       'base_freq': 892000000.0, 'device':
                       '0: Generic RTL2832U OEM', 'modifier': '-',
                       'channel': '242'}],
                  'scan_start': '2016-05-07 04:10:35',
                  'event_timestamp': '2016-05-07 04:10:35',
                  'scan_program': 'kalibrate'}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.