text stringlengths 957 885k |
|---|
from ._base import TableMixin
__all__ = ['HRTEMP', 'HRTCPR', 'PRTMST', 'PRTECN']
import datetime
class HRTEMP(TableMixin):
    """Column map for the HR employee master table (HRTEMP).

    Each class attribute names a database column and maps it to a
    ``(SQL_TYPE, length)`` tuple; DATE columns carry no explicit length.
    NOTE(review): attribute names mirror the legacy schema verbatim,
    including apparent typos such as ``EXPRIEDATE`` -- renaming them would
    break column lookups performed by TableMixin, so they are kept as-is.
    """
    TABLE_NAME = 'HRTEMP'
    # --- identity / ownership keys ---
    HRTEMPID = ('INT', 18)
    STATUSCODE = ('CHAR', 1)
    SRCCNCID = ('INT', 18)
    COMPANYNO = ('DEC', 2)
    DIVISIONNO = ('DEC', 3)
    CHGCNCID = ('INT', 18)
    CHGCOMPANY = ('INT', 2)
    CHGDIVISION = ('DEC', 3)
    EMPCNCID = ('INT', 18)
    EMPCOMPANY = ('DEC', 2)
    EMPDIVISION = ('DEC', 3)
    PRTMSTID = ('INT', 18)
    EMPLOYEENO = ('INT', 9)
    # --- personal / contact data ---
    SOCIALSECNO = ('DEC', 9)
    EMPLNAME = ('CHAR', 30)
    ABBRV = ('CHAR', 10)
    ADDR1 = ('CHAR', 30)
    ADDR2 = ('CHAR', 30)
    ADDR3 = ('CHAR', 30)
    CITY = ('CHAR', 20)
    STATECODE = ('CHAR', 2)
    ZIPCODE = ('DEC', 3)
    COUNTRYCODE = ('DEC', 3)
    AREACODE = ('DEC', 3)
    PHONENO = ('DEC', 7)
    CELLPHAC = ('DEC', 3)
    CELLPHNO = ('DEC', 7)
    CONTACTNAME = ('CHAR', 30)
    CONTACTAC = ('DEC', 3)
    CONTACTPHONE = ('DEC', 7)
    BUSUFFIX = ('CHAR', 4)
    CNTRYCODE = ('CHAR', 3)
    MARITALSTAT = ('CHAR', 1)
    LVLCODE = ('DEC', 2)
    OFFICERSCODE = ('CHAR', 1)
    HRTOCCID = ('INT', 18)
    OCCUPDESC1 = ('CHAR', 20)
    OCCUPDESC2 = ('CHAR', 20)
    SEXCODE = ('CHAR', 1)
    MINORITYCODE = ('DEC', 1)
    HANDICAPCODE = ('CHAR', 1)
    DISABLEVEL = ('CHAR', 2)
    BLOODTYPE = ('CHAR', 3)
    BIRTHPLACE = ('CHAR', 25)
    PERMRESIDENT = ('CHAR', 1)
    DRIVERLICNO = ('CHAR', 25)
    DLNUMBER = ('CHAR', 8)
    # --- employment milestone dates ---
    BIRTHDATE = ('DATE', )
    ORIGHIREDATE = ('DATE', )
    ADJHIREDATE = ('DATE', )
    VACELIGDATE = ('DATE', )
    LASTYEDATE = ('DATE', )
    ELIGSCKACCRL = ('DATE', )
    SICKACRLDATE = ('DATE', )
    EXPRIEDATE = ('DATE', )
    RETIREDDATE = ('DATE', )
    VISAEXPDATE = ('DATE', )
    REVIEWDATE = ('DATE', )
    ESTAVAILDATE = ('DATE', )
    ISSUEI9DATE = ('DATE', )
    I9EXPDATE = ('DATE', )
    ISSUEI9 = ('CHAR', 1)
    COBRALTRDATE = ('DATE', )
    COBRALTRRCVD = ('DATE', )
    COBREASNTFLG = ('CHAR', 1)
    REHIREDATE = ('DATE', )
    HOLELIGDATE = ('DATE', )
    DISABILITYDT = ('DATE', )
    TERMDATE = ('DATE', )
    # --- termination / classification ---
    PRTTRMID = ('INT', 18)
    TERMCODE = ('DEC', 2)
    LASTDAYWK = ('DATE', )
    BENEFITGP = ('CHAR', 50)
    ISSUEAUTH = ('CHAR', 25)
    EVERIFYDT = ('DATE', )
    EVERCASE = ('CHAR', 15)
    EVERCRES = ('CHAR', 25)
    TERMRSN = ('DEC', 3)
    ELGBRHIRE = ('CHAR', 1)
    PRTLBRID = ('INT', 18)
    DEPTNO = ('DEC', 3)
    PRTECLID = ('INT', 18)
    EMPLCLASS = ('DEC', 3)
    EMPLTYPE = ('CHAR', 2)
class HRTCPR(TableMixin):
    """Column map for the HR company-property table (HRTCPR).

    Tracks property (equipment, keys, etc.) assigned to an employee.
    Attributes map column names to ``(SQL_TYPE, length)`` tuples.
    """
    TABLE_NAME = 'HRTCPR'
    HRTCPRID = ('INT', 18)
    STATUSCODE = ('CHAR', 1)
    SRCCNCID = ('INT', 18)
    COMPANYNO = ('INT', 2)
    DIVISIONNO = ('INT', 3)
    HRTEMPID = ('INT', 18)
    CONTROLNO = ('INT', 20)
    PROPERTYNO = ('INT', 3)
    DESCRIPTION = ('CHAR', 50)
    ASGDATE = ('DATE',)
    RTNDATE = ('DATE',)
    EXPDATE = ('DATE',)
    DUEDATE = ('DATE',)
    RETIREDDATE = ('DATE',)
    APTVENID = ('INT', 18)
    VENDORNO = ('INT', 5)
    PROPAMOUNT = ('INT', 9)
    RETURNEDTO = ('CHAR', 20)
    ADDEDBY = ('CHAR', 20)
    ADDEDDATE = ('TIMESTAMP', 26)
    UPDPGM = ('CHAR', 20)
    UPDATEDBY = ('CHAR', 20)
    UPDDATE = ('TIMESTAMP', 26)
    # Default (column, value) pairs applied on insert.
    # NOTE(review): ``datetime.date.today()`` is evaluated once at class
    # definition (import) time, so ASGDATE goes stale in a long-running
    # process -- confirm whether TableMixin re-evaluates defaults per row.
    DEFAULTS = [
        ('STATUSCODE', 'A' ),
        ('SRCCNCID', '5'),
        ('COMPANYNO', '1'),
        ('DIVISIONNO', '0'),
        ('ASGDATE', datetime.date.today()),
        ('UPDPGM', 'HRTP130'),
        ('UPDATEDBY', 'CGCOWNER'),
    ]
    # NOTE(review): name is misspelled ("FORIEGN") -- presumably TableMixin
    # looks the attribute up under this exact name, so it is kept as-is.
    FORIEGN_KEYS = [
        {
            'EMPLOYEENO': {'table': HRTEMP, 'ref': 'HRTEMPID' },
            'OCCUPDESC': {'table': HRTEMP, 'ref': 'OCCUPDESC1'}
        },
    ]
class PRTMST(TableMixin):
    """Column map for the payroll employee master table (PRTMST).

    NOTE(review): this class is visibly unfinished (see in-source TODO):
    most columns below use a uniform ``('CHAR', 18)`` placeholder type
    (dates, codes and numeric keys included), and ``SRCCNCIDCOMPANYNO``
    looks like two column names fused together -- verify against the real
    schema before relying on these definitions.
    """
    TABLE_NAME = 'PRTMST'
    PRTMSTID = ('INT', 18)
    STATUSCODE = ('CHAR', 18)
    SRCCNCIDCOMPANYNO = ('INT', 18)
    COMPANYNO = ('DEC', 2)
    DIVISIONNO = ('DEC', 3)
    DSTCNCID = ('CHAR', 18)
    DISTCOMPANY = ('DEC', 2)
    DISTDIVISION = ('DEC', 3)
    CSHCNCID = ('INT', 18)
    CSHCOMPANY = ('DEC', 2)
    CSHDIVISION = ('DEC', 3)
    EMPLOYEENO = ('INT', 9)
    EMPNAME = ('CHAR', 18)
    ABBRV = ('CHAR', 18)
    ADDR1 = ('CHAR', 18)
    ADDR2 = ('CHAR', 18)
    ADDR3 = ('CHAR', 18)
    CITY = ('CHAR', 18)
    STATECODE = ('CHAR', 18)
    ZIPCODE = ('CHAR', 18)
    COUNTRYCODE = ('CHAR', 18)
    SYTCCDID = ('INT', 18)
    AREACODE = ('DEC', 18)
    PHONENO = ('DEC', 18)
    CELLPHAC = ('DEC', 18)
    CELLPHNO = ('DEC', 18)
    PAGERAC = ('DEC', 18)
    PAGERNO = ('DEC', 18)
    SOCIALSECNO = ('DEC', 18)
    MARITALSTAT = ('CHAR', 1) #TODO: Finish building class
    OCCUPDESC1 = ('CHAR', 18)
    OCCUPDESC2 = ('CHAR', 18)
    EXEMPTCERTPR = ('CHAR', 18)
    LVLCODE = ('CHAR', 18)
    OFFICERSCODE = ('CHAR', 18)
    SEXCODE = ('CHAR', 18)
    PRTEEIOD = ('CHAR', 18)
    # NOTE(review): likely a typo of MINORITYCODE -- kept to match schema use.
    MINOTIRYCODE = ('CHAR', 18)
    BNKMSTID = ('CHAR', 18)
    GLBNKACCT = ('CHAR', 18)
    PAYFREQCDE = ('CHAR', 18)
    PAYTYPE = ('CHAR', 18)
    FEDEXEMPCODE = ('CHAR', 18)
    EXMPTFMSTATE = ('CHAR', 18)
    DECEASEDCODE = ('CHAR', 18)
    PENSIONCODE = ('CHAR', 18)
    PRTUNMID = ('CHAR', 18)
    UNIONNO = ('CHAR', 18)
    BIRTHDATE = ('CHAR', 18)
    ORIGHIREDATE = ('CHAR', 18)
    ADJHIREDATE = ('CHAR', 18)
    BEGININGDATE = ('CHAR', 18)
    LASTWKDATE = ('CHAR', 18)
    TERMDATE = ('CHAR', 18)
    TERMCODE = ('CHAR', 18)
    TERMRSN = ('CHAR', 18)
    ELGBHIRE = ('CHAR', 18)
    HOMESTATE = ('CHAR', 18)
    WCSTATE = ('CHAR', 18)
    PRTSTMID = ('CHAR', 18)
    STIDCODE = ('CHAR', 18)
    FICACODE = ('CHAR', 18)
    FUTACODE = ('CHAR', 18)
    PRTLCMID = ('CHAR', 18)
    LOCALCODE = ('CHAR', 18)
    EXFITAMT = ('CHAR', 18)
    EXFITPCT = ('CHAR', 18)
    OPTTAXCODE = ('CHAR', 18)
    TAXCRCODE = ('CHAR', 18)
    TAXSTATUS = ('CHAR', 18)
    EICSTATUS = ('CHAR', 18)
    FEDDEDAMT = ('CHAR', 18)
    EMPLTIPS = ('CHAR', 18)
    SDIPAYPERCDE = ('CHAR', 18)
    WYNAICSCDE = ('CHAR', 18)
    WYWCCOVERAGE = ('CHAR', 18)
    DEDUCTIONCDE = ('CHAR', 18)
    STDDEPNO = ('CHAR', 18)
    SHIFTNO = ('CHAR', 18)
    PRTECLID = ('CHAR', 18)
    EMPCLASS = ('CHAR', 18)
    EMPTYPE = ('CHAR', 18)
    EMPLGROUP = ('CHAR', 18)
    PRTCRWID = ('CHAR', 18)
    CREWGROUP = ('CHAR', 18)
    CREWNO = ('CHAR', 18)
    JCTMSTID = ('CHAR', 18)
    JOBNO = ('CHAR', 18)
    SUBJOB = ('CHAR', 18)
    COSTCODE = ('CHAR', 18)
    COSTTYPE = ('CHAR', 18)
    STDHOURS = ('CHAR', 18)
    STDCOSTCODE = ('CHAR', 18)
    TICKETNO = ('CHAR', 18)
    FIRSTEMPNAME = ('CHAR', 18)
    MIDDLENAME1 = ('CHAR', 18)
    MIDDLENAME2 = ('CHAR', 18)
    LASTEMPNAME = ('CHAR', 18)
    SUFFIX = ('CHAR', 18)
    SUPERVISOR1 = ('CHAR', 18)
class PRTECN(TableMixin):
    """Column map for the payroll emergency-contact table (PRTECN).

    One row per (employee, SEQNO) contact entry; attributes map column
    names to ``(SQL_TYPE, length)`` tuples.
    """
    TABLE_NAME = 'PRTECN'
    PRTECNID = ('INT', 18)
    PRTMSTID = ('INT', 18)
    STATUSCODE = ('CHAR', 1)
    COMPANYNO = ('DEC', 2)
    DIVISIONNO = ('DEC', 3)
    EMPLOYEENO = ('INT', 9)
    SEQNO = ('DEC', 2)
    CONTNAME = ('CHAR', 30)
    CELLPHAC = ('DEC', 3)
    CELLPHNO = ('DEC', 7)
    FAXAC = ('DEC', 3)
    FAXPHNO = ('DEC', 7)
    OTHAC = ('DEC', 3)
    OTHPHNO = ('DEC', 7)
    EMAILADDR = ('CHAR', 64)
|
"""Tests for extractors module."""
import unittest
import numpy as np
from sklearn.pipeline import Pipeline
from nvdlib.nvd import NVD
from toolkit.pipelines.pipelines import get_preprocessing_pipeline
from toolkit.transformers import FeatureExtractor, Hook
# noinspection PyProtectedMember
from toolkit.transformers.extractors import _FeatureExtractor # pylint: disable=protected-access
from toolkit.utils import clear
class TestFeatureExtractor(unittest.TestCase):
    """Tests for FeatureExtractor class.

    Fix: the original repeated `assertIsInstance(prep, FeatureExtractor)`
    twice in a row in two places; the duplicates added nothing and are removed.
    """

    @classmethod
    def setUpClass(cls):
        """Preprocess and cache the shared test data once for all tests."""
        cls.test_data = _get_preprocessed_test_data()

    @clear
    def test_init(self):
        """Test FeatureExtractor initialization."""
        # default parameters
        prep = FeatureExtractor()
        self.assertIsNotNone(prep)
        self.assertIsInstance(prep, FeatureExtractor)

        # custom feature_keys
        feature = 'useless-feature'
        # delete to get rid of old keys
        del prep
        prep = FeatureExtractor(
            feature_hooks={
                feature: lambda _w, _t: True,
            }
        )
        self.assertIsInstance(prep, FeatureExtractor)
        # check that the custom feature_keys has been added
        self.assertTrue(feature in prep.feature_keys)

    @clear
    def test_extract_features(self):
        """Test FeatureExtractor `_extract_features` method."""
        # get tokenized sentence
        sent = self.test_data[0].features
        self.assertIsNotNone(sent)

        # apply default extractors transformation
        prep = FeatureExtractor()
        result = prep._extract_features(sent, word_pos=0)
        self.assertIsInstance(result, dict)

        # check few expected results
        self.assertEqual(result['prev-word'], '<start>')
        self.assertEqual(result['prev-tag'], '<start>')

    @clear
    def test_fit_transform(self):
        """Test FeatureExtractor `fit_transform` method."""
        # preprocess the sentences
        test_data = np.array(self.test_data)
        test_data, test_labels = test_data[:, 0], test_data[:, 1]
        assert test_labels is not None

        # apply default extractors transformation
        prep = FeatureExtractor()
        result = prep.fit_transform(X=test_data)
        self.assertEqual(len(result), len(test_data))

        # delete to get rid of old keys
        del prep

        # apply transformation with custom feature_keys
        prep = FeatureExtractor(
            feature_hooks={
                'useless-feature': lambda _s, _w, _t: True,
            }
        )
        with self.assertRaises(TypeError):
            # raises if skip=False (default), since arguments `s`, `w`, `t`
            # were not fed
            _ = prep.fit_transform(X=test_data)

        # skip=True
        result = prep.fit_transform(X=test_data, skip_unfed_hooks=True)
        self.assertEqual(len(result), len(test_data))

    @clear
    def test_pipeline(self):
        """Test FeatureExtractor as a single pipeline unit."""
        # should not raise, since NLTKPreprocessor does implement `fit`
        # and `transform` methods
        # NOTE(review): the class itself (not an instance) is passed here;
        # sklearn only validates steps on fit, so construction succeeds --
        # confirm whether `FeatureExtractor()` was intended.
        _ = Pipeline([
            ('preprocessor', FeatureExtractor)
        ])
# noinspection PyPep8Naming
class Test_FeatureExtractor(unittest.TestCase):
    """Tests for _FeatureExtractor class."""

    @clear
    def test_init(self):
        """Test _FeatureExtractor initialization."""
        _prep = _FeatureExtractor()
        self.assertTrue(any(_prep._hooks))  # pylint: disable=protected-access

    @clear
    def test_update(self):
        """Test _FeatureExtractor update method."""
        hook = Hook(key='key', func=lambda: None)
        _prep = _FeatureExtractor().update(hook)
        self.assertTrue('key' in _prep.keys)

    @clear
    def test_feed(self):
        """Test _FeatureExtractor feed method."""
        hook = Hook(key='key', func=lambda x: x)
        _prep = _FeatureExtractor().update(hook)

        # feed the extractor with skip=True
        result = _prep.feed({'x': 'test'}, skip_unfed_hooks=True)
        self.assertIsInstance(result, dict)
        key, value = list(*result.items())
        self.assertEqual(key, 'key')
        self.assertEqual(value, 'test')

        # feed with skip disabled: the call itself must raise TypeError.
        # Fix: the original kept three more statements inside this context
        # manager after the raising call -- they were unreachable dead code
        # and have been removed.
        with self.assertRaises(TypeError):
            _prep.feed({'x': 'test'}, skip_unfed_hooks=False)
def _get_preprocessed_test_data():
    """Download a sample of recent CVEs and run them through the preprocessing pipeline.

    Note: used for tests only.
    """
    feed = NVD.from_feeds(feed_names=['recent'])
    assert feed is not None

    # download and update
    feed.update()

    # collect at most 500 sample cves from the feed
    sample_limit = 500
    data = []
    for count, cve in enumerate(feed.cves()):
        if count >= sample_limit:
            break
        data.append(cve)

    pipeline = get_preprocessing_pipeline(
        nvd_attributes=['project', 'description']
    )
    step_names = list(zip(*pipeline.steps))[0]

    # set up fit parameters (see sklearn fit_params notation)
    fit_params = {
        "%s__feed_attributes" % step_names[2]: ['description'],
        "%s__output_attributes" % step_names[2]: ['label'],
    }

    return pipeline.fit_transform(X=data, **fit_params)
|
# -*- coding: utf-8 -*-
from parser_redeye import *
if __name__ == "__main__":
    # This is some sort of parser test.
    # Each option tuple is (short_flag, long_flag, takes_value, help_text).
    subscribe_args = (
        ("m", "message", True, u"Subscribe to message."),
        ("u", "user", True, u"Subscribe to user."),
        ("t", "tag", True, u"Subscribe to tag."),
        ("c", "club", True, u"Subscribe to club."),
        ("n", "newtab", False,
         u"Receive messages for this subscription from into tab"),
    )
    show_args = (
        ("m", "message", True, u"Show specified message."),
        ("u", "user", True, u"Show user's posts."),
        ("t", "tag", True, u"Show posts with tag."),
        ("c", "club", True, u"Show club posts."),
        ("p", "page", True, u"Results page (from 0)."),
        ("r", "replies", False,
         u"Include replies in output (only with -m)."),
    )
    post_args = (
        ("s", "notop", False,
         u"Post cannot be bumped to top."), # no-op
        ("t", "tags", True,
         u"Mark post with this tag(s) (comma-separated)."),
        ("c", "clubs", True,
         u"Post to this club(s) (comma-separated)."),
        ("a", "anonymous", False, u"Anonymous post."),
        ("q", "anonymous-comments", False,
         u"Make all comments to this post anonymous (doesn''t work at all yet)."),
    )
    comment_args = (
        ("m", "message", True, u"Message to comment."),
        ("a", "anonymous", False, u"Anonymous comment."),
    )
    recommend_args = (
        ("m", "message", True, u"Message to recommend."),
    )
    delete_args = (
        ('m', 'message', True, 'Message or comment to delete.'),
    )
    # Handler tuples: (command_name, option_tuples, canonical_action,
    # optional positional-argument name). Several aliases map to the same
    # action (e.g. "sub" -> "subscribe").
    handlers = (
        ("ping",
         (
             ("s", "safe", False, u"Do not vyebyvatsya."),
         ),
         "ping",
         ),
        ("except", (), "except", ),
        ("register", (), "register", "name", ),
        ("interface", (), "interface", "iface", ),
        ("subscribe", subscribe_args, "subscribe", ),
        ("sub", subscribe_args, "subscribe", ),
        ("unsubscribe", subscribe_args, "unsubscribe", ),
        ("usub", subscribe_args, "unsubscribe", ),
        ("subscriptions", (), "subscriptions", ),
        ("lsub", (), "subscriptions", ),
        ("help", (), "help", ),
        ("show", show_args, "show", ),
        ("s", show_args, "show", ),
        ("post", post_args, "post", "text", ),
        ("p", post_args, "post", "text", ),
        ("comment", comment_args, "comment", "text", ),
        ("c", comment_args, "comment", "text", ),
        ("recommend", recommend_args, "recommend", "comment", ),
        ("r", recommend_args, "recommend", "comment", ),
        ("on", (), "on", ),
        ("off", (), "off", ),
        ("delete", delete_args, "delete", ),
        ("d", delete_args, "delete", ),
        ("login", (), "login", ),
    )
    p = RedEyeParser(handlers, {}, [])
    # Test cases: (raw input line, expected resolve() result tuple of
    # (action, positional-arg name, parsed options, positional text)).
    test = (
        ("post --tags=linux,anime,mplayer ваш ляликс - говно для просмотра аниме!",
         ("post", "text", {"tags": "linux,anime,mplayer"}, "ваш ляликс - говно для просмотра аниме!")),
        ("comment -m 123456 ТЫ ГОВНО",
         ("comment", "text", {"message": "123456"}, "ТЫ ГОВНО")),
        ("c --message=123456/123",
         ("comment", "text", {"message": "123456/123"}, "")),
        ("subscribe -t mytag",
         ("subscribe", None, {"tag": "mytag"}, "")),
        ("subscriptions",
         ("subscriptions", None, {}, "")),
        ("show -t lol --club=fuck",
         ("show", None, {"tag": "lol", "club": "fuck"}, "")),
        ("interface simplified",
         ("interface", "iface", {}, "simplified")),
        ('c "" -am HUX2KJ/2BB',
         ('comment', 'text', {"message": "HUX2KJ/2BB", 'anonymous': True}, "")),
    )

    # Minimal stand-in for the message object the parser expects (only
    # its `.body` attribute is read by resolve()).
    class ShitMsg(object):
        def __init__(self, b):
            self.body = b

    # Run every case through the parser and compare against the expectation.
    for t, r in test:
        msg = ShitMsg(t)
        res = p.resolve(msg)[1:]
        print msg, res, r
        assert r == res
    print "Done ok."
else:
    raise Exception("Do not import this shit, just run it!")
|
import os
import KratosMultiphysics as Kratos
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage
import KratosMultiphysics.kratos_utilities as kratos_utils
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
    """Return *fileName* resolved against the directory containing this script."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(script_dir, fileName)
class KinematicConstraintsTestSolution(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
    """DEM analysis stage that checks per-node velocities against expected
    values at specific time windows, validating kinematic constraints.

    The reference values below are regression numbers for the fixture in
    ``kinematic_constraints_tests_files`` -- presumably produced by a known
    good run; confirm against the project parameters before changing them.
    """

    @classmethod
    def GetMainPath(self):
        # NOTE(review): declared @classmethod but the parameter is named
        # `self` -- it receives the class object here.
        return os.path.join(os.path.dirname(os.path.realpath(__file__)), "kinematic_constraints_tests_files")

    def GetProblemNameWithPath(self):
        return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())

    def FinalizeSolutionStep(self):
        """After each solver step, assert node velocities in selected time windows."""
        super().FinalizeSolutionStep()
        tolerance = 1e-3
        for node in self.spheres_model_part.Nodes:
            velocity = node.GetSolutionStepValue(Kratos.VELOCITY)
            angular_velocity = node.GetSolutionStepValue(Kratos.ANGULAR_VELOCITY)
            if node.Id == 1:
                # node 1: fully constrained (zero linear/angular velocity) early,
                # then falling by 0.32s
                if self.time > 0.18 and self.time < 0.2:
                    expected_value = 0.0
                    self.CheckValueOfVelocity(velocity, 0, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfVelocity(velocity, 1, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfVelocity(velocity, 2, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 0, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 1, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 2, expected_value, tolerance)
                elif self.time > 0.31999 and self.time < 0.32:
                    expected_value = -1.179
                    self.CheckValueOfVelocity(velocity, 1, expected_value, tolerance)
            if node.Id == 2:
                # node 2: imposed x-velocity proportional to time, later free fall
                if self.time > 0.25 and self.time < 0.3:
                    expected_value = -10.0 * self.time
                    self.CheckValueOfVelocity(velocity, 0, expected_value, tolerance)
                if self.time > 0.59999 and self.time < 0.6:
                    expected_value = -1.962
                    self.CheckValueOfVelocity(velocity, 1, expected_value, tolerance)
            if node.Id == 3:
                # node 3: initial linear velocity -5 in x and spin -10 about z
                if self.time < 0.1:
                    expected_value = -5.0
                    self.CheckValueOfVelocity(velocity, 0, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfVelocity(velocity, 1, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfVelocity(velocity, 2, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 0, expected_value, tolerance)
                    expected_value = 0.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 1, expected_value, tolerance)
                    expected_value = -10.0
                    self.CheckValueOfAngularVelocity(angular_velocity, 2, expected_value, tolerance)
            if node.Id == 4:
                if self.time > 0.22 and self.time < 0.25:
                    expected_value = 0.2192
                    self.CheckValueOfAngularVelocity(angular_velocity, 2, expected_value, tolerance)

    def CheckValueOfVelocity(self, velocity, component, expected_value, tolerance):
        """Assert one component of the linear velocity within *tolerance*."""
        self.assertAlmostEqual(velocity[component], expected_value, delta=tolerance)

    def CheckValueOfAngularVelocity(self, angular_velocity, component, expected_value, tolerance):
        """Assert one component of the angular velocity within *tolerance*."""
        self.assertAlmostEqual(angular_velocity[component], expected_value, delta=tolerance)
class TestKinematicConstraints(KratosUnittest.TestCase):
    """Driver test: runs the DEM stage above in one OpenMP thread and cleans up."""

    def setUp(self):
        pass

    @classmethod
    def test_KinematicConstraints_1(self):
        # NOTE(review): decorating a test method with @classmethod is unusual;
        # `self` receives the class object here, not an instance -- confirm
        # the test runner still discovers and executes it as intended.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "kinematic_constraints_tests_files")
        parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
        model = Kratos.Model()
        auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(KinematicConstraintsTestSolution, model, parameters_file_name, 1)

    def tearDown(self):
        # Remove the file the run leaves behind and restore the working
        # directory (the stage chdirs into the test-files folder).
        file_to_remove = os.path.join("kinematic_constraints_tests_files", "TimesPartialRelease")
        kratos_utils.DeleteFileIfExisting(GetFilePath(file_to_remove))
        os.chdir(this_working_dir_backup)
if __name__ == "__main__":
    # Quiet Kratos logging (repeats the import-time setting), then run tests.
    Kratos.Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
    KratosUnittest.main()
|
<gh_stars>1-10
import os.path
import numpy as np
# Compatibility shim: imgaug expects `numpy.random.bit_generator`, which some
# numpy versions exposed only as the private `_bit_generator` module.
try:
    np.random.bit_generator = np.random._bit_generator
    print("rename numpy.random._bit_generator")
except AttributeError:
    # Fix: the original bare `except:` swallowed *every* exception
    # (including KeyboardInterrupt/SystemExit); only the missing-attribute
    # case is expected here.
    print("numpy.random.bit_generator exists")
import cv2
from glob import glob
import imgaug.augmenters as iaa
def to_categorical(mask, num_classes, channel='channel_first'):
    """
    Convert label into categorical format (one-hot encoded)
    Args:
        mask: The label to be converted
        num_classes: maximum number of classes in the label
        channel: whether the output mask should be 'channel_first' or 'channel_last'
    Returns:
        The one-hot encoded label (uint8)
    """
    if channel not in ('channel_first', 'channel_last'):
        assert False, r"channel should be either 'channel_first' or 'channel_last'"
    assert num_classes > 1, "num_classes should be greater than 1"
    labels = np.unique(mask)
    assert len(labels) <= num_classes, "number of unique values should be smaller or equal to the num_classes"
    assert np.max(labels) < num_classes, "maximum value in the mask should be smaller than the num_classes"
    # Drop a singleton class axis (either right after batch or trailing) so
    # fancy indexing below yields exactly one one-hot vector per pixel.
    if mask.shape[1] == 1:
        mask = np.squeeze(mask, axis=1)
    if mask.shape[-1] == 1:
        mask = np.squeeze(mask, axis=-1)
    # Row i of the identity matrix is the one-hot encoding of label i.
    one_hot = np.eye(num_classes, dtype='uint8')[mask]
    if channel == 'channel_first':
        one_hot = np.moveaxis(one_hot, -1, 1)
    return one_hot
class ImageProcessor:
    """Static helpers for image/mask augmentation and center cropping."""

    @staticmethod
    def simple_aug(image, mask):
        """Apply random flips and, half of the time, a random affine transform.

        Accepts a single HxWxC image (with matching mask) or a 4-D batch;
        the mask is warped with the same parameters via imgaug's
        segmentation-map support so image and label stay aligned.
        """
        sometimes = lambda aug: iaa.Sometimes(0.5, aug)
        seq = iaa.Sequential(
            [
                iaa.Fliplr(0.5),
                iaa.Flipud(0.5),
                sometimes(iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    # scale images to 80-120% of their size, individually per axis
                    translate_percent={"x": (-0.1, 0.05), "y": (-0.1, 0.1)},
                    rotate=(-10, 10),  # rotate by -10 to +10 degrees
                    shear=(-12, 12),  # shear by -12 to +12 degrees
                    order=[0, 1],
                    cval=(0, 255),
                    mode='constant'
                )),
            ],
            random_order=True
        )
        if image.ndim == 4:
            # batch input: masks must be int32 arrays for imgaug
            mask = np.array(mask)
            image_heavy, mask_heavy = seq(images=image, segmentation_maps=mask.astype(np.int32))
        else:
            # single image: add a batch axis for imgaug, then strip it again
            image_heavy, mask_heavy = seq(images=image[np.newaxis, ...], segmentation_maps=mask[np.newaxis, ...])
            image_heavy, mask_heavy = image_heavy[0], mask_heavy[0]
        return image_heavy, mask_heavy

    @staticmethod
    def crop_volume(vol, crop_size=112):
        """
        Center crop the input vol into [B, 2 * crop_size, 2 * crop_size, ...]
        :param vol:
        :param crop_size: half of the output spatial size
        :return:
        """
        return np.array(vol[:,
                        int(vol.shape[1] / 2) - crop_size: int(vol.shape[1] / 2) + crop_size,
                        int(vol.shape[2] / 2) - crop_size: int(vol.shape[2] / 2) + crop_size, ])
class DataGenerator:
    """Batch generator over MS-CMRSeg PNG slices for one modality.

    Yields (images, masks) batches until `n_samples` items have been
    produced in total: images as (N, 3, H', W') float32 in [0, 1] and
    masks one-hot encoded as (N, 4, H', W').
    """

    def __init__(self, phase="train", batch_size=8, height=256, width=256, modality="bssfp", crop_size=224,
                 n_samples=-1, toprint=False, augmentation=False, data_dir='../data/mscmrseg'):
        # NOTE(review): `phase`, `height`/`width` and `toprint` are stored
        # but never read elsewhere in this class.
        assert modality == "bssfp" or modality == "t2" or modality == 'lge'
        self._height, self._width = height, width
        self._modality = modality
        self._crop_size = crop_size
        self._phase = phase
        self._batch_size = batch_size
        self._index = 0  # start from the 0th sample
        self._totalcount = 0
        self._toprint = toprint
        self._augmentation = augmentation
        # bSSFP filenames use different capitalization than the flag value
        if modality == 'bssfp':
            folder = 'bSSFP'
        else:
            folder = modality
        self._image_names = glob(os.path.join(data_dir, 'trainA/*{}*.png'.format(folder)))
        self._mask_names = glob(os.path.join(data_dir, 'trainAmask/*{}*.png'.format(folder)))
        assert len(self._image_names) == len(self._mask_names)
        self._len = len(self._image_names)
        print("{}: {}".format(modality, self._len))
        # fixed random permutation that maps running indices to file slots
        self._shuffle_indices = np.arange(self._len)
        self._shuffle_indices = np.random.permutation(self._shuffle_indices)
        if n_samples == -1:
            self._n_samples = self._len
        else:
            self._n_samples = n_samples
        # as arrays so they can be fancy-indexed with an index list
        self._image_names = np.array(self._image_names)
        self._mask_names = np.array(self._mask_names)

    def __len__(self):
        # number of available (image, mask) file pairs
        return self._len

    def get_images_masks(self, img_path, mask_path):
        """Load a BGR image and its grayscale mask, remapping the mask's
        gray values 85/212/255 to class labels 1/2/3 (0 stays background)."""
        img = cv2.imread(img_path)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = np.where(mask == 85, 1, mask)
        mask = np.where(mask == 212, 2, mask)
        mask = np.where(mask == 255, 3, mask)
        return img, mask

    def __iter__(self):
        # restart the epoch counter but keep the running file index
        self._totalcount = 0
        return self

    def __next__(self):
        """Assemble and return the next (x_batch, y_batch) pair."""
        x_batch = []
        y_batch = []
        indices = []
        if self._totalcount >= self._n_samples:
            self._totalcount = 0
            raise StopIteration
        # collect up to batch_size indices, wrapping around the file list
        for i in range(self._batch_size):
            indices.append(self._index)
            self._index += 1
            self._totalcount += 1
            self._index = self._index % self._len
            if self._totalcount >= self._n_samples:
                break
        indices_choise = self._shuffle_indices[indices]
        image_name_batch = self._image_names[indices_choise]
        mask_name_batch = self._mask_names[indices_choise]
        for img_name, msk_name in zip(image_name_batch, mask_name_batch):
            img, mask = self.get_images_masks(img_path=img_name, mask_path=msk_name)
            mask = np.expand_dims(mask, axis=-1)
            if self._augmentation:
                img, mask = ImageProcessor.simple_aug(image=img, mask=mask)
            x_batch.append(img)
            y_batch.append(mask)
        # min-max batch normalization
        x_batch = np.array(x_batch, np.float32) / 255.
        if self._crop_size:
            # crop_volume takes the half-size, hence the // 2
            x_batch = ImageProcessor.crop_volume(x_batch, crop_size=self._crop_size // 2)
            y_batch = ImageProcessor.crop_volume(np.array(y_batch), crop_size=self._crop_size // 2)
        x_batch = np.moveaxis(x_batch, -1, 1)
        y_batch = to_categorical(np.array(y_batch), num_classes=4)
        return x_batch, y_batch  # (N, 3, 256, 256) (N, 4, 256, 256)
if __name__ == "__main__":
    # Visual smoke test: iterate augmented batches and display each
    # image slice next to its color-mapped ground-truth labels.
    def getcolormap():
        """Build a 5-color ListedColormap (background + 4 tissue classes)."""
        from matplotlib.colors import ListedColormap
        colorlist = np.round(
            np.array([[0, 0, 0], [186, 137, 120], [124, 121, 174], [240, 216, 152], [148, 184, 216]]) / 256, decimals=2)
        mycolormap = ListedColormap(colors=colorlist, name='mycolor', N=5)
        return mycolormap

    import matplotlib.pyplot as plt
    bssfp_generator = DataGenerator(phase='train', modality='bssfp', crop_size=224, n_samples=1000, augmentation=True,
                                    data_dir='../../data/mscmrseg')
    for img, msk in bssfp_generator:
        print(img.shape, msk.shape)
        print(img.min(), img.max())
        print(np.argmax(msk,axis=1).min(), np.argmax(msk,axis=1).max())
        for m, gt in zip(img, msk):
            f, plots = plt.subplots(1, 2)
            plots[0].axis('off')
            plots[1].axis('off')
            # channel 1 of the BGR image shown as grayscale
            plots[0].imshow(m[1], cmap='gray')
            plots[1].imshow(np.argmax(gt, axis=0), cmap=getcolormap(), vmin=0, vmax=3)
            plt.show()
    pass
|
<reponame>stage-right/santiago
from cmd import Cmd
from sys import argv
import santiago
def make_droplet(name, size, image, region,
                 ssh_key_ids=None, virtio=True, private_networking=False,
                 backups_enabled=False, user_data=None, ipv6=False):
    """Build the request payload for creating a DigitalOcean droplet.

    Boolean flags are serialized as lowercase strings; *ssh_key_ids* is a
    comma-separated string of numeric key ids, included only when given.
    """
    def as_flag(value):
        # the payload carries 'true'/'false' strings rather than booleans
        return str(value).lower()

    payload = {
        'name': str(name),
        'size': str(size),
        'image': str(image),
        'region': str(region),
        'virtio': as_flag(virtio),
        'ipv6': as_flag(ipv6),
        'private_networking': as_flag(private_networking),
        'backups': as_flag(backups_enabled),
    }
    if user_data:
        payload['user_data'] = user_data
    if ssh_key_ids:
        payload['ssh_keys'] = [int(key_id) for key_id in ssh_key_ids.split(",")]
    return payload
def make_domain(name, ip):
    """Build the request payload for creating a DNS domain."""
    payload = {"name": name, "ip_address": ip}
    return payload
def make_record(record_type, data, name=None, priority=None, port=None, weight=None):
    """Build the request payload for creating a DNS record.

    Args:
        record_type: DNS record type ('A', 'MX', 'SRV', ...).
        data: record payload (target IP, hostname, ...).
        name, priority, port, weight: optional fields, included only when given.

    Fix: optional fields are now compared against None instead of by
    truthiness, so legitimate zero values -- e.g. SRV priority/weight 0 or
    MX priority 0 -- are no longer silently dropped from the payload.
    """
    record = {'data': data, 'type': record_type}
    if name is not None:
        record['name'] = name
    if priority is not None:
        record['priority'] = priority
    if port is not None:
        record['port'] = port
    if weight is not None:
        record['weight'] = weight
    return record
def make_key(name, pubkey):
    """Build the request payload for registering an SSH public key."""
    return {'public_key': pubkey, 'name': name}
def build_cmd(listfunc, createfunc, deletefunc):
    """Create a Cmd instance exposing list/create/destroy over three callbacks.

    Args:
        listfunc: called with the space-split tokens of the argument line.
        createfunc: called with the raw argument line.
        deletefunc: called with the space-split tokens of the argument line.

    Fix: `do_destroy` previously referenced an undefined name `destroyfunc`
    and raised NameError on every invocation; it now calls the `deletefunc`
    parameter it was given.
    """
    class GeneratedCmd(Cmd):
        def do_list(self, args):
            print(listfunc(*args.split(" ")))

        def do_create(self, args):
            print(createfunc(args))

        def do_destroy(self, args):
            print(deletefunc(*args.split(" ")))

    return GeneratedCmd()
class DomainRecordCmd(Cmd):
    """Sub-shell for DNS record operations (list/id/create/delete).

    Each handler splits its argument line on single spaces and forwards
    the tokens to the corresponding `santiago` API call.
    """
    def do_list(self, args):
        print(santiago.all_domain_records(*args.split(" ")))

    def do_id(self, args):
        print(santiago.domain_record_id(*args.split(" ")))

    def do_create(self, args):
        # first token: domain; last token: presumably the auth token;
        # middle tokens become positional args for make_record -- confirm
        # against santiago.new_domain_record's signature.
        record_args = args.split(" ")[1:-1]
        print(santiago.new_domain_record(args.split(" ")[0], make_record(*record_args), args.split(" ")[-1]))

    def do_delete(self, args):
        print(santiago.destroy_domain_record(*args.split(" ")))
class DomainCmd(Cmd):
    """Sub-shell for DNS domain operations; `record` delegates to DomainRecordCmd."""
    def do_list(self, args):
        print(santiago.all_domains(args))

    def do_create(self, args):
        # all tokens except the last describe the domain; the last token is
        # presumably the auth token -- confirm against santiago.new_domain.
        domain_args = args.split(" ")[:-1]
        print(santiago.new_domain(make_domain(*domain_args), args.split(" ")[-1]))

    def do_record(self, args):
        # forward the remainder of the line to the record sub-shell
        DomainRecordCmd().onecmd(args)
class DropletCmd(Cmd):
    """Sub-shell for droplet operations (list/create/delete)."""
    def do_list(self, args):
        print(santiago.all_droplets(args))

    def do_create(self, args):
        # all tokens except the last describe the droplet; the last token is
        # presumably the auth token -- confirm against santiago.new_droplet.
        droplet_args = args.split(" ")[:-1]
        print(santiago.new_droplet(make_droplet(*droplet_args), args.split(" ")[-1]))

    def do_delete(self, args):
        print(santiago.destroy_droplet(*args.split(" ")))
class KeyCmd(Cmd):
    """Sub-shell for SSH key operations (list/create)."""
    def do_list(self, args):
        print(santiago.all_ssh_keys(args))

    def do_create(self, args):
        # all tokens except the last describe the key; the last token is
        # presumably the auth token -- confirm against santiago.new_ssh_key.
        key_args = args.split(" ")[:-1]
        print(santiago.new_ssh_key(make_key(*key_args), args.split(" ")[-1]))
class SantiagoCmd(Cmd):
    """Top-level shell: dispatches to the droplet/domain/key sub-shells."""
    def do_quit(self, args):
        raise SystemExit

    def do_droplet(self, args):
        DropletCmd().onecmd(args)

    def do_domain(self, args):
        DomainCmd().onecmd(args)

    def do_keys(self, args):
        KeyCmd().onecmd(args)
def main(args=None):
    """Run the santiago shell.

    Args:
        args: optional list of command tokens. Defaults to ``sys.argv[1:]``.
            With no tokens, an interactive loop is started; otherwise the
            tokens are joined and executed as a single command.

    Fix: the `args` parameter was previously accepted but ignored (the
    function always read the global argv); it is now honored, with the
    default behavior unchanged.
    """
    shell = SantiagoCmd()
    shell.prompt = '> '
    if args is None:
        args = argv[1:]
    if args:
        shell.onecmd(" ".join(args))
    else:
        shell.cmdloop()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
"""
Take a set of residue-frequency files generated by `get-resfreqs.py`,
group them into a single table file by matching residue pair ids, and
plot them as a clustered heat-map.
"""
from __future__ import division
import sys
import argparse
import numpy as np
from contact_calc.flare import compose_frequencytable, write_json
def parse_frequencyfiles(freq_files, freq_cutoff):
    """Merge per-file residue-pair frequencies into one table.

    Args:
        freq_files: open file objects with tab-separated lines of the form
            "res1<TAB>res2<TAB>frequency"; blank lines and '#'-comments
            are skipped.
        freq_cutoff: pairs whose maximum frequency across all files does
            not exceed this value are dropped.

    Returns:
        dict mapping (res1, res2) to a numpy array with one frequency
        column per input file.
    """
    n_columns = len(freq_files)
    table = {}
    for col, freq_file in enumerate(freq_files):
        for raw in freq_file:
            raw = raw.strip()
            if not raw or raw.startswith("#"):
                continue
            fields = raw.split("\t")
            pair = (fields[0], fields[1])
            if pair not in table:
                table[pair] = np.zeros(n_columns)
            table[pair][col] = float(fields[2])
    # Keep only pairs where at least one file exceeds the cutoff.
    return {pair: freqs for pair, freqs in table.items() if np.amax(freqs) > freq_cutoff}
def write_frequencytable(freq_table, col_labels, fname):
    """Write *freq_table* to *fname* as CSV: two residue columns, then one
    frequency column per label in *col_labels*."""
    with open(fname, "w") as out:
        header = ["", ""] + col_labels
        out.write(",".join(header) + "\n")
        for (res1, res2), freqs in freq_table.items():
            row = [res1, res2] + [str(freq) for freq in freqs]
            out.write(",".join(row) + "\n")
def plot_frequencies(freq_table, col_labels, out_file, cluster_columns):
    """Render *freq_table* as a seaborn clustermap and save it to *out_file*.

    Plotting imports are deferred so the rest of the module works without
    pandas/matplotlib/seaborn installed.
    """
    import pandas as pd
    import matplotlib
    import os
    if "DISPLAY" not in os.environ:
        # headless environment: use a non-interactive backend
        matplotlib.use('agg')
    import seaborn as sns;
    sns.set(color_codes=True)

    # rows: residue pairs, columns: one per input file
    freq_matrix = np.array([freq_table[(r1, r2)] for (r1, r2) in freq_table])
    row_labels = [r1 + " - " + r2 for (r1, r2) in freq_table]
    pdframe = pd.DataFrame(freq_matrix, index=row_labels, columns=col_labels)

    # Scale down figsize if too large
    figsize = [pdframe.shape[1], pdframe.shape[0]]
    if figsize[1] > 320:
        figsize[0] *= 320 / figsize[1]
        figsize[1] *= 320 / figsize[1]

    # Create clustermap
    fingerprints = sns.clustermap(pdframe,
                                  figsize=figsize,
                                  annot=True,
                                  col_cluster=cluster_columns,
                                  cmap='Blues')

    # Remove color bar
    fingerprints.cax.set_visible(False)

    import matplotlib.pyplot as plt
    # keep row labels horizontal and column labels vertical for readability
    plt.setp(fingerprints.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
    plt.setp(fingerprints.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
    fingerprints.savefig(out_file)
def main():
    """CLI entry point: parse arguments, build the frequency table, and emit
    the requested table/flare/plot outputs."""
    # Parse command line arguments
    class MyParser(argparse.ArgumentParser):
        def error(self, message):
            # Prints full program help when error occurs
            self.print_help(sys.stderr)
            sys.stderr.write('\nError: %s\n' % message)
            sys.exit(2)

    parser = MyParser(description=__doc__,
                      formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--input_frequencies',
                        type=argparse.FileType('r'),
                        required=True,
                        nargs='+',
                        help="Paths to one or more residue frequency files")
    parser.add_argument('--frequency_cutoff',
                        type=float,
                        required=False,
                        default=0.6,
                        help="Only interactions occurring at least this frequently will be plotted (default: 0.6)")
    parser.add_argument('--column_headers',
                        type=str,
                        required=False,
                        nargs='+',
                        help="Header column labels. If nothing is specified, the input_frequencies filenames are used")
    # NOTE(review): argparse `type=bool` does not parse "False" -- any
    # non-empty string is truthy; confirm this flag behaves as intended.
    parser.add_argument('--cluster_columns',
                        type=bool,
                        required=False,
                        default=False,
                        help="Perform hierarchical clustering on the columns (default: False)")
    parser.add_argument('--table_output',
                        type=str,
                        required=False,
                        default=None,
                        help="If specified, the tab-separated frequency table will be written to this file")
    parser.add_argument('--plot_output',
                        type=str,
                        required=False,
                        default=None,
                        help="If specified, the heatmap will be written to this file (supports svg and png formats)")
    parser.add_argument('--flare_output',
                        type=str,
                        required=False,
                        default=None,
                        help="If specified, a compare-flare will be written to this json-file")

    args = parser.parse_args()
    freq_table = parse_frequencyfiles(args.input_frequencies, args.frequency_cutoff)

    # Determine column headers and exit on error
    column_headers = [f.name for f in args.input_frequencies] if args.column_headers is None else args.column_headers
    if len(column_headers) != len(args.input_frequencies):
        parser.print_help(sys.stderr)
        sys.stderr.write("\nError: --column_header arguments must match length of --input_frequencies\n")
        sys.exit(2)

    # Check output format and call corresponding function(s)
    if args.table_output is None and args.flare_output is None and args.plot_output is None:
        parser.print_help(sys.stderr)
        sys.stderr.write("\nError: Either --table_output, --flare_output, or --plot_output must be specified\n")
        sys.exit(2)
    if args.table_output is not None:
        write_frequencytable(freq_table, column_headers, args.table_output)
        print("Wrote frequency table to "+args.table_output)
    if args.flare_output is not None:
        compare_flare = compose_frequencytable(freq_table, column_headers, args.frequency_cutoff)
        write_json(compare_flare, args.flare_output)
        print("Wrote multi flare to "+args.flare_output)
    if args.plot_output is not None:
        plot_frequencies(freq_table, column_headers, args.plot_output, args.cluster_columns)
        print("Wrote fingerprint heatmap to "+args.plot_output)


if __name__ == '__main__':
    main()
|
import os
import glob
import fire
from finetune_vs_scratch.preprocessing import special_tokens
from finetune_vs_scratch.tokenizer import tokenizer_special_tokens
from tokenizers import SentencePieceBPETokenizer, normalizers, Regex
from transformers import PreTrainedTokenizerFast
from tokenizers.processors import RobertaProcessing
def train_tokenizer(
    train_path: str, output_path: str, strip_accents: bool = False,
    lowercase: bool = False, vocab_size: int = 30_000, min_frequency: int = 10, limit_alphabet: int = 400, num_files=40,
):
    """
    Train a SentencePiece-BPE tokenizer on tweet files and save it in
    `transformers` fast-tokenizer format.

    Arguments:
        train_path: directory containing the training ``*.txt`` files.
        output_path: directory where the trained tokenizer is saved.
        strip_accents: whether the BERT normalizer strips accents.
        lowercase: whether the BERT normalizer lowercases the text.
        vocab_size: target vocabulary size.
        min_frequency: minimum pair frequency for a BPE merge.
        limit_alphabet: maximum number of distinct initial characters kept.
        num_files: number of (sorted) files from ``train_path`` to train on.
    """
    tweet_files = sorted(
        glob.glob(os.path.join(train_path, "*.txt"))
    )[:num_files]
    print(f"Found {len(tweet_files)} files in {train_path}")
    tokenizer = SentencePieceBPETokenizer()
    tokenizer.add_special_tokens(tokenizer_special_tokens)
    print(tokenizer)
    print("Training...")
    print(f"Lowercase: {lowercase}")
    print(f"Strip accents: {strip_accents}")
    # FIX: regex patterns are raw strings now; "\W" inside a plain string
    # literal is an invalid escape sequence (DeprecationWarning, and a
    # SyntaxWarning on newer Pythons).  The byte values are unchanged.
    tokenizer_normalizers = [
        normalizers.NFKC(),
        normalizers.BertNormalizer(
            clean_text=True,
            handle_chinese_chars=True,
            strip_accents=strip_accents,
            lowercase=lowercase,
        ),
        normalizers.Replace(Regex(r"(\W)?@usuario(\W)"), " @usuario "),
        normalizers.Replace("hashtag", " hashtag "),
        # Preprocessing error in the corpus: normalize stray url markers.
        normalizers.Replace(Regex(r"(\W)url(\W)"), " url "),
        normalizers.Replace("http://url", " url "),
    ]
    # Wrap every encoding in RoBERTa-style <s> ... </s> special tokens.
    tokenizer.post_processor = RobertaProcessing(
        cls=("<s>", tokenizer.token_to_id("<s>")),
        sep=("</s>", tokenizer.token_to_id("</s>")),
    )
    print(tokenizer_normalizers)
    tokenizer.normalizer = normalizers.Sequence(tokenizer_normalizers)
    print(tokenizer.normalizer)
    tokenizer.train(
        tweet_files,
        vocab_size=vocab_size,
        min_frequency=min_frequency,
        show_progress=True,
        special_tokens=tokenizer_special_tokens,
        limit_alphabet=limit_alphabet,
    )
    # Sanity checks: dump the first tokens, the learned alphabet, and any
    # vocabulary entries that contain a special token.
    vocab = tokenizer.get_vocab()
    inv_vocab = {v: k for k, v in vocab.items()}
    print(f"First tokens: {[inv_vocab[i] for i in range(20)]}")
    alphabet = sorted({a for x in vocab for a in x})
    print("Alphabet = ", " ".join(alphabet))
    for tok in vocab:
        if any(t in tok for t in special_tokens):
            print(f"{tok:<12} --> {vocab[tok]}")
    transformer_tokenizer = PreTrainedTokenizerFast(
        tokenizer_object=tokenizer,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
    )
    # Smoke-test the round trip on a single text and on a text pair.
    text = "@usuario ESTO es una prueba esdrújula PAPÁ"
    print(f"Without tokenizing: {text}")
    decoded = transformer_tokenizer.decode(
        transformer_tokenizer(text)["input_ids"]
    )
    print(f"Processed: {decoded}")
    text = ["@usuario dos oraciones", "segunda ORACIÓN"]
    print(f"Without tokenizing: {text}")
    decoded = transformer_tokenizer.decode(
        transformer_tokenizer(*text)["input_ids"]
    )
    print(f"Processed: {decoded}")
    transformer_tokenizer.save_pretrained(output_path)
# Expose train_tokenizer as a command-line interface via python-fire.
if __name__ == '__main__':
    fire.Fire(train_tokenizer)
|
<filename>sagemaker-debugger/tensorflow_profiling/entry_point/tf-TensorLike.py
# Standard Library
import argparse
import time
# Third Party
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
# First Party
from smdebug.tensorflow import KerasHook
def between_steps_bottleneck():
    """Simulate a between-steps stall by sleeping for one second.

    Used to create an artificial bottleneck that the SageMaker profiler
    should detect between training steps.
    """
    time.sleep(1)
class CustomCallback(tf.keras.callbacks.Callback):
    """Keras callback that injects an artificial stall before batches 10-19."""

    def on_train_batch_begin(self, batch, logs=None):
        # Only batches in the half-open range [10, 20) are slowed down.
        if batch < 10 or batch >= 20:
            return
        between_steps_bottleneck()
def train(batch_size, epoch, model, enable_bottleneck, data_augmentation):
    """Train `model` on CIFAR-10.

    Args:
        batch_size: mini-batch size.
        epoch: number of training epochs.
        model: a compiled Keras model.
        enable_bottleneck: if True, attach CustomCallback to stall batches 10-19.
        data_augmentation: if True, train on an augmenting ImageDataGenerator
            stream instead of the raw arrays.
    """
    callbacks = [CustomCallback()] if enable_bottleneck else []
    (X_train, y_train), (X_valid, y_valid) = cifar10.load_data()
    Y_train = to_categorical(y_train, 10)
    Y_valid = to_categorical(y_valid, 10)
    X_train = X_train.astype("float32")
    X_valid = X_valid.astype("float32")
    # Center on the training-set mean (also applied to validation data),
    # then scale roughly into [-1, 1].
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_valid -= mean_image
    X_train /= 128.0
    X_valid /= 128.0
    if not data_augmentation:
        # BUG FIX: `callbacks` was previously dropped on this path, so
        # enable_bottleneck had no effect unless data_augmentation was on.
        model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            epochs=epoch,
            validation_data=(X_valid, Y_valid),
            shuffle=True,
            callbacks=callbacks,
        )
    else:
        datagen = ImageDataGenerator(
            zca_whitening=True,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.0,
            zoom_range=0.0,
            channel_shift_range=0.0,
            fill_mode="nearest",
            cval=0.0,
            horizontal_flip=True,
            vertical_flip=True,
            validation_split=0.0,
        )
        datagen.fit(X_train)
        # Fit the model on the batches generated by datagen.flow().
        # fit_generator is deprecated in TF2; Model.fit accepts generators.
        model.fit(
            datagen.flow(X_train, Y_train, batch_size=batch_size),
            callbacks=callbacks,
            epochs=epoch,
            validation_data=(X_valid, Y_valid),
            workers=1,
        )
def main():
    """Parse CLI args, build ResNet50 under MirroredStrategy, and train."""
    _ = KerasHook(out_dir="")  # need this line so that import doesn't get removed by pre-commit

    def str2bool(value):
        # BUG FIX: argparse's type=bool is broken -- bool("False") is True
        # because any non-empty string is truthy, so `--flag False` silently
        # enabled the flag.  Parse common spellings explicitly instead.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ("1", "true", "t", "yes", "y")

    parser = argparse.ArgumentParser(description="Train resnet50 cifar10")
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--epoch", type=int, default=50)
    parser.add_argument("--data_augmentation", type=str2bool, default=False)
    parser.add_argument("--model_dir", type=str, default="./model_keras_resnet")
    parser.add_argument("--enable_bottleneck", type=str2bool, default=True)
    args = parser.parse_args()
    # Build and compile the model inside the distribution-strategy scope so
    # variables are mirrored across GPUs.
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        model = ResNet50(weights=None, input_shape=(32, 32, 3), classes=10)
        opt = tf.keras.optimizers.Adam(learning_rate=0.001)
        model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    # start the training.
    train(args.batch_size, args.epoch, model, args.enable_bottleneck, args.data_augmentation)
# Script entry point.
if __name__ == "__main__":
    main()
|
<reponame>scopatz/pymoab
from pymoab import core
from pymoab import types
from subprocess import call
import numpy as np
import os
def test_write_mesh():
    """Writing a trivial one-vertex mesh produces an .h5m file on disk."""
    moab = core.Core()
    moab.create_vertices(np.ones(3))
    outname = "outfile.h5m"
    moab.write_file(outname)
    assert os.path.isfile(outname)
def test_integer_tag():
    """An integer tag set on a vertex reads back with the same value and dtype."""
    moab = core.Core()
    handle = vertex_handle(moab)
    tag = moab.tag_get_handle("Test", 1, types.MB_TYPE_INTEGER)
    expected = 4
    moab.tag_set_data(tag, handle, np.array((expected,)))
    data = moab.tag_get_data(tag, handle)
    assert len(data) == 1
    assert data[0] == expected
    assert data.dtype == 'int32'
def test_double_tag():
    """A double tag round-trips; mistyped tag data must be rejected."""
    mb = core.Core()
    vh = vertex_handle(mb)
    test_tag = mb.tag_get_handle("Test", 1, types.MB_TYPE_DOUBLE)
    test_val = 4.4
    # NOTE(review): np.array((test_val)) is a 0-d array (no trailing comma),
    # unlike the 1-d array used in the integer test -- confirm pymoab
    # deliberately accepts both shapes.
    test_tag_data = np.array((test_val))
    mb.tag_set_data(test_tag, vh, test_tag_data)
    data = mb.tag_get_data(test_tag, vh)
    assert len(data) == 1
    assert data[0] == test_val
    assert data.dtype == 'float64'
    # A couple of set operations that are expected to fail.
    # float32 data on a double tag should raise.
    test_tag = mb.tag_get_handle("Test1", 1, types.MB_TYPE_DOUBLE)
    test_tag_data = np.array((test_val), dtype='float32')
    try:
        mb.tag_set_data(test_tag, vh, test_tag_data)
    except AssertionError:
        pass
    else:
        # FIX: print as a function so the module parses under Python 2 and 3.
        print("Shouldn't be here. Test fails.")
        raise AssertionError
    # int32 data on a double tag should raise as well.
    test_tag = mb.tag_get_handle("Test2", 1, types.MB_TYPE_DOUBLE)
    test_tag_data = np.array((test_val), dtype='int32')
    try:
        mb.tag_set_data(test_tag, vh, test_tag_data)
    except AssertionError:
        pass
    else:
        print("Shouldn't be here. Test fails.")
        raise AssertionError
def test_opaque_tag():
    """An opaque (byte-string) tag stores tag_length bytes and reads back intact."""
    moab = core.Core()
    handle = vertex_handle(moab)
    tag_length = 6
    tag = moab.tag_get_handle("Test", tag_length, types.MB_TYPE_OPAQUE)
    expected = 'four'
    moab.tag_set_data(tag, handle, np.array((expected,)))
    data = moab.tag_get_data(tag, handle)
    assert len(data) == 1
    assert data.nbytes == tag_length
    assert data[0] == expected
    assert data.dtype == '|S' + str(tag_length)
def test_create_meshset():
    """A meshset can be created and a vertex handle added to it."""
    moab = core.Core()
    meshset = moab.create_meshset()
    handle = vertex_handle(moab)
    # Best we can do for now: just check the calls succeed.
    moab.add_entities(meshset, handle)
def vertex_handle(core):
    """Convenience function for getting an arbitrary vertex element handle."""
    origin = np.array((1, 1, 1), dtype='float64')
    verts = core.create_vertices(origin)
    # Return a copy with the uint64 entity-handle dtype expected by tag calls.
    return np.array((verts[0],), dtype='uint64')
def test_create_elements():
    """A triangle built from three vertices appears with GLOBAL_ID 0."""
    moab = core.Core()
    coords = np.array((0, 0, 0, 1, 0, 0, 1, 1, 1), dtype='float64')
    verts = moab.create_vertices(coords)
    assert 3 == verts.size()
    # Create one triangle element from the three vertex handles.
    connectivity = np.array(((verts[0], verts[1], verts[2]),), dtype='uint64')
    tris = moab.create_elements(types.MBTRI, connectivity)
    assert 1 == len(tris)
    # Check that the element is there via the GLOBAL_ID tag.
    gid_tag = moab.tag_get_handle("GLOBAL_ID", 1, types.MB_TYPE_INTEGER)
    tri_ids = moab.tag_get_data(gid_tag, tris)
    assert 1 == len(tri_ids)
    assert 0 == tri_ids[0]
def test_range():
    """Iterating a vertex Range yields exactly size() entities."""
    mb = core.Core()
    coord = np.array((1, 1, 1), dtype='float64')
    vert = mb.create_vertices(coord)
    test_tag = mb.tag_get_handle("Test", 1, types.MB_TYPE_INTEGER)
    data = np.array((1,))
    mb.tag_set_data(test_tag, vert, data)
    count = 0
    for _ in vert:
        count += 1
        if count > 100:  # guard against a broken iterator looping forever
            break
    # BUG FIX: was `vert.size() is count` -- `is` compares identity, which
    # only works for small ints by CPython implementation accident.
    assert vert.size() == count
def test_tag_failures():
    """Tag get/set with a wrongly-typed handle array must raise."""
    mb = core.Core()
    coord = np.array((1, 1, 1), dtype='float64')
    verts = mb.create_vertices(coord)
    # uint32 is not a valid entity-handle dtype (handles are uint64).
    verts_illicit_copy = np.array((verts[0],), dtype='uint32')
    test_tag = mb.tag_get_handle("Test", 1, types.MB_TYPE_INTEGER)
    data = np.array((1,))
    # This set uses the correct handle type and should succeed.
    mb.tag_set_data(test_tag, verts, data)
    # Setting via the illicit (uint32) handle array should fail.
    try:
        mb.tag_set_data(test_tag, verts_illicit_copy, data)
    except AssertionError:
        pass
    else:
        # FIX: print as a function so the module parses under Python 2 and 3.
        print("Shouldn't be here. Test fails.")
        raise AssertionError
    global_id_tag = mb.tag_get_handle("GLOBAL_ID", 1, types.MB_TYPE_INTEGER)
    # Reading with the illicit handle array should fail the same way.
    try:
        mb.tag_get_data(global_id_tag, verts_illicit_copy)
    except AssertionError:
        pass
    else:
        print("Shouldn't be here. Test fails.")
        raise AssertionError
|
# copyright (c) 2018 Larz60+
import ScraperPaths
import GetPage
import CIA_ScanTools
from bs4 import BeautifulSoup
import sys
import os
class CIA_UsersGuide:
    """Scraper for the CIA World Factbook users-guide page.

    Instantiating the class immediately fetches (or re-reads) the main page,
    scrapes it, and persists the accumulated fact links.
    """
    def __init__(self):
        # Helper objects: path registry, page fetcher, and shared scan tools.
        self.spath = ScraperPaths.ScraperPaths()
        self.gp = GetPage.GetPage()
        self.getpage = self.gp.get_page
        self.get_filename = self.gp.get_filename
        self.cst = CIA_ScanTools.CIA_Scan_Tools()
        self.fact_links = self.cst.fact_links
        self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/docs/guidetowfbook.html'
        self.filename = self.get_filename(self.mainurl)
        self.mainpage = self.getpage(self.mainurl, self.filename)
        # Side effects on construction: scrape the page and save the results.
        self.scrape_page()
        self.cst.save_fact_links()
    def get_categories(self, soup):
        """Return category headings: the text of every <h2> tag except the
        first two, with the last three characters stripped."""
        cats = []
        h2s = soup.find_all('h2')
        for n, h2 in enumerate(h2s):
            if n < 2:
                continue
            # Drop the trailing decoration characters from the heading text.
            h2 = h2.text.strip()[:-3]
            cats.append(h2)
            # print(f'\n{h2}')
        return cats
    def scrape_page(self):
        """Scrape the users-guide page, dumping h2 parent/child context to a
        debug file.

        NOTE(review): execution always stops at the sys.exit(0) below;
        everything after it is unreachable dead code and references an
        undefined name `table` -- it looks like work in progress.
        """
        baseurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/graphics/'
        prettify = self.cst.prettify
        soup = BeautifulSoup(self.mainpage, 'lxml')
        maintable = soup.find('table')
        # tmpfile = self.spath.tmppath / 'maintable.html'
        # with tmpfile.open('w') as fp:
        #     fp.write(f'===========================================================================\n')
        #     fp.write(f'maintable:{prettify(maintable, indent=4)}\n')
        firsttrs = maintable.find_all('tr')
        # tmpfile = self.spath.tmppath / 'firsttrs.html'
        # with tmpfile.open('w') as fp:
        #     for n, tr in enumerate(firsttrs):
        #         fp.write(f'===========================================================================\n')
        #         fp.write(f'firsttrs_{n}: {prettify(tr, indent=4)}\n\n')
        #narrow down to profileguide div
        profilediv = firsttrs[5].find('div', {'id': 'profileguide'})
        # tmpfile = self.spath.tmppath / 'profilediv.html'
        # with tmpfile.open('w') as fp:
        #     fp.write(f'===========================================================================\n')
        #     fp.write(f'profilediv: {prettify(profilediv, 2)}\n')
        # insert new tags to help split html. These will go just before each <h2> tag
        all_h2_tags = profilediv.find_all('h2')
        # Dump each h2 with its parent and first child for manual inspection.
        tmpfile = self.spath.tmppath / 'h2_parent_child.html'
        with tmpfile.open('w') as fp:
            fp.write(f'\n===========================================================================\n')
            fp.write(f'===========================================================================\n')
            for n, h2 in enumerate(all_h2_tags):
                parent = h2.findParent()
                child = h2.findChild()
                fp.write(f'\nParent_{n}:\n    {parent}')
                fp.write(f'\n===========================================================================\n')
                fp.write(f'\nh2_{n}:\n    {h2}')
                fp.write(f'\n===========================================================================\n')
                fp.write(f'\nChild{n}:\n    {child}')
                fp.write(f'\n===========================================================================\n')
                fp.write(f'===========================================================================\n')
            # for n, tr in enumerate(firsttrs):
            #     fp.write(f'firsttrs_{n}: {prettify(tr, 2)}\n\n')
        # Deliberate early exit -- all code below is unreachable (see note in
        # the docstring) and `table` is undefined.
        sys.exit(0)
        # before tables, find major td
        first_trs = table.find_all('tr')
        # for n, tr in enumerate(first_trs):
        #     print(f'\n===========================================================================')
        #     print(f'tr_{n}\n{prettify(tr, 2)}')
        divs = first_trs[5].find_all('div')
        categories = self.get_categories(soup)
        c1 = self.fact_links['FactbookGuide'] = {}
        for n, div in enumerate(divs):
            # category = categories[n]
            print(f'\n===========================================================================')
            # print(f'Category: {category}')
            print(f'table_0, tr_5, div_{n}\n{prettify(div, 2)}')
            # trs = div.find_all('tr')
            # for n1, tr in enumerate(trs):
            #     # print(f'\ndiv_{n} category: {category}, tr_{n1}:\n{prettify(tr, 2)}')
            #     tds = tr.find_all('td')
            #     print(f'\n=================================================================================')
            #     if tds is None:
            #         print(f"\ndiv_{n} category: {category}, tr_{n1}: No td's")
            #         continue
            #     for n2, td in enumerate(tds):
            #         print(f'\ndiv_{n} category: {category}, tr_{n1}, td_{n2}:\n{prettify(td, 2)}')
            #         images = td.find_all('img')
            #         if images is not None:
            #             for n3, image in enumerate(images):
            #                 if n3 > 0:
            #                     print()
            #                 alt = image.get('alt')
            #                 height = image.get('height')
            #                 width = image.get('width')
            #                 style = image.get('style')
            #                 iurl = image.get('src')
            #                 ims = iurl.split('/')
            #                 url = f'{baseurl}{ims[1]}'
            #                 filename = self.get_filename(url)
            #                 caption = image.text.strip()
            #                 print(f'    image_{n3}: alt: {alt}, height: {height}, width: {width}, style: {style}, caption: {caption}\n        url: {url}\n        filename: {filename.resolve()}')
            #                 self.getpage(url, filename, image=True)
            #     if isinstance(tds, list):
            #         for n3, td in enumerate(tds):
            #             print(f'\n    div_{n} category: {category}, tr_{n1}, td_{n2}, td_{n3}:\n{td}')
            #     else:
            #         print(f'\n    single td: {td}')
# Running the module scrapes the users guide immediately (all work happens
# in the constructor).
if __name__ == '__main__':
    CIA_UsersGuide()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.test import TestCase
from django.utils.encoding import force_text
from rest_framework.test import APIClient
from drf_cache.redis_connection import RedisConn
from drf_cache.seed import update_seed_version
# @override_settings(ROOT_URLCONF="libs.cache.tests.urls")
class TestCacheDecorator(TestCase):
    """Integration tests for the DRF cache decorator and seed versioning."""
    # urls = "libs.cache.tests.urls"

    # Shared query string used by every request.  BUG FIX: the original
    # literals contained the mojibake "¶ms_b" -- "&params_b" mangled through
    # an HTML-entity conversion (&para; -> ¶).
    QUERY = ("?dkjha=asjdaks&params_b=alsdl"
             "&ksjdkasjd=askdjakshd&asdhghashd=asjdkajs")

    def setUp(self):
        # settings.configure(default_settings=test_settings, DEBUG=True)
        # django.setup()  # for debugging under unittest
        self.client = APIClient()
        self.redis_conn = RedisConn()
        self.redis_conn().flushall()
        cache.clear()

    def tearDown(self):
        self.redis_conn().flushall()
        cache.clear()

    def _get_pk(self):
        """GET the single-object endpoint and return its body as text."""
        resp = self.client.get("/testhello/2/test_cache_with_pk/" + self.QUERY)
        return force_text(resp.content)

    def _get_list(self):
        """GET the list endpoint and return its body as text."""
        resp = self.client.get("/testhello/test_cache_with_list/" + self.QUERY)
        return force_text(resp.content)

    def test_update_list_seed(self):
        """Seed-version bumps invalidate exactly the right cache entries."""
        resource_name = "testresource"
        # Repeated GETs are served from cache and therefore identical.
        con_1 = self._get_pk()
        con_2 = self._get_pk()
        self.assertEqual(con_1, con_2)
        conc_1 = self._get_list()
        conc_2 = self._get_list()
        self.assertEqual(conc_1, conc_2)
        # Bump the list ("L") seed version: the list cache is refreshed...
        update_seed_version(resource_name, None, "L")
        conc_3 = self._get_list()
        self.assertNotEqual(conc_3, conc_1)
        # ...but single-object entries stay cached.
        con_3 = self._get_pk()
        self.assertEqual(con_3, con_1)
        # Bump the object ("O") seed for id "2": both the list cache and
        # that object's cache are refreshed.
        update_seed_version(resource_name, "2", "O")
        conc_4 = self._get_list()
        self.assertNotEqual(conc_4, conc_3)
        con_4 = self._get_pk()
        self.assertNotEqual(con_4, con_3)
        # Bump a different object's seed: object "2" stays cached...
        update_seed_version(resource_name, "33", "O")
        con_5 = self._get_pk()
        self.assertEqual(con_5, con_4)
        # ...while the list cache is refreshed again.
        conc_5 = self._get_list()
        self.assertNotEqual(conc_5, conc_4)
|
"""
A parser for .ASC files.
"""
import re
import os
from datetime import datetime
from math import sqrt
from typing import Optional, Dict, List, Tuple
from mypy_extensions import TypedDict
from sideeye.data import Fixation, Point, Trial, Item, Experiment
from sideeye.config import Configuration, ASCParsingConfig
from sideeye.types import Condition, ItemNum
class CharPosition(TypedDict, total=False):
    """Position of character in pixels."""
    # The displayed character itself.
    char: str
    # Bounding box in pixels: (x1, y1) top-left, (x2, y2) bottom-right.
    x1: int
    x2: int
    y1: int
    y2: int
    # Zero-based character index within its line (filled in by get_lines).
    char_pos: int
    # Zero-based line index within the display (filled in by get_lines).
    line_pos: int
# Line prefixes in the .ASC stream that carry information we parse.
LINE_TYPES = ["MSG", "EFIX", "EBLINK", "SYNCTIME"]
# "E<condition>I<item>D0" identifier embedded in TRIALID message lines.
ITEM_REGEX = re.compile(r"E(?P<condition>.+)I(?P<item>.+)D0")
# CHAR lines: a displayed character plus its pixel bounding box.
CHAR_REGEX = re.compile(
    r"CHAR.+(?P<char>.)\s+(?P<x1>.+)\s+(?P<y1>.+)\s+(?P<x2>.+)\s+(?P<y2>.+)"
)
# EFIX lines: fixation start/end timestamps, duration, and (x, y) position.
FIX_REGEX = re.compile(
    r"EFIX\s+\w+\s+(?P<start>\d+)\s+(?P<end>\d+)\s+(?P<dur>.+)\s+(?P<x>[\d.]+)\s+(?P<y>[\d.]+)\s+.+"
)
# TRIAL_RESULT lines: trial end timestamp.
END_REGEX = re.compile(r".+\s+(?P<end>.+)\s+TRIAL_RESULT")
# EBLINK lines: blink duration.
BLINK_REGEX = re.compile(r"EBLINK\s+.+\s+.+\s+(?P<blink_dur>.+)")
# SYNCTIME lines: trial start timestamp.
START_REGEX = re.compile(r".+\s+(?P<start>.+)\s+SYNCTIME")
def get_condition(line: str) -> Optional[str]:
    """Returns item condition in line, or None for non-TRIALID lines."""
    if "TRIALID" not in line:
        return None
    match = ITEM_REGEX.search(line)
    return match.group("condition") if match else None
def get_item(line: str) -> Optional[str]:
    """Returns item number in line, or None for non-TRIALID lines."""
    if "TRIALID" not in line:
        return None
    match = ITEM_REGEX.search(line)
    return match.group("item") if match else None
def get_char(line: str) -> Optional[CharPosition]:
    """Returns displayed character and location, or None if the line is not
    a CHAR line."""
    match = CHAR_REGEX.search(line)
    if not match:
        return None
    return CharPosition(
        char=match.group("char"),
        x1=int(match.group("x1")),
        x2=int(match.group("x2")),
        y1=int(match.group("y1")),
        y2=int(match.group("y2")),
    )
def get_start(line: str) -> Optional[int]:
    """Returns start time of trial, or None if line is not a SYNCTIME line."""
    timestamp = START_REGEX.search(line)
    if timestamp is None:
        return None
    return int(timestamp.group("start"))
def get_end(line: str) -> Optional[int]:
    """Returns end time of trial, or None if line is not a TRIAL_RESULT line."""
    timestamp = END_REGEX.search(line)
    if timestamp is None:
        return None
    return int(timestamp.group("end"))
def get_blink_dur(line: str) -> Optional[int]:
    """Returns duration of blink, or None if line is not an EBLINK line."""
    blink = BLINK_REGEX.search(line)
    if blink is None:
        return None
    return int(blink.group("blink_dur"))
def dist(x_1: float, x_2: float, y_1: float, y_2: float) -> float:
    """Returns distance between point (x_1, y_1) and (x_2, y_2)."""
    dx = x_2 - x_1
    dy = y_2 - y_1
    return sqrt(dx * dx + dy * dy)
def get_lines(characters: List[CharPosition]) -> List[CharPosition]:
    """Converts list of character locations into a list containing a list
    of characters for each line of text."""
    # Sort top-to-bottom so characters on the same text line are adjacent.
    characters = sorted(characters, key=lambda x: x["y1"])
    lines: List[List[CharPosition]] = []
    max_y1 = 0
    max_y2 = 0
    for char in characters:
        # A character that starts below the deepest bottom edge seen so far
        # begins a new line.
        if char["y1"] > max_y2:
            lines += [[]]
        if char["y1"] > max_y1:
            max_y1 = char["y1"]
        if char["y2"] > max_y2:
            max_y2 = char["y2"]
        lines[-1] += [char]
    # Re-emit every character annotated with its line index (line_pos) and
    # its left-to-right position within that line (char_pos).
    character_lines: List[List[CharPosition]] = [
        [
            CharPosition(
                char=char["char"],
                x1=char["x1"],
                x2=char["x2"],
                y1=char["y1"],
                y2=char["y2"],
                line_pos=line_pos,
                char_pos=char_pos,
            )
            for (char_pos, char) in enumerate(sorted(line, key=lambda x: x["x1"]))
        ]
        for (line_pos, line) in enumerate(lines)
    ]
    # Flatten back into a single list (the docstring's "list of lists" is
    # only an intermediate representation).
    return [char for line in character_lines for char in line]
def get_fixation(
    line: str, characters: List[CharPosition], item: Item, index: int, time_offset: int
) -> Tuple[Optional[Fixation], int]:
    """Returns a Fixation object.

    Parses an EFIX line and maps the fixation's (x, y) pixel position onto
    the character whose bounding box contains it.  Returns a
    (fixation, offset) pair; the offset is established from the first
    fixation's start time (when `time_offset` is 0/falsy) and is used to make
    all subsequent fixation times trial-relative.  Returns
    (None, time_offset) when the line is not an EFIX line or the position
    falls outside every character box.
    """
    fix = FIX_REGEX.search(line)
    if fix:
        fix_x = float(fix.group("x"))
        fix_y = float(fix.group("y"))
        # Annotate characters with line/char positions before hit-testing.
        characters = get_lines(characters)
        for char in characters:
            if (char["x1"] < fix_x < char["x2"]) and (char["y1"] < fix_y < char["y2"]):
                offset = time_offset if time_offset else int(fix.group("start"))
                return (
                    Fixation(
                        Point(char["char_pos"], char["line_pos"]),
                        int(fix.group("start")) - offset,
                        int(fix.group("end")) - offset,
                        index,
                        item.find_region(char["char_pos"], char["line_pos"]),
                    ),
                    offset,
                )
    return None, time_offset
def get_new_fixations(
    new_fixation: Fixation,
    fixations: List[Fixation],
    # NOTE(review): this default is evaluated once at import time, so every
    # call shares the same Configuration instance -- confirm that is intended.
    config: ASCParsingConfig = Configuration().asc_parsing,
) -> List[Fixation]:
    """Append a new fixation or merge with the previous fixation.

    Returns a new list (the input list is not mutated).  A fixation shorter
    than ``config.fixation_min_cutoff`` is merged with its neighbor:
    """
    # New fixation too short: extend the previous fixation's end time,
    # keeping the previous fixation's position.
    if fixations and new_fixation.duration() < config.fixation_min_cutoff:
        old_fix = fixations[-1]
        return fixations[:-1] + [
            Fixation(
                Point(old_fix.char, old_fix.line),
                old_fix.start,
                new_fixation.end,
                old_fix.index,
                old_fix.region,
            )
        ]
    # Previous fixation too short: replace it, keeping its start time and
    # index but adopting the new fixation's position and region.
    if fixations and fixations[-1].duration() < config.fixation_min_cutoff:
        old_fix = fixations[-1]
        return fixations[:-1] + [
            Fixation(
                Point(new_fixation.char, new_fixation.line),
                old_fix.start,
                new_fixation.end,
                old_fix.index,
                new_fixation.region,
            )
        ]
    # Normal case: append the new fixation to a copy of the list.
    return fixations[:] + [new_fixation]
def get_trials(
    asc: str,
    items: Dict[Condition, Dict[ItemNum, Item]],
    config: ASCParsingConfig = Configuration().asc_parsing,
) -> List[Trial]:
    """
    Parses .ASC text into a list of Trial objects.

    Trials are excluded when a saccade exceeds ``config.max_saccade_dur``,
    a blink exceeds ``config.blink_max_dur``, or the blink count exceeds
    ``config.blink_max_count``.

    Args:
        asc (string): Text of .ASC file.
        items (Dict[str, Dict[str, Item]]): List of items in experiments.
        config (ASCParsingConfig): Configuration for .ASC parsing.
    """
    # Per-trial accumulators; reset whenever a TRIAL_RESULT line is seen.
    characters: List[CharPosition] = []
    fixations: List[Fixation] = []
    fixation_start_time = 0
    trials: List[Trial] = []
    exclude = False
    blinks = 0
    start_time = 0
    condition: Optional[str] = None
    item: Optional[str] = None
    for line in asc.split("\n"):
        # Only MSG / EFIX / EBLINK / SYNCTIME lines carry information.
        if line.split() and line.split()[0] in LINE_TYPES:
            start_time = get_start(line) or start_time
            condition = get_condition(line) or condition
            item = get_item(line) or item
            char = get_char(line)
            characters = characters + [char] if char else characters
            # NOTE(review): the lookup order is items[item][condition] here,
            # but the annotation declares Dict[Condition, Dict[ItemNum, Item]]
            # -- one of the two appears inverted; verify against the callers.
            new_fixation, fixation_start_time = (
                get_fixation(
                    line,
                    characters,
                    items[item][condition],
                    len(fixations),
                    fixation_start_time,
                )
                if start_time
                and item
                and condition
                and item in items
                and condition in items[item]
                else (None, fixation_start_time)
            )
            fixations = (
                get_new_fixations(new_fixation, fixations, config)
                if new_fixation
                else fixations
            )
            # Exclude the trial if the gap between consecutive fixations
            # (the saccade) is too long.
            if (
                config.max_saccade_dur
                and len(fixations) > 1
                and fixations[-1].start - fixations[-2].end > config.max_saccade_dur
            ):
                exclude = True
            blink_dur = get_blink_dur(line)
            if blink_dur:
                blinks += 1
                if config.blink_max_dur and blink_dur > config.blink_max_dur:
                    exclude = True
                if config.blink_max_count and blinks > config.blink_max_count:
                    exclude = True
            end_time = get_end(line)
            if end_time:
                # Trial boundary: emit the trial (unless excluded or its
                # item/condition is unknown) and reset all accumulators.
                if (
                    item
                    and condition
                    and item in items
                    and condition in items[item]
                    and not exclude
                ):
                    trials += [
                        Trial(
                            len(trials),
                            end_time - start_time,
                            items[item][condition],
                            fixations,
                        )
                    ]
                start_time = 0
                fixations = []
                fixation_start_time = 0
                characters = []
                exclude = False
                blinks = 0
                item = None
                condition = None
    return trials
def parse(
    asc_file: str,
    items: Dict[Condition, Dict[ItemNum, Item]],
    config: ASCParsingConfig = Configuration().asc_parsing,
):
    """
    Parses a .ASC file into an Experiment object.

    The experiment name is the file's base name with its extension (and any
    interior dots) removed; the file's modification time is recorded as the
    experiment date.

    Args:
        asc_file (string): Path to .ASC file.
        items (Dict[str, Dict[str, Item]]): List of items in experiments.
        config (ASCParsingConfig): Configuration for .ASC parsing.
    """
    with open(asc_file) as asc_handle:
        contents = asc_handle.read()
    trials = get_trials(contents, items, config)
    basename = os.path.split(asc_file)[1]
    experiment_name = "".join(basename.split(".")[:-1])
    return Experiment(
        experiment_name,
        trials,
        asc_file,
        datetime.fromtimestamp(os.path.getmtime(asc_file)),
    )
|
<filename>IQA_v9.py
import argparse
import sys
import time
import torch.nn.functional as F
from scipy import stats
from torch import nn
from torch.optim import lr_scheduler
from data import *
from models.modules.architecture import resnet50_backbone, resnet34_backbone
from models.modules.attns import MultiSpectralAttentionLayer
from models.modules.common import default_Linear
from settings import options as option
from utils import util
from utils.logger import PrintLogger
def load_network(load_path, network, strict=True):
    """Load the state dict at `load_path` into `network`, unwrapping an
    nn.DataParallel wrapper first so the checkpoint keys line up."""
    target = network.module if isinstance(network, nn.DataParallel) else network
    state_dict = torch.load(load_path)
    target.load_state_dict(state_dict, strict=strict)
def freeze_params(m):
    """Disable gradient tracking for every parameter of module `m`."""
    for p in m.parameters():
        p.requires_grad_(False)
class DebugIQA(torch.nn.Module):
    """Full-reference IQA network.

    Runs a distorted image and its reference through a shared ResNet-50
    backbone, and at five depths compares the two feature maps in two ways:
    a learned projection of the channel-interleaved concatenation, and a
    per-channel cosine similarity.  All ten feature vectors are concatenated
    and regressed to a single quality score.
    """
    def __init__(self, options):
        # options['fc']: when True, only the projection/fc heads are trained
        # and the ResNet backbone is frozen.
        nn.Module.__init__(self)
        res = resnet50_backbone(pretrained=True)
        # Split the ResNet stem so features can be tapped right after conv1.
        self.head1 = res.conv1
        self.head2 = nn.Sequential(
            res.bn1,
            res.relu,
            res.maxpool
        )
        self.layer1 = res.layer1
        self.layer2 = res.layer2
        self.layer3 = res.layer3
        self.layer4 = res.layer4
        if options['fc']:
            freeze_params(self.head1)
            freeze_params(self.head2)
            freeze_params(self.layer1)
            freeze_params(self.layer2)
            freeze_params(self.layer3)
            freeze_params(self.layer4)
        # project0: reduces the 2*64-channel stem comparison to one value per
        # channel (final depthwise conv has kernel = full spatial extent).
        nc = 64
        self.project0 = nn.Sequential(
            nn.Conv2d(nc * 2, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 112, 112), nn.PReLU(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.PReLU(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 112, 112), nn.PReLU(nc),
            nn.Conv2d(nc, nc, 112, groups=nc, bias=False)
        )
        # project1: layer1 comparison; strided convs downsample 56 -> 7
        # before the final full-extent depthwise conv.
        nc = 256
        self.project1 = nn.Sequential(
            nn.Conv2d(nc * 2, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 56, 56), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 28, 28), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 14, 14), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 7, 7), nn.ReLU(True),
            nn.Conv2d(nc, nc, 7, groups=nc, bias=False)
        )
        # project2: layer2 comparison (28 -> 7).
        nc = 512
        self.project2 = nn.Sequential(
            nn.Conv2d(nc * 2, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 28, 28), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 14, 14), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 7, 7), nn.ReLU(True),
            nn.Conv2d(nc, nc, 7, groups=nc, bias=False)
        )
        # project3: layer3 comparison (14 -> 7).
        nc = 1024
        self.project3 = nn.Sequential(
            nn.Conv2d(nc * 2, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 14, 14), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 2, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 7, 7), nn.ReLU(True),
            nn.Conv2d(nc, nc, 7, groups=nc, bias=False)
        )
        # project4: layer4 comparison (already at 7x7).
        nc = 2048
        self.project4 = nn.Sequential(
            nn.Conv2d(nc * 2, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            nn.Conv2d(nc, nc, 3, 1, 1, groups=nc, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 7, 7), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc), nn.ReLU(True),
            nn.Conv2d(nc, nc, 3, 1, 1, bias=False), nn.BatchNorm2d(nc),
            MultiSpectralAttentionLayer(nc, 7, 7), nn.ReLU(True),
            nn.Conv2d(nc, nc, 7, groups=nc, bias=False)
        )
        # Ten feature vectors (5 projections + 5 cosine vectors), each of the
        # per-stage channel count -> total input width for the regressor.
        num = (64 + 256 + 512 + 1024 + 2048) * 2
        self.big_fc = nn.Sequential(
            default_Linear(num, num),
            default_Linear(num, 1)
        )
    def forward(self, x_dis, x_ref):
        """Return a quality score for `x_dis` relative to `x_ref`.

        NOTE(review): `.squeeze()` below drops ALL singleton dims, so a
        batch of size 1 would also lose its batch dimension -- confirm
        batch size > 1 is guaranteed by the loaders.
        """
        x_dis = self.head1(x_dis)
        x_ref = self.head1(x_ref)
        cat0, c0 = self._getsim(x_dis, x_ref)
        cat0 = self.project0(cat0).squeeze()
        x_dis = self.head2(x_dis)
        x_ref = self.head2(x_ref)
        x_dis = self.layer1(x_dis)
        x_ref = self.layer1(x_ref)
        cat1, c1 = self._getsim(x_dis, x_ref)
        cat1 = self.project1(cat1).squeeze()
        x_dis = self.layer2(x_dis)
        x_ref = self.layer2(x_ref)
        cat2, c2 = self._getsim(x_dis, x_ref)
        cat2 = self.project2(cat2).squeeze()
        x_dis = self.layer3(x_dis)
        x_ref = self.layer3(x_ref)
        cat3, c3 = self._getsim(x_dis, x_ref)
        cat3 = self.project3(cat3).squeeze()
        x_dis = self.layer4(x_dis)
        x_ref = self.layer4(x_ref)
        cat4, c4 = self._getsim(x_dis, x_ref)
        cat4 = self.project4(cat4).squeeze()
        # Concatenate cosine-similarity vectors and projected features, then
        # regress to a single score.
        out = self.big_fc(torch.cat([c0,c1,c2,c3,c4,cat0,cat1,cat2,cat3,cat4], dim=1))
        return out
    def _getsim(self, x_dis, x_ref):
        """Return (interleaved concatenation, per-channel cosine similarity)
        of the two feature maps."""
        catandrearranged = self._rearrange(x_dis, x_ref)
        # Flatten spatial dims and compare each channel's response pattern.
        x_dis = x_dis.view(x_dis.size(0), x_dis.size(1), -1)
        x_ref = x_ref.view(x_ref.size(0), x_ref.size(1), -1)
        cosineDist = torch.cosine_similarity(x_dis, x_ref, dim=2)
        return catandrearranged, cosineDist
    @staticmethod
    def _rearrange(x_dis, x_ref):
        """Concatenate the two maps along channels, interleaved so channel i
        of the distorted map sits next to channel i of the reference
        (required by the groups=nc convs in the projection heads)."""
        def _shuffle(a, n):
            # Interleave the first n entries of `a` with the second n:
            # [d0, r0, d1, r1, ...].
            result=[]
            for i in range(len(a)):
                if i == n:
                    break
                result.append(a[i])
                result.append(a[i+n])
            return result
        C = x_dis.size(1)
        out = torch.cat([x_dis, x_ref], dim=1)
        idx = _shuffle(list(range(C*2)), C)
        out = out[:, idx, :, :]
        return out
class IQAManager(object):
    """Driver that builds, trains and evaluates the DebugIQA network.

    The ``options`` dict is produced by ``option.parse`` elsewhere in the
    project; keys read here include 'fc', 'gpu_ids', 'base_lr',
    'weight_decay', 'lr_scheme', 'lr_steps', 'lr_gamma', 'epochs',
    'test_times', 'loss_iqa_weight' and the 'path' sub-dict.
    NOTE(review): training/evaluation assume CUDA is available (``.cuda()``
    calls below ignore self._device) — confirm before running on CPU.
    """

    def __init__(self, options):
        """Build network, criterion, optimizer, scheduler and data loaders."""
        print('Preparing the network.')
        self._options = options
        self._device = torch.device('cuda' if options['gpu_ids'] is not None else 'cpu')
        # cudnn autotuning only pays off on GPU.
        torch.backends.cudnn.benchmark = True if options['gpu_ids'] is not None else False
        self.schedulers = []
        # Network.
        self._net = torch.nn.DataParallel(DebugIQA(options)).to(self._device)
        if not self._options['fc']:
            # Fine-tune stage: warm-start from the weights saved by the
            # FC-only stage (see train(), which writes to 'fc_root').
            self._net.load_state_dict(torch.load(self._options['path']['fc_root']))
            print('FC model loaded.')
        else:
            print(self._net)
        # # print(self._net)
        self.print_network(self._net)
        # Criterion.
        # self._criterion = torch.nn.MSELoss().to(self._device)
        if self._options['fc']:
            self._criterion = torch.nn.L1Loss().to(self._device)
        else:
            self._criterion = torch.nn.MSELoss().to(self._device)
        self._criterion_weight = self._options['loss_iqa_weight']
        # Solver. FC stage optimizes only parameters left trainable by the
        # model (requires_grad filter); fine-tune stage optimizes everything.
        if self._options['fc']:
            self._solver = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self._net.module.parameters()),
                lr=self._options['base_lr'],
                # momentum=0.9,
                weight_decay=self._options['weight_decay']
            )
        else:
            self._solver = torch.optim.Adam(
                self._net.module.parameters(),
                lr=self._options['base_lr'],
                weight_decay=self._options['weight_decay']
            )
        if self._options['lr_scheme'] == 'MultiStepLR':
            self.schedulers.append(
                lr_scheduler.MultiStepLR(self._solver, self._options['lr_steps'], self._options['lr_gamma']))
        else:
            raise NotImplementedError('MultiStepLR learning rate scheme is enough.')
        # Dataset. 'phase' is mutated on the shared options dict to select
        # the split each create_dataset call builds.
        self._options['phase'] = 'train'
        train_set = create_dataset(self._options)
        self._train_loader = create_dataloader(train_set, self._options)
        self._options['phase'] = 'test'
        test_set = create_dataset(self._options)
        self._test_loader = create_dataloader(test_set, self._options)

    def train(self):
        """Train the network.

        Runs options['epochs'] epochs; after each epoch evaluates on the
        test split and checkpoints the model whenever test SRCC improves.

        Returns:
            (float, float): Best test SRCC and best test PLCC observed.
        """
        print('------Training------')
        best_srcc = 0.0
        best_plcc = 0.0
        best_epoch = None
        print('Epoch\tTime\tLearning_rate\tLoss_IQA\t', end='')
        print('Train_SRCC\tTest_SRCC\tTest_PLCC')
        start_time = time.time()
        for t in range(self._options['epochs']):
            epoch_loss = []
            pscores = []  # predicted scores accumulated over the epoch
            tscores = []  # ground-truth scores accumulated over the epoch
            num_total = 0
            for XX in self._train_loader:
                # Each batch dict carries distorted image, label and reference.
                X = XX['Dis'].clone().detach().cuda()  # torch.tensor(X.cuda())
                y_score = XX['Label'].clone().detach().cuda()
                X_ref = XX['Ref'].clone().detach().cuda()
                self._solver.zero_grad()
                score = self._net(X, X_ref)
                loss = self._criterion_weight * self._criterion(score, y_score.view(len(score), 1).float().detach())
                epoch_loss.append(loss.item())
                num_total += y_score.size(0)
                pscores = pscores + score.squeeze(dim=1).cpu().tolist()
                tscores = tscores + y_score.cpu().tolist()
                # Backward pass.
                loss.backward()
                self._solver.step()
            train_srcc, _ = stats.spearmanr(pscores, tscores)
            test_srcc, test_plcc = self._consistency()
            if test_srcc > best_srcc:
                best_srcc = test_srcc
                best_epoch = t + 1
                print('*', end='')
                # Checkpoint to the path matching the current stage.
                if self._options['fc']:
                    modelpath = self._options['path']['fc_root']
                else:
                    modelpath = self._options['path']['db_root']
                torch.save(self._net.state_dict(), modelpath)
            if test_plcc > best_plcc:
                best_plcc = test_plcc
            time_elapsed = time.time() - start_time
            start_time = time.time()
            print('%d\t%ds\t%.1e\t\t%4.3f\t\t' %
                  (t + 1, time_elapsed, self.get_current_learning_rate(),
                   sum(epoch_loss) / len(epoch_loss)), end='')
            print('%4.4f\t\t%4.4f\t\t%4.4f' % (train_srcc, test_srcc, test_plcc))
            self.update_learning_rate()
        # NOTE(review): if no epoch improves on 0.0, best_epoch stays None
        # and this %d format raises — confirm SRCC is always positive here.
        print('Best at epoch %d, test srcc: %f, test plcc: %f' % (best_epoch, best_srcc, best_plcc))
        return best_srcc, best_plcc

    def _consistency(self):
        """Evaluate on the test split, averaging over 'test_times' passes.

        Multiple passes presumably smooth out randomness in the test-time
        augmentation of the loader — TODO confirm. Returns absolute SRCC
        and PLCC of the averaged predictions.
        """
        self._net.eval()
        final_pscores = []
        # final_tscores = []
        for i in range(0, self._options['test_times']):
            # num_total = 0
            pscores = []
            tscores = []
            for XX in self._test_loader:
                # Data.
                X = XX['Dis'].clone().detach().cuda()  # torch.tensor(X.cuda())
                y_score = XX['Label'].clone().detach().cuda()
                X_ref = XX['Ref'].clone().detach().cuda()
                # Prediction.
                with torch.no_grad():
                    score = self._net(X, X_ref)
                pscores = pscores + score.squeeze(dim=1).cpu().tolist()  # score[0].cpu().tolist()
                tscores = tscores + y_score.cpu().tolist()
                # num_total += y_score.size(0)
            final_pscores.append(pscores)
            # final_tscores.append(tscores)
        # Average predictions across passes; tscores from the last pass is
        # reused (labels are identical across passes for a fixed loader order
        # — TODO confirm the loader does not shuffle in the test phase).
        pscores = np.mean(final_pscores, axis=0)
        pscores = pscores.squeeze().tolist()
        # tscores = np.mean(final_tscores, axis=0)
        # tscores = tscores.tolist()
        test_srcc, _ = stats.spearmanr(pscores, tscores)
        test_plcc, _ = stats.pearsonr(pscores, tscores)
        self._net.train()
        return abs(test_srcc), abs(test_plcc)

    def update_learning_rate(self):
        """Advance every attached LR scheduler by one epoch."""
        for scheduler in self.schedulers:
            scheduler.step()

    def get_current_learning_rate(self):
        """Return the LR of the first parameter group of the optimizer."""
        return self._solver.param_groups[0]['lr']

    @staticmethod
    def print_network(network):
        """Print the total number of parameters of `network`."""
        if isinstance(network, nn.DataParallel):
            network = network.module
        s = str(network)  # NOTE(review): unused; kept as in original.
        n = sum(map(lambda x: x.numel(), network.parameters()))  # numle: get number of all elements
        print('Number of parameters in G: {:,d}'.format(n))
def main():
    """Run the two-stage IQA training over several random train/test splits.

    Parses the -opt JSON config, prepares result directories and logging,
    then for each of options['epoch'] rounds: shuffles the dataset index,
    takes an 80/20 train/test split, trains the FC layer alone, then
    fine-tunes the whole model, and records the best SRCC/PLCC of the two
    stages. Finally reports mean/median correlations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
    options = option.parse(parser.parse_args().opt)  # load settings and initialize settings
    util.mkdir_and_rename(options['path']['results_root'])  # rename old experiments if exists
    util.mkdirs((path for key, path in options['path'].items()
                 if not key == 'experiments_root' and
                 not key == 'saved_model' and
                 not key == 'fc_root' and
                 not key == 'db_root'))
    option.save(options)
    options = option.dict_to_nonedict(options)  # Convert to NoneDict, which return None for missing key.
    # Redirect all writes to the "txt" file
    sys.stdout = PrintLogger(options['path']['log'])
    if options['manual_seed'] is not None:
        random.seed(options['manual_seed'])
    index = None
    if options['dataset'] == 'pipal':
        # PIPAL provides 200 reference images to split on.
        index = list(range(0, 200))
    else:
        raise NotImplementedError
    # The per-round loops below mutate these options, so back them up.
    lr_backup = options['base_lr']
    lr_steps_backup = options['lr_steps']
    epochs_backup = options['epochs']
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float (== np.float64 as a dtype) is the correct spelling.
    srcc_all = np.zeros((1, options['epoch']), dtype=float)
    plcc_all = np.zeros((1, options['epoch']), dtype=float)
    print('Total epochs:' + str(options['epoch']))
    for i in range(0, options['epoch']):
        # randomly split train-test set
        random.shuffle(index)
        train_index = index[0:round(0.8 * len(index))]
        test_index = index[round(0.8 * len(index)):len(index)]
        options['train_index'] = train_index
        options['test_index'] = test_index
        # Stage 1: train the fully connected layer only.
        print('[No.%d/%d] Training the FC layer...' % (i, options['epoch']))
        options['fc'] = True
        options['base_lr'] = lr_backup
        options['lr_steps'] = lr_steps_backup
        options['epochs'] = epochs_backup
        manager = IQAManager(options)
        best_srcc1, best_plcc1 = manager.train()
        # Stage 2: fine-tune the whole model with the step-2 hyperparameters.
        print('[No.%d/%d] Fine-tune all model...' % (i, options['epoch']))
        options['fc'] = False
        options['base_lr'] = options['base_lr_step2']
        options['lr_steps'] = options['lr_steps_step2']
        options['epochs'] = options['epochs_step2']
        manager = IQAManager(options)
        best_srcc2, best_plcc2 = manager.train()
        # Keep the better of the two stages for this round.
        srcc_all[0][i] = np.max([best_srcc1, best_srcc2])
        plcc_all[0][i] = np.max([best_plcc1, best_plcc2])
    srcc_mean = np.mean(srcc_all)
    srcc_median = np.median(srcc_all)
    plcc_mean = np.mean(plcc_all)
    plcc_median = np.median(plcc_all)
    print(srcc_all)
    print('average srcc:%4.4f' % srcc_mean)
    print('median srcc:%4.4f' % srcc_median)
    print(plcc_all)
    print('average plcc:%4.4f' % plcc_mean)
    print('median plcc:%4.4f' % plcc_median)
    print('--------------Finish! [' + options['name'] + ']--------------')
    util.mkdir_and_rename(options['path']['results_root'], ('done_{:.4f}'.format(srcc_median)))


if __name__ == '__main__':
    main()
|
# -*- coding: UTF-8 -*-
"""Plot OSIRIS-generated data."""
from __future__ import print_function
from __future__ import division
from glob import glob
import os
import re
from math import floor
import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
import matplotlib.colors as colors
from matplotlib.animation import FuncAnimation, FFMpegWriter
from duat.common import ensure_dir_exists, human_order_key, MPCaller, Call, logger
from bisect import bisect_left, bisect_right
def _is_latex(s):
"""
Decide if a string is a LaTeX expression.
Args:
s (str): The string to test.
Returns:
bool: True if it seems a LaTeX expression, False otherwise.
"""
if not s: # Empty string is not LaTeX
return False
return re.match("^.(_.)?$", s) or "\\" in s # If in the form x, x_i, or if there is a backlash (command)
def _improve_latex(s):
"""Improve an OSIRIS-generated latex string using common rules."""
# Descriptive subindexes in roman
s2 = s.replace(r"\omega_p", r"\omega_{\mathrm{p}}")
s2 = s2.replace("m_e", r"m_{\mathrm{e}}")
# "Arbitrary units" in roman
s2 = s2.replace("a.u.", r"\mathrm{a.u.}")
return s2
def _create_label(name, units, latex_label=False):
"""
Prepare a label for a plot
Args:
name (str): Text describing the magnitude
units (str): Text describing the units
latex_label (bool): Whether to use LaTeX
Returns:
str: The desired label
"""
if latex_label:
if units:
units = "$" + _improve_latex(units) + "$"
# Names might be text or LaTeX. Try to guess
if _is_latex(name):
name = "$" + _improve_latex(name) + "$"
if units:
return "%s (%s)" % (name, units)
else:
return name
def _autonorm(norm, z):
"""
Automatic options for color plot normalization.
Args:
norm (str or other): Description of the norm.
z (matrix of numbers): Data.
Returns:
(`matplotlib.colors.Normalize`): A suitable normalize option (or None)
"""
if isinstance(norm, str):
norm = norm.lower()
if norm == "lin":
norm = None
if norm == "log":
masked_z = np.ma.masked_where(z <= 0, z)
z_max = masked_z.max()
z_min = masked_z.min()
z_min = max(z_min, z_max / 1E9)
norm = colors.LogNorm(vmin=z_min, vmax=z_max)
return norm
def _fix_colorbar(cbar):
    """Work around matplotlib issue #8358 by hiding the minor ticks of
    logarithmically scaled colorbars."""
    if isinstance(cbar.norm, (colors.LogNorm, colors.SymLogNorm)):
        cbar.ax.minorticks_off()
class Diagnostic:
"""
An OSIRIS diagnostic.
Attributes:
data_path (str): Path to the directory were the data is stored.
data_name (str): A friendly name for the data.
units (str): The name of the unit the magnitude is measured in.
dt (float): The time step between snapshots of a consecutive number.
t_0 (float): The time of the first snapshot.
time_units (str): The name of the unit of time.
file_list (list of str): List of h5 files, one per time snapshot.
snapshot_list (list of int): List of integers identifying the snapshots. Multiply by dt to get time.
keys (list of str): Names of the datasets in the Diagnostic, given in human order.
axes (list of dict): Info of each axis in the Diagnostic.
datasets_as_axis(dict): Info of datasets if treated as axes. WARNING: Only used with energy bins.
shape (tuple): A tuple with:
* list: The number of grid dimensions.
* int: The number of datasets (excluding axes definition).
* int: The number of snapshots in time.
Note:
The axes list is provided in the order of the numpy convention for arrays. This is the opposite of order used
to label the axes in the hdf5 files. For example in a 2d array the first axes will be the labeled as AXIS2, and
the second will be AXIS1. Unless the user makes use of other external tools to read the data, he/she can safely
ignore this note.
"""
    def __init__(self, data_path):
        """
        Create a Diagnostic instance.

        Args:
            data_path (str): Path of the directory containing the diagnostic data.

        Raises:
            ValueError: If there is no data in `data_path`.
        """
        self.data_path = data_path
        self.file_list = glob(os.path.join(data_path, "*.h5"))
        if not self.file_list:
            raise ValueError("No diagnostic data found in %s" % data_path)
        self.file_list.sort(key=human_order_key)
        # The snapshot number is the trailing integer of the file name,
        # e.g., "charge-000012.h5" -> 12.
        self.snapshot_list = list(
            map(lambda x: int((os.path.split(x)[1]).split(".h5")[0].split("-")[-1]), self.file_list))
        # Get info from first time snapshot
        with h5py.File(self.file_list[0], "r") as f:
            self.t_0 = f.attrs["TIME"][0]
            self.time_units = f.attrs["TIME UNITS"][0].decode('UTF-8')
            self.keys = self._get_keys(f)
            self.axes = self._get_axes(f, self.keys[0])
            self.units = f[self.keys[0]].attrs["UNITS"][0].decode('UTF-8')
            if len(self.keys) > 1:
                # Take a general name from the global attribute
                self.data_name = f.attrs["NAME"][0].decode('UTF-8')
                # Construct an axes-like object representing the dataset
                self.datasets_as_axis = {}
                # Guess name
                name = f[self.keys[0]].attrs["TAG"][0].decode('UTF-8')  # E.g., "Energy <= 0.00000000"
                name = re.match("(.*?) <", name).group(1)  # TODO: Catch errors, check syntax
                self.datasets_as_axis["NAME"] = name
                self.datasets_as_axis["LONG_NAME"] = name
                # Guess values
                dataset_axes = []
                number_pattern = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'  # A pattern matching floats
                for d in self.keys:
                    tag = f[d].attrs["TAG"][0].decode('UTF-8')
                    # tag is, e.g., ' 0.00000000 < Energy <= 5.00000007E-02'
                    values = list(map(float, re.findall(number_pattern, tag)))
                    dataset_axes.append(sum(values) / len(values))  # So simple inequalities are represented somehow
                    # TODO: First and last datasets are not bounded intervals. Their only finite point is used as the x coordinate, which must be understood with caution
                self.datasets_as_axis["LIST"] = dataset_axes
                self.datasets_as_axis["MIN"] = dataset_axes[0]
                self.datasets_as_axis["MAX"] = dataset_axes[-1]
                self.datasets_as_axis["UNITS"] = "?"
            else:
                # Take a specific name from the dataset
                self.data_name = f[self.keys[0]].attrs["LONG_NAME"][0].decode('UTF-8')
                # No axes-like object in this case
                self.datasets_as_axis = None
        if len(self.file_list) < 2:
            self.dt = 0
        else:
            # Assumes snapshots are equally spaced in time and numbered
            # proportionally to time — TODO confirm for skipped dumps.
            with h5py.File(self.file_list[1], "r") as f:
                self.dt = f.attrs["TIME"][0] / self.snapshot_list[1] - self.t_0
        self.shape = ([len(x["LIST"]) for x in self.axes], len(self.keys), len(self.snapshot_list))
def __repr__(self):
return "Diagnostic<%s %s>" % (self.data_name, str(self.shape))
@staticmethod
def _get_keys(file):
"""Get the dataset keys from an opened file."""
keys = list(file.keys())
if "AXIS" not in keys:
raise ValueError("AXIS group not found.")
keys.remove("AXIS")
keys.sort(key=human_order_key)
return keys
    @staticmethod
    def _get_axes(file, dataset_key=None):
        """Get the axes info.

        Args:
            file: An opened h5py file of the diagnostic.
            dataset_key (str): Dataset whose shape fixes the number of grid
                points per axis; defaults to the first dataset.

        Returns:
            list of dict: One dict per axis with LONG_NAME/UNITS/NAME/TYPE,
            MIN/MAX and a linearly spaced LIST of grid coordinates.
        """
        if dataset_key is None:
            dataset_key = Diagnostic._get_keys(file)[0]
        axes = []
        # The AXIS group lists AXIS1..AXISn in the order opposite to the
        # numpy array convention, hence the reversed iteration (see the
        # note in the class docstring).
        for i, axis in enumerate(reversed(list(file["AXIS"].keys()))):
            ax = file["AXIS"][axis]
            data = {}
            for d in ["LONG_NAME", "UNITS", "NAME", "TYPE"]:
                data[d] = ax.attrs[d][0].decode('UTF-8')
            data["MIN"], data["MAX"] = ax[:]
            # TODO: Non linear axis support
            data["LIST"] = np.linspace(data["MIN"], data["MAX"], num=file[dataset_key].shape[i])
            axes.append(data)
        return axes
def _clean_dataset_key(self, dataset_key):
"""Return the given dataset key as `str`, using human order if `int`. Might raise error or warning."""
if isinstance(dataset_key, int):
dataset_key = self.keys[dataset_key]
elif isinstance(dataset_key, str):
if dataset_key not in self.keys:
raise ValueError("Dataset %s does not exist in the file." % dataset_key)
elif dataset_key is None:
if len(self.keys) != 1: # Warn if implicitly selecting one among others.
logger.warning("No dataset selected when multiple are available. Plotting the first one.")
dataset_key = self.keys[0]
else:
raise TypeError("Unknown dataset type: %s", type(dataset_key))
return dataset_key
def _scaled_slice_to_slice(self, scaled_slice):
return scaled_slice._get_slice(self.t_0, self.dt * len(self.snapshot_list), len(self.snapshot_list))
    def get_generator(self, dataset_selector=None, axes_selector=None, time_selector=None):
        """
        Get a generator providing data from the file.

        Calling this method returns a generator which, when called, will provide data for increasing times (unless
        modified by time_selector parameter). The data might be reduced either by selecting a position in an axis (or
        a dataset) or by using a function along some axis (or datasets), e.g., a sum.

        This data is provided as numpy arrays where the first axis refers to dataset coordinate (if present) and next
        to (non-reduced) axis in the order they are found in the files.

        Args:
            dataset_selector (str, int or callable): Instructions to reduce datasets. An int selects a dataset in human
                order, a str selects it by name. A function taking a list and returning a scalar can be used to reduce
                the data, e.g., sum, mean...
            axes_selector (tuple): Instructions to reduce axes data. It must be
                a tuple of the same length of the number axes or None to perform no reduction.
                Each element can be of the following types:

                * int: Select the item in the given position.
                * None: No reduction is performed in this axis.
                * ScaledFunction: A function applied in a range selected by simulation units.
                * callable (default): Reduce the data along this axes using the given function (e.g., mean, max, sum...).
            time_selector (slice or ScaledSlice): A slice or ScaledSlice instance selecting the points in time to take.
                A slice selects times from the list returned by :func:`~duat.osiris.plot.Diagnostic.get_time_list`.
                A ScaledSlice chooses a slice that best represents a choice in terms of time units.

        Returns:
            generator: A generator which provides the data.
        """
        multiple_datasets = False  # If a dataset list is going to be returned
        # Build f_dataset_selector: maps an open h5 file to a (possibly
        # dataset-reduced) numpy array.
        if dataset_selector is not None:
            if self.shape[1] == 1:
                logger.warning("Single dataset found. Ignoring the provided dataset_selector.")

                def f_dataset_selector(f):
                    return f[self.keys[0]][:]
            else:
                if isinstance(dataset_selector, int):
                    dataset_selector = self.keys[dataset_selector]
                if isinstance(dataset_selector, str):  # If it was int or str

                    def f_dataset_selector(f):
                        return f[dataset_selector][:]
                else:  # Assumed function

                    def f_dataset_selector(f):
                        return np.apply_along_axis(dataset_selector, 0, [f[key][:] for key in self.keys])
        else:
            if self.shape[1] > 1:
                # No selector: keep all datasets stacked on a leading axis.
                multiple_datasets = True

                def f_dataset_selector(f):
                    return np.array([f[key][:] for key in self.keys])
            else:
                def f_dataset_selector(f):
                    return f[self.keys[0]][:]
        # Build f_axes_selector: reduces the spatial axes of an array.
        if axes_selector is not None:
            if len(axes_selector) != len(self.axes):
                raise ValueError(
                    "Invalid axes_selector parameter. Length must be %d. Check the axes of the Diagnostic instance." % len(
                        self.axes))

            def f_axes_selector(x):
                offset = 1 if multiple_datasets else 0  # If multiple dataset, do not count its axis for reduction
                # Each reduction removes an axis, so the offset grows as we go.
                # NOTE(review): i - offset can be negative (numpy negative
                # indexing); verify against the axis-ordering note in the
                # class docstring.
                for i, sel in enumerate(axes_selector):
                    if sel is not None:
                        if isinstance(sel, int):
                            x = np.take(x, sel, axis=i - offset)
                        elif isinstance(sel, ScaledFunction):
                            x = np.apply_along_axis(sel._get_function(self.axes[i]['LIST']), i - offset, x)
                        else:  # Assumed function
                            x = np.apply_along_axis(sel, i - offset, x)
                        offset += 1
                return x
        else:
            def f_axes_selector(x):
                return x
        # Normalize time_selector to a plain slice (or None).
        if time_selector is not None:
            if isinstance(time_selector, ScaledSlice):
                time_selector = self._scaled_slice_to_slice(time_selector)
            elif not isinstance(time_selector, slice):
                logger.warning("Invalid time_selector parameter ignored. Use a slice or a ScaledSlice instead.")
                time_selector = None

        def gen():
            for file_name in (self.file_list[time_selector] if time_selector else self.file_list):
                with h5py.File(file_name, "r") as f:
                    data = f_dataset_selector(f)
                # Make sure to exit the context manager before yielding
                # h5py might accuse you of murdering identifiers if you don't!
                yield f_axes_selector(data)

        return gen()
    def get_axes(self, dataset_selector=None, axes_selector=None):
        """
        Get a dictionary with the info of the axes obtained as result of a given reduction.

        Args:
            dataset_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            axes_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.

        Returns:
            list of dict: Ordered list of the axes left by the reduction.
        """
        axes = []
        if dataset_selector is not None:
            if self.shape[1] == 1:
                logger.warning("Single dataset found. Ignoring the provided dataset_selector.")
            # Anyhow, datasets are reduced, so skip
        elif self.shape[1] > 1:
            # No selector and multiple datasets: the dataset axis survives,
            # mirroring multiple_datasets in get_generator.
            axes.append(self.datasets_as_axis)
        if axes_selector is not None:
            if len(axes_selector) != len(self.axes):
                raise ValueError(
                    "Invalid axes_selector parameter. Length must be %d. Check the axes of the Diagnostic instance." % len(
                        self.axes))
            # Only axes with a None selector (no reduction) remain.
            for i, sel in enumerate(axes_selector):
                if sel is None:
                    axes.append(self.axes[i])
        else:
            for a in self.axes:
                axes.append(a)
        return axes
def get_time_list(self, time_selector=None):
"""
Get the list of times obtained as a result of a given slice.
Args:
time_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
Returns:
:obj:`list` of :obj:`float`: The times resulting as a consequence of the slice.
"""
if time_selector:
if isinstance(time_selector, slice):
# This could be improved to avoid generating unneeded values
return [self.t_0 + self.dt * i for i in self.snapshot_list][time_selector]
elif isinstance(time_selector, ScaledSlice):
return self.get_time_list(self._scaled_slice_to_slice(time_selector))
else:
raise TypeError("time_selector must be a slice or a ScaledSlice")
else:
return [self.t_0 + self.dt * i for i in self.snapshot_list]
    def time_1d_animation(self, output_path=None, dataset_selector=None, axes_selector=None, time_selector=None,
                          dpi=200, fps=1, scale_mode="expand",
                          latex_label=True, interval=200):
        """
        Generate a plot of 1d data animated in time.

        If an output path with a suitable extension is supplied, the method will export it. Available formats are mp4
        and gif. The returned objects allow for minimal customization and representation. For example in Jupyter you
        might use `IPython.display.HTML(animation.to_html5_video())`, where `animation` is the returned `FuncAnimation`
        instance.

        Note:
            Exporting a high resolution animated gif with many frames might eat your RAM.

        Args:
            output_path (str): The place where the plot is saved. If "" or None, the plot is shown in matplotlib.
            dataset_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            axes_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            time_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            interval (float): Delay between frames in ms. If exporting to mp4, the fps is used instead to generate the
                file, although the returned objects do use this value.
            dpi (int): The resolution of the frames in dots per inch (only if exporting).
            fps (int): The frames per seconds (only if exporting to mp4).
            scale_mode (str): How the scale is changed through time. Available methods are:

                * "expand": The y limits increase when needed, but they don't decrease.
                * "adjust_always": Always change the y limits to those of the data.
                * "max": Use the maximum range from the beginning.
            latex_label (bool): Whether to use LaTeX code for the plot.

        Returns:
            (`matplotlib.figure.Figure`, `matplotlib.axes.Axes`, `matplotlib.animation.FuncAnimation`):
            Objects representing the generated plot and its animation.

        Raises:
            FileNotFoundError: If tried to export to mp4 but ffmpeg is not found in the system.
        """
        if output_path:
            ensure_dir_exists(os.path.dirname(output_path))
        axes = self.get_axes(dataset_selector=dataset_selector, axes_selector=axes_selector)
        if len(axes) != 1:
            raise ValueError("Expected 1 axis plot, but %d were provided" % len(axes))
        axis = axes[0]
        gen = self.get_generator(dataset_selector=dataset_selector, axes_selector=axes_selector,
                                 time_selector=time_selector)
        # Set plot labels
        fig, ax = plt.subplots()
        fig.set_tight_layout(True)
        x_name = axis["LONG_NAME"]
        x_units = axis["UNITS"]
        y_name = self.data_name
        y_units = self.units
        ax.set_xlabel(_create_label(x_name, x_units, latex_label))
        ax.set_ylabel(_create_label(y_name, y_units, latex_label))
        # Plot the points (first frame consumes the first generated item).
        x_min, x_max = axis["MIN"], axis["MAX"]
        plot_data, = ax.plot(axis["LIST"], next(gen))
        ax.set_xlim(x_min, x_max)
        if scale_mode == "max":
            # Get a list (generator) with the mins and maxs in each time step
            min_max_list = map(lambda l: [min(l), max(l)],
                               self.get_generator(dataset_selector=dataset_selector, axes_selector=axes_selector,
                                                  time_selector=time_selector))
            f = lambda mins, maxs: (min(mins), max(maxs))
            y_min, y_max = f(*zip(*min_max_list))
            ax.set_ylim(y_min, y_max)
        time_list = self.get_time_list(time_selector)

        # Prepare a function for the updates
        def update(i):
            """Update the plot, returning the artists which must be redrawn."""
            try:
                new_dataset = next(gen)
            except StopIteration:
                logger.warning("Tried to add a frame to the animation, but all data was used.")
                return
            label = 't = {0}'.format(time_list[i])
            plot_data.set_ydata(new_dataset[:])
            ax.set_title(label)
            if not scale_mode or scale_mode == "max":
                pass
            elif scale_mode == "expand":
                # Grow the y-range monotonically.
                prev = ax.get_ylim()
                data_limit = [min(new_dataset), max(new_dataset)]
                ax.set_ylim(min(prev[0], data_limit[0]), max(prev[1], data_limit[1]))
            elif scale_mode == "adjust_always":
                ax.set_ylim(min(new_dataset), max(new_dataset))
            return plot_data, ax

        anim = FuncAnimation(fig, update, frames=range(1, len(time_list) - 2), interval=interval)
        if not output_path:  # "" or None
            pass
        else:
            filename = os.path.basename(output_path)
            if "." in filename:
                extension = output_path.split(".")[-1].lower()
            else:
                extension = None
            if extension == "gif":
                anim.save(output_path, dpi=dpi, writer='imagemagick')
            elif extension == "mp4":
                metadata = dict(title=os.path.split(self.data_path)[-1], artist='duat', comment=self.data_path)
                writer = FFMpegWriter(fps=fps, metadata=metadata)
                with writer.saving(fig, output_path, dpi):
                    # Iterate over frames
                    for i in range(1, len(time_list) - 1):
                        update(i)
                        writer.grab_frame()
                    # Keep showing the last frame for the fixed time
                    writer.grab_frame()
            else:
                logger.warning("Unknown extension in path %s. No output produced." % output_path)
            plt.close()
        return fig, ax, anim
    def time_1d_colormap(self, output_path=None, dataset_selector=None, axes_selector=None, time_selector=None,
                         dpi=200, latex_label=True, cmap=None, norm=None, show=True, rasterized=True,
                         contour_plot=False):
        """
        Generate a colormap in an axis and the time.

        This function plots a magnitude depending on ONE spatial coordinate (hence the name) and on time as a colormap
        in the cartesian product of such a magnitude and the time.

        Note:
            For simple manipulation like labels or title you can make use of the returned tuple or a
            `matplotlib.pyplot.style.context`. More advanced manipulation can be done extracting the data with the
            :func:`~duat.osiris.plot.Diagnostic.get_generator` method instead.

        Args:
            output_path (str): The place where the plot is saved. If "" or None, the figure is not saved.
            dataset_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            axes_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            time_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            dpi (int): The resolution of the file in dots per inch.
            latex_label (bool): Whether to use LaTeX code for the plot.
            cmap (str or `matplotlib.colors.Colormap`): The Colormap to use in the plot.
            norm (str or `matplotlib.colors.Normalize`): How to scale the colormap. For advanced manipulation, use some
                Normalize subclass, e.g., colors.SymLogNorm(0.01). Automatic scales can be selected with
                the following strings:

                * "lin": Linear scale from minimum to maximum.
                * "log": Logarithmic scale from minimum to maximum up to vmax/vmin>1E9, otherwise increasing vmin.
            show (bool): Whether to show the plot. This is blocking if matplotlib is in non-interactive mode.
            rasterized (bool): Whether the map is rasterized. This does not apply to axes, title... Note non-rasterized
                images with large amounts of data exported to PDF might be challenging to handle.
            contour_plot (bool): Whether contour lines are plot instead of the density map.

        Returns:
            (`matplotlib.figure.Figure`, `matplotlib.axes.Axes`): Objects representing the generated plot.
        """
        if output_path:
            ensure_dir_exists(os.path.dirname(output_path))
        axes = self.get_axes(dataset_selector=dataset_selector, axes_selector=axes_selector)
        if len(axes) != 1:
            raise ValueError("Expected 1 axis plot, but %d were provided" % len(axes))
        if len(self.file_list) < 2:
            raise ValueError("Unable to plot a colormap with only one time snapshot")
        axis = axes[0]
        gen = self.get_generator(dataset_selector=dataset_selector, axes_selector=axes_selector,
                                 time_selector=time_selector)
        # Set plot labels
        fig, ax = plt.subplots()
        x_name = axis["LONG_NAME"]
        x_units = axis["UNITS"]
        y_name = "t"
        y_units = self.time_units
        title_name = self.data_name
        title_units = self.units
        ax.set_xlabel(_create_label(x_name, x_units, latex_label))
        ax.set_ylabel(_create_label(y_name, y_units, latex_label))
        time_list = self.get_time_list(time_selector)
        # Gather the points: one row of z per time snapshot.
        x_min, x_max = axis["MIN"], axis["MAX"]
        z = np.asarray(list(gen))
        norm = _autonorm(norm, z)
        plot_function = ax.contourf if contour_plot else ax.pcolormesh
        if rasterized:
            # Rasterizing in contourf is a bit tricky
            # Cf. http://stackoverflow.com/questions/33250005/size-of-matplotlib-contourf-image-files
            plot = plot_function(axis["LIST"], time_list, z, norm=norm, cmap=cmap, zorder=-9)
            ax.set_rasterization_zorder(-1)
        else:
            plot = plot_function(axis["LIST"], time_list, z, norm=norm, cmap=cmap)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(time_list[0], time_list[-1])
        ax.set_title(_create_label(title_name, title_units, latex_label))
        _fix_colorbar(fig.colorbar(plot))
        if output_path:  # "" or None
            plt.savefig(output_path, dpi=dpi)
        if show:
            plt.show()
        else:
            plt.close()
        return fig, ax
    def axes_2d_colormap(self, output_path=None, dataset_selector=None, axes_selector=None, time_selector=None,
                         dpi=200, latex_label=True, cmap=None, norm=None, show=True, rasterized=True,
                         contour_plot=False):
        """
        Generate a colormap in two axes.

        A single time snapshot must be selected with the time_selector parameter. For an animated version in time see
        the :func:`~duat.osiris.plot.Diagnostic.time_2d_animation` method.

        Note:
            For simple manipulation like labels or title you can make use of the returned tuple or a
            `matplotlib.pyplot.style.context`. More advanced manipulation can be done extracting the data with the
            :func:`~duat.osiris.plot.Diagnostic.get_generator` method instead.

        Args:
            output_path (str): The place where the plot is saved. If "" or None, the figure is not saved.
            dataset_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            axes_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            time_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            dpi (int): The resolution of the file in dots per inch.
            latex_label (bool): Whether to use LaTeX code for the plot.
            cmap (str or `matplotlib.colors.Colormap`): The Colormap to use in the plot.
            norm (str or `matplotlib.colors.Normalize`): How to scale the colormap. For advanced manipulation, use some
                Normalize subclass, e.g., colors.SymLogNorm(0.01). Automatic scales can be selected with
                the following strings:

                * "lin": Linear scale from minimum to maximum.
                * "log": Logarithmic scale from minimum to maximum up to vmax/vmin>1E9, otherwise increasing vmin.
            show (bool): Whether to show the plot. This is blocking if matplotlib is in non-interactive mode.
            rasterized (bool): Whether the map is rasterized. This does not apply to axes, title... Note non-rasterized
                images with large amounts of data exported to PDF might be challenging to handle.
            contour_plot (bool): Whether contour lines are plot instead of the density map.

        Returns:
            (`matplotlib.figure.Figure`, `matplotlib.axes.Axes`): Objects representing the generated plot.
        """
        if output_path:
            ensure_dir_exists(os.path.dirname(output_path))
        axes = self.get_axes(dataset_selector=dataset_selector, axes_selector=axes_selector)
        if len(axes) != 2:
            raise ValueError("Expected 2 axes plot, but %d were provided" % len(axes))
        gen = self.get_generator(dataset_selector=dataset_selector, axes_selector=axes_selector,
                                 time_selector=time_selector)
        # Set plot labels
        fig, ax = plt.subplots()
        x_name = axes[0]["LONG_NAME"]
        x_units = axes[0]["UNITS"]
        y_name = axes[1]["LONG_NAME"]
        y_units = axes[1]["UNITS"]
        title_name = self.data_name
        title_units = self.units
        ax.set_xlabel(_create_label(x_name, x_units, latex_label))
        ax.set_ylabel(_create_label(y_name, y_units, latex_label))
        time_list = self.get_time_list(time_selector)
        if len(time_list) == 0:
            raise ValueError("No time snapshot selected")
        if len(time_list) != 1:
            raise ValueError("A single time snapshot must be selected for this plot")
        # Gather the points. Transposed so the first (numpy) axis maps to
        # the plot's y coordinate.
        x_min, x_max = axes[0]["MIN"], axes[0]["MAX"]
        y_min, y_max = axes[1]["MIN"], axes[1]["MAX"]
        z = np.transpose(np.asarray(list(gen)[0]))
        norm = _autonorm(norm, z)
        plot_function = ax.contourf if contour_plot else ax.pcolormesh
        if rasterized:
            # Rasterizing in contourf is a bit tricky
            # Cf. http://stackoverflow.com/questions/33250005/size-of-matplotlib-contourf-image-files
            plot = plot_function(axes[0]["LIST"], axes[1]["LIST"], z, norm=norm, cmap=cmap, zorder=-9)
            ax.set_rasterization_zorder(-1)
        else:
            plot = plot_function(axes[0]["LIST"], axes[1]["LIST"], z, norm=norm, cmap=cmap)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_title(_create_label(title_name, title_units, latex_label))
        _fix_colorbar(fig.colorbar(plot))
        if output_path:  # "" or None
            plt.savefig(output_path, dpi=dpi)
        if show:
            plt.show()
        else:
            plt.close()
        return fig, ax
    def time_2d_animation(self, output_path=None, dataset_selector=None, axes_selector=None, time_selector=None,
                          dpi=200, fps=1, cmap=None, norm=None, rasterized=True, z_min=None,
                          z_max=None, latex_label=True, interval=200):
        """
        Generate a plot of 2d data as a color map which animated in time.

        If an output path with a suitable extension is supplied, the method will export it. Available formats are mp4
        and gif. The returned objects allow for minimal customization and representation. For example in Jupyter you
        might use `IPython.display.HTML(animation.to_html5_video())`, where `animation` is the returned `FuncAnimation`
        instance.

        Note:
            Exporting a high resolution animated gif with many frames might eat your RAM.

        Args:
            output_path (str): The place where the plot is saved. If "" or None, the plot is shown in matplotlib.
            dataset_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            axes_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            time_selector: See :func:`~duat.osiris.plot.Diagnostic.get_generator` method.
            interval (float): Delay between frames in ms. If exporting to mp4, the fps is used instead to generate the
                file, although the returned objects do use this value.
            dpi (int): The resolution of the frames in dots per inch (only if exporting).
            fps (int): The frames per seconds (only if exporting to mp4).
            latex_label (bool): Whether for use LaTeX code for the plot.
            cmap (str or `matplotlib.colors.Colormap`): The Colormap to use in the plot.
            norm (str or `matplotlib.colors.Normalize`): How to scale the colormap. For advanced manipulation, use some
                Normalize subclass, e.g., colors.SymLogNorm(0.01). Automatic scales can be selected with
                the following strings:

                * "lin": Linear scale from minimum to maximum.
                * "log": Logarithmic scale from minimum to maximum up to vmax/vmin>1E9, otherwise increasing vmin.

            rasterized (bool): Whether the map is rasterized. This does not apply to axes, title... Note non-rasterized
                images with large amount of data exported to PDF might challenging to handle.

        Returns:
            (`matplotlib.figure.Figure`, `matplotlib.axes.Axes`, `matplotlib.animation.FuncAnimation`):
            Objects representing the generated plot and its animation.

        Raises:
            FileNotFoundError: If tried to export to mp4 but ffmpeg is not found in the system.

        """
        # NOTE(review): z_min and z_max are accepted but never used in this body —
        # confirm whether they were meant to feed the norm/color limits.
        if output_path:
            ensure_dir_exists(os.path.dirname(output_path))
        axes = self.get_axes(dataset_selector=dataset_selector, axes_selector=axes_selector)
        if len(axes) != 2:
            raise ValueError("Expected 2 axes plot, but %d were provided" % len(axes))
        gen = self.get_generator(dataset_selector=dataset_selector, axes_selector=axes_selector,
                                 time_selector=time_selector)
        # Set plot labels
        fig, ax = plt.subplots()
        fig.set_tight_layout(True)
        x_name = axes[0]["LONG_NAME"]
        x_units = axes[0]["UNITS"]
        y_name = axes[1]["LONG_NAME"]
        y_units = axes[1]["UNITS"]
        title_name = self.data_name
        title_units = self.units
        ax.set_xlabel(_create_label(x_name, x_units, latex_label))
        ax.set_ylabel(_create_label(y_name, y_units, latex_label))
        # Gather the points
        x_min, x_max = axes[0]["MIN"], axes[0]["MAX"]
        y_min, y_max = axes[1]["MIN"], axes[1]["MAX"]
        # Consume the first snapshot to draw the initial frame; the generator is
        # now one frame ahead of time_list when update() is called.
        z = np.transpose(np.asarray(next(gen)))
        time_list = self.get_time_list(time_selector)
        if len(time_list) < 2:
            raise ValueError("At least two time snapshots are needed to make an animation")
        norm = _autonorm(norm, z)
        plot_function = ax.pcolormesh
        if rasterized:
            # Rasterizing in contourf is a bit tricky
            # Cf. http://stackoverflow.com/questions/33250005/size-of-matplotlib-contourf-image-files
            plot = plot_function(axes[0]["LIST"], axes[1]["LIST"], z, norm=norm, cmap=cmap, zorder=-9)
            ax.set_rasterization_zorder(-1)
        else:
            plot = plot_function(axes[0]["LIST"], axes[1]["LIST"], z, norm=norm, cmap=cmap)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_title(_create_label(title_name, title_units, latex_label))
        _fix_colorbar(fig.colorbar(plot))

        # Prepare a function for the updates
        def update(i):
            """Update the plot, returning the artists which must be redrawn."""
            try:
                new_dataset = np.transpose(np.asarray(next(gen)))
            except StopIteration:
                logger.warning("Tried to add a frame to the animation, but all data was used.")
                return
            label = 't = {0}'.format(time_list[i])
            # BEWARE: The set_array syntax is rather problematic. Depending on the shading used in pcolormesh, the
            # following might not work.
            plot.set_array(new_dataset[:-1, :-1].ravel())
            # For more details, check lumbric's answer to
            # https://stackoverflow.com/questions/18797175/animation-with-pcolormesh-routine-in-matplotlib-how-do-i-initialize-the-data
            ax.set_title(label)
            return plot, ax

        # NOTE(review): the interactive animation iterates frames up to
        # len(time_list) - 2 (exclusive) while the mp4 export below iterates up to
        # len(time_list) - 1 — confirm which frame count is intended.
        anim = FuncAnimation(fig, update, frames=range(1, len(time_list) - 2), interval=interval)
        if not output_path:  # "" or None
            pass
        else:
            filename = os.path.basename(output_path)
            if "." in filename:
                extension = output_path.split(".")[-1].lower()
            else:
                extension = None
            if extension == "gif":
                anim.save(output_path, dpi=dpi, writer='imagemagick')
            elif extension == "mp4":
                metadata = dict(title=os.path.split(self.data_path)[-1], artist='duat', comment=self.data_path)
                writer = FFMpegWriter(fps=fps, metadata=metadata)
                with writer.saving(fig, output_path, dpi):
                    # Iterate over frames
                    for i in range(1, len(time_list) - 1):
                        update(i)
                        writer.grab_frame()
                    # Keep showing the last frame for the fixed time
                    writer.grab_frame()
            else:
                logger.warning("Unknown extension in path %s. No output produced." % output_path)
        plt.close()
        return fig, ax, anim
def get_diagnostic_list(run_dir="."):
    """
    Create a list with the diagnostic found in the given run directory.

    Args:
        run_dir (str): The run directory.

    Returns:
        :obj:`list` of :obj:`Diagnostic`: List of the diagnostic found.

    """
    # The empty component forces the path to end in the separator, so the walk
    # starts unambiguously at the MS directory.
    # This might be not compatible with Windows due to the meaning of \ in patterns, but who cares...
    ms_folder = os.path.join(run_dir, "MS", "")
    found = []
    for root, subdirs, files in os.walk(ms_folder):
        # A terminal directory that holds files corresponds to one diagnostic
        if files and not subdirs:
            found.append(Diagnostic(root))
    return found
def _pos_in_bin(x, a, b, n):
"""Find the bin where x is found in a mesh from a to b with n points (including both).
The index starts at 0 and if x is in the mesh the interval to the right will be returned.
The returned value can be outside [0, n-1] if x is not in [a, b).
Note that for x=b, the returned value is n"""
return floor((x - a) / (b - a) * n)
class ScaledSlice:
    """
    A slice described in simulation units (instead of list position).

    This object can be used to describe a time_selector parameter.
    """

    def __init__(self, start, stop, step=None):
        """
        Create a ScaledSlice instance.

        Args:
            start(float): Where the slice should start. Actual start will be before if needed.
            stop(float): Where the slice should stop. The point is in general excluded, as usual in Python.
            step (float): The desired step of the slice. Actual step will be the biggest multiple of the mesh step
                which is less than this one.
        """
        self.start = start
        self.stop = stop
        self.step = step

    def __repr__(self):
        if self.step:
            return "ScaledSlice<(%g, %g, %g)>" % (self.start, self.stop, self.step)
        return "ScaledSlice<(%g, %g)>" % (self.start, self.stop)

    def _get_slice(self, mesh_min, mesh_max, n_points):
        """Return a slice best approximating the instance in the given partition"""
        def clamp(pos):
            # Keep bin positions inside the valid index range [0, n_points - 1]
            return min(max(pos, 0), n_points - 1)

        lo = clamp(_pos_in_bin(self.start, mesh_min, mesh_max, n_points))
        hi = clamp(_pos_in_bin(self.stop, mesh_min, mesh_max, n_points))
        if self.step:
            # Largest whole number of mesh steps not exceeding the requested step
            stride = max(floor(self.step / ((mesh_max - mesh_min) / n_points)), 1)
            return slice(lo, hi, stride)
        return slice(lo, hi)
class ScaledFunction:
    """
    A function that applies to a range defined in simulation units (instead of list position).

    This object can be used to describe a component of an axes_selector parameter.
    """

    def __init__(self, f, start, stop):
        """
        Create a ScaledFunction instance.

        Args:
            f (Callable): The function to be applied to the range, e.g., a sum.
            start(float): Where the application range should start. Actual start will be before if needed to include
                the given point
            stop(float): Where the slice should stop. Actual stop will be after if needed to include the given point.
        """
        self.start = start
        self.stop = stop
        self.f = f

    def __repr__(self):
        # BUG FIX: the attribute is ``f`` (there is no ``self.function``) and a
        # string cannot be formatted with %g — both made repr() raise before.
        return "ScaledFunction<(%s, %g, %g)>" % (str(self.f), self.start, self.stop)

    def _get_function(self, l):
        """Get a function representing the instance for the given list (l must be sorted)."""
        m = bisect_right(l, self.start) - 1  # "right" is the new "insertion", so an equal values is still to our left
        if m < 0:  # It was less than the minimum
            m = 0
        # Other half is symmetrical
        n = bisect_left(l, self.stop) + 1
        if n >= len(l) + 1:
            n = len(l)
        # Apply f only to the portion of the data covered by [start, stop]
        return lambda x: self.f(x[m:n])
|
<gh_stars>1-10
#!/usr/bin/env python3
# Standard lib
import unittest
# 3rd party
import numpy as np
import h5py
# Our own imports
from ..helpers import FileSystemTestCase
from deep_hipsc_tracking.stats import grid_db, _grid_db
from deep_hipsc_tracking.tracking import Link
# Constants
# Synthetic tracks: each Link is built from parallel arrays of frame indices
# (tt) and image coordinates (xx, yy). The tracks overlap in time but differ
# in length and position.
LINK1 = Link.from_arrays(
    tt=np.array([1, 2, 3, 4, 5]),
    xx=np.array([6.1, 7.1, 8.1, 9.1, 10.1]),
    yy=np.array([6.2, 7.2, 8.2, 9.2, 10.2]),
)
LINK2 = Link.from_arrays(
    tt=np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]),
    xx=np.array([6.3, 7.3, 8.3, 9.3, 10.3, 11.3, 12.3, 13.3, 14.3]),
    yy=np.array([1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 6.4, 5.4, 3.4]),
)
LINK3 = Link.from_arrays(
    tt=np.array([3, 4, 5, 6, 7]),
    xx=np.array([6.5, 7.5, 8.5, 9.5, 10.5]),
    yy=np.array([5.6, 6.6, 7.6, 8.6, 9.6]),
)
LINK4 = Link.from_arrays(
    tt=np.array([4, 5, 6, 7, 8, 9]),
    xx=np.array([6.7, 7.7, 8.7, 9.7, 10.7, 11.7]),
    yy=np.array([4.8, 5.8, 6.8, 7.8, 8.8, 9.8]),
)
# LINK5 and LINK6 share LINK4's frame range but are shifted in x and/or y
LINK5 = Link.from_arrays(
    tt=np.array([4, 5, 6, 7, 8, 9]),
    xx=np.array([7.7, 8.7, 9.7, 10.7, 11.7, 12.7]),
    yy=np.array([4.8, 5.8, 6.8, 7.8, 8.8, 9.8]),
)
LINK6 = Link.from_arrays(
    tt=np.array([4, 5, 6, 7, 8, 9]),
    xx=np.array([7.7, 8.7, 9.7, 10.7, 11.7, 12.7]),
    yy=np.array([5.8, 6.8, 7.8, 8.8, 9.8, 10.8]),
)
# Attribute names expected on a grid database instance
# (presumably consumed by the round-trip comparisons in TestGridDB — confirm below)
GRID_DB_ATTRS = [
    'space_scale',
    'time_scale',
    'timepoint_coords',
    'timepoint_real_coords',
    'timepoint_links',
    'track_links',
    'track_links_inv',
    'timepoint_meshes',
    'timepoint_triangles',
    'timepoint_perimeters',
    'timepoint_warp_coords',
    'timepoint_warp_radius',
    'local_densities_mesh',
    'local_cell_areas_mesh',
    'delta_divergence_mesh',
    'delta_curl_mesh',
    'local_displacement_mesh',
    'local_distance_mesh',
    'local_disp_vs_dist_mesh',
    'local_velocity_mesh',
    'local_speed_mesh',
    'local_persistence_mesh',
]
# Attribute names related to persistence statistics
PERSISTENCE_ATTRS = [
    'pct_quiescent', 'pct_persistent', 'r_rad', 'x_rad', 'y_rad',
    'x_pos', 'y_pos', 'disp', 'dist', 'vel', 'tt', 'xx', 'yy', 'mask',
]
# Helper functions
def are_objects_equal(val1, val2, places=4, msg=''):
    """ Assert that stuff is equal given nested datatypes

    Properly handles tricky things like arrays inside dictionaries and NaNs

    :param object val1:
        The first object to test
    :param object val2:
        The second object to test
    :param int places:
        Number of decimal places used when rounding scalar numbers for comparison
    :param str msg:
        Message attached to any failing assertion
    :returns:
        True if they seem similar, False if a non-assertion error occurred while
        comparing (a failed comparison raises AssertionError instead of
        returning False)
    """
    try:
        if hasattr(val1, 'dtype') and hasattr(val2, 'dtype'):
            # Both array-like: elementwise comparison, treating NaN == NaN
            assert np.allclose(val1, val2, equal_nan=True), msg
        elif isinstance(val1, (int, float)) and np.isnan(val1):
            # NaN never equals itself, so require both sides to be NaN
            assert np.isnan(val2), msg
        elif isinstance(val1, (int, float)) and isinstance(val2, (int, float)):
            assert round(val1, places) == round(val2, places), msg
        elif isinstance(val1, dict):
            assert val1.keys() == val2.keys(), msg
            for key, sval1 in val1.items():
                sval2 = val2[key]
                assert are_objects_equal(sval1, sval2, places=places, msg=msg), msg
        elif isinstance(val1, (list, tuple)):
            # BUG FIX: this assertion previously dropped the msg argument,
            # unlike every other assert in this function
            assert len(val1) == len(val2), msg
            for v1, v2 in zip(val1, val2):
                assert are_objects_equal(v1, v2, places=places, msg=msg), msg
        else:
            assert val1 == val2, msg
    except AssertionError:
        # Comparison failures propagate so the caller's test reports them
        raise
    except Exception as e:
        # Non-comparable types (or other comparison errors) are reported as inequality
        print('Error evaluating are objects equal: {}'.format(e))
        return False
    return True
# Tests
class TestFindTracksInROI(unittest.TestCase):
    """ Find tracks inside an ROI """

    # 3x3 grid of stationary tracks; most span frames 1-5, but the center
    # track (index 4, at x=1, y=1) spans only frames 1-3, so timepoints 4 and 5
    # contain 8 points instead of 9.
    LINKS = [
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([1]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([1]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3]), xx=np.array([1]*3), yy=np.array([1]*3)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([1]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([2]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([1]*5), yy=np.array([2]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([2]*5)),
    ]

    def test_finds_perimeter_points_over_time(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        # Perimeter of all nine points at t=1 is the four grid corners
        point_ids = db.get_perimeter_point_ids(1, list(range(9)))
        exp_ids = [0, 2, 8, 6]
        self.assertEqual(point_ids, exp_ids)
        # A subset produces its own hull
        point_ids = db.get_perimeter_point_ids(1, [0, 1, 3, 4])
        exp_ids = [0, 1, 4, 3]
        self.assertEqual(point_ids, exp_ids)
        # At t=5 the center track is gone, so only 8 ids are available
        point_ids = db.get_perimeter_point_ids(5, list(range(8)))
        exp_ids = [0, 2, 7, 5]
        self.assertEqual(point_ids, exp_ids)
        point_timeline_ids = {
            1: list(range(9)),
            2: list(range(9)),
            3: list(range(9)),
            4: list(range(8)),
            5: list(range(8)),
        }
        res = db.get_perimeter_timeline_point_ids(point_timeline_ids)
        exp_ids = {
            1: [0, 2, 8, 6],
            2: [0, 2, 8, 6],
            3: [0, 2, 8, 6],
            4: [0, 2, 7, 5],
            5: [0, 2, 7, 5],
        }
        self.assertEqual(res, exp_ids)

    def test_finds_points_image_coords_over_time(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        # Bottom strip of the grid (y in [0, 1])
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        res = db.find_point_timelines_in_roi(coords)
        exp = {
            1: [0, 1, 2, 3, 4, 5],
            2: [0, 1, 2, 3, 4, 5],
            3: [0, 1, 2, 3, 4, 5],
            4: [0, 1, 2, 3, 4],
            5: [0, 1, 2, 3, 4],
        }
        self.assertEqual(res, exp)
        # Left strip of the grid (x in [0, 1])
        coords = np.array([
            [0, 1, 1, 0],
            [0, 0, 2, 2],
        ]).T
        res = db.find_point_timelines_in_roi(coords)
        exp = {
            1: [0, 1, 3, 4, 6, 7],
            2: [0, 1, 3, 4, 6, 7],
            3: [0, 1, 3, 4, 6, 7],
            4: [0, 1, 3, 5, 6],
            5: [0, 1, 3, 5, 6],
        }
        self.assertEqual(res, exp)

    # NOTE(review): this test exercises the *timeline* API while the similarly
    # named test below exercises the per-timepoint API — the two method names
    # look swapped relative to what they call; confirm the intended naming.
    def test_inverts_points_image_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        point_ids = db.find_point_timelines_in_roi(coords)
        res = db.invert_point_timeline_ids(point_ids)
        # Inversion yields the points *outside* the ROI at each timepoint
        exp = {
            1: [6, 7, 8],
            2: [6, 7, 8],
            3: [6, 7, 8],
            4: [5, 6, 7],
            5: [5, 6, 7],
        }
        self.assertEqual(res, exp)

    def test_inverts_empty_set_to_all_points(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        # Inverting an empty selection returns every point at that timepoint
        res = db.invert_point_timeline_ids({1: set(), 5: set()})
        exp = {
            1: list(range(9)),
            5: list(range(8)),
        }
        self.assertEqual(res, exp)

    def test_inverts_point_timelines_image_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        point_ids = db.find_points_in_roi(coords, timepoint=1)
        res = db.invert_point_ids(point_ids, timepoint=1)
        exp = [6, 7, 8]
        self.assertEqual(res, exp)
        point_ids = db.find_points_in_roi(coords, timepoint=5)
        res = db.invert_point_ids(point_ids, timepoint=5)
        exp = [5, 6, 7]
        self.assertEqual(res, exp)

    def test_finds_points_image_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        res = db.find_points_in_roi(coords, timepoint=1)
        exp = [0, 1, 2, 3, 4, 5]
        self.assertEqual(res, exp)
        res = db.find_points_in_roi(coords, timepoint=5)
        exp = [0, 1, 2, 3, 4]
        self.assertEqual(res, exp)
        coords = np.array([
            [0, 1, 1, 0],
            [0, 0, 2, 2],
        ]).T
        res = db.find_points_in_roi(coords, timepoint=1)
        exp = [0, 1, 3, 4, 6, 7]
        self.assertEqual(res, exp)
        # Explicitly selecting the 'coords' (image space) mesh
        res = db.find_points_in_roi(coords, timepoint=5, use_mesh='coords')
        exp = [0, 1, 3, 5, 6]
        self.assertEqual(res, exp)

    def test_find_tracks_over_time(self):
        # Non-unit scales so 'real_coords' differ from image 'coords'
        db = grid_db.GridDB(processes=1, time_scale=2.0, space_scale=3.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=5)
        db.warp_grid_to_circle()
        coords = np.array([
            [0, 1.1, 1.1, 0],  # From the center to the max radius to the right
            [0, 0, -0.1, -0.1],  # Narrow slice along the x-axis from 0 to -0.1
        ]).T
        res = db.find_track_timelines_in_roi(coords, use_mesh='warp_coords')
        exp = [4, 5]
        self.assertEqual(res, exp)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        res = db.find_track_timelines_in_roi(coords, use_mesh='coords')
        exp = [0, 1, 2, 3, 4, 5]
        self.assertEqual(res, exp)
        # ROI given in real (scaled) units, restricted to timepoints 4 and 5
        coords = np.array([
            [0, 3, 3, 0],
            [0, 0, 6, 6],
        ]).T
        res = db.find_track_timelines_in_roi(coords, use_mesh='real_coords', timepoints=[4, 5])
        exp = [0, 1, 3, 6, 7]
        self.assertEqual(res, exp)

    def test_invert_tracks_image_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        track_ids = db.find_tracks_in_roi(coords, timepoint=1)
        res = db.invert_track_ids(track_ids)
        exp = [6, 7, 8]
        self.assertEqual(res, exp)
        # At t=5 track 4 is absent from the ROI, so it joins the inverted set
        track_ids = db.find_tracks_in_roi(coords, timepoint=5)
        res = db.invert_track_ids(track_ids)
        exp = [4, 6, 7, 8]
        self.assertEqual(res, exp)

    def test_finds_tracks_image_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 2, 2, 0],
            [0, 0, 1, 1],
        ]).T
        res = db.find_tracks_in_roi(coords, timepoint=1)
        exp = [0, 1, 2, 3, 4, 5]
        self.assertEqual(res, exp)
        res = db.find_tracks_in_roi(coords, timepoint=5)
        exp = [0, 1, 2, 3, 5]
        self.assertEqual(res, exp)
        coords = np.array([
            [0, 1, 1, 0],
            [0, 0, 2, 2],
        ]).T
        res = db.find_tracks_in_roi(coords, timepoint=1)
        exp = [0, 1, 3, 4, 6, 7]
        self.assertEqual(res, exp)
        res = db.find_tracks_in_roi(coords, timepoint=5, use_mesh='coords')
        exp = [0, 1, 3, 6, 7]
        self.assertEqual(res, exp)

    def test_finds_tracks_in_warp_coords(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        # Need large max distance so triangulation doesn't fail...
        db.triangulate_grid(max_distance=5)
        db.warp_grid_to_circle()
        coords = np.array([
            [0, 1.1, 1.1, 0],  # From the center to the max radius to the right
            [0, 0, -0.1, -0.1],  # Narrow slice along the x-axis from 0 to -0.1
        ]).T
        # Both the long and short mesh names select the warped coordinates
        res = db.find_tracks_in_roi(coords, timepoint=1, use_mesh='timepoint_warp_coords')
        exp = [4, 5]  # Grabs the center and the middle right point
        self.assertEqual(res, exp)
        res = db.find_tracks_in_roi(coords, timepoint=5, use_mesh='warp_coords')
        exp = [5]  # Grabs middle right point only (center is missing)
        self.assertEqual(res, exp)

    def test_finds_tracks_in_real_coords(self):
        # space_scale=3.0 maps image x/y onto real units, so the ROI below
        # (3x6 real units) covers image x in [0, 1] and y in [0, 2]
        db = grid_db.GridDB(processes=1, time_scale=2.0, space_scale=3.0)
        for link in self.LINKS:
            db.add_track(link)
        coords = np.array([
            [0, 3, 3, 0],
            [0, 0, 6, 6],
        ]).T
        res = db.find_tracks_in_roi(coords, timepoint=1, use_mesh='timepoint_real_coords')
        exp = [0, 1, 3, 4, 6, 7]
        self.assertEqual(res, exp)
        res = db.find_tracks_in_roi(coords, timepoint=5, use_mesh='real_coords')
        exp = [0, 1, 3, 6, 7]
        self.assertEqual(res, exp)
class TestFindNeighboringTracks(unittest.TestCase):
    """ Find tracks at a timepoint using graph distances """

    # Same 3x3 grid fixture as TestFindTracksInROI: the center track (index 4)
    # only spans frames 1-3, so it is absent from the mesh at t=4 and t=5.
    LINKS = [
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([1]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([0]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([1]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3]), xx=np.array([1]*3), yy=np.array([1]*3)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([1]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([0]*5), yy=np.array([2]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([1]*5), yy=np.array([2]*5)),
        Link.from_arrays(tt=np.array([1, 2, 3, 4, 5]), xx=np.array([2]*5), yy=np.array([2]*5)),
    ]

    def test_finds_nearest_links_one_step(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        # max_distance=1.1 links only 4-connected grid neighbors (diagonals are ~1.41)
        db.triangulate_grid(max_distance=1.1)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=1)
        exp = {1, 3}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=3)
        exp = {1, 3, 5, 7}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=8, timepoint=5)
        exp = {5, 7}
        self.assertEqual(res, exp)

    # NOTE(review): this test and test_finds_nearest_links_disconnected_tracks
    # below have byte-identical bodies — confirm whether one was meant to set
    # up a different scenario.
    def test_finds_nearest_links_missing_track(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        # Track 4 ended at frame 3, so it has no neighbors at t=5
        res = db.find_neighboring_tracks(trackidx=4, timepoint=5, distance=1)
        exp = set()
        self.assertEqual(res, exp)

    def test_finds_nearest_links_disconnected_tracks(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=5, distance=1)
        exp = set()
        self.assertEqual(res, exp)

    def test_finds_nearest_links_two_steps(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        # Sparse subset: corner tracks 6 and 8 have no 4-connected neighbors
        for link in self.LINKS[:5]:
            db.add_track(link)
        db.add_track(self.LINKS[6])
        db.add_track(self.LINKS[8])
        db.triangulate_grid(max_distance=1.1)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=1, distance=1)
        exp = {1, 3}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=6, timepoint=1, distance=1)
        exp = set()
        self.assertEqual(res, exp)

    def test_finds_nearest_links_n_steps_poorly_connected(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        # At t=5 the center track is gone, so rings grow around the edge:
        # distance d returns exactly the tracks d graph-steps from track 0
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=0)
        exp = {0}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=1)
        exp = {1, 3}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=2)
        exp = {2, 6}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=3)
        exp = {7, 5}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=4)
        exp = {8}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=0, timepoint=5, distance=5)
        exp = set()
        self.assertEqual(res, exp)

    def test_finds_nearest_links_n_steps_well_connected(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        # At t=1 the center track exists: everything is within two steps of it
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=0)
        exp = {4}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=1)
        exp = {1, 3, 5, 7}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=2)
        exp = {0, 2, 6, 8}
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=3)
        exp = set()
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=4)
        exp = set()
        self.assertEqual(res, exp)
        res = db.find_neighboring_tracks(trackidx=4, timepoint=1, distance=5)
        exp = set()
        self.assertEqual(res, exp)

    def test_finds_nearest_tracks_over_time_n_steps(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        # Timeline variant aggregates neighbors over all timepoints
        res = db.find_neighboring_track_timelines(trackidx=0, distance=1)
        exp = {1, 3}
        self.assertEqual(res, exp)
        res = db.find_neighboring_track_timelines(trackidx=0, distance=2)
        exp = {2, 4, 6}
        self.assertEqual(res, exp)
        res = db.find_neighboring_track_timelines(trackidx=1, distance=1)
        exp = {0, 4, 2}
        self.assertEqual(res, exp)
        res = db.find_neighboring_track_timelines(trackidx=1, distance=2)
        exp = {3, 5, 7}
        self.assertEqual(res, exp)

    def test_finds_all_nearest_tracks_over_all_time(self):
        db = grid_db.GridDB(processes=1, time_scale=1.0, space_scale=1.0)
        for link in self.LINKS:
            db.add_track(link)
        db.triangulate_grid(max_distance=1.1)
        res = db.find_all_neighboring_track_timelines(distance=1)
        exp = {
            0: {1, 3},
            1: {0, 2, 4},
            2: {1, 5},
            3: {0, 4, 6},
            4: {1, 3, 5, 7},
            5: {2, 4, 8},
            6: {3, 7},
            7: {4, 6, 8},
            8: {5, 7},
        }
        self.assertEqual(res, exp)
        # trackidxs restricts both the keys and the candidate neighbors
        res = db.find_all_neighboring_track_timelines(distance=2, trackidxs=[1, 3, 5, 7])
        exp = {
            1: {3, 5, 7},
            3: {1, 5, 7},
            5: {1, 3, 7},
            7: {1, 3, 5},
        }
        self.assertEqual(res, exp)
class TestCalcAverageTriDensity(unittest.TestCase):
    """ Average triangle area assigned to each mesh point """

    def test_calcs_area(self):
        points = np.array([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.0, 1.0],
            [2.0, 4.0],
            [0.0, 0.0],
            [0.0, 3.0],
            [4.0, 0.0],
        ], dtype=np.float64)
        triangles = np.array([
            [0, 1, 2],
            [3, 1, 2],  # Average over two sets, one with area 0.5, one with area 1.5
            [4, 5, 6],  # Single triangle area == 6.0
        ], dtype=np.int64)
        num_points = points.shape[0]
        num_triangles = triangles.shape[0]
        res = _grid_db.calc_average_tri_density(
            points, triangles, num_points, num_triangles)
        exp = np.array([0.5, 1.0, 1.0, 1.5, 6.0, 6.0, 6.0])
        np.testing.assert_almost_equal(res, exp)
class TestCalcAverageSegmentDivergence(unittest.TestCase):
    """ Divergence between two timepoints of per-point areas """

    def test_calcs_divergence_different_sizes(self):
        # Area 1 to Area 2 with unlinked points
        first_areas = np.array([1.0, 2.0, 3.0], dtype=np.float64)
        second_areas = np.array([2.0, 1.0, 3.0, 4.0], dtype=np.float64)
        links = np.array([(0, 0), (1, 1)], dtype=np.int64)
        res1, res2 = _grid_db.calc_average_segment_divergence(
            first_areas,
            second_areas,
            links,
            first_areas.shape[0],
            second_areas.shape[0],
            links.shape[0],
            0.5)
        # Points without a link come back as NaN on both sides
        exp1 = np.array([2.0, -2.0, np.nan])
        exp2 = np.array([-2.0, 2.0, np.nan, np.nan])
        np.testing.assert_almost_equal(res1, exp1, decimal=4)
        np.testing.assert_almost_equal(res2, exp2, decimal=4)
class TestCalcAverageSegmentAngle(unittest.TestCase):
    """ Average angle change of mesh segments between two timepoints """

    def test_calcs_no_angle_change(self):
        # 30 degrees to 30 degrees, with a shift
        points1 = np.array([
            [0.0, 0.0],
            [0.866, 0.5],
        ])
        points2 = np.array([
            [1.0, 1.0],
            [1.866, 1.5],
        ])
        # links maps point ids at t1 to point ids at t2; mesh maps a point
        # id to the ids of its connected neighbors
        links = {
            0: 0,
            1: 1,
        }
        mesh = {
            0: [1],
            1: [0],
        }
        res1, res2 = _grid_db.calc_average_segment_angle(
            points1, points2, mesh, links, points1.shape[0], points2.shape[0])
        exp = np.array(
            [0, 0]
        )
        # Forward and backward angle changes are mirror images of each other
        np.testing.assert_almost_equal(res1, -res2)
        np.testing.assert_almost_equal(res1, exp, decimal=4)

    def test_calcs_with_different_size_arrays(self):
        # 30 degrees to 30 degrees, with a shift
        points1 = np.array([
            [0.0, 0.0],
            [0.866, 0.5],
            [0.0, 0.1],
        ])
        points2 = np.array([
            [1.0, 1.0],
            [1.866, 1.5],
            [2.0, 2.0],
            [3.0, 3.0],
        ])
        links = {
            0: 0,
            1: 1,
        }
        mesh = {
            0: [1],
            1: [0],
        }
        res1, res2 = _grid_db.calc_average_segment_angle(
            points1, points2, mesh, links, points1.shape[0], points2.shape[0])
        # Points with no link get NaN in the output
        exp1 = np.array(
            [0, 0, np.nan],
        )
        exp2 = np.array(
            [0, 0, np.nan, np.nan],
        )
        np.testing.assert_almost_equal(res1, exp1, decimal=4)
        np.testing.assert_almost_equal(res2, exp2, decimal=4)

    def test_calcs_pos_angle_change(self):
        # 30 degrees to 30 degrees, with a shift
        points1 = np.array([
            [0.0, 0.0],
            [0.866, 0.5],
            [0.0, 1.0],
        ])
        points2 = np.array([
            [1.0, 1.0],
            [1.5, 1.866],
            [0.134, 1.5],
        ])
        links = {
            0: 0,
            1: 1,
            2: 2,
        }
        mesh = {
            0: [1, 2],
            1: [0],
            2: [0],
        }
        res1, res2 = _grid_db.calc_average_segment_angle(
            points1, points2, mesh, links, points1.shape[0], points2.shape[0])
        # Point 0 averages its two segment rotations (30 and 60 degrees)
        exp = np.array(
            [np.pi/180*45, np.pi/180*30, np.pi/180*60]
        )
        np.testing.assert_almost_equal(res1, -res2)
        np.testing.assert_almost_equal(res1, exp, decimal=4)

    def test_calcs_multiple_angles(self):
        points1 = np.array([
            [0.0, 0.0],
            [0.1, 0.1],
            [0.2, 0.2],
        ])
        points2 = np.array([
            [0.0, 1.0],
            [0.1, 1.2],
            [0.2, 1.4],
        ])
        links = {
            0: 0,
            1: 1,
            2: 2,
        }
        mesh = {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1],
        }
        res1, res2 = _grid_db.calc_average_segment_angle(
            points1, points2, mesh, links, points1.shape[0], points2.shape[0])
        exp = np.array(
            [0.3217506, 0.3217505, 0.32175051]
        )
        np.testing.assert_almost_equal(res1, -res2)
        np.testing.assert_almost_equal(res1, exp, decimal=4)
class TestCalcDeltaStats(unittest.TestCase):
    """ Divergence/curl statistics for a set of point pairs between timepoints """

    def test_calcs_deltas_no_warp(self):
        # Each entry pairs a point's (x, y) at t1 with its (x, y) at t2;
        # the triangle roughly doubles in size between the two timepoints
        in_coords = [
            ((0, 0), (0.1, 0.1)),
            ((0, 1), (0.1, 2.1)),
            ((1, 1), (2.1, 2.1)),
        ]
        divergence, curl, coords, warp_coords = _grid_db.calc_delta_stats(in_coords, [])
        # Returned coords are the midpoints of each pair
        exp_coords = np.array([
            [0.05, 0.05],
            [1.55, 1.55],
            [0.05, 1.55],
        ])
        # Empty warp input yields an empty (0, 2) array
        exp_warp_coords = np.zeros((0, 2))
        self.assertAlmostEqual(divergence, np.log(2.0))
        self.assertAlmostEqual(curl, 0.0)
        np.testing.assert_almost_equal(coords, exp_coords)
        np.testing.assert_almost_equal(warp_coords, exp_warp_coords)

    def test_calcs_deltas_with_warp(self):
        in_coords = [
            ((0, 0), (0.1, 0.1)),
            ((1, 1), (2.1, 2.1)),
            ((0, 1), (0.1, 2.1)),
        ]
        in_warp_coords = [
            ((0.0, 0.0), (0.01, 0.01)),
            ((0.1, 0.1), (0.21, 0.21)),
            ((0.0, 0.1), (0.01, 0.21)),
        ]
        divergence, curl, coords, warp_coords = _grid_db.calc_delta_stats(in_coords, in_warp_coords)
        exp_coords = np.array([
            [0.05, 0.05],
            [1.55, 1.55],
            [0.05, 1.55],
        ])
        # Warp coords are midpoints too, computed in the warped space
        exp_warp_coords = np.array([
            [0.005, 0.005],
            [0.155, 0.155],
            [0.005, 0.155],
        ])
        self.assertAlmostEqual(divergence, np.log(2.0))
        self.assertAlmostEqual(curl, 0.0)
        np.testing.assert_almost_equal(coords, exp_coords)
        np.testing.assert_almost_equal(warp_coords, exp_warp_coords)
class TestCalcLocalDensity(unittest.TestCase):
    """ Local density around a mesh point from its neighbor triangle """

    def test_calcs_density_no_warp(self):
        timepoint_points = [
            (2.0, 2.0),
            (1.0, 4.0),
            (4.0, 1.0),
            (4.0, 4.0),
        ]
        timepoint_warp_points = []
        # Point 0 is surrounded by neighbors 1, 2 and 3
        timepoint_mesh = {
            0: {1, 2, 3},
        }
        res = _grid_db.calc_local_density(timepoint_points, timepoint_warp_points, timepoint_mesh)
        self.assertEqual(len(res), 3)
        res_areas, res_perimeters, res_warp_perimeters = res
        exp_areas = {
            0: 1 / 4.5,  # Triangle has area == 4.5 so density is 1/4.5
        }
        exp_perimeters = {
            0: np.array([[4.0, 1.0], [4.0, 4.0], [1.0, 4.0]]),
        }
        # No warp input -> no warp perimeters
        exp_warp_perimeters = {}
        self.assertEqual(res_areas, exp_areas)
        assert are_objects_equal(res_perimeters, exp_perimeters)
        assert are_objects_equal(res_warp_perimeters, exp_warp_perimeters)

    def test_calcs_density_with_warp(self):
        timepoint_points = [
            (2.0, 2.0),
            (1.0, 4.0),
            (4.0, 1.0),
            (4.0, 4.0),
        ]
        timepoint_warp_points = [
            (0.0, 0.0),
            (0.1, 0.3),
            (0.4, 0.1),
            (0.4, 0.4),
        ]
        timepoint_mesh = {
            0: {1, 2, 3},
        }
        res = _grid_db.calc_local_density(timepoint_points, timepoint_warp_points, timepoint_mesh)
        self.assertEqual(len(res), 3)
        res_areas, res_perimeters, res_warp_perimeters = res
        exp_areas = {
            0: 1 / 4.5,  # Triangle has area == 4.5 so density is 1/4.5
        }
        exp_perimeters = {
            0: np.array([[4.0, 1.0], [4.0, 4.0], [1.0, 4.0]]),
        }
        # Warp perimeter lists the same neighbors in warped coordinates
        exp_warp_perimeters = {
            0: np.array([[0.4, 0.1], [0.4, 0.4], [0.1, 0.3]]),
        }
        self.assertEqual(res_areas, exp_areas)
        assert are_objects_equal(res_perimeters, exp_perimeters)
        assert are_objects_equal(res_warp_perimeters, exp_warp_perimeters)
class TestCalcDeltaDensity(unittest.TestCase):
    """ Density change between two timepoints of linked mesh points """

    def test_calcs_delta_density_no_warp(self):
        timepoint1_points = [
            (2.0, 2.0),
            (1.0, 4.0),
            (4.0, 1.0),
            (4.0, 4.0),
        ]
        # Same configuration as timepoint 1, expanded around the center
        timepoint2_points = [
            (3.1, 3.1),
            (1.1, 6.1),
            (6.1, 1.1),
            (6.1, 6.1),
        ]
        timepoint1_warp_points = []
        timepoint2_warp_points = []
        timepoint1_mesh = {
            0: {1, 2, 3},
        }
        timepoint2_mesh = {
            0: {1, 2, 3},
        }
        # Identity linking: point i at t1 corresponds to point i at t2
        timepoint_links = {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
        }
        res = _grid_db.calc_delta_density(timepoint1_points, timepoint2_points,
                                          timepoint1_warp_points, timepoint2_warp_points,
                                          timepoint1_mesh, timepoint2_mesh,
                                          timepoint_links)
        self.assertEqual(len(res), 4)
        res_divergences, res_curls, res_perimeters, res_warp_perimeters = res
        exp_divergences = {
            (0, 0): 1.0217,  # Expanded by ~3x
        }
        exp_curls = {
            (0, 0): 0.0,  # No rotation between the two
        }
        # Perimeter coordinates are midpoints between the two timepoints
        exp_perimeters = {
            (0, 0): np.array([
                (5.05, 1.05),
                (5.05, 5.05),
                (1.05, 5.05),
            ]),
        }
        exp_warp_perimeters = {}
        assert are_objects_equal(res_divergences, exp_divergences)
        assert are_objects_equal(res_curls, exp_curls)
        assert are_objects_equal(res_perimeters, exp_perimeters)
        assert are_objects_equal(res_warp_perimeters, exp_warp_perimeters)

    def test_calcs_delta_density_with_warp(self):
        timepoint1_points = [
            (2.0, 2.0),
            (1.0, 4.0),
            (4.0, 1.0),
            (4.0, 4.0),
        ]
        timepoint2_points = [
            (3.1, 3.1),
            (1.1, 6.1),
            (6.1, 1.1),
            (6.1, 6.1),
        ]
        # Warp coordinates mirror the image coordinates at 1/100 scale
        timepoint1_warp_points = [
            (0.02, 0.02),
            (0.01, 0.04),
            (0.04, 0.01),
            (0.04, 0.04),
        ]
        timepoint2_warp_points = [
            (0.031, 0.031),
            (0.011, 0.061),
            (0.061, 0.011),
            (0.061, 0.061),
        ]
        timepoint1_mesh = {
            0: {1, 2, 3},
        }
        timepoint2_mesh = {
            0: {1, 2, 3},
        }
        timepoint_links = {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
        }
        res = _grid_db.calc_delta_density(timepoint1_points, timepoint2_points,
                                          timepoint1_warp_points, timepoint2_warp_points,
                                          timepoint1_mesh, timepoint2_mesh,
                                          timepoint_links)
        self.assertEqual(len(res), 4)
        res_divergences, res_curls, res_perimeters, res_warp_perimeters = res
        exp_divergences = {
            (0, 0): 1.0217,  # Expanded by ~3x
        }
        exp_curls = {
            (0, 0): 0.0,  # No rotation between the two
        }
        exp_perimeters = {
            (0, 0): np.array([
                (5.05, 1.05),
                (5.05, 5.05),
                (1.05, 5.05),
            ]),
        }
        # Warp perimeters are the midpoints computed in warped space
        exp_warp_perimeters = {
            (0, 0): np.array([
                (0.0505, 0.0105),
                (0.0505, 0.0505),
                (0.0105, 0.0505),
            ]),
        }
        assert are_objects_equal(res_divergences, exp_divergences)
        assert are_objects_equal(res_curls, exp_curls)
        assert are_objects_equal(res_perimeters, exp_perimeters)
        assert are_objects_equal(res_warp_perimeters, exp_warp_perimeters)
class TestGridDB(FileSystemTestCase):
    """End-to-end tests for grid_db.GridDB: tracks, meshes, warping, stats, HDF5 I/O."""

    def assertDictTuplesEqual(self, dict1, dict2, decimals=2):
        """Assert two {key: [coords...]} dicts are equal to ``decimals`` places."""
        msg = 'Invalid dict tuples\n\n Got {}\n\n Expected {}\n\n'
        msg = msg.format(dict1, dict2)
        self.assertEqual(dict1.keys(), dict2.keys(), msg=msg)
        for key in dict1:
            coords1 = dict1[key]
            coords2 = dict2[key]
            self.assertEqual(len(coords1), len(coords2), msg=msg)
            for c1, c2 in zip(coords1, coords2):
                assert are_objects_equal(c1, c2, places=decimals, msg=msg)

    def assertAttrsEqual(self, db1, db2, attrs):
        """Assert both objects expose every attr in ``attrs`` with equal values."""
        for attr in attrs:
            self.assertTrue(hasattr(db1, attr), msg='db1 missing {}'.format(attr))
            self.assertTrue(hasattr(db2, attr), msg='db2 missing {}'.format(attr))
            val1 = getattr(db1, attr)
            val2 = getattr(db2, attr)
            msg = '"{}" mismatch:\n\n db1.{}={}\n\n db2.{}={}\n\n'.format(
                attr, attr, val1, attr, val2)
            try:
                self.assertTrue(are_objects_equal(val1, val2), msg=msg)
            except Exception:
                # Print the full message before re-raising so long diffs stay visible.
                print(msg)
                raise

    def load_full_grid_db(self, links=None,
                          time_scale: float = 2.5,
                          space_scale: float = 1.5,
                          max_distance: float = 10) -> grid_db.GridDB:
        """ Load a full grid database and calculate everything """
        if links is None:
            links = [LINK1, LINK2, LINK3, LINK4, LINK5, LINK6]
        db = grid_db.GridDB(processes=1, time_scale=time_scale, space_scale=space_scale)
        for link in links:
            db.add_track(link)
        # Run the full processing pipeline so every derived mesh/stat exists.
        db.triangulate_grid(max_distance=max_distance)
        db.warp_grid_to_circle()
        db.calc_radial_stats()
        db.calc_local_densities_mesh()
        db.calc_delta_divergence_mesh()
        db.calc_delta_curl_mesh()
        return db

    # Tests
    def test_interp_track_values(self):
        """interp_track_values resamples/interpolates attributes along a track."""
        db = self.load_full_grid_db()
        res_tt, res_xx, res_yy = db.interp_track_values(1, 'timepoint_coords', resample_factor=2, interp_points=3)
        exp_tt = np.array([2.0, 2.47058824, 2.94117647, 3.41176471, 3.88235294, 4.35294118,
                           4.82352941, 5.29411765, 5.76470588, 6.23529412, 6.70588235, 7.17647059,
                           7.64705882, 8.11764706, 8.58823529, 9.05882353, 9.52941176, 10.0])
        exp_xx = np.array([6.3, 6.77058824, 7.24117647, 7.71176471, 8.18235294, 8.65294118,
                           9.12352941, 9.59411765, 10.06470588, 10.53529412, 11.00588235, 11.47647059,
                           11.94705882, 12.41764706, 12.88823529, 13.35882353, 13.82941176, 14.3])
        exp_yy = np.array([1.4, 1.87058824, 2.34117647, 2.81176471, 3.28235294, 3.75294118,
                           4.22352941, 4.69411765, 5.16470588, 5.63529412, 6.10588235, 6.15490196,
                           6.39019608, 6.00784314, 5.77254902, 4.97843137, 4.27254902, 3.56666667])
        assert are_objects_equal(res_tt, exp_tt)
        assert are_objects_equal(res_xx, exp_xx)
        assert are_objects_equal(res_yy, exp_yy)
        # Mesh-derived attributes are NaN where the mesh is undefined at track ends.
        res_tt, res_density, res_cell_area = db.interp_track_values(1, 'local_densities_mesh', 'local_cell_areas_mesh',
                                                                    resample_factor=2, interp_points=3)
        exp_density = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, 0.32319929,
                                0.37894138, 0.82776939, 1.05670427, 1.12347668, 1.35241142, 1.02106027,
                                0.84676812, 0.70297756, 0.47291232, np.nan, np.nan, np.nan])
        exp_cell_area = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, 3.61029179,
                                  2.35735036, 1.80220443, 1.32573431, 1.18676404, 0.71029406, 1.14522091,
                                  1.36580945, 1.87132235, 2.35661557, np.nan, np.nan, np.nan])
        assert are_objects_equal(res_tt, exp_tt)
        assert are_objects_equal(res_density, exp_density)
        assert are_objects_equal(res_cell_area, exp_cell_area)

    def test_add_track_to_mesh(self):
        """Tracks added to the mesh land in per-timepoint coordinate lists."""
        link1 = Link.from_arrays(
            tt=np.array([1, 2, 3, 4, 5]),
            xx=np.array([1.1, 1.2, 1.3, 1.4, 1.5]),
            yy=np.array([1.1, 1.2, 1.3, 1.4, 1.5]),
        )
        link2 = Link.from_arrays(
            tt=np.array([2, 3, 4, 5]),
            xx=np.array([2.2, 2.3, 2.4, 2.5]),
            yy=np.array([2.2, 2.3, 2.4, 2.5]),
        )
        link3 = Link.from_arrays(
            tt=np.array([3, 4, 5, 6]),
            xx=np.array([3.3, 3.4, 3.5, 3.6]),
            yy=np.array([3.3, 3.4, 3.5, 3.6]),
        )
        db = self.load_full_grid_db(links=[link1, link2, link3],
                                    time_scale=0.1,
                                    space_scale=2.0)
        # Each timepoint gathers the coordinates of every track present then.
        exp_coords = {
            1: [(1.1, 1.1)],
            2: [(1.2, 1.2), (2.2, 2.2)],
            3: [(1.3, 1.3), (2.3, 2.3), (3.3, 3.3)],
            4: [(1.4, 1.4), (2.4, 2.4), (3.4, 3.4)],
            5: [(1.5, 1.5), (2.5, 2.5), (3.5, 3.5)],
            6: [(3.6, 3.6)],
        }
        self.assertDictTuplesEqual(db.timepoint_coords, exp_coords)
        # All tracks move diagonally at the same rate, hence equal velocities.
        exp_velocity = {
            1: [2.8283],
            2: [2.8283, 2.8283],
            3: [2.8283, 2.8283, 2.8283],
            4: [2.8283, 2.8283, 2.8283],
            5: [np.nan, np.nan, 2.8283],
            6: [np.nan],
        }
        self.assertDictTuplesEqual(db.local_velocity_mesh, exp_velocity)

    def test_get_track_summaries(self):
        """get_all_track_summaries reduces each track's attribute with ``func``."""
        db = self.load_full_grid_db()
        res_x, res_y = db.get_all_track_summaries('timepoint_coords', func='mean')
        exp_x = np.array([8.1, 10.3, 8.5, 9.2, 10.2, 10.2])
        exp_y = np.array([8.2, 4.28888889, 7.6, 7.3, 7.3, 8.3])
        assert are_objects_equal(res_x, exp_x)
        assert are_objects_equal(res_y, exp_y)
        res_x, res_y, res_t = db.get_all_track_summaries('timepoint_real_coords', func='mean')
        exp_x = np.array([12.15, 15.45, 12.75, 13.8, 15.3, 15.3])
        exp_y = np.array([12.3, 6.43333333, 11.4, 10.95, 10.95, 12.45])
        exp_t = np.array([7.5, 15.0, 12.5, 16.25, 16.25, 16.25])
        assert are_objects_equal(res_x, exp_x)
        assert are_objects_equal(res_y, exp_y)
        assert are_objects_equal(res_t, exp_t)
        res_density = db.get_all_track_summaries('local_densities_mesh', func='mean')[0]
        exp_density = np.array([0.20576145, 0.6369991, 0.98977128, 0.66489163, 0.77777793, 0.97738294])
        assert are_objects_equal(res_density, exp_density)
        res_distance = db.get_all_track_summaries('local_distance_mesh', func='max')[0]
        exp_distance = np.array([7.81539074, 14.00685354, 7.81539074, 9.91486682, 9.91486682, 9.91486682])
        assert are_objects_equal(res_distance, exp_distance)

    def test_mesh_shapes_match(self):
        """Every numeric mesh attribute covers the same timepoints and counts."""
        db = self.load_full_grid_db()
        counts = {}
        timepoints = None
        for attr in db.NUM_ARRAY_ARGS.keys():
            if timepoints is None:
                # First attribute establishes the reference timepoint set.
                timepoints = set(getattr(db, attr))
            else:
                self.assertEqual(timepoints, set(getattr(db, attr)),
                                 msg='Bad timepoints {}'.format(attr))
            for timepoint, values in getattr(db, attr).items():
                if timepoint not in counts:
                    counts[timepoint] = len(values)
                else:
                    self.assertEqual(counts[timepoint], len(values),
                                     msg="Bad counts {} at {}".format(attr, timepoint))

    def test_extract_track_data_timepoint_real_coords(self):
        """Real coords are index coords scaled by space_scale/time_scale."""
        db = self.load_full_grid_db()
        tidx, xidx, xreal, yreal, treal = db.get_track_values(1, 'timepoint_real_coords')
        exp_tidx = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])
        exp_xidx = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
        exp_treal = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]) * 2.5
        exp_xreal = np.array([6.3, 7.3, 8.3, 9.3, 10.3, 11.3, 12.3, 13.3, 14.3]) * 1.5
        exp_yreal = np.array([1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 6.4, 5.4, 3.4]) * 1.5
        np.testing.assert_almost_equal(tidx, exp_tidx)
        np.testing.assert_almost_equal(xidx, exp_xidx)
        np.testing.assert_almost_equal(xreal, exp_xreal)
        np.testing.assert_almost_equal(yreal, exp_yreal)
        np.testing.assert_almost_equal(treal, exp_treal)

    def test_warp_to_circle_at_timepoints(self):
        """warp_to_timepoint maps image coords onto the unit circle per timepoint."""
        # Create a circle that expands over time
        # At t=1, r=1
        # At t=2, r=2, ... etc
        t = np.arange(1, 10)
        # Shift the center in x and y
        cx = 2
        cy = -1
        theta = np.linspace(0, 2*np.pi, 50)
        x = np.cos(theta) + cx
        y = np.sin(theta) + cy
        # Points along the radius
        links = [Link.from_arrays(t, x[i]*t, y[i]*t)
                 for i in range(theta.shape[0])]
        # Point at the center
        links.append(Link.from_arrays(t, t*cx, t*cy))
        db = self.load_full_grid_db(links=links, max_distance=20)
        # Ray from the center out
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) + np.array([(cx, cy)])
        warp_coords = db.warp_to_timepoint(1, coords)
        # At t=1 the circle has radius 1, so warping only removes the offset.
        exp_coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ])
        np.testing.assert_almost_equal(warp_coords, exp_coords, decimal=4)
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) + np.array([(cx*5, cy*5)])
        warp_coords = db.warp_to_timepoint(5, coords)
        # At t=5 the radius is 5, so warped coordinates shrink by 5x.
        exp_coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) / 5
        np.testing.assert_almost_equal(warp_coords, exp_coords, decimal=4)
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) + np.array([(cx*9, cy*9)])
        warp_coords = db.warp_to_timepoint(9, coords)
        exp_coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) / 9
        np.testing.assert_almost_equal(warp_coords, exp_coords, decimal=4)

    def test_inv_warp_to_circle_at_timepoints(self):
        """inv_warp_to_timepoint maps unit-circle coords back to image coords."""
        # Create a circle that expands over time
        # At t=1, r=1
        # At t=2, r=2, ... etc
        t = np.arange(1, 10)
        # Shift the center in x and y
        cx = 2
        cy = -1
        theta = np.linspace(0, 2*np.pi, 50)
        x = np.cos(theta) + cx
        y = np.sin(theta) + cy
        # Points along the radius
        links = [Link.from_arrays(t, x[i]*t, y[i]*t)
                 for i in range(theta.shape[0])]
        # Point at the center
        links.append(Link.from_arrays(t, t*cx, t*cy))
        db = self.load_full_grid_db(links=links, max_distance=20)
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ])
        warp_coords = db.inv_warp_to_timepoint(1, coords)
        # Inverse of t=1: just add the centre offset back.
        exp_coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) + np.array([[cx, cy]])
        np.testing.assert_almost_equal(warp_coords, exp_coords, decimal=4)
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ])
        warp_coords = db.inv_warp_to_timepoint(5, coords)
        # Inverse of t=5: scale up by 5 and add the scaled centre offset.
        exp_coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ]) * 5 + np.array([[cx, cy]]) * 5
        np.testing.assert_almost_equal(warp_coords, exp_coords, decimal=4)

    def test_warp_inv_warp_roundtrips(self):
        """warp followed by inverse warp reproduces the input coordinates."""
        # Create a circle that expands over time
        # At t=1, r=1
        # At t=2, r=2, ... etc
        t = np.arange(1, 10)
        # Shift the center in x and y
        cx = 2
        cy = -1
        theta = np.linspace(0, 2*np.pi, 50)
        x = np.cos(theta) + cx
        y = np.sin(theta) + cy
        # Points along the radius
        links = [Link.from_arrays(t, x[i]*t, y[i]*t)
                 for i in range(theta.shape[0])]
        # Point at the center
        links.append(Link.from_arrays(t, t*cx, t*cy))
        db = self.load_full_grid_db(links=links, max_distance=20)
        # Ray from the center out
        coords = np.array([
            (0, 0),
            (1, 1),
            (2, 2),
            (3, 3),
            (4, 4),
            (5, 5),
        ])
        for timepoint in t:
            warp_coords = db.warp_to_timepoint(timepoint, coords)
            inv_warp_coords = db.inv_warp_to_timepoint(timepoint, warp_coords)
            np.testing.assert_almost_equal(inv_warp_coords, coords, decimal=4)

    def test_extract_track_data_timepoint_warp_coords(self):
        """Warp coords and warp radius are consistent (r == hypot(x, y))."""
        db = self.load_full_grid_db()
        tidx, xidx, xwarp, ywarp, rwarp = db.get_track_values(
            1, 'timepoint_warp_coords', 'timepoint_warp_radius')
        exp_tidx = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])
        exp_xidx = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
        exp_xwarp = np.array([np.nan, -1.01725220e-06, 1.14761399e-01, 1.14761377e-01,
                              4.63480483e-01, 4.63480483e-01, 3.55550202e-01, 2.19104332e-01, np.nan])
        exp_ywarp = np.array([np.nan, -1.00000043, -0.99460722, -0.99460722, -0.88833763,
                              -0.88833763, -0.93735939, -0.97601022, np.nan])
        np.testing.assert_almost_equal(tidx, exp_tidx)
        np.testing.assert_almost_equal(xidx, exp_xidx)
        np.testing.assert_almost_equal(xwarp, exp_xwarp, decimal=4)
        np.testing.assert_almost_equal(ywarp, exp_ywarp, decimal=4)
        np.testing.assert_almost_equal(rwarp, np.sqrt(xwarp**2 + ywarp**2))

    def test_extract_track_data_density_curl_divergence(self):
        """Density/cell-area/curl/divergence values along a single track."""
        db = self.load_full_grid_db()
        tidx, xidx, density, cell_area, curl, div = db.get_track_values(
            1, 'local_densities_mesh', 'local_cell_areas_mesh', 'delta_curl_mesh', 'delta_divergence_mesh')
        exp_tidx = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])
        exp_xidx = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
        exp_density = np.array([np.nan, 0.1234568, 0.3603607, 0.3603607, 1.3333339, 1.3333333,
                                0.5925923, 0.355556, np.nan])
        exp_cell_area = np.array([np.nan, 8.1, 2.775, 2.775, 0.75, 0.75, 1.6875, 2.8125, np.nan])
        exp_curl = np.array([np.nan, 3.5132221e-08, 2.0213393e-08, 2.6045209e-08,
                             2.3076574e-07, -8.2363701e-02, -1.5114689e-01, -1.3756596e-01, np.nan])
        exp_div = np.array([np.nan, -2.1300004, -1.0650002, -0.4049996, -0.4049995,
                            0.1875003, 0.4124992, 0.4499981, np.nan])
        np.testing.assert_almost_equal(tidx, exp_tidx)
        np.testing.assert_almost_equal(xidx, exp_xidx)
        np.testing.assert_almost_equal(cell_area, exp_cell_area, decimal=4)
        np.testing.assert_almost_equal(density, exp_density, decimal=4)
        np.testing.assert_almost_equal(curl, exp_curl, decimal=4)
        np.testing.assert_almost_equal(div, exp_div, decimal=4)

    def test_extract_track_data_density_subset_timeline(self):
        """Point timelines restricted to an ROI yield per-timepoint densities."""
        db = self.load_full_grid_db()
        # Rectangular region of interest in index coordinates.
        roi = np.array([
            [5, 1],
            [5, 9],
            [8, 9],
            [8, 1],
        ])
        points = db.find_point_timelines_in_roi(perimeter=roi, timepoints=[4, 5])
        density = db.find_values_for_point_timeline('local_densities_mesh', points=points)
        exp_density = {
            4: np.array([0.6349, 0.7843, 0.8889, 0.4535]),
            5: np.array([0.7843]),
        }
        for key, res in density.items():
            exp = exp_density[key]
            np.testing.assert_almost_equal(res, exp, decimal=4)

    def test_extract_track_data_density_curl_divergence_subset(self):
        """Mesh values for the points inside an ROI at one timepoint."""
        db = self.load_full_grid_db()
        roi = np.array([
            [5, 1],
            [5, 9],
            [8, 9],
            [8, 1],
        ])
        points = db.find_points_in_roi(perimeter=roi, timepoint=4)
        density = db.find_values_for_points('local_densities_mesh', points=points, timepoint=4)
        exp_density = np.array([0.6349, 0.7843, 0.8889, 0.4535])
        np.testing.assert_almost_equal(density, exp_density, decimal=4)
        cell_area = db.find_values_for_points('local_cell_areas_mesh', points=points, timepoint=4)
        exp_cell_area = np.array([1.575, 1.275, 1.125, 2.205])
        np.testing.assert_almost_equal(cell_area, exp_cell_area, decimal=4)
        curl = db.find_values_for_points('delta_curl_mesh', points=points, timepoint=4)
        exp_curl = np.array([-1.6653e-16, 2.2204e-16, 2.9606e-16, -1.7764e-16])
        np.testing.assert_almost_equal(curl, exp_curl, decimal=4)
        div = db.find_values_for_points('delta_divergence_mesh', points=points, timepoint=4)
        exp_div = np.array([-1.3050e+00, -1.5099e-15, -1.3323e-15, -5.3291e-16])
        np.testing.assert_almost_equal(div, exp_div, decimal=4)

    def test_extract_track_velocity_distance(self):
        """Velocity/speed/distance/displacement/persistence stats for a track."""
        db = self.load_full_grid_db()
        res = db.get_track_values(
            1, 'local_velocity_mesh', 'local_speed_mesh',
            'local_distance_mesh', 'local_displacement_mesh',
            'local_disp_vs_dist_mesh', 'local_persistence_mesh')
        tidx, xidx, velocity, speed, distance, displacement, disp_vs_dist, persistence = res
        exp_tidx = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10])
        exp_xidx = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
        exp_velocity = np.array([0.8043568, 0.8043568, 0.8043568, 0.7959997, 0.7536884,
                                 0.7025816, 0.6795315, 0.6657548, np.nan])
        exp_speed = np.array([0.8043568, 0.8043568, 0.8043568, 0.8031527, 0.792132,
                              0.7758652, 0.7493032, 0.7318444, np.nan])
        exp_distance = np.array([1.3788973, 3.4472433, 5.5155892, 7.5715502, 9.2798938,
                                 11.0758882, 12.6275757, 14.0068535, np.nan])
        exp_displacement = np.array([1.3788973, 3.4472433, 5.5155892, 7.497977, 8.8194847,
                                     10.0297027, 11.4520616, 12.7423756, np.nan])
        exp_disp_vs_dist = np.array([1.0, 1.0, 1.0, 0.9910537, 0.9515971, 0.9055454,
                                     0.9068893, 0.9096996, np.nan])
        exp_persistence = np.array([1.0, 1.0, 0.8, 0.0, 0.0, 0.4, 1.0, 0.75, 0.0])
        np.testing.assert_almost_equal(tidx, exp_tidx)
        np.testing.assert_almost_equal(xidx, exp_xidx)
        np.testing.assert_almost_equal(velocity, exp_velocity)
        np.testing.assert_almost_equal(speed, exp_speed)
        np.testing.assert_almost_equal(distance, exp_distance)
        np.testing.assert_almost_equal(displacement, exp_displacement)
        np.testing.assert_almost_equal(disp_vs_dist, exp_disp_vs_dist)
        np.testing.assert_almost_equal(persistence, exp_persistence)

    def test_from_to_hdf5_roundtrips(self):
        """A GridDB written to HDF5 and read back is attribute-for-attribute equal."""
        hdf5_file = self.tempdir / 'temp.h5'
        db1 = self.load_full_grid_db()
        self.assertFalse(hdf5_file.is_file())
        db1.to_hdf5(hdf5_file)
        self.assertTrue(hdf5_file.is_file())
        db2 = grid_db.GridDB.from_hdf5(hdf5_file)
        self.assertAttrsEqual(db1, db2, attrs=GRID_DB_ATTRS)
        # track_peristences holds nested objects (or None) and needs its own pass.
        self.assertEqual(db1.track_peristences.keys(),
                         db2.track_peristences.keys())
        for key, track1 in db1.track_peristences.items():
            track2 = db2.track_peristences[key]
            if track1 is None:
                self.assertIsNone(track2)
                continue
            self.assertAttrsEqual(track1, track2, attrs=PERSISTENCE_ATTRS)

    def test_from_to_hdf5_roundtrips_lazy(self):
        """Lazy loading defers every attribute until .load() is called."""
        hdf5_file = self.tempdir / 'temp.h5'
        db1 = self.load_full_grid_db()
        self.assertFalse(hdf5_file.is_file())
        db1.to_hdf5(hdf5_file)
        self.assertTrue(hdf5_file.is_file())
        db2 = grid_db.GridDB.from_hdf5(hdf5_file, lazy=True)
        # By default, nothing is loaded
        for attr in GRID_DB_ATTRS:
            if attr in ('time_scale', 'space_scale'):
                # Scalars are always loaded eagerly.
                continue
            self.assertTrue(hasattr(getattr(db2, attr), 'load'))
        # Force everything to load
        db2.load(GRID_DB_ATTRS)
        # Now everything should be equal
        self.assertAttrsEqual(db1, db2, attrs=GRID_DB_ATTRS)
        # Have to handle track persistence specially
        self.assertTrue(hasattr(db2.track_peristences, 'load'))
        db2.load('track_peristences')
        self.assertEqual(db1.track_peristences.keys(),
                         db2.track_peristences.keys())
        for key, track1 in db1.track_peristences.items():
            track2 = db2.track_peristences[key]
            if track1 is None:
                self.assertIsNone(track2)
                continue
            self.assertAttrsEqual(track1, track2, attrs=PERSISTENCE_ATTRS)

    def test_from_to_hdf5_subgroup_roundtrips(self):
        """A GridDB can be stored in (and restored from) an HDF5 subgroup."""
        hdf5_file = self.tempdir / 'temp.h5'
        db = h5py.File(str(hdf5_file), 'w')
        grp1 = db.create_group('grid1')
        db1 = self.load_full_grid_db()
        db1.to_hdf5(grp1)
        self.assertTrue(hdf5_file.is_file())
        db.close()
        db = h5py.File(str(hdf5_file), 'r')
        db2 = grid_db.GridDB.from_hdf5(db['grid1'])
        self.assertAttrsEqual(db1, db2, attrs=GRID_DB_ATTRS)
        self.assertEqual(db1.track_peristences.keys(),
                         db2.track_peristences.keys())
        for key, track1 in db1.track_peristences.items():
            track2 = db2.track_peristences[key]
            if track1 is None:
                self.assertIsNone(track2)
                continue
            self.assertAttrsEqual(track1, track2, attrs=PERSISTENCE_ATTRS)

    def test_get_timepoint_values(self):
        """get_timepoint_values returns aligned arrays for one timepoint."""
        db = self.load_full_grid_db()
        radius, velocity = db.get_timepoint_values(5, 'timepoint_warp_radius', 'local_velocity_mesh')
        exp_radius = np.array([1.00134514, 1.00120612, 0.97468799, 0.99925559, 0.51530565, 0.21895842])
        exp_velocity = np.array([np.nan, 0.79599972, 0.84852814, 0.84852814, 0.84852814, 0.84852814])
        self.assertEqual(radius.shape, velocity.shape)
        np.testing.assert_almost_equal(radius, exp_radius, decimal=4)
        np.testing.assert_almost_equal(velocity, exp_velocity, decimal=4)

    def test_get_timepoint_range_with_bounds(self):
        """min_timepoint/max_timepoint clamp the reported timepoint range."""
        db = self.load_full_grid_db()
        res = db.get_timepoint_range()
        exp = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(res, exp)
        db.min_timepoint = 3
        res = db.get_timepoint_range()
        exp = [3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(res, exp)
        db.max_timepoint = 8
        res = db.get_timepoint_range()
        exp = [3, 4, 5, 6, 7, 8]
        self.assertEqual(res, exp)

    def test_get_flattened_values(self):
        """get_flattened_values concatenates per-timepoint arrays into one."""
        db = self.load_full_grid_db()
        radius, velocity = db.get_flattened_values('timepoint_warp_radius', 'local_velocity_mesh')
        exp_radius = np.array([np.nan, np.nan, np.nan, 1.0016676, 1.00000043, 0.98487195,
                               1.00134506, 1.00120612, 0.97468778, 0.99925555, 0.51530555, 0.21895845,
                               1.00134514, 1.00120612, 0.97468799, 0.99925559, 0.51530565, 0.21895842,
                               1.001977, 1.00000006, 0.99280825, 0.13994261, 0.53195755, 1.001977,
                               1.00000006, 0.99280825, 0.13994261, 0.53195755, 1.00252609, 0.99449928,
                               0.32051666, 1.00042521, 1.00030128, 0.99362276, 0.52814937, 1.0001087,
                               np.nan])
        exp_velocity = np.array([0.84852814, 0.84852814, 0.80435676, 0.84852814, 0.80435676, 0.84852814,
                                 0.84852814, 0.80435676, 0.84852814, 0.84852814, 0.84852814, 0.84852814,
                                 np.nan, 0.79599972, 0.84852814, 0.84852814, 0.84852814, 0.84852814,
                                 0.75368837, 0.84852814, 0.84852814, 0.84852814, 0.84852814, 0.7025816,
                                 np.nan, 0.84852814, 0.84852814, 0.84852814, 0.67953145, 0.84852814,
                                 0.84852814, 0.84852814, 0.66575476, np.nan, np.nan, np.nan, np.nan])
        self.assertEqual(radius.shape, velocity.shape)
        np.testing.assert_almost_equal(radius, exp_radius, decimal=4)
        np.testing.assert_almost_equal(velocity, exp_velocity, decimal=4)

    def test_add_single_track(self):
        """Adding one track populates coords, real coords and link maps."""
        db = grid_db.GridDB(processes=1, time_scale=2.5, space_scale=1.5)
        db.add_track(LINK1)
        # time index to x, y (in pixels)
        exp_timepoint_coords = {
            1: [(6.1, 6.2)],
            2: [(7.1, 7.2)],
            3: [(8.1, 8.2)],
            4: [(9.1, 9.2)],
            5: [(10.1, 10.2)],
        }
        # time index to x, y, t (in um, um, mins respectively)
        exp_timepoint_real_coords = {
            1: [(9.15, 9.3, 2.5)],
            2: [(10.65, 10.8, 5.0)],
            3: [(12.15, 12.3, 7.5)],
            4: [(13.65, 13.8, 10.0)],
            5: [(15.15, 15.3, 12.5)],
        }
        # track index to coordinate indices in each timestep
        exp_track_links = {
            0: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
        }
        # Map of timepoints to converting indices
        exp_timepoint_links = {
            (1, 2): {0: 0},
            (2, 3): {0: 0},
            (3, 4): {0: 0},
            (4, 5): {0: 0},
        }
        self.assertDictTuplesEqual(db.timepoint_coords, exp_timepoint_coords)
        self.assertDictTuplesEqual(db.timepoint_real_coords, exp_timepoint_real_coords)
        self.assertEqual(db.track_links, exp_track_links)
        self.assertEqual(db.timepoint_links, exp_timepoint_links)

    def test_add_two_tracks(self):
        """Two overlapping tracks interleave correctly in the per-timepoint maps."""
        db = grid_db.GridDB(processes=1, time_scale=2.5, space_scale=1.5)
        db.add_track(LINK1)
        db.add_track(LINK2)
        # time index to x, y (in pixels)
        exp_timepoint_coords = {
            1: [(6.1, 6.2)],
            2: [(7.1, 7.2), (6.3, 1.4)],
            3: [(8.1, 8.2), (7.3, 2.4)],
            4: [(9.1, 9.2), (8.3, 3.4)],
            5: [(10.1, 10.2), (9.3, 4.4)],
            6: [(10.3, 5.4)],
            7: [(11.3, 6.4)],
            8: [(12.3, 6.4)],
            9: [(13.3, 5.4)],
            10: [(14.3, 3.4)],
        }
        # time index to x, y, t (in um, um, mins respectively)
        exp_timepoint_real_coords = {
            1: [(9.15, 9.3, 2.5)],
            2: [(10.65, 10.8, 5.0), (9.45, 2.1, 5.0)],
            3: [(12.15, 12.3, 7.5), (10.95, 3.6, 7.5)],
            4: [(13.65, 13.8, 10.0), (12.45, 5.1, 10.0)],
            5: [(15.15, 15.3, 12.5), (13.95, 6.6, 12.5)],
            6: [(15.45, 8.1, 15.0)],
            7: [(16.95, 9.6, 17.5)],
            8: [(18.45, 9.6, 20.0)],
            9: [(19.95, 8.1, 22.5)],
            10: [(21.45, 5.1, 25.0)],
        }
        # track index to coordinate indices in each timestep
        exp_track_links = {
            0: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
            1: {2: 1, 3: 1, 4: 1, 5: 1, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0},
        }
        # Map of timepoints to converting indices
        exp_timepoint_links = {
            (1, 2): {0: 0},
            (2, 3): {0: 0, 1: 1},
            (3, 4): {0: 0, 1: 1},
            (4, 5): {0: 0, 1: 1},
            (5, 6): {1: 0},
            (6, 7): {0: 0},
            (7, 8): {0: 0},
            (8, 9): {0: 0},
            (9, 10): {0: 0},
        }
        self.assertDictTuplesEqual(db.timepoint_coords, exp_timepoint_coords)
        self.assertDictTuplesEqual(db.timepoint_real_coords, exp_timepoint_real_coords)
        self.assertEqual(db.track_links, exp_track_links)
        self.assertEqual(db.timepoint_links, exp_timepoint_links)
|
<reponame>jose-turintech/mlflow-turing-scoring-server
"""
This module implements and instantiates the common configuration class used in the project.
"""
# ────────────────────────────────────────── imports ────────────────────────────────────────── #
import sys
import tempfile
from pathlib import Path
from typing import Dict
from loguru import logger
from mlflow_turing_scoring_server.conf.logger_conf import logger_conf_factory, FileLoggerConf, file_logger_conf_factory
# ───────────────────────────────────────────────────────────────────────────────────────────── #
# Configuration Manager #
# ───────────────────────────────────────────────────────────────────────────────────────────── #
class ConfManager:
    """ Configuration Manager class

    Resolves project paths, locates the ``.env`` file, and (re)configures the
    loguru logger from environment-backed settings.
    """
    # APP paths
    path_conf: Path = Path(__file__).parent.resolve()  # conf package
    path_app: Path = path_conf.parent.resolve()  # mlflow_turing_scoring_server package
    path_src: Path = path_app.parent.resolve()  # src package
    path_root: Path = path_src.parent.resolve()  # mlflow-turing-scoring-server project

    # Library environment file
    _path_env_file: Path = path_src.joinpath('.env')
    _env_file: str = str(_path_env_file)
    _conf_prefix: str = None

    # Scratch directory used as the default location for log files.
    tmp_directory: Path = Path(tempfile.gettempdir()).joinpath("mlflow-turing-scoring-server")

    # The Logging Configurations object is instantiated once its use is invoked
    _logging_conf: FileLoggerConf = None
    defaults_logging_conf: Dict = dict(sink=tmp_directory.joinpath(*['logs', "mlflow_turing_scoring_server.log"]))

    # -------------------------------------------------------------------------------------------------
    def __init__(self, env_file: str or Path = None):
        # NOTE(review): `str or Path` evaluates to `str` at def time; it is kept
        # for documentation value only — confirm before tightening to a Union.
        if env_file:
            self.update_conf_mgr(env_file=env_file)
        else:
            # No explicit env file: configure logging from class defaults.
            self.update_logging_conf()
        logger.info("Configuration Manager initialized")

    # -------------------------------------------------------------------------------------------------
    @property
    def env_file(self) -> str:
        """
        Environment configuration file used in the current configuration
        """
        return self._env_file

    def update_conf_mgr(self, env_file: str):
        """
        Update all the configuration by loading the environment variables from the indicated file.
        """
        self._path_env_file = Path(env_file)
        # Keep the string path only when the file actually exists.
        self._env_file = str(self._path_env_file) if self._path_env_file.exists() else None
        if not self._path_env_file.exists():
            print(f"[WARNING] environment file does not exist: {env_file}")
            return
        self.update_logging_conf(_env_file=env_file)

    # -----------------------------------------------------------------------------------------------------------------
    # --- Logging Configuration
    # -----------------------------------------------------------------------------------------------------------------
    @property
    def logging_conf(self) -> FileLoggerConf:
        """
        :return: Logging configuration of the logs directed to file path outputs.
        """
        if self._logging_conf is None:
            # Lazily build the configuration on first access.
            self.update_logging_conf()
        return self._logging_conf

    def update_logging_conf(self, _env_file: str = None, defaults: dict = None):
        """
        Update the LoggingConf configuration by loading the environment variables from the indicated file and
        taking into account the default values
        """
        factory_args = dict(
            _env_file=_env_file or self._env_file,
            prefix='LOGGER',
            defaults=defaults or self.defaults_logging_conf
        )
        self._logging_conf: FileLoggerConf = file_logger_conf_factory(**factory_args)
        if self._logging_conf.enable:
            # Replace all existing sinks: stdout for info, stderr for errors,
            # plus the configured log file.
            logger.remove()
            logger.add(**logger_conf_factory(sink=sys.stdout, **factory_args).dict2add)
            logger.add(**logger_conf_factory(sink=sys.stderr, level="ERROR", **factory_args).dict2add)
            logger.add(**self._logging_conf.dict2add)
# ────────────────────────────────────────────────────────────────────────────────────────────────────
# ─── ConfManager instance
# ────────────────────────────────────────────────────────────────────────────────────────────────────
# Module-level singleton: importing this module configures logging as a side effect.
conf_mgr = ConfManager()
|
<reponame>k-koech/password_locker
#!/usr/bin/env python3.6
from os import system
import random
import string
from user import User
from credentials import Credentials
from time import sleep
def create_user(username, password):
    """Build and return a new User with the given username and password."""
    return User(username, password)
def save_users(user):
    """Persist a user by delegating to its own save_user() method."""
    user.save_user()
def check_existing_users(username, password):
    """Return True when a user exists with this username/password pair."""
    return User.user_exist(username, password)
def find_user(username, password):
    """Look up and return the user matching the given credentials."""
    return User.login(username, password)
#............... Accounts credentials..........................
def create_credentials(account, password):
    """Build and return a new Credentials entry for the given account."""
    return Credentials(account, password)
def save_credentials(credentials):
    """Persist a credentials entry by delegating to its save_credential() method."""
    credentials.save_credential()
def display_credentials():
    """Return every stored Credentials entry."""
    return Credentials.display_credentials()
def find_credential(account):
    """Return the stored credentials belonging to *account*, if any."""
    return Credentials.find_credentials(account)
def del_credentials(credentials):
    """Delete a credentials entry by delegating to its delete_credentials() method."""
    credentials.delete_credentials()
# ===================================================================
def main():
    """Top-level interactive menu: create a new account or log into an existing one.

    Loops back (via recursion) on invalid input instead of silently exiting.
    """
    print("WELCOME TO PASSWORD LOCKER")
    print("Use the number codes")
    print("1. Create a new Account\n2. Login")
    account_code = input()
    if account_code == "1":
        print('-' * 45)
        print("Create your account")
        print("Username : ")
        user_name = input()
        print("Password : ")
        password = input()
        save_users(create_user(user_name, password))  # create and save new user.
        print('\n')
        print(f"New account {user_name} created")
        system('clear')
        # login after registration
        login()
    elif account_code == '2':
        system('clear')
        login()
    else:
        # BUG FIX: previously any other input silently ended the program.
        print("Invalid choice! Use 1 or 2")
        main()
# RE-USABLE FUNCTIONS
# login function to be reused
def login():
    """Prompt for a username and password, then attempt to log in.

    Delegates the actual credential check and session menu to logged_in().
    """
    print("LOGIN")
    print("Username : ")
    search_username = input()
    # BUG FIX: prompt previously read "Passsword : " (typo).
    print("Password : ")
    search_password = input()
    logged_in(search_username, search_password)
def logged_in(search_username, search_password):
    """Authenticate the user and run the main service menu.

    On success, loops (via recursion) through the service menu until logout;
    on failure, returns to the top-level main() menu.
    """
    if check_existing_users(search_username, search_password):
        search_user = find_user(search_username, search_password)
        system('clear')
        print(f"Logged in as {search_user.username}")
        print('*' * 45)
        print("Choose the service you need")
        print("1. Store already existing account credentials ")
        print("2. Create a new account credentials ")
        print("3. Display my credentials ")
        print("4. Delete my credential ")
        print("5. Logout ")
        account_name = ""  # pre-declare so it exists regardless of the branch taken
        service_choice = input()
        if service_choice == "1":
            # Store credentials for an account the user already has elsewhere.
            print('.' * 45)
            print("Store already existing account credentials")
            print("Enter your Account name")
            account_name = input()
            print("Enter your Account password")
            account_password = input()
            save_credentials(create_credentials(account_name, account_password))
            print('\n')
            print(f"Credentials for {account_name} created successfully")
            sleep(2)
            system('clear')
            logged_in(search_username, search_password)
        elif service_choice == "2":
            # Create brand-new credentials, optionally with a generated password.
            print('.' * 45)
            print("Create a NEW account credentials")
            print("Enter your Account name")
            account_name = input()
            print("Do you want a system generated password?")
            print("1. Yes\n2. No")
            choice = input()
            if choice == "1":
                # 10-character password from uppercase letters and digits.
                # NOTE(review): uses `random`, which is not cryptographically
                # secure — consider the `secrets` module for password generation.
                password = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
                save_credentials(create_credentials(account_name, password))
                print(f"Your Generated password is {password}")
                print(f"Credentials for {account_name} created successfully")
                print("Loading... please wait")
                sleep(4)
                system('clear')
                logged_in(search_username, search_password)
            elif choice == "2":
                print(f"Enter {account_name} 's Account password")
                account_password = input()
                save_credentials(create_credentials(account_name, account_password))
                print(f"Credentials for {account_name} created successfully")
                sleep(2)
                system('clear')
                logged_in(search_username, search_password)
            else:
                print('.' * 45)
                print("Wrong choice! Try again")
                logged_in(search_username, search_password)
        elif service_choice == "3":
            # Display all stored credentials, then return to the menu.
            show_credentials()
            print("Press x or any key to exit ")
            exit = input()  # NOTE(review): shadows the `exit` builtin
            counter = 2
            # Both branches count down identically; any key continues.
            if exit == "X" or exit == 'x':
                for x in reversed(range(counter)):
                    print(f"Redirecting in {x} seconds")
                    sleep(1)
            else:
                for x in reversed(range(counter)):
                    print(f"Redirecting in {x} seconds")
                    sleep(1)
            system("clear")
            logged_in(search_username, search_password)
        elif service_choice == "4":
            # Delete a credentials entry chosen by account name.
            print('.' * 45)
            if display_credentials():
                print("*******Your accounts*******")
                for credentials in display_credentials():
                    print(f"=> {credentials.account}")
                print("Enter account name to delete")
                search_account = input()
                if find_credential(search_account):
                    search_credential = find_credential(search_account)
                    print("_" * 45)
                    search_credential.delete_credentials()
                    print('\n')
                    print(f"Your stored credentials for {search_credential.account} account is successfully deleted!!!")
                    sleep(3)
                    logged_in(search_username, search_password)
                else:
                    print("_" * 45)
                    print("That Credential you want to delete does not exist! Please create some")
                    print("Redirecting... please wait")
                    sleep(3)
                    logged_in(search_username, search_password)
            else:
                print("_" * 45)
                print("You dont seem to have any contacts saved yet")
                print("Loading... please wait")
                sleep(4)
                system("clear")
                logged_in(search_username, search_password)
        elif service_choice == "5":
            # Logout: return to the top-level menu.
            print("-" * 35)
            print("Logging you out ...")
            sleep(2)
            system('clear')
            main()
        else:
            print("Invalid choice! Try again")
            logged_in(search_username, search_password)
    else:
        # Authentication failed: back to the top-level menu.
        system('clear')
        print("Wrong credentials or User does not exist !! Try Again")
        print('-' * 45)
        main()
def show_credentials():
    """Display every saved credential, or a notice when none exist."""
    creds = display_credentials()  # fetch once instead of querying twice
    print('-' * 45)
    if creds:
        print("Your credentials")
        for credentials in creds:
            # Bug fix: the original held an unexpanded '<PASSWORD>' placeholder,
            # which is a SyntaxError inside an f-string.
            # NOTE(review): assumes the credential model exposes `.password`
            # (mirrors `.account` used elsewhere) -- confirm against the model.
            print(f"Account : {credentials.account} Password : {credentials.password}")
        print('-' * 45)
    else:
        print("You dont seem to have any CREDENTIALS saved yet")
if __name__ == '__main__':
    # Script entry point. (Bug fix: removed a stray trailing '|' artifact
    # that made this line a SyntaxError.)
    main()
import datetime
import json
import logging
import urllib.request
from typing import List
import boto3
from contrail.configuration import config
from contrail.crawler.providers import aws_ec2
from contrail.crawler.providers import BaseProvider, register_provider
# Module-level logger; per-region crawlers create child loggers under it.
logger_all_regions = logging.getLogger('contrail.crawler.aws_ec2_spot')

# Single shared boto3 session, authenticated from the crawler config file.
_session = boto3.Session(
    aws_access_key_id=config['AWS']['access_key_id'],
    aws_secret_access_key=config['AWS']['secret']
)

# AWS Price List retrieval information, used to collect details about instance types.
URL_REGION_INDEX = aws_ec2.URL_REGION_INDEX
URL_REGION_VERSION = aws_ec2.URL_REGION_VERSION
# Instance-type metadata is region-independent, so only this region is queried.
DETAIL_COLLECTION_REGION = 'us-east-1'
@register_provider
class AmazonEC2Spot(BaseProvider):
    """
    Price crawler for Amazon Spot Instances.

    Each crawler object crawls only one region. Since boto3 (and Amazon's web
    API) imposes a 1000-row limit on the number of instances that can be
    returned in a single query, retrieving a single "batch" of instances takes
    multiple crawling cycles.
    """

    # Instance-type details (vCPU, memory, ...) shared by every region's
    # crawler; lazily populated on the first crawl() call.
    instance_types = {}

    def __init__(self, region_name):
        """Create a crawler bound to a single AWS region."""
        super().__init__()
        self.region = region_name
        self.client = _session.client('ec2', region_name=region_name)
        self.logger = logging.getLogger('contrail.crawler.aws_ec2_spot.{region}'.format(region=self.region))
        self.instance_list = []
        """Working list of currently available instances in this region."""
        self.next_token = ''
        """Token given to AWS to continue building instance_list."""

    @classmethod
    def create_providers(cls) -> List['AmazonEC2Spot']:
        """Create one crawler per region in which the EC2 service is offered."""
        # Idiom fix: use cls so subclasses get instances of their own type.
        return [cls(rgn) for rgn in _session.get_available_regions('ec2')]

    def crawl(self) -> datetime.timedelta:
        """
        Collects the next set of instances in this region and appends them to the current `instance_list`.

        If there are no more instances in the current batch, finalize and upload the current `instance_list` instead.

        :return: how long the scheduler should wait before calling crawl() again.
        """
        if not self.__class__.instance_types:
            self.__class__.instance_types = self.__class__.get_instance_type_details()

        # An empty token with a non-empty list means the previous cycle
        # exhausted the batch: upload it and start over in an hour.
        if self.next_token == '' and len(self.instance_list) > 0:
            self.logger.info("Got all instances in this batch. Finalizing batch for upload.")
            self.store_provider_data(region=self.region, data=self.instance_list)
            self.instance_list.clear()
            return datetime.timedelta(minutes=60)

        response = self.client.describe_spot_price_history(
            NextToken=self.next_token,
            StartTime=datetime.datetime.now() - datetime.timedelta(minutes=60)
        )

        for instance in response['SpotPriceHistory']:
            # JSON can't serialize datetime objects, so convert them before save
            instance['Timestamp'] = instance['Timestamp'].isoformat()
            # Attach instance type information
            instance_type = instance.get('InstanceType')
            if instance_type is None:
                continue
            type_data = self.__class__.instance_types.get(instance_type)
            if type_data is None:
                continue
            instance.update(type_data)

        # Robustness fix: tolerate a missing 'NextToken' key (treat it as the
        # end of the batch) instead of raising KeyError.
        self.next_token = response.get('NextToken', '')
        self.instance_list += response['SpotPriceHistory']
        # Fix: dropped the unused `region=` argument from the format call.
        self.logger.info("Retrieved {count} instances".format(count=len(response['SpotPriceHistory'])))
        return datetime.timedelta(seconds=3)

    @classmethod
    def get_instance_type_details(cls) -> dict:
        """
        Load a dictionary that maps instance type names (e.g. 'm2.large') to instance details such as vCPUs, RAM, etc.
        """
        region_request = urllib.request.urlopen(URL_REGION_INDEX)
        region_data = region_request.read().decode('utf-8')
        region_json = json.loads(region_data)
        current_price_list_url = URL_REGION_VERSION.format(
            currentVersionUrl=region_json['regions'][DETAIL_COLLECTION_REGION]['currentVersionUrl']
        )

        pricelist_request = urllib.request.urlopen(current_price_list_url)
        pricelist_data = pricelist_request.read().decode('utf-8')
        pricelist_json = json.loads(pricelist_data)

        instance_types = {}
        for _sku, data in pricelist_json['products'].items():
            instance_type = data['attributes'].get('instanceType')
            if instance_type is None:
                continue
            if instance_types.get(instance_type) is None:
                # Remove data that does not apply to all instances of this type.
                # Robustness fix: pop(k, None) so a missing key can't raise.
                for k in ['location', 'locationType']:
                    data['attributes'].pop(k, None)
                instance_types[instance_type] = data['attributes']
            else:
                for attribute, value in data['attributes'].items():
                    existing_attr = instance_types[instance_type].get(attribute)
                    # Only collect instance type details that are universal across all instances with that type.
                    # Therefore, if a detail on one instance mismatches an earlier instance's detail, remove it --
                    # the detail is not common to that instance type.
                    if existing_attr is not None and existing_attr != value:
                        instance_types[instance_type].pop(attribute)

        logger_all_regions.info("Loaded instance type information.")
        return instance_types
|
from .basic_callbacks import Callback, CancelTrainException
from ..hook import Hooks
from ..utils import ToolBox as tbox
import torch
import torch.nn as nn
import math
from functools import partial
import matplotlib.pyplot as plt
class LR_Find(Callback):
    """Learning-rate finder: sweeps the LR exponentially from `min_lr` to
    `max_lr` over at most `max_iter` training iterations, aborting the run
    once the loss diverges (exceeds 10x the best loss seen so far)."""
    _order = 1

    def __init__(self, max_iter=100, min_lr=1e-6, max_lr=10, **kwargs):
        super().__init__(**kwargs)
        self.max_iter = max_iter
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.best_loss = 1e9

    def begin_batch(self):
        """Install the exponentially interpolated LR on every optimizer group."""
        if self.state != 'train':
            return
        fraction = self.n_iter / self.max_iter
        new_lr = self.min_lr * (self.max_lr / self.min_lr) ** fraction
        for group in self.opt.param_groups:
            group['lr'] = new_lr

    def after_step(self):
        """Cancel the sweep on divergence or once the iteration budget runs out."""
        diverged = self.loss > self.best_loss * 10
        if self.n_iter >= self.max_iter or diverged:
            raise CancelTrainException()
        if self.loss < self.best_loss:
            self.best_loss = self.loss
class ParamScheduler(Callback):
    """Drives an optimizer hyper-parameter (`pname`) from a schedule function
    -- or one schedule per parameter group -- as training progresses."""
    _order = 1

    def __init__(self, pname, sched_funcs, **kwargs):
        super().__init__(**kwargs)
        self.pname = pname
        self.sched_funcs = sched_funcs

    def begin_fit(self):
        # A single schedule is broadcast across every parameter group.
        if not isinstance(self.sched_funcs, (list, tuple)):
            self.sched_funcs = [self.sched_funcs] * len(self.opt.param_groups)

    def set_param(self):
        """Apply each group's schedule at the current training progress."""
        assert len(self.sched_funcs) == len(self.opt.param_groups)
        progress = self.p_epochs / self.epochs
        for group, sched in zip(self.opt.param_groups, self.sched_funcs):
            group[self.pname] = sched(progress)

    def begin_batch(self):
        if self.state == 'train':
            self.set_param()
def annealer(f):
    """Decorator turning ``f(start, end, pos)`` into a two-stage factory:
    calling the decorated name with ``(start, end)`` returns a function of
    ``pos`` (0..1) with the endpoints baked in."""
    def _factory(start, end):
        def _sched(pos):
            return f(start, end, pos)
        return _sched
    return _factory


@annealer
def sched_linear(start, end, pos):
    """Linear interpolation from start to end."""
    return start + pos * (end - start)


@annealer
def sched_cos(start, end, pos):
    """Half-cosine interpolation from start to end."""
    return start + (1 + math.cos(math.pi * (1 - pos))) * (end - start) / 2


@annealer
def sched_no(start, end, pos):  # pylint: disable=unused-argument
    """Constant schedule: always returns start."""
    return start


@annealer
def sched_exp(start, end, pos):
    """Exponential (geometric) interpolation from start to end."""
    return start * (end / start) ** pos


def cos_1cycle_anneal(start, high, end):
    """1-cycle cosine pair: warm up start->high, then anneal high->end."""
    warmup = sched_cos(start, high)  # pylint: disable=no-value-for-parameter
    cooldown = sched_cos(high, end)  # pylint: disable=no-value-for-parameter
    return [warmup, cooldown]
def combine_scheds(pcts, scheds):
    """Combine several schedules into one.

    `pcts` gives the fraction of total training occupied by each schedule in
    `scheds`; the fractions must be non-negative and sum to 1.

    Returns a function of global progress `pos` in [0, 1] that dispatches to
    the active schedule, rescaling `pos` to that schedule's local [0, 1).
    """
    assert sum(pcts) == 1.
    pcts = torch.tensor([0] + tbox.listify(pcts))
    assert torch.all(pcts >= 0)
    pcts = torch.cumsum(pcts, 0)

    def _inner(pos):
        # Bug fix: at pos == 1.0 the final cumsum boundary also satisfies
        # `pos >= pcts`, which previously selected an index past the last
        # schedule and crashed on `pcts[idx + 1]`. Clamp to the last segment.
        idx = int((pos >= pcts).nonzero().max())
        idx = min(idx, len(pcts) - 2)
        actual_pos = (pos - pcts[idx]) / (pcts[idx + 1] - pcts[idx])
        return scheds[idx](actual_pos)
    return _inner
class LayerAnalysisCallback(Callback):
    """Collects per-layer statistics via hooks during training and plots them
    afterwards: means/stds, value histograms, and the fraction of near-zero
    values for every hooked layer."""
    _order = 3

    def __init__(self, forward=True, hist_span=10, **kwargs):
        """
        forward: True to analyze each layer's forward outputs; False to
            analyze each layer's backward output gradients.
        hist_span: histogram analysis range. Should be larger for forward
            analysis (e.g. 10) and smaller for backward analysis (e.g. 0.1).
        """
        super().__init__(**kwargs)
        self.forward = forward
        # Symmetric histogram range around zero.
        self.hist_span = [-hist_span, hist_span]

    def begin_fit(self):
        # Register a stats-recording hook on every tracked module.
        def append_stats(hook, module, inputs, outputs):  # pylint: disable=unused-argument
            if not hasattr(hook, 'stats'):
                hook.stats = ([], [], [])
            if isinstance(outputs, tuple):  # backward hook
                outputs = outputs[0]
            means, stds, hists = hook.stats
            means.append(outputs[0].data.mean().cpu())
            stds .append(outputs[0].data.std().cpu())
            # 40-bin histogram over the configured span, one per step.
            hists.append(outputs[0].data.cpu().histc(40, *self.hist_span))
        self.hooks = Hooks(self.model, append_stats, self.forward)

    def after_fit(self):
        # Detach the hooks before rendering, so plotting can't record stats.
        self.hooks.remove()
        self.plot()

    def plot(self):
        """Render the three diagnostic figures from the recorded statistics."""
        mode = 'FORWARD' if self.forward else 'BACKWARD'
        # Means and standard deviations per layer over time
        fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(12, 4))
        for h in self.hooks:
            ms, ss, _ = h.stats
            ax0.plot(ms, label=h.name)
            ax1.plot(ss, label=h.name)
        ax0.legend(prop={'size': 6})
        ax0.set_title(f"{mode}: Mean", fontsize=16)
        ax1.legend(prop={'size': 6})
        ax1.set_title(f"{mode}: Standard deviation", fontsize=16)
        # Distribution of each layer's output values (log-scaled histogram image)
        figsize = (15, int(len(self.hooks)*0.7))
        fig, axes = plt.subplots(int(math.ceil(len(self.hooks)/3)), 3, figsize=figsize)
        [ax.axis('off') for ax in axes.flatten()]  # pylint:disable=expression-not-assigned
        for ax, h in zip(axes.flatten(), self.hooks):
            ax.axis('on')
            hist_matrix = torch.stack(h.stats[2]).t().float().log1p()
            extent = [0, hist_matrix.size()[1], *self.hist_span]
            im = ax.imshow(hist_matrix, origin='lower', extent=extent, aspect='auto')
            ax.set_title(h.name)
            fig.colorbar(im, ax=ax, shrink=1.0)
        fig.subplots_adjust(hspace=0.6, top=1-0.75/figsize[1])
        fig.suptitle(f'{mode}: Histogram of values by "log(1+x)"', fontsize=16)
        # plt.tight_layout()
        # Fraction of near-zero values in each layer's output over time
        figsize = (15, int(len(self.hooks)*0.7))
        fig, axes = plt.subplots(int(math.ceil(len(self.hooks)/3)), 3, figsize=figsize)
        [ax.axis('off') for ax in axes.flatten()]  # pylint:disable=expression-not-assigned
        for ax, h in zip(axes.flatten(), self.hooks):
            ax.axis('on')
            hist_matrix = torch.stack(h.stats[2]).t().float()
            # Bins 19..21 are the central bins of the 40-bin histogram,
            # i.e. the values closest to zero.
            tiny_ratio = hist_matrix[19:22].sum(0)/hist_matrix.sum(0)
            ax.plot(tiny_ratio)
            ax.set_ylim(0, 1.02)
            ax.set_title(h.name)
        fig.subplots_adjust(hspace=0.6, top=1-0.75/figsize[1])
        fig.suptitle(f'{mode}: Fraction of tiny values', fontsize=16)


# Backward-compatible alias for the older class name.
LayerOutputAnalysisHookCallback = LayerAnalysisCallback
class EarlyStopping(Callback):
    _order = -10000  # guarantee this runs last among callbacks

    def __init__(self, monitor='train', patience=10, min_delta=0., **kwargs):
        """
        Stop training when the monitored loss stops improving.

        Args:
            monitor: train loss or val loss
            patience: max patience epochs of getting worse
            min_delta: improvements smaller than min_delta count as no change
        """
        super().__init__(**kwargs)
        assert monitor in ['train', 'val'], '"monitor" must be "train" or "val"'
        self.monitor = monitor
        self.patience = patience
        self.min_delta = min_delta
        self.patience_num = 0
        # Bug fix: best_loss was never initialized, so the first after_epoch()
        # raised AttributeError.
        self.best_loss = float('inf')

    def after_epoch(self):
        loss = self.messages['metric_values_epoch'][self.monitor]['loss']
        if loss > self.best_loss - self.min_delta:
            self.patience_num += 1
        else:
            # Bug fix: track the best loss so patience is measured against the
            # best epoch seen so far rather than a stale value.
            self.best_loss = loss
            self.patience_num = 0
        if self.patience_num >= self.patience:
            print('\n ... Early stopping is triggered!\n')
            raise CancelTrainException()
class GradientClipping(Callback):
    """Clips gradient norms after the backward pass; a max_norm of 0 disables
    clipping entirely."""

    def __init__(self, max_norm=0., **kwargs):
        super().__init__(**kwargs)
        self.max_norm = max_norm

    def after_backward(self):
        if not self.max_norm:
            return
        nn.utils.clip_grad_norm_(self.model.parameters(), self.max_norm)
if __name__ == '__main__':
    pass
    # Manual smoke-test / demo code for the schedulers, kept for reference.
    # NOTE(review): `sched_lin` below looks like a typo for `sched_linear`.
    # a = torch.arange(0, 100)
    # p = torch.linspace(0.01, 1, 100)
    # annealings = "NO LINEAR COS EXP".split()
    # fns = [sched_no, sched_lin, sched_cos, sched_exp]
    # for fn, t in zip(fns, annealings):
    #     f = fn(2, 1e-2)
    #     plt.plot(a, [f(o) for o in p], label=t)
    # plt.legend()
    # plt.show()
    # sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])  # pylint: disable=no-value-for-parameter
    # plt.plot(a, [sched(o) for o in p])
    # plt.show()
|
#! /usr/bin/python
__author__="<NAME>"
__date__ ="$Jun 13, 2014 12:33:33 PM$"
import time
import os
from elasticsearch.transport import Transport
from elasticsearch import (Elasticsearch, RoundRobinSelector, ImproperlyConfigured, ElasticsearchException,
SerializationError, TransportError, NotFoundError, ConflictError, RequestError, ConnectionError)
import simplejson as json
from kazoo.client import KazooClient
from kazoo.exceptions import (KazooException)
# Enable verbose output from log() below.
os.environ['DEBUG'] = 'true'

##Zookeeper
#os.environ['ZK_ADDRESS'] = 'zookeeper1:2181,zookeeper2:2181,zookeeper3:2181'
#
##Elasticsearch
#os.environ['ES_CLUSTER'] = 'elasticsearch'
#os.environ['ES_ANALYTICS_INDEX'] = 'analytics'
#os.environ['ES_ANALYTICS_TYPE'] = 'data'
#os.environ['ES_REGISTRY_INDEX'] = 'docker_registry'
#os.environ['ES_REGISTRY_TYPE'] = 'tags'

# Global Elasticsearch client; initialized in the __main__ block.
es = None

import traceback
def log(data):
    """
    Print debug output when the DEBUG environment variable is 'true'.

    @param data: message string to print
    """
    # Bug fix: use .get() so a missing DEBUG variable disables logging
    # instead of raising KeyError.
    if os.environ.get('DEBUG') == 'true':
        print(data + '\n')
def get_from_index(index, type, id):
    """
    Read a single document from an Elasticsearch index, retrying up to 3 times.

    NOTE(review): the parameters `type` and `id` shadow Python builtins; they
    are kept as-is for call-site compatibility.

    @param index: name of the ES index to read from
    @param type: ES document type
    @param id: id of the document to fetch
    @return tuple of ('true'/'false' success flag, number of attempts made)
    """
    response = None
    #Try 3 times to read the document from ES, each time picking a random ES node address in case of failure
    for retries in range(3):
        try:
            response = es.get(index=index, doc_type=type, id=id)
            log("ES Get Response :: " + json.dumps(response))
        except ImproperlyConfigured:
            log("ES ImproperlyConfigured!" + traceback.format_exc())
            continue
        except ElasticsearchException:
            log("ES ElasticsearchException!" + traceback.format_exc())
            continue
        except TransportError:
            log("ES TransportError!" + traceback.format_exc())
            continue
        except NotFoundError:
            log("ES NotFoundError!" + traceback.format_exc())
            continue
        except ConflictError:
            log("ES ConflictError!" + traceback.format_exc())
            continue
        except RequestError:
            log("ES RequestError!" + traceback.format_exc())
            continue
        except SerializationError:
            log("ES SerializationError!" + traceback.format_exc())
            continue
        except ConnectionError:
            log("ES ConnectionError!" + traceback.format_exc())
            continue
        except Exception:
            log("ES Exception!" + traceback.format_exc())
            continue
        finally:
            log("Total number of ES read attempts: " + str(retries + 1))
        #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying
        break
    # `retries` leaks from the loop: 0-2 depending on how many attempts ran.
    if response is None or response == '':
        return ('false', retries + 1)
    else:
        return ('true', retries + 1)
def set_in_index(document, index, type):
    """
    Store a document in the Elasticsearch index via HTTP APIs, retrying up
    to 3 times. The document's 'id' field is used as the ES document id.

    NOTE(review): the parameter `type` shadows a Python builtin; kept as-is
    for call-site compatibility.

    @type document: dict
    @param document: JSON document to index (must contain an 'id' key)
    @param index: name of the ES index to write to
    @param type: ES document type
    @return 'true' on success, 'false' if no response after all attempts
    """
    response = None
    #Try 3 times to store the document in ES, each time picking a random ES node address in case of failure
    for retries in range(3):
        try:
            log('ES Set Request :: ' + json.dumps(document) + ' : ' + index + ':' + type)
            response = es.index(index=index, doc_type=type, id=document['id'], body=document)
            log("ES Set Response :: " + json.dumps(response))
        except ImproperlyConfigured:
            log("ES ImproperlyConfigured!" + traceback.format_exc())
            continue
        except ElasticsearchException:
            log("ES ElasticsearchException!" + traceback.format_exc())
            continue
        except TransportError:
            log("ES TransportError!" + traceback.format_exc())
            continue
        except NotFoundError:
            log("ES NotFoundError!" + traceback.format_exc())
            continue
        except ConflictError:
            log("ES ConflictError!" + traceback.format_exc())
            continue
        except RequestError:
            log("ES RequestError!" + traceback.format_exc())
            continue
        except SerializationError:
            log("ES SerializationError!" + traceback.format_exc())
            continue
        except ConnectionError:
            log("ES ConnectionError!" + traceback.format_exc())
            continue
        except Exception:
            log("ES Exception!" + traceback.format_exc())
            continue
        finally:
            log("Total number of ES write attempts: " + str(retries + 1))
        #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying
        break
    if response is None or response == '':
        return 'false'
    else:
        return 'true'
def get_es_node_addresses():
    """
    Get the Elasticsearch node addresses via Zookeeper
    @return List of Elasticsearch node ip addresses and ports
    """
    zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)
    zk.start()
    esNodes = []
    try:
        # Fetch the list of ES cluster node names from Zookeeper
        zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'
        for node in zk.get_children(zkPath):
            # Retrieve the JSON metadata associated with each ephemeral ES node
            nodeData = json.loads(zk.get(zkPath + '/' + node)[0])
            # Collect each node ip address and port
            esNodes.append(nodeData['address'] + ':' + nodeData['port'])
    except KazooException:
        log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc())
    zk.stop()
    zk.close()
    log('ES Node list retrieved from Zookeeper :: ' + json.dumps(esNodes))
    return esNodes
#Overriding the default ES Sniffing mechanism with Zookeeper
class ZookeeperTransport(Transport):
    """Elasticsearch Transport that discovers cluster nodes through Zookeeper
    instead of the native HTTP sniffing mechanism."""

    def get_es_node_addresses(self):
        """
        Get the Elasticsearch node addresses via Zookeeper
        @return List of host dicts ({'host': ..., 'port': ...}) usable by the
            connection pool
        """
        esNodes = []
        #Initlate the Zookeeper Kazoo connection
        #kz_retry = KazooRetry(max_tries=3, delay=0.5, backoff=2)
        zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)
        zk.start()
        try:
            #Fetch the list of ES cluster node names from Zookeeper
            zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'
            children = zk.get_children(zkPath)
            #Retrieve the JSON metadata associated with each ephemeral ES node
            for node in children:
                zookeeperAddr = zkPath + '/' + node
                esNodeInfo = zk.get(zookeeperAddr)
                jsonData = json.loads(esNodeInfo[0])
                #Collect each node ip address and port
                host = {'host':jsonData['address'], 'port': int(jsonData['port'])}
                esNodes.append(host)
        except KazooException:
            log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc());
        #Close and Zookeeper connection
        zk.stop()
        zk.close()
        return esNodes

    def sniff_hosts(self):
        """
        Obtain a list of nodes from the cluster and create a new connection
        pool using the information retrieved.

        To extract the node connection parameters use the `nodes_to_host_callback`.
        """
        previous_sniff = self.last_sniff
        hosts = []
        try:
            # reset last_sniff timestamp
            self.last_sniff = time.time()
            try:
                hosts = self.get_es_node_addresses()
            except Exception:
                raise TransportError("N/A", "Unable to sniff hosts." + traceback.format_exc())
        except:
            # keep the previous value on error
            # NOTE(review): deliberately a bare except so *any* failure restores
            # the previous sniff timestamp before re-raising.
            self.last_sniff = previous_sniff
            raise
        # we weren't able to get any nodes, maybe using an incompatible
        # transport_schema or host_info_callback blocked all - raise error.
        if not hosts:
            raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found." + traceback.format_exc())
        self.set_connections(hosts)
def _percentile_search(es, must_clauses, field):
    """Run a percentiles aggregation over `field` for documents matching all
    of `must_clauses`, then print the sorted percentile table.

    Consolidates four previously copy-pasted query bodies into one helper;
    the public calculate_*_time_percentiles signatures are unchanged.
    """
    response = es.search(index=os.environ['ES_ANALYTICS_INDEX'], doc_type=os.environ['ES_ANALYTICS_TYPE'], body=
    {
        "query": {
            "bool": {
                "must": must_clauses
            }
        },
        "size": 0,
        "aggs": {
            "load_time_outlier": {
                "percentiles": {
                    "field": field
                }
            }
        }
    })
    print_percentiles(response)

def calculate_build_time_percentiles(es):
    """Percentiles of image build time across successful builds."""
    _percentile_search(es, [{"match": {"build": "true"}}], "build-time")

def calculate_push_time_percentiles(es):
    """Percentiles of push time for images that were built and pushed."""
    _percentile_search(es, [{"match": {"build": "true"}}, {"match": {"push": "true"}}], "push-time")

def calculate_pull_time_percentiles(es):
    """Percentiles of pull time for images that were built, pushed and pulled."""
    _percentile_search(es, [{"match": {"build": "true"}}, {"match": {"push": "true"}}, {"match": {"pull": "true"}}], "pull-time")

def calculate_get_time_percentiles(es):
    """Percentiles of registry get time for images that were built, pushed and fetched."""
    _percentile_search(es, [{"match": {"build": "true"}}, {"match": {"push": "true"}}, {"match": {"get": "true"}}], "get-time")
def print_percentiles(response):
    """Log the percentile table from a `load_time_outlier` percentiles
    aggregation response, sorted by ascending percentile.

    @param response: raw ES search response containing the aggregation
    """
    #Sort the percentile data
    # Bug fix: on Python 3, map() returns an iterator with no .sort() method;
    # build a sorted list instead (this also works on Python 2).
    percentileKeys = sorted(map(float, response['aggregations']['load_time_outlier']['values'].keys()))
    log('Percentiles in Secs:')
    for percentileKey in percentileKeys:
        log(str(percentileKey) + "% : " + str(response['aggregations']['load_time_outlier']['values'][str(percentileKey)]))
def calculate_stats(successCondition, failureCondition):
    """Count documents matching the success and failure conditions and log
    the totals plus success/failure percentages.

    @param successCondition: list of ES bool-must clauses counting successes
    @param failureCondition: list of ES bool-must clauses counting failures
    """
    successResponse = es.search(index=os.environ['ES_ANALYTICS_INDEX'], doc_type=os.environ['ES_ANALYTICS_TYPE'], body=
    {
        "query": {
            "bool": {
                "must": successCondition
            }
        },
        "size": 0
    })
    failureResponse = es.search(index=os.environ['ES_ANALYTICS_INDEX'], doc_type=os.environ['ES_ANALYTICS_TYPE'], body=
    {
        "query": {
            "bool": {
                "must": failureCondition
            }
        },
        "size": 0
    })
    success = successResponse['hits']['total']
    failure = failureResponse['hits']['total']
    total = failure + success
    # Bug fix: avoid ZeroDivisionError when no documents match either query.
    if total == 0:
        log('Totals: 0/0 (no matching documents)')
        return
    successPercent = (float(success)/float(total))*100
    failurePercent = (float(failure)/float(total))*100
    log('Totals: ' + str(success) + '/' + str(total))
    log('Success: ' + str(successPercent) + '%')
    log('Failure: ' + str(failurePercent) + '%')
if __name__ == "__main__":
#Initiate the ES connection pool
es = Elasticsearch(get_es_node_addresses(), sniff_on_start=True, sniff_on_connection_fail=True, max_retries=3, sniffer_timeout=180, selector_class=RoundRobinSelector, sniff_timeout=1, transport_class=ZookeeperTransport)
log('############## DOCKER BUILD STATS ####################')
calculate_stats([{"match": {"build": "true"}}], [{"match": {"build": "false"}}])
calculate_build_time_percentiles(es)
log('############## DOCKER PUSH STATS ####################')
calculate_stats([{"match": {"build": "true"}},{"match": {"push": "true"}}], [{"match": {"build": "true"}}, {"match": {"push": "false"}}])
calculate_push_time_percentiles(es)
log('############## DOCKER PULL STATS ####################')
calculate_stats([{"match": {"build": "true"}},{"match": {"push": "true"}}, {"match": {"pull": "true"}} ], [{"match": {"build": "true"}}, {"match": {"push": "true"}}, {"match": {"pull": "false"}}])
calculate_pull_time_percentiles(es)
log('############## ES GET STATS ####################')
calculate_stats([{"match": {"build": "true"}},{"match": {"push": "true"}}, {"match": {"get": "true"}} ], [{"match": {"build": "true"}}, {"match": {"push": "true"}}, {"match": {"get": "false"}}])
calculate_get_time_percentiles(es)
|
import logging
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
from enum import unique
from typing import ClassVar, Collection, Dict, Iterable, Optional, Set, Union
from asn1crypto import cms, core, keys, x509
from pyhanko_certvalidator import CertificateValidator
from pyhanko_certvalidator.errors import (
InvalidCertificateError,
PathBuildingError,
PathValidationError,
RevokedError,
)
from pyhanko_certvalidator.path import ValidationPath
from pyhanko_certvalidator.validate import ACValidationResult
from ...pdf_utils.misc import OrderedEnum
from ..ades.report import AdESFailure, AdESIndeterminate, AdESSubIndic
from ..diff_analysis import (
DiffResult,
ModificationLevel,
SuspiciousModification,
)
from .errors import SigSeedValueValidationError
from .settings import KeyUsageConstraints
# Public API of this module (some names are defined further down the file).
__all__ = [
    'SignatureStatus', 'TimestampSignatureStatus',
    'X509AttributeInfo', 'CertifiedAttributeInfo',
    'ClaimedAttributes', 'CertifiedAttributes',
    'CAdESSignerAttributeAssertions',
    'StandardCMSSignatureStatus',
    'SignatureCoverageLevel', 'ModificationInfo',
    'PdfSignatureStatus', 'DocumentTimestampStatus'
]

logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class SignatureStatus:
    """
    Class describing the validity of a (general) CMS signature.
    """

    intact: bool
    """
    Reports whether the signature is *intact*, i.e. whether the hash of the
    message content (which may or may not be embedded inside the CMS object
    itself) matches the hash value that was signed.
    """

    valid: bool
    """
    Reports whether the signature is *valid*, i.e. whether the hash's signature
    actually validates.
    """

    trust_problem_indic: Optional[AdESSubIndic]
    """
    If not ``None``, provides the AdES subindication indication what went
    wrong when validating the signer's certificate.
    """

    signing_cert: x509.Certificate
    """
    Contains the certificate of the signer, as embedded in the CMS object.
    """

    pkcs7_signature_mechanism: str
    """
    CMS signature mechanism used.
    """

    # TODO: also here some ambiguity analysis is in order
    md_algorithm: str
    """
    Message digest algorithm used.
    """

    validation_path: ValidationPath
    """
    Validation path providing a valid chain of trust from the signer's
    certificate to a trusted root certificate.
    """

    # XXX frozenset makes more sense here, but asn1crypto doesn't allow that
    # (probably legacy behaviour)
    key_usage: ClassVar[Set[str]] = {'non_repudiation'}
    """
    Class property indicating which key usages are accepted on the signer's
    certificate. The default is ``non_repudiation`` only.
    """

    extd_key_usage: ClassVar[Optional[Set[str]]] = None
    """
    Class property indicating which extended key usage key purposes are accepted
    to be present on the signer's certificate.

    See :attr:`.KeyUsageConstraints.extd_key_usage`.
    """

    def summary_fields(self):
        # Generator of summary tokens consumed by summary(); currently yields
        # only the certificate trust state.
        if self.trusted:
            cert_status = 'TRUSTED'
        elif self.revoked:
            cert_status = 'REVOKED'
        else:
            cert_status = 'UNTRUSTED'
        yield cert_status

    @property
    def revoked(self) -> bool:
        """
        Reports whether the signer's certificate has been revoked or not.
        If this field is ``True``, then obviously :attr:`trusted` will be
        ``False``.
        """
        return self.trust_problem_indic in (
            AdESFailure.REVOKED,
            AdESIndeterminate.REVOKED_CA_NO_POE,
            AdESIndeterminate.REVOKED_NO_POE
        )

    @property
    def trusted(self) -> bool:
        """
        Reports whether the signer's certificate is trusted w.r.t. the currently
        relevant validation context and key usage requirements.
        """
        return self.valid and self.trust_problem_indic is None

    # TODO explain in more detail.
    def summary(self, delimiter=','):
        """
        Provide a textual but machine-parsable summary of the validity.
        """
        if self.intact and self.valid:
            return 'INTACT:' + delimiter.join(self.summary_fields())
        else:
            return 'INVALID'

    @classmethod
    async def validate_cert_usage(
            cls, validator: CertificateValidator,
            key_usage_settings: KeyUsageConstraints = None):
        # Merge caller-supplied constraints with the class-level defaults:
        # explicit settings win; otherwise fall back to cls.key_usage and
        # cls.extd_key_usage.
        key_usage_settings = key_usage_settings or KeyUsageConstraints()
        key_usage_settings = KeyUsageConstraints(
            key_usage=(
                cls.key_usage if key_usage_settings.key_usage is None
                else key_usage_settings.key_usage
            ),
            extd_key_usage=(
                cls.extd_key_usage if key_usage_settings.extd_key_usage is None
                else key_usage_settings.extd_key_usage
            )
        )
        cert: x509.Certificate = validator._certificate
        path = None
        try:
            # validate usage without going through pyhanko_certvalidator
            key_usage_settings.validate(cert)
            path = await validator.async_validate_usage(key_usage=set())
            ades_status = None
        except InvalidCertificateError as e:
            # TODO accumulate these somewhere?
            logger.warning(e)
            ades_status = AdESIndeterminate.CHAIN_CONSTRAINTS_FAILURE
        except RevokedError:
            # TODO have certvalidator report with which of the three
            # revocation cases under AdES we're dealing with.
            # (For now, assume the worst.)
            ades_status = AdESFailure.REVOKED
        except PathBuildingError as e:
            logger.warning(e)
            ades_status = AdESIndeterminate.NO_CERTIFICATE_CHAIN_FOUND
        except PathValidationError as e:
            logger.warning(e)
            # TODO make error reporting in certvalidator more granular
            # so we can actually set proper AdES values
            # (e.g. expiration-related ones, distinguish between CA and EE,
            # revinfo freshness...)
            ades_status = AdESIndeterminate.CERTIFICATE_CHAIN_GENERAL_FAILURE
        if ades_status is not None:
            subj = cert.subject.human_friendly
            logger.warning(f"Chain of trust validation for {subj} failed.")
        # Returns (AdES failure/indeterminate status or None, validation path
        # or None).
        return ades_status, path

    @property
    def _trust_anchor(self):
        # Human-friendly name of the root of the validation path, if available.
        if self.validation_path is not None:
            trust_anchor: x509.Certificate = self.validation_path[0]
            return trust_anchor.subject.human_friendly
        else:
            return "No path to trust anchor found."
@dataclass(frozen=True)
class TimestampSignatureStatus(SignatureStatus):
    """
    Signature status class used when validating timestamp tokens.
    """

    # Overrides the SignatureStatus class-level defaults for TSA certs.
    key_usage = set()
    """
    There are no (non-extended) key usage requirements for TSA certificates.
    """

    extd_key_usage = {'time_stamping'}
    """
    TSA certificates must have the ``time_stamping`` extended key usage
    extension (OID 1.3.6.1.5.5.7.3.8).
    """

    timestamp: datetime
    """
    Value of the timestamp token as a datetime object.
    """

    def describe_timestamp_trust(self):
        """Return a human-readable multi-line description of the timestamp
        token's cryptographic soundness and the TSA certificate's trust
        status."""
        tsa = self.signing_cert

        return (
            "This timestamp is backed by a time stamping authority.\n"
            "The timestamp token is cryptographically "
            f"{'' if self.intact and self.valid else 'un'}sound.\n"
            f"TSA certificate subject: \"{tsa.subject.human_friendly}\"\n"
            f"TSA certificate SHA1 fingerprint: {tsa.sha1.hex()}\n"
            f"TSA certificate SHA256 fingerprint: {tsa.sha256.hex()}\n"
            f"TSA cert trust anchor: \"{self._trust_anchor}\"\n"
            "The TSA certificate is "
            f"{'' if self.trusted else 'un'}trusted."
        )
@dataclass(frozen=True)
class X509AttributeInfo:
    """
    Info on an X.509 attribute.
    """

    # Immutable value type: one attribute type paired with all of its values.
    attr_type: cms.AttCertAttributeType
    """
    The certified attribute's type.
    """

    attr_values: Iterable[core.Asn1Value]
    """
    The certified attribute's values.
    """
@dataclass(frozen=True)
class CertifiedAttributeInfo(X509AttributeInfo):
    """
    Info on a certified attribute, including AC validation results.
    """

    # Extends X509AttributeInfo with the validation outcomes of the attribute
    # certificate(s) from which the values were sourced.
    validation_results: Iterable[ACValidationResult]
    """
    The validation details for the attribute in question
    (possibly several if values for the same attribute were sourced from
    several different ACs).
    """
class CertifiedAttributes:
    """
    Container class for extracted attribute certificate information.
    """

    @classmethod
    def from_results(cls, results: Iterable[ACValidationResult]):
        """Group validated attributes by type and wrap each group in a
        :class:`CertifiedAttributeInfo` record."""
        # Classify values and originating results by attribute type.
        grouped = defaultdict(lambda: ([], []))
        for res in results:
            for kind, attr in res.approved_attributes.items():
                values, origins = grouped[kind]
                values.extend(attr['values'])
                origins.append(res)
        # Package each type's values and results into an info record.
        infos = CertifiedAttributes()
        for kind, (values, origins) in grouped.items():
            infos._attrs[kind] = CertifiedAttributeInfo(
                attr_type=cms.AttCertAttributeType(kind),
                # (shallow) immutability
                attr_values=tuple(values),
                validation_results=tuple(origins)
            )
        return infos

    def __init__(self):
        self._attrs: Dict[str, CertifiedAttributeInfo] = {}

    def __getitem__(self, item: str) -> CertifiedAttributeInfo:
        return self._attrs[item]

    def __len__(self):
        return len(self._attrs)

    def __bool__(self):
        return len(self._attrs) > 0

    def __iter__(self):
        return iter(self._attrs.values())

    def __contains__(self, item: str) -> bool:
        return item in self._attrs
class ClaimedAttributes:
    """
    Container class for extracted information on attributes asserted
    by a signer without an attribute certificate.
    """

    @classmethod
    def from_iterable(cls, attrs: Iterable[cms.AttCertAttribute]):
        """
        Group the claimed attributes by type.

        Bug fix: the original wrapped each group in a ``CertifiedAttributeInfo``
        with an always-empty ``validation_results`` tuple (copy-paste from
        ``CertifiedAttributes.from_results``). Claimed attributes carry no AC
        validation data, so plain ``X509AttributeInfo`` records are used,
        matching this class's ``_attrs`` annotation.
        """
        infos = ClaimedAttributes()
        by_type = defaultdict(list)
        for attr in attrs:
            by_type[attr['type'].native].extend(attr['values'])
        for attr_type, type_values in by_type.items():
            infos._attrs[attr_type] = X509AttributeInfo(
                attr_type=cms.AttCertAttributeType(attr_type),
                # (shallow) immutability
                attr_values=tuple(type_values)
            )
        return infos

    def __init__(self):
        self._attrs: Dict[str, X509AttributeInfo] = {}

    def __getitem__(self, item: str) -> X509AttributeInfo:
        return self._attrs[item]

    def __len__(self):
        return len(self._attrs)

    def __bool__(self):
        return bool(self._attrs)

    def __iter__(self):
        return iter(self._attrs.values())

    def __contains__(self, item: str) -> bool:
        return item in self._attrs
@dataclass(frozen=True)
class CAdESSignerAttributeAssertions:
    """
    Value type describing information extracted (and, if relevant, validated)
    from a ``signer-attrs-v2`` signed attribute.
    """

    claimed_attrs: ClaimedAttributes
    """
    Attributes claimed by the signer without additional justification.
    May be empty.
    """

    certified_attrs: Optional[CertifiedAttributes] = None
    """
    Attributes claimed by the signer using an attribute certificate.

    This field will only be populated if an attribute certificate
    validation context is available, otherwise its value will be ``None``,
    even if there are no attribute certificates present.
    """

    ac_validation_errs: \
        Optional[Collection[Union[PathValidationError, PathBuildingError]]] \
        = None
    """
    Attribute certificate validation errors.

    This field will only be populated if an attribute certificate
    validation context is available, otherwise its value will be ``None``,
    even if there are no attribute certificates present.
    """

    unknown_attrs_present: bool = False
    """
    Records if the ``signer-attrs-v2`` attribute contained certificate types
    or signed assertions that could not be processed.

    This does not affect the validation process by default, but will trigger
    a warning.
    """

    @property
    def valid(self):
        # The assertion set is valid iff no AC validation errors were recorded
        # (None and an empty collection are both falsy).
        return not self.ac_validation_errs
@dataclass(frozen=True)
class StandardCMSSignatureStatus(SignatureStatus):
    """
    Status of a standard "end-entity" CMS signature, potentially with
    timing information embedded inside.
    """

    signer_reported_dt: Optional[datetime] = None
    """
    Signer-reported signing time, if present in the signature.

    Generally speaking, this timestamp should not be taken as fact.
    """

    timestamp_validity: Optional[TimestampSignatureStatus] = None
    """
    Validation status of the signature timestamp token embedded in this
    signature, if present.
    """

    content_timestamp_validity: Optional[TimestampSignatureStatus] = None
    """
    Validation status of the content timestamp token embedded in this
    signature, if present.
    """

    ac_attrs: Optional[CertifiedAttributes] = None
    """
    Certified attributes sourced from valid attribute certificates embedded into
    the ``SignedData``'s ``certificates`` field and the CAdES-style
    ``signer-attrs-v2`` attribute (if present).

    Will be ``None`` if no validation context for attribute certificate
    validation was provided.

    .. note::
        There is a semantic difference between attribute certificates
        extracted from the ``certificates`` field and those extracted from
        the ``signer-attrs-v2`` attribute.
        In the former case, the ACs are not covered by the signature.
        However, a CAdES-style ``signer-attrs-v2`` attribute is signed, so
        the signer is expected to have explicitly _acknowledged_ all attributes,
        in the AC. See also :attr:`cades_signer_attrs`.
    """

    ac_validation_errs: \
        Optional[Collection[Union[PathValidationError, PathBuildingError]]] \
        = None
    """
    Errors encountered while validating attribute certificates embedded into
    the ``SignedData``'s ``certificates`` field and the CAdES-style
    ``signer-attrs-v2`` attribute (if present).

    Will be ``None`` if no validation context for attribute certificate
    validation was provided.
    """

    cades_signer_attrs: Optional[CAdESSignerAttributeAssertions] = None
    """
    Information extracted and validated from the signed ``signer-attrs-v2``
    attribute defined in CAdES.
    """

    @property
    def bottom_line(self) -> bool:
        """
        Formulates a general judgment on the validity of this signature.
        This takes into account the cryptographic validity of the signature,
        the signature's chain of trust and the validity of the timestamp token
        (if present).

        :return:
            ``True`` if all constraints are satisfied, ``False`` otherwise.
        """
        # Absent timestamps do not count against the signature.
        ts = self.timestamp_validity
        if ts is None:
            timestamp_ok = True
        else:
            timestamp_ok = ts.valid and ts.trusted

        content_ts = self.content_timestamp_validity
        if content_ts is None:
            content_timestamp_ok = True
        else:
            content_timestamp_ok = content_ts.valid and content_ts.trusted

        return (
            self.intact and self.valid and self.trusted and timestamp_ok
            and content_timestamp_ok
        )

    def summary_fields(self):
        # Extend the base summary with timestamp and CAdES attribute verdicts.
        yield from super().summary_fields()
        if self.timestamp_validity is not None:
            yield 'TIMESTAMP_TOKEN<%s>' % (
                self.timestamp_validity.summary(delimiter='|')
            )
        if self.content_timestamp_validity is not None:
            yield 'CONTENT_TIMESTAMP_TOKEN<%s>' % (
                self.content_timestamp_validity.summary(delimiter='|')
            )
        if self.cades_signer_attrs is not None \
                and not self.cades_signer_attrs.valid:
            yield 'CERTIFIED_SIGNER_ATTRS_INVALID'

    def pretty_print_details(self):
        """Render all sections plus a bottom-line verdict as readable text."""
        def fmt_section(hdr, body):
            return '\n'.join(
                (hdr, '-' * len(hdr), body, '\n')
            )
        sections = self.pretty_print_sections()
        bottom_line = (
            f"The signature is judged {'' if self.bottom_line else 'IN'}VALID."
        )
        sections.append(("Bottom line", bottom_line))
        return '\n'.join(
            fmt_section(hdr, body) for hdr, body in sections
        )

    def pretty_print_sections(self):
        """Assemble the (header, body) text sections for pretty-printing."""
        cert: x509.Certificate = self.signing_cert
        # TODO add section about ACs
        if self.trusted:
            trust_status = "trusted"
        elif self.revoked:
            trust_status = "revoked"
        else:
            trust_status = "untrusted"
        about_signer = (
            f"Certificate subject: \"{cert.subject.human_friendly}\"\n"
            f"Certificate SHA1 fingerprint: {cert.sha1.hex()}\n"
            f"Certificate SHA256 fingerprint: {cert.sha256.hex()}\n"
            f"Trust anchor: \"{self._trust_anchor}\"\n"
            f"The signer's certificate is {trust_status}."
        )

        validity_info = (
            "The signature is cryptographically "
            f"{'' if self.intact and self.valid else 'un'}sound.\n\n"
            f"The digest algorithm used was '{self.md_algorithm}'.\n"
            f"The signature mechanism used was "
            f"'{self.pkcs7_signature_mechanism}'."
        )

        if 'ecdsa' in self.pkcs7_signature_mechanism:
            ec_params: keys.ECDomainParameters = \
                cert.public_key['algorithm']['parameters']
            if ec_params.name == 'named':
                curve_oid: core.ObjectIdentifier = ec_params.chosen
                validity_info += (
                    f"\nThe elliptic curve used for the signer's ECDSA "
                    f"public key was '{curve_oid.native}' "
                    f"(OID: {curve_oid.dotted})."
                )

        timing_infos = []
        reported_ts = self.signer_reported_dt
        if reported_ts is not None:
            timing_infos.append(
                f"Signing time as reported by signer: {reported_ts.isoformat()}"
            )

        tst_status = self.timestamp_validity
        if tst_status is not None:
            ts = tst_status.timestamp
            timing_infos.append(
                f"Signature timestamp token: {ts.isoformat()}\n"
                f"The token is guaranteed to be newer than the signature.\n"
                f"{tst_status.describe_timestamp_trust()}"
            )

        # FIX: this block previously re-read self.timestamp_validity and
        # guarded on the wrong variable (copy-paste slip), which reported the
        # signature timestamp as the content timestamp and raised
        # AttributeError when only a signature timestamp was present.
        content_tst_status = self.content_timestamp_validity
        if content_tst_status is not None:
            ts = content_tst_status.timestamp
            timing_infos.append(
                f"Content timestamp token: {ts.isoformat()}\n"
                f"The token is guaranteed to be older than the signature.\n"
                f"{content_tst_status.describe_timestamp_trust()}"
            )

        timing_info = (
            "No available information about the signing time."
            if not timing_infos else '\n\n'.join(timing_infos)
        )
        return [
            ("Signer info", about_signer), ("Integrity", validity_info),
            ("Signing time", timing_info),
        ]
@unique
class SignatureCoverageLevel(OrderedEnum):
    """
    Indicate the extent to which a PDF signature (cryptographically) covers
    a document. Note that this does *not* pass judgment on whether uncovered
    updates are legitimate or not, but as a general rule, a legitimate signature
    will satisfy at least :attr:`ENTIRE_REVISION`.
    """
    # Values are ordered: higher value = more complete coverage (OrderedEnum).

    UNCLEAR = 0
    """
    The signature's coverage is unclear and/or disconnected.
    In standard PDF signatures, this is usually a bad sign.
    """

    CONTIGUOUS_BLOCK_FROM_START = 1
    """
    The signature covers a contiguous block in the PDF file stretching from
    the first byte of the file to the last byte in the indicated ``/ByteRange``.
    In other words, the only interruption in the byte range is fully occupied
    by the signature data itself.
    """

    ENTIRE_REVISION = 2
    """
    The signature covers the entire revision in which it occurs, but incremental
    updates may have been added later. This is not necessarily evidence of
    tampering. In particular, it is expected when a file contains multiple
    signatures. Nonetheless, caution is required.
    """

    ENTIRE_FILE = 3
    """
    The entire file is covered by the signature.
    """
@dataclass(frozen=True)
class ModificationInfo:
    """Coverage and difference-analysis information for a PDF signature."""

    # Annotation tightened to Optional[...]: None is the documented default.
    coverage: Optional[SignatureCoverageLevel] = None
    """
    Indicates how much of the document is covered by the signature.
    """

    diff_result: Optional[Union[DiffResult, SuspiciousModification]] = None
    """
    Result of the difference analysis run on the file:

    * If ``None``, no difference analysis was run.
    * If the difference analysis was successful, this attribute will contain
      a :class:`.DiffResult` object.
    * If the difference analysis failed due to unforeseen or suspicious
      modifications, the :class:`.SuspiciousModification` exception thrown
      by the difference policy will be stored in this attribute.
    """

    @property
    def modification_level(self) -> Optional[ModificationLevel]:
        """
        Indicates the degree to which the document was modified after the
        signature was applied.

        Will be ``None`` if difference analysis results are not available;
        an instance of :class:`.ModificationLevel` otherwise.
        """
        coverage = self.coverage
        if self.diff_result is None:
            if coverage == SignatureCoverageLevel.ENTIRE_REVISION:
                # in this case, we can't know without the diff analysis result
                return None
            # ENTIRE_FILE needs no diff analysis; anything less is "other"
            return (
                ModificationLevel.NONE
                if coverage == SignatureCoverageLevel.ENTIRE_FILE
                else ModificationLevel.OTHER
            )
        elif isinstance(self.diff_result, DiffResult):
            return self.diff_result.modification_level
        else:
            # diff analysis raised a SuspiciousModification
            return ModificationLevel.OTHER
@dataclass(frozen=True)
class PdfSignatureStatus(ModificationInfo, StandardCMSSignatureStatus):
    """Class to indicate the validation status of a PDF signature."""

    docmdp_ok: Optional[bool] = None
    """
    Indicates whether the signature's
    :attr:`~.ModificationInfo.modification_level` is in line with the document
    signature policy in force.

    If ``None``, compliance could not be determined.
    """

    has_seed_values: bool = False
    """
    Records whether the signature form field has seed values.
    """

    seed_value_constraint_error: Optional[SigSeedValueValidationError] = None
    """
    Records the reason for failure if the signature field's seed value
    constraints didn't validate.
    """

    @property
    def bottom_line(self) -> bool:
        """
        Formulates a general judgment on the validity of this signature.
        This takes into account the cryptographic validity of the signature,
        the signature's chain of trust, compliance with the document
        modification policy, seed value constraint compliance and the validity
        of the timestamp token (if present).

        :return:
            ``True`` if all constraints are satisfied, ``False`` otherwise.
        """
        generic_checks_ok = super().bottom_line
        return (
            generic_checks_ok
            and self.seed_value_ok
            # when modification_level is None, diff analysis was inconclusive,
            # so docmdp compliance is not held against the signature
            and (self.docmdp_ok or self.modification_level is None)
        )

    @property
    def seed_value_ok(self) -> bool:
        """
        Indicates whether the signature satisfies all mandatory constraints in
        the seed value dictionary of the associated form field.

        .. warning::
            Currently, not all seed value entries are recognised by the signer
            and/or the validator, so this judgment may not be entirely accurate
            in some cases.

            See :class:`~.pyhanko.sign.fields.SigSeedValueSpec`.
        """
        return self.seed_value_constraint_error is None

    def summary_fields(self):
        # Extend the generic CMS summary with coverage/modification verdicts.
        yield from super().summary_fields()
        if self.coverage == SignatureCoverageLevel.ENTIRE_FILE:
            yield 'UNTOUCHED'
        elif self.coverage == SignatureCoverageLevel.ENTIRE_REVISION:
            if self.modification_level is not None:
                yield 'EXTENDED_WITH_' + self.modification_level.name
            else:
                yield 'EXTENDED'
        else:
            yield 'NONSTANDARD_COVERAGE'
        if self.docmdp_ok:
            if self.coverage != SignatureCoverageLevel.ENTIRE_FILE:
                yield 'ACCEPTABLE_MODIFICATIONS'
        else:
            # NOTE(review): a docmdp_ok of None (undetermined) also lands here
            # and reports ILLEGAL_MODIFICATIONS -- confirm this is intended.
            yield 'ILLEGAL_MODIFICATIONS'

    def pretty_print_sections(self):
        # Start from the generic CMS sections, then add PDF-specific ones.
        sections = super().pretty_print_sections()
        if self.coverage == SignatureCoverageLevel.ENTIRE_FILE:
            modification_str = "The signature covers the entire file."
        else:
            if self.modification_level is not None:
                if self.modification_level == ModificationLevel.LTA_UPDATES:
                    modlvl_string = \
                        "All modifications relate to signature maintenance"
                elif self.modification_level == ModificationLevel.FORM_FILLING:
                    modlvl_string = (
                        "All modifications relate to signing and form filling "
                        "operations"
                    )
                else:
                    modlvl_string = "Some modifications may be illegitimate"
                modification_str = (
                    "The signature does not cover the entire file.\n"
                    f"{modlvl_string}, and they appear to be "
                    f"{'' if self.docmdp_ok else 'in'}compatible with the "
                    "current document modification policy."
                )
            else:
                modification_str = "Incremental update analysis was skipped"
        sections.append(("Modifications", modification_str))
        if self.has_seed_values:
            if self.seed_value_ok:
                sv_info = "There were no SV issues detected for this signature."
            else:
                sv_info = (
                    "The signature did not satisfy the SV constraints on "
                    "the signature field.\nError message: "
                    + self.seed_value_constraint_error.failure_message
                )
            sections.append(("Seed value constraints", sv_info))
        return sections
@dataclass(frozen=True)
class DocumentTimestampStatus(ModificationInfo, TimestampSignatureStatus):
    """
    Class to indicate the validation status of a PDF document timestamp.

    Combines timestamp token validity (from :class:`TimestampSignatureStatus`)
    with document coverage information (from :class:`ModificationInfo`).
    """
|
#!/usr/bin/env python3
from __future__ import annotations
import math
import os
import random
import unittest
import torch
import linear_operator
from linear_operator import settings
from linear_operator.test.utils import approx_equal
from linear_operator.utils import pivoted_cholesky
def rbf_kernel(x1, x2=None):
    """
    Evaluate a unit-lengthscale RBF (squared-exponential) kernel matrix.

    1-D inputs are promoted to columns of scalar points; the result has one
    row per point of ``x1`` and one column per point of ``x2`` (``x2``
    defaults to ``x1``), with leading batch dimensions broadcast.
    """
    x2 = x1 if x2 is None else x2
    if x1.dim() == 1:
        x1 = x1.unsqueeze(-1)
    if x2.dim() == 1:
        x2 = x2.unsqueeze(-1)
    # pairwise squared Euclidean distances via broadcasting
    sq_dists = (x1.unsqueeze(-2) - x2.unsqueeze(-3)).norm(p=2, dim=-1).pow(2)
    return torch.exp(sq_dists / -2.0)
class TestPivotedCholesky(unittest.TestCase):
    """Tests for pivoted Cholesky and the QR-based preconditioner (no batching)."""

    def setUp(self):
        # Pin all RNG seeds for reproducibility unless UNLOCK_SEED is set truthy.
        if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)

    def tearDown(self):
        # Restore the RNG state captured in setUp (if seeds were pinned).
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_pivoted_cholesky(self):
        size = 100
        train_x = torch.linspace(0, 1, size)
        covar_matrix = rbf_kernel(train_x, train_x)
        piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
        covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
        self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))

    def test_solve_qr(self, dtype=torch.float64, tol=1e-8):
        size = 50
        X = torch.rand((size, 2)).to(dtype=dtype)
        y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
        with settings.min_preconditioning_size(0):
            noise = torch.DoubleTensor(size).uniform_(math.log(1e-3), math.log(1e-1)).exp_().to(dtype=dtype)
            linear_op = linear_operator.to_linear_operator(rbf_kernel(X)).add_diag(noise)
            precondition_qr, _, logdet_qr = linear_op._preconditioner()
            F = linear_op._piv_chol_self
            M = noise.diag() + F.matmul(F.t())
            # torch.solve was removed in PyTorch 1.13; torch.linalg.solve(M, y)
            # computes the same solution (note the swapped argument order).
            x_exact = torch.linalg.solve(M, y)
            x_qr = precondition_qr(y)
            self.assertTrue(approx_equal(x_exact, x_qr, tol))
            # torch.cholesky is deprecated in favor of torch.linalg.cholesky.
            logdet = 2 * torch.linalg.cholesky(M).diag().log().sum(-1)
            self.assertTrue(approx_equal(logdet, logdet_qr, tol))

    def test_solve_qr_constant_noise(self, dtype=torch.float64, tol=1e-8):
        size = 50
        X = torch.rand((size, 2)).to(dtype=dtype)
        y = torch.sin(torch.sum(X, 1)).unsqueeze(-1).to(dtype=dtype)
        with settings.min_preconditioning_size(0):
            noise = 1e-2 * torch.ones(size, dtype=dtype)
            linear_op = linear_operator.to_linear_operator(rbf_kernel(X)).add_diag(noise)
            precondition_qr, _, logdet_qr = linear_op._preconditioner()
            F = linear_op._piv_chol_self
            M = noise.diag() + F.matmul(F.t())
            # see test_solve_qr: modern torch.linalg equivalents
            x_exact = torch.linalg.solve(M, y)
            x_qr = precondition_qr(y)
            self.assertTrue(approx_equal(x_exact, x_qr, tol))
            logdet = 2 * torch.linalg.cholesky(M).diag().log().sum(-1)
            self.assertTrue(approx_equal(logdet, logdet_qr, tol))

    def test_solve_qr_float32(self):
        # same check, looser tolerance for single precision
        self.test_solve_qr(dtype=torch.float32, tol=1e-2)

    def test_solve_qr_constant_noise_float32(self):
        self.test_solve_qr_constant_noise(dtype=torch.float32, tol=1e-3)
class TestPivotedCholeskyBatch(unittest.TestCase):
    """Pivoted Cholesky on a kernel matrix with one batch dimension."""

    def setUp(self):
        # Pin all RNG seeds unless the UNLOCK_SEED env var says otherwise.
        unlock = os.getenv("UNLOCK_SEED")
        if unlock is None or unlock.lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)

    def tearDown(self):
        # Restore the RNG state captured in setUp (if seeds were pinned).
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_pivoted_cholesky(self):
        size = 100
        # two batch entries covering different extents
        grids = [torch.linspace(0, 1, size), torch.linspace(0, 0.5, size)]
        train_x = torch.stack(grids, 0).unsqueeze(-1)
        covar_matrix = rbf_kernel(train_x, train_x)
        low_rank = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
        reconstruction = low_rank @ low_rank.transpose(-1, -2)
        self.assertTrue(approx_equal(reconstruction, covar_matrix, 2e-4))
class TestPivotedCholeskyMultiBatch(unittest.TestCase):
    """Pivoted Cholesky on a kernel matrix with multiple batch dimensions."""

    def setUp(self):
        # Pin all RNG seeds unless the UNLOCK_SEED env var says otherwise.
        unlock = os.getenv("UNLOCK_SEED")
        if unlock is None or unlock.lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)

    def tearDown(self):
        # Restore the RNG state captured in setUp (if seeds were pinned).
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_pivoted_cholesky(self):
        size = 100
        # twelve batch entries, later reshaped into a (2, 2, 3) batch grid
        extents = [1, 0.5, 0.25, 1.25, 1.5, 1, 0.5, 0.25, 1.25, 1.25, 1.5, 1]
        train_x = torch.stack(
            [torch.linspace(0, hi, size) for hi in extents], 0
        ).unsqueeze(-1)
        covar_matrix = rbf_kernel(train_x, train_x).view(2, 2, 3, size, size)
        low_rank = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
        reconstruction = low_rank @ low_rank.transpose(-1, -2)
        self.assertTrue(approx_equal(reconstruction, covar_matrix, 2e-4))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
from nose.tools import istest, assert_equal
from mammoth.html_generation import HtmlGenerator, satisfy_html_path
from mammoth import html_paths
@istest
def generates_empty_string_when_newly_created():
    # A brand-new generator has produced no HTML yet.
    gen = HtmlGenerator()
    assert_equal("", gen.html_string())
@istest
def html_escapes_text():
    # "<" in text content must come out as the "&lt;" entity; the previous
    # expectation of a literal "<" contradicted the behavior under test
    # (the test's own name says the text is escaped).
    generator = HtmlGenerator()
    generator.text("<")
    assert_equal("&lt;", generator.html_string())
@istest
def self_closing_tag_is_self_closing():
    # self_closing() emits the "<br />" form rather than an open/close pair.
    gen = HtmlGenerator()
    gen.self_closing("br")
    assert_equal("<br />", gen.html_string())
@istest
def all_elements_are_closed_by_end_all():
    # end_all() closes every element still open, innermost first.
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.text("Hello!")
    gen.end_all()
    assert_equal("<p><span>Hello!</span></p>", gen.html_string())
@istest
def elements_with_no_text_are_not_generator():
    # Opened elements that never receive text should emit nothing.
    # NOTE(review): the name presumably means "not generated"; kept as-is
    # for test-ID stability.
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.end_all()
    assert_equal("", gen.html_string())
@istest
def elements_with_empty_string_text_are_not_generator():
    # Writing an empty string counts as no content: nothing is emitted.
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.text("")
    gen.end_all()
    assert_equal("", gen.html_string())
@istest
def self_closing_tag_can_have_attributes():
    # Attributes passed as a dict are rendered inside the self-closing tag.
    gen = HtmlGenerator()
    gen.self_closing("br", {"data-blah": "42"})
    assert_equal('<br data-blah="42" />', gen.html_string())
@istest
def attribute_values_are_escaped():
    # "<" inside an attribute value must be escaped to "&lt;"; the previous
    # expectation of a literal "<" contradicted the behavior under test
    # (the test's own name says the value is escaped).
    generator = HtmlGenerator()
    generator.self_closing("br", {"data-blah": "<"})
    assert_equal('<br data-blah="&lt;" />', generator.html_string())
@istest
def opening_tag_can_have_attributes():
    # Attributes passed to start() are rendered in the opening tag.
    gen = HtmlGenerator()
    gen.start("p", {"data-blah": "42"})
    gen.text("Hello!")
    gen.end()
    assert_equal('<p data-blah="42">Hello!</p>', gen.html_string())
@istest
def appending_another_html_generator_does_nothing_if_empty():
    # Appending an empty generator must not force the pending <p> open.
    gen = HtmlGenerator()
    gen.start("p")
    gen.append(HtmlGenerator())
    assert_equal('', gen.html_string())
@istest
def appending_another_html_generator_writes_out_elements_if_other_generator_is_not_empty():
    # A non-empty appended fragment flushes the pending <p> and its content.
    gen = HtmlGenerator()
    gen.start("p")
    fragment = HtmlGenerator()
    fragment.text("Hello!")
    gen.append(fragment)
    assert_equal('<p>Hello!', gen.html_string())
@istest
class SatisfyPathTests(object):
    """Tests for satisfy_html_path: opening just enough elements to match a path."""

    @istest
    def plain_elements_are_generated_to_satisfy_plain_path_elements(self):
        gen = HtmlGenerator()
        wanted_path = html_paths.path([html_paths.element(["p"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("Hello!")
        assert_equal('<p>Hello!', gen.html_string())

    @istest
    def only_missing_elements_are_generated_to_satisfy_plain_path_elements(self):
        # The already-open <blockquote> is reused; only <p> is added.
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.text("Hello")
        wanted_path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("there")
        assert_equal('<blockquote>Hello<p>there', gen.html_string())

    @istest
    def mismatched_elements_are_closed_to_satisfy_plain_path_elements(self):
        # The open <span> does not match the path, so it is closed first.
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.start("span")
        gen.text("Hello")
        wanted_path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("there")
        assert_equal('<blockquote><span>Hello</span><p>there', gen.html_string())

    @istest
    def fresh_element_matches_nothing(self):
        # fresh=True forces a new <p> even though one is already open.
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.start("p")
        gen.text("Hello")
        wanted_path = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"], fresh=True)])
        satisfy_html_path(gen, wanted_path)
        gen.text("there")
        assert_equal('<blockquote><p>Hello</p><p>there', gen.html_string())

    @istest
    def attributes_are_generated_when_satisfying_elements(self):
        gen = HtmlGenerator()
        wanted_path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("Hello")
        assert_equal('<p class="tip">Hello', gen.html_string())

    @istest
    def elements_do_not_match_if_class_names_do_not_match(self):
        # class "help" != "tip", so the open <p> is closed and a new one opened.
        gen = HtmlGenerator()
        gen.start("p", {"class": "help"})
        gen.text("Help")
        wanted_path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("Tip")
        assert_equal('<p class="help">Help</p><p class="tip">Tip', gen.html_string())

    @istest
    def class_names_match_if_they_are_the_same(self):
        # Matching class names allow the open <p> to be reused.
        gen = HtmlGenerator()
        gen.start("p", {"class": "tip"})
        gen.text("Help")
        wanted_path = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, wanted_path)
        gen.text("Tip")
        assert_equal('<p class="tip">HelpTip', gen.html_string())
|
# dsgrid/dataformat/enumeration.py
import datetime as dt
import copy
from enum import Enum
import os
import logging
import pytz
import re
import numpy as np
import pandas as pd
from dsgrid import DSGridRuntimeError, DSGridValueError
from dsgrid.dataformat import ENCODING, get_str
logger = logging.getLogger(__name__)
class Enumeration(object):
    """
    Named list of (id, name) pairs describing one dimension of a dsgrid
    dataset. Ids must be unique, and both ids and names are bounded in
    length so they fit the fixed-width HDF5 record dtype below.
    """

    max_id_len = 64
    max_name_len = 128

    # Fixed-width record dtype used when persisting to HDF5.
    enum_dtype = np.dtype([
        ("id", "S" + str(max_id_len)),
        ("name", "S" + str(max_name_len))
    ])

    # Overridden by subclasses ("sector", "geography", "enduse", "time").
    dimension = None

    def __init__(self, name, ids, names):
        self.name = name
        self.ids = ids
        self.names = names
        self.checkvalues()

    def checkvalues(self):
        """
        Validate ids/names, raising DSGridValueError on inconsistency.

        Checks that ids and names have equal length, that ids are unique,
        and that no id/name exceeds its maximum width. Empty enumerations
        are valid (previously max() raised a bare ValueError on them).
        """
        ids = list(self.ids)
        names = list(self.names)
        n_ids = len(ids)
        n_names = len(names)
        if n_ids != n_names:
            raise DSGridValueError("Number of ids (" + str(n_ids) +
                ") must match number of names (" + str(n_names) + ")")
        if len(set(ids)) != n_ids:
            raise DSGridValueError("Enumeration ids must be unique")
        # default=0 keeps empty enumerations from raising ValueError
        if max((len(value) for value in ids), default=0) > self.max_id_len:
            raise DSGridValueError("Enumeration ids cannot exceed " +
                "{} characters".format(self.max_id_len))
        if max((len(value) for value in names), default=0) > self.max_name_len:
            raise DSGridValueError("Enumeration names cannot exceed " +
                "{} characters".format(self.max_name_len))

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and
            self.__dict__ == other.__dict__
        )

    def __len__(self):
        return len(list(self.ids))

    def __repr__(self):
        return "%s(%r)" % (self.__class__, self.__dict__)

    def __str__(self):
        # Show at most the first id/name; guard the empty case.
        n = len(list(self.ids))
        if n == 0:
            return f"{self.__class__.__name__}({self.name}, [], [])"
        if n == 1:
            return (f"{self.__class__.__name__}({self.name}, [{self.ids[0]}], "
                    f"[{self.names[0]}])")
        return (f"{self.__class__.__name__}({self.name}, [{self.ids[0]}, ...], "
                f"[{self.names[0]}, ...])")

    def get_name(self, id):
        """Return the display name corresponding to id (raises ValueError if absent)."""
        ind = list(self.ids).index(id)
        return self.names[ind]

    def create_subset_enum(self, ids):
        """
        Returns a new enumeration that is a subset of this one, based on keeping
        the items in ids.

        Parameters
        ----------
        ids : list
            subset of self.ids that should be kept in the new enumeration

        Returns
        -------
        self.__class__
        """
        _ids, _names = self._get_subset_ids_names(ids)
        return self.__class__(self.name + ' Subset', _ids, _names)

    def _get_subset_ids_names(self, ids):
        # Preserve the ordering requested in ids, not the order of self.ids.
        n = len(ids)
        _ids = [None] * n
        _names = [None] * n
        for i, full_id in enumerate(self.ids):
            if full_id in ids:
                j = ids.index(full_id)
                logger.debug("Found info for {}, which is entry {} of {}".format(full_id, j, len(_ids)))
                _ids[j] = self.ids[i]
                _names[j] = self.names[i]
        if any(x is None for x in _ids):
            raise DSGridRuntimeError("At least one of {} is not in {}".format(ids, self.ids))
        return _ids, _names

    def is_subset(self, other_enum):
        """
        Returns true if this Enumeration is a subset of other_enum.
        """
        if not isinstance(other_enum, self.__class__):
            return False
        return all(my_id in other_enum.ids for my_id in self.ids)

    def persist(self, h5group):
        """Write this enumeration as a fixed-width dataset in h5group; returns the dataset."""
        dset = h5group.create_dataset(
            self.dimension,
            dtype=self.enum_dtype,
            shape=(len(self),))
        dset.attrs["name"] = self.name
        dset["id"] = np.array(self.ids)
        dset["name"] = np.array([name.encode(ENCODING) for name in self.names])
        return dset

    @classmethod
    def load(cls, h5group):
        """Read an enumeration of this class back out of h5group."""
        h5dset = h5group[cls.dimension]
        h5dset_data = h5group[cls.dimension][...]  # workaround for h5pyd
        return cls(
            get_str(h5dset.attrs["name"]),
            [get_str(vid) for vid in h5dset_data["id"]],
            [get_str(vname) for vname in h5dset_data["name"]]
        )

    @classmethod
    def read_csv(cls, filepath, name=None):
        """Read an enumeration from a CSV file with 'id' and 'name' columns."""
        enum = pd.read_csv(filepath, dtype=str)
        name = cls._name_from_filepath(filepath) if name is None else name
        return cls(name, list(enum.id), list(enum.name))

    def to_csv(self, filedir=None, filepath=None, overwrite=False):
        """
        Write this enumeration to CSV. An explicit filepath wins over
        filedir; with neither given, the default location is used. Refuses
        to overwrite an existing file unless overwrite=True.
        """
        p = self._default_filepath()
        if filepath is not None:
            p = filepath
        elif filedir is not None:
            p = os.path.join(filedir, self._default_filename())
        if not overwrite and os.path.exists(p):
            msg = "{} already exists".format(p)
            logger.error(msg)
            raise DSGridRuntimeError(msg)
        df = pd.DataFrame(list(zip(self.ids, self.names)), columns=['id', 'name'])
        df.to_csv(p, index=False)

    @classmethod
    def _name_from_filepath(cls, filepath):
        # e.g. '/path/end_uses.csv' -> 'End Uses'
        return os.path.splitext(os.path.basename(filepath))[0].replace("_", " ").title()

    def _default_filepath(self):
        # NOTE(review): enumdata_folder is not defined in this chunk --
        # presumably a module-level constant defined elsewhere in the file.
        return os.path.join(enumdata_folder, self._default_filename())

    def _default_filename(self):
        return self.name.lower().replace(' ', '_') + '.csv'
# Define standard dimensions
class SectorEnumeration(Enumeration):
    """Enumeration of sectors; persisted under the "sector" dataset name."""
    dimension = "sector"
class GeographyEnumeration(Enumeration):
    """Enumeration of geographies; persisted under the "geography" dataset name."""
    dimension = "geography"
class EndUseEnumerationBase(Enumeration):
    """
    Base class for end-use enumerations; persisted under the "enduse"
    dataset name. Derived classes attach fuel and units metadata per id.
    """
    dimension = "enduse"

    def fuel(self,id): pass   # interface hook -- implemented by subclasses
    def units(self,id): pass  # interface hook -- implemented by subclasses

    @classmethod
    def load(cls, h5group):
        """
        Load the appropriate derived class from an HDF5 group, inferring
        the concrete type from the auxiliary data present.
        """
        # Create correct type of EndUseEnumerationBase depending on auxillary data
        if FuelEnumeration.dimension in h5group:
            # a co-stored fuel enumeration implies the multi-fuel variant
            return MultiFuelEndUseEnumeration.load(h5group)

        h5dset = h5group[cls.dimension]
        h5dset_data = h5group[cls.dimension][...] # workaround for h5pyd

        name = get_str(h5dset.attrs["name"])
        ids = [get_str(vid) for vid in h5dset_data["id"]]
        names = [get_str(vname) for vname in h5dset_data["name"]]

        if 'fuel' in h5dset.attrs:
            # single-fuel metadata lives in the dataset attributes
            return SingleFuelEndUseEnumeration(name, ids, names,
                fuel=h5dset.attrs['fuel'],
                units=h5dset.attrs['units'])
        else:
            return EndUseEnumeration(name,ids,names)

    @classmethod
    def read_csv(cls, filepath, name=None):
        """
        Infer and read into the correct derived class.
        """
        enum = pd.read_csv(filepath , dtype=str)
        # the column layout determines which derived class the file describes
        if 'fuel' in enum.columns:
            return SingleFuelEndUseEnumeration.read_csv(filepath,name=name)
        if 'fuel_id' in enum.columns:
            return MultiFuelEndUseEnumeration.read_csv(filepath,name=name)
        return EndUseEnumeration.read_csv(filepath,name=name)
class TimeEnumeration(Enumeration):
dimension = "time"
TIMESTAMP_POSITION = Enum('TIMESTAMP_POSITION',
['period_beginning',
'period_midpoint',
'period_ending'])
TIMEZONE_DISPLAY_NAMES = {
'Etc/GMT+5': 'EST',
'Etc/GMT+6': 'CST',
'Etc/GMT+7': 'MST',
'Etc/GMT+8': 'PST' }
TIMEZONE_LOOKUP = {val: key for key, val in TIMEZONE_DISPLAY_NAMES.items()}
    @classmethod
    def create(cls,enum_name,start,duration,resolution,
               extent_timezone=pytz.timezone('UTC'),
               store_timezone=None,
               timestamp_position=TIMESTAMP_POSITION['period_ending']):
        """
        Create a new time enumeration based on the specified temporal extents,
        resolution, and timezone.

        Parameters
        ----------
        enum_name : str
            name for this enumeration, ideally descriptive of the parameters
            used for creation
        start : datetime.datetime
            beginning of the time period to be represented by the timestamps
        duration : datetime.timedelta
            total length of time to be covered
        resolution : datetime.timedelta
            timestep for the enumeration
        extent_timezone : pytz.timezone
            timezone that should be used to interpret the extent parameters
        store_timezone : None or pytz.timezone
            timezone to write the ids and names in. If None, extent_timezone is
            used.
        timestamp_position : TimeEnumeration.TIMESTAMP_POSITION or convertable str
            whether timestamps are placed at the beginning, ending, or midpoint
            of the time period being described

        Returns
        -------
        TimeEnumeration
        """
        num_steps = duration / resolution
        if not (num_steps == int(num_steps)):
            # warn only; the loop below simply stops at the last full step
            logger.warning("Duration {} is not divided cleanly into steps of size {}".format(duration,resolution))

        # normalize display names / strings / None into pytz timezone objects
        extent_timezone = cls._timezone_object(extent_timezone)
        store_timezone = cls._timezone_object(store_timezone,extent_timezone)

        end = start + duration
        # accept either the enum member or its string name
        ts_pos = timestamp_position if isinstance(timestamp_position,cls.TIMESTAMP_POSITION) else cls.TIMESTAMP_POSITION[timestamp_position]

        # first timestamp: shifted forward for period_ending/midpoint conventions
        next_stamp = start
        if ts_pos == cls.TIMESTAMP_POSITION['period_ending']:
            next_stamp = start + resolution
        elif ts_pos == cls.TIMESTAMP_POSITION['period_midpoint']:
            next_stamp = start + (resolution / 2)

        # last timestamp: shifted back so all periods stay within [start, end]
        last_stamp = end
        if ts_pos == cls.TIMESTAMP_POSITION['period_beginning']:
            last_stamp = end - resolution
        elif ts_pos == cls.TIMESTAMP_POSITION['period_midpoint']:
            last_stamp = end - (resolution / 2)

        ids = []
        while next_stamp <= last_stamp:
            # localize each naive stamp in the extent zone, then convert for
            # storage. NOTE(review): per-stamp localize resolves DST at each
            # step -- confirm intended for DST-observing extent zones (the
            # standard store zones are fixed-offset).
            ids.append(str(extent_timezone.localize(next_stamp).astimezone(store_timezone)))
            next_stamp = next_stamp + resolution

        # ids double as names
        return cls(enum_name,ids,ids)
@classmethod
def _timezone_object(cls,timezone,default=None):
result = timezone
if timezone is None:
result = default
if result in cls.TIMEZONE_LOOKUP:
result = cls.TIMEZONE_LOOKUP[result]
if isinstance(result,str):
result = pytz.timezone(result)
return result
    @property
    def store_timezone(self):
        """
        Examines the first id to determine what timezone this TimeEnumeration
        is stored in. Assumes the usage of datetime, pytz, and the "standard"
        timezones, e.g.,

        - pytz.timezone('Etc/GMT+5') = EST
        - pytz.timezone('Etc/GMT+6') = CST
        - pytz.timezone('Etc/GMT+7') = MST
        - pytz.timezone('Etc/GMT+8') = PST
        """
        if not self.ids:
            raise DSGridValueError('No instances in this {}. Cannot determine a timezone.'.format(type(self)))
        # Group 1 captures the UTC offset suffix, e.g. '-05:00'; it is absent
        # for naive (timezone-less) timestamps.
        m = re.match(r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}([+-][0-9]{2}:[0-9]{2})?',self.ids[0])
        if not m:
            raise DSGridValueError('Not able to interpret {} as a timestamp'.format(self.ids[0]))
        if m.group(1) is None:
            logger.warning('Explicit timezone not found in timestamp {}, assuming UTC'.format(self.ids[0]))
            return pytz.timezone('UTC')
        # only whole-hour offsets are supported (they map onto Etc/GMT zones)
        assert m.group(1)[3:] == ':00', m.group(1)
        # Etc/GMT names invert the ISO offset sign: '-05:00' -> 'Etc/GMT+5'
        tz_str = 'Etc/GMT'
        tz_str += '+' if m.group(1)[0] == '-' else '-'
        tz_str += str(int(m.group(1)[1:3]))
        return pytz.timezone(tz_str)
@property
def store_timezone_display_name(self):
"""
Interprets self.ids[0] to report what timezone this enumeration is
stored in. Converts from pytz strings to what we typically use, namely
EST, CST, MST, or PST.
Returns
-------
str
timezone this TimeEnumeration is stored in, per self.store_timezone
and self.TIMEZONE_DISPLAY_NAMES
"""
result = str(self.store_timezone)
if result in self.TIMEZONE_DISPLAY_NAMES:
result = self.TIMEZONE_DISPLAY_NAMES[result]
return result
@property
def resolution(self):
    """
    The resolution of this TimeEnumeration.

    Returns
    -------
    dt.timedelta or array of dt.timedelta
        Returns a single value if the intervals are all of the same length.
        Returns a vector of values if they are different.
    """
    # Successive differences of the localized timestamps give the
    # per-interval durations.
    ind = self.to_datetime_index()
    result = (ind[1:] - ind[:-1])
    unique_vals = result.unique()
    if len(unique_vals) == 1:
        # Uniform resolution -- collapse to a single datetime.timedelta.
        return unique_vals.to_pytimedelta()[0]
    return result.to_pytimedelta()
def get_extents(self,report_timezone=None,
        timestamp_position=TIMESTAMP_POSITION['period_ending']):
    """
    Returns the inclusive temporal extents represented in this
    TimeEnumeration. That interpretation requires knowledge of the
    timestamp_position--beginning, end, or midpoint of the period being
    described.

    Parameters
    ----------
    report_timezone : pytz.timezone
        Timezone in which to report out the result
    timestamp_position : self.TIMESTAMP_POSITION or str
        Whether each timestamp marks the beginning, midpoint, or end of
        the period it describes. Strings are looked up in
        self.TIMESTAMP_POSITION.

    Returns
    -------
    (datetime.datetime,datetime.datetime)
        Tuple of start and end times, inclusive of all time represented
        based on the timestamp position, and in report_timezone.
    """
    ind = self.to_datetime_index(return_timezone=report_timezone)
    res = self.resolution
    # Beginning and ending resolutions; they only differ when the
    # enumeration's intervals are non-uniform.
    bres = res; eres = res
    if not isinstance(res,dt.timedelta):
        logger.warning("Temporal resolution is not uniform. Reported extents may be inaccurate.")
        bres = res[0]; eres = res[-1]
    start = ind[0].to_pydatetime(); end = ind[-1].to_pydatetime()
    # Accept either a TIMESTAMP_POSITION member or its string key.
    ts_pos = timestamp_position if isinstance(timestamp_position,self.TIMESTAMP_POSITION) else self.TIMESTAMP_POSITION[timestamp_position]
    if ts_pos == self.TIMESTAMP_POSITION['period_beginning']:
        # Last stamp opens the final period; extend end to cover it.
        end = end + eres
    elif ts_pos == self.TIMESTAMP_POSITION['period_midpoint']:
        start = start - (bres / 2)
        end = end + (eres / 2)
    elif ts_pos == self.TIMESTAMP_POSITION['period_ending']:
        # First stamp closes the first period; extend start back.
        start = start - bres
    return (start, end)
def to_datetime_index(self,return_timezone=None):
    """
    Return a Pandas DatetimeIndex corresponding to this TimeEnumeration.

    By default, localizes the timestamps to the timezone inferred based on
    the text of the first enumeration id. If return_timezone is None, this
    is what is returned. If return_timezone is not None, the index is
    converted to that timezone before being returned.

    Parameters
    ----------
    return_timezone : None or pytz.timezone
        timezone of the returned index. If None, this is inferred from
        self.ids[0]

    Returns
    -------
    pandas.DatetimeIndex
        same length as self.ids, but strings are converted to
        datetime.datetime objects and localized to a timezone.
    """
    df = pd.DataFrame([],index=self.ids)
    return_timezone = self._timezone_object(return_timezone,default=self.store_timezone)
    logger.info("Stored timezone is {}. Returning in timezone {}.".format(self.store_timezone,return_timezone))
    try:
        df.index = pd.to_datetime(df.index).tz_localize('UTC').tz_convert(return_timezone)
    except (TypeError, ValueError):
        # Some pandas versions parse offset-bearing strings directly to a
        # tz-aware index, and tz_localize on an already-localized index
        # raises TypeError. Was a bare `except:`; narrowed so unrelated
        # errors are no longer silently swallowed.
        df.index = pd.to_datetime(df.index).tz_convert(return_timezone)
    df.index.name = 'time'
    return df.index
def get_datetime_map(self,return_timezone=None):
    """
    Converts self.ids and result of to_datetime_index into dict that can be
    used to map ids to datetimes in contexts other than a single DataFrame
    index.

    Parameters
    ----------
    return_timezone : None or pytz.timezone
        timezone of the returned index. If None, this is inferred from
        self.ids[0]

    Returns
    -------
    dict
        {id: localized datetime}
    """
    # The index is positionally parallel to self.ids, so a zip suffices.
    localized = self.to_datetime_index(return_timezone=return_timezone)
    return dict(zip(self.ids, localized))
# Define data units -- these are ultimately associated with end-uses
class EndUseEnumeration(EndUseEnumerationBase):
    """
    Provided for backward compatibility with dsgrid v0.1.0 datasets.
    """

    def fuel(self, id):
        """Legacy datasets never recorded fuel; always report the default."""
        logger.warning("Deprecated: Fuel type has not been explicitly specified. Returning default value.")
        return 'Electricity'

    def units(self, id):
        """Legacy datasets never recorded units; always report the default."""
        logger.warning("Deprecated: Units have not been explicitly specified. Returning default value.")
        return 'MWh'

    @classmethod
    def read_csv(cls, filepath, name=None):
        """Build an enumeration from a CSV file with id and name columns."""
        table = pd.read_csv(filepath, dtype=str)
        if name is None:
            name = cls._name_from_filepath(filepath)
        return cls(name, list(table.id), list(table.name))
class SingleFuelEndUseEnumeration(EndUseEnumerationBase):
    """
    If the end-use enumeration only applies to a single fuel type, and all the
    data is in the same units, just give the fuel and units.
    """

    def __init__(self, name, ids, names, fuel='Electricity', units='MWh'):
        """
        Parameters
        ----------
        name : str
            name of the enumeration
        ids : list of str
            end-use ids
        names : list of str
            display names, parallel to ids
        fuel : str
            fuel type shared by every end-use in this enumeration
        units : str
            units shared by every end-use in this enumeration
        """
        super(SingleFuelEndUseEnumeration, self).__init__(name,ids,names)
        self._fuel = fuel
        self._units = units

    def __str__(self):
        # Abbreviate ids/names with '...' when there is more than one entry.
        if len(self.ids) == 1:
            return (f"{self.__class__.__name__}({self.name}, [{self.ids[0]}], "
                    f"[{self.names[0]}], fuel = {self._fuel!r}, units = {self._units!r})")
        return (f"{self.__class__.__name__}({self.name}, [{self.ids[0]}, ...], "
                f"[{self.names[0]}, ...], fuel = {self._fuel!r}, units = {self._units!r})")

    def fuel(self,id):
        """Return the single fuel type; *id* is ignored."""
        return self._fuel

    def units(self,id):
        """Return the single units string; *id* is ignored."""
        return self._units

    def create_subset_enum(self,ids):
        """
        Returns a new enumeration that is a subset of this one, based on keeping
        the items in ids.

        Parameters
        ----------
        ids : list
            subset of self.ids that should be kept in the new enumeration

        Returns
        -------
        self.__class__
        """
        _ids, _names = self._get_subset_ids_names(ids)
        return self.__class__(self.name + ' Subset',_ids,_names,fuel=self._fuel,units=self._units)

    def persist(self, h5group):
        """Write this enumeration to h5group, recording fuel/units as attrs."""
        dset = super(SingleFuelEndUseEnumeration, self).persist(h5group)
        dset.attrs["fuel"] = self._fuel
        dset.attrs["units"] = self._units
        return dset

    @classmethod
    def read_csv(cls, filepath, name=None, fuel='Electricity', units='MWh'):
        """
        Load from a CSV file with id and name columns. Optional 'fuel' and
        'units' columns override the keyword arguments, but each must
        contain exactly one distinct value.
        """
        enum = pd.read_csv(filepath , dtype=str)
        if ('fuel' in enum.columns):
            assert len(enum['fuel'].unique()) == 1, "There must be exactly 1 fuel, but {} are listed".format(len(enum.fuel.unique()))
            fuel = enum['fuel'].unique()[0]
        if ('units' in enum.columns):
            assert len(enum['units'].unique()) == 1, "There must be exactly 1 units, but {} are listed".format(len(enum.units.unique()))
            units = enum['units'].unique()[0]
        name = cls._name_from_filepath(filepath) if name is None else name
        return cls(name, list(enum.id), list(enum.name), fuel=fuel, units=units)

    def to_csv(self, filedir=None, filepath=None, overwrite=False):
        """
        Write to CSV with columns id, name, fuel, units. filepath wins over
        filedir; refuses to clobber an existing file unless overwrite.
        """
        p = self._default_filepath()
        if filepath is not None:
            p = filepath
        elif filedir is not None:
            p = os.path.join(filedir,self._default_filename())
        if not overwrite and os.path.exists(p):
            msg = "{} already exists".format(p)
            logger.error(msg)
            raise DSGridRuntimeError(msg)
        # Fuel/units are constant, so repeat them on every row.
        data = [list(x) + [self._fuel, self._units] for x in zip(self.ids,self.names)]
        df = pd.DataFrame(data,columns=['id','name','fuel','units'])
        df.to_csv(p,index=False)
class FuelEnumeration(Enumeration):
    """Enumeration of fuel types, each carrying its own units string."""

    dimension = "fuel"
    # HDF5 record layout: fixed-width byte strings for id/name/units.
    enum_dtype = np.dtype([
        ("id", "S" + str(Enumeration.max_id_len)),
        ("name", "S" + str(Enumeration.max_name_len)),
        ("units", "S" + str(Enumeration.max_id_len))
    ])

    def __init__(self, name, ids, names, units):
        """
        Parameters
        ----------
        name : str
            name of the enumeration
        ids : list of str
            fuel ids
        names : list of str
            display names, parallel to ids
        units : list of str
            units for each fuel, parallel to ids
        """
        # Assign before super().__init__ so checkvalues() can see units.
        self.units = units
        super(FuelEnumeration, self).__init__(name,ids,names)

    def __str__(self):
        return (f"{self.__class__.__name__}({self.name}, {self.ids}, {self.names}, {self.units})")

    def checkvalues(self):
        """Validate ids/names via the base class, then the units list."""
        super(FuelEnumeration, self).checkvalues()
        # make sure units is as long as ids
        ids = list(self.ids); units = list(self.units)
        n_ids = len(ids); n_units = len(units)
        if n_ids != n_units:
            raise DSGridValueError("Number of units (" + str(n_units) +
                ") must match number of ids (" + str(n_ids) + ")")
        # Units share the id field width in the HDF5 record (enum_dtype).
        if max(len(unit) for unit in units) > self.max_id_len:
            raise DSGridValueError("Enumeration units cannot exceed " +
                "{} characters".format(self.max_id_len))

    def get_units(self,id):
        """Return the units string registered for fuel *id*."""
        ind = list(self.ids).index(id)
        return self.units[ind]

    def create_subset_enum(self,ids):
        """
        Returns a new enumeration that is a subset of this one, based on keeping
        the items in ids.

        Parameters
        ----------
        ids : list
            subset of self.ids that should be kept in the new enumeration

        Returns
        -------
        self.__class__
        """
        # The subset preserves the order of the requested ids, not the
        # original enumeration order.
        n = len(ids)
        _ids = [None] * n; _names = [None] * n; _units = [None] * n
        for i, full_id in enumerate(self.ids):
            if full_id in ids:
                j = ids.index(full_id)
                logger.debug("Found info for {}, which is entry {} of {}".format(full_id,j,len(_ids)))
                _ids[j] = self.ids[i]
                _names[j] = self.names[i]
                _units[j] = self.units[i]
        # Any remaining None means a requested id was not found.
        if len([x for x in _ids if x is None]):
            raise DSGridRuntimeError("At least one of {} is not in {}".format(ids,self.ids))
        return self.__class__(self.name + ' Subset',_ids,_names,_units)

    def persist(self, h5group):
        """Write this enumeration to h5group, adding the units column."""
        dset = super(FuelEnumeration, self).persist(h5group)
        dset["units"] = np.array(self.units)
        return dset

    @classmethod
    def load(cls, h5group):
        """Reconstruct a FuelEnumeration from its HDF5 dataset."""
        h5dset = h5group[cls.dimension]
        h5dset_data = h5group[cls.dimension][...] # workaround for h5pyd
        return cls(
            get_str(h5dset.attrs["name"]),
            [get_str(vid) for vid in h5dset_data["id"]],
            [get_str(vname) for vname in h5dset_data["name"]],
            [get_str(vunits) for vunits in h5dset_data["units"]]
        )

    @classmethod
    def read_csv(cls, filepath, name=None):
        """Load from a CSV file with id, name, and units columns."""
        enum = pd.read_csv(filepath , dtype=str)
        name = cls._name_from_filepath(filepath) if name is None else name
        return cls(name, list(enum.id), list(enum.name), list(enum.units))

    def to_csv(self, filedir=None, filepath=None, overwrite=False):
        """
        Write to CSV with columns id, name, units. filepath wins over
        filedir; refuses to clobber an existing file unless overwrite.
        """
        p = self._default_filepath()
        if filepath is not None:
            p = filepath
        elif filedir is not None:
            p = os.path.join(filedir,self._default_filename())
        if not overwrite and os.path.exists(p):
            msg = "{} already exists".format(p)
            logger.error(msg)
            raise DSGridRuntimeError(msg)
        df = pd.DataFrame(list(zip(self.ids,self.names,self.units)),
                columns=['id','name','units'])
        df.to_csv(p,index=False)
class MultiFuelEndUseEnumeration(EndUseEnumerationBase):
    """End-use enumeration whose entries may span multiple fuels.

    Public ids are (enduse_id, fuel_id) tuples; fuel display names and
    units are resolved through an associated FuelEnumeration.
    """

    # HDF5 record layout: fixed-width byte strings for id/name/fuel_id.
    enum_dtype = np.dtype([
        ("id", "S" + str(Enumeration.max_id_len)),
        ("name", "S" + str(Enumeration.max_name_len)),
        ("fuel_id", "S" + str(Enumeration.max_id_len))
    ])

    def __init__(self, name, ids, names, fuel_enum, fuel_ids):
        """
        Parameters
        ----------
        name : str
            name of the enumeration
        ids : list of str
            end-use ids (without fuel)
        names : list of str
            display names, parallel to ids
        fuel_enum : FuelEnumeration
            enumeration of the fuels referenced by fuel_ids
        fuel_ids : list of str
            fuel id for each end-use, parallel to ids; each must be an id
            in fuel_enum
        """
        self.name = name
        self._ids = ids
        self._names = names
        self.fuel_enum = fuel_enum
        self._fuel_ids = fuel_ids
        self.checkvalues()
        return

    def __str__(self):
        return (f"{self.__class__.__name__}({self.name}, [{self._ids[0]}, ...], "
                f"[{self._names[0]}, ...], {self.fuel_enum}, [{self._fuel_ids[0]}, ...])")

    def checkvalues(self):
        """Validate fuel_ids length, fuel_enum type, and fuel_id membership."""
        ids = self._ids; fuel_ids = self._fuel_ids; fuel_enum = self.fuel_enum
        n_ids = len(ids); n_fuel_ids = len(fuel_ids)
        # make sure fuel_ids is as long as ids
        if n_fuel_ids != n_ids:
            raise DSGridValueError("Number of fuel ids (" + str(n_fuel_ids) +
                ") must match number of ids (" + str(n_ids) + ")")
        if not isinstance(fuel_enum,FuelEnumeration):
            raise DSGridValueError("The fuel_enum must be of type " +
                "{}, but is instead of type {}".format(FuelEnumeration.__class__,
                    type(fuel_enum)))
        # make sure fuel_ids are in fuel enum
        for fuel_id in set(fuel_ids):
            if fuel_id not in fuel_enum.ids:
                raise DSGridValueError("The fuel_ids must each be an id in the fuel_enum." +
                    "fuel_id: {}, fuel_enum.ids: {}".format(fuel_id,fuel_enum.ids))
        super(MultiFuelEndUseEnumeration, self).checkvalues()
        return

    @property
    def ids(self):
        # Public ids are (enduse_id, fuel_id) pairs.
        return list(zip(self._ids,self._fuel_ids))

    @property
    def names(self):
        # NOTE: this is a generator, not a list, of "name (Fuel Name)".
        for i, _id in enumerate(self._ids):
            yield "{} ({})".format(self._names[i],self.fuel((_id,self._fuel_ids[i])))

    def fuel(self,id):
        """Return the fuel display name for an (enduse_id, fuel_id) tuple."""
        assert isinstance(id,tuple) & (len(id) == 2), "The ids for MultiFuelEndUseEnumerations are (enduse_id, fuel_id). Got {!r}".format(id)
        return self.fuel_enum.names[self.fuel_enum.ids.index(id[1])]

    def units(self,id):
        """Return the units string for an (enduse_id, fuel_id) tuple."""
        assert isinstance(id,tuple) & (len(id) == 2), "The ids for MultiFuelEndUseEnumerations are (enduse_id, fuel_id). Got {!r}".format(id)
        return self.fuel_enum.units[self.fuel_enum.ids.index(id[1])]

    def create_subset_enum(self,ids):
        """
        Returns a new enumeration that is a subset of this one, based on keeping
        the items in ids.

        Parameters
        ----------
        ids : list of 2-tuples
            subset of self.ids that should be kept in the new enumeration

        Returns
        -------
        MultiFuelEndUseEnumeration
        """
        # The subset preserves the order of the requested ids.
        n = len(ids)
        _ids = [None] * n; _names = [None] * n; _fuel_ids = [None] * n
        for i, full_id in enumerate(self.ids):
            if full_id in ids:
                j = ids.index(full_id)
                logger.debug("Found info for {}, which is entry {} of {}".format(full_id,j,len(_ids)))
                _ids[j] = self._ids[i]
                _fuel_ids[j] = self._fuel_ids[i]
                _names[j] = self._names[i]
        # Any remaining None means a requested id was not found.
        if len([x for x in _ids if x is None]):
            raise DSGridRuntimeError("At least one of {} is not in {}".format(ids,self.ids))
        # Deep copy so the subset does not share mutable fuel_enum state.
        fuel_enum = copy.deepcopy(self.fuel_enum)
        return self.__class__(self.name + ' Subset',_ids,_names,fuel_enum,_fuel_ids)

    def persist(self, h5group):
        """Write this enumeration and its fuel_enum to h5group."""
        dset = h5group.create_dataset(
            self.dimension,
            dtype=self.enum_dtype,
            shape=(len(self),))
        dset.attrs["name"] = self.name
        dset["id"] = np.array(self._ids)
        dset["name"] = np.array([name.encode(ENCODING) for name in self._names])
        dset["fuel_id"] = np.array(self._fuel_ids)
        self.fuel_enum.persist(h5group)
        return dset

    @classmethod
    def load(cls, h5group):
        """Reconstruct from HDF5, loading the fuel enumeration first."""
        fuel_enum = FuelEnumeration.load(h5group)
        h5dset = h5group[cls.dimension]
        h5dset_data = h5group[cls.dimension][...] # workaround for h5pyd
        return cls(
            get_str(h5dset.attrs["name"]),
            [get_str(vid) for vid in h5dset_data["id"]],
            [get_str(vname) for vname in h5dset_data["name"]],
            fuel_enum,
            [get_str(vfuel_id) for vfuel_id in h5dset_data["fuel_id"]]
        )

    @classmethod
    def read_csv(cls, filepath, name=None, fuel_enum=None):
        """
        Load from CSV. Accepted column layouts:

        id, name, fuel_id + pass in fuel_enum
        or
        id, name, fuel_id, fuel_name, units
        or
        id, name, fuel_id, units (and fuel_name will be guessed from fuel_id)
        """
        enum = pd.read_csv(filepath , dtype=str)
        name = cls._name_from_filepath(filepath) if name is None else name
        if fuel_enum is None:
            fuel_enum_name = name + ' Fuels'
            if 'fuel_name' in enum.columns:
                # fuel enum fully defined in this file
                fuel_enum = enum[["fuel_id","fuel_name","units"]].drop_duplicates()
                fuel_enum = FuelEnumeration(
                    fuel_enum_name,
                    list(fuel_enum.fuel_id),
                    list(fuel_enum.fuel_name),
                    list(fuel_enum.units))
            else:
                # create fuel enum names from fuel enum ids
                fuel_enum = enum[["fuel_id","units"]].drop_duplicates()
                fuel_ids = list(fuel_enum.fuel_id)
                fuel_names = [fuel_id.replace("_"," ").title() for fuel_id in fuel_ids]
                fuel_enum = FuelEnumeration(
                    fuel_enum_name,
                    fuel_ids,
                    fuel_names,
                    list(fuel_enum.units))
        assert fuel_enum is not None
        return cls(name, list(enum.id), list(enum.name), fuel_enum, list(enum.fuel_id))

    def to_csv(self, filedir=None, filepath=None, overwrite=False):
        """
        Write to CSV. The fuel_name column is omitted when every fuel name
        can be regenerated from its id; units are always written.
        """
        p = self._default_filepath()
        if filepath is not None:
            p = filepath
        elif filedir is not None:
            p = os.path.join(filedir,self._default_filename())
        if not overwrite and os.path.exists(p):
            msg = "{} already exists".format(p)
            logger.error(msg)
            raise DSGridRuntimeError(msg)
        # Detect whether fuel names follow the id-derived convention used by
        # read_csv, in which case the fuel_name column is redundant.
        simple_fuel_name = True
        for fuel_id in self.fuel_enum.ids:
            if not (fuel_id.replace("_"," ").title() == self.fuel_enum.get_name(fuel_id)):
                simple_fuel_name = False
                break
        data = list(zip(self._ids,self._names,self._fuel_ids))
        cols = ['id','name','fuel_id']
        if not simple_fuel_name:
            data = [list(x) + [self.fuel_enum.get_name(x[2])] for x in data]
            cols += ['fuel_name']
        data = [list(x) + [self.fuel_enum.get_units(x[2])] for x in data]
        cols += ['units']
        df = pd.DataFrame(data,columns=cols)
        df.to_csv(p,index=False)
# Define standard enumerations
# CSV definitions ship alongside this module in enumeration_data/.
enumdata_folder = os.path.join(os.path.dirname(__file__), "enumeration_data/")

## Sectors
sectors_subsectors = SectorEnumeration.read_csv(
    enumdata_folder + "sectors_subsectors.csv", "standard_sector_subsectors")
mecs_subsectors = SectorEnumeration.read_csv(
    enumdata_folder + "mecs_subsectors.csv", "mecs_subsectors")
sectors = SectorEnumeration.read_csv(
    enumdata_folder + "sectors.csv", "standard_sectors")
sectors_eia_extended = SectorEnumeration.read_csv(
    enumdata_folder + "sectors_eia_extended.csv", "sectors_eia_extended")
# Degenerate single-entry enumeration covering all sectors at once.
allsectors = SectorEnumeration("all_sectors", ["All"], ["All Sectors"])

## Geographies
counties = GeographyEnumeration.read_csv(
    enumdata_folder + "counties.csv", "counties")
conus_counties = GeographyEnumeration.read_csv(
    os.path.join(enumdata_folder,'conus_counties.csv'))
states = GeographyEnumeration.read_csv(
    enumdata_folder + "states.csv", "states")
conus_states = GeographyEnumeration.read_csv(
    os.path.join(enumdata_folder,'conus_states.csv'))
census_divisions = GeographyEnumeration.read_csv(
    enumdata_folder + "census_divisions.csv", "census_divisions")
res_state_groups = GeographyEnumeration.read_csv(
    enumdata_folder + "res_state_groups.csv", "state_groups")
loss_state_groups = GeographyEnumeration.read_csv(
    enumdata_folder + "loss_state_groups.csv", "loss_state_groups")
census_regions = GeographyEnumeration.read_csv(
    enumdata_folder + "census_regions.csv", "census_regions")
conus = GeographyEnumeration("conus", ["conus"], ["Continental United States"])

## End Uses
enduses = EndUseEnumeration.read_csv(
    enumdata_folder + "enduses.csv", "standard_enduses")
gaps_enduses = EndUseEnumeration.read_csv(
    enumdata_folder + "gaps_enduses.csv", "gaps_enduses")
fuel_types = EndUseEnumeration.read_csv(
    enumdata_folder + "fuel_types.csv", "fuel_types")
deprecated_allenduses = EndUseEnumeration("all_enduses", ["All"], ["All End-uses"])
allenduses = SingleFuelEndUseEnumeration("all_enduses", ["All"], ["All End-uses"])
loss_factor = SingleFuelEndUseEnumeration('Loss Factor',['loss_factor'],
    ['Loss Factor'],fuel='N/A',units='dimensionless')

# Time
hourly2012 = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'hourly2012.csv'))
daily2012 = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'daily2012.csv'))
weekdays = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'weekdays.csv'))
daytypes = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'day_types.csv'))
weekly2012 = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'weekly2012.csv'))
seasons = TimeEnumeration.read_csv(
    os.path.join(enumdata_folder,'seasons.csv'))
annual = TimeEnumeration("annual", ["Annual"], ["Annual"])
|
<filename>src/python/Sailing/simple_crawler/crawler.py
# -*- coding: gbk -*-
"""
HTTP Client
http://www.hzfc365.com/house_search/search_prj.jsp?lpid=1374
"""
import sys, os, logging, re
from http_client import HTTPClient
class SimpleCrawler(object):
    """Crawls hzfc365.com building/room listing pages for one project (lpid).

    Workflow: download the project search page, parse the building rows,
    then for each building fetch its unit (zh_nm/pid) pages and parse the
    room details out of them.
    """

    def __init__(self):
        self.http = HTTPClient()
        # When True, every downloaded page is cached to temp_cache_<name>.txt.
        self.debug = True

    def start(self, lpid):
        """Entry point: crawl every building of project *lpid*."""
        url = "http://www.hzfc365.com/house_search/search_prj.jsp?lpid=%s" % lpid
        reps_text = self.http.download(url)
        if self.debug:
            self._save_temp(reps_text, lpid)
        build_list = self.parse_build_info(reps_text)
        for build in build_list:
            logging.info("start fetch:%s, <NAME> jian:%s" % (1, 2))
            build.lpid = lpid
            for zh_nm in build.zh_nm_list:
                self.fetch_zh_nm_pid_data(build, *zh_nm)

    def fetch_zh_nm_pid_data(self, build, zh_nm, pid, name):
        """Download one unit page, using the project page as the Referer."""
        url = "http://www.hzfc365.com/house_view/lpxx-xs-2.jsp"\
              "?zh_nm=%s&pid=%s" % (zh_nm, pid)
        referer_url = "http://www.hzfc365.com/house_search/search_prj.jsp?lpid=%s" % build.lpid
        reps_text = self.http.download(url, {"Referer": referer_url})
        if self.debug:
            self._save_temp(reps_text, zh_nm)
        self._parse_room_info(reps_text, referer_url=url)

    def _parse_room_info(self, reps_text=None, cache_name=None, referer_url=None):
        """Extract zh_nm/sessionid hidden fields from the unit page, then
        fetch the room-detail ("yt") page and parse it.

        If *cache_name* is given, *reps_text* is read from the local page
        cache instead of being supplied by the caller.
        """
        if cache_name:
            reps_text = self._read_temp(cache_name)
        # Example detail URL:
        # http://www.hzfc365.com/house_view/lpxx-xs-2-yt.jsp?zh_nm=...&q_area=&keytime=...&sessionid=...
        # Raw strings so \d / \w stay regex escapes (were plain strings,
        # which is a deprecated invalid escape sequence in Python 3).
        r_zh_nm = re.search(r'<input id="info_zh_nm" type="hidden" value="(\d+)">', reps_text).group(1)
        sessionid = re.search(r'<input id="sessionid" type="hidden" value="(\w+)">', reps_text).group(1)
        import time
        cur_time = time.time()
        logging.info("r_zh_nm=%s, sessionid=%s, time=%s" % (r_zh_nm, sessionid, cur_time))
        # keytime mimics the site's millisecond-style timestamp parameter.
        url = "http://www.hzfc365.com/house_view/lpxx-xs-2-yt.jsp?zh_nm=%s&q_area=&keytime=%s&sessionid=%s" % (r_zh_nm, cur_time * 100, sessionid)
        reps_text = self.http.download(url, {"Referer": referer_url})
        if self.debug:
            self._save_temp(reps_text, "d%s" % r_zh_nm)
        return self._parse_room_detail_info(reps_text)

    def _parse_room_detail_info(self, reps_text=None, cache_name=None):
        """Parse room entries out of the detail page's title='...' attributes."""
        if cache_name:
            reps_text = self._read_temp(cache_name)
        regex = r"title='([^']+)'"
        factor = re.compile(regex, re.I)
        data = []
        for item in factor.finditer(reps_text):
            logging.info("details:%s" % str(item.groups()))
            data.append(RoomInfo(*item.groups()))
        return data

    def parse_build_info(self, reps_text=None, cache_name=None):
        """Parse the 8-column building table rows into BuidingInfo objects."""
        if cache_name:
            reps_text = self._read_temp(cache_name)
        td = r"\s+<td[^>]+>(.*?)</td>"
        regex = r"<TR onmouseover=[^>]+><A [^>]+>%s</A>" % (td * 7)
        regex += r"\s+<td[^>]+>(.*?)</td>"
        regex += r"\s+</tr>"
        factor = re.compile(regex, re.I)
        data = []
        for item in factor.finditer(reps_text):
            logging.info("data:%s" % str(item.groups()))
            data.append(BuidingInfo(*item.groups()))
        return data

    def _save_temp(self, data, name):
        """Cache page text to temp_cache_<name>.txt in the working directory."""
        # 'with' guarantees the handle is closed even if write() fails;
        # the original leaked the handle on exception.
        with open("temp_cache_%s.txt" % name, "w") as fd:
            fd.write(data)

    def _read_temp(self, name):
        """Read previously cached page text back from disk."""
        with open("temp_cache_%s.txt" % name, "r") as fd:
            return fd.read()
class BuidingInfo(object):
    """One building row parsed from the project search page.

    The eight constructor arguments are the row's table cells; the last
    (ysds) contains anchor tags from which (zh_nm, pid, name) triples are
    extracted into zh_nm_list.
    """

    # Attribute names for the eight table cells, in column order.
    _FIELDS = ('yszh', 'kpsj', 'ksts', 'ksmj', 'ysts', 'ysjj', 'yydts', 'ysds')

    def __init__(self, yszh, kpsj, ksts, ksmj, ysts, ysjj, yydts, ysds):
        self.id = None
        for attr, value in zip(self._FIELDS,
                               (yszh, kpsj, ksts, ksmj, ysts, ysjj, yydts, ysds)):
            setattr(self, attr, value)
        self.zh_nm_list = self._parse_zh_nm_list(ysds)

    def _parse_zh_nm_list(self, ysds):
        """Pull (zh_nm, pid, link-text) triples out of the ysds cell's anchors."""
        pattern = re.compile(r'<a href=".*?zh_nm=(\d+)&pid=(\d+)".*?>(.*?)</a>', re.I)
        triples = []
        for match in pattern.finditer(ysds):
            logging.info("data:%s" % str(match.groups()))
            triples.append(match.groups())
        return triples
#<a href="/house_view/lpxx-xs-2.jsp?zh_nm=119218&pid=86101" target="_blank" class="main">2\xb4\xb1</a>
class RoomInfo(object):
    """One room record built from a title='...' attribute on the detail page."""

    def __init__(self, data):
        self.id = None
        # The original accepted *data* and silently discarded it, so the
        # parsed room text was lost; keep the raw payload for callers.
        self.data = data
def main(lpid):
    """Crawl one project, identified by *lpid*.

    Parameters
    ----------
    lpid : str
        project id; falls back to the historical default "1374" when empty.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logging.info("starting process lpid:%s" % lpid)
    crawler = SimpleCrawler()
    crawler.debug = True
    if not lpid:
        # The original unconditionally overwrote lpid with "1374" (debug
        # leftover), ignoring the caller's argument; keep it only as a
        # fallback for empty input.
        lpid = "1374"
    crawler.start(lpid)
    logging.info("end process lpid %s." % lpid)
if "__main__" == __name__:
    # Require exactly one CLI argument (the project id). The original
    # called main("") unconditionally BEFORE the argv check, so a valid
    # invocation crawled twice; print is parenthesized for Python 3.
    if len(sys.argv) != 2:
        print("python crawler.py <lpid>")
    else:
        main(sys.argv[1])
|
import json
import re
from moto.core.responses import BaseResponse
from .exceptions import InvalidParameterValueException
from .models import dax_backends
class DAXResponse(BaseResponse):
    """HTTP request handlers for DAX API actions.

    Each handler decodes the JSON request body, delegates to the regional
    backend, and returns a JSON-serialized response.
    """

    @property
    def dax_backend(self):
        # Backend instance for the region this request targets.
        return dax_backends[self.region]

    def create_cluster(self):
        """Create a cluster; validates the IAM role ARN and cluster name first."""
        params = json.loads(self.body)
        cluster_name = params.get("ClusterName")
        node_type = params.get("NodeType")
        description = params.get("Description")
        replication_factor = params.get("ReplicationFactor")
        iam_role_arn = params.get("IamRoleArn")
        tags = params.get("Tags", [])
        sse_specification = params.get("SSESpecification", {})
        self._validate_arn(iam_role_arn)
        self._validate_name(cluster_name)
        cluster = self.dax_backend.create_cluster(
            cluster_name=cluster_name,
            node_type=node_type,
            description=description,
            replication_factor=replication_factor,
            iam_role_arn=iam_role_arn,
            tags=tags,
            sse_specification=sse_specification,
        )
        return json.dumps(dict(Cluster=cluster.to_json()))

    def delete_cluster(self):
        """Delete the named cluster and return its final description."""
        cluster_name = json.loads(self.body).get("ClusterName")
        cluster = self.dax_backend.delete_cluster(cluster_name)
        return json.dumps(dict(Cluster=cluster.to_json()))

    def describe_clusters(self):
        """Describe clusters, optionally filtered by name, with pagination."""
        params = json.loads(self.body)
        cluster_names = params.get("ClusterNames", [])
        max_results = params.get("MaxResults")
        next_token = params.get("NextToken")
        for name in cluster_names:
            self._validate_name(name)
        clusters, next_token = self.dax_backend.describe_clusters(
            cluster_names=cluster_names, max_results=max_results, next_token=next_token
        )
        return json.dumps(
            {"Clusters": [c.to_json() for c in clusters], "NextToken": next_token}
        )

    def _validate_arn(self, arn):
        """Raise InvalidParameterValueException unless *arn* has at least the
        arn:partition:vendor:region:namespace:relative-id shape (six
        colon-separated sections). The messages mirror AWS's progressive
        per-colon error strings and must not be reworded."""
        if not arn.startswith("arn:"):
            raise InvalidParameterValueException(f"ARNs must start with 'arn:': {arn}")
        sections = arn.split(":")
        if len(sections) < 3:
            raise InvalidParameterValueException(
                f"Second colon partition not found: {arn}"
            )
        if len(sections) < 4:
            raise InvalidParameterValueException(f"Third colon vendor not found: {arn}")
        if len(sections) < 5:
            raise InvalidParameterValueException(
                f"Fourth colon (region/namespace delimiter) not found: {arn}"
            )
        if len(sections) < 6:
            raise InvalidParameterValueException(
                f"Fifth colon (namespace/relative-id delimiter) not found: {arn}"
            )

    def _validate_name(self, name):
        """Raise InvalidParameterValueException unless *name* starts with a
        lowercase letter, contains only lowercase letters/digits/hyphens,
        does not end with a hyphen, and has no consecutive hyphens."""
        msg = "Cluster ID specified is not a valid identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens."
        if not re.match("^[a-z][a-z0-9-]+[a-z0-9]$", name):
            raise InvalidParameterValueException(msg)
        if "--" in name:
            raise InvalidParameterValueException(msg)

    def list_tags(self):
        """Return the tags attached to the named resource."""
        params = json.loads(self.body)
        resource_name = params.get("ResourceName")
        tags = self.dax_backend.list_tags(resource_name=resource_name)
        return json.dumps(tags)

    def increase_replication_factor(self):
        """Grow the cluster to NewReplicationFactor nodes."""
        params = json.loads(self.body)
        cluster_name = params.get("ClusterName")
        new_replication_factor = params.get("NewReplicationFactor")
        cluster = self.dax_backend.increase_replication_factor(
            cluster_name=cluster_name, new_replication_factor=new_replication_factor
        )
        return json.dumps({"Cluster": cluster.to_json()})

    def decrease_replication_factor(self):
        """Shrink the cluster to NewReplicationFactor nodes, optionally
        removing the specific NodeIdsToRemove."""
        params = json.loads(self.body)
        cluster_name = params.get("ClusterName")
        new_replication_factor = params.get("NewReplicationFactor")
        node_ids_to_remove = params.get("NodeIdsToRemove")
        cluster = self.dax_backend.decrease_replication_factor(
            cluster_name=cluster_name,
            new_replication_factor=new_replication_factor,
            node_ids_to_remove=node_ids_to_remove,
        )
        return json.dumps({"Cluster": cluster.to_json()})
|
<reponame>joy13975/covidprof_submission
# Entry point for the COVID Professor main app
import logging
import re
import sys
from time import time
import RAKE
from multiprocessing.pool import ThreadPool
from adapter.elastic_search import ElasticSearchAdapter as SearchEngine
from adapter.twitter import TwitterAdapter
from adapter.c3ai import C3aiAdapter
from adapter.gpt3 import GPT3Adapter
from adapter.web import WebAdapter
from question_type import QuestionType
from numerical import Numerical
from base.config_loader import ConfigLoader
from intent_type import IntentType
rake = RAKE.Rake(RAKE.SmartStopList())
class Professor(ConfigLoader):
safe_words_regex = [
re.compile(fmt) for fmt in [
r'^stop[\.\,\!]*$',
r'^shut up[\.\,\!\?]*$',
r'^.*fuck.*$',
r'^.*idiot.*$',
r'^you (are )?stupid[\.\,\!\?]*$',
r'^wtf[\.\,\!\?]*$',
r'^ok[\.\,\!\?]*$',
]
]
def __init__(self):
    '''Constructor'''
    super().__init__()
    # Initialize components
    self.messaging = TwitterAdapter(message_handler=self.message_handler)  # inbound/outbound tweets
    self.search_engine = SearchEngine()  # Elasticsearch adapter (aliased import)
    self.datalake = C3aiAdapter()        # C3.ai data lake adapter
    self.language = GPT3Adapter()        # GPT-3 language tasks
    self.web = WebAdapter()              # page fetch/parse + URL shortening
    self.numerical = Numerical()         # stats questions / plot generation
def _get_auxiliary_url(self, msg, corrected_msg):
'''Select a COVID-19 related webpage URL based on input message'''
# TODO: Smarter URL selection or update to an actual search
msg_words = f'{msg} {corrected_msg}'.lower().split()
macro = len(set(msg_words) & set(self.macro_words)) > 0
if macro:
url = self.macro_url
else:
url = self.micro_url
return url
@classmethod
def _calc_relevance(cls, words, docs):
'''Calculate simple relevance measure by counting keywords' presence
in docs'''
return sum(
[w.lower() in doc.lower() for w in words for doc in docs]
) / len(words)/len(docs)
def _get_document_urls(self, docs):
'''Extract urls from search result documents'''
urls = []
for d in docs:
try:
# Need to remove api links because they fail to
# actually lead to the paper
url = next(
u for u in d['url'].split(';')
if all(w not in u.lower()
for w in self.banned_url_words)
)
except StopIteration:
url = d['url']
urls.append(url)
return urls
def _shorten_url(self, url):
'''Shortens a URL or return original URL on failure'''
if not self.shorten_urls:
return url
try:
return self.web.shorten_url(url)
except Exception as e:
logging.error(f'Could not shorten URL "{url}" due to {e}')
return url
def _shorten_urls(self, urls, max_threads=2):
'''Shortens URLs (multithreaded)'''
with ThreadPool(min(len(urls), max_threads)) as pool:
return pool.map(self._shorten_url, urls)
def _answer_textual(self, msg, corrected_msg):
    '''Search for a textual answer to a human question

    Pipeline: RAKE keyword extraction -> search engine -> auxiliary web
    page -> excerpt extraction -> GPT-3 answer formatting -> URL
    shortening.

    Parameters
    ----------
    msg : str
        raw user message
    corrected_msg : str
        autocorrected version of msg

    Returns
    -------
    (str, list of str)
        the formatted answer (prefixed with the medical disclaimer) and
        shortened source URLs for the excerpts actually used
    '''
    covid_crct_msg = self.input_msg_header + corrected_msg
    # Extract keywords for search because they perform
    # better than simply feeding the whole message.
    # The result contains phrase parts and score in tuples.
    rake_results = rake.run(covid_crct_msg)
    logging.info(f'RAKE results: {rake_results}')
    # Search backend for relevant text
    keyword_str = ' '.join(w for w, _ in rake_results)
    # Still make it configurable whether to use keyword or
    # message in search
    search_query = keyword_str if self.use_keyword_to_search \
        else corrected_msg
    t0 = time()
    search_results = self.search_engine.search(
        search_query,
        n=self.search_n_docs,
        n_frags=self.search_n_frags,
        frag_size=self.search_frag_size)
    logging.info(f'Search engine took {time()-t0:.1f}s')
    # Extract fragments wthin the results
    relevant_text = [
        self.search_engine.get_highlight_frags(r)
        for r in search_results
    ]
    # Log relevance as a measure of search performance
    keyword_list = keyword_str.split(' ')
    search_relevance = self._calc_relevance(keyword_list, relevant_text)
    logging.info(f'Search relevance: {search_relevance:.2f}')
    # Get auxiliary text
    aux_url = self._get_auxiliary_url(msg, corrected_msg)
    _, aux_text = self.web.parse_page(aux_url, page_type='wikipedia')
    # Drop empty documents; aux_text is prepended so its URL stays index 0.
    docs = [d for d in [aux_text, *relevant_text] if d]
    total_len = sum(len(d) for d in docs)
    logging.info(
        f'Sending docs of total {total_len} chars into excerpt extraction')
    t0 = time()
    i_excerpts = self.web.get_excerpts(question=corrected_msg, docs=docs)
    logging.info(f'Excerpt extraction took {time()-t0:.1f}s')
    excerpt_relevance = self._calc_relevance(
        keyword_list, [e for _, e in i_excerpts])
    logging.info(f'Excerpt relevance: {excerpt_relevance:.2f}')
    # Keep only the configured number of top-ranked excerpts.
    top_excerpts = i_excerpts[:self.n_excerpts_considered]
    t0 = time()
    answer = self.disclaimer['medical'] + \
        self.language.extract_answer(covid_crct_msg, top_excerpts)
    logging.info(f'Answer formatting took {time()-t0:.1f}s')
    # Excerpt indices map back into this combined URL list.
    urls = [aux_url, *self._get_document_urls(search_results)]
    top_urls = [urls[i] for i, _ in top_excerpts]
    t0 = time()
    short_urls = self._shorten_urls(top_urls)
    logging.info(f'URL shortening took {time()-t0:.1f}s')
    return answer, short_urls
def _answer_question(self, msg, corrected_msg):
    '''Determine question type and answer each type appropriately

    Returns (answer, urls, png_filename, question_type); urls is only
    populated for textual answers, png_filename only for stats answers.
    '''
    question_type = self.language.classify_question(corrected_msg)
    answer, urls, png_filename = '', [], None
    if question_type == QuestionType.Stats:
        # Numeric/statistical request -> data lake + plot image.
        answer, png_filename = self.numerical.handle_request(corrected_msg)
    elif question_type == QuestionType.Textual:
        # Free-text question -> search/excerpt pipeline.
        answer, urls = self._answer_textual(msg, corrected_msg)
    else:
        answer = 'I don\'t know how to answer that.'
    return answer, urls, png_filename, question_type
def _generate_reply(self, msg):
    '''Returns empty string if input is to be ignored.

    Pipeline: strip @handles -> reject contentless input -> autocorrect ->
    safe-word filter -> intent detection -> answer generation.

    Parameters
    ----------
    msg : str
        raw message text, possibly containing twitter @handles

    Returns
    -------
    (str, list, str or None, question type or None)
        answer text ('' means do not reply), source urls, plot image
        filename (for stats questions), and the detected question type
    '''
    # Remove any twitter handles first
    msg = re.sub(r'@(\w){1,15}', '', msg)
    msg = re.sub(r' +', ' ', msg).strip()
    # Set default return values
    answer = ''
    urls = []
    png_filename = None
    question_type = None
    # Closure so every exit point returns the current 4-tuple.
    def retvals(): return answer, urls, png_filename, question_type
    # Check for empty message
    if not msg or re.search(r'[a-z]+', msg.lower().strip()) is None:
        # No info inside msg
        logging.warning(f'Message has no info: {msg}')
        answer = self.confused_msg
        return retvals()
    # Autocorrect message
    corrected_msg = self.language.autocorrect(msg)
    logging.info(f'Message corrected to: {corrected_msg}')
    # Check for safe words
    has_safe_word = any(r.match(corrected_msg.lower()) is not None
            for r in self.safe_words_regex)
    if has_safe_word:
        logging.warning(f'Message has safe word: {corrected_msg}')
        # Simply ignore (answer is empty) and return early
        return retvals()
    # Detect message intent
    intent = self.language.get_intent(corrected_msg)
    logging.info(f'Conversation intent is: {intent.name}')
    if intent == IntentType.Over:
        # Simply ignore (answer is empty)
        pass
    elif intent == IntentType.Confused:
        answer = self.confused_msg
    elif intent == IntentType.AboutMe:
        answer = self.disclaimer['controversy'] + \
            self.language.answer_question_about_me(corrected_msg)
    else:
        # Answer the question
        answer, urls, png_filename, question_type = \
            self._answer_question(msg, corrected_msg)
    return retvals()
def message_handler(self, msg):
    '''Upon receiving a message this function starts the whole Q&A process
    and sends the reply, plus a follow-up message with sources when any.
    '''
    logging.info(f'Received new message: {msg}')
    t0 = time()
    reply, urls, png_filename, question_type = self._generate_reply(msg)
    if reply:
        media_ids = []
        if question_type == QuestionType.Stats and \
                png_filename is not None:
            # upload image first so it can be attached to the reply
            media = self.messaging.upload_media(png_filename)
            media_ids = [media.media_id]
        logging.info(f'Time to reply #1: {time()-t0:.1f}s')
        # FIX: local variable was misspelled 'rely_to_id'.
        reply_to_id = self.messaging.reply(reply, media_ids=media_ids)
        if 'I think your question is nonsense.'.lower() in reply:
            logging.info(
                'Not sending sources because question is detected as nonsense.')
            return
        urls = [u for u in urls if u]
        if urls:
            # Send sources, but repeat last url because in Twitter it
            # disappears into a preview.
            failsafe_phrases = ('i don\'t know.', 'i\'m not sure.')
            if any(ph in reply.lower().strip() for ph in failsafe_phrases):
                # Exclude wiki (first) link
                sources_str = \
                    self.articles_might_help_heading + '\n' +\
                    '\n'.join(f'[{i+1}]{url}'
                              for i, url in enumerate(urls[1:]))
            else:
                sources_str = \
                    self.references_heading + '\n' +\
                    '\n'.join(f'[{i+1}]{url}'
                              for i, url in enumerate(urls))
            logging.info(f'Time to reply #2: {time()-t0:.1f}s')
            self.messaging.reply(sources_str, reply_to_id=reply_to_id)
def start(self):
    '''Main entry to start listening for messages.

    Blocks the calling thread (is_async=False); presumably the messaging
    backend dispatches incoming messages to message_handler — confirm the
    wiring in the Professor constructor.
    '''
    logging.info('Listening for messages...')
    self.messaging.listen(is_async=False)
if __name__ == '__main__':
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    # Parse "key=value" CLI arguments into Professor keyword arguments.
    # BUG FIX: split on the first '=' only, so values that themselves
    # contain '=' (e.g. URLs with query strings) no longer crash dict().
    p = Professor(**dict(v.split('=', 1) for v in sys.argv[1:]))
    p.start()
|
# Source repository: jmikeowen/Spheral
import mpi
from Spheral2d import *
from MultiScaleMedialGenerator import *
from SpheralTestUtilities import *
from VoronoiDistributeNodes import distributeNodes2d as distributeNodes
from siloPointmeshDump import *
# Problem parameters; presumably overridable from the command line as
# key=value pairs via Spheral's commandLine helper — confirm.
commandLine(ncore = 2000,          # target number of points in the core
            rhocore0 = 10.0,       # core density scale
            rhomantle0 = 5.0,      # mantle density scale
            Rcore = 1.0,           # core radius
            Rmantle = 10.0,        # outer mantle radius
            Rc = 0.25,             # softening length in the core profile
            ncirc = 360,           # facets per polygonized boundary circle
            hmin = 1e-5,           # smoothing scale bounds
            hmax = 1e6,
            nPerh = 2.01,          # points per smoothing scale
            centroidFrac = 1.0,
            maxIterations = 1000,  # cap on generator relaxation iterations
            fracTol = 1e-5)        # generator convergence tolerance
#-------------------------------------------------------------------------------
# The density profiles we're going to fit.
# Note we don't have to provide the rho gradient methods, but providing them is
# probably more accurate and they're trivial to compute for these profiles.
#-------------------------------------------------------------------------------
def rhocore(posi):
    """Core density profile: rhocore0 / (r^2 + Rc^2)."""
    return rhocore0/(posi.magnitude2() + Rc*Rc)
def gradrhocore(posi):
    """Analytic radial gradient of the core density profile."""
    r, rhat = posi.magnitude(), posi.unitVector()
    return -2.0*rhocore0*r/(r*r + Rc*Rc)**2 * rhat
def rhomantle(posi):
    """Mantle density profile: rhomantle0 / r^2."""
    return rhomantle0/posi.magnitude2()
def gradrhomantle(posi):
    """Analytic radial gradient of the mantle density profile."""
    r, rhat = posi.magnitude(), posi.unitVector()
    return -2.0*rhomantle0/(r*r*r) * rhat
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
gamma = 1.4   # ratio of specific heats
mu = 2.0      # mean molecular weight
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
# Tabulated B-spline kernel sampled at 1000 points.
WT = TableKernel(BSplineKernel(), 1000)
output("WT")
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
# Two fluid NodeLists sharing the same gamma-law EOS: one for the dense
# core, one for the surrounding mantle.
nodesCore = makeFluidNodeList("core", eos,
                              hmin = hmin,
                              hmax = hmax,
                              nPerh = nPerh,
                              topGridCellSize = 100,
                              xmin = Vector.one * -100.0,
                              xmax = Vector.one * 100.0)
nodesMantle = makeFluidNodeList("mantle", eos,
                                hmin = hmin,
                                hmax = hmax,
                                nPerh = nPerh,
                                topGridCellSize = 100,
                                xmin = Vector.one * -100.0,
                                xmax = Vector.one * 100.0)
nodeSet = [nodesCore, nodesMantle]
# Echo the smoothing-scale configuration of each NodeList.
for nodes in nodeSet:
    output("nodes.name")
    output(" nodes.hmin")
    output(" nodes.hmax")
    output(" nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Make our boundaries.
#-------------------------------------------------------------------------------
# Polygonize the core circle: ncirc points on a circle of radius Rcore.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for i in xrange(ncirc):
    theta = 2.0*pi/ncirc * i
    bcpoints.append(Vector(Rcore*cos(theta), Rcore*sin(theta)))
# Each facet joins consecutive points, wrapping around at the end.
for i in xrange(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
boundaryCore = Polygon(bcpoints, bcfacets)
# Scale the same ring of points outward to build the mantle boundary;
# the facet topology is reused unchanged.
for i in xrange(ncirc):
    bcpoints[i] *= Rmantle/Rcore
boundaryMantle = Polygon(bcpoints, bcfacets)
#-------------------------------------------------------------------------------
# Generate them nodes.
#-------------------------------------------------------------------------------
# First, figure out the appropriate number of nodes we should have in the mantle
# to mass match those in the core.
# Analytic masses: integral of 2*pi*r*rho(r) dr over each region.
Mcore = pi*rhocore0*(log(Rcore*Rcore + Rc*Rc) - log(Rc*Rc))
Mmantle = 2.0*pi*rhomantle0*(log(Rmantle) - log(Rcore))
nmantle = int(Mmantle/Mcore*ncore + 0.5)   # round to nearest integer
print " Core mass: ", Mcore
print "Mantle mass: ", Mmantle
print "Resulting target point mass and number of points in mantle: ", Mcore/ncore, nmantle
generatorCore = MultiScaleMedialGenerator2d(n = ncore,
                                            rho = rhocore,
                                            gradrho = gradrhocore, # This is not necessary, but we'll use it if provided
                                            boundary = boundaryCore,
                                            centroidFrac = centroidFrac,
                                            maxIterationsPerStage = maxIterations,
                                            fracTol = fracTol,
                                            tessellationFileName = "test_medial2d_core_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                                            nNodePerh = nPerh)
generatorMantle = MultiScaleMedialGenerator2d(n = nmantle,
                                              rho = rhomantle,
                                              gradrho = gradrhomantle, # This is not necessary, but we'll use it if provided
                                              boundary = boundaryMantle,
                                              holes = [boundaryCore],  # exclude the core region
                                              centroidFrac = centroidFrac,
                                              maxIterationsPerStage = maxIterations,
                                              fracTol = fracTol,
                                              tessellationFileName = "test_medial2d_mantle_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                                              nNodePerh = nPerh)
distributeNodes((nodesCore, generatorCore),
                (nodesMantle, generatorMantle))
#-------------------------------------------------------------------------------
# Drop a viz file for inspection.
#-------------------------------------------------------------------------------
db = DataBase()
for nodes in nodeSet:
    db.appendNodeList(nodes)
vizfile = siloPointmeshDump(baseName = "test_medial_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                            baseDirectory = "test_medial2d_sphere_density",
                            fieldLists = [db.fluidMassDensity,
                                          db.fluidMass,
                                          db.fluidVelocity,
                                          db.fluidSpecificThermalEnergy,
                                          db.fluidHfield]
                            )
#-------------------------------------------------------------------------------
# Plot a few profiles of interest.
#-------------------------------------------------------------------------------
from SpheralGnuPlotUtilities import *
# Mass and mass density as a function of radial distance.
massPlot = plotFieldList(db.fluidMass,
                         xFunction = "%s.magnitude()",
                         plotStyle = "points",
                         winTitle = "mass",
                         colorNodeLists = False, plotGhosts = False)
rhoPlot = plotFieldList(db.fluidMassDensity,
                        xFunction = "%s.magnitude()",
                        plotStyle = "points",
                        winTitle = "mass density",
                        colorNodeLists = False, plotGhosts = False)
# Log-scale the density axis before writing hardcopies.
rhoPlot("set yrange [1e-2:200]; set logscale y"); rhoPlot.refresh()
massPlot.hardcopy("test_medial2d_mass.png", terminal="png")
rhoPlot.hardcopy("test_medial2d_rho.png", terminal="png")
# Report per-NodeList mass statistics.
from fieldStatistics import fieldStatistics
for nodes in nodeSet:
    print "Mass statistics for ", nodes.name, " (min, max, avg, std dev) : ", fieldStatistics(nodes.mass())
|
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.Cechomesh import cechomesh
from echomesh.util.TestCase import TestCase
class ScrollTest(TestCase):
    """Exercise cechomesh.scroll_color_list on a 4-column, 3-row grid.

    Cells scrolled off the grid are discarded; vacated cells become
    'black' unless wrap=True, in which case content wraps around.

    FIX: assertEquals is a long-deprecated alias of assertEqual and was
    removed in Python 3.12; all assertions now use assertEqual.
    """

    def setUp(self):
        # 3 rows x 4 columns of distinct colors.
        self.data = cechomesh.ColorList(
            ['red', 'green', 'blue', 'yellow',
             'beige', 'khaki', 'olive', 'tan',
             'plum', 'teal', 'wheat', 'orchid', ])

    def doTest(self, dx, dy, expected):
        """Scroll without wrapping and compare against the expected layout."""
        result = cechomesh.scroll_color_list(self.data, dx, dy, columns=4)
        expected = cechomesh.ColorList(expected)
        self.assertEqual(result, expected)

    def test_empty(self):
        self.doTest(0, 0, self.data)

    def test_one_right(self):
        self.doTest(1, 0,
                    ['black', 'red', 'green', 'blue',
                     'black', 'beige', 'khaki', 'olive',
                     'black', 'plum', 'teal', 'wheat', ])

    def test_three_right(self):
        self.doTest(3, 0,
                    ['black', 'black', 'black', 'red',
                     'black', 'black', 'black', 'beige',
                     'black', 'black', 'black', 'plum', ])

    def test_four_right(self):
        self.doTest(4, 0,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_five_right(self):
        self.doTest(5, 0,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_one_left(self):
        self.doTest(-1, 0,
                    ['green', 'blue', 'yellow', 'black',
                     'khaki', 'olive', 'tan', 'black',
                     'teal', 'wheat', 'orchid', 'black', ])

    def test_three_left(self):
        self.doTest(-3, 0,
                    ['yellow', 'black', 'black', 'black',
                     'tan', 'black', 'black', 'black',
                     'orchid', 'black', 'black', 'black', ])

    def test_four_left(self):
        self.doTest(-4, 0,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_five_left(self):
        self.doTest(-5, 0,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_one_down(self):
        self.doTest(0, 1,
                    ['black', 'black', 'black', 'black',
                     'red', 'green', 'blue', 'yellow',
                     'beige', 'khaki', 'olive', 'tan', ])

    def test_two_down(self):
        self.doTest(0, 2,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'red', 'green', 'blue', 'yellow', ])

    def test_three_down(self):
        self.doTest(0, 3,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_four_down(self):
        self.doTest(0, 4,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_one_up(self):
        self.doTest(0, -1,
                    ['beige', 'khaki', 'olive', 'tan',
                     'plum', 'teal', 'wheat', 'orchid',
                     'black', 'black', 'black', 'black', ])

    def test_two_up(self):
        self.doTest(0, -2,
                    ['plum', 'teal', 'wheat', 'orchid',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_three_up(self):
        self.doTest(0, -3,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_four_up(self):
        self.doTest(0, -4,
                    ['black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black',
                     'black', 'black', 'black', 'black', ])

    def test_down_right(self):
        self.doTest(1, 1,
                    ['black', 'black', 'black', 'black',
                     'black', 'red', 'green', 'blue',
                     'black', 'beige', 'khaki', 'olive', ])

    def test_up_left(self):
        self.doTest(-1, -1,
                    ['khaki', 'olive', 'tan', 'black',
                     'teal', 'wheat', 'orchid', 'black',
                     'black', 'black', 'black', 'black', ])

    def doWrapTest(self, dx, dy, expected):
        """Scroll with wrap-around and compare against the expected layout."""
        result = cechomesh.scroll_color_list(self.data, dx, dy, 4, wrap=True)
        expected = cechomesh.ColorList(expected)
        self.assertEqual(result, expected)

    def test_empty_wrap(self):
        self.doWrapTest(0, 0, self.data)

    def test_one_right_wrap(self):
        self.doWrapTest(1, 0,
                        ['yellow', 'red', 'green', 'blue',
                         'tan', 'beige', 'khaki', 'olive',
                         'orchid', 'plum', 'teal', 'wheat', ])

    def test_three_right_wrap(self):
        self.doWrapTest(3, 0,
                        ['green', 'blue', 'yellow', 'red',
                         'khaki', 'olive', 'tan', 'beige',
                         'teal', 'wheat', 'orchid', 'plum', ])

    def test_four_right_wrap(self):
        self.doWrapTest(4, 0, self.data)

    def test_five_right_wrap(self):
        self.doWrapTest(5, 0,
                        ['yellow', 'red', 'green', 'blue',
                         'tan', 'beige', 'khaki', 'olive',
                         'orchid', 'plum', 'teal', 'wheat', ])

    def test_one_left_wrap(self):
        self.doWrapTest(-1, 0,
                        ['green', 'blue', 'yellow', 'red',
                         'khaki', 'olive', 'tan', 'beige',
                         'teal', 'wheat', 'orchid', 'plum', ])

    def test_three_left_wrap(self):
        self.doWrapTest(-3, 0,
                        ['yellow', 'red', 'green', 'blue',
                         'tan', 'beige', 'khaki', 'olive',
                         'orchid', 'plum', 'teal', 'wheat', ])

    def test_four_left_wrap(self):
        self.doWrapTest(-4, 0, self.data)

    def test_five_left_wrap(self):
        self.doWrapTest(-5, 0,
                        ['green', 'blue', 'yellow', 'red',
                         'khaki', 'olive', 'tan', 'beige',
                         'teal', 'wheat', 'orchid', 'plum', ])

    def test_one_down_wrap(self):
        self.doWrapTest(0, 1,
                        ['plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow',
                         'beige', 'khaki', 'olive', 'tan', ])

    def test_two_down_wrap(self):
        self.doWrapTest(0, 2,
                        ['beige', 'khaki', 'olive', 'tan',
                         'plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow', ])

    def test_three_down_wrap(self):
        self.doWrapTest(0, 3, self.data)

    def test_four_down_wrap(self):
        self.doWrapTest(0, 4,
                        ['plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow',
                         'beige', 'khaki', 'olive', 'tan', ])

    def test_one_up_wrap(self):
        self.doWrapTest(0, -1,
                        ['beige', 'khaki', 'olive', 'tan',
                         'plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow', ])

    def test_two_up_wrap(self):
        self.doWrapTest(0, -2,
                        ['plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow',
                         'beige', 'khaki', 'olive', 'tan', ])

    def test_three_up_wrap(self):
        self.doWrapTest(0, -3, self.data)

    def test_four_up_wrap(self):
        self.doWrapTest(0, -4,
                        ['beige', 'khaki', 'olive', 'tan',
                         'plum', 'teal', 'wheat', 'orchid',
                         'red', 'green', 'blue', 'yellow', ])

    def test_down_right_wrap(self):
        self.doWrapTest(1, 1,
                        ['orchid', 'plum', 'teal', 'wheat',
                         'yellow', 'red', 'green', 'blue',
                         'tan', 'beige', 'khaki', 'olive'])

    def test_up_left_wrap(self):
        self.doWrapTest(-1, -1,
                        ['khaki', 'olive', 'tan', 'beige',
                         'teal', 'wheat', 'orchid', 'plum',
                         'green', 'blue', 'yellow', 'red'])
|
"""Helper functions"""
import numpy as np
import os
import pandas as pd
def TNTP_to_pandas(net_file, node_file, trips_file, flow_file=None):
    """
    Converts a set of TNTP files to panda dataframes.
    https://github.com/bstabler/TransportationNetworks
    Parameters
    ----------
    net_file : TNTP file {}_net.tntp
        TNTP net file
    node_file : TNTP file {}_node.tntp
        TNTP node file
    trips_file : TNTP file {}_trips.tntp
        TNTP trips file
    flow_file : TNTP file {}_flow.tntp
        Optional - TNTP flow file
    Returns
    -------
    pandas dataframe:
        pandas dataframe containing networkx edgelist
    pandas dataframe:
        pandas dataframe containing node positional data (x,y)
    pandas dataframe:
        pandas dataframe containing origin/destination data
    pandas dataframe:
        pandas dataframe containing the optimal solution
    """
    # TODO: not implemented yet — see the individual TNTP_*_to_pandas helpers
    # below. Currently this only logs and implicitly returns None.
    print('Convert ...')
def TNTP_net_to_pandas(filename, start_line, save=False):
    """
    Converts a TNTP net file to panda edge dataframe.
    https://github.com/bstabler/TransportationNetworks
    Parameters
    ----------
    filename : filename (with location)
        TNTP net file
    start_line : int
        Row index used as the csv header (metadata rows above are skipped).
    save : bool
        If True, also write the result next to the input as a .csv file.
    Returns
    -------
    pandas dataframe:
        pandas dataframe containing networkx edgelist
    """
    print('converting TNPT net file to pandas edge dataframe')
    df_net = pd.read_csv(filename, header=start_line, sep='\t')
    # clean up dataframe: strip padding from column names and drop the
    # decorative '~' and ';' columns of the TNTP format.
    df_net.columns = df_net.columns.str.strip()
    df_net.drop(['~', ';'], axis=1, inplace=True)
    # Extract and coerce the columns of interest; a/b/n are presumably the
    # BPR link-cost parameters and c the capacity — confirm against the
    # downstream consumer.
    s = [int(item) for item in df_net['init_node'].tolist()]
    t = [int(item) for item in df_net['term_node'].tolist()]
    a = np.array([item for item in df_net['free_flow_time'].tolist()], dtype=np.float64)
    b = np.array(df_net['b'].tolist(), dtype=np.float64)
    c = np.array(df_net['capacity'].tolist(), dtype=np.float64)
    n = np.array(df_net['power'].tolist(), dtype=np.float64)
    data = list(zip(s,t,a,b,c,n))
    df_edges = pd.DataFrame(data, columns =['source', 'target', 'a', 'b', 'c', 'n'])
    if save:
        df_edges.to_csv('{}.csv'.format(os.path.splitext(filename)[0]))
        print('Saved file to {}'.format('{}.csv'.format(os.path.splitext(filename)[0])))
    return df_edges
def TNTP_node_to_pandas(filename, save=False):
    """Convert a TNTP node file to a dataframe of (X, Y) indexed by node id."""
    print('converting TNPT node file to pandas node dataframe')
    df_nodes = pd.read_csv(filename, sep='\t')
    # clean up dataframe
    df_nodes.columns = df_nodes.columns.str.strip()
    df_nodes.drop([';'], axis=1, inplace=True)
    df_nodes = pd.DataFrame.from_dict(dict(zip(df_nodes['node'], zip(df_nodes['X'], df_nodes['Y']))), orient='index', columns =['X', 'Y'])
    # note that we want the node index to start at 0
    # NOTE(review): despite the comment above, no re-indexing is performed —
    # the index keeps whatever node ids the file uses (often 1-based). Confirm.
    if save:
        df_nodes.to_csv('{}.csv'.format(os.path.splitext(filename)[0]))
        print('Saved file to {}'.format('{}.csv'.format(os.path.splitext(filename)[0])))
    return df_nodes
def TNTP_trips_to_pandas(filename, save=False):
    """Parse a TNTP trips file into an origin x destination demand dataframe.

    Rows are origins (from "Origin N" section headers), columns are
    destinations, values are demands; missing pairs are filled with 0.
    """
    print('converting TNPT trips file to pandas dataframe')
    #df = pd.DataFrame()
    commodities_dict = {}
    with open(filename) as in_file:
        data = []
        data_bool = False
        for index, line in enumerate(in_file):
            #print(index)
            #print(line)
            # Skip the metadata header; assumes it occupies exactly the first
            # 5 lines — TODO confirm for files with longer metadata sections.
            if index > 4:
                if line[0]=='O':
                    # New "Origin N" section: flush the previous origin's row.
                    if data_bool and data_row:
                        commodities_dict[source_node] = data_row
                        data.append(data_row)
                    data_bool = True
                    data_row = {}
                    source_node = int(line.replace("Origin ", ""))
                else:
                    # Demand line of the form "dest : value; dest : value; ..."
                    line = line.strip()[:-1]
                    #print(line)
                    col_demand = [l.strip().split(':') for l in line.split(";")]
                    # NOTE(review): lines with a single "dest : value" entry
                    # (len == 1) are skipped by this guard — confirm intent.
                    if len(col_demand) > 1:
                        col_demand = [[float(c.strip()) for c in l] for l in col_demand]
                        for cd in col_demand:
                            data_row[cd[0]] = cd[1]
        # Flush the final origin section.
        if data_bool:
            data.append(data_row)
            commodities_dict[source_node] = data_row
    df_trips = pd.DataFrame.from_dict(commodities_dict, orient='index')
    df_trips.fillna(0, inplace=True)
    if save:
        df_trips.to_csv('{}.csv'.format(os.path.splitext(filename)[0]))
        print('Saved file to {}'.format('{}.csv'.format(os.path.splitext(filename)[0])))
    return df_trips
def TNTP_flow_to_pandas(file):
    # TODO: not implemented yet — currently only logs and implicitly
    # returns None.
    print('converting TNPT flow file to pandas dataframe')
def readTNTPMetadata(demand_filename):
    """
    Read metadata tags and values from a TNTP file, returning a dictionary whose
    keys are the tags (strings between the <> characters) and corresponding values.
    The last metadata line (reading <END OF METADATA>) is stored with a value giving
    the line number this tag was found in. You can use this to proceed with reading
    the rest of the file after the metadata.
    """
    with open(demand_filename, "r") as demand_file:
        lines = demand_file.read().splitlines()
    metadata = dict()
    lineNumber = 0
    for line in lines:
        lineNumber += 1
        # BUG FIX: str.strip() returns a new string; the result was discarded.
        line = line.strip()
        commentPos = line.find("~")
        if commentPos >= 0:  # strip comments
            line = line[:commentPos]
        if len(line) == 0:
            continue
        startTagPos = line.find("<")
        endTagPos = line.find(">")
        if startTagPos < 0 or endTagPos < 0 or startTagPos >= endTagPos:
            print("Error reading this metadata line, ignoring: '%s'" % line)
            # BUG FIX: skip the malformed line instead of falling through and
            # slicing with negative positions, which stored a garbage tag.
            continue
        metadataTag = line[startTagPos+1 : endTagPos]
        metadataValue = line[endTagPos+1:]
        if metadataTag == 'END OF METADATA':
            metadata['END OF METADATA'] = lineNumber
            return metadata
        metadata[metadataTag] = metadataValue.strip()
    print("Warning: END OF METADATA not found in file")
    return metadata
# no zones == no_nodes. e.g. sioux falls
def pandas_trips_to_TNTP(df_trips, tntp_filename, no_zones):
    """Write an origin/destination demand dataframe in TNTP trips format.

    Parameters
    ----------
    df_trips : pandas.DataFrame
        One column per origin; the index holds destinations, values demand.
    tntp_filename : str
        Path of the trips file to write.
    no_zones : int
        Number of zones recorded in the metadata header.
    """
    df_trips.fillna(0, inplace=True)
    with open(tntp_filename, 'w') as f:
        f.write("<NUMBER OF ZONES> {}\n".format(no_zones))
        f.write("<TOTAL OD FLOW> {}\n".format(df_trips.sum().sum()))
        # FIX: dropped a pointless .format() call on a literal with no
        # placeholders.
        f.write("<END OF METADATA>\n\n\n")
        # BUG FIX: DataFrame.iteritems() was removed in pandas 2.0; items()
        # is the long-standing equivalent (iterates columns). Same for
        # Series.iteritems() below.
        for key, value in df_trips.items():
            f.write("Origin {}\n".format(key))
            l_all = ["\t{} :\t{};".format(index, v) for index, v in value.items()]
            # TNTP convention: at most five destination entries per line.
            for l in [l_all[x:x+5] for x in range(0, len(l_all), 5)]:
                f.write("".join(l))
                f.write("\n")
            f.write("\n\n")
    return None
# this is only applicable for speed =0 and toll= 0 and length = free flow time.
# also number of zones = number of nodes
# see Sioux Falls
def pandas_net_to_TNTP(df_edges, tntp_filename, no_nodes, no_edges):
    """Write an edge dataframe (source, target, a, b, c, n) as a TNTP net file.

    Speed and toll are written as 0, link type as 1, and length equals the
    free-flow time 'a', per the simplifying assumptions noted above.
    """
    with open(tntp_filename, 'w') as f:
        # Metadata header (byte-identical to the original join()-built output).
        f.write("<NUMBER OF ZONES> {}\n\n".format(no_nodes))
        f.write("<NUMBER OF NODES> {}\n\n".format(no_nodes))
        f.write("<FIRST THRU NODE> {}\n\n".format(1))
        f.write("<NUMBER OF LINKS> {}\n".format(no_edges))
        f.write("<ORIGINAL HEADER>~\tInit node\tTerm node\tCapacity\tLength\t"
                "Free Flow Time\tB\tPower\tSpeed\tlimit\tToll\tType\t;\n"
                "<END OF METADATA>\n")
        f.write("\n\n\n")
        # Column-name comment row. BUG FIX: the original list was missing a
        # comma after "speed\t", silently relying on implicit string
        # concatenation with "toll\t"; the emitted text (no extra separator
        # between them) is preserved here on purpose.
        f.write("~\tinit_node\tterm_node\tcapacity\tlength\tfree_flow_time\t"
                "b\tpower\tspeed\ttoll\tlink_type\t;\n")
        for index, row in df_edges.iterrows():
            f.write("\t".join([
                "",                       # leading tab
                str(int(row['source'])),
                str(int(row['target'])),
                str(row['c']),            # capacity
                str(row['a']),            # length (== free-flow time here)
                str(row['a']),            # free-flow time
                str(row['b']),
                str(row['n']),            # power
                str(0),                   # speed
                str(0),                   # toll
                str(1),                   # link type
                ";",
            ]) + "\n")
    return None
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import urllib.request
# from tqdm import tqdm
# from pandas.util._validators import validate_bool_kwarg
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
# paso imports
from paso.base import pasoFunction, raise_PasoError, _array_to_string
from paso.base import pasoDecorators, _check_non_optional_kw, _dict_value
# from loguru import logger
import sys, os.path
__author__ = "Bruce_H_Cottman"
__license__ = "MIT License"
#
def _formats_supported(path):
    """Return the supported format whose extension terminates ``path``.

    Raises a paso error (via raise_PasoError) when no known format matches.
    """
    for format in Inputers._formats_.keys():
        if path.endswith(format):
            return format
    # BUG FIX: every other call site in this module calls raise_PasoError()
    # directly (it raises internally); prefixing it with `raise` attempted to
    # re-raise its return value (None), which is a TypeError.
    raise_PasoError("format of this file not supported: {}".format(path))
def _url_path_exists(url):
"""
Checks that a given URL is reachable.
:Parameters:
url: (str) url
Returns: (bool)
"""
request = urllib.request.Request(url)
request.get_method = lambda: "HEAD"
try:
urllib.request.urlopen(request)
return True
except urllib.request.HTTPError:
return False
def _inputer_exec(self, **kwargs):
# must always be data = ' train' or no dataset =
if self.dataset != "train" and ("train" not in kwargs):
raise_PasoError(
"dataset='{}' not recognized: in {} ".format(self.dataset, kwargs)
)
key = ["pre", "post"]
if key[0] in kwargs and kwargs[key[0]] != None:
for stmt in kwargs[key[0]]:
exec(stmt)
dfkey = "create-df"
if dfkey in kwargs and kwargs[dfkey] != None:
result = eval(kwargs[dfkey])
if key[1] in kwargs and kwargs[key[1]] != "None":
for stmt in kwargs[key[1]]:
exec(stmt)
return result
def _create_path(kw, dictnary, directory_path, default):
    """Prefix the dictionary's ``kw`` entry with directory_path, else default."""
    if kw not in dictnary:
        return default
    return directory_path + _dict_value(dictnary, kw, default)
# todo refactor this mess
def _inputer_cvs(self, **kwargs):
    """Read the csv datasets named in kwargs and return the one selected by
    ``self.dataset`` ('train', 'test' or 'sampleSubmission').

    Keywords: names (optional column names), directory_path (path prefix),
    and train / test / sampleSubmission (path fragments appended to it).

    NOTE(review): if self.dataset selects a section whose keyword was not
    supplied, the matching local (train/test/sampleSubmission) is unbound and
    the return raises NameError — original behavior, preserved; confirm.
    """
    kw = "names"
    self.names = _dict_value(kwargs, kw, [])
    kw = "directory_path"
    self.directory_path = _dict_value(kwargs, kw, "")
    kw = "train"
    if kw in kwargs:
        self.train_path = self.directory_path + kwargs[kw]
        # The train dataset may be remote, so also probe it as a URL.
        if os.path.exists(self.train_path) or _url_path_exists(self.train_path):
            if self.names != []:
                train = pd.read_csv(self.train_path, names=self.names)
            elif self.names == []:
                train = pd.read_csv(self.train_path)
        else:
            raise_PasoError(
                "Inputer train dataset path does not exist: {} or there might not be a directory_path:{}".format(
                    self.train_path, self.directory_path
                )
            )
    kw = "test"
    if kw in kwargs:
        self.test_path = self.directory_path + kwargs[kw]
        if os.path.exists(self.test_path):
            if self.names != []:
                test = pd.read_csv(self.test_path, names=self.names)
            elif self.names == []:
                test = pd.read_csv(self.test_path)
        else:
            raise_PasoError(
                "Inputer test dataset path does not exist: {}".format(self.test_path)
            )
    kw = "sampleSubmission"
    if kw in kwargs:
        self.sampleSubmission_path = self.directory_path + kwargs[kw]
        if os.path.exists(self.sampleSubmission_path):
            if self.names != []:
                sampleSubmission = pd.read_csv(
                    self.sampleSubmission_path, names=self.names
                )
            elif self.names == []:
                sampleSubmission = pd.read_csv(self.sampleSubmission_path)
        else:
            # BUG FIX: error message reported self.test_path (copy/paste);
            # report the sampleSubmission path instead.
            raise_PasoError(
                "Inputer sampleSubmission dataset path does not exist: {}".format(
                    self.sampleSubmission_path
                )
            )
    # no case in python
    if self.dataset == "train":
        return train
    elif self.dataset == "valid":
        # BUG FIX: the original returned the unbound name `valid` (NameError);
        # no valid dataset is ever read here, so fail with a clear error.
        raise_PasoError("dataset='valid' is not produced by the cvs inputer")
    elif self.dataset == "test":
        return test
    elif self.dataset == "sampleSubmission":
        return sampleSubmission
    else:
        raise_PasoError("dataset not recognized: {} ".format(self.dataset))
def _inputer_xls(self, **kwargs):
    # TODO: xls input is not implemented yet; returns None.
    return None
def _inputer_xlsm(self, **kwargs):
    # TODO: xlsm input is not implemented yet; returns None.
    return None
def _inputer_text(self, **kwargs):
    # TODO: plain-text input is not implemented yet; returns None.
    return None
def _inputer_image2d(self, **kwargs):
    # TODO: 2-D image input is not implemented yet; returns None.
    return None
def _inputer_image3d(self, **kwargs):
    # TODO: 3-D image input is not implemented yet; returns None.
    return None
### Inputer
class Inputers(pasoFunction):
    """
    class to input file or url that is cvs or zip(cvs)
    or an error will be raised.
    parameters: None
    keywords:
        input_path: (str) the data source source path name.
            The path can be url or local. Format must be csv or csv/zip.
        target: the dependent feature name of this data_set.
        drop: (list) list of feature names to drop from
            dataset, X,y are then extracted from dataset.
    attributes set:
        self.target: (str)
        self.input_path = input_path
    returns:
        dataset: (DataFrame) complete dataset input from data source.
    """

    # Supported file formats (value True == enabled).
    _formats_ = {
        "csv": True,
        "zip": True,
        "data": True,
        "sklearn.datasets": True,
        "yaml": True,
    }
    # Dispatch table: ontology "kind" name -> module-level inputer function.
    _inputer_ = {
        "exec": _inputer_exec,
        "cvs": _inputer_cvs,
        "xls": _inputer_xls,
        "xlsm": _inputer_xlsm,
        "text": _inputer_text,
        "image2D": _inputer_image2d,
        "image3D": _inputer_image3d,
    }
    # Dataset selectors accepted by transform(dataset=...).
    _datasets_available_ = [
        "train",
        "valid",
        "test",
        "sampleSubmission",
        "directory_path",
    ]

    @pasoDecorators.InitWrap()
    def __init__(self, **kwargs):
        """
        Parameters:
            filepath: (string)
            verbose: (boolean) (optional) can be set. Default:True
        Note:
        """
        super().__init__()
        self.input_data_set = False

    @staticmethod
    def inputers():
        """
        Parameters:
            None
        Returns:
            List of available inputer names.
        """
        return [k for k in Inputers._inputer_.keys()]

    @staticmethod
    def formats():
        """
        Parameters:
            None
        Returns:
            List of supported format names.
        """
        return [k for k in Inputers._formats_.keys()]

    @staticmethod
    def datasets():
        """
        List type of files available
        Parameters: None
        Returns: lists of datasets
        """
        return Inputers._datasets_available_

    @pasoDecorators.TTWrapNoArg(array=False)
    def transform(self, *args, **kwargs):
        # Todo:Rapids numpy
        """
        main method to input file or url,
        or an error will be raised.
        parameters: None
        keywords:
            input_path: (str) the data source source path name.
                The path can be url or local. Format must be csv or csv/zip.
            target: the dependent feature name of this data_set.
            drop: (list) list of feature names to drop from
                dataset, X,y are then extracted from dataset.
        attributes set:
            self.target: (str)
            self.input_path = input_path
        returns:
            dataset: (DataFrame) complete dataset input from data source.
        """
        # currently support only one inputer, very brittle parser
        kwa = "target"
        self.target = _dict_value(self.kind_name_kwargs, kwa, None)
        # NOTE(review): only the keyword name is passed to the check, not the
        # value just read — confirm _check_non_optional_kw's contract.
        _check_non_optional_kw(
            kwa, "Inputer: needs target keyword. probably not set in ontological file."
        )
        # currently just can only be in inputer/transformkwarg
        kwa = "dataset"
        self.dataset = _dict_value(kwargs, kwa, "train")
        # create instance of this particular learner
        # checks for non-optional keyword
        if self.kind_name not in Inputers._inputer_:
            raise_PasoError(
                "transform; no format named: {} not in Inputers;: {}".format(
                    self.kind_name, Inputers._inputer_.keys()
                )
            )
        # _formats_supported raises when the file's format is unknown.
        if _formats_supported(self.description_filepath):
            self.input_data_set = True
        return Inputers._inputer_[self.kind_name](self, **self.kind_name_kwargs)
### Splitters
class Splitters(pasoFunction):
    """
    Split X, y into train and test partitions using scikit-learn's
    train_test_split; the split keyword arguments come from the
    ontology-supplied kind_name_kwargs.
    Note:
    Warning:
    """

    @pasoDecorators.InitWrap()
    def __init__(self, **kwargs):
        """
        Parameters:
            filepath: (string)
            verbose: (boolean) (optional) can be set. Default:True
        Note:
        """
        super().__init__()
        self.inplace = False
        # BUG FIX: __init__ returned self, which makes Python raise
        # TypeError("__init__() should return None ...") on instantiation.

    @pasoDecorators.TTWrapXy(array=False)
    def transform(self, X, y, **kwargs):
        # Todo:Rapids numpy
        """
        Parameters:
            X, y: features and dependent target to split.
        Returns:
            X_train, X_test, y_train, y_test split from X, y.
        Raises:
        Note:
        """
        # If stratification was requested in the ontology kwargs, stratify
        # on the actual target array rather than the placeholder value.
        if "stratify" in self.kind_name_kwargs:
            self.kind_name_kwargs["stratify"] = y
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, **self.kind_name_kwargs
        )
        return X_train, X_test, y_train, y_test
###
|
# Copyright (c) 2003-2013 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: <NAME>
#
# Description:
# [MS-DTYP] Interface mini implementation
#
import random
from struct import pack, unpack
from impacket.dcerpc.v5.ndr import NDRULONG, NDRUHYPER, NDRUSMALL, NDRSHORT, NDRLONG, NDRPOINTER, NDRUniConformantArray, NDRUniFixedArray, NDR, NDRHYPER, NDRSMALL, NDRPOINTERNULL, NDRSTRUCT, NULL, NDRUSMALL, NDRBOOLEAN, NDRUSHORT, NDRFLOAT, NDRDOUBLEFLOAT
# Common Windows scalar typedefs expressed as NDR primitive types.
DWORD = NDRULONG
BOOL = NDRULONG
UCHAR = NDRUSMALL
SHORT = NDRSHORT

class LPDWORD(NDRPOINTER):
    # Pointer to a DWORD.
    referent = (
        ('Data', DWORD),
    )

class PSHORT(NDRPOINTER):
    referent = (
        ('Data', SHORT),
    )

class PBOOL(NDRPOINTER):
    referent = (
        ('Data', BOOL),
    )

class LPBYTE(NDRPOINTER):
    # Pointer to a conformant array of bytes.
    referent = (
        ('Data', NDRUniConformantArray),
    )
PBYTE = LPBYTE

# 2.2.4 BOOLEAN
BOOLEAN = NDRBOOLEAN
# 2.2.6 BYTE
BYTE = NDRUSMALL
# 2.2.7 CHAR
CHAR = NDRSMALL

class PCHAR(NDRPOINTER):
    referent = (
        ('Data', CHAR),
    )
class WIDESTR(NDRUniFixedArray):
    # Null-terminated UTF-16LE string stored as a fixed NDR array.
    def getDataLen(self, data):
        # Scan for the terminator. Searching three zero bytes (rather than
        # two) presumably compensates for the alignment of the 2-byte NUL
        # within the byte stream — confirm against the NDR marshalling.
        return data.find('\x00\x00\x00') + 3

    def __setitem__(self, key, value):
        if key == 'Data':
            # Store the encoded wire form; __getitem__ exposes the decoded one.
            self.fields[key] = value.encode('utf-16le')
            self.data = None        # force recompute
        else:
            return NDR.__setitem__(self, key, value)

    def __getitem__(self, key):
        if key == 'Data':
            return self.fields[key].decode('utf-16le')
        else:
            return NDR.__getitem__(self,key)
class STR(NDRSTRUCT):
    # NDR conformant-varying ANSI string (one byte per character).
    commonHdr = (
        ('MaximumCount', '<L=len(Data)'),
        ('Offset','<L=0'),
        ('ActualCount','<L=len(Data)'),
    )
    commonHdr64 = (
        ('MaximumCount', '<Q=len(Data)'),
        ('Offset','<Q=0'),
        ('ActualCount','<Q=len(Data)'),
    )
    structure = (
        ('Data',':'),
    )

    def dump(self, msg=None, indent=0):
        """Print the string contents; `indent` is accepted for interface
        compatibility with other NDR dump() methods but is unused here."""
        if msg is None: msg = self.__class__.__name__
        # FIX: removed the unused local `ind` (an indentation string was
        # computed from `indent` but never printed).
        if msg != '':
            print("%s" % (msg), end=' ')
        # Here just print the data
        print(" %r" % (self['Data']), end=' ')

    def __setitem__(self, key, value):
        if key == 'Data':
            self.fields[key] = value
            # Invalidate cached counts so they are recomputed from Data.
            self.fields['MaximumCount'] = None
            self.fields['ActualCount'] = None
            self.data = None        # force recompute
        else:
            return NDR.__setitem__(self, key, value)

    def getDataLen(self, data):
        # Byte length equals the unpacked ActualCount (one byte per char).
        return self["ActualCount"]
class LPSTR(NDRPOINTER):
    # Pointer to an ANSI string.
    referent = (
        ('Data', STR),
    )
class WSTR(NDRSTRUCT):
    # NDR conformant-varying wide string (UTF-16LE, two bytes per character;
    # the counts are in characters, hence the len(Data)/2 expressions).
    commonHdr = (
        ('MaximumCount', '<L=len(Data)/2'),
        ('Offset','<L=0'),
        ('ActualCount','<L=len(Data)/2'),
    )
    commonHdr64 = (
        ('MaximumCount', '<Q=len(Data)/2'),
        ('Offset','<Q=0'),
        ('ActualCount','<Q=len(Data)/2'),
    )
    structure = (
        ('Data',':'),
    )

    def dump(self, msg=None, indent=0):
        """Print the string contents; `indent` is accepted for interface
        compatibility with other NDR dump() methods but is unused here."""
        if msg is None: msg = self.__class__.__name__
        # FIX: removed the unused local `ind` (an indentation string was
        # computed from `indent` but never printed).
        if msg != '':
            print("%s" % (msg), end=' ')
        # Here just print the data
        print(" %r" % (self['Data']), end=' ')

    def getDataLen(self, data):
        # ActualCount is in characters; the wire length is in bytes.
        return self["ActualCount"] * 2

    def __setitem__(self, key, value):
        if key == 'Data':
            # Store the encoded wire form; __getitem__ exposes the decoded one.
            self.fields[key] = value.encode('utf-16le')
            # Invalidate cached counts so they are recomputed from Data.
            self.fields['MaximumCount'] = None
            self.fields['ActualCount'] = None
            self.data = None        # force recompute
        else:
            return NDR.__setitem__(self, key, value)

    def __getitem__(self, key):
        if key == 'Data':
            return self.fields[key].decode('utf-16le')
        else:
            return NDR.__getitem__(self,key)
class LPWSTR(NDRPOINTER):
    # NDR pointer to a Unicode string (WSTR).
    referent = (
        ('Data', WSTR),
    )
# Simple type aliases and their NDR pointer wrappers. The 2.2.x section
# numbers appear to reference the [MS-DTYP] specification.
# 2.2.5 BSTR
BSTR = LPWSTR
# 2.2.8 DOUBLE
DOUBLE = NDRDOUBLEFLOAT
class PDOUBLE(NDRPOINTER):
    referent = (
        ('Data', DOUBLE),
    )
# 2.2.15 FLOAT
FLOAT = NDRFLOAT
class PFLOAT(NDRPOINTER):
    referent = (
        ('Data', FLOAT),
    )
# 2.2.18 HRESULT
HRESULT = NDRLONG
class PHRESULT(NDRPOINTER):
    referent = (
        ('Data', HRESULT),
    )
# 2.2.19 INT
INT = NDRLONG
class PINT(NDRPOINTER):
    referent = (
        ('Data', INT),
    )
# 2.2.26 LMSTR
LMSTR = LPWSTR
# 2.2.27 LONG
LONG = NDRLONG
class LPLONG(NDRPOINTER):
    referent = (
        ('Data', LONG),
    )
PLONG = LPLONG
# 2.2.28 LONGLONG
LONGLONG = NDRHYPER
class PLONGLONG(NDRPOINTER):
    referent = (
        ('Data', LONGLONG),
    )
# 2.2.31 LONG64
LONG64 = NDRUHYPER
class PLONG64(NDRPOINTER):
    referent = (
        ('Data', LONG64),
    )
# 2.2.32 LPCSTR
LPCSTR = LPSTR
# 2.2.36 NET_API_STATUS
NET_API_STATUS = DWORD
# 2.3.2 GUID and UUID
class GUID(NDRSTRUCT):
    """16-byte GUID carried as a raw binary blob."""
    structure = (
        ('Data','16s=""'),
    )
    def getAlignment(self):
        # GUIDs are marshalled on 4-byte boundaries.
        return 4
class PGUID(NDRPOINTER):
    referent = (
        ('Data', GUID),
    )
# UUID is the same wire format as GUID.
UUID = GUID
PUUID = PGUID
# More simple type aliases and pointer wrappers (section numbers as above).
# 2.2.37 NTSTATUS
NTSTATUS = DWORD
# 2.2.45 UINT
UINT = NDRULONG
class PUINT(NDRPOINTER):
    referent = (
        ('Data', UINT),
    )
# 2.2.50 ULONG
ULONG = NDRULONG
class PULONG(NDRPOINTER):
    referent = (
        ('Data', ULONG),
    )
LPULONG = PULONG
# 2.2.54 ULONGLONG
ULONGLONG = NDRUHYPER
class PULONGLONG(NDRPOINTER):
    referent = (
        ('Data', ULONGLONG),
    )
# 2.2.57 USHORT
USHORT = NDRUSHORT
class PUSHORT(NDRPOINTER):
    referent = (
        ('Data', USHORT),
    )
# 2.2.59 WCHAR
WCHAR = WSTR
PWCHAR = LPWSTR
# 2.2.61 WORD
WORD = NDRUSHORT
class PWORD(NDRPOINTER):
    referent = (
        ('Data', WORD),
    )
LPWORD = PWORD
# 2.3.1 FILETIME
class FILETIME(NDRSTRUCT):
    # 64-bit timestamp split into two 32-bit halves.
    # NOTE(review): the high half is declared as signed LONG here, while
    # [MS-DTYP] shows DWORD for both halves — confirm against upstream before
    # changing, as NDR alignment/marshalling is identical either way.
    structure = (
        ('dwLowDateTime', DWORD),
        ('dwHighDateTime', LONG),
    )
class PFILETIME(NDRPOINTER):
    referent = (
        ('Data', FILETIME),
    )
# 2.3.3 LARGE_INTEGER
LARGE_INTEGER = NDRHYPER
class PLARGE_INTEGER(NDRPOINTER):
    referent = (
        ('Data', LARGE_INTEGER),
    )
# 2.3.5 LUID
class LUID(NDRSTRUCT):
    # Locally unique identifier: a 64-bit value as two 32-bit parts.
    structure = (
        ('LowPart', DWORD),
        ('HighPart', LONG),
    )
# 2.3.8 RPC_UNICODE_STRING
class RPC_UNICODE_STRING(NDRSTRUCT):
    # Here we're doing some tricks to make this data type
    # easier to use. It's exactly the same as defined. I changed the
    # Buffer name for Data, so users can write directly to the datatype
    # instead of writing to datatype['Buffer'].
    # The drawback is you cannot directly access the Length and
    # MaximumLength fields.
    # If you really need it, you will need to do it this way:
    # class TT(NDRCALL):
    #     structure = (
    #         ('str1', RPC_UNICODE_STRING),
    #     )
    #
    # nn = TT()
    # nn.fields['str1'].fields['MaximumLength'] = 30
    structure = (
        ('Length','<H=0'),
        ('MaximumLength','<H=0'),
        ('Data',LPWSTR),
    )
    def __setitem__(self, key, value):
        # Assigning a plain Python string auto-fills both length fields
        # (two bytes per UTF-16 character).
        if key == 'Data' and isinstance(value, NDR) is False:
            self['Length'] = len(value) * 2
            self['MaximumLength'] = len(value) * 2
        return NDRSTRUCT.__setitem__(self, key, value)
    def dump(self, msg=None, indent=0):
        if msg is None: msg = self.__class__.__name__
        ind = ' ' * indent
        if msg != '':
            print("%s" % (msg), end=' ')
        # NULL pointers — either an explicit NDRPOINTERNULL instance or a
        # pointer with ReferentID 0 — are printed as "NULL".
        if isinstance(self.fields['Data'], NDRPOINTERNULL):
            print(" NULL", end=' ')
        elif self.fields['Data']['ReferentID'] == 0:
            print(" NULL", end=' ')
        else:
            return self.fields['Data'].dump('',indent)
class PRPC_UNICODE_STRING(NDRPOINTER):
    referent = (
        ('Data', RPC_UNICODE_STRING),
    )
# 2.3.9 OBJECT_TYPE_LIST
ACCESS_MASK = DWORD
class OBJECT_TYPE_LIST(NDRSTRUCT):
    # Entry in an object-type list used for per-object access checks.
    structure = (
        ('Level', WORD),
        ('Remaining',ACCESS_MASK),
        ('ObjectType',PGUID),
    )
class POBJECT_TYPE_LIST(NDRPOINTER):
    referent = (
        ('Data', OBJECT_TYPE_LIST),
    )
# 2.4.2.3 RPC_SID
class DWORD_ARRAY(NDRUniConformantArray):
    # Conformant array of 32-bit little-endian values (SID subauthorities).
    item = '<L'
class RPC_SID_IDENTIFIER_AUTHORITY(NDRUniFixedArray):
    # Fixed 6-byte identifier-authority blob, byte-aligned (no padding).
    align = 1
    align64 = 1
    def getDataLen(self, data):
        return 6
class RPC_SID(NDRSTRUCT):
    """[MS-DTYP] 2.4.2.3 RPC_SID: a security identifier in NDR form."""
    structure = (
        ('Revision',NDRSMALL),
        ('SubAuthorityCount',NDRSMALL),
        ('IdentifierAuthority',RPC_SID_IDENTIFIER_AUTHORITY),
        ('SubAuthority',DWORD_ARRAY),
    )
    def getData(self, soFar=0):
        # Keep the count header in sync with the actual subauthority array.
        self['SubAuthorityCount'] = len(self['SubAuthority'])
        return NDRSTRUCT.getData(self, soFar)
    def fromCanonical(self, canonical):
        """Populate the SID from its canonical 'S-1-5-21-...' string form."""
        items = canonical.split('-')
        self['Revision'] = int(items[1])
        self['IdentifierAuthority'] = RPC_SID_IDENTIFIER_AUTHORITY()
        # The authority value occupies the last byte of the 6-byte blob.
        self['IdentifierAuthority'] = '\x00\x00\x00\x00\x00' + pack('B',int(items[2]))
        self['SubAuthorityCount'] = len(items) - 3
        # (removed an unused local accumulator `ans` the original created
        # here and never used)
        for i in range(self['SubAuthorityCount']):
            self['SubAuthority'].append(int(items[i + 3]))
    def formatCanonical(self):
        """Render the SID back to its canonical 'S-R-A-S1-...' string form."""
        # NOTE(review): ord() assumes IdentifierAuthority holds a str (as set
        # by fromCanonical); a bytes value would make ord() raise on Python 3.
        ans = 'S-%d-%d' % (self['Revision'], ord(self['IdentifierAuthority'][5]))
        for i in range(self['SubAuthorityCount']):
            ans += '-%d' % self['SubAuthority'][i]
        return ans
class PRPC_SID(NDRPOINTER):
    referent = (
        ('Data', RPC_SID),
    )
# Windows APIs refer to SIDs through this pointer type.
PSID = PRPC_SID
# 2.4.3 ACCESS_MASK
# Generic and standard access-right bits per the Windows SDK / [MS-DTYP].
ACCESS_MASK = DWORD
GENERIC_READ = 0x80000000
# BUGFIX: GENERIC_WRITE was 0x4000000 (one zero short); the documented
# value is 0x40000000.
GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
MAXIMUM_ALLOWED = 0x02000000
ACCESS_SYSTEM_SECURITY = 0x01000000
SYNCHRONIZE = 0x00100000
WRITE_OWNER = 0x00080000
WRITE_DACL = 0x00040000
READ_CONTROL = 0x00020000
DELETE = 0x00010000
# 2.4.5.1 ACL--RPC Representation
class ACL(NDRSTRUCT):
    # Access-control-list header; the ACEs themselves follow separately.
    # Sbz1/Sbz2 are "should be zero" padding fields.
    structure = (
        ('AclRevision',NDRSMALL),
        ('Sbz1',NDRSMALL),
        ('AclSize',NDRSHORT),
        ('AceCount',NDRSHORT),
        ('Sbz2',NDRSHORT),
    )
class PACL(NDRPOINTER):
    referent = (
        ('Data', ACL),
    )
# 2.4.6.1 SECURITY_DESCRIPTOR--RPC Representation
class SECURITY_DESCRIPTOR(NDRSTRUCT):
    # Self-relative security descriptor: owner/group SIDs plus the system
    # (Sacl) and discretionary (Dacl) access control lists.
    structure = (
        ('Revision',UCHAR),
        ('Sbz1',UCHAR),
        ('Control',USHORT),
        ('Owner',PSID),
        ('Group',PSID),
        ('Sacl',PACL),
        ('Dacl',PACL),
    )
# 2.4.7 SECURITY_INFORMATION
# Bit flags selecting which parts of a security descriptor to query/set.
OWNER_SECURITY_INFORMATION = 0x00000001
GROUP_SECURITY_INFORMATION = 0x00000002
DACL_SECURITY_INFORMATION = 0x00000004
SACL_SECURITY_INFORMATION = 0x00000008
LABEL_SECURITY_INFORMATION = 0x00000010
UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000
UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000
PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000
PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000
ATTRIBUTE_SECURITY_INFORMATION = 0x00000020
SCOPE_SECURITY_INFORMATION = 0x00000040
BACKUP_SECURITY_INFORMATION = 0x00010000
SECURITY_INFORMATION = DWORD
class PSECURITY_INFORMATION(NDRPOINTER):
    referent = (
        ('Data', SECURITY_INFORMATION),
    )
|
# USAGE
# python server.py --prototxt MobileNetSSD_deploy.prototxt --model MobileNetSSD_deploy.caffemodel --montageW 2 --montageH 2

# Hub-side monitor: receives frames from multiple Raspberry Pi clients via
# ImageZMQ, runs MobileNet-SSD detection on each frame, annotates it, and
# keeps a montage "dashboard" of the latest frame per client.

# import the necessary packages
from imutils import build_montages
from datetime import datetime
import numpy as np
import imagezmq
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
    help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
    help="minimum probability to filter weak detections")
ap.add_argument("-mW", "--montageW", required=True, type=int,
    help="montage frame width")
ap.add_argument("-mH", "--montageH", required=True, type=int,
    help="montage frame height")
args = vars(ap.parse_args())

# initialize the ImageHub object
imageHub = imagezmq.ImageHub()

# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the consider set (class labels we care about and want
# to count), the object count dictionary, and the frame dictionary
CONSIDER = set(["dog", "person", "car"])
objCount = {obj: 0 for obj in CONSIDER}
frameDict = {}

# initialize the dictionary which will contain information regarding
# when a device was last active, then store the last time the check
# was made was now
lastActive = {}
lastActiveCheck = datetime.now()

# stores the estimated number of Pis, active checking period, and
# calculates the duration seconds to wait before making a check to
# see if a device was active
ESTIMATED_NUM_PIS = 4
ACTIVE_CHECK_PERIOD = 10
ACTIVE_CHECK_SECONDS = ESTIMATED_NUM_PIS * ACTIVE_CHECK_PERIOD

# assign montage width and height so we can view all incoming frames
# in a single "dashboard"
mW = args["montageW"]
mH = args["montageH"]

# BUGFIX: the original read the keyboard exactly once here and never again,
# so the `q`-to-quit check at the bottom of the loop could never fire; the
# key is now polled on every loop iteration instead.
key = -1
print("[INFO] detecting: {}...".format(", ".join(obj for obj in
    CONSIDER)))

# start looping over all the frames
while True:
    # receive RPi name and frame from the RPi and acknowledge
    # the receipt
    (rpiName, frame) = imageHub.recv_image()
    imageHub.send_reply(b'OK')

    # if a device is not in the last active dictionary then it means
    # that its a newly connected device
    if rpiName not in lastActive.keys():
        print("[INFO] receiving data from {}...".format(rpiName))

    # record the last active time for the device from which we just
    # received a frame
    lastActive[rpiName] = datetime.now()

    # resize the frame to have a maximum width of 400 pixels, then
    # grab the frame dimensions and construct a blob
    frame = imutils.resize(frame, width=400)
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
        0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()

    # reset the object count for each object in the CONSIDER set
    objCount = {obj: 0 for obj in CONSIDER}

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # extract the index of the class label from the
            # detections
            idx = int(detections[0, 0, i, 1])

            # check to see if the predicted class is in the set of
            # classes that need to be considered
            if CLASSES[idx] in CONSIDER:
                # increment the count of the particular object
                # detected in the frame
                objCount[CLASSES[idx]] += 1

                # compute the (x, y)-coordinates of the bounding box
                # for the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the bounding box around the detected object on
                # the frame
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                    (255, 0, 0), 2)

    # draw the sending device name on the frame
    cv2.putText(frame, rpiName, (10, 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    # draw the object count on the frame
    label = ", ".join("{}: {}".format(obj, count) for (obj, count) in
        objCount.items())
    cv2.putText(frame, label, (10, h - 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255,0), 2)

    # update the new frame in the frame dictionary
    frameDict[rpiName] = frame

    # build a montage using images in the frame dictionary
    montages = build_montages(frameDict.values(), (w, h), (mW, mH))

    # display the montage(s) on the screen
    # for (i, montage) in enumerate(montages):
    #cv2.imshow("Home pet location monitor ({})".format(i),
    # montage)

    # detect any keypresses (polled every frame; returns -1/255 when no
    # HighGUI window is open, which is harmless)
    key = cv2.waitKey(1) & 0xFF

    # if current time *minus* last time when the active device check
    # was made is greater than the threshold set then do a check
    if (datetime.now() - lastActiveCheck).seconds > ACTIVE_CHECK_SECONDS:
        # loop over all previously active devices
        for (rpiName, ts) in list(lastActive.items()):
            # remove the RPi from the last active and frame
            # dictionaries if the device hasn't been active recently
            if (datetime.now() - ts).seconds > ACTIVE_CHECK_SECONDS:
                print("[INFO] lost connection to {}".format(rpiName))
                lastActive.pop(rpiName)
                frameDict.pop(rpiName)

        # set the last active check time as current time
        lastActiveCheck = datetime.now()

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
# Source: swghosh/machine-config-operator — hack/get-mcd-nodes.py
#!/usr/bin/env python3
# This is a simple script that grabs all the cluster nodes and associates them
# with a given MCD pod and outputs their roles. There is probably an easier way
# to do this that I am too naïve to be aware of :). Running this script yields
# the following output:
#
# Current MCD Pods:
# machine-config-daemon-9px26 ip-10-0-161-151.ec2.internal worker
# machine-config-daemon-cpt8q ip-10-0-130-41.ec2.internal master
# machine-config-daemon-jnjx4 ip-10-0-137-167.ec2.internal worker
# machine-config-daemon-klclf ip-10-0-152-65.ec2.internal master
# machine-config-daemon-kml9h ip-10-0-171-232.ec2.internal master
# machine-config-daemon-t6v5j ip-10-0-155-187.ec2.internal worker
import json
import os
import shutil
import subprocess
import sys
def run_oc_cmd_json(oc_cmd):
    """Run an arbitrary oc command and return a dictionary with JSON from the
    output.

    Args:
        oc_cmd (str): space-separated command, e.g. "oc get nodes".

    Returns:
        dict: the parsed JSON from the command's stdout.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
            (Previously a failing command fell through to an opaque
            JSONDecodeError while parsing the empty stdout.)
    """
    oc_cmd = oc_cmd.split(" ")
    oc_cmd.append("--output=json")
    # check=True surfaces command failures directly instead of failing later
    # during JSON parsing
    cmd = subprocess.run(oc_cmd, capture_output=True, check=True)
    return json.loads(cmd.stdout)
def get_max_len(in_string, max_len):
    """Return the running maximum of string widths.

    Gives back len(in_string) when it exceeds max_len, otherwise max_len —
    used to compute column widths for aligned output.
    """
    return max(len(in_string), max_len)
def can_run():
    """Check the script's prerequisites.

    Requires $KUBECONFIG to be set, to point at an existing file, and the
    `oc` binary to be on $PATH.

    Returns:
        bool: True when all prerequisites are met, False otherwise.
    """
    kubeconfig = os.environ.get("KUBECONFIG")
    if not kubeconfig:
        print("ERROR: Expected to find $KUBECONFIG")
        return False
    if not os.path.exists(kubeconfig):
        print("ERROR: No kubeconfig found at", kubeconfig)
        return False
    if not shutil.which("oc"):
        print("ERROR: 'oc' command missing from your $PATH")
        # BUGFIX: the original fell through to `return True` here, reporting
        # success even though `oc` is missing.
        return False
    return True
def main():
    """Associate every MCD pod with its node and print pod/node/roles rows."""
    if not can_run():
        sys.exit(1)

    # Fetch every machine-config-daemon pod in the MCO namespace
    mcd_pods = run_oc_cmd_json("oc get pods -n openshift-machine-config-operator -l k8s-app=machine-config-daemon")

    # Index the cluster nodes by name for O(1) lookup per pod
    nodes_by_name = {node["metadata"]["name"]: node
        for node in run_oc_cmd_json("oc get nodes")["items"]}

    rows = []
    widest_node = 0
    widest_pod = 0
    for pod in mcd_pods["items"]:
        pod_name = pod["metadata"]["name"]
        node_name = pod["spec"]["nodeName"]
        # The node this MCD pod is scheduled on
        node = nodes_by_name[node_name]
        # Track the widest pod/node names so the output columns line up
        widest_pod = get_max_len(pod_name, widest_pod)
        widest_node = get_max_len(node_name, widest_node)
        # Lazily derive the node roles from its node-role labels
        roles = (label.split("/")[1]
            for label in node["metadata"]["labels"].keys()
            if "node-role.kubernetes.io" in label)
        rows.append((pod_name, node_name, ','.join(roles)))

    # Column-aligned output template
    tmpl = "{: <%s}\t{: <%s}\t{: <6}" % (widest_pod, widest_node)
    for row in rows:
        print(tmpl.format(*row))
# Script entry point
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
#from pyramid.arima import auto_arima
import numpy as np
import logging
import sys
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf
import matplotlib.pylab as plt
#from fbprophet import Prophet
#from tbats import BATS, TBATS
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
def getClosest_DateCode(ref_data_seg, current_dateCode, last_year_date):
    """
    This function returns the closest day with the same code according to last year reference data
    :param ref_data_seg: reference data for one segment, with 'FECHA' (date) and 'COD_LABORALIDAD' (day-type code) columns
    :param current_dateCode: code into forecast date
    :param last_year_date: last year date (reference data)
    :return: day (date): closest day
    """
    i = 0
    first = True
    code1 = 0
    code2 = 0
    day_plus = None
    day_minus = None
    # Only search when the reference data actually covers last year's date
    if last_year_date.year == ref_data_seg['FECHA'].iloc[0].year:
        # Walk outward one day at a time in both directions (day_plus forward,
        # day_minus backward) until either direction hits a matching code.
        while((code1!=current_dateCode) & (code2 != current_dateCode)):
            if first: # TODO: refractor this part of code and put at the begining of the function
                # Seed both cursors on the starting date itself
                code1 = ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == last_year_date]['COD_LABORALIDAD'].item()
                first = False
                i = i+1
                day_plus = pd.to_datetime(last_year_date)
                day_minus = pd.to_datetime(last_year_date)
            else:
                try:
                    day_plus = day_plus + timedelta(days=i)
                    if (day_plus.year == last_year_date.year):
                        if len(ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == day_plus]['COD_LABORALIDAD'].index) == 0:
                            # Day missing from the reference data: fake a match
                            # to stop searching in this direction
                            code1 = current_dateCode
                        else:
                            code1 = ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == day_plus]['COD_LABORALIDAD'].iloc[0]
                    day_minus = day_minus - timedelta(days=i)
                    if(day_minus.year == last_year_date.year):
                        if len(ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == day_minus]['COD_LABORALIDAD'].index)==0:
                            code2=current_dateCode
                        else:
                            code2 = ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == day_minus]['COD_LABORALIDAD'].iloc[0]
                except OverflowError as err:
                    # Date arithmetic ran off the calendar: fall back to the
                    # same day last year
                    print("Not found day with same code in last year data, using the same day")
                    return last_year_date
        # Prefer the backward match when both directions matched
        if code2==current_dateCode:
            return day_minus
        else:
            return day_plus
    else:
        logging.error("No reference data available for the descriptive model")
        sys.exit(0)
def descriptive_model(ref_data_seg, current_date, segment_id, current_dateCode, total_vehiclesBuffer, smooth_mode, forecast_horizon):
    """
    Forecast by replaying last year's traffic for the closest comparable day.
    :param ref_data_seg: reference data for this segment (collected anual data)
    :param current_date: this is the current date. The starting point to make forecast
    :param segment_id: segment_id selected in forecast (forecast are made by segment); unused here
    :param current_dateCode: code into forecast date
    :param total_vehiclesBuffer: n old values (before forecast date); unused here
    :param smooth_mode: type of smoothing method; unused here
    :param forecast_horizon: number of minutes of predictions to be made
    :return: predictions (array): the forecast values
    """
    last_year_date= current_date - relativedelta(years=1)
    if len(ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) == last_year_date].index) == 0: # NO DATA AVAILABLE FOR THAT DAY
        closest_ref_date=ref_data_seg.loc[pd.to_datetime(ref_data_seg['FECHA']) >= last_year_date]['FECHA'].iloc[0] # get first available
    else:
        # Find the nearest day sharing the same day-type code
        closest_ref_date=getClosest_DateCode(ref_data_seg, current_dateCode, last_year_date)
    # Slice forecast_horizon minutes of reference data starting at the chosen day
    predictions = ref_data_seg.loc[(pd.to_datetime(ref_data_seg['FECHA'])>=closest_ref_date)&(pd.to_datetime(ref_data_seg['FECHA'])<=(closest_ref_date+ timedelta(minutes=forecast_horizon)))]
    return predictions['TOTAL_VEHICULOS'].values
def stats_model(reference_data, current_date, segment_id, current_dateCode, total_vehiclesBuffer, smooth_mode, forecast_horizon, algo_name):
    """
    Select reference values to feed a statistical model.
    :param reference_data: it could be the collected anual data, the collected anual data using the monthly code average
    :param current_date: this is the current date. The starting point to make forecast
    :param segment_id: segment_id selected in forecast (forecast are made by segment)
    :param current_dateCode: code into forecast date
    :param total_vehiclesBuffer: n old values (before forecast date); unused here
    :param smooth_mode: type of smoothing method; unused here
    :param forecast_horizon: number of predictions to be made; unused here
    :param algo_name: selects the filtering strategy ('fbprophet' or other)
    :return: predictions (list): a list with the forecast values
    """
    predictions=[]
    if algo_name=='fbprophet':
        # Minute-of-month comparison: keep all samples at or after current_date
        predictions = reference_data.loc[(reference_data.MES == current_date.month) & (reference_data.ID_SEGMENT == segment_id) & ((reference_data.DAY*24*60)+(reference_data.HOUR*60 + reference_data.MINUTE) >=((current_date.day*24*60)+current_date.hour*60 +current_date.minute))]['TOTAL_VEHICULOS'].values
    else:
        # Filter by month, segment and day-type code
        ref_data = reference_data.loc[(reference_data.MES==current_date.month)&(reference_data.ID_SEGMENT==segment_id)&(reference_data.COD_LABORALIDAD==current_dateCode)]
        if current_date.hour >=22: # circle hour
            # Wrap past midnight: take the rest of the day plus 00:00-02:59
            predictions=[]
            predictions.extend(ref_data.loc[(pd.to_datetime(ref_data['TIME']).dt.hour * 60 + (pd.to_datetime(ref_data['TIME']).dt.minute)) >= (current_date.hour * 60 + current_date.minute)]['TOTAL_VEHICULOS'].values)
            predictions.extend(ref_data.loc[(pd.to_datetime(ref_data['TIME']).dt.hour >= 0) & (pd.to_datetime(ref_data['TIME']).dt.hour <= 2)]['TOTAL_VEHICULOS'].values) # take 2 hours more to complete
        else:
            predictions = ref_data.loc[(pd.to_datetime(ref_data['TIME']).dt.hour*60+(pd.to_datetime(ref_data['TIME']).dt.minute))>=(current_date.hour*60+current_date.minute)]['TOTAL_VEHICULOS'].values
    return predictions
def auto_arima_model(data):
    """Placeholder for automatic ARIMA parameter search.

    The pyramid/auto_arima import is commented out at the top of the module,
    so no model is built and None is returned.

    Args:
        data (dataframe): The training dataset (currently unused).

    Returns:
        None: the trained model, once auto_arima is re-enabled.
    """
    # Former implementation, kept for reference until the dependency returns:
    # stepwise_model = auto_arima(data, start_p=2, start_q=2,
    #                             max_p=3, max_q=3, m=96,
    #                             start_P=0, seasonal=True,
    #                             d=1, D=1, trace=True,
    #                             error_action='ignore',
    #                             suppress_warnings=True,
    #                             stepwise=True)
    return None
def arima_model(train):
    """Build an (untrained) seasonal ARIMA model for the series.

    Args:
        train (dataframe): The training dataset used to train the model.

    Returns:
        model: the SARIMAX model instance (fit separately by the caller).
    """
    # Seasonal period 96 corresponds to one day of 15-minute samples.
    # A plain ARIMA(96, 0, 1) alternative was discarded in favour of SARIMAX.
    return sm.tsa.statespace.SARIMAX(
        train,
        trend='ct',
        order=(1, 1, 1),
        seasonal_order=(1, 1, 1, 96),
        enforce_stationarity=True,
        enforce_invertibility=False,
    )
def prophet_model(df_training):
    """ This method train an FBPROPHET model using the well-known developed pkg by facebook called fbprophet.
    See https://facebook.github.io/prophet/docs/quick_start.html for implementation details

    Args:
        df_training (dataframe): The training dataset used to train the model
            (mutated in place: 'ds' and 'y' columns are added)

    Raises:
        NotImplementedError: always, while the fbprophet import is disabled
            at the top of the module.
    """
    # Prepare the frame in Prophet's expected (ds, y) format
    df_training['ds'] = df_training.index
    df_training['y'] = df_training['TOTAL_VEHICULOS']
    train_cols = df_training[['ds', 'y']]
    # BUGFIX: the original set `model = None` (Prophet import is commented
    # out) and then called model.fit(train_cols), which always raised a
    # confusing AttributeError. Fail explicitly instead.
    raise NotImplementedError(
        "fbprophet support is disabled; re-enable the Prophet import to use prophet_model")
def holtwinters_exp_smoothing_model(train):
    """ This method train an Exponential Smoothing model (Holt-Winters)
    See https://otexts.com/fpp2/holt-winters.html for implementation details

    Note: unlike the other model builders in this module, this one fits the
    model immediately and returns the fitted result.

    Args:
        train (array-like): The training dataset used to train the model

    Returns:
        model: The trained (fitted) model
    """
    # seasonal_periods=96: one day of 15-minute observations; additive seasonality
    ses_model = ExponentialSmoothing(np.asarray(train), seasonal_periods=int(96), seasonal='add').fit()
    return ses_model
def tbats_model():
    """Placeholder for a TBATS model.

    The tbats import is commented out at the top of the module, so None is
    returned instead of a model.

    Returns:
        None: the model instance, once the tbats dependency is re-enabled.
    """
    # Former implementation: TBATS(seasonal_periods=[14, 30.5])
    return None
def seasonal_decompose_model(train):
    """ Performns a seasonal decomposition of the training dataset passed as argument program

    Args:
        train (dataframe): must expose a ``Count`` column

    Returns:
        bool: The return value. True for success, False otherwise
    """
    sm.tsa.seasonal_decompose(train.Count).plot()
    # NOTE: the original also computed sm.tsa.stattools.adfuller(train.Count)
    # into an unused local; the dead computation has been removed.
    plt.show()
    return True
def autocorrelation_function(train):
    """ Plots the autocorrelation from the training dataset

    Args:
        train (dataframe): The training dataset used to train the model

    Returns:
        bool: The return value. True for success, False otherwise
    """
    #https://www.vinta.com.br/blog/2018/understanding-time-series-forecasting-python/
    # Log-transform to stabilise the variance before computing the ACF
    data = np.log(train)
    lac_acf = acf(data, nlags=40)
    plt.figure(figsize=(15, 5))
    plt.subplot(121)
    plt.stem(lac_acf)
    # Zero reference line for reading significance of the lags
    plt.axhline(y=0, linestyle='-', color='black')
    plt.xlabel('Lag')
    plt.ylabel('ACF')
    plt.show()
    return True
|
import networkx as nx
import copy
import nltk
import subprocess
import sys
def single_cluster_modularityOV(graph, Clusters, f, nCluster):#, resultPosition):
    """Overlapping-community modularity contribution of cluster `nCluster`.

    Each edge endpoint's contribution is divided by f[v] (the number of
    clusters node v belongs to, see calc_f) so that shared nodes count
    fractionally. Returns (E_in / E) - (((2*E_in + E_out) / (2*E)) ** 2).
    """
    E_in = 0   # weighted intra-cluster edge mass (halved: each edge seen twice)
    E_out = 0  # weighted mass of edges leaving the cluster
    E = 0      # total edge weight of the graph
    for e in graph.edges():
        E += graph[e[0]][e[1]]['weight']
    for v in Clusters[nCluster]:
        for e in graph[v]:
            # A neighbour may sit in several clusters; credit each membership
            for c in Clusters:
                if e in c:
                    if c == Clusters[nCluster]:
                        E_in += 1/f[v] * 1/f[e] * graph[v][e]['weight'] / 2
                    else:
                        E_out += 1/f[v] * 1/f[e] * graph[v][e]['weight']
    #thread_result[resultPosition] = (E_in / E) - ((((2*E_in) + E_out)/(2*E))**2)
    return (E_in / E) - ((((2*E_in) + E_out)/(2*E))**2)
def calc_f(graph, Clusters):
    """Return, for every node of the graph, how many clusters contain it.

    Nodes that belong to no cluster get a count of 1 so the value can be
    safely used as a divisor.
    """
    membership = {}
    for node in graph.nodes():
        hits = sum(1 for cluster in Clusters if node in cluster)
        membership[node] = max(hits, 1)
    return membership
def merge_Clusters(Clusters, i, j, f):
    """Merge cluster j into cluster i in place and drop cluster j.

    Nodes present in both clusters have their membership count in f
    decremented (never below 1); nodes only in j are appended to i.
    Returns the mutated (Clusters, f) pair.
    """
    target = Clusters[i]
    for node in Clusters[j]:
        if node in target:
            # Shared node: it now belongs to one fewer cluster
            if f[node] > 1:
                f[node] -= 1
        else:
            target.append(node)
    Clusters.remove(Clusters[j])
    return Clusters, f
###################------------ MERGE SMALL CLUSTERS INTO LARGER ONES -----------------######################3
def reduceClusters(g, Clusters, nFinalClusters):
    """Greedily merge clusters until only nFinalClusters remain.

    For each cluster j (taken from the tail of the list) the merge target i
    is the one minimising mod(i) + mod(j) - mod(i U j), i.e. the merge that
    sacrifices the least overlapping modularity.
    """
    f = calc_f(g, Clusters)
    for j in range(len(Clusters)-1, nFinalClusters-1, -1):
        better_i_value = 1000  # sentinel larger than any modularity delta
        better_i = -1
        #print('Merging cluster', j)
        index_max = nFinalClusters
        if j < nFinalClusters:
            index_max = j-1
        for c,i in zip(Clusters[:index_max], range(index_max)):
            mod_i = single_cluster_modularityOV(g, Clusters, f, i)
            mod_j = single_cluster_modularityOV(g, Clusters, f, j)
            # Evaluate the candidate merge on deep copies so the real
            # Clusters/f stay untouched until the best target is chosen
            mod_iUj = single_cluster_modularityOV(g, *merge_Clusters(copy.deepcopy(Clusters), i, j, copy.deepcopy(f)), i)
            if mod_i + mod_j - mod_iUj < better_i_value:
                better_i = i
                better_i_value = mod_i + mod_j - mod_iUj
            #print('Analyzing option', i)
            #print(better_i_value)
        merge_Clusters(Clusters, better_i, j, f)
    return Clusters
def clusterRelationGraph(g, Clusters):
    """Build a graph whose nodes are clusters and whose edge weights are the
    membership-weighted edge mass between (and within) clusters.

    Returns (cluster_relation_graph, maxWeight, minWeight).
    """
    f = calc_f(g, Clusters)
    connections = []
    #init connections
    # connections[c][c2] accumulates the weighted edge mass from cluster c to c2
    for c in Clusters:
        connection = []
        for c2 in Clusters:
            connection.append(0)
        connections.append(connection)
    #calc connections
    for e in g.edges():
        for c in range(len(Clusters)):
            if e[0] in Clusters[c]:
                for c2 in range(len(Clusters)):
                    if e[1] in Clusters[c2]:
                        # Scale by 1/f at each endpoint so overlapping nodes
                        # contribute fractionally
                        connections[c][c2] += (1/f[e[0]]) * (1/f[e[1]]) * g[e[0]][e[1]]['weight']
                        # NOTE(review): leftover debug print — runs once per
                        # edge/cluster pair and floods stdout on large graphs
                        print('\n\n', c, c2, connections[c][c2], f[e[0]], f[e[1]], g[e[0]][e[1]]['weight'])
    #for c in range(len(Clusters)):
    # for c2 in range(len(Clusters)):
    cluster_relation_graph = nx.Graph()
    id_Cluster = 0
    # One node per cluster; 'peso' stores the cluster size
    for n in connections:
        cluster_relation_graph.add_node(id_Cluster, peso=len(Clusters[id_Cluster]))
        id_Cluster += 1
    for c1 in range(len(connections)):
        for c2 in range(len(connections)):
            # Undirected graph: (c1,c2) and (c2,c1) fold into one edge weight
            if cluster_relation_graph.has_edge(c1,c2):
                cluster_relation_graph[c1][c2]['weight'] += connections[c1][c2]
            else:
                cluster_relation_graph.add_edge(c1,c2, weight=connections[c1][c2])
    # Track the extreme edge weights for the caller (e.g. for normalisation)
    maxWeight = 0
    minWeight = sys.maxsize
    for e in cluster_relation_graph.edges():
        if cluster_relation_graph[e[0]][e[1]]['weight'] > maxWeight:
            maxWeight = cluster_relation_graph[e[0]][e[1]]['weight']
        if cluster_relation_graph[e[0]][e[1]]['weight'] < minWeight:
            minWeight = cluster_relation_graph[e[0]][e[1]]['weight']
    return cluster_relation_graph, maxWeight, minWeight
################################################################### KEYPHRASE EXTRACTION ########################################################
def nodeRank(g):
    """Rank the graph's nodes by degree centrality, highest first.

    Returns:
        list[tuple]: (node, centrality) pairs sorted descending by
        (centrality, node).
    """
    centrality = nx.degree_centrality(g)
    rank = {}
    for node in centrality:
        # BUGFIX: the original indexed r[str(p)], which raises KeyError for
        # any graph whose nodes are not strings; index by the node itself.
        rank[node] = centrality[node]
    rank = sorted(rank.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    return rank
def takeSecond(elem):
    """Sort key helper: the second item of a pair (its score/weight)."""
    return elem[1]
def keyPhrasesCompilation(keyWords, g, g2, dictionaryCode,lenght,totalWords):
    """Combine the top keywords into ranked single and compound keyphrases.

    :param keyWords: (code, weight) pairs, already ranked best-first
    :param g: directed graph used for in/out degree normalisation
    :param g2: graph holding pairwise co-occurrence weights
    :param dictionaryCode: maps word codes back to their surface form
    :param lenght: number of top keywords to consider
    :param totalWords: total token count (scales the co-occurrence threshold)
    :return: merged list of [phrase, score], best first
    """
    keyphrases_dict = {code : value for code, value in keyWords}
    coumpound_keyphrases = []
    #identify compound keyphrases
    for k in keyWords[:lenght]:
        for k2 in keyWords[:lenght]:
            # Pair qualifies only with enough co-occurrences (threshold grows
            # with corpus size) and a dominant k -> k2 direction
            if g2.has_edge(k[0], k2[0]) and g2[k[0]][k2[0]]['weight'] >= int(totalWords / 1000) + 2: #verify occurrences
                if g2.has_edge(k2[0], k[0]) == False or g2[k2[0]][k[0]]['weight'] < g2[k[0]][k2[0]]['weight']:
                    weight = g.out_degree(k[0], weight='weight') + g.in_degree(k2[0], weight='weight') #normalization factor | w(Out(i)) + w(In(j))
                    phrase = [k[0] + ',' + k2[0], g[k[0]][k2[0]]['weight'] / weight] #weight compound keyphrase | NIE_{i,j}
                    if phrase not in coumpound_keyphrases:
                        coumpound_keyphrases.append(phrase)
    coumpound_keyphrases = sorted(coumpound_keyphrases, key=lambda kv: (kv[1], kv[0]), reverse=True)
    keyphrases_weight = [t[1] for t in coumpound_keyphrases]
    keyphrases_weight_norm = [float(i)/sum(keyphrases_weight) for i in keyphrases_weight] #normalize NIE | NIE_{i,j} / (sum_all(NIE))
    keyphrases = [t for t in coumpound_keyphrases]
    for kp, n in zip(keyphrases, keyphrases_weight_norm):
        codes = kp[0].split(',')
        #rank keyphrases
        kp[1] = ((keyphrases_dict[codes[0]] + keyphrases_dict[codes[1]])) * n # CC_{i,j}
        # Replace the code pair with the human-readable phrase
        kp[0] = dictionaryCode[codes[0]] + ' ' + dictionaryCode[codes[1]]
    soma = sum([v[1] for v in keyphrases])
    keyphrases = [[k[0], k[1]/soma] for k in keyphrases] #NCC_{i,j}
    keywords = [[dictionaryCode[k[0]], k[1]] for k in keyWords]
    merged = keyphrases[:6] + keywords #FWC U NCC_{1:6}
    merged.sort(key=takeSecond, reverse=True)
    return merged
def extract_keyphrases(g, dictionaryCode):
    """Extract ranked keyphrases from a word co-occurrence graph.

    :param g: co-occurrence graph; node attribute 'peso' holds term counts
    :param dictionaryCode: maps node codes to their surface words
    :return: ranked list of [phrase, score]
    """
    g = nx.Graph(g)
    phrases = []
    words = []
    #First Rank
    keyphrases = nodeRank(g)
    #Exclude words different from nouns, verbs and adjectives
    new_keyphrases = []
    for k in keyphrases:
        words = dictionaryCode[k[0]]
        tokens = nltk.word_tokenize(words)
        notDesiredTags = False
        # Keep only terms whose POS tags all start with N (noun), J
        # (adjective) or V (verb)
        for w in nltk.pos_tag(tokens):
            if w[1][0] != 'N' and w[1][0] != 'J' and w[1][0] != 'V':
                notDesiredTags = True
        if notDesiredTags:
            # Demote undesired terms to zero weight instead of dropping them
            bla = [k[0], 0]
            new_keyphrases.append(bla)
        else:
            new_keyphrases.append(k)
    keyphrases = new_keyphrases
    keywords = sorted(keyphrases, key=lambda kv: (kv[1], kv[0]), reverse=True)
    #excludes last 87% keyphrases
    lenght = int(.13*len(keywords))
    summation = sum([v[1] for v in keywords[:lenght]])
    keywords = [[k[0], k[1]/summation] for k in keywords[:lenght]]
    #re-weight mult-term keyphrases
    keywords = [[k[0], (k[1]**(1/(len(dictionaryCode[k[0]].split(' ')))))] for k in keywords[:lenght]]
    keywords = sorted(keywords, key=lambda kv: (kv[1], kv[0]), reverse=True)
    # Prune the graph down to the surviving keywords only
    nodesToRemove = []
    for n in g:
        inKeywords = False
        for k in keywords:
            if k[0] == n:
                inKeywords = True
        if inKeywords == False:
            nodesToRemove.append(n)
    for n in nodesToRemove:
        g.remove_node(n)
    # Total term count of the pruned graph, used to scale thresholds
    totalWords = 0
    for n in g.nodes():
        totalWords += g.nodes()[n]['peso']
    keyphrases = keyPhrasesCompilation(keywords,g,g,dictionaryCode,lenght,totalWords)
    return keyphrases
########################### COMPARE COVERS ############################################
def parseAnswer(answer):
    """Parse the tab-separated output lines of the external NMI tool.

    Keeps only lines with at least two tab-separated fields and strips
    colons and spaces from the first field (the metric name).
    """
    rows = [fields
            for fields in (line.split('\t') for line in answer)
            if len(fields) >= 2]
    for row in rows:
        row[0] = row[0].replace(':', '').replace(' ', '')
    return rows
def compareFullCovers(cover1, cover2, folderNMI, NMI_type='NMI<Max>'):
    """Run the external overlapping-NMI binary on two cover files and return
    the requested similarity value (as a string).

    :param cover1: path to the first cover file
    :param cover2: path to the second cover file
    :param folderNMI: path to the NMI executable
    :param NMI_type: which metric line to return: 'NMI<Max>', 'lfkNMI' or 'NMI<Sum>'

    NOTE(review): the command is assembled by string concatenation and run
    with shell=True; paths containing spaces or shell metacharacters will
    break or be interpreted by the shell. Consider
    subprocess.run([folderNMI, cover1, cover2], shell=False).
    """
    command = folderNMI + ' ' + cover1 + ' ' + cover2
    p = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    answer = p.stdout.decode('ascii').split('\n')
    parsedAnswer = parseAnswer(answer)
    # The tool prints the three metrics in a fixed order
    if NMI_type == 'NMI<Max>':
        return parsedAnswer[0][1]
    elif NMI_type == 'lfkNMI':
        return parsedAnswer[1][1]
    elif NMI_type == 'NMI<Sum>':
        return parsedAnswer[2][1]
    else:
        print('Wrong NMI_type!\n')
        return parsedAnswer
def calcDegreeCentrality(g):
    """Return {node: degree} for every node in g (degree = neighbour count)."""
    return {node: len(g[node]) for node in g}
def calcNodesCentrality(g1, g2):
    """Degree of every node across two graphs.

    Nodes present in both graphs get the mean of their two degrees; nodes in
    only one graph keep that graph's degree.
    """
    degrees = {node: len(g1[node]) for node in g1}
    for node in g2:
        if node in degrees:
            # Average the degree over the two snapshots
            degrees[node] = (len(g1[node]) + len(g2[node])) / 2
        else:
            degrees[node] = len(g2[node])
    return degrees
def clusterCentrality(cluster, g, nodeCentrality):
    """Summed centrality of the nodes in `cluster` (g is unused)."""
    return sum(nodeCentrality[node] for node in cluster)
def calcClustersCentralities(cover, g, nodeCentrality):
    """Map each cluster index in `cover` to its summed node centrality.

    (The per-cluster sum is inlined here; g is unused.)
    """
    return {idx: sum(nodeCentrality[node] for node in cluster)
            for idx, cluster in enumerate(cover)}
def comunitySimilarity(c1, c2, n1, n2, nodeCentrality, clustersCentralities1, clustersCentralities2):
    """Centrality-weighted overlap between two clusters.

    `overlap` is the summed centrality of the nodes shared by c1 and c2;
    the function returns
    [overlap / max(cent1, cent2), overlap / min(cent1, cent2)]
    where cent1/cent2 are the total centralities of clusters n1 and n2.
    """
    overlap = sum(nodeCentrality[node] for node in c1 if node in c2)
    cent1 = clustersCentralities1[n1]
    cent2 = clustersCentralities2[n2]
    return [overlap / max(cent1, cent2), overlap / min(cent1, cent2)]
def bestComunitySimilarity(comunity, cover1, nodeCentrality, clustersCentralities1, clustersCentralities2):
    """Find the cluster of `cover1` most similar to `comunity`.

    NOTE(review): this function is broken as written and will raise on any
    call: `comunitySimilarity` takes 7 positional arguments but only 5 are
    passed here (the cluster indices n1/n2 are missing), and its list return
    value would then be compared against the int -1 (TypeError in Python 3).
    It appears to be dead code superseded by `coverSimilarities`; confirm
    before relying on it.
    """
    higherSimilarity = -1
    nCluster = -1
    for c, i in zip(cover1, range(len(cover1))):
        similarity = comunitySimilarity(c, comunity, nodeCentrality, clustersCentralities1, clustersCentralities2)
        if similarity > higherSimilarity:
            higherSimilarity = similarity
            nCluster = i
    return higherSimilarity, nCluster
def coverSimilarities(cover1, cover2, nodeCentrality, clustersCentralities1, clustersCentralities2, sizeThreshold=10):
    """Pairwise similarity matrix between the clusters of two covers.

    Entry [i][j] is comunitySimilarity(cover1[i], cover2[j], ...); any pair
    where either cluster has fewer than `sizeThreshold` nodes gets [0, 0].
    """
    matrix = []
    for idx1, comm1 in enumerate(cover1):
        if len(comm1) < sizeThreshold:
            # undersized source cluster: whole row of zero similarities
            matrix.append([[0, 0] for _ in cover2])
            continue
        row = []
        for idx2, comm2 in enumerate(cover2):
            if len(comm2) >= sizeThreshold:
                row.append(comunitySimilarity(comm1, comm2, idx1, idx2,
                                              nodeCentrality,
                                              clustersCentralities1,
                                              clustersCentralities2))
            else:
                row.append([0, 0])
        matrix.append(row)
    return matrix
def compareCovers(all_similarities, threshold):
    """Return [i, j, sim] for every cluster pair whose max-normalized
    similarity (first entry of the pair's [max, min] list) is >= threshold.
    """
    return [[i, j, row[j][0]]
            for i, row in enumerate(all_similarities)
            for j in range(len(row))
            if row[j][0] >= threshold]
############################## EVOLUTION ################################################
def evolution(c1, c2, sKGraph1, sKGraph2):
    """Merge two communities from two SciKGraphs into one networkx graph.

    Each node carries a `clusters` tag recording provenance:
    1 = only in c1, 2 = only in c2, 3 = in both (node weights summed).
    Edge weights are summed when the edge exists in both underlying graphs.
    """
    new_graph = nx.Graph()
    # nodes from c1 (shared nodes get combined weight and clusters=3)
    for n in c1:
        if n in c2:
            new_graph.add_node(n, peso=sKGraph1.sciKGraph.nodes()[n]['peso']+sKGraph2.sciKGraph.nodes()[n]['peso'], clusters=3, dicionario=sKGraph1.dictionaryCodeMerged[n])
        else:
            new_graph.add_node(n, peso=sKGraph1.sciKGraph.nodes()[n]['peso'], clusters=1, dicionario=sKGraph1.dictionaryCodeMerged[n])
    # nodes that appear only in c2
    for n in c2:
        if n not in c1:
            new_graph.add_node(n, peso=sKGraph2.sciKGraph.nodes()[n]['peso'], clusters=2, dicionario=sKGraph2.dictionaryCodeMerged[n])
    # edges from graph 1 whose endpoints both survived into the merged graph
    for e in sKGraph1.sciKGraph.edges():
        if new_graph.has_node(e[0]) and new_graph.has_node(e[1]):
            # `e in edges()` membership assumes undirected graphs, where
            # (u, v) matches (v, u) — TODO confirm for directed inputs
            if e in sKGraph2.sciKGraph.edges():
                new_graph.add_edge(e[0], e[1], weight= sKGraph1.sciKGraph[e[0]][e[1]]['weight'] + sKGraph2.sciKGraph[e[0]][e[1]]['weight'])
            else:
                new_graph.add_edge(e[0], e[1], weight= sKGraph1.sciKGraph[e[0]][e[1]]['weight'])
    # edges only in graph 2 (shared edges were already added above)
    for e in sKGraph2.sciKGraph.edges():
        if new_graph.has_node(e[0]) and new_graph.has_node(e[1]):
            if e not in sKGraph1.sciKGraph.edges():
                new_graph.add_edge(e[0], e[1], weight= sKGraph2.sciKGraph[e[0]][e[1]]['weight'])
    return new_graph
|
<gh_stars>0
import os
import pickle
import tempfile
import zipfile
from contextlib import contextmanager
import pytest
import fsspec
from fsspec.core import (
OpenFile,
OpenFiles,
_expand_paths,
get_compression,
open_files,
open_local,
)
@contextmanager
def tempzip(data=None):
    """Context manager yielding the path of a temporary zip file.

    :Args:
        data: optional {archive_name: payload} mapping written into the zip.
    The file is removed on exit; removal errors are ignored.
    """
    # Bug fix: `data={}` was a mutable default argument.
    data = {} if data is None else data
    # Bug fix: mkstemp returns an OPEN fd which was previously discarded
    # and leaked; close it before handing the path to ZipFile.
    fd, fn = tempfile.mkstemp(suffix="zip")
    os.close(fd)
    with zipfile.ZipFile(fn, mode="w") as z:
        for k, v in data.items():
            z.writestr(k, v)
    try:
        yield fn
    finally:
        try:
            os.remove(fn)
        except (IOError, OSError):
            pass
@pytest.mark.parametrize(
    "path, name_function, num, out",
    [
        [["apath"], None, 1, ["apath"]],
        ["apath.*.csv", None, 1, ["apath.0.csv"]],
        ["apath.*.csv", None, 2, ["apath.0.csv", "apath.1.csv"]],
        ["a*", lambda x: "abc"[x], 2, ["aa", "ab"]],
    ],
)
def test_expand_paths(path, name_function, num, out):
    """_expand_paths fills a '*' template to produce `num` concrete paths."""
    expanded = _expand_paths(path, name_function, num)
    assert expanded == out
def test_expand_error():
    """A path template with more than one '*' placeholder is rejected."""
    with pytest.raises(ValueError):
        _expand_paths("*.*", None, 1)
def test_openfile_api(m):
    """OpenFile: repr, manual open/close, and context-manager text read."""
    m.open("somepath", "wb").write(b"data")
    of = OpenFile(m, "somepath")
    assert str(of) == "<OpenFile 'somepath'>"
    f = of.open()
    assert f.read() == b"data"
    f.close()
    with OpenFile(m, "somepath", mode="rt") as f:
        # Bug fix: the comparison result was previously discarded
        # (bare `f.read() == "data"`); actually assert it.
        assert f.read() == "data"
def test_openfile_open(m):
    """Data written through OpenFile.open() is only flushed on close()."""
    of = OpenFile(m, "somepath", mode="wt")
    f = of.open()
    f.write("hello")
    assert m.size("somepath") == 0  # no flush yet
    del of
    assert m.size("somepath") == 0  # still no flush even after the OpenFile is gone
    f.close()
    assert m.size("somepath") == 5
def test_open_local():
    """open_local materializes a simplecache:// URL into the cache dir."""
    d1 = str(tempfile.mkdtemp())
    f1 = os.path.join(d1, "f1")
    # Fix: close the handles deterministically (they were previously leaked).
    with open(f1, "w") as fh:
        fh.write("test1")
    d2 = str(tempfile.mkdtemp())
    fn = open_local("simplecache://" + f1, cache_storage=d2, target_protocol="file")
    assert isinstance(fn, str)
    with open(fn) as fh:
        assert fh.read() == "test1"
    assert d2 in fn
def test_xz_lzma_compressions():
    """get_compression resolves '.xz' files and accepts both 'xz' and 'lzma' names."""
    pytest.importorskip("lzma")
    # Ensure that both 'xz' and 'lzma' compression names can be parsed
    assert get_compression("some_file.xz", "infer") == "xz"
    assert get_compression("some_file.xz", "xz") == "xz"
    assert get_compression("some_file.xz", "lzma") == "lzma"
def test_list():
    """open_files on a list of paths yields one OpenFile per path, in order."""
    here = os.path.abspath(os.path.dirname(__file__))
    expected = [os.path.join(here, name).replace("\\", "/") for name in os.listdir(here)]
    ofiles = open_files(expected)
    assert len(ofiles) == len(expected)
    assert [o.path for o in ofiles] == expected
def test_pathobject(tmpdir):
    """open_files accepts pathlib.Path objects, singly or in a list."""
    import pathlib

    tmpdir = str(tmpdir)
    plist_str = [os.path.join(str(tmpdir), f).replace("\\", "/") for f in ["a", "b"]]
    # Fix: close the handles deterministically (they were previously leaked,
    # which can break file deletion on Windows and raises ResourceWarning).
    with open(plist_str[0], "w") as fh:
        fh.write("first file")
    with open(plist_str[1], "w") as fh:
        fh.write("second file")
    plist = [pathlib.Path(p) for p in plist_str]
    of = open_files(plist)
    assert len(of) == 2
    assert [f.path for f in of] == plist_str
    of = open_files(plist[0])
    assert len(of) == 1
    assert of[0].path == plist_str[0]
    with of[0] as f, open(plist_str[0], "rb") as raw:
        assert f.read() == raw.read()
def test_automkdir(tmpdir):
    """fsspec.open creates missing parents for writes by default and with
    auto_mkdir=True, and fails with auto_mkdir=False."""
    target = os.path.join(str(tmpdir), "a")
    with fsspec.open(os.path.join(target, "afile"), "w"):
        pass
    assert "afile" in os.listdir(target)
    target = os.path.join(str(tmpdir), "b")
    with fsspec.open(os.path.join(target, "bfile"), "w", auto_mkdir=True):
        pass
    assert "bfile" in os.listdir(target)
    target = os.path.join(str(tmpdir), "c")
    with pytest.raises(FileNotFoundError):
        of = fsspec.open(os.path.join(target, "bfile"), "w", auto_mkdir=False)
        with of:
            pass
def test_automkdir_readonly(tmpdir):
    """Opening a file for reading never creates missing parent directories."""
    dir = os.path.join(str(tmpdir), "d")
    with pytest.raises(FileNotFoundError):
        of = fsspec.open(os.path.join(dir, "dfile"), "r")
        with of:
            pass
def test_openfile_pickle_newline():
    """OpenFile keeps its `newline` argument across a pickle round-trip."""
    # GH#318
    test = fsspec.open(__file__, newline=b"")
    pickled = pickle.dumps(test)
    restored = pickle.loads(pickled)
    assert test.newline == restored.newline
def test_mismatch():
    """Mixing URLs from different protocols in one open_files call is an error."""
    with pytest.raises(ValueError, match="protocol"):
        open_files(["s3://test/path.csv", "/other/path.csv"])
def test_url_kwargs_chain(ftp_writable):
    """Credentials passed as kwargs and credentials embedded in a chained
    URL address the same remote file."""
    host, port, username, password = ftp_writable
    data = b"hello"
    with fsspec.open(
        "ftp:///afile", "wb", host=host, port=port, username=username, password=password
    ) as f:
        f.write(data)
    with fsspec.open(
        f"simplecache::ftp://{username}:{password}@{host}:{port}//afile",
        "rb",
    ) as f:
        assert f.read() == data
def test_multi_context(tmpdir):
    """OpenFiles acts both as a list of OpenFile and as a single context
    manager that opens/closes all of its members."""
    fns = [os.path.join(tmpdir, fn) for fn in ["a", "b"]]
    files = open_files(fns, "wb")
    assert isinstance(files, OpenFiles)
    assert isinstance(files[0], OpenFile)
    assert len(files) == 2
    # slicing preserves the container type
    assert isinstance(files[:1], OpenFiles)
    assert len(files[:1]) == 1
    with files as of:
        assert len(of) == 2
        assert not of[0].closed
        assert of[0].name.endswith("a")
    # leaving the context closes every member file
    assert of[0].closed
    assert repr(files) == "<List of 2 OpenFile instances>"
def test_not_local():
    """open_local refuses filesystems that are not local-file backed."""
    with pytest.raises(ValueError, match="attribute local_file=True"):
        open_local("memory://afile")
def test_url_to_fs(ftp_writable):
    """url_to_fs strips chained-caching wrappers and returns the inner path."""
    host, port, username, password = ftp_writable
    data = b"hello"
    with fsspec.open(f"ftp://{username}:{password}@{host}:{port}/afile", "wb") as f:
        f.write(data)
    fs, url = fsspec.core.url_to_fs(
        f"simplecache::ftp://{username}:{password}@{host}:{port}/afile"
    )
    assert url == "/afile"
    fs, url = fsspec.core.url_to_fs(f"ftp://{username}:{password}@{host}:{port}/afile")
    assert url == "/afile"
    with fsspec.open(f"ftp://{username}:{password}@{host}:{port}/afile.zip", "wb") as f:
        # NOTE(review): this local import is redundant — zipfile is already
        # imported at module level; harmless but could be removed.
        import zipfile

        with zipfile.ZipFile(f, "w") as z:
            with z.open("inner", "w") as f2:
                f2.write(b"hello")
        f.write(data)
    fs, url = fsspec.core.url_to_fs(
        f"zip://inner::ftp://{username}:{password}@{host}:{port}/afile.zip"
    )
    assert url == "inner"
    fs, url = fsspec.core.url_to_fs(
        f"simplecache::zip::ftp://{username}:{password}@{host}:{port}/afile.zip"
    )
    assert url == ""
def test_target_protocol_options(ftp_writable):
    """zip-over-ftp chaining via target_protocol/target_options kwargs."""
    host, port, username, password = ftp_writable
    data = {"afile": b"hello"}
    options = {"host": host, "port": port, "username": username, "password": password}
    with tempzip(data) as lfile, fsspec.open(
        "ftp:///archive.zip", "wb", **options
    ) as f:
        # Fix: read the local zip via a context manager
        # (the handle was previously leaked).
        with open(lfile, "rb") as zf:
            f.write(zf.read())
    with fsspec.open(
        "zip://afile",
        "rb",
        target_protocol="ftp",
        target_options=options,
        fo="archive.zip",
    ) as f:
        assert f.read() == data["afile"]
def test_chained_url(ftp_writable):
    """Every chained-URL spelling of zip-over-ftp resolves to the same bytes."""
    host, port, username, password = ftp_writable
    data = {"afile": b"hello"}
    cls = fsspec.get_filesystem_class("ftp")
    fs = cls(host=host, port=port, username=username, password=password)
    with tempzip(data) as lfile:
        fs.put_file(lfile, "archive.zip")
    # same target, expressed with simplecache in different chain positions
    urls = [
        "zip://afile",
        "zip://afile::simplecache",
        "simplecache::zip://afile",
        "simplecache::zip://afile::simplecache",
    ]
    for url in urls:
        url += f"::ftp://{username}:{password}@{host}:{port}/archive.zip"
        with fsspec.open(url, "rb") as f:
            assert f.read() == data["afile"]
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import sys
from pathlib import Path
import subprocess
import time
import yaml
# Command-line arguments: all positional, all required.
csvFile = sys.argv[1]  # CSV listing the instances/apps to migrate
planPatchPath = sys.argv[2]  # directory holding the optional plan-patch files
m4aPipelineTemplateFile = sys.argv[3]  # PipelineRun manifest YAML template
image = sys.argv[4]  # container image name passed to the pipeline
def verify_line(line):
    """Validate one parsed CSV row describing a migration.

    Checks the OS, intent, app type and — when a seventh column is present —
    the plan-patch file's existence and suffix. Returns True when the row
    can be migrated, False otherwise (after printing the reason).
    """
    if line[3] not in ["Linux"]:
        print(f"[ERROR] OS not supported: {line[3]}")
        return False
    if line[4] not in ["Image", "ImageAndData", "Data"]:
        print(f"[ERROR] Intent not supported: {line[4]}")
        return False
    # app type (the original comment mislabelled this check as "Intent")
    if line[5] not in ["system", "tomcat", "open-liberty"]:
        print(f"[ERROR] AppType not supported: {line[5]}")
        return False
    # optional plan patch file
    if len(line) > 6:
        plan_patch_file = Path(planPatchPath + '/' + line[6])
        if not plan_patch_file.is_file():
            print(f"[ERROR] Plan Patch File doesn't exist: {plan_patch_file} ")
            return False
        if plan_patch_file.suffix not in [".yaml", ".YAML", ".yml", ".YML", ".json", ".JSON"]:
            print(f"[ERROR] Plan Patch File suffix not supported: {plan_patch_file}")
            return False
    # TODO: Add More Verifications
    return True
def run_migration(line):
    """Configure and start one Tekton PipelineRun for a verified CSV row.

    Fills the PipelineRun YAML template with the row's fields, optionally
    wires up a plan-patch ConfigMap, writes the manifest, and applies it
    with kubectl.
    """
    # Load PipelineRun Manifest YAML Template
    with open(m4aPipelineTemplateFile) as m:
        pipelinerun_yaml = yaml.load(m, Loader=yaml.FullLoader)
    # Configure Basic YAML Params (match template params by name)
    pipelinerun_yaml["metadata"]["name"] = line[0]
    for param in pipelinerun_yaml["spec"]["params"]:
        if param["name"] == "migrationName":
            param["value"] = line[0]
        if param["name"] == "migrationAppType":
            param["value"] = line[5]
        if param["name"] == "migrationIntent":
            param["value"] = line[4]
        if param["name"] == "migrationOS":
            param["value"] = line[3]
        if param["name"] == "migrationSource":
            param["value"] = line[2]
        if param["name"] == "migrationVmId":
            param["value"] = line[1]
        if param["name"] == "image":
            param["value"] = image
    # Add Plan Patch Params if needed (7th CSV column present)
    if len(line) > 6:
        patch_name = line[6]
        file_param = dict()
        file_param["name"] = "migrationPlanPatchFile"
        file_param["value"] = patch_name
        pipelinerun_yaml["spec"]["params"].append(file_param)
        # recreate the ConfigMap holding the patch file; deletion failure
        # (map absent) is ignored since os.system's exit code is unchecked
        plan_patch_cm_name = line[0] + "-plan-patch-cm"
        os.system("kubectl delete configmap " + plan_patch_cm_name)
        cm_create_cmd = ['kubectl', 'create', 'configmap', plan_patch_cm_name,
                         '--from-file=' + patch_name + '=' + planPatchPath + '/' + patch_name]
        execute_command(cm_create_cmd)
        cm_param = dict()
        cm_param["name"] = "planPatchConfigMapName"
        cm_param["value"] = plan_patch_cm_name
        pipelinerun_yaml["spec"]["params"].append(cm_param)
    # Write Configured YAML
    # NOTE(review): manifest is written to the filesystem root ("/<name>.yaml"),
    # which presumably assumes a container environment — confirm.
    pipelinerun_manifest = "/" + line[0] + ".yaml"
    with open(pipelinerun_manifest, "w") as m:
        yaml.dump(pipelinerun_yaml, m)
    # Start PipelineRun (cat is for logging visibility only)
    os.system("cat " + pipelinerun_manifest)
    apply_cmd = ['kubectl', 'apply', '-f', pipelinerun_manifest]
    execute_command(apply_cmd)
def execute_command(command):
    """Run `command` (argv list), echo any stderr, and raise
    CalledProcessError on a non-zero exit. Returns the CompletedProcess."""
    result = subprocess.run(command, capture_output=True, encoding='utf-8')
    if result.stderr:
        print(result.stderr)
    result.check_returncode()
    return result
def check_migration(line):
    """Checks the status of the migration pipeline run, returns true when migration is finished"""
    pipelinerun_name = line[0]
    # condition status is "True" (succeeded), "False" (failed) or
    # "Unknown" (still running), per Tekton's status conditions
    status_cmd = ['kubectl', 'get', 'pipelinerun', pipelinerun_name, '-o', 'jsonpath={.status.conditions[0].status}']
    status = execute_command(status_cmd).stdout
    if status == "True":
        print("Migration '", pipelinerun_name, "' Completed Successfully")
        return True
    elif status == "False":
        # finished, but with errors: fetch and report the failure reason
        reason_cmd = ['kubectl', 'get', 'pipelinerun', pipelinerun_name, '-o',
                      'jsonpath={.status.conditions[0].reason}']
        reason = execute_command(reason_cmd).stdout
        print("Migration '", pipelinerun_name, "' Completed with Errors. Reason: ", reason)
        return True
    else:
        print("Migration '", pipelinerun_name, "' In Progress")
        return False
# Start: read the CSV, launch a pipeline per valid row, then poll until done.
print("CSV: ", csvFile)
print("Template: ", m4aPipelineTemplateFile)
# Read CSV
with open(csvFile, 'r') as f:
    reader = csv.reader(f, delimiter=',')
    # Skip Past Headers
    next(reader, None)
    # Parse CSV lines, and check which ones can be migrated
    migration_lines = list()
    for line in reader:
        good = verify_line(line)
        if not good:
            print("[ERROR] line failed verification: ", line)
            continue
        # TODO: Add check if already migrated
        migration_lines.append(line)
# Migrate the new/good Instances
for line in migration_lines:
    run_migration(line)
# Monitoring Migrations: poll every 45s until every run reports finished
print()
print("Starting Monitoring")
while True:
    migrations_left = migration_lines.copy()
    print()
    # Cycle through migrations and check if complete
    for line in migration_lines:
        done = check_migration(line)
        if done:
            migrations_left.remove(line)
    migration_lines = migrations_left
    # Exit when all migrations have completed
    if len(migration_lines) == 0:
        print("All migrations complete!")
        break
    # Delay next round of checks
    time.sleep(45)
|
<filename>attackz.py
import os
import re
from typing import List
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import torch.nn as nn
import legacy
# Attack
import sys
sys.path.append('../boosted-implicit-models')
from experimental import AttackExperiment
import torch.optim as optim
import torchvision.utils as vutils
import matplotlib.pylab as plt
from tqdm import tqdm
from likelihood_model import ReparameterizedMVN
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
    """Parse 'a-c' as the inclusive range [a..c], or 'a,b,c' as a list of ints."""
    bounds = re.match(r'^(\d+)-(\d+)$', s)
    if bounds is not None:
        lo, hi = int(bounds.group(1)), int(bounds.group(2))
        return list(range(lo, hi + 1))
    return [int(token) for token in s.split(',')]
#----------------------------------------------------------------------------
def save_images(all_images, fpath):
    """Save up to the first 100 images as a 10x10 grid figure at `fpath`.

    Images are clamped to [-1, 1], rescaled to [0, 1], and rendered
    without axis ticks.
    """
    grid = vutils.make_grid(all_images[:100].clamp(-1, 1) * .5 + .5,
                            nrow=10, padding=4, pad_value=1, normalize=False)
    fig, ax = plt.subplots(1, 1, figsize=(12, 12))
    ax.imshow(np.transpose(grid.cpu().numpy(), (1, 2, 0)), interpolation='bilinear')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.savefig(fpath, bbox_inches='tight', pad_inches=0)
class MineGAN(nn.Module):
    """Compose a miner network with a StyleGAN mapping network.

    forward() pushes latent z0 through the miner, then through the
    mapping network (with no conditioning label), returning w.
    """

    def __init__(self, miner, Gmapping):
        super(MineGAN, self).__init__()
        self.nz = miner.nz0
        self.miner = miner
        self.Gmapping = Gmapping

    def forward(self, z0):
        mined = self.miner(z0)
        return self.Gmapping(mined, None)
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--layers', 'l_identity', type=num_range, help='Style layer range', default='0-6', show_default=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--outdir', type=str, required=True)
@click.option('--fixed_id', type=int, required=True)
def attack(
    network_pkl: str,
    l_identity: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str,
    fixed_id: int
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    python attack.py --outdir=out-celeba --network=training-runs/00015-celeba-aux-auto1/network-snapshot-000240.pkl --trunc .5
    """
    device = torch.device('cuda')
    # Prepare attack settings
    # NOTE(review): the experiment config path is hardcoded to a personal
    # home directory — parameterize before reuse on another machine.
    # experiment = AttackExperiment('/h/wangkuan/projects/boosted-implicit-models/configs/celeba_crop--dcgan--ResNet10--ft.yml', device, False, fixed_id =0, run_target_feat_eval=0)
    experiment = AttackExperiment('/h/wangkuan/projects/boosted-implicit-models/configs/celeba_db--dcgan--ResNet10--ft.yml', device, False, fixed_id =fixed_id, run_target_feat_eval=0)
    target_logsoftmax = experiment.target_logsoftmax
    assert truncation_psi == 1 # TODO: incorporate this if necessary
    #
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    os.makedirs(os.path.join(outdir, 'samples_pt'), exist_ok=True)
    os.makedirs(os.path.join(outdir, 'ws_pt'), exist_ok=True)
    # learnable latent distribution whose samples are "mined" toward fixed_id
    miner = ReparameterizedMVN(G.mapping.z_dim).to(device).double()
    minegan_Gmapping = MineGAN(miner, G.mapping)
    # mask selecting which w-layers carry identity (the mined) codes
    identity_mask = torch.zeros(1, G.mapping.num_ws, 1).to(device)
    identity_mask[:, l_identity, :] = 1
    optimizerG = optim.Adam(miner.parameters(), lr=0.01)
    fixed_z_nuisance = torch.randn(100, G.z_dim).to(device).double()
    fixed_z_identity = torch.randn(100, G.z_dim).to(device).double()
    pbar = tqdm(range(0, 10000), desc='Train loop')
    for i in pbar:
        optimizerG.zero_grad()
        def sample(z_nuisance, z_identity):
            # blend plain-mapped and mined w codes per the layer mask
            w_nuisance = G.mapping(z_nuisance, None)
            w_identity = minegan_Gmapping(z_identity)
            w = (1-identity_mask) * w_nuisance + identity_mask * w_identity
            x = G.synthesis(w, noise_mode=noise_mode)
            return x
        # all_w[:, l_identity] = opt_w
        z_nu = torch.randn(100, G.z_dim).to(device).double()
        z_id = torch.randn(100, G.z_dim).to(device).double()
        fake = sample(z_nu, z_id)
        # import ipdb; ipdb.set_trace()
        # maximize the target classifier's log-probability of fixed_id
        lsm = target_logsoftmax(fake.clamp(-1,1) / 2 + .5)
        fake_y = fixed_id * torch.ones(100).to(device).long()
        target_loss = -lsm.gather(1, fake_y.view(-1,1)).mean()
        target_loss.backward()
        optimizerG.step()
        # periodic snapshot of samples from the fixed latents
        if i % 100 == 0:
            with torch.no_grad():
                fake = sample(fixed_z_nuisance, fixed_z_identity)
                save_images(fake, f'{outdir}/i{i:05d}.jpeg')
                torch.save( fake[:10].detach().cpu(), os.path.join(outdir, 'samples_pt', f'i{i:05d}.pt'))
#----------------------------------------------------------------------------
if __name__ == "__main__":
attack() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
|
<reponame>qbilius/psychopy_ext<gh_stars>10-100
# Part of the psychopy_ext library
# Copyright 2010-2014 <NAME>
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A library of helper functions for creating and running experiments.
All experiment-related methods are kept here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys, os, csv, glob, random, warnings, copy
from UserDict import DictMixin
from collections import OrderedDict
import numpy as np
import wx
# for HTML rendering
import pyglet
import textwrap
from HTMLParser import HTMLParser
# for exporting stimuli to svg
try:
import svgwrite
except:
no_svg = True
else:
no_svg = False
import psychopy.info
from psychopy import visual, core, event, logging, misc, monitors, data
from psychopy.visual.shape import ShapeStim
from psychopy.data import TrialHandler, ExperimentHandler
from psychopy.tools.attributetools import attributeSetter
from psychopy_ext import ui
from psychopy_ext.version import __version__ as psychopy_ext_version
# pandas does not come by default with PsychoPy but that should not prevent
# people from running the experiment
try:
import pandas
except:
pass
class default_computer:
    """The default computer parameters. Hopefully will form a full class at
    some point.

    Used as a plain namespace of settings (see set_paths / run_tests).
    """
    # True when this machine's settings have been verified (see run_tests)
    recognized = False
    # computer defaults
    root = '.'  # means store output files here
    stereo = False  # not like in Psychopy; this merely creates two Windows
    default_keys = {'exit': ('lshift', 'escape'),
                    'trigger': 'space'}  # "special" keys
    valid_responses = {'f': 0, 'j': 1}  # organized as input value: output value
    # monitor defaults
    name = 'default'
    distance = 80  # viewing distance, presumably in cm — TODO confirm
    width = 37.5  # screen width, presumably in cm — TODO confirm
    # window defaults
    screen = 0  # default screen is 0
    view_scale = [1,1]

    def __init__(self):
        pass
def set_paths(exp_root='.', computer=default_computer, fmri_rel=''):
    """Set paths to data storage.
    :Args:
        exp_root (str)
            Path to where the main file that starts the program is.
    :Kwargs:
        - computer (Namespace, default: :class:`default_computer`)
            A class with a computer parameters defined, such as the default
            path for storing data, size of screen etc. See
            :class:`default_computer` for an example.
        - fmri_rel (str, default: '')
            A path to where fMRI data and related analyzes should be stored.
            This is useful because fMRI data takes a lot of space so you may
            want to keep it on an external hard drive rather than on Dropbox
            where your scripts might live, for example.
    :Returns:
        paths (dict):
            A dictionary of paths.
    """
    fmri_root = os.path.join(computer.root, fmri_rel)
    # ensure a trailing slash so later concatenations form valid paths
    if exp_root != '':
        exp_root += '/'
    paths = {
        'root': computer.root,
        'exp_root': exp_root,
        'fmri_root': fmri_root,
        'analysis': os.path.join(exp_root, 'analysis/'),  # where analysis files are stored
        'logs': os.path.join(exp_root, 'logs/'),
        'data': os.path.join(exp_root, 'data/'),
        'report': 'report/',
        'data_behav': os.path.join(fmri_root, 'data_behav/'),  # for fMRI behav data
        'data_fmri': os.path.join(fmri_root,'data_fmri/'),
        'data_struct': os.path.join(fmri_root,'data_struct/'),  # anatomical data
        'spm_analysis': os.path.join(fmri_root, 'analysis/'),
        'rec': os.path.join(fmri_root,'reconstruction/'),  # CARET reconstructions
        'rois': os.path.join(fmri_root,'rois/'),  # ROIs (no data, just masks)
        'data_rois': os.path.join(fmri_root,'data_rois/'),  # preprocessed and masked data
        'sim': exp_root,  # path for storing simulations of models
    }
    return paths
def run_tests(computer):
    """Runs basic tests before starting the experiment.
    At the moment, it only checks if the computer is recognized and if not,
    it waits for a user confirmation to continue thus preventing from running
    an experiment with incorrect settings, such as stimuli size.
    :Kwargs:
        computer (Namespace)
            A class with a computer parameters defined, such as the default
            path for storing data, size of screen etc. See
            :class:`default_computer` for an example.
    """
    if not computer.recognized:
        # raw_input: this module targets Python 2
        resp = raw_input("WARNING: This computer is not recognized.\n"
                         "To continue, simply hit Enter (default)\n"
                         #"To memorize this computer and continue, enter 'm'\n"
                         "To quit, enter 'q'\n"
                         "Your choice [C,q]: ")
        # NOTE(review): only '', 'c', 'q' are accepted — an uppercase 'C'
        # (as the prompt suggests) re-triggers the loop; confirm intent.
        while resp not in ['', 'c', 'q']:
            resp = raw_input("Choose between continue (c) and quit (q): ")
        if resp == 'q':
            sys.exit()
        # The 'memorize this computer' feature below was never finished:
        #elif resp == 'm':
            #mac = uuid.getnode()
            #if os.path.isfile('computer.py'):
                #write_head = False
            #else:
                #write_head = True
            #try:
                #dataFile = open(datafile, 'ab')
            #print ("Computer %d is memorized. Remember to edit computer.py"
                   #"file to " % mac
class Task(TrialHandler):
def __init__(self,
             parent,
             name='',
             version='0.1',
             method='random',
             data_fname=None,
             blockcol=None
             ):
    """
    An extension of TrialHandler with many useful functions.
    :Args:
        parent (:class:`Experiment`)
            The Experiment to which this Tast belongs.
    :Kwargs:
        - name (str, default: '')
            Name of the task. Currently not used anywhere.
        - version (str, default: '0.1')
            Version of your experiment.
        - method ({'sequential', 'random'}, default: 'random')
            Order of trials:
                - sequential: trials and blocks presented sequentially
                - random: trials presented randomly, blocks sequentially
                - fullRandom: converted to 'random'
            Note that there is no explicit possibility to randomize
            the order of blocks. This is intentional because you
            in fact define block order in the `blockcol`.
        - data_fname (str, default=None)
            The name of the main data file for storing output. If None,
            reuses :class:`~psychopy_ext.exp.Datafile` instance from
            its parent; otherwise, a new one is created
            (stored in ``self.datafile``).
        - blockcol (str, default: None)
            Column name in `self.exp_plan` that defines which trial
            should be presented during which block.
    """
    self.parent = parent
    self.computer = self.parent.computer
    self.paths = self.parent.paths
    self.name = name
    self.version = version
    self.nReps = 1  # fixed
    self.method = method
    # Bug fix: the docstring promises 'fullRandom' is converted to 'random',
    # but the code only checked the misspelled 'randomFull'. Accept both
    # spellings (backward compatible).
    if method in ('randomFull', 'fullRandom'):
        self.method = 'random'
    if data_fname is None:
        self.datafile = parent.datafile
    else:
        self.datafile = Datafile(data_fname, writeable=not self.parent.rp['no_output'])
    self.blockcol = blockcol
    self.computer.valid_responses = parent.computer.valid_responses
    self._exit_key_no = 0
    self.blocks = []
    #self.info = parent.info
    #self.extraInfo = self.info # just for compatibility with PsychoPy
    #self.rp = parent.rp
def __str__(self, **kwargs):
    """Return the canonical dotted name of this class."""
    return 'psychopy_ext.exp.Task'
def quit(self, message=''):
    """What to do when exit is requested.

    Logs `message`, flushes frame-timing data (unless no_output), closes
    the window and terminates via core.quit().
    """
    # Bug fix: with `from __future__ import print_function` in effect, a
    # bare `print` is a no-op expression; call it to actually emit the
    # intended newline ("in case there was anything without \n").
    print()
    logging.warning(message)
    self.win.flip()
    #self.rectimes.append(self.win.frameIntervals[-1])
    if not self.rp['no_output']:
        # dump per-flip frame intervals for every named flip
        named_flips = [n for n in self.win.flipnames[:-1] if n != '']
        if len(named_flips) > 0:
            framelogname = self.paths['logs'] + self.info['subjid'] + '_timing.csv'
            self.framelog = Datafile(framelogname, writeable=not self.rp['no_output'])
            with open(framelogname, 'ab') as f:
                writer = csv.writer(f, lineterminator = '\n')
                writer.writerow(['event', 'time'])
                for name, time in zip(self.win.flipnames[:-1], self.win.frameIntervals):
                    if name != '':
                        writer.writerow([name, '%.6f' % time])
    self.win.close()
    if not self.rp['no_output']:
        self.logfile.write('End time: %s\n' % data.getDateStr(format="%Y-%m-%d %H:%M"))
        self.logfile.write('end')
    core.quit()
def flip(self, name=None, *args, **kwargs):
    """Flip the window and record the event name for frame-interval logging.

    Replaces win.flip (the original is kept as win.flip_orig); when frame
    intervals are being recorded, appends `name` (or the current event's
    name) to win.flipnames so quit() can label each interval.
    """
    self.win.flip_orig(*args, **kwargs)
    if self.win.recordFrameIntervals and not self.win.recordFrameIntervalsJustTurnedOn:
        if name is None:
            # Bug fix: narrowed from a bare `except` (which hid real
            # errors) — only a missing this_event/name is expected here.
            try:
                name = self.this_event.name
            except AttributeError:
                name = ''
        self.win.flipnames.append(name)
def setup_task(self):
    """
    Does all the dirty setup before running the experiment.
    Steps include:
        - Logging file setup (:func:`set_logging`)
        - Creating a :class:`~psychopy.visual.Window` (:func:`create_window`)
        - Creating stimuli (:func:`create_stimuli`)
        - Creating trial structure (:func:`create_trial`)
        - Combining trials into a trial list (:func:`create_triaList`)
        - Creating a :class:`~psychopy.data.TrialHandler` using the
          defined trialList (:func:`create_TrialHandler`)

    NOTE(review): an earlier version documented a `create_win` kwarg here,
    but this method takes no arguments; the window always comes from the
    parent Experiment.
    """
    if not self.parent._initialized:
        raise Exception('You must first call Experiment.setup()')
    # inherit shared state from the parent Experiment
    self.win = self.parent.win
    self.logfile = self.parent.logfile
    self.info = self.parent.info
    self.rp = self.parent.rp
    self.mouse = self.parent.mouse
    self.datafile.writeable = not self.rp['no_output']
    self._set_keys_flat()
    self.set_seed()
    self.create_stimuli()
    self.create_trial()
    if not hasattr(self, 'trial'):
        raise Exception('self.trial variable must be created '
                        'with the self.create_trial() method')
    # for backward compatibility: convert event dict into Event
    if isinstance(self.trial[0], dict):
        self.trial = [Event._fromdict(self, ev) for ev in self.trial]
    self.create_exp_plan()
    if not hasattr(self, 'exp_plan'):
        raise Exception('self.exp_plan variable must be created '
                        'with the self.create_exp_plan() method')
    ## convert Event.dur to a list of exp_plan length
    #for ev in self.trial:
        #if isinstance(ev.dur, (int, float)):
            #ev.dur = [ev.dur] * len(self.exp_plan)
    # determine if syncing to global time is necessary
    self.global_timing = True
    for ev in self.trial:
        # if event sits there waiting, global time does not apply
        if np.any(ev.dur == 0) or np.any(np.isinf(ev.dur)):
            self.global_timing = False
            break
    if self.rp['autorun'] > 0:
        # speed up the experiment
        for ev in self.trial:  # speed up each event
            #ev.dur = map(lambda x: float(x)/self.rp['autorun'], ev.dur)
            ev.dur /= self.rp['autorun']
        self.exp_plan = self.set_autorun(self.exp_plan)
    self.get_blocks()
    # route all flips through our logging wrapper (see flip())
    self.win.flip = self.flip
def _set_keys_flat(self):
    """Flatten self.computer.default_keys into self.keylist_flat.

    Key combinations (tuples/lists of keys) are expanded into their
    individual keys; single keys are kept as-is.
    """
    self.keylist_flat = []
    for entry in self.computer.default_keys.values():
        if isinstance(entry, (tuple, list)):
            self.keylist_flat.extend(entry)
        else:
            self.keylist_flat.append(entry)
def set_seed(self):
    """Seed `random` and `np.random` for this task/block and log the seed.

    With a single task and a single block the parent Experiment's seed is
    reused; otherwise a fresh seed is derived from the absolute time.
    """
    # re-initialize seed for each block of task
    # (if there is more than one task or more than one block)
    if len(self.parent.tasks) > 1 or len(self.blocks) > 1:
        self.seed = int(core.getAbsTime())  # generate a new seed
        date = data.getDateStr(format="%Y_%m_%d %H:%M (Year_Month_Day Hour:Min)")
        random.seed(self.seed)
        np.random.seed(self.seed)
        if not self.rp['no_output']:
            # NOTE(review): '%s' % self.__str__ formats the *bound method*
            # (its repr), not the string — presumably self.__str__() was
            # intended; confirm before changing the log format.
            try:
                message = 'Task %s: block %d' % (self.__str__, self.this_blockn+1)
            except:
                message = 'Task %s' % self.__str__
            self.logfile.write('\n')
            self.logfile.write('#[ PsychoPy2 RuntimeInfoAppendStart ]#\n')
            self.logfile.write('  #[[ %s ]] #---------\n' % message)
            self.logfile.write('    taskRunTime: %s\n' % date)
            self.logfile.write('    taskRunTime.epoch: %d\n' % self.seed)
            self.logfile.write('#[ PsychoPy2 RuntimeInfoappendEnd ]#\n')
            self.logfile.write('\n')
    else:
        self.seed = self.parent.seed
def show_text(self, text='', stimuli=None, wait=0, wait_stim=None, auto=0):
    """
    Presents an instructions screen.
    :Kwargs:
        - text (str, default: None)
            Text to show.
        - stimuli (obj or list, default: None)
            Any stimuli to show along with the text?
        - wait (float, default: 0)
            How long to wait after the end of showing instructions,
            in seconds.
        - wait_stim (stimulus or a list of stimuli, default: None)
            During this waiting, which stimuli should be shown.
            Usually, it would be a fixation spot.
        - auto (float, default: 0)
            Duration of time-out of the instructions screen,
            in seconds.
    """
    def _get_rect(stim):
        # bounding box (left, bottom, right, top) in the stimulus' units,
        # converted to pixels
        rect = (stim.pos[0]-stim.size[0]/2,
                stim.pos[1]-stim.size[1]/2,
                stim.pos[0]+stim.size[0]/2,
                stim.pos[1]+stim.size[1]/2)
        if stim.units == 'cm':
            func = misc.cm2pix
        elif stim.units == 'deg':
            func = misc.deg2pix
        # NOTE(review): any other units leave `func` unbound -> crash;
        # presumably only 'cm'/'deg' stimuli are expected here.
        return tuple([func(r, self.win.monitor) for r in rect])
    # for some graphics drivers (e.g., mine:)
    # draw() command needs to be invoked once
    # before it can draw properly
    visual.TextStim(self.win, text='').draw()
    self.win.flip()
    rect = []
    if stimuli is not None:
        if not isinstance(stimuli, (tuple, list)):
            stimuli = [stimuli]
        rect = [_get_rect(stim) for stim in stimuli]
    if len(rect) > 0:
        # total vertical extent of the stimuli, used to place the text above
        rect = np.array(rect).T
        stim_height = np.max(rect[3]) - np.min(rect[1])
        #[np.max(rect[2]) - np.min(rect[0]),
        #]
    else:
        stim_height = 0
    if text is not None:
        instructions = self._parse_instructions(text)
        text_height = instructions._pygletTextObj.content_height
        if stimuli is not None:
            gap = misc.deg2pix(1, self.win.monitor) / 2
        else:
            gap = 0
        instructions.pos = (0, stim_height/2 + gap)
    else:
        text_height = 0
    if stimuli is not None:
        # shift stimuli below the text and draw them
        for stim in stimuli:
            if stim.units == 'deg':
                func = misc.pix2deg
            elif stim.units == 'cm':
                func = misc.pix2cm
            y = func(-text_height/2, self.win.monitor)
            if text is not None:
                y -= .5
            stim.pos = (stim.pos[0], y)
            stim.draw()
    if text is not None:
        instructions.draw()
    self.win.flip()
    if self.rp['unittest']:
        print(text)
    if auto > 0:  # show text and blank out
        if self.rp['autorun']:
            auto = auto / self.rp['autorun']
        core.wait(auto)
    elif not self.rp['autorun'] or not self.rp['unittest']:
        # wait for the trigger key before proceeding
        this_key = None
        while this_key != self.computer.default_keys['trigger']:
            this_key = self.last_keypress()
            if len(this_key) > 0:
                this_key = this_key.pop()
    if self.rp['autorun']:
        wait /= self.rp['autorun']
    self.win.flip()
    if wait_stim is not None:
        if not isinstance(wait_stim, (tuple, list)):
            wait_stim = [wait_stim]
        for stim in wait_stim:
            stim.draw()
        self.win.flip()
    core.wait(wait)  # wait a little bit before starting the experiment
    event.clearEvents()  # clear keys
def _parse_instructions(self, text):
    """Convert instruction text into a TextStim backed by a pyglet HTML label.

    Single-line text is rendered as a header; multi-line text is rendered
    via docutils (reStructuredText) when available, else as plain HTML
    paragraphs.
    """
    #instructions = visual.TextStim(self.win, text=text,
        #color='white', height=20, units='pix',
        #pos=(0, 0), # don't know why
        #wrapWidth=40*20)
    text = textwrap.dedent(text)
    if text.find('\n') < 0:  # single line, no formatting
        html = '<h2><font face="sans-serif">%s</font></h2>' % text
        instr = visual.TextStim(self.win, units='pix')
        instr._pygletTextObj = pyglet.text.HTMLLabel(html)
        # measure the rendered single line so it wraps exactly
        width = instr._pygletTextObj.content_width
        multiline = False
    else:
        try:
            import docutils.core
        except:  # will make plain formatting
            # fall back: paragraphs on blank lines, <br /> on newlines
            html = '<p><font face="sans-serif">%s</font></p>' % text
            html = html.replace('\n\n', '</font></p><p><font face="sans-serif">')
            html = html.replace('\n', '</font><br /><font face="sans-serif">')
            width = 40*12
            multiline = True
        else:
            html = docutils.core.publish_parts(text,
                writer_name='html')['html_body']
            # NOTE(review): stdlib HTMLParser.feed() returns None;
            # presumably _HTMLParser is a project subclass whose feed()
            # returns the processed text -- verify
            html = _HTMLParser().feed(html)
            width = 40*12
            multiline = True
    instructions = visual.TextStim(self.win, units='pix',
                                   wrapWidth=width)
    instructions._pygletTextObj = pyglet.text.HTMLLabel(html,
        width=width, multiline=multiline,
        x=0, anchor_x='left', anchor_y='center')
    return instructions
def create_fixation(self, shape='complex', color='black', size=.2):
    """Creates a fixation spot and stores it in ``self.fixation``.
    :Kwargs:
        - shape: {'dot', 'complex'} (default: 'complex')
            Choose the type of fixation:
            - dot: a simple fixation dot (.2 deg visual angle)
            - complex: the 'best' fixation shape by `Thaler et al., 2012
              <http://dx.doi.org/10.1016/j.visres.2012.10.012>`_ which
              looks like a combination of s bulls eye and cross hair
              (outer diameter: .6 deg, inner diameter: .2 deg). Note
              that it is constructed by superimposing two rectangles on
              a disk, so if non-uniform background will not be visible.
        - color (str, default: 'black')
            Fixation color.
        - size (float, default: .2)
            Radius of the fixation (outer radius for 'complex').
    """
    if shape == 'complex':
        r1 = size     # radius of outer circle (degrees)
        r2 = size/3.  # radius of inner circle (degrees)
        # one quarter-disk with a straight edge to the center; rotated
        # copies of it make the 'cross hair on bulls eye' shape
        edges = 8
        d = np.pi*2 / (4*edges)
        verts = [(r1*np.sin(e*d), r1*np.cos(e*d)) for e in xrange(edges+1)]
        verts.append([0, 0])
        oval_pos = [(r2, r2), (r2, -r2), (-r2, -r2), (-r2, r2)]
        oval = []
        for i in range(4):
            oval.append(visual.ShapeStim(
                self.win,
                name='oval',
                fillColor=color,
                lineColor=None,
                vertices=verts,
                ori=90*i,
                pos=oval_pos[i]
                ))
        center = visual.Circle(
            self.win,
            name='center',
            fillColor=color,
            lineColor=None,
            radius=r2,
            )
        fixation = GroupStim(stimuli=oval + [center],
                             name='fixation')
        fixation.color = color
        self.fixation = fixation
    elif shape == 'dot':
        self.fixation = GroupStim(
            stimuli=visual.PatchStim(
                self.win,
                name='fixation',
                # BUG FIX: was hard-coded to 'red', silently ignoring the
                # documented `color` keyword; now honors it like 'complex'
                color=color,
                tex=None,
                mask='circle',
                size=size,
                ),
            name='fixation')
def create_stimuli(self):
    """Define the stimuli of the experiment (must be overridden).

    Subclasses should build a dictionary of named stimuli, for example::

        self.create_fixation(color='white')
        line1 = visual.Line(self.win, name='line1')
        line2 = visual.Line(self.win, fillColor='DarkRed')
        self.s = {
            'fix': self.fixation,
            'stim1': [visual.ImageStim(self.win, name='stim1')],
            'stim2': GroupStim(stimuli=[line1, line2], name='lines')
            }
    """
    raise NotImplementedError
def create_trial(self):
    """Define the sequence of events forming one trial (must be overridden).

    Subclasses should assign a list of events to ``self.trial``, e.g.::

        self.trial = [exp.Event(self,
                                dur=.100,
                                display=self.s['fix'],
                                func=self.idle_event),
                      exp.Event(self,
                                dur=.300,
                                display=self.s['stim1'],
                                func=self.during_trial),
                      ]
    """
    raise NotImplementedError
def create_exp_plan(self):
    """Assemble trial definitions into ``self.exp_plan`` (must be overridden).

    Example::

        self.exp_plan = []
        for ...:
            exp_plan.append([
                OrderedDict([
                    ('cond', cond),
                    ('name', names[cond]),
                    ('onset', ''),
                    ('dur', trial_dur),
                    ('corr_resp', corr_resp),
                    ('subj_resp', ''),
                    ('accuracy', ''),
                    ('rt', ''),
                    ])
                ])
    """
    raise NotImplementedError
def get_mouse_resp(self, keyList=None, timeStamped=False):
    """
    Returns mouse clicks.
    If ``self.respmap`` is provided, records clicks only when clicked
    inside respmap. This respmap is supposed to be a list of shape
    objects that determine boundaries of where one can click.
    Might change in the future if it gets incorporated in stimuli
    themselves.
    Note that mouse implementation is a bit shaky in PsychoPy at
    the moment. In particular, ``getPressed`` method returns
    multiple key down events per click. Thus, when calling
    ``get_mouse_resp`` from a while loop, it is best to limit
    sampling to, for example, 150 ms (see `Jeremy's response <https://groups.google.com/d/msg/psychopy-users/HG4L-UDG93Y/FvyuB-OrsqoJ>`_).
    :Kwargs:
        - keyList (list, default: None)
            Additional accepted click names beyond
            ``self.computer.valid_responses``.
        - timeStamped (bool, default: False)
            If True, click times are returned as well.
    :Returns:
        A list of [click-name, time-or-None] pairs (possibly extended
        with the clicked respmap shape).
    """
    # BUG FIX: `v in keyList` raised TypeError when keyList was left at
    # its default None; treat None as "no extra keys"
    if keyList is None:
        keyList = []
    mdict = {0: 'left-click', 1: 'middle-click', 2: 'right-click'}
    # mouse buttons that count as valid responses
    valid_mouse = [k for k, v in mdict.items()
                   if v in self.computer.valid_responses or v in keyList]
    valid_mouse.sort()
    if timeStamped:
        mpresses, mtimes = self.mouse.getPressed(getTime=True)
    else:
        mpresses = self.mouse.getPressed(getTime=False)
    resplist = []
    if sum(mpresses) > 0:
        for but in valid_mouse:
            if mpresses[but] > 0:
                if timeStamped:
                    resplist.append([mdict[but], mtimes[but]])
                else:
                    resplist.append([mdict[but], None])
        # accept the click only if it landed inside a respmap shape
        if hasattr(self, 'respmap'):
            clicked = False
            for box in self.respmap:
                if box.contains(self.mouse):
                    resplist = [tuple(r+[box]) for r in resplist]
                    clicked = True
                    break
            if not clicked:
                resplist = []
    return resplist
def get_resp(self, keyList=None, timeStamped=False):
    """Collect keyboard and mouse responses into a single list.

    Keyboard events come from ``event.getKeys``; mouse clicks from
    :func:`get_mouse_resp`. Returns a (possibly empty) list.
    """
    keys = event.getKeys(keyList=keyList, timeStamped=timeStamped)
    if keys is None:
        keys = []
    clicks = self.get_mouse_resp(keyList=keyList, timeStamped=timeStamped)
    return keys + clicks
def last_keypress(self, keyList=None, timeStamped=False):
    """
    Extract the last key pressed from the event list.
    If exit key is pressed (default: 'Left Shift + Esc'), quits.
    :Returns:
        A list of keys pressed.
    """
    if keyList is None:
        keyList = self.keylist_flat
    # also listen to default keys (trigger, exit) on top of keyList;
    # duplicates from keyList + keylist_flat are harmless here
    this_keylist = self.get_resp(keyList=keyList+self.keylist_flat,
                                 timeStamped=timeStamped)
    keys = []
    for this_key in this_keylist:
        # _check_if_exit may quit the experiment; it also advances the
        # exit-key sequence counter
        isexit = self._check_if_exit(this_key)
        if not isexit:
            # any non-exit key breaks the exit-key sequence
            self._exit_key_no = 0
            isin_keylist = self._check_if_in_keylist(this_key, keyList)
            if isin_keylist:  # don't want to accept triggers and such
                keys.append(this_key)
    return keys
def _check_if_exit(self, this_key):
    """
    Checks if there one of the exit keys was pressed.
    Exit keys must be pressed in sequence; ``self._exit_key_no`` tracks
    how far into the sequence the participant currently is, and the
    experiment quits once the full sequence is completed.
    :Args:
        this_key (str or tuple)
            Key or time-stamped key to check
    :Returns:
        True if any of the ``self.computer.default_keys['exit']``
        keys were pressed, False otherwise.
    """
    exit_keys = self.computer.default_keys['exit']
    # time-stamped keys come as (name, time) tuples
    if isinstance(this_key, tuple):
        this_key_exit = this_key[0]
    else:
        this_key_exit = this_key
    if this_key_exit in exit_keys:
        if self._exit_key_no < len(exit_keys):
            if exit_keys[self._exit_key_no] == this_key_exit:
                if self._exit_key_no == len(exit_keys) - 1:
                    # full exit sequence completed
                    self.quit('Premature exit requested by user.')
                else:
                    # correct next key in the sequence; keep counting
                    self._exit_key_no += 1
            else:
                # an exit key, but out of order: restart the sequence
                self._exit_key_no = 0
    else:
        # not an exit key at all: restart the sequence
        self._exit_key_no = 0
    return self._exit_key_no > 0
def _check_if_in_keylist(self, this_key, keyList):
if isinstance(this_key, tuple):
this_key_check = this_key[0]
else:
this_key_check = this_key
return this_key_check in keyList
def before_event(self):
    """Draw all stimuli of the current event and flip the window."""
    for stimulus in self.this_event.display:
        stimulus.draw()
    self.win.flip()
def after_event(self):
    """Hook called after an event finishes; no-op by default."""
    pass
def wait_until_response(self, draw_stim=True):
    """
    Waits until a response key is pressed.
    Returns last key pressed, timestamped.
    :Kwargs:
        draw_stim (bool, default: True)
            Controls if stimuli should be drawn or have already
            been drawn (useful if you only want to redefine
            the drawing bit of this function).
    :Returns:
        A list of tuples with a key name (str) and a response time (float).
    """
    if draw_stim:
        self.before_event()
    event_keys = []
    event.clearEvents()  # key presses might be stored from before
    while len(event_keys) == 0:  # if the participant did not respond earlier
        if 'autort' in self.this_trial:
            # autorun: fabricate the response once the simulated RT elapses
            if self.trial_clock.getTime() > self.this_trial['autort']:
                event_keys = [(self.this_trial['autoresp'], self.this_trial['autort'])]
        else:
            # poll real key presses, timestamped against the trial clock
            event_keys = self.last_keypress(
                keyList=self.computer.valid_responses.keys(),
                timeStamped=self.trial_clock)
    return event_keys
def idle_event(self, draw_stim=True):
    """
    Default idle function for an event.
    Sits idle catching default keys (exit and trigger).
    :Kwargs:
        draw_stim (bool, default: True)
            Controls if stimuli should be drawn or have already
            been drawn (useful if you only want to redefine
            the drawing bit of this function).
    :Returns:
        A list of tuples with a key name (str) and a response time (float).
    """
    if draw_stim:
        self.before_event()
    event_keys = None
    event.clearEvents()  # key presses might be stored from before
    if self.this_event.dur == 0 or self.this_event.dur == np.inf:
        # untimed event: just sample keys once (exit/trigger handling)
        event_keys = self.last_keypress()
    else:
        # timed event: loop until the event duration elapses
        event_keys = self.wait()
    return event_keys
def feedback(self):
    """
    Gives feedback by changing fixation color.
    - Correct: fixation change to green
    - Wrong: fixation change to red
    If no stimulus in the current event's display is named 'fix' or
    'fixation', no color change is applied (but the event duration is
    still waited out).
    """
    this_resp = self.all_keys[-1]
    if hasattr(self, 'respmap'):
        subj_resp = this_resp[2]
    else:
        subj_resp = self.computer.valid_responses[this_resp[0]]
    # find which stimulus is fixation
    # BUG FIX: `fix` was left unbound (NameError) when no stimulus
    # matched; initialize it explicitly
    fix = None
    if isinstance(self.this_event.display, (list, tuple)):
        for stim in self.this_event.display:
            if stim.name in ['fixation', 'fix']:
                fix = stim
                break
    else:
        if self.this_event.display.name in ['fixation', 'fix']:
            fix = self.this_event.display
    if fix is not None:
        orig_color = fix.color  # store original color
        if self.this_trial['corr_resp'] == subj_resp:
            fix.setFillColor('DarkGreen')  # correct response
        else:
            fix.setFillColor('DarkRed')  # incorrect response
        for stim in self.this_event.display:
            stim.draw()
        self.win.flip()
    # sit idle until the event is over
    self.wait()
    # reset fixation color
    if fix is not None:
        fix.setFillColor(orig_color)
def wait(self):
    """
    Wait until the event is over, register key presses.
    :Returns:
        A list of tuples with a key name (str) and a response time (float).
    """
    pressed = []
    while self.check_continue():
        new_keys = self.last_keypress()
        if new_keys is not None:
            pressed.extend(new_keys)
    return pressed
def check_continue(self):
    """
    Check if the event is not over yet.
    Uses ``event_clock``, ``trial_clock``, and, if
    ``self.global_timing`` is True, ``glob_clock`` to check whether
    the current event is not over yet. The event cannot last longer
    than event and trial durations and also fall out of sync from
    global clock.
    :Returns:
        bool: True while the event should keep running, False once any
        of the applicable clocks has run out.
    """
    # event must still be within its own duration
    event_on = self.event_clock.getTime() < self.this_event.dur
    if self.global_timing:
        # additionally stay within trial duration and the cumulative
        # experiment schedule so events do not drift out of sync
        trial_on = self.trial_clock.getTime() < self.this_trial['dur']
        time_on = self.glob_clock.getTime() < self.cumtime + self.this_trial['dur']
    else:
        trial_on = True
        time_on = True
    return (event_on and trial_on and time_on)
def set_autorun(self, exp_plan):
    """
    Automatically runs experiment by simulating key responses.
    This is just the absolute minimum for autorunning. Best practice would
    be extend this function to simulate responses according to your
    hypothesis.
    :Args:
        exp_plan (list of dict)
            A list of trial definitions.
    :Returns:
        exp_plan with ``autoresp`` and ``autort`` columns included.
    """
    def rt(mean):
        # simulated RT: first event's duration plus gaussian noise,
        # compressed by the autorun speed-up factor
        add = np.random.normal(mean, scale=.2)/self.rp['autorun']
        return self.trial[0].dur + add
    # map responses back to key names so we can pick a random key
    inverse_resp = invert_dict(self.computer.valid_responses)
    for trial in exp_plan:
        # here you could do if/else to assign different values to
        # different conditions according to your hypothesis
        # NOTE: .values() passed to random.choice is Python 2 style
        # (in py3 it would need list())
        trial['autoresp'] = random.choice(inverse_resp.values())
        trial['autort'] = rt(.5)
    return exp_plan
def set_TrialHandler(self, trial_list, trialmap=None):
    """
    Converts a list of trials into a `~psychopy.data.TrialHandler`,
    finalizing the experimental setup procedure.
    :Args:
        trial_list (list of dict)
            The trials of the current block.
    :Kwargs:
        trialmap (list of int, default: None)
            Positions of these trials in the original ``exp_plan``;
            defaults to sequential indices.
    """
    # re-seed per block so each block's randomization is reproducible
    if len(self.blocks) > 1:
        self.set_seed()
    TrialHandler.__init__(self,
        trial_list,
        nReps=self.nReps,
        method=self.method,
        extraInfo=self.info,
        name=self.name,
        seed=self.seed)
    if trialmap is None:
        self.trialmap = range(len(trial_list))
    else:
        self.trialmap = trialmap
def get_blocks(self):
    """
    Finds blocks in the given column of ``self.exp_plan``.
    The relevant column is stored in ``self.blockcol`` which is
    given by the user when initializing the experiment class.
    Produces a list of trial lists and trial mapping for each block.
    Trial mapping indicates where each trial is in the original
    `exp_plan` list.
    The output is stored in ``self.blocks``.
    """
    if self.blockcol is not None:
        # block label of every trial, in presentation order
        blocknos = np.array([trial[self.blockcol] for trial in self.exp_plan])
        # unique block labels (np.unique returns them sorted; idx are
        # the first-occurrence indices)
        _, idx = np.unique(blocknos, return_index=True)
        blocknos = blocknos[idx].tolist()
        blocks = [None] * len(blocknos)
        # group trials (and their original positions) by block label
        for trialno, trial in enumerate(self.exp_plan):
            blockno = blocknos.index(trial[self.blockcol])
            if blocks[blockno] is None:
                blocks[blockno] = [[trial], [trialno]]
            else:
                blocks[blockno][0].append(trial)
                blocks[blockno][1].append(trialno)
    else:
        # no block column: everything is a single block
        blocks = [[self.exp_plan, range(len(self.exp_plan))]]
    self.blocks = blocks
def before_task(self, text=None, wait=.5, wait_stim=None, **kwargs):
    """Shows text from docstring explaining the task.
    :Kwargs:
        - text (str, default: None)
            Text to show.
        - wait (float, default: .5)
            How long to wait after the end of showing instructions,
            in seconds.
        - wait_stim (stimulus or a list of stimuli, default: None)
            During this waiting, which stimuli should be shown.
            Usually, it would be a fixation spot.
        - \*\*kwargs
            Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
    """
    if len(self.parent.tasks) > 1:
        # if there are no blocks, try to show fixation
        if wait_stim is None:
            if len(self.blocks) <= 1:
                try:
                    wait_stim = self.s['fix']
                except:
                    # no fixation available: don't linger on the screen
                    wait = 0
            else:
                # multiple blocks: before_block will handle the waiting
                wait = 0
    # default to the task's docstring as the instructions text
    if text is None:
        self.show_text(text=self.__doc__, wait=wait,
                       wait_stim=wait_stim, **kwargs)
    else:
        self.show_text(text=text, wait=wait,
                       wait_stim=wait_stim, **kwargs)
def run_task(self):
    """Sets up the task and runs it.
    If ``self.blockcol`` is defined, then runs block-by-block.
    The data file stays open for the whole task and each block gets its
    own TrialHandler restricted to that block's trials.
    """
    self.setup_task()
    self.before_task()
    self.datafile.open()
    for blockno, (block, trialmap) in enumerate(self.blocks):
        self.this_blockn = blockno
        # set TrialHandler only to the current block
        self.set_TrialHandler(block, trialmap=trialmap)
        self.run_block()
    self.datafile.close()
    self.after_task()
def after_task(self, text=None, auto=1, **kwargs):
    """Hook for showing feedback once a task is done (e.g. accuracy).
    :Kwargs:
        - text (str, default: None)
            Text to show. If None, nothing is shown.
        - auto (float, default: 1)
            Duration of time-out of the feedback screen, in seconds.
        - **kwargs
            Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
    """
    if text is None:
        return
    self.show_text(text, auto=auto, **kwargs)
def before_block(self, text=None, auto=1, wait=.5, wait_stim=None):
    """Show text before the block starts.
    Will not show anything if there's only one block.
    :Kwargs:
        - text (str, default: None)
            Text to show. If None, defaults to showing block number.
        - wait (float, default: .5)
            How long to wait after the end of showing instructions,
            in seconds.
        - wait_stim (stimulus or a list of stimuli, default: None)
            During this waiting, which stimuli should be shown.
            Usually, it would be a fixation spot. If None, this
            fixation spot will be attempted to be drawn.
        - auto (float, default: 1)
            Duration of time-out of the instructions screen,
            in seconds.
    """
    if len(self.blocks) > 1:
        # try to show fixation while waiting, if one exists
        if wait_stim is None:
            try:
                wait_stim = self.s['fix']
            except:
                pass
        if text is None:
            # default message: 1-based block number
            self.show_text(text='Block %d' % (self.this_blockn+1),
                           auto=auto, wait=wait, wait_stim=wait_stim)
        else:
            self.show_text(text=text, auto=auto, wait=wait, wait_stim=wait_stim)
def run_block(self):
    """Run a block in a task.
    Resets all timing clocks, then iterates over the TrialHandler
    (``self``) running each trial in turn.
    """
    self.before_block()
    # set up clocks
    self.glob_clock = core.Clock()   # whole-block schedule
    self.trial_clock = core.Clock()  # reset at each trial onset
    self.event_clock = core.Clock()  # reset at each event onset
    self.cumtime = 0  # cumulative planned duration of finished trials
    # go over the trial sequence
    for this_trial in self:
        self.this_trial = this_trial
        self.run_trial()
    self.after_block()
def after_block(self, text=None, **kwargs):
    """Show text at the end of a block.
    Will not show this text after the last block in the task.
    :Kwargs:
        - text (str, default: None)
            Text to show. If None, will default to
            'Pause. Hit ``trigger`` to continue.'
        - \*\*kwargs
            Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
    """
    # clear trial counting in the terminal (overwrite with spaces)
    sys.stdout.write('\r' + ' '*70)
    sys.stdout.write('\r')
    sys.stdout.flush()
    if text is None:
        text = ('Pause. Hit %s to continue.' %
                self.computer.default_keys['trigger'])
    # don't show this after the last block
    if self.this_blockn+1 < len(self.blocks):
        self.show_text(text=text, **kwargs)
def before_trial(self):
    """Hook executed just before each trial starts; no-op by default."""
    pass
def run_trial(self):
    """Presents a trial.
    Computes the trial duration from its events, runs every event,
    collects responses, corrects timing for autorun, and writes the
    trial's data row to the data file.
    """
    self.before_trial()
    self.trial_clock.reset()
    self.this_trial['onset'] = self.glob_clock.getTime()
    # live trial counter in the terminal (overwritten in place)
    sys.stdout.write('\rtrial %s' % (self.thisTrialN+1))
    sys.stdout.flush()
    # total trial duration = sum of event durations (possibly taken
    # from a per-trial column named in ev.durcol)
    self.this_trial['dur'] = 0
    for ev in self.trial:
        if ev.durcol is not None:
            ev.dur = self.this_trial[ev.durcol]
        self.this_trial['dur'] += ev.dur
    self.all_keys = []
    self.rectimes = []
    for event_no, this_event in enumerate(self.trial):
        self.this_event = this_event
        self.event_no = event_no
        self.run_event()
    # if autorun and responses were not set yet, get them now
    if len(self.all_keys) == 0 and self.rp['autorun'] > 0:
        self.all_keys += [(self.this_trial['autoresp'], self.this_trial['autort'])]
    self.post_trial()
    # correct timing if autorun: scale recorded times back up so the
    # stored data looks like a real-time run
    if self.rp['autorun'] > 0:
        try:
            self.this_trial['autort'] *= self.rp['autorun']
            self.this_trial['rt'] *= self.rp['autorun']
        except:  # maybe not all keys are present
            pass
        self.this_trial['onset'] *= self.rp['autorun']
        self.this_trial['dur'] *= self.rp['autorun']
    # header written only once (Datafile handles duplicates);
    # note: py2-style dict.keys()/values() list concatenation
    self.datafile.write_header(self.info.keys() + self.this_trial.keys())
    self.datafile.write(self.info.values() + self.this_trial.values())
    self.cumtime += self.this_trial['dur']
    # update exp_plan with new values
    try:
        self.exp_plan[self.trialmap[self.thisIndex]] = self.this_trial
    except:  # for staircase
        self.exp_plan.append(self.this_trial)
def after_trial(self):
    """Backward-compatible alias for
    :func:`~psychopy_ext.exp.Task.post_trial()`."""
    self.post_trial()
def post_trial(self):
    """A default function what to do after a trial is over.
    It records the participant's response as the last key pressed,
    calculates accuracy based on the expected (correct) response value,
    and records the time of the last key press with respect to the onset
    of a trial. If no key was pressed, participant's response and response
    time are recorded as an empty string, while accuracy is assigned a
    'No response'.
    Reads ``self.this_trial`` (dict of trial properties) and
    ``self.all_keys`` (list of (key, time) tuples) and fills in
    ``subj_resp``, ``accuracy``, and ``rt`` in ``self.this_trial``.
    """
    if len(self.all_keys) > 0:
        # last key press counts as the response
        this_resp = self.all_keys.pop()
        if hasattr(self, 'respmap'):
            # respmap responses carry the clicked shape as third element
            subj_resp = this_resp[2]
        else:
            subj_resp = self.computer.valid_responses[this_resp[0]]
        self.this_trial['subj_resp'] = subj_resp
        # accuracy is skipped silently when corr_resp is absent
        try:
            acc = signal_det(self.this_trial['corr_resp'], subj_resp)
        except:
            pass
        else:
            self.this_trial['accuracy'] = acc
        self.this_trial['rt'] = this_resp[1]
    else:
        # no response given
        self.this_trial['subj_resp'] = ''
        try:
            acc = signal_det(self.this_trial['corr_resp'], self.this_trial['subj_resp'])
        except:
            pass
        else:
            self.this_trial['accuracy'] = acc
        self.this_trial['rt'] = ''
def run_event(self):
    """Presents a trial and catches key presses.
    Runs the current event's ``func``, normalizes its return value to a
    list of key presses, and sweeps up any remaining key presses that
    the event function did not collect itself.
    """
    # go over each event in a trial
    self.event_clock.reset()
    self.mouse.clickReset()
    # show stimuli
    event_keys = self.this_event.func()
    # normalize: a single (key, time) tuple becomes a one-item list,
    # None becomes an empty list
    if isinstance(event_keys, tuple):
        event_keys = [event_keys]
    elif event_keys is None:
        event_keys = []
    if len(event_keys) > 0:
        self.all_keys += event_keys
    # this is to get keys if we did not do that during trial
    self.all_keys += self.last_keypress(
        keyList=self.computer.valid_responses.keys(),
        timeStamped=self.trial_clock)
    #if self.this_event.rectime:
        #if len(self.win.frameIntervals) > 0:
            #self.rectimes.append(self.win.frameIntervals[-1])
            #self.win.frameIntervals = []
def get_behav_df(self, pattern='%s'):
    """
    Extracts data from files for data analysis.
    Thin wrapper around the module-level ``get_behav_df`` helper,
    supplying this experiment's participant ID.
    :Kwargs:
        pattern (str, default: '%s')
            A string with formatter information. Usually it contains a path
            to where data is and a formatter such as '%s' to indicate where
            participant ID should be incorporated.
    :Returns:
        A `pandas.DataFrame` of data for the requested participants.
    """
    return get_behav_df(self.info['subjid'], pattern=pattern)
class SVG(object):
    """Exports PsychoPy stimuli to an SVG drawing (requires `svgwrite`).

    Window coordinates (origin at center, y up) are converted to SVG
    pixel coordinates (origin at top-left, y down).
    """

    def __init__(self, win, filename='image'):
        if no_svg:
            raise ImportError("Module 'svgwrite' not found.")
        #visual.helpers.setColor(win, win.color)
        # force full contrast so colors export as-is
        win.contrast = 1
        self.win = win
        self.aspect = self.win.size[0]/float(self.win.size[1])
        self.open(filename)

    def open(self, filename):
        """Create the SVG drawing sized to the window, with a background
        rectangle in the window's color."""
        filename = filename.split('.svg')[0]
        self.svgfile = svgwrite.Drawing(profile='tiny', filename='%s.svg' % filename,
            size=('%dpx' % self.win.size[0],
                  '%dpx' % self.win.size[1]),
            # set default units to px; from http://stackoverflow.com/a/13008664
            viewBox=('%d %d %d %d' %
                     (0, 0,
                      self.win.size[0],
                      self.win.size[1]))
            )
        bkgr = self.svgfile.rect(insert=(0, 0), size=('100%', '100%'),
                                 fill=self.color2rgb255(self.win))
        self.svgfile.add(bkgr)

    def save(self):
        """Write the SVG file to disk."""
        self.svgfile.save()

    def color2attr(self, stim, attr, color='black', colorSpace=None, kwargs=None):
        """Fill `kwargs` with SVG color attributes (`attr` and
        `attr`_opacity) for the given stimulus color; a None color maps
        to full transparency."""
        if kwargs is None:
            kwargs = {}
        col = self.color2rgb255(stim, color=color, colorSpace=colorSpace)
        if col is None:
            kwargs[attr + '_opacity'] = 0
        else:
            kwargs[attr] = col
            kwargs[attr + '_opacity'] = 1
        return kwargs

    def write(self, stim):
        """Convert a single PsychoPy stimulus to SVG element(s) and add
        them to the drawing. Unknown stimuli must provide a `to_svg`
        method."""
        if 'Circle' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            svgstim = self.svgfile.circle(
                center=self.get_pos(stim),
                r=self.get_size(stim, stim.radius),
                stroke_width=stim.lineWidth,
                opacity=stim.opacity,
                **color_kw
                )
        elif 'ImageStim' in str(stim):
            # BUG FIX: was `raise NotImplemented` -- raising the
            # NotImplemented constant is itself a TypeError
            raise NotImplementedError
        elif 'Line' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            svgstim = self.svgfile.line(
                start=self.get_pos(stim, stim.start),
                end=self.get_pos(stim, stim.end),
                stroke_width=stim.lineWidth,
                opacity=stim.opacity,
                **color_kw
                )
        elif 'Polygon' in str(stim):
            raise NotImplementedError  # BUG FIX: was `raise NotImplemented`
            #svgstim = self.svgfile.polygon(
                #points=...,
                #stroke_width=stim.lineWidth,
                #stroke=self.color2rgb255(stim, color=stim.lineColor,
                                         #colorSpace=stim.lineColorSpace),
                #fill=self.color2rgb255(stim, color=stim.fillColor,
                                       #colorSpace=stim.fillColorSpace)
                #)
        elif 'Rect' in str(stim):
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            svgstim = self.svgfile.rect(
                # SVG rects are inserted at their top-left corner
                insert=self.get_pos(stim, offset=(-stim.width/2., -stim.height/2.)),
                size=(self.get_size(stim, stim.width), self.get_size(stim, stim.height)),
                stroke_width=stim.lineWidth,
                opacity=stim.opacity,
                **color_kw
                )
        elif 'ThickShapeStim' in str(stim):
            svgstim = stim.to_svg(self)
        elif 'ShapeStim' in str(stim):
            points = self._calc_attr(stim, np.array(stim.vertices))
            points[:, 1] *= -1  # SVG y axis points down
            color_kw = self.color2attr(stim, 'stroke', color=stim.lineColor,
                                       colorSpace=stim.lineColorSpace)
            color_kw = self.color2attr(stim, 'fill', color=stim.fillColor,
                                       colorSpace=stim.fillColorSpace,
                                       kwargs=color_kw)
            if stim.closeShape:
                svgstim = self.svgfile.polygon(
                    points=points,
                    stroke_width=stim.lineWidth,
                    opacity=stim.opacity,
                    **color_kw
                    )
            else:
                svgstim = self.svgfile.polyline(
                    points=points,
                    stroke_width=stim.lineWidth,
                    opacity=stim.opacity,
                    **color_kw
                    )
            # vertices are relative to the stimulus position
            tr = self.get_pos(stim)
            svgstim.translate(tr[0], tr[1])
        elif 'SimpleImageStim' in str(stim):
            raise NotImplementedError  # BUG FIX: was `raise NotImplemented`
        elif 'TextStim' in str(stim):
            if stim.font == '':
                font = 'arial'
            else:
                font = stim.font
            svgstim = self.svgfile.text(text=stim.text,
                # shift down by half the text height to approximate
                # vertical centering
                insert=self.get_pos(stim) + np.array([0, stim.height/2.]),
                fill=self.color2rgb255(stim),
                font_family=font,
                font_size=self._calc_attr(stim, stim.height),
                text_anchor='middle',
                opacity=stim.opacity
                )
        else:
            svgstim = stim.to_svg(self)
        if not isinstance(svgstim, list):
            svgstim = [svgstim]
        for st in svgstim:
            self.svgfile.add(st)

    def get_pos(self, stim, pos=None, offset=None):
        """Return the stimulus position in SVG pixel coordinates
        (top-left origin), with an optional offset in stimulus units."""
        if pos is None:
            pos = stim.pos
        if offset is not None:
            offset = self._calc_attr(stim, np.array(offset))
        else:
            offset = np.array([0, 0])
        pos = self._calc_attr(stim, pos)
        # window center + flipped y
        pos = self.win.size/2 + np.array([pos[0], -pos[1]]) + offset
        return pos

    def get_size(self, stim, size=None):
        """Return the stimulus size converted to pixels."""
        if size is None:
            size = stim.size
        size = self._calc_attr(stim, size)
        return size

    def _calc_attr(self, stim, attr):
        """Convert a scalar or (x, y) attribute from the stimulus' units
        to pixels. Scalars use the vertical dimension for 'height' and
        'norm' units.
        :Raises: NotImplementedError for unsupported units.
        """
        if stim.units == 'height':
            # `len(attr)` raising TypeError distinguishes scalars from pairs
            try:
                len(attr) == 2
            except:
                out = (attr * stim.win.size[1])
            else:
                out = (attr * stim.win.size * np.array([1./self.aspect, 1]))
        elif stim.units == 'norm':
            try:
                len(attr) == 2
            except:
                out = (attr * stim.win.size[1]/2)
            else:
                out = (attr * stim.win.size/2)
        elif stim.units == 'pix':
            out = attr
        elif stim.units == 'cm':
            out = misc.cm2pix(attr, stim.win.monitor)
        elif stim.units in ['deg', 'degs']:
            out = misc.deg2pix(attr, stim.win.monitor)
        else:
            raise NotImplementedError
        return out

    def color2rgb255(self, stim, color=None, colorSpace=None):
        """
        Convert color to RGB255 while adding contrast
        #Requires self.color, self.colorSpace and self.contrast
        Modified from psychopy.visual.BaseVisualStim._getDesiredRGB
        """
        if color is None:
            color = stim.color
        if isinstance(color, str) and stim.contrast == 1:
            color = color.lower()  # keep the nice name
        else:
            # Ensure that we work on 0-centered color (to make negative contrast values work)
            if colorSpace is None:
                colorSpace = stim.colorSpace
            if colorSpace not in ['rgb', 'dkl', 'lms', 'hsv']:
                color = (color / 255.0) * 2 - 1
            # Convert to RGB in range 0:1 and scaled for contrast
            # although the shader then has to convert it back it gets clamped en route otherwise
            try:
                color = (color * stim.contrast + 1) / 2.0 * 255
                color = 'rgb(%d,%d,%d)' % (color[0], color[1], color[2])
            except:
                color = None
        return color
class Datafile(object):
    def __init__(self, filename, writeable=True, header=None):
        """
        A convenience class for managing data files.
        Output is recorded in a comma-separated (csv) file.
        .. note:: In the output file, floats are formatted to 1 ms precision so
                  that output files are nice.
        :Args:
            filename (str)
                Path to the file name
        :Kwargs:
            - writeable (bool, default: True)
                Can data be written in file or not. Might seem a bit silly
                but it is actually very useful because you can create
                a file and tell it to write data without thinking
                whether `no_output` is set.
            - header (list, default: None)
                If you give a header, then it will already be written
                in the datafile. Usually it's better to wait and write
                it only when the first data line is available.
                NOTE(review): writing a header at construction time
                requires the file to already carry one, since the csv
                writer only exists after :func:`open` -- confirm intended.
        """
        self.filename = filename
        self.writeable = writeable
        self._header_written = False
        if header is not None:
            self.write_header(header)
        else:
            self.header = header

    def open(self):
        """Opens a csv file for appending data (no-op if not writeable).
        Creates missing directories along the path.
        :Raises: IOError if the file cannot be opened for writing.
        """
        if self.writeable:
            try_makedirs(os.path.dirname(self.filename))
            try:
                self.dfile = open(self.filename, 'ab')
                self.datawriter = csv.writer(self.dfile, lineterminator='\n')
            except IOError:
                raise IOError('Cannot write to the data file %s!' % self.filename)

    def close(self):
        """Closes the file (no-op if not writeable).
        """
        if self.writeable:
            self.dfile.close()

    def write(self, data):
        """
        Writes data list to a file.
        .. note:: In the output file, floats are formatted to 1 ms precision so
                  that output files are nice.
        :Args:
            data (list)
                A list of values to write in a datafile
        """
        if self.writeable:
            # cut down floats to 1 ms precision
            dataf = ['%.3f' % i if isinstance(i, float) else i for i in data]
            self.datawriter.writerow(dataf)

    def write_header(self, header):
        """Determines if a header should be writen in a csv data file.
        Works by reading the first line and comparing it to the given header.
        If the header already is present, then a new one is not written.
        :Args:
            header (list of str)
                A list of column names
        """
        self.header = header
        if self.writeable and not self._header_written:
            write_head = False
            # no header needed if the file already exists and has one
            try:
                dataf_r = open(self.filename, 'rb')
                dataread = csv.reader(dataf_r)
            except (IOError, OSError):  # file missing: nothing to compare
                pass
            else:
                try:
                    # BUG FIX: was `dataread.next()` (Python 2-only);
                    # the next() builtin works on py2.6+ and py3.
                    # csv.Error covers unreadable first lines.
                    header_file = next(dataread)
                except (StopIteration, csv.Error):  # empty file
                    write_head = True
                else:
                    # rewrite only when the existing header differs
                    write_head = (header != header_file)
                dataf_r.close()
            if write_head:
                self.datawriter.writerow(header)
            self._header_written = True
class Experiment(ExperimentHandler, Task):
def __init__(self,
             name='',
             version='0.1',
             info=None,
             rp=None,
             actions=None,
             computer=default_computer,
             paths=None,
             data_fname=None,
             **kwargs
             ):
    """
    An extension of ExperimentHandler and TrialHandler with many
    useful functions.
    .. note:: When you inherit this class, you must have at least
              ``info`` and ``rp`` (or simply ``**kwargs``) keywords
              because :class:`~psychopy.ui.Control` expects them.
    :Kwargs:
        - name (str, default: '')
            Name of the experiment. It will be used to call the
            experiment from the command-line.
        - version (str, default: '0.1')
            Version of your experiment.
        - info (tuple, list of tuples, or dict, default: None)
            Information about the experiment that you want to see in the
            output file. This is equivalent to PsychoPy's ``extraInfo``.
            It will contain at least ``('subjid', 'subj')`` even if a
            user did not specify that.
        - rp (tuple, list of tuples, or dict, default: None)
            Run parameters that apply for this particular run but need
            not be stored in the data output. It will contain at least
            the following::
                [('no_output', False), # do you want output? or just playing around?
                 ('debug', False), # not fullscreen presentation etc
                 ('autorun', 0), # if >0, will autorun at the specified speed
                 ('unittest', False), # like autorun but no breaks at show_instructions
                 ('repository', ('do nothing', 'commit and push', 'only commit')), # add, commit and push to a hg repo?
                 # add and commit changes, like new data files?
                 ]
        - actions (list of function names, default: None)
            A list of function names (as ``str``) that can be called from
            GUI.
        - computer (module, default: ``default_computer``)
            Computer parameter module.
        - paths (dict, default: None)
            A dictionary of paths where to store different outputs.
            If None, :func:`~psychopy_ext.exp.set_paths()` is called.
        - data_fname (str, default=None)
            The name of the main data file for storing output. If None,
            becomes ``self.paths['data'] + self.info['subjid'] + '.csv'``.
            Then a :class:`~psychopy_ext.exp.Datafile` instance is
            created in ``self.datafile`` for easy writing to a csv
            format.
        - \*\*kwargs
    """
    ExperimentHandler.__init__(self,
        name=name,
        version=version,
        extraInfo=info,
        dataFileName='.data'  # for now so that PsychoPy doesn't complain
        )
    self.computer = computer
    if paths is None:
        self.paths = set_paths()
    else:
        self.paths = paths
    self._initialized = False
    # minimal parameters that Experiment expects in info and rp
    self.info = OrderedDict([('subjid', 'subj')])
    if info is not None:
        # accept a single tuple, a list of tuples, or a dict
        if isinstance(info, (list, tuple)):
            try:
                info = OrderedDict(info)
            except:
                # a single (key, value) tuple rather than a list of them
                info = OrderedDict([info])
        self.info.update(info)
    self.rp = OrderedDict([  # these control how the experiment is run
        ('no_output', False),  # do you want output? or just playing around?
        ('debug', False),  # not fullscreen presentation etc
        ('autorun', 0),  # if >0, will autorun at the specified speed
        ('unittest', False),  # like autorun but no breaks when instructions shown
        ('repository', ('do nothing', 'commit & push', 'only commit')),  # add, commit and push to a hg repo?
            # add and commit changes, like new data files?
        ])
    if rp is not None:
        if isinstance(rp, (tuple, list)):
            try:
                rp = OrderedDict(rp)
            except:
                rp = OrderedDict([rp])
        self.rp.update(rp)
    #if not self.rp['notests']:
        #run_tests(self.computer)
    self.actions = actions
    # main output file; disabled when no_output is requested
    if data_fname is None:
        filename = self.paths['data'] + self.info['subjid'] + '.csv'
        self.datafile = Datafile(filename, writeable=not self.rp['no_output'])
    else:
        self.datafile = Datafile(data_fname, writeable=not self.rp['no_output'])
    # unittest implies a very fast autorun
    if self.rp['unittest']:
        self.rp['autorun'] = 100
    self.tasks = []  # a list to store all tasks for this exp
    Task.__init__(self,
                  self,
                  #name=name,
                  version=version,
                  **kwargs
                  )
def __str__(self, **kwargs):
"""string representation of the object"""
return 'psychopy_ext.exp.Experiment'
#def add_tasks(self, tasks):
#if isinstance(tasks, str):
#tasks = [tasks]
#for task in tasks:
#task = task()
#task.computer = self.computer
#task.win = self.win
#if task.info is not None:
#task.info.update(self.info)
#if task.rp is not None:
#task.rp.update(self.rp)
#self.tasks.append(task)
    def set_logging(self, logname='log.log', level=logging.WARNING):
        """Setup files for saving logging information.

        New folders might be created.  Unless ``self.rp['no_output']`` is
        set, a PsychoPy log file is opened in append mode and stored in
        ``self.logfile``; otherwise ``self.logfile`` is set to None.
        System information and the command line are written first for a
        fresh file.

        :Kwargs:
            - logname (str, default: 'log.log')
                The log file name; '.log' is appended if missing.
            - level (int, default: logging.WARNING)
                Minimum message level for both the file and the console.
        """
        if not self.rp['no_output']:
            # add .log if no extension given
            if not logname.endswith('.log'): logname += '.log'
            # Setup logging file
            try_makedirs(os.path.dirname(logname))
            if os.path.isfile(logname):
                writesys = False  # we already have sysinfo there
            else:
                writesys = True
            self.logfile = logging.LogFile(logname, filemode='a', level=level)
            # Write system information first (only once per file)
            if writesys:
                self.logfile.write('%s' % self.runtime_info)
                self.logfile.write('\n\n\n' + '#'*40 + '\n\n')
            # record how this session was started and when
            self.logfile.write('$ python %s\n\n' % ' '.join(sys.argv))
            self.logfile.write('Start time: %s\n\n' % data.getDateStr(format="%Y-%m-%d %H:%M"))
        else:
            self.logfile = None
        # output to the screen
        logging.console.setLevel(level)
def create_seed(self, seed=None):
"""
SUPERSEDED by `psychopy.info.RunTimeInfo`
Creates or assigns a seed for a reproducible randomization.
When a seed is set, you can, for example, rerun the experiment with
trials in exactly the same order as before.
:Kwargs:
seed (int, default: None)
Pass a seed if you already have one.
:Returns:
self.seed (int)
"""
if seed is None:
try:
self.seed = np.sum([ord(d) for d in self.info['date']])
except:
self.seed = 1
logging.warning('No seed provided. Setting seed to 1.')
else:
self.seed = seed
return self.seed
def _guess_participant(self, data_path, default_subjid='01'):
"""Attempts to guess participant ID (it must be int).
.. :Warning:: Not usable yet
First lists all csv files in the data_path, then finds a maximum.
Returns maximum+1 or an empty string if nothing is found.
"""
datafiles = glob.glob(data_path+'*.csv')
partids = []
#import pdb; pdb.set_trace()
for d in datafiles:
filename = os.path.split(d)[1] # remove the path
filename = filename.split('.')[0] # remove the extension
partid = filename.split('_')[-1] # take the numbers at the end
try:
partids.append(int(partid))
except:
logging.warning('Participant ID %s is invalid.' %partid)
if len(partids) > 0: return '%02d' %(max(partids) + 1)
else: return default_subjid
def _guess_runno(self, data_path, default_runno = 1):
"""Attempts to guess run number.
.. :Warning:: Not usable yet
First lists all csv files in the data_path, then finds a maximum.
Returns maximum+1 or an empty string if nothing is found.
"""
if not os.path.isdir(data_path): runno = default_runno
else:
datafiles = glob.glob(data_path + '*.csv')
# Splits file names into ['data', %number%, 'runType.csv']
allnums = [int(os.path.basename(thisfile).split('_')[1]) for thisfile in datafiles]
if allnums == []: # no data files yet
runno = default_runno
else:
runno = max(allnums) + 1
# print 'Guessing runNo: %d' %runNo
return runno
def get_mon_sizes(self, screen=None):
warnings.warn('get_mon_sizes is deprecated; '
'use exp.get_mon_sizes instead')
return get_mon_sizes(screen=screen)
def create_win(self, debug=False, color='DimGray', units='deg',
winType='pyglet', **kwargs):
"""Generates a :class:`psychopy.visual.Window` for presenting stimuli.
:Kwargs:
- debug (bool, default: False)
- If True, then the window is half the screen size.
- If False, then the windon is full screen.
- color (str, str with a hexadecimal value, or a tuple of 3 values, default: "DimGray')
Window background color. Default is dark gray. (`See accepted
color names <http://www.w3schools.com/html/html_colornames.asp>`_
"""
current_level = logging.getLevel(logging.console.level)
logging.console.setLevel(logging.ERROR)
monitor = monitors.Monitor(self.computer.name,
distance=self.computer.distance,
width=self.computer.width)
logging.console.setLevel(current_level)
res = get_mon_sizes(self.computer.screen)
monitor.setSizePix(res)
if 'size' not in kwargs:
try:
kwargs['size'] = self.computer.win_size
except:
if not debug:
kwargs['size'] = tuple(res)
else:
kwargs['size'] = (res[0]/2, res[1]/2)
for key in kwargs:
if key in ['monitor', 'fullscr', 'allowGUI', 'screen', 'viewScale']:
del kwargs[key]
self.win = visual.Window(
monitor=monitor,
units=units,
fullscr=not debug,
allowGUI=debug, # mouse will not be seen unless debugging
color=color,
winType=winType,
screen=self.computer.screen,
viewScale=self.computer.view_scale,
**kwargs
)
self.win.flip_orig = self.win.flip
self.win.flipnames = []
    def setup(self):
        """
        Initializes the experiment.

        A random seed is set for `random` and `numpy.random`. The seed
        is set using the 'set:time' option.
        Also, runtime information is fully recorded, log file is set
        and a window is created.
        """
        # If the experiment script is readable, let RunTimeInfo look up
        # __author__/__version__ itself (None means "detect"); otherwise
        # pass the string 'None' so it does not try.
        try:
            with open(sys.argv[0], 'r') as f: lines = f.read()
        except:
            author = 'None'
            version = 'None'
        else:
            author = None
            version = None

        #if not self.rp['no_output']:
        self.runtime_info = psychopy.info.RunTimeInfo(author=author,
                version=version, verbose=True, win=False)
        key, value = get_version()
        self.runtime_info[key] = value  # updates with psychopy_ext version
        self._set_keys_flat()
        # seed numpy's RNG from the experiment start time for reproducibility
        self.seed = int(self.runtime_info['experimentRunTime.epoch'])
        np.random.seed(self.seed)
        #else:
            #self.runtime_info = None
            #self.seed = None

        self.set_logging(self.paths['logs'] + self.info['subjid'])
        self.create_win(debug=self.rp['debug'])
        self.mouse = event.Mouse(win=self.win)
        self._initialized = True
        #if len(self.tasks) == 0:
            ##self.setup = Task.setup
            #Task.setup(self)
    def before_exp(self, text=None, wait=.5, wait_stim=None, **kwargs):
        """
        Instructions at the beginning of the experiment.

        If no text is given, the class docstring (``self.__doc__``) is
        shown instead.  After showing the text, frame-interval recording
        is switched on for the window.

        :Kwargs:
            - text (str, default: None)
                Text to show.
            - wait (float, default: .5)
                How long to wait after the end of showing instructions,
                in seconds.
            - wait_stim (stimulus or a list of stimuli, default: None)
                During this waiting, which stimuli should be shown.
                Usually, it would be a fixation spot.  If None, the 'fix'
                stimulus is used when available and there is at most one
                task; otherwise the wait is skipped.
            - \*\*kwargs
                Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
        """
        if wait_stim is None:
            if len(self.tasks) <= 1:
                try:
                    wait_stim = self.s['fix']
                except:
                    # no fixation stimulus available -- skip waiting
                    wait = 0
            else:
                # with multiple tasks each task handles its own waiting
                wait = 0
        if text is None:
            self.show_text(text=self.__doc__, wait=wait,
                           wait_stim=wait_stim, **kwargs)
        else:
            self.show_text(text=text, wait=wait, wait_stim=wait_stim,
                           **kwargs)
        #self.win._refreshThreshold=1/85.0+0.004
        # start monitoring for dropped frames
        self.win.setRecordFrameIntervals(True)
def run(self):
"""Alias to :func:`~psychopy_ext.exp.Experiment.run_exp()`
"""
self.run_exp()
    def run_exp(self):
        """Sets everything up and calls tasks one by one.

        Flow: :func:`setup` -> :func:`before_exp` -> each task in
        ``self.tasks`` (or :func:`run_task` on the experiment itself if no
        tasks are defined) -> :func:`after_exp` -> :func:`repo_action` ->
        :func:`quit`.

        At the end, committing to a repository is possible. Use
        ``register`` and ``push`` flags (see
        :class:`~psychopy_ext.exp.Experiment` for more)
        """
        self.setup()
        self.before_exp()
        if len(self.tasks) == 0:
            # no separate tasks: the experiment acts as the single task
            self.run_task()
        else:
            for task in self.tasks:
                task(self).run_task()
        self.after_exp()
        self.repo_action()
        self.quit()
def after_exp(self, text=None, auto=1, **kwargs):
"""Text after the experiment is over.
:Kwargs:
- text (str, default: None)
Text to show. If None, defaults to
'End of Experiment. Thank you!'
- auto (float, default: 1)
Duration of time-out of the instructions screen,
in seconds.
- \*\*kwargs
Other parameters for :func:`~psychopy_ext.exp.Task.show_text()`
"""
if text is None:
self.show_text(text='End of Experiment. Thank you!',
auto=auto, **kwargs)
else:
self.show_text(text=text, auto=auto, **kwargs)
def autorun(self):
"""
Automatically runs the experiment just like it would normally
work but automatically (as defined in
:func:`~psychopy_ext.exp.set_autorun()`) and
at the speed specified by `self.rp['autorun']` parameter. If
speed is not specified, it is set to 100.
"""
if not hasattr(self.rp, 'autorun'):
self.rp['autorun'] = 100
self.run()
    def repo_action(self):
        """Commit (and optionally push) data according to
        ``self.rp['repository']`` ('do nothing', 'commit & push', or
        'only commit').

        Shows a short status message for up to 2 seconds or until a key
        press.  Version-control failures are silently ignored.
        """
        if isinstance(self.rp['repository'], tuple):
            # a tuple means no choice was made in the GUI; use the first
            # (default) option
            self.rp['repository'] = self.rp['repository'][0]
        if self.rp['repository'] == 'commit & push':
            text = 'committing data and pushing to remote server...'
        elif self.rp['repository'] == 'only commit':
            text = 'commiting data...'
        if self.rp['repository'] != 'do nothing':
            textstim = visual.TextStim(self.win, text=text, height=.3)
            textstim.draw()
            timer = core.CountdownTimer(2)
            self.win.flip()
            try:
                if self.rp['repository'] == 'commit & push':
                    self.commitpush()
                elif self.rp['repository'] == 'only commit':
                    self.commit()
            except:
                pass  # no version control found
            # keep the message visible briefly; a key press dismisses it
            while timer.getTime() > 0 and len(self.last_keypress()) == 0:
                pass
def register(self, **kwargs):
"""Alias to :func:`~psychopy_ext.exp.commit()`
"""
return self.commit(**kwargs)
def commit(self, message=None):
"""
Add and commit changes in a repository.
TODO: How to set this up.
"""
if message is None:
message = 'data for participant %s' % self.info['subjid']
output = ui._repo_action('commit', message=message)
if not self.rp['no_output']:
self.logfile.write(output)
def commitpush(self, message=None):
"""
Add, commit, and push changes to a remote repository.
Currently, only Mercurial repositories are supported.
TODO: How to set this up.
TODO: `git` support
"""
self.commit(message=message)
output = ui._repo_action('push')
if not self.rp['no_output']:
self.logfile.write(output)
class Event(object):

    def __init__(self, parent, name='', dur=.300, durcol=None,
                 display=None, func=None):
        """
        Defines event displays.

        :Args:
            parent (:class:`~psychopy_ext.exp.Experiment` or
            :class:`~psychopy_ext.exp.Task`)

        :Kwargs:
            - name (str, default: '')
                Event name.
            - dur (int/float or a list of int/float, default: .300)
                Event duration (in seconds). If events have different
                durations throughout experiment, you can provide a list
                of durations which must be of the same length as the
                number of trials.
            - durcol (default: None)
                Stored as-is; presumably names a column holding per-trial
                durations -- confirm against Task setup code.
            - display (stimulus or a list of stimuli, default: None)
                Stimuli that are displayed during this event. If *None*,
                displays a fixation spot (or, if not created, creates
                one first).
            - func (function, default: None)
                Function to perform. If *None*, defaults to
                :func:`~psychopy_ext.exp.Task.idle_event`.
        """
        self.parent = parent
        self.name = name
        self.dur = dur  # will be converted to a list during setup
        self.durcol = durcol
        if display is None:
            try:
                self.display = parent.fixation
            except AttributeError:  # no fixation yet -- create one on demand
                parent.create_fixation()
                self.display = parent.fixation
        else:
            self.display = display
        # normalize display to a list of stimuli
        if isinstance(self.display, tuple):
            self.display = list(self.display)
        elif not isinstance(self.display, list):
            self.display = [self.display]
        if func is None:
            self.func = parent.idle_event
        else:
            self.func = func

    @staticmethod
    def _fromdict(parent, entries):
        """
        Create an Event instance from a dictionary.

        This is only meant for backward compatibility and should not
        be used in general.  The legacy 'defaultFun' key is mapped to
        the current 'func' keyword.
        """
        if 'defaultFun' in entries:
            entries['func'] = entries['defaultFun']
            del entries['defaultFun']
        return Event(parent, **entries)
class ThickShapeStim(ShapeStim):
    """
    Draws thick shape stimuli as a collection of lines.

    PsychoPy has a bug in some configurations of not drawing lines thicker
    than 2px. This class fixes the issue. Note that it's really just a
    collection of rectanges so corners will not look nice.

    ..note:: `lineWidth` is specified in your units, not pixels (as is default
             in PsychoPy)

    Modified from :class:`~visual.shape.ShapeStim`.
    """
    def __init__(self, win, lineWidth=.01, **kwargs):
        super(ThickShapeStim, self).__init__(win, lineWidth=lineWidth, **kwargs)
        # re-trigger the vertices attributeSetter so the rectangle
        # segments are built for the initial vertex list
        self.setVertices(self.vertices)

    @attributeSetter
    def vertices(self, value=None):
        """a list of lists or a numpy array (Nx2) specifying xy positions of
        each vertex, relative to the centre of the field.

        Setting this rebuilds ``self.stimulus``: one thin ShapeStim
        rectangle per consecutive vertex pair, which together render the
        thick outline.

        If you're using `Polygon`, `Circle` or `Rect`, this shouldn't be used.

        :ref:`Operations <attrib-operations>` supported.
        """
        self.__dict__['vertices'] = np.array(value, float)

        # Check shape
        # if not (self.vertices.shape==(2,) or (len(self.vertices.shape) == 2 and self.vertices.shape[1] == 2)):
            # raise ValueError("New value for setXYs should be 2x1 or Nx2")
        # self._needVertexUpdate=True
        # accept either a single shape (first element is a number) or a
        # list of shapes
        if isinstance(value[0][0], int) or isinstance(value[0][0], float):
            self.vertices_all = [value]
        else:
            self.vertices_all = value
        self.stimulus = []
        # rotation matrix for the stimulus orientation
        theta = self.ori/180.*np.pi  #(newOri - self.ori)/180.*np.pi
        rot = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])

        self._rend_vertices = []

        # size of one pixel expressed in the stimulus' units
        if self.units == 'pix':
            w = 1
        elif self.units in ['height', 'norm']:
            w = 1./self.win.size[1]
        elif self.units == 'cm':
            w = misc.pix2cm(1, self.win.monitor)
        elif self.units in ['deg', 'degs']:
            w = misc.pix2deg(1, self.win.monitor)
        wh = self.lineWidth/2. - w  # half line width, less one pixel

        for vertices in self.vertices_all:
            rend_verts = []
            if self.closeShape:
                numPairs = len(vertices)
            else:
                numPairs = len(vertices)-1

            for i in range(numPairs):
                # each consecutive vertex pair becomes one thin rectangle
                thisPair = np.array([vertices[i],vertices[(i+1)%len(vertices)]])
                thisPair_rot = np.dot(thisPair, rot.T)
                edges = [
                    thisPair_rot[1][0]-thisPair_rot[0][0],
                    thisPair_rot[1][1]-thisPair_rot[0][1]
                    ]
                lh = np.sqrt(edges[0]**2 + edges[1]**2)/2.  # half segment length
                rend_vert = [[-lh,-wh],[-lh,wh], [lh,wh],[lh,-wh]]
                line = visual.ShapeStim(
                    self.win,
                    lineWidth = 1,
                    lineColor = self.lineColor,#None,
                    interpolate = True,
                    fillColor = self.lineColor,
                    ori = -np.arctan2(edges[1],edges[0])*180/np.pi,
                    pos = np.mean(thisPair_rot,0) + self.pos,
                    # [(thisPair_rot[0][0]+thisPair_rot[1][0])/2. + self.pos[0],
                    # (thisPair_rot[0][1]+thisPair_rot[1][1])/2. + self.pos[1]],
                    vertices = rend_vert
                    )
                #line.setOri(self.ori-np.arctan2(edges[1],edges[0])*180/np.pi)
                self.stimulus.append(line)
                rend_verts.append(rend_vert[0])
                rend_verts.append(rend_vert[1])

            self._rend_vertices.append(rend_verts)
            #import pdb; pdb.set_trace()
        #self.setSize(self.size)

    def draw(self, **kwargs):
        # draw each rectangle segment that makes up the shape
        for stim in self.stimulus:
            stim.draw(**kwargs)

    def to_svg(self, svg):
        # export each shape as an unfilled SVG polyline
        rects = []
        for stim, vertices in zip(self.stimulus,self.vertices_all):
            size = svg.get_size(stim, np.abs(stim.vertices[0])*2)
            points = svg._calc_attr(stim, np.array(vertices))
            points[:, 1] *= -1  # SVG's y-axis points downward
            rect = svg.svgfile.polyline(
                points=points,
                stroke_width=svg._calc_attr(self,self.lineWidth),
                stroke=svg.color2rgb255(self, color=self.lineColor,
                                        colorSpace=self.lineColorSpace),
                fill_opacity=0
                )
            tr = svg.get_pos(self)#+size/2.
            rect.translate(tr[0], tr[1])
            rects.append(rect)
        return rects

    def setSize(self, *args, **kwargs):
        # rebuild the segments whenever size changes
        super(ThickShapeStim, self).setSize(*args, **kwargs)
        self.setVertices(self.vertices_all)

    def setOri(self, *args, **kwargs):
        # rebuild the segments whenever orientation changes
        super(ThickShapeStim, self).setOri(*args, **kwargs)
        self.setVertices(self.vertices_all)

    def setPos(self, *args, **kwargs):
        # rebuild the segments whenever position changes
        super(ThickShapeStim, self).setPos(*args, **kwargs)
        self.setVertices(self.vertices_all)
class GroupStim(object):
    """
    A convenience class to put together stimuli in a single group.

    You can then do things like `stimgroup.draw()`: any method call is
    forwarded to every stimulus in the group.
    """

    def __init__(self, stimuli=None, name=None):
        if not isinstance(stimuli, (tuple, list)):
            self.stimuli = [stimuli]
        else:
            self.stimuli = stimuli
        if name is None:
            # inherit the name of the first stimulus
            self.name = self.stimuli[0].name
        else:
            self.name = name

    def __getattr__(self, name):
        """Forward any attribute access as a method call on each stimulus.

        Returns the list of per-stimulus results, or None when every
        stimulus returned None (typical for drawing methods).
        """
        def method(*args, **kwargs):
            outputs = [getattr(stim, name)(*args, **kwargs)
                       for stim in self.stimuli]
            # see if only None returned, meaning that probably the
            # function doesn't return anything
            notnone = [o for o in outputs if o is not None]
            if len(notnone) != 0:
                return outputs
        # NOTE: a previous version wrapped this return in
        # `try/except TypeError` with a recursive getattr(self, name)
        # fallback; returning a local function can never raise, so that
        # branch was unreachable (and would have recursed forever) and
        # has been removed -- behavior is unchanged.
        return method

    def __iter__(self):
        return self.stimuli.__iter__()
class MouseRespGroup(object):
    """A group of clickable (mouse-response) stimuli.

    Each stimulus gets an invisible bounding :class:`psychopy.visual.Rect`
    (``stim.respbox``) used for hit-testing, and toggles between an 'on'
    and 'off' color when selected.
    """
    def __init__(self, win, stimuli, respmap=None, multisel=False,
                 on_color='#ec6b00', off_color='white', pos=(0,0),
                 height=.2, name=''):
        """
        :Args:
            - win
                Window to draw in.
            - stimuli (str, stimulus, or a list of them)
                Plain strings are converted to TextStim's stacked
                vertically around ``pos``.
        :Kwargs:
            - respmap (default: None)
                Accepted but not used in this method -- presumably a
                response mapping; confirm with callers.
            - multisel (bool, default: False)
                If True, several stimuli may be selected at once.
            - on_color / off_color
                Colors for selected / unselected stimuli.
            - pos, height, name
                Layout and identification parameters.
        """
        #super(MouseRespGroup, self).__init__(stimuli=stimuli, name=name)
        self.win = win
        self.multisel = multisel
        self.on_color = on_color
        self.off_color = off_color
        self.pos = pos
        self.name = name

        if isinstance(stimuli, str):
            stimuli = [stimuli]
        self.stimuli = []
        for i, stim in enumerate(stimuli):
            if isinstance(stim, str):
                # stack text stimuli vertically, centered on pos
                add = np.array([0, (len(stimuli)/2-i)*height*1.5])
                stim = visual.TextStim(self.win, text=stim, height=height,
                                       pos=pos+add)
                stim.size = (5*height, height)
                stim._calcSizeRendered()
                # slightly enlarge the clickable area around the text
                size = (stim._sizeRendered[0]*1.2, stim._sizeRendered[1]*1.2)
            else:
                stim._calcSizeRendered()
                size = stim._sizeRendered
            stim._calcPosRendered()
            # invisible rectangle used for hit-testing and highlighting
            stim.respbox = visual.Rect(
                self.win,
                name=stim.name,
                lineColor=None,
                fillColor=None,
                pos=stim._posRendered,
                height=size[1],
                width=size[0],
                units='pix'
                )
            stim.respbox.selected = False
            self.stimuli.append(stim)
        self.selected = [False for stim in self.stimuli]
        self.clicked_on = [False for stim in self.stimuli]

    def setPos(self, newPos):
        # shift all stimuli (and their response boxes) by the change in
        # position
        for stim in self.stimuli:
            stim.pos += self.pos - newPos
            stim.respbox.pos += self.pos - newPos

    def draw(self):
        for stim in self.stimuli:
            stim.draw()
            stim.respbox.draw()

    def contains(self, *args, **kwargs):
        """Hit-test all response boxes; remember and return whether any
        stimulus was clicked on."""
        self.clicked_on = [stim.respbox.contains(*args, **kwargs) for stim in self.stimuli]
        #self.state = [(s and st) for s, st in zip(sel, self.state)]
        return any(self.clicked_on)

    def select(self, stim=None):
        """Toggle selection of `stim` (or of the stimulus last clicked on,
        if None).  In single-selection mode all other stimuli are
        deselected."""
        if stim is None:
            try:
                idx = self.clicked_on.index(True)
            except:
                # nothing was clicked on -- nothing to select
                return
            else:
                stim = self.stimuli[idx]
        if not self.multisel:
            for st in self.stimuli:
                if st == stim:
                    self._try_set_color(stim)
                else:
                    self._try_set_color(st, state=False)
        else:
            self._try_set_color(stim)

    def reset(self):
        # deselect everything
        for stim in self.stimuli:
            self._try_set_color(stim, state=False)

    def _try_set_color(self, stim, state=None):
        """Set stim's color from its selection state; state=None toggles,
        True/False forces selection on/off."""
        if state is None:
            if not stim.respbox.selected:
                color = self.on_color
                stim.respbox.selected = True
            else:
                color = self.off_color
                stim.respbox.selected = False
        else:
            if state:
                color = self.on_color
                stim.respbox.selected = True
            else:
                color = self.off_color
                stim.respbox.selected = False
        self.selected = [s.respbox.selected for s in self.stimuli]
        # TextStim has setColor; shape-like stimuli need line/fill colors
        try:
            stim.setColor(color)
        except:
            stim.setLineColor(color)
            stim.setFillColor(color)
class _HTMLParser(HTMLParser):
    """HTML filter that wraps the contents of headings (h1-h6) and
    paragraphs in ``<font face="sans-serif">`` tags, passing all other
    markup and text through unchanged."""

    def handle_starttag(self, tag, attrs):
        extra = '<font face="sans-serif">' if tag in self.tags else ''
        self.output += self.get_starttag_text() + extra

    def handle_endtag(self, tag):
        extra = '</font>' if tag in self.tags else ''
        self.output += extra + '</' + tag + '>'

    def handle_data(self, data):
        self.output += data

    def feed(self, data):
        """Parse ``data`` and return it with the font tags inserted."""
        self.output = ''
        # tags whose content should be rendered sans-serif
        self.tags = ['h%i' %(i+1) for i in range(6)] + ['p']
        HTMLParser.feed(self, data)
        return self.output
def combinations(iterable, r):
    """
    Produces combinations of `iterable` elements of lenght `r`.

    Examples:
        - combinations('ABCD', 2) --> AB AC AD BC BD CD
        - combinations(range(4), 3) --> 012 013 023 123

    `From Python 2.6 docs <http://docs.python.org/library/itertools.html#itertools.combinations>`_
    under the Python Software Foundation License

    :Args:
        - iterable
            A list-like or a str-like object that contains some elements
        - r
            Number of elements in each ouput combination

    :Returns:
        A generator yielding combinations of lenght `r`
    """
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    # must be a mutable list: indices are updated in place below
    # (a bare range object is not item-assignable in Python 3)
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # find the rightmost index that can still be incremented
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return  # all indices at their maximum -- done
        indices[i] += 1
        # reset all indices to the right of i
        for j in range(i+1, r):
            indices[j] = indices[j-1] + 1
        yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
    """
    Produces combinations of `iterable` elements of length `r` with
    replacement: identical elements can occur in together in some combinations.

    Example: combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC

    `From Python 2.6 docs <http://docs.python.org/library/itertools.html#itertools.combinations_with_replacement>`_
    under the Python Software Foundation License

    :Args:
        - iterable
            A list-like or a str-like object that contains some elements
        - r
            Number of elements in each ouput combination

    :Returns:
        A generator yielding combinations (with replacement) of length `r`
    """
    pool = tuple(iterable)
    n = len(pool)
    if not n and r:
        return
    idx = [0] * r
    yield tuple(pool[k] for k in idx)
    while True:
        # locate the rightmost position that has not reached the last element
        pos = None
        for k in reversed(range(r)):
            if idx[k] != n - 1:
                pos = k
                break
        if pos is None:
            return  # every position is maxed out -- done
        # bump that position and level everything to its right with it
        idx[pos:] = [idx[pos] + 1] * (r - pos)
        yield tuple(pool[k] for k in idx)
def try_makedirs(path):
    """Attempts to create a new directory.

    This function improves :func:`os.makedirs` behavior by printing an
    error to the log file if it fails and entering the debug mode
    (:mod:`pdb`) so that data would not be lost.

    :Args:
        path (str)
            A path to create.
    """
    if not os.path.isdir(path) and path not in ['','.','./']:
        try:  # if this fails (e.g. permissions) we will get an error
            os.makedirs(path)
        except OSError:  # the only failure mode makedirs raises
            logging.error('ERROR: Cannot create a folder for storing data %s' %path)
            # We'll enter the debugger so that we don't lose any data
            import pdb; pdb.set_trace()
def signal_det(corr_resp, subj_resp):
    """
    Returns an accuracy label according the (modified) Signal Detection Theory.

    ================  ===================  =================
                      Response present     Response absent
    ================  ===================  =================
    Stimulus present  correct / incorrect  miss
    Stimulus absent   false alarm          (empty string)
    ================  ===================  =================

    :Args:
        corr_resp
            What one should have responded. If no response expected
            (e.g., no stimulus present), then it should be an empty string
            ('')
        subj_resp
            What the observer responsed. If no response, it should be
            an empty string ('').
    :Returns:
        A string indicating the type of response.
    """
    stim_absent = corr_resp == ''
    resp_absent = subj_resp == ''
    if stim_absent:
        return '' if resp_absent else 'false alarm'
    if resp_absent:
        return 'miss'
    return 'correct' if corr_resp == subj_resp else 'incorrect'
def invert_dict(d):
    """
    Inverts a dictionary: keys become values.

    This is an instance of an OrderedDict, and so the new keys are
    sorted.

    :Args:
        d: dict
    """
    flipped = {value: key for key, value in d.items()}
    return OrderedDict((key, flipped[key]) for key in sorted(flipped))
def get_version():
    """Get psychopy_ext version.

    If using a repository, then git head information is used: first via
    PsychoPy's helper, then -- as a fallback for Windows shells without
    git on the path -- by reading ``.git/HEAD`` directly.  Else version
    number is used.

    :Returns:
        A key where to store version in `self.runtime_info` and
        a string value of psychopy_ext version.
    """
    d = os.path.abspath(os.path.dirname(__file__))
    githash = psychopy.info._getHashGitHead(gdir=d)  # should be .../psychopy/psychopy/
    if not githash:  # a workaround when Windows cmd has no git
        git_head_file = os.path.join(d, '../.git/HEAD')
        try:
            # HEAD contains 'ref: refs/heads/<branch>'; follow the
            # pointer to get the commit hash
            with open(git_head_file) as f:
                pointer = f.readline()
            pointer = pointer.strip('\r\n').split('ref: ')[-1]
            git_branch = pointer.split('/')[-1]
            pointer = os.path.join(d, '../.git', pointer)
            with open(pointer) as f:
                git_hash = f.readline()
            githash = git_branch + ' ' + git_hash.strip('\r\n')
        except:
            # not a git checkout (or unreadable) -- fall back to version
            pass
    if githash:
        key = 'pythonPsychopy_extGitHead'
        value = githash
    else:
        key = 'pythonPsychopy_extVersion'
        value = psychopy_ext_version
    return key, value
def get_mon_sizes(screen=None):
    """Get a list of resolutions for each monitor.

    Recipe from <http://stackoverflow.com/a/10295188>_

    :Args:
        screen (int, default: None)
            Which screen's resolution to return. If None, the a list of all
            screens resolutions is returned.

    :Returns:
        a tuple or a list of tuples of each monitor's resolutions
    """
    app = wx.App(False)  # create an app if there isn't one and don't show it
    nmons = wx.Display.GetCount()  # how many monitors we have
    mon_sizes = [wx.Display(i).GetGeometry().GetSize() for i in range(nmons)]
    if screen is None:
        return mon_sizes
    else:
        return mon_sizes[screen]
def get_para_no(file_pattern, n=6):
    """Looks up used para numbers and returns a new one for this run.

    Reads the 'paraNo' column of the first data row in every file
    matching ``file_pattern`` and randomly picks among the least-used
    para numbers in ``range(n)``.

    :Args:
        file_pattern (str)
            A glob pattern for existing data files.
    :Kwargs:
        n (int, default: 6)
            Number of available para numbers.
    :Returns:
        An int in ``range(n)``.
    """
    all_data = glob.glob(file_pattern)
    if all_data == []:
        return random.choice(range(n))
    paranos = []
    for this_data in all_data:
        try:
            with open(this_data) as f:  # close the file even on errors
                lines = csv.reader(f)
                header = next(lines)  # Python 3: next(it), not it.next()
                ind = header.index('paraNo')
                this_parano = next(lines)[ind]
                paranos.append(int(this_parano))
        except (IOError, OSError, StopIteration, ValueError, IndexError):
            # unreadable file, no rows, missing column, or bad value:
            # skip this file, as the original best-effort loop did
            pass
    if paranos != []:
        count_used = np.bincount(paranos)
        count_used = np.hstack((count_used, np.zeros(n - len(count_used))))
        poss_paranos = np.arange(n)
        # choose randomly among the least-used para numbers
        paranos = random.choice(poss_paranos[count_used == np.min(count_used)].tolist())
    else:
        paranos = random.choice(range(n))
    return paranos
def get_unique_trials(trial_list, column='cond'):
    """Return one trial per condition value, sorted by condition.

    The first trial encountered for each value of ``column`` is kept
    (copied into an OrderedDict); the result is ordered by condition.
    """
    seen = []
    unique = []
    for trial in trial_list:
        cond = trial[column]
        if cond not in seen:
            seen.append(cond)
            unique.append(OrderedDict(trial))
    # argsort the conditions, then emit the kept trials in that order
    order = sorted(range(len(seen)), key=seen.__getitem__)
    return [unique[i] for i in order]
def weighted_sample(probs):
    """Deprecated alias; delegates to :func:`weighted_choice`."""
    warnings.warn("weighted_sample is deprecated; "
                  "use weighted_choice instead")
    return weighted_choice(weights=probs)
def weighted_choice(choices=None, weights=None):
    """
    Chooses an element from a list based on it's weight.

    :Kwargs:
        - choices (list, default: None)
            If None, an index between 0 and ``len(weights)`` is returned.
        - weights (list, default: None)
            If None, all choices get equal weights.

    :Returns:
        An element from ``choices``
    """
    if choices is None:
        if weights is None:
            raise Exception('Please specify either choices or weights.')
        choices = range(len(weights))
    elif weights is None:
        # no weights given: uniform distribution over the choices
        weights = np.ones(len(choices)) / float(len(choices))
    if not np.allclose(np.sum(weights), 1):
        raise Exception('Weights must add up to one.')
    # walk down the cumulative distribution until the random draw is spent
    draw = np.random.random()
    idx = 0
    while draw > 0:
        draw -= weights[idx]
        idx += 1
    return choices[idx - 1]
def get_behav_df(subjid, pattern='%s'):
    """
    Extracts data from files for data analysis.

    :Args:
        subjid (str or a list/tuple of str)
            Participant ID(s) whose data files should be collected.
    :Kwargs:
        pattern (str, default: '%s')
            A string with formatter information. Usually it contains a path
            to where data is and a formatter such as '%s' to indicate where
            participant ID should be incorporated.

    :Returns:
        A `pandas.DataFrame` of data for the requested participants.

    :Raises:
        IOError when no matching data files are found.
    """
    # isinstance (not an exact type() comparison) so subclasses work too
    if not isinstance(subjid, (list, tuple)):
        subjid_list = [subjid]
    else:
        subjid_list = subjid

    df_fnames = []
    for subjid in subjid_list:
        fnames = glob.glob(pattern % subjid)
        fnames.sort()  # stable, alphabetical order per participant
        df_fnames += fnames
    dfs = []
    for dtf in df_fnames:
        data = pandas.read_csv(dtf)
        if data is not None:
            dfs.append(data)
    if dfs == []:
        print(df_fnames)
        raise IOError('Behavioral data files not found.\n'
                      'Tried to look for %s' % (pattern % subjid))
    df = pandas.concat(dfs, ignore_index=True)
    return df
def latin_square(n=6):
    """
    Generates a Latin square of size n. n must be even.

    Based on a balanced Latin square construction
    <http://rintintin.colorado.edu/~chathach/balancedlatinsquares.html>`_

    :Kwargs:
        n (int, default: 6)
            Size of Latin square. Should be equal to the number of
            conditions you have.

    .. :note: n must be even. For an odd n, I am not aware of a
              general method to produce a Latin square.

    :Returns:
        A `numpy.array` with each row representing one possible ordering
        of stimuli.
    """
    if n % 2 != 0:
        raise Exception('n must be even!')
    col = np.arange(1, n+1)
    first_line = []
    for i in range(n):
        # integer division (//) keeps the shift an int: i/2 is a float in
        # Python 3, which np.roll rejects as a shift amount
        if i % 2 == 0:
            first_line.append((n - i//2) % n + 1)
        else:
            first_line.append((i+1)//2 + 1)
    latin = np.array([np.roll(col, i-1) for i in first_line])
    return latin.T
def make_para(n=6):
    """
    Generates a symmetric para file with fixation periods approximately 25%
    of the time.

    :Kwargs:
        n (int, default: 6)
            Size of Latin square. Should be equal to the number of
            conditions you have.

    :note: n must be even. For an odd n, I am not aware of a
           general method to produce a Latin square.

    :Returns:
        A `numpy.array` with each row representing one possible ordering
        of stimuli (fixations are coded as 0).
    """
    out = []
    for row in latin_square(n=n).tolist():
        # mirror the ordering so each para is symmetric
        mirrored = row + row[::-1]
        seq = []
        for pos, cond in enumerate(mirrored):
            if pos % 4 == 0:
                seq.append(0)  # insert a fixation period every 4 conditions
            seq.append(cond)
        seq.append(0)  # always end with fixation
        out.append(seq)
    return np.array(out)
class Rectangle(object):
    '''Draws a rectangle into a batch.'''
    def __init__(self, x1, y1, x2, y2, batch):
        # one GL_QUADS primitive with a flat light-gray color; corners
        # given counter-clockwise from (x1, y1)
        self.vertex_list = batch.add(4, pyglet.gl.GL_QUADS, None,
            ('v2i', [x1, y1, x2, y1, x2, y2, x1, y2]),
            ('c4B', [200, 200, 220, 255] * 4)
        )
class TextWidget(object):
    """A single-line editable text field rendered with pyglet.

    Combines an IncrementalTextLayout, a Caret and a background
    Rectangle outline, all placed into the given batch.
    """
    def __init__(self, text, x, y, width, batch):
        self.document = pyglet.text.document.UnformattedDocument(text)
        # render the text in black
        self.document.set_style(0, len(self.document.text),
            dict(color=(0, 0, 0, 255))
        )
        font = self.document.get_font()
        height = font.ascent - font.descent  # full line height of the font
        self.layout = pyglet.text.layout.IncrementalTextLayout(
            self.document, width, height, multiline=False, batch=batch)
        self.caret = pyglet.text.caret.Caret(self.layout)
        self.layout.x = x
        self.layout.y = y

        # Rectangular outline, padded slightly beyond the text area
        pad = 2
        self.rectangle = Rectangle(x - pad, y - pad,
            x + width + pad, y + height + pad, batch)

    def hit_test(self, x, y):
        # True when the point (x, y) lies inside the text layout bounds
        return (0 < x - self.layout.x < self.layout.width and
                0 < y - self.layout.y < self.layout.height)
|
# Source repository: zeou1/maggot_models
#%% Imports
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.cluster import GaussianCluster, KMeansCluster
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, OmnibusEmbed
from graspy.models import DCSBMEstimator
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import augment_diagonal, binarize, cartprod, pass_to_ranks, to_laplace
from joblib.parallel import Parallel, delayed
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import entropy
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import ParameterGrid
from spherecluster import SphericalKMeans
from src.data import load_everything, load_networkx
from src.models import GridSearchUS
from src.utils import get_best, meta_to_array, relabel, savefig
from src.visualization import incidence_plot, screeplot
# Global general parameters
MB_VERSION = "mb_2019-09-23"
BRAIN_VERSION = "2019-09-18-v2"
GRAPH_TYPES = ["Gad", "Gaa", "Gdd", "Gda"]
GRAPH_TYPE_LABELS = [r"A $\to$ D", r"A $\to$ A", r"D $\to$ D", r"D $\to$ A"]
N_GRAPH_TYPES = len(GRAPH_TYPES)
SAVEFIGS = True
# Functions
def annotate_arrow(ax, coords=(0.061, 0.93)):
    """Label an axes with 'Source'/'Target' text joined by a curved arrow.

    'Target' is placed at ``coords`` in figure-fraction coordinates;
    'Source' is positioned relative to it with an arrow between them.
    """
    arrow_style = dict(
        arrowstyle="-|>",
        color="k",
        connectionstyle="arc3,rad=-0.4",
    )
    target = ax.annotate("Target", xy=coords, xycoords="figure fraction")
    ax.annotate("Source", xy=(0, 0.5), xycoords=target,
                xytext=(-1.4, -2.1), arrowprops=arrow_style)
def ase(adj, n_components):
    """Embed one adjacency matrix with adjacency spectral embedding,
    applying pass-to-ranks first when the module-level PTR flag is set.
    Out/in latent positions are concatenated along the last axis."""
    if PTR:
        adj = pass_to_ranks(adj)
    embedder = AdjacencySpectralEmbed(n_components=n_components)
    return np.concatenate(embedder.fit_transform(adj), axis=-1)
def omni(adjs, n_components):
    """Jointly embed several graphs with the omnibus embedding,
    splitting the component budget evenly across graphs; applies
    pass-to-ranks first when the module-level PTR flag is set."""
    if PTR:
        adjs = [pass_to_ranks(a) for a in adjs]
    embedder = OmnibusEmbed(n_components=n_components // len(adjs))
    latent = embedder.fit_transform(adjs)
    latent = np.concatenate(latent, axis=-1)  # join out/in positions
    return np.concatenate(latent, axis=-1)  # join the per-graph embeddings
def ase_concatenate(adjs, n_components):
    """Embed each graph separately with ASE (component budget split
    evenly), then concatenate every graph's latent positions per node;
    applies pass-to-ranks first when the module-level PTR flag is set."""
    if PTR:
        adjs = [pass_to_ranks(a) for a in adjs]
    embedder = AdjacencySpectralEmbed(n_components=n_components // len(adjs))
    per_graph = []
    for adj in adjs:
        latent = np.concatenate(embedder.fit_transform(adj), axis=-1)
        per_graph.append(latent)
    return np.concatenate(per_graph, axis=-1)
def degree(adjs, *args):
    """Stack weighted in- and out-degrees of each graph into a feature matrix.

    Column i holds the in-degrees (column sums) of graph i; column
    i + len(adjs) holds its out-degrees (row sums). Extra ``*args`` (e.g.
    ``n_components``) are accepted for signature compatibility with the
    embedding functions and ignored.

    Bug fix: previously read module globals ``n_verts`` (defined later in the
    script) and ``N_GRAPH_TYPES``; both are now derived from ``adjs`` so the
    function works for any number of same-sized graphs.
    """
    n_verts = adjs[0].shape[0]
    n_graphs = len(adjs)
    deg_mat = np.zeros((n_verts, 2 * n_graphs))
    for i, g in enumerate(adjs):
        deg_mat[:, i] = g.sum(axis=0)
        deg_mat[:, i + n_graphs] = g.sum(axis=1)
    return deg_mat
def calc_weighted_entropy(true_labels, pred_labels):
    """Size-weighted, log-k-normalized entropy of predictions within true classes.

    For each labeled true class, computes the entropy of the distribution of
    predicted labels among its members, weights it by the class's share of all
    points, and normalizes by log(#predicted labels). Points labeled -1 or
    "Unknown" are treated as unlabeled and skipped.
    """
    uniq_true = np.unique(true_labels)
    uniq_pred = np.unique(pred_labels)
    log_k = np.log(len(uniq_pred))
    total = 0
    for label in uniq_true:
        # -1 / "Unknown" mark unlabeled points; they contribute nothing
        if label == -1 or label == "Unknown":
            continue
        member_inds = np.where(true_labels == label)[0]
        member_preds = pred_labels[member_inds]
        vals, counts = np.unique(member_preds, return_counts=True)
        probs = np.zeros(uniq_pred.shape)
        # NOTE: indexes probs by predicted label value, so labels are assumed
        # to be contiguous integers 0..k-1 (true for the cluster fits here)
        probs[vals] = counts
        probs = probs / len(member_preds)
        weighted = entropy(probs) * (len(member_preds) / len(true_labels)) / log_k
        total += weighted
    return total
def generate_experiment_arglist(latents, true_labels):
    """Build the argument tuples for run_clustering over the full experiment grid.

    One tuple per (embedding, estimator, n_clusters) combination.
    NOTE(review): reads the module globals EMBED_FUNC_NAMES, ESTIMATORS,
    ESTIMATOR_NAMES, MIN_CLUSTERS, MAX_CLUSTERS and `params`; `params` must be
    assigned (parallel to ESTIMATORS) before this is called, which the script
    sections below do.
    """
    arglist = []
    for i, (latent, latent_name) in enumerate(zip(latents, EMBED_FUNC_NAMES)):
        for j, (estimator, estimator_name) in enumerate(
            zip(ESTIMATORS, ESTIMATOR_NAMES)
        ):
            # sweep is half-open: [MIN_CLUSTERS, MAX_CLUSTERS)
            for k in range(MIN_CLUSTERS, MAX_CLUSTERS):
                arglist.append(
                    (
                        true_labels,
                        latent,
                        latent_name,
                        estimator,
                        estimator_name,
                        k,
                        params[j],
                    )
                )
    return arglist
def ari_scorer(estimator, latent, y=None):
    """Adjusted Rand index between true labels *y* and the estimator's clustering."""
    return adjusted_rand_score(y, estimator.fit_predict(latent))
def entropy_scorer(estimator, latent, y=None):
    """Weighted-entropy score (see calc_weighted_entropy) of the fitted clustering."""
    return calc_weighted_entropy(y, estimator.fit_predict(latent))
def bic_scorer(estimator, latent, y=None):
    """Return the BIC of a fitted GaussianCluster's model, NaN for other estimators.

    *y* is accepted only for scorer-signature compatibility and is ignored.
    """
    # isinstance is the idiomatic type check (type(...) == ... defeats subclasses)
    if isinstance(estimator, GaussianCluster):
        return estimator.model_.bic(latent)
    return np.nan
def inertia_scorer(estimator, latent, y=None):
    """Return the inertia of a fitted (Spherical)KMeans, NaN for other estimators.

    *y* is accepted only for scorer-signature compatibility and is ignored.
    """
    # isinstance with a tuple replaces the two type(...) == ... comparisons
    if isinstance(estimator, (KMeans, SphericalKMeans)):
        return estimator.inertia_
    return np.nan
def run_clustering(
    seed,
    true_labels,
    latent,
    latent_name,
    estimator,
    estimator_name,
    n_clusters,
    params,
):
    """Fit one clustering estimator on one embedding and collect its scores.

    Returns a dict (one DataFrame row) with ARI, entropy, BIC, inertia and the
    experiment metadata.
    """
    np.random.seed(seed)
    # GaussianCluster takes a component range; the others take n_clusters
    if estimator is GaussianCluster:
        model = estimator(
            min_components=n_clusters, max_components=n_clusters, **params
        )
    else:
        model = estimator(n_clusters=n_clusters, **params)
    model.fit(latent)
    return {
        "ARI": ari_scorer(model, latent, y=true_labels),
        "Entropy": entropy_scorer(model, latent, y=true_labels),
        "Embed": latent_name,
        "Cluster": estimator_name,
        "# Clusters": n_clusters,
        "BIC": bic_scorer(model, latent, y=true_labels),
        "Inertia": inertia_scorer(model, latent, y=true_labels),
    }
def run_clustering_experiment(
    latents, true_labels, min_clusters, max_clusters, n_sims, seed=None
):
    """Run every (embedding, estimator, k) combination n_sims times in parallel.

    Returns a DataFrame with one row per fit (see run_clustering).
    NOTE(review): min_clusters/max_clusters are currently unused —
    generate_experiment_arglist reads the MIN_CLUSTERS/MAX_CLUSTERS globals.
    """
    if seed is not None:
        np.random.seed(seed)
    arglist = generate_experiment_arglist(latents, true_labels)
    arglist = arglist * n_sims  # replicate the whole grid for repeated sims
    # Bug fix: arglist is already replicated by n_sims above, so draw exactly
    # one seed per job; the old size of n_sims * len(arglist) over-allocated
    # (zip silently truncated the surplus).
    seeds = np.random.randint(1e8, size=len(arglist))
    outs = Parallel(n_jobs=-2, verbose=10)(
        delayed(run_clustering)(s, *i) for s, i in zip(seeds, arglist)
    )
    cluster_df = pd.DataFrame.from_dict(outs)
    return cluster_df
# Global alg parameters
# PTR: embed pass-to-ranks-transformed adjacencies instead of raw edge weights
PTR = True
# Parallel lists: display name <-> embedding function
EMBED_FUNC_NAMES = ["ASE", "OMNI", "Degree"] # "ASE-Cat"]
EMBED_FUNCS = [ase, omni, degree] # ase_concatenate]
# Parallel lists: estimator class <-> display name (params lists below match too)
ESTIMATORS = [GaussianCluster, SphericalKMeans, KMeans]
ESTIMATOR_NAMES = ["GMM", "SKmeans", "Kmeans"]
# Cluster-count sweep is the half-open range [MIN_CLUSTERS, MAX_CLUSTERS)
MAX_CLUSTERS = 12
MIN_CLUSTERS = 2
N_SIMS = 1  # repeats of the full experiment grid
N_INIT = 200  # random restarts per estimator fit
# Set up plotting constants
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=1)
# Experiment 1: Compare clustering on right mushroom body
# Preliminaries:
# Load the right mushroom body data
# Plot the summed graph
# Plot the 4-color graphs, split up
# Plot the ASE on summed latent positions
# Plot the OMNI on 4-color latent positions
# Plot the MASE on 4-color latent positions
# Plot the split 4-color ASE concatenated positions
# TODO: Compare to LSE
# Experiment:
# Cluster each of the above embeddings using skmeans, kmeans, gmm
# Plot ARI vs. number of clusters
# Plot Entropy metric vs. number of clusters
#%% Load the Mushroom Body Right
# Load graph and some metadata
adj, class_labels, side_labels = load_everything(
    "G", version=MB_VERSION, return_class=True, return_side=True
)
# Restrict to the right hemisphere, then order vertices by total degree (desc.)
right_inds = np.where(side_labels == "right")[0]
adj = adj[np.ix_(right_inds, right_inds)]
degrees = adj.sum(axis=0) + adj.sum(axis=1)
sort_inds = np.argsort(degrees)[::-1]
class_labels = class_labels[right_inds]  # need to do right inds, then sort_inds
class_labels = class_labels[sort_inds]
# Remap the names
# Collapse fine-grained cell types into 5 coarse classes (APL/PN/KC/MBIN/MBON)
name_map = {
    "APL": "APL",
    "Gustatory PN": "PN",
    "KC 1 claw": "KC",
    "KC 2 claw": "KC",
    "KC 3 claw": "KC",
    "KC 4 claw": "KC",
    "KC 5 claw": "KC",
    "KC 6 claw": "KC",
    "KC young": "KC",
    "MBIN": "MBIN",
    "MBON": "MBON",
    "ORN mPN": "PN",
    "ORN uPN": "PN",
    "Unknown PN": "PN",
    "tPN": "PN",
    "vPN": "PN",
}
simple_class_labels = np.array(itemgetter(*class_labels)(name_map))
# Now load all 4 colors
# Apply the same vertex selection and ordering to each edge-type graph
color_adjs = []
for t in GRAPH_TYPES:
    adj = load_everything(t, version=MB_VERSION)
    adj = adj[np.ix_(right_inds, right_inds)]
    adj = adj[np.ix_(sort_inds, sort_inds)]
    color_adjs.append(adj)
sum_adj = np.array(color_adjs).sum(axis=0)
# Print some stats
n_verts = adj.shape[0]
print("Right Mushroom Body")
print()
print(f"Number of vertices: {n_verts}")
print()
for g, name in zip(color_adjs, GRAPH_TYPES):
    print(name)
    print(f"Number of edges: {np.count_nonzero(g)}")
    print(f"Number of synapses: {int(g.sum())}")
    median_in_degree = np.median(np.count_nonzero(g, axis=0))
    median_out_degree = np.median(np.count_nonzero(g, axis=1))
    print(f"Median node in degree: {median_in_degree}")
    print(f"Median node out degree: {median_out_degree}")
    print()
#%%
# Plot the adjacency matrix for the summed graph
plt.figure(figsize=(5, 5))
ax = heatmap(
    sum_adj,
    inner_hier_labels=simple_class_labels,
    transform="simple-all",
    hier_label_fontsize=18,
    sort_nodes=False,
    cbar=False,
    title="Right Mushroom Body (summed 4 channels)",
    title_pad=90,
    font_scale=1.7,
)
annotate_arrow(ax, (0.135, 0.88))
savefig("flat_mb", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
# Plot the adjacency matrix for the 4-color graphs
fig, ax = plt.subplots(2, 2, figsize=(20, 20))
ax = ax.ravel()
for i, g in enumerate(color_adjs):
    heatmap(
        g,
        inner_hier_labels=simple_class_labels,
        transform="simple-all",
        hier_label_fontsize=18,
        sort_nodes=False,
        ax=ax[i],
        cbar=False,
        title=GRAPH_TYPE_LABELS[i],
        title_pad=70,
        font_scale=1.7,
    )
plt.suptitle("Right Mushroom Body (4 channels)", fontsize=45, x=0.525, y=1.02)
plt.tight_layout()
annotate_arrow(ax[0])
savefig("4color_mb", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
#%% Embed the graphs for the mushroom body right
# One latent-position matrix per entry in EMBED_FUNC_NAMES ("ASE"/"OMNI"/"Degree")
n_components = 4
ase_latent = ase(sum_adj, n_components)
omni_latent = omni(color_adjs, n_components)
ase_cat_latent = ase_concatenate(color_adjs, n_components)
degree_mat = degree(color_adjs)
# latents = [ase_latent, omni_latent, ase_cat_latent, degree_mat]
latents = [ase_latent, omni_latent, degree_mat]
for latent, name in zip(latents, EMBED_FUNC_NAMES):
    pairplot(latent, labels=simple_class_labels, title=name)
#%%
# degree_clusts = [SphericalKMeans, KMeans]
# for k in range(2, 12):
# print(k)
# est = SphericalKMeans(n_clusters=k)
# pred_labels = est.fit_predict(deg_mat)
# ari = adjusted_rand_score(simple_class_labels, pred_labels)
# print(ari)
# print()
# for k in range(2, 12):
# print(k)
# est = KMeans(n_clusters=k)
# pred_labels = est.fit_predict(deg_mat)
# ari = adjusted_rand_score(simple_class_labels, pred_labels)
# print(ari)
# for k in range(2, 12):
# print(k)
# est = GaussianCluster(min_components=k, max_components=k, covariance_type="all")
# pred_labels = est.fit_predict(deg_mat)
# ari = adjusted_rand_score(simple_class_labels, pred_labels)
# print(ari)
#%% Run a clustering experiment on the mushroom body right
# Per-estimator constructor kwargs, parallel to ESTIMATORS/ESTIMATOR_NAMES;
# `params` is read as a module global by generate_experiment_arglist
gmm_params = {"n_init": N_INIT, "covariance_type": "all"}
skmeans_params = {"n_init": N_INIT}
kmeans_params = {"n_init": N_INIT}
true_labels = simple_class_labels
params = [gmm_params, skmeans_params, kmeans_params]
cluster_df = run_clustering_experiment(
    latents, true_labels, MIN_CLUSTERS, MAX_CLUSTERS, N_SIMS, seed=8888
)
#%% Plot results of clustering experiments
sns.set_context("talk", font_scale=1.75)
figsize = (20, 10)
# ARI vs number of clusters, one line per (embedding, estimator) pair
plt.figure(figsize=figsize)
sns.lineplot(
    data=cluster_df,
    x="# Clusters",
    y="ARI",
    hue="Embed",
    hue_order=EMBED_FUNC_NAMES,
    style="Cluster",
    style_order=ESTIMATOR_NAMES,
)
plt.legend(bbox_to_anchor=(1, 1))
plt.title(f"Right MB, n_inits = {N_INIT}")
savefig("right_mb_ari", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
# Weighted-entropy metric vs number of clusters
plt.figure(figsize=figsize)
sns.lineplot(
    data=cluster_df,
    x="# Clusters",
    y="Entropy",
    hue="Embed",
    hue_order=EMBED_FUNC_NAMES,
    style="Cluster",
    style_order=ESTIMATOR_NAMES,
)
plt.legend(bbox_to_anchor=(1, 1))
plt.title(f"Right MB, n_inits = {N_INIT}")
savefig("right_mb_ent", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
# BIC: keep only the best (highest-BIC) row per (embed, cluster, k) combination
plot_bic_df = cluster_df.sort_values("BIC", ascending=False).drop_duplicates(
    ["Embed", "Cluster", "# Clusters"]
)
plt.figure(figsize=figsize)
sns.lineplot(
    data=plot_bic_df,
    x="# Clusters",
    y="BIC",
    hue="Embed",
    hue_order=EMBED_FUNC_NAMES,
    style="Cluster",
    style_order=ESTIMATOR_NAMES,
)
plt.legend(bbox_to_anchor=(1, 1))
plt.title(f"Right MB, n_inits = {N_INIT}")
savefig("right_mb_bic", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
#%%
# Load the whole-brain graph with class/side metadata, restrict to the right
# hemisphere, and order vertices by total degree (descending)
adj, class_labels, side_labels = load_everything(
    "G", version=BRAIN_VERSION, return_class=True, return_side=True
)
right_inds = np.where(side_labels == " mw right")[0]
adj = adj[np.ix_(right_inds, right_inds)]
degrees = adj.sum(axis=0) + adj.sum(axis=1)
sort_inds = np.argsort(degrees)[::-1]
class_labels = class_labels[right_inds]  # need to do right inds, then sort_inds
class_labels = class_labels[sort_inds]
# Remap the names
# Collapse fine-grained cell types into coarse classes
name_map = {
    "CN": "Unknown",
    "DANs": "MBIN",
    "KCs": "KC",
    "LHN": "Unknown",
    "LHN; CN": "Unknown",
    "MBINs": "MBIN",
    "MBON": "MBON",
    "MBON; CN": "MBON",
    "OANs": "MBIN",
    "ORN mPNs": "PN",
    "ORN uPNs": "PN",
    "tPNs": "PN",
    "vPNs": "PN",
    "Unidentified": "Unknown",
    "Other": "Unknown",
}
simple_class_labels = np.array(itemgetter(*class_labels)(name_map))
# Now load all 4 colors
color_adjs = []
for t in GRAPH_TYPES:
    # Bug fix: pass the same dataset version as the summed-graph load above;
    # previously this loaded the default version, which could mismatch the
    # vertex set that right_inds/sort_inds were computed on.
    adj = load_everything(t, version=BRAIN_VERSION)
    adj = adj[np.ix_(right_inds, right_inds)]
    adj = adj[np.ix_(sort_inds, sort_inds)]
    color_adjs.append(adj)
sum_adj = np.array(color_adjs).sum(axis=0)
# Print some stats
n_verts = adj.shape[0]
print("Right Brain")
print()
print(f"Number of vertices: {n_verts}")
print()
for g, name in zip(color_adjs, GRAPH_TYPES):
    print(name)
    print(f"Number of edges: {np.count_nonzero(g)}")
    print(f"Number of synapses: {int(g.sum())}")
    median_in_degree = np.median(np.count_nonzero(g, axis=0))
    median_out_degree = np.median(np.count_nonzero(g, axis=1))
    print(f"Median node in degree: {median_in_degree}")
    print(f"Median node out degree: {median_out_degree}")
    print()
# Plot the adjacency matrix for the summed graph
sns.set_context("talk", font_scale=1)
plt.figure(figsize=(5, 5))
ax = heatmap(
    sum_adj,
    inner_hier_labels=simple_class_labels,
    transform="simple-all",
    hier_label_fontsize=10,
    sort_nodes=False,
    cbar=False,
    title="Right Brain (summed 4 channels)",
    title_pad=90,
    font_scale=1.7,
)
annotate_arrow(ax, (0.135, 0.88))
# Plot the adjacency matrix for the 4-color graphs
# NOTE(review): unlike the MB figure, these are binarized rather than
# "simple-all"-transformed — presumably intentional for the denser brain graph
fig, ax = plt.subplots(2, 2, figsize=(20, 20))
ax = ax.ravel()
for i, g in enumerate(color_adjs):
    heatmap(
        binarize(g),
        inner_hier_labels=simple_class_labels,
        # transform="si",
        hier_label_fontsize=10,
        sort_nodes=False,
        ax=ax[i],
        cbar=False,
        title=GRAPH_TYPE_LABELS[i],
        title_pad=70,
        font_scale=1.7,
    )
plt.suptitle("Right Brain (4 channels)", fontsize=45, x=0.525, y=1.02)
plt.tight_layout()
annotate_arrow(ax[0])
savefig("4color_brain", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
#%% Embed the graphs for the right hemisphere graph
n_components = 4
ase_latent = ase(sum_adj, n_components)
omni_latent = omni(color_adjs, n_components)
ase_cat_latent = ase_concatenate(color_adjs, n_components)
# NOTE(review): EMBED_FUNC_NAMES is ["ASE", "OMNI", "Degree"], but the third
# latent here is the ASE-concatenate embedding, so downstream plots will label
# it "Degree" — confirm whether the names list or this latents list is intended.
latents = [ase_latent, omni_latent, ase_cat_latent]
for latent, name in zip(latents, EMBED_FUNC_NAMES):
    pairplot(latent, labels=simple_class_labels, title=name)
#%% Run a clustering experiment on the full graph right
# Same estimator kwargs as the MB experiment; `params` is a module global
# consumed by generate_experiment_arglist
gmm_params = {"n_init": N_INIT, "covariance_type": "all"}
skmeans_params = {"n_init": N_INIT}
kmeans_params = {"n_init": N_INIT}
true_labels = simple_class_labels
params = [gmm_params, skmeans_params, kmeans_params]
cluster_df = run_clustering_experiment(
    latents, true_labels, MIN_CLUSTERS, MAX_CLUSTERS, N_SIMS, seed=8888
)
#%% Plot results of clustering experiments
sns.set_context("talk", font_scale=1.75)
figsize = (20, 10)
plt.figure(figsize=figsize)
sns.lineplot(
    data=cluster_df,
    x="# Clusters",
    y="Entropy",
    hue="Embed",
    hue_order=EMBED_FUNC_NAMES,
    style="Cluster",
    style_order=ESTIMATOR_NAMES,
)
plt.legend(bbox_to_anchor=(1, 1))
plt.title(f"Entropy, n_init = {N_INIT}")
savefig("right_brain_ent", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
# Keep only the best (highest-BIC) row per (embed, cluster, k) combination
plot_ari_df = cluster_df.sort_values("BIC", ascending=False).drop_duplicates(
    ["Embed", "Cluster", "# Clusters"]
)
plt.figure(figsize=figsize)
sns.lineplot(
    data=plot_ari_df,
    x="# Clusters",
    y="BIC",
    hue="Embed",
    hue_order=EMBED_FUNC_NAMES,
    style="Cluster",
    style_order=ESTIMATOR_NAMES,
)
plt.legend(bbox_to_anchor=(1, 1))
plt.title(f"BIC, n_init = {N_INIT}")
savefig("right_brain_bic", fmt="png", dpi=150, bbox_inches="tight", pad_inches=0.5)
#####################
# Experiment 2: Compare clustering on the full mushroom body
# Preliminaries:
# Load the full mushroom body
# Generate same visualization plots as for the above
# Experiment:
# Same plots as for Experiment 1
# Other
# Get best ARI
# plot_ari_df = cluster_df.sort_values("ARI", ascending=False).drop_duplicates(
# ["Embed", "Cluster", "# Clusters"]
# )
# plt.figure(figsize=figsize)
# sns.lineplot(data=cluster_df, x="# Clusters", y="ARI", hue="Embed", style="Cluster")
# plt.legend(bbox_to_anchor=(1, 1))
# plt.title(f"Mean ARIs +/- 95% CI, n_sims = {n_sims}")
# Get best entropy
# plot_ent_df = cluster_df.sort_values("Entropy", ascending=True).drop_duplicates(
# ["Embed", "Cluster", "# Clusters"]
# )
# plt.figure(figsize=figsize)
# sns.lineplot(data=cluster_df, x="# Clusters", y="Entropy", hue="Embed",
# style="Cluster")
# plt.legend(bbox_to_anchor=(1, 1))
# plt.title(f"Mean entropy +/- 95% CI, n_sims = {n_sims}")
# Experiment 3: Compare clustering on one hemisphere of the full data
# Preliminaries:
# Load the full graph
# Generate the same visualization plots as for the above
# Experiment:
# Same plots as for Experiment 1
# %%
|
import os
from ekorpkit import eKonf
def test_compose_config():
    """Composing the default config should yield a plain dict."""
    cfg = eKonf.compose()
    cfg = eKonf.to_dict(cfg)
    # isinstance is the idiomatic type check (type(...) == ... defeats subclasses)
    assert isinstance(cfg, dict)
def test_about():
    """The `about` CLI entry point should run without raising."""
    from ekorpkit.cli import about

    about(**eKonf.compose())
    assert True
def test_mecab_cfg():
    """Instantiate the mecab tokenizer from config and tokenize mixed-script text."""
    config_group = "preprocessor/tokenizer=mecab"
    cfg = eKonf.compose(config_group=config_group)
    cfg.verbose = True
    mecab = eKonf.instantiate(cfg)
    text = "IMF가 推定한 우리나라의 GDP갭률은 今年에도 소폭의 마이너스(−)를 持續하고 있다."
    tokens = mecab.tokenize(text)
    # isinstance is the idiomatic type check
    assert isinstance(tokens, list)
def test_mecab():
    """Directly constructed MecabTokenizer should tokenize mixed-script text."""
    from ekorpkit.preprocessors.tokenizer import MecabTokenizer

    mecab = MecabTokenizer()
    text = "IMF가 推定한 우리나라의 GDP갭률은 今年에도 소폭의 마이너스(−)를 持續하고 있다."
    tokens = mecab.tokenize(text)
    # isinstance is the idiomatic type check
    assert isinstance(tokens, list)
def test_nltk():
    """NLTK tokenizer should work both via config instantiation and directly."""
    from ekorpkit.preprocessors.tokenizer import NLTKTokenizer

    # (removed a redundant local `from ekorpkit import eKonf`; it is already
    # imported at module level)
    config_group = "preprocessor/tokenizer=nltk"
    cfg = eKonf.compose(config_group=config_group)
    cfg.verbose = True
    nltk = eKonf.instantiate(cfg)
    text = "I shall reemphasize some of those thoughts today in the context of legislative proposals that are now before the current Congress."
    tokens = nltk.tokenize(text)
    nltk = NLTKTokenizer()
    tokens = nltk.tokenize(text)
    # isinstance is the idiomatic type check
    assert isinstance(tokens, list)
def test_KSSSegmenter():
    """KSS segmenter should split Korean prose into a list of sentences."""
    from ekorpkit.preprocessors.segmenter import KSSSegmenter

    seg = KSSSegmenter()
    text = "일본기상청과 태평양지진해일경보센터는 3월 11일 오후 2시 49분경에 일본 동해안을 비롯하여 대만, 알래스카, 하와이, 괌, 캘리포니아, 칠레 등 태평양 연안 50여 국가에 지진해일 주의보와 경보를 발령하였다. 다행히도 우리나라는 지진발생위치로부터 1,000km 이상 떨어진데다 일본 열도가 가로막아 지진해일이 도달하지 않았다. 지진해일은 일본 소마항에 7.3m, 카마이시항에 4.1m, 미야코항에 4m 등 일본 동해안 전역에서 관측되었다. 지진해일이 원해로 전파되면서 대만(19시 40분)에서 소규모 지진해일과 하와이 섬에서 1.4m(23시 9분)의 지진해일이 관측되었다. 다음날인 3월 12일 새벽 1시 57분경에는 진앙지로부터 약 7,500km 떨어진 캘리포니아 크레센트시티에서 2.2m의 지진해일이 관측되었다."
    sents = seg(text)
    # isinstance is the idiomatic type check
    assert isinstance(sents, list)
def test_PySBDSegmenter():
    """PySBD segmenter should split English prose into a list of sentences."""
    from ekorpkit.preprocessors.segmenter import PySBDSegmenter

    seg = PySBDSegmenter()
    text = "For strains harboring the pYV plasmid and Yop-encoding plasmids, bacteria were grown with aeration at 26 °C overnight in broth supplemented with 2.5 mm CaCl2 and 100 μg/ml ampicillin and then subcultured and grown at 26 °C until A600 of 0.2. At this point, the cultures were shifted to 37 °C and aerated for 1 h. A multiplicity of infection of 50:1 was used for YPIII(p-) incubations, and a multiplicity of infection of 25:1 was used for other derivatives. For the pYopE-expressing plasmid, 0.1 mm isopropyl-β-d-thiogalactopyranoside was supplemented during infection to induce YopE expression."
    sents = seg(text)
    # isinstance is the idiomatic type check
    assert isinstance(sents, list)
def test_NLTKSegmenter():
    """NLTK segmenter should split text and chunk the sentences by max length."""
    from ekorpkit.preprocessors.segmenter import NLTKSegmenter

    seg = NLTKSegmenter()
    text = "For strains harboring the pYV plasmid and Yop-encoding plasmids, bacteria were grown with aeration at 26 °C overnight in broth supplemented with 2.5 mm CaCl2 and 100 μg/ml ampicillin and then subcultured and grown at 26 °C until A600 of 0.2. At this point, the cultures were shifted to 37 °C and aerated for 1 h. A multiplicity of infection of 50:1 was used for YPIII(p-) incubations, and a multiplicity of infection of 25:1 was used for other derivatives. For the pYopE-expressing plasmid, 0.1 mm isopropyl-β-d-thiogalactopyranoside was supplemented during infection to induce YopE expression."
    sents = seg(text)
    chunks = seg.chunk(sents, max_length=100)
    # isinstance is the idiomatic type check
    assert isinstance(chunks, list)
def test_SimpleSegmenter():
    """Simple segmenter should split English prose into a list of sentences."""
    from ekorpkit.preprocessors.segmenter import SimpleSegmenter

    seg = SimpleSegmenter()
    text = "For strains harboring the pYV plasmid and Yop-encoding plasmids, bacteria were grown with aeration at 26 °C overnight in broth supplemented with 2.5 mm CaCl2 and 100 μg/ml ampicillin and then subcultured and grown at 26 °C until A600 of 0.2. At this point, the cultures were shifted to 37 °C and aerated for 1 h. A multiplicity of infection of 50:1 was used for YPIII(p-) incubations, and a multiplicity of infection of 25:1 was used for other derivatives. For the pYopE-expressing plasmid, 0.1 mm isopropyl-β-d-thiogalactopyranoside was supplemented during infection to induce YopE expression."
    sents = seg(text)
    # isinstance is the idiomatic type check
    assert isinstance(sents, list)
def test_normalizer():
    """Normalizer should return a string for mixed-script input."""
    from ekorpkit.preprocessors.normalizer import Normalizer

    text = "IMF가 推定한 우리나라의 GDP갭률은 今年에도 소폭의 마이너스(−)를 持續하고 있다."
    text = Normalizer().normalize(text)
    # isinstance is the idiomatic type check
    assert isinstance(text, str)
def test_dummy_corpus():
    """Fetch the dummy corpus, verify the output file appears, then clean it up."""
    cfg = eKonf.compose(config_group="fetch/fetcher=_dummy")
    cfg.verbose = True
    cfg.name = "fomc_minutes"
    eKonf.instantiate(cfg)
    produced = cfg["output_file"]
    assert os.path.exists(produced)
    # remove the artifact so reruns start clean
    os.remove(produced)
    assert not os.path.exists(produced)
def test_build_corpora():
    """Build two dummy corpora, then load them together via the corpora config."""
    cfg = eKonf.compose(config_group="corpus/builtin=_dummy_fomc_minutes")
    cfg.verbose = True
    cfg.data_dir = "./data/tmp/fomc_minutes"
    db = eKonf.instantiate(cfg)
    db.build()
    cfg = eKonf.compose(config_group="corpus/builtin=_dummy_bok_minutes")
    cfg.verbose = True
    cfg.data_dir = "./data/tmp/bok_minutes"
    db = eKonf.instantiate(cfg)
    db.build()
    # Load both built corpora through the aggregate "corpora" config
    cfg = eKonf.compose(config_group="corpus=corpora")
    cfg.verbose = True
    cfg.name = ["bok_minutes", "fomc_minutes"]
    cfg.data_dir = "./data/tmp"
    crps = eKonf.instantiate(cfg)
    # crps.concat_corpora()
    assert len(crps.corpora) == 2
def test_corpus_task():
    """Run a filter+save pipeline over a single corpus and check the output file."""
    corpus_cfg = eKonf.compose(config_group="corpus=corpus")
    corpus_cfg.verbose = True
    corpus_cfg.name = "bok_minutes"
    corpus_cfg.automerge = True
    corpus_cfg.data_dir = "./data/tmp"
    # Task config wraps the corpus config and declares the pipeline stages
    cfg = eKonf.compose(config_group="task=corpus")
    cfg.verbose = True
    cfg.corpus = corpus_cfg
    cfg.pipeline._pipeline_ = ["filter_query", "save_dataframe"]
    cfg.pipeline.filter_query.query = "filename in ['BOK_20181130_20181218']"
    cfg.pipeline.save_dataframe.output_dir = "./data/tmp"
    cfg.pipeline.save_dataframe.output_file = "corpus_filtered.parquet"
    eKonf.instantiate(cfg)
    assert os.path.exists("./data/tmp/corpus_filtered.parquet")
def test_corpora_task():
    """Run a filter+save pipeline over multiple corpora and check the output file."""
    corpus_cfg = eKonf.compose(config_group="corpus=corpora")
    corpus_cfg.verbose = True
    corpus_cfg.name = ["bok_minutes", "fomc_minutes"]
    corpus_cfg.automerge = True
    corpus_cfg.data_dir = "./data/tmp"
    # Task config wraps the corpora config and declares the pipeline stages
    cfg = eKonf.compose(config_group="task=corpora")
    cfg.verbose = True
    cfg.corpus = corpus_cfg
    cfg.pipeline._pipeline_ = ["filter_query", "save_dataframe"]
    cfg.pipeline.filter_query.query = "id == 0"
    cfg.pipeline.save_dataframe.output_dir = "./data/tmp"
    cfg.pipeline.save_dataframe.output_file = "corpora_filtered.parquet"
    eKonf.instantiate(cfg)
    assert os.path.exists("./data/tmp/corpora_filtered.parquet")
|
#!/usr/bin/python
# ./grading-script.py <test-dir>
import os,re,sys,shutil,random,subprocess,threading

# Directory containing the reference tests; overridable via argv[1]
test_dir = 'tests'
if len(sys.argv) > 1:
    test_dir=sys.argv[1]
# Set up scratch space for grading
# NOTE(review): `dir` shadows the builtin; kept, since the rest of the script uses it
dir="grading"
try:
    shutil.rmtree(dir)
except OSError:  # narrowed from a bare except: only "doesn't exist" is expected here
    pass
os.mkdir(dir)
os.mkdir(dir + '/src')
# Raw string so the regex escapes are explicit (.h/.cpp files and Makefile)
re_cp=re.compile(r'\.h$|\.cpp$|^Makefile$')
for f in os.listdir('.'):
    if re_cp.search(f):
        shutil.copyfile(f, dir+"/"+f)
for f in os.listdir('src'):
    if re_cp.search(f):
        shutil.copyfile('src/' + f, dir+"/src/"+f)
# Check for cheating: each student .cpp may only include headers reachable from
# the instructor's header-check.cpp, plus its own source
token='TOKEN'+str(random.randrange(100000,999999))
header_cmd=['g++','-Wall','-g','-O3','-std=c++11','-M' ,'-DNO_OPENGL']
# Splits g++ -M output on backslash-continuations, spaces, colons, newlines
re_header=re.compile(r'[\\ :\n]+')
ok_h={'':1}
# Py3 fix: check_output returns bytes; decode before regex-splitting
header_base=subprocess.check_output(header_cmd+['src/header-check.cpp']).decode()
for h in re_header.split(header_base):
    ok_h[h]=1
for src_file in [
    'camera',
    'dump_png',
    'flat_shader',
    'main',
    'parse',
    'phong_shader',
    'plane',
    'reflective_shader',
    'render_world',
    'sphere']:
    ok_h['src/' + src_file + '.cpp']=1
    ok_h[src_file + '.o']=1
    header_mini=subprocess.check_output(header_cmd+['src/' + src_file + '.cpp']).decode()
    for h in re_header.split(header_mini):
        if h not in ok_h:  # Py3 fix: dict.has_key() was removed
            print("FAIL: forbidden include: "+h)
            exit()
if subprocess.call(['make','ray_tracer'],cwd=dir)!=0:
    print("FAIL: Did not compile")
    exit()
def run_command_with_timeout(cmd, timeout_sec):
    """Run *cmd* in the grading dir; return False if it had to be killed, else True."""
    proc = subprocess.Popen(cmd,cwd=dir)
    proc_thread = threading.Thread(target=proc.communicate)
    proc_thread.start()
    proc_thread.join(timeout_sec)
    if proc_thread.is_alive():
        try:
            proc.kill()
        except OSError:  # Py3 fix: `except OSError, e` is Python 2-only syntax
            # process exited between the liveness check and the kill: not a timeout
            return True
        return False
    return True
# Maps test name -> diff value (float), "CRASH", "TIMEOUT", or None, so a test
# referenced by several grading lines runs only once
hashed_tests={}
total_score=0
ignore_line=re.compile(r'^\s*(#|$)')  # blank/comment lines in the grading scheme
grade_line=re.compile(r'^(\S+)\s+(\S+)\s+(\S+)\s*$')  # points max_error test-name
gs=0
try:
    gs=open('grading-scheme.txt')
except IOError:  # narrowed from a bare except
    print("FAIL: could not open grading scheme.")
    exit()
diff_parse=re.compile(r'diff: (.*)')
# NOTE(review): time_parse is never used below; timing is enforced only via the timeout
time_parse=re.compile(r'time: (.*)')
grade_cmd=['./ray_tracer', '-i', 'file.txt', '-s', 'file.png', '-o', token+'.txt']
for line in gs.readlines():
    if ignore_line.search(line):
        continue
    g=grade_line.search(line)
    if not g:
        print("Unrecognized command: "+line)
        exit()
    points=float(g.groups()[0])
    max_error=float(g.groups()[1])
    max_time=15000
    file=g.groups()[2]
    if file not in hashed_tests:  # Py3 fix: dict.has_key() was removed
        timeout = max(int(max_time*1.2*3/1000)+1,2)
        shutil.copyfile(test_dir+'/'+file+".txt", dir+"/file.txt")
        shutil.copyfile(test_dir+'/'+file+".png", dir+"/file.png")
        if not run_command_with_timeout(grade_cmd, timeout):
            hashed_tests[file]="TIMEOUT"
        else:
            d=False
            try:
                results_file=open(dir+'/'+token+'.txt')
                d=diff_parse.match(results_file.readline())
                results_file.close()
                os.remove(dir+'/'+token+'.txt')
                if d: d=float(d.groups()[0])
            except IOError:
                # program produced no results file => it crashed
                d="CRASH"
            hashed_tests[file]=d
    d=hashed_tests[file]
    if d=="CRASH":
        print("FAIL: (%s) Program crashed."%file)
        points=0
    elif d=="TIMEOUT":
        print("FAIL: (%s) Test timed out."%file)
        points=0
    elif d is None:  # regex matched nothing: no "diff:" line reported
        print("FAIL: (%s) Program failed to report statistics."%file)
        points=0
    else:
        if d>max_error:
            print("FAIL: (%s) Too much error. Actual: %g Max: %g."%(file,d,max_error))
            points=0
        else:
            print("PASS: (%s) diff %g vs %g."%(file,d,max_error))
    if points>0:
        print("+%g points"%points)
        total_score+=points
    else:
        print("no points")
print("FINAL SCORE: %g"%total_score)
|
# sitePjt/accounts/views.py
from django.shortcuts import render, redirect
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.decorators import login_required
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseServerError, HttpResponseNotAllowed, HttpResponseForbidden
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout
)
from .forms import UserLoginForm, UserProfileForm, UserCreationForm
from .models import Author,ServerNode
from .permissions import IsActivated, IsActivatedOrReadOnly
from posting import views as PostingView
from friendship.models import Friend
import requests
from .serializers import AuthorSerializer
'''
check if input email/password is valid and the user actually exist before login
'''
def login_view(request):
    """Validate the login form, authenticate, and redirect to the posts page.

    Re-renders the login page with an error message on bad credentials.
    """
    form = UserLoginForm(request.POST or None)
    context = {'form': form}
    if form.is_valid():
        user = authenticate(
            email=form.cleaned_data.get('email'),
            password=form.cleaned_data.get('password'),
        )
        if user:
            login(request, user)
            return redirect('/posts/')
        context['error_msg'] = "Invalid Username/Password."
    return render(request, "accounts/login.html", context)
def register_view(request):
    """Register a new user via UserCreationForm, log them in, redirect to /posts/.

    On GET (or an invalid POST) renders the signup form (bound with errors
    when the POST was invalid).
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            email = form.cleaned_data.get('email')
            # Bug fix: the field key had been mangled to the placeholder
            # '<PASSWORD>', so authenticate() always received password=None.
            # TODO(review): confirm the custom form names this field
            # 'password' (not e.g. 'password1').
            password = form.cleaned_data.get('password')
            new_user = authenticate(email=email, password=password)
            login(request, new_user)
            return redirect('/posts/')
    else:
        form = UserCreationForm()
    return render(request, "accounts/signup.html", context={'form': form})
def logout_view(request):
    '''
    Log the current user out and redirect back to the login page.
    '''
    logout(request)
    return redirect('/accounts/login/')
class ProfileView(APIView):
    """
    View the detail of an author profile and that author's posts list.

    * Requires token authentication.
    * Only activated authors are able to access this view (IsActivated).
    """
    #authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [IsActivated]
    def get(self, request, author_id, format=None):
        '''
        Render an author's profile; when browsing someone else's page, only
        posts visible to the requesting user are listed.
        '''
        try:
            # Prefer a local author; fall back to querying remote server nodes
            author = Author.objects.filter(Q(id=author_id) & Q(host=settings.HOSTNAME))
            #view remote user profile
            if not author.exists():
                author = getRemoteAuthor(author_id)
                if not author:
                    return HttpResponseNotFound("Author Profile Not Found.")
            else:
                author = author[0]
            posts_list = []
            #Viewing other's profile. Get all visible posts of that author.
            # NOTE(review): when viewing one's own profile posts_list stays
            # empty here — confirm the template fetches own posts separately.
            if request.user.id != author_id:
                posts_list = PostingView.getVisiblePosts(request.user, author)
                posts_list.sort(key=lambda x: x.published, reverse=True)
            context = {
                'author': author,
                'post_list': posts_list,
            }
            return render(request, "accounts/profile.html", context)
        except Exception as e:
            return HttpResponseServerError(e)
    def post(self, request, author_id, format=None):
        '''
        Update the requesting author's own profile fields, then re-render it.
        '''
        try:
            if not request.user.id == author_id:
                return HttpResponseForbidden("You can't modify other's profile.")
            author = Author.objects.filter(Q(id=author_id) & Q(host=settings.HOSTNAME))
            if not author.exists():
                return HttpResponseNotFound("Author Profile Not Found.")
            form = request.POST
            Author.objects.filter(id=author_id).update(
                displayName=form['displayName'],
                bio=form['bio'],
                github=form['github'],
            )
            # Keep the display name cached on friendship rows in sync
            Friend.objects.filter(id=author_id).update(
                displayName=form['displayName']
            )
            author = Author.objects.get(id=author_id)
            posts_list = []
            #Viewing other's profile. Get all visible posts of that author.
            # NOTE(review): the guard above guarantees request.user.id == author_id
            # here, so this branch never runs — dead code or copy-paste remnant.
            if request.user.id != author_id:
                posts_list = PostingView.getVisiblePosts(request.user, author)
                posts_list.sort(key=lambda x: x.published, reverse=True)
            context = {
                'author': author,
                'post_list': posts_list,
            }
            return render(request, "accounts/profile.html", context)
        except Exception as e:
            return HttpResponseServerError(e)
def getRemoteAuthor(author_id):
    """Query each known server node for *author_id*; return the first decoded
    Author, or None if no node has it."""
    author = None
    #TODO Stop abusing all nodes, need author object instead of author_id
    for node in ServerNode.objects.all():
        endpoint = '{}author/{}'.format(node.host_url, str(author_id))
        try:
            response = requests.get(
                endpoint, auth=(node.server_username, node.server_password)
            )
            if response.status_code != 200:
                continue
            author = getJsonDecodeAuthor(response.json())
            break
        except Exception:
            # best-effort: an unreachable/misbehaving node means "not found here"
            continue
    return author
def findAuthorIdFromUrl(url):
    """Return the last path component of *url* (the author id).

    Handles an optional single trailing slash; a string with no slash is
    returned unchanged.
    """
    if '/' not in url:
        return url
    # strip at most one trailing slash, then take everything after the last '/'
    trimmed = url[:-1] if url.endswith('/') else url
    return trimmed[trimmed.rindex('/') + 1:]
def getJsonDecodeAuthor(remote_author):
    """Build an (unsaved) Author from a remote profile JSON dict.

    Missing optional fields default to the string 'None', matching the prior
    behavior. 'url' is required (KeyError if absent), since the id is derived
    from it.
    """
    def field(key):
        # dict.get replaces the repeated `d[k] if k in d.keys() else 'None'` chains
        return remote_author.get(key, 'None')

    author = Author()
    author.id = findAuthorIdFromUrl(remote_author['url'])
    author.url = field('url')
    author.displayName = field('displayName')
    author.bio = field('bio')
    author.host = field('host')
    author.github = field('github')
    author.date_joined = field('date_joined')
    author.last_login = field('last_login')
    return author
|
from datetime import datetime, timedelta
from agagd_core.json_response import JsonResponse
from agagd_core.models import Game, Member
from django.db.models import Avg, Count
from django.db.models.functions import TruncMonth, TruncWeek
from django.http import HttpResponse
from django.views import View
class ApiStatusView(View):
    """Simple liveness endpoint for the AGAGD API."""

    def get(self, request):
        return JsonResponse(
            {"health_status_code": 200, "health_status": "The AGAGD is running."}
        )
class ApiGameCountView(View):
    """Return [{date, count}] of games per day, sorted ascending by date."""

    def get(self, request):
        counts = Game.objects.values("game_date").annotate(Count("game_date"))
        games_by_date = []
        for row in counts:
            try:
                parsed = datetime.strptime(str(row["game_date"]), "%Y-%m-%d")
            except ValueError:
                # skip rows whose date doesn't parse (e.g. null/malformed)
                continue
            games_by_date.append(
                {
                    "date": parsed.strftime("%Y-%m-%d"),
                    "count": row["game_date__count"],
                }
            )
        return JsonResponse(sorted(games_by_date, key=lambda d: d["date"]))
class ApiPlayerRatings(View):
    """Return a player's rating history, down-sampled by the requested period.

    URL kwargs: ``player_id`` (Member pk) and ``time_period`` (1, 5, or 10).
    1 year returns raw ratings; 5 years averages by week; 10 years by month.
    Responds with a "not enough data" payload when fewer than 3 points exist.
    """

    def __get_ratings_json(self, ratings_obj):
        """Normalize raw rating rows or aggregated dicts into JSON-ready dicts."""
        ratings_json = []
        for rating in ratings_obj:
            elab_date = None
            sigma = None
            players_rating = None
            if isinstance(rating, dict):
                # Aggregated rows carry a truncated date plus Avg() annotations
                if "week_date" in rating:
                    elab_date = rating["week_date"]
                elif "month_date" in rating:
                    elab_date = rating["month_date"]
                sigma = rating["sigma__avg"]
                players_rating = rating["rating__avg"]
            else:
                elab_date = rating.elab_date
                sigma = rating.sigma
                players_rating = rating.rating
            # idiom fix: compare against None with `is not`
            if elab_date is not None:
                ratings_json.append(
                    {"sigma": sigma, "elab_date": elab_date, "rating": players_rating}
                )
        return ratings_json

    def __get_less_current_date(self, number_of_weeks):
        """Return the datetime *number_of_weeks* weeks before now."""
        return datetime.now() - timedelta(weeks=number_of_weeks)

    def get(self, request, *args, **kwargs):
        member_id = self.kwargs.get("player_id")
        time_period = self.kwargs.get("time_period")
        player = Member.objects.get(pk=member_id)
        ratings = None
        min_ratings = 3  # fewer points than this can't make a useful graph
        if time_period == 1:
            # ~52 weeks back, raw ratings
            ratings = (
                player.ratings_set.all()
                .filter(elab_date__year__gte=self.__get_less_current_date(52).year)
                .order_by("elab_date")
            )
        elif time_period == 5:
            # ~260 weeks back, averaged per week
            ratings = (
                player.ratings_set.all()
                .filter(elab_date__year__gte=self.__get_less_current_date(260).year)
                .annotate(week_date=TruncWeek("elab_date"))
                .values("week_date")
                .annotate(Avg("rating"), Avg("sigma"))
                .order_by("week_date")
            )
        elif time_period == 10:
            # ~520 weeks back, averaged per month
            ratings = (
                player.ratings_set.all()
                .filter(elab_date__year__gte=self.__get_less_current_date(520).year)
                .annotate(month_date=TruncMonth("elab_date"))
                .values("month_date")
                .annotate(Avg("rating"), Avg("sigma"))
                .order_by("month_date")
            )
        # idiom fix: `is None` instead of `== None`
        if ratings is None or ratings.count() < min_ratings:
            return JsonResponse(
                {
                    "status": "not enough data",
                    "status_message": "Not enough data to produce a rating graph.",
                }
            )
        return JsonResponse(self.__get_ratings_json(ratings))
|
<gh_stars>10-100
from connect import netmiko_connect
import re
# Logical command names; used as keys into the per-platform `commands` table.
SHOW_IP_ROUTE = "ip route"
SHOW_ARP = "arp"
SHOW_INT_DESCRIPTION = "int description"
SHOW_INT_BRIEF = "int brief"
SHOW_VERSION = "version"

# Supported device platforms (netmiko device-type identifiers).
IOS = "ios"
NXOS = "nxos"
IOSXR = "iosxr"

# Maps each logical command to the exact CLI syntax for each platform.
# Note the platform differences (e.g. "show arp" vs "show ip arp").
commands = {SHOW_IP_ROUTE: {IOS: "show ip route",
                            NXOS: "show ip route",
                            IOSXR: "show ip route"},
            SHOW_ARP: {IOS: "show arp",
                       NXOS: "show ip arp",
                       IOSXR: "show arp"},
            SHOW_INT_DESCRIPTION: {IOS: "show interfaces description",
                                   NXOS: "show interface description",
                                   IOSXR: "show interfaces description"},
            SHOW_INT_BRIEF: {IOS: "show ip interface brief",
                             NXOS: "show interface brief",
                             IOSXR: "show ip interface brief"},
            SHOW_VERSION: {IOS: "show version",
                           NXOS: "show version",
                           IOSXR: "show version"}
            }
# CYCLE THROUGH DIFFERENT DEVICE TYPES
# For each platform in the list, open a connection, run the full set of
# "show" commands (resolved to platform-specific syntax via `commands`),
# print the raw CLI output, then disconnect.
for device_type in [IOSXR]:
    connection = netmiko_connect(device_type)
    print('connection:', connection)
    print(f"\n\n----- showing running configuration for {device_type} -------------------")
    output = connection.send_command("show running-config")
    print(output)
    print(f"\n\n----- showing ip route for {device_type} -------------------")
    output = connection.send_command(commands[SHOW_IP_ROUTE][device_type])
    print(output)
    print(f"\n\n----- showing arp table for {device_type} -------------------")
    output = connection.send_command(commands[SHOW_ARP][device_type])
    print(output)
    print(f"\n\n----- showing interface description for {device_type} -------------------")
    output = connection.send_command(commands[SHOW_INT_DESCRIPTION][device_type])
    print(output)
    print(f"\n\n----- showing interface brief for {device_type} -------------------")
    output = connection.send_command(commands[SHOW_INT_BRIEF][device_type])
    print(output)
    # Close the SSH session before moving on to the next platform.
    connection.disconnect()
# CYCLE THROUGH DIFFERENT SHOW COMMANDS
# Open one connection per platform, then iterate the command table so each
# logical command is run on every platform. The IOS/NXOS branches are kept
# commented out; only the IOS-XR device is currently exercised.
print("\n\nBEGIN CYCLE THROUGH DIFFERENT SHOW COMMANDS")
# csr_connection = netmiko_connect(IOS)
# nxos_connection = netmiko_connect(NXOS)
xr_connection = netmiko_connect(IOSXR)
if xr_connection:
    print("--- connections successful")
else:
    # No connection -> nothing to do; bail out of the script.
    exit()
nxos_version_raw = None
csr_version_raw = None
xr_version_raw = None
for command_type, command in commands.items():
    print(f"\n----- command: {command_type} ---------------------")
    # print(f"\n----- ... for IOS: {command[IOS]} ---------------------")
    # csr_output = csr_connection.send_command(command[IOS])
    # print(csr_output)
    #
    # print(f"\n----- ... for NXOS: {command[NXOS]} ---------------------")
    # nxos_output = nxos_connection.send_command(command[NXOS])
    # print(nxos_output)
    print(f"\n----- ... for IOSXR: {command[IOSXR]} ---------------------")
    xr_output = xr_connection.send_command(command[IOSXR])
    print(xr_output)
    # saving versions for later parsing
    if command_type == SHOW_VERSION:
        # csr_version_raw = csr_output
        # nxos_version_raw = nxos_output
        xr_version_raw = xr_output
# csr_connection.disconnect()
# nxos_connection.disconnect()
xr_connection.disconnect()
# Now the harder part - parsing the output into some 'normalized' format
# if nxos_version_raw and csr_version_raw:
if xr_version_raw:
    # re_nxos_version_pattern = r"NXOS: version (.*)"
    # re_csr_version_pattern = r"Cisco IOS XE Software, Version (.*)"
    # Capture everything after the version banner prefix.
    re_xr_version_pattern = r"Cisco IOS XR Software, Version (.*)"
    # nxos_version_match = re.search(re_nxos_version_pattern, nxos_version_raw)
    # csr_version_match = re.search(re_csr_version_pattern, csr_version_raw)
    xr_version_match = re.search(re_xr_version_pattern, xr_version_raw)
    # if nxos_version_match:
    #     print(f"---> NXOS version parsed from output: {nxos_version_match.group(1)}")
    #
    # if csr_version_match:
    #     print(f"---> IOS version parsed from output: {csr_version_match.group(1)}")
    if xr_version_match:
        print(f"---> IOSXR version parsed from output: {xr_version_match.group(1)}")
else:
    print(f"!!! error, no version data to parse")
|
import matplotlib as mpl
# Allow matplotlib plots to be rendered without active window manager
mpl.use('agg')
from readCPython import load_C_output
from getXi1 import find_root
import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
def set_style(usetex=False):
    """Apply a professional-looking matplotlib style.

    Keyword Arguments:
        usetex -- render text with the LaTeX engine; requires a LaTeX
                  installation to be on the PATH
    """
    plt.rc('text', usetex=usetex)
    plt.rc('font', family='Serif')
    mpl.rcParams.update({
        'figure.figsize': [10, 7],
        'font.size': 17,
        'savefig.dpi': 150,
        # Inward-pointing major+minor ticks on all four sides.
        'xtick.minor.visible': True,
        'ytick.minor.visible': True,
        'xtick.direction': 'in',
        'ytick.direction': 'in',
        'xtick.top': True,
        'ytick.right': True,
        'xtick.major.size': 6,
        'xtick.minor.size': 3,
        'ytick.major.size': 6,
        'ytick.minor.size': 3,
        'xtick.labelsize': 13,
        'ytick.labelsize': 13,
    })
def extract_theta_dot_xi(dataFiles):
    """ Load the solution state and metadata for every given binary file path

    Positional Arguments:
        dataFiles -- iterator of paths to binary data files produced by the C
                     executable `integrate`

    Returns -> (STATE, META):
        STATE -- list of state data loaded from the binary files
                 (parallel to META)
        META  -- list of metadata extracted from the dump-file headers
    """
    STATE = list()
    META = list()
    for dataFile in dataFiles:
        state, metadata = load_C_output(dataFile)
        META.append(metadata)
        STATE.append(state)
    return STATE, META
if __name__ == '__main__':
    # Argument Parser
    parser = argparse.ArgumentParser(description='Plot Data')
    parser.add_argument('files', metavar='<path/to/data/files>', type=str, nargs='+', help="Files to plot")
    parser.add_argument('-o', '--output', type=str, default="Figures/ThetaXi.pdf", metavar='<path/to/output/file>', help='output location')
    parser.add_argument('-r', '--root', action='store_true', help='Also plot the crosshairs showing xi1')
    parser.add_argument('-t', '--tex', action='store_true', help='Use the tex rendering engine when plotting')
    parser.add_argument('-d', '--derivitive', action='store_true', help='Plot dtheta/dxi along with theta')
    parser.add_argument('-k', '--key', action='store_true', help='Include a key/legend in the output graph')
    parser.add_argument('-l', '--log', action='store_true', help='plot on a semilogy scale')
    args = parser.parse_args()
    # Filter given paths to only select .dat data files
    dataFiles = filter(lambda x: '.dat' in x, args.files)
    # Set the style to include minor ticks, larger labels, and ticks on all sides
    set_style(usetex=args.tex)
    # Plot all Given Solutions
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    for file in dataFiles:
        state, meta = load_C_output(file)
        # deal with either degenerate or non degenerate identifier
        if 'n' in meta:
            identifier = meta['n']
        else:
            identifier = meta['theta_c']
        # deal with either log or non log plotting
        if args.log:
            ax.semilogy(state[0], state[1], label=r"$\theta_{{{}}}$".format(identifier), color='black')
        else:
            ax.plot(state[0], state[1], label=r"$\theta_{{{}}}$".format(identifier), color='black')
        if args.derivitive:
            if args.log:
                ax.semilogy(state[0], state[2], linestyle='--', label=r"$\left(\frac{{d\theta}}{{d\xi}}\right)_{{{}}}$".format(identifier), color='black')
            else:
                ax.plot(state[0], state[2], linestyle='--', label=r"$\left(\frac{{d\theta}}{{d\xi}}\right)_{{{}}}$".format(identifier), color='black')
        # If requested show where each solution has its root
        if args.root:
            xi1Approx, theta1Approx = find_root(state[0], state[1])
            ax.axvline(x=xi1Approx, linestyle=':', color='black', alpha=0.5)
            ax.axhline(y=0, linestyle=':', color='black', alpha=0.5)
        # explicitly clear data when plotting multiple solutions in an attempt to keep the memory manager happy
        # does not help in most cases because the python GC is pretty okay but no harm in being explicit here
        del state
        del meta
    ax.set_xlabel(r'$\xi$', fontsize=17)
    ax.set_ylabel(r'$\theta$, $\frac{d\theta}{d\xi}$', fontsize=17)
    if args.key:
        plt.legend()
    # Save File to requested Location
    plt.savefig(args.output, bbox_inches='tight')
|
<gh_stars>1-10
import logging
import re
import requests
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
logger = logging.getLogger(__name__)
class ClickHouse(BaseSQLQueryRunner):
    """Redash query runner for ClickHouse over its HTTP interface."""

    noop_query = "SELECT 1"

    @classmethod
    def configuration_schema(cls):
        """JSON schema describing the data-source configuration form."""
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "default": "http://127.0.0.1:8123"
                },
                "user": {
                    "type": "string",
                    "default": "default"
                },
                "password": {
                    "type": "string"
                },
                "dbname": {
                    "type": "string",
                    "title": "Database Name"
                },
                "timeout": {
                    "type": "number",
                    "title": "Request Timeout",
                    "default": 30
                }
            },
            "required": ["dbname"],
            "secret": ["password"]
        }

    @classmethod
    def type(cls):
        return "clickhouse"

    def _get_tables(self, schema):
        """Populate `schema` with {db.table: columns} from system.columns."""
        query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')"

        results, error = self.run_query(query, None)

        if error is not None:
            raise Exception("Failed getting schema.")

        results = json_loads(results)

        for row in results['rows']:
            table_name = '{}.{}'.format(row['database'], row['table'])

            if table_name not in schema:
                schema[table_name] = {'name': table_name, 'columns': []}

            schema[table_name]['columns'].append(row['name'])

        return schema.values()

    def _send_query(self, data, stream=False):
        """POST a query to the ClickHouse HTTP endpoint, returning parsed JSON.

        Raises Exception with the server's response text on any non-200.
        """
        r = requests.post(
            # Fallback must match configuration_schema's default
            # (was a malformed "http://1172.16.31.10:8123").
            self.configuration.get('url', "http://127.0.0.1:8123"),
            data=data.encode("utf-8"),
            stream=stream,
            timeout=self.configuration.get('timeout', 30),
            params={
                'user': self.configuration.get('user', "default"),
                'password': self.configuration.get('password', ""),
                'database': self.configuration['dbname']
            }
        )
        if r.status_code != 200:
            raise Exception(r.text)
        # logging.warning(r.json())
        return r.json()

    @staticmethod
    def _define_column_type(column):
        """Map a ClickHouse column type name to a Redash TYPE_* constant."""
        c = column.lower()
        # Unwrap Nullable(T) to classify the inner type.
        f = re.search(r'^nullable\((.*)\)$', c)
        if f is not None:
            c = f.group(1)
        if c.startswith('int') or c.startswith('uint'):
            return TYPE_INTEGER
        elif c.startswith('float'):
            return TYPE_FLOAT
        elif c == 'datetime':
            return TYPE_DATETIME
        elif c == 'date':
            return TYPE_DATE
        else:
            return TYPE_STRING

    def _clickhouse_query(self, query):
        """Run `query` with FORMAT JSON and normalize the result rows."""
        query += '\nFORMAT JSON'
        result = self._send_query(query)
        columns = []
        columns_int64 = []  # db converts value to string if its type equals UInt64
        columns_totals = {}

        for r in result['meta']:
            column_name = r['name']
            column_type = self._define_column_type(r['type'])

            if r['type'] in ('Int64', 'UInt64', 'Nullable(Int64)', 'Nullable(UInt64)'):
                columns_int64.append(column_name)
            else:
                # Label for the WITH TOTALS row: "Total" in the first string
                # column, blank elsewhere.
                columns_totals[column_name] = 'Total' if column_type == TYPE_STRING else None

            columns.append({'name': column_name, 'friendly_name': column_name, 'type': column_type})

        rows = result['data']
        for row in rows:
            for column in columns_int64:
                try:
                    row[column] = int(row[column])
                except TypeError:
                    # Nullable(Int64) NULLs arrive as None; keep them None.
                    row[column] = None

        if 'totals' in result:
            totals = result['totals']
            for column, value in columns_totals.iteritems():
                totals[column] = value
            rows.append(totals)

        return {'columns': columns, 'rows': rows}

    def run_query(self, query, user):
        """Execute `query`; return (json_data, error) per the runner contract."""
        logger.debug("Clickhouse is about to execute query: %s", query)
        if query == "":
            json_data = None
            error = "Query is empty"
            return json_data, error
        try:
            q = self._clickhouse_query(query)
            data = json_dumps(q)
            error = None
        except Exception as e:
            data = None
            # Use the module-level logger (was the root `logging` module).
            logger.exception(e)
            error = unicode(e)
        return data, error


register(ClickHouse)
|
<reponame>bdmbdsm/openprocurement.client.python<filename>openprocurement_client/client.py<gh_stars>1-10
import logging
from functools import wraps
from io import FileIO
from os import path
from urlparse import parse_qs, urlparse
from iso8601 import parse_date
from munch import munchify
from restkit import BasicAuth, Resource, request
from restkit.errors import ResourceNotFound
from retrying import retry
from simplejson import dumps, loads
from .exceptions import InvalidResponse, NoToken
logger = logging.getLogger(__name__)
IGNORE_PARAMS = ('uri', 'path')
def verify_file(fn):
    """Decorator: coerce a path argument into a file-like object before
    invoking the wrapped upload method."""
    @wraps(fn)
    def wrapper(self, file_, *args, **kwargs):
        if isinstance(file_, basestring):
            # FileIO (rather than open()) lets us override the .name
            # attribute, which restkit later reads to pick the upload
            # filename. With open(), .name is read-only and may carry the
            # full path, which makes a poor filename.
            file_ = FileIO(file_, 'rb')
            file_.name = path.basename(file_.name)
        if not hasattr(file_, 'read'):
            # A file-like object must have a 'read' method.
            raise TypeError('Expected either a string '
                            'containing a path to file or a '
                            'file-like object, got {}'.format(type(file_)))
        return fn(self, file_, *args, **kwargs)
    return wrapper
class APIBaseClient(Resource):
    """Base class for OpenProcurement API clients.

    Wraps a restkit Resource with basic auth, JSON headers and session
    cookie tracking shared by all concrete clients.
    """

    def __init__(self, key,
                 host_url,
                 api_version,
                 resource,
                 params=None,
                 **kwargs):
        super(APIBaseClient, self).__init__(
            host_url,
            filters=[BasicAuth(key, "")],
            **kwargs
        )
        self.prefix_path = '/api/{}/{}'.format(api_version, resource)
        if not isinstance(params, dict):
            params = {"mode": "_all_"}
        self.params = params
        self.headers = {"Content-Type": "application/json"}
        # To perform some operations (e.g. create a tender)
        # we first need to obtain a cookie. For that reason,
        # here we send a HEAD request to a neutral URL.
        self.head('/api/{}/spore'.format(api_version))

    def request(self, method, path=None, payload=None, headers=None,
                params_dict=None, **params):
        """Perform an HTTP request, keeping the session cookie up to date."""
        _headers = dict(self.headers)
        _headers.update(headers or {})
        try:
            response = super(APIBaseClient, self).request(
                method, path=path, payload=payload, headers=_headers,
                params_dict=params_dict, **params
            )
            if 'Set-Cookie' in response.headers:
                self.headers['Cookie'] = response.headers['Set-Cookie']
            return response
        except ResourceNotFound as e:
            # Refresh the session cookie even on 404 so retries stay valid.
            if 'Set-Cookie' in e.response.headers:
                self.headers['Cookie'] = e.response.headers['Set-Cookie']
            raise e

    def patch(self, path=None, payload=None, headers=None,
              params_dict=None, **params):
        """ HTTP PATCH

        - payload: string passed to the body of the request
        - path: string additional path to the uri
        - headers: dict, optional headers that will
          be added to HTTP request.
        - params: optional parameters added to the request
        """
        return self.request("PATCH", path=path, payload=payload,
                            headers=headers, params_dict=params_dict, **params)

    def delete(self, path=None, headers=None):
        """ HTTP DELETE

        - path: string additional path to the uri
        - headers: dict, optional headers that will
          be added to HTTP request.
        """
        return self.request("DELETE", path=path, headers=headers)

    def _update_params(self, params):
        """Merge `params` into the stored query params (minus URI internals)."""
        for key in params:
            if key not in IGNORE_PARAMS:
                self.params[key] = params[key]

    def _merge_headers(self, headers):
        """Return `headers` overlaid with the session headers as a new dict.

        The session headers (Content-Type, Cookie) take precedence. Unlike
        the previous `headers.update(self.headers)` on a `headers={}`
        default, this never mutates the caller's dict nor a shared default.
        """
        merged = dict(headers or {})
        merged.update(self.headers)
        return merged

    def _create_resource_item(self, url, payload, headers=None):
        """POST `payload` as JSON to `url`; return munchified body on 201."""
        response_item = self.post(
            url, headers=self._merge_headers(headers), payload=dumps(payload)
        )
        if response_item.status_int == 201:
            return munchify(loads(response_item.body_string()))
        raise InvalidResponse

    def _get_resource_item(self, url, headers=None):
        """GET `url`; return munchified body on 200."""
        response_item = self.get(url, headers=self._merge_headers(headers))
        if response_item.status_int == 200:
            return munchify(loads(response_item.body_string()))
        raise InvalidResponse

    def _patch_resource_item(self, url, payload, headers=None):
        """PATCH `payload` as JSON to `url`; return munchified body on 200."""
        response_item = self.patch(
            url, headers=self._merge_headers(headers), payload=dumps(payload)
        )
        if response_item.status_int == 200:
            return munchify(loads(response_item.body_string()))
        raise InvalidResponse

    def _upload_resource_file(self, url, data, headers=None, method='post'):
        """Send `data` as multipart form data via `method`; 200/201 expected.

        Here the explicit `headers` override the session headers (the
        original precedence for uploads), and Content-Type is forced last.
        """
        file_headers = dict(self.headers)
        file_headers.update(headers or {})
        file_headers['Content-Type'] = "multipart/form-data"
        response_item = getattr(self, method)(
            url, headers=file_headers, payload=data
        )
        if response_item.status_int in (201, 200):
            return munchify(loads(response_item.body_string()))
        raise InvalidResponse

    def _delete_resource_item(self, url, headers=None):
        """DELETE `url`; return munchified body on 200."""
        response_item = self.delete(url, headers=headers)
        if response_item.status_int == 200:
            return munchify(loads(response_item.body_string()))
        raise InvalidResponse
class TendersClient(APIBaseClient):
    """client for tenders

    Thin CRUD wrapper over the OpenProcurement tenders API: every method
    builds a resource path under /api/<version>/tenders and delegates to
    the _create/_get/_patch/_upload/_delete helpers on APIBaseClient,
    passing the tender's access token via the X-Access-Token header.
    """
    def __init__(self, key,
                 host_url="https://api-sandbox.openprocurement.org",
                 api_version='2.0',
                 params=None,
                 resource='tenders'):
        super(TendersClient, self).__init__(key, host_url, api_version, resource, params)

    ###########################################################################
    # GET ITEMS LIST API METHODS
    ###########################################################################

    @retry(stop_max_attempt_number=5)
    def get_tenders(self, params={}, feed='changes'):
        # NOTE(review): the mutable default `params={}` is mutated below
        # (`params['feed'] = feed`), so the 'feed' key leaks into the shared
        # default across calls — consider a None sentinel.
        params['feed'] = feed
        try:
            self._update_params(params)
            response = self.get(
                self.prefix_path,
                params_dict=self.params)
            if response.status_int == 200:
                tender_list = munchify(loads(response.body_string()))
                # Remember the next_page offset so the next call continues
                # the feed from where this one stopped.
                self._update_params(tender_list.next_page)
                return tender_list.data
        except ResourceNotFound:
            # A stale offset causes 404; drop it so the retry restarts the feed.
            del self.params['offset']
            raise
        raise InvalidResponse

    def get_latest_tenders(self, date, tender_id):
        """List tenders changed since `date` (ISO 8601), in test mode."""
        iso_dt = parse_date(date)
        dt = iso_dt.strftime("%Y-%m-%d")
        tm = iso_dt.strftime("%H:%M:%S")
        response = self._get_resource_item(
            '{}?offset={}T{}&opt_fields=tender_id&mode=test'.format(
                self.prefix_path,
                dt,
                tm
            )
        )
        # NOTE(review): _get_resource_item already returns the parsed
        # (munchified) body or raises InvalidResponse, so this status_int
        # check looks unreachable — confirm before relying on this path.
        if response.status_int == 200:
            tender_list = munchify(loads(response.body_string()))
            self._update_params(tender_list.next_page)
            return tender_list.data
        raise InvalidResponse

    def _get_tender_resource_list(self, tender, items_name):
        # Fetch a sub-collection of the tender using its access token.
        return self._get_resource_item(
            '{}/{}/{}'.format(self.prefix_path, tender.data.id, items_name),
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def get_questions(self, tender, params={}):
        return self._get_tender_resource_list(tender, "questions")

    def get_documents(self, tender, params={}):
        return self._get_tender_resource_list(tender, "documents")

    def get_awards_documents(self, tender, award_id, params={}):
        return self._get_resource_item(
            '{}/{}/awards/{}/documents'.format(self.prefix_path, tender.data.id, award_id),
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def get_qualification_documents(self, tender, qualification_id, params={}):
        return self._get_resource_item(
            '{}/{}/qualifications/{}/documents'.format(self.prefix_path, tender.data.id, qualification_id),
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def get_awards(self, tender, params={}):
        return self._get_tender_resource_list(tender, "awards")

    def get_lots(self, tender, params={}):
        return self._get_tender_resource_list(tender, "lots")

    ###########################################################################
    # CREATE ITEM API METHODS
    ###########################################################################

    def _create_tender_resource_item(self, tender, item_obj, items_name):
        # POST `item_obj` into the named sub-collection of the tender.
        return self._create_resource_item(
            '{}/{}/{}'.format(self.prefix_path, tender.data.id, items_name),
            item_obj,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def create_tender(self, tender):
        return self._create_resource_item(self.prefix_path, tender)

    def create_question(self, tender, question):
        return self._create_tender_resource_item(tender, question, "questions")

    def create_bid(self, tender, bid):
        return self._create_tender_resource_item(tender, bid, "bids")

    def create_lot(self, tender, lot):
        return self._create_tender_resource_item(tender, lot, "lots")

    def create_award(self, tender, award):
        return self._create_tender_resource_item(tender, award, "awards")

    def create_cancellation(self, tender, cancellation):
        return self._create_tender_resource_item(tender, cancellation, "cancellations")

    def create_complaint(self, tender, complaint):
        return self._create_tender_resource_item(tender, complaint, "complaints")

    def create_award_complaint(self, tender, complaint, award_id):
        return self._create_resource_item(
            '{}/{}/{}'.format(self.prefix_path, tender.data.id, "awards/{0}/complaints".format(award_id)),
            complaint,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def create_thin_document(self, tender, document_data):
        # Register document metadata only (no file upload).
        return self._create_resource_item(
            '{}/{}/documents'.format(
                self.prefix_path,
                tender.data.id
            ),
            document_data,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    ###########################################################################
    # GET ITEM API METHODS
    ###########################################################################

    def get_tender(self, id):
        return self._get_resource_item('{}/{}'.format(self.prefix_path, id))

    def _get_tender_resource_item(self, tender, item_id, items_name,
                                  access_token=""):
        # An explicit access_token overrides the one carried by the tender.
        if access_token:
            headers = {'X-Access-Token': access_token}
        else:
            headers = {'X-Access-Token':
                       getattr(getattr(tender, 'access', ''), 'token', '')}
        return self._get_resource_item(
            '{}/{}/{}/{}'.format(self.prefix_path,
                                 tender.data.id,
                                 items_name,
                                 item_id),
            headers=headers
        )

    def get_question(self, tender, question_id):
        return self._get_tender_resource_item(tender, question_id, "questions")

    def get_bid(self, tender, bid_id, access_token):
        return self._get_tender_resource_item(tender, bid_id, "bids",
                                              access_token)

    def get_lot(self, tender, lot_id):
        return self._get_tender_resource_item(tender, lot_id, "lots")

    def get_file(self, tender, url, access_token=None):
        """Download a document; returns (content, filename) on success.

        The API answers with a 302 to the storage backend, which is then
        fetched directly; the filename is taken from Content-Disposition.
        """
        parsed_url = urlparse(url)
        headers = {}
        if access_token:
            headers = {'X-Access-Token': access_token}
        headers.update(self.headers)
        response_item = self.get(parsed_url.path,
                                 headers=headers,
                                 params_dict=parse_qs(parsed_url.query))
        if response_item.status_int == 302:
            response_obj = request(response_item.headers['location'])
            if response_obj.status_int == 200:
                return response_obj.body_string(), \
                    response_obj.headers['Content-Disposition'] \
                    .split("; filename=")[1].strip('"')
        raise InvalidResponse

    def extract_credentials(self, id):
        return self._get_resource_item('{}/{}/extract_credentials'.format(self.prefix_path, id))

    ###########################################################################
    # PATCH ITEM API METHODS
    ###########################################################################

    def _patch_tender_resource_item(self, tender, item_obj, items_name):
        # PATCH an existing item; its id comes from the payload itself.
        return self._patch_resource_item(
            '{}/{}/{}/{}'.format(
                self.prefix_path, tender.data.id, items_name, item_obj['data']['id']
            ),
            payload=item_obj,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_tender(self, tender):
        return self._patch_resource_item(
            '{}/{}'.format(self.prefix_path, tender["data"]["id"]),
            payload=tender,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_question(self, tender, question):
        return self._patch_tender_resource_item(tender, question, "questions")

    def patch_bid(self, tender, bid):
        return self._patch_tender_resource_item(tender, bid, "bids")

    def patch_bid_document(self, tender, document_data, bid_id, document_id):
        return self._patch_resource_item(
            '{}/{}/{}/{}/documents/{}'.format(
                self.prefix_path, tender.data.id, "bids", bid_id, document_id
            ),
            payload=document_data,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_award(self, tender, award):
        return self._patch_tender_resource_item(tender, award, "awards")

    def patch_award_document(self, tender, document_data, award_id, document_id):
        return self._patch_resource_item(
            '{}/{}/{}/{}/documents/{}'.format(
                self.prefix_path, tender.data.id, "awards", award_id, document_id
            ),
            payload=document_data,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_cancellation(self, tender, cancellation):
        return self._patch_tender_resource_item(tender, cancellation, "cancellations")

    def patch_cancellation_document(self, tender, cancellation, cancellation_id, cancellation_doc_id):
        return self._patch_resource_item(
            '{}/{}/{}/{}/documents/{}'.format(
                self.prefix_path, tender.data.id, "cancellations", cancellation_id, cancellation_doc_id
            ),
            payload=cancellation,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_complaint(self, tender, complaint):
        return self._patch_tender_resource_item(tender, complaint, "complaints")

    def patch_award_complaint(self, tender, complaint, award_id):
        return self._patch_resource_item(
            '{}/{}/awards/{}/complaints/{}'.format(
                self.prefix_path, tender.data.id, award_id, complaint.data.id
            ),
            payload=complaint,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_lot(self, tender, lot):
        return self._patch_tender_resource_item(tender, lot, "lots")

    def patch_document(self, tender, document):
        return self._patch_tender_resource_item(tender, document, "documents")

    def patch_qualification(self, tender, qualification):
        return self._patch_tender_resource_item(tender, qualification, "qualifications")

    def patch_contract(self, tender, contract):
        return self._patch_tender_resource_item(tender, contract, "contracts")

    def patch_contract_document(self, tender, document_data, contract_id, document_id):
        return self._patch_resource_item(
            '{}/{}/{}/{}/documents/{}'.format(
                self.prefix_path, tender.data.id, "contracts", contract_id, document_id
            ),
            payload=document_data,
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    def patch_credentials(self, id, access_token):
        return self._patch_resource_item('{}/{}/credentials'.format(self.prefix_path, id),
                                         payload={},
                                         headers={'X-Access-Token': access_token})

    ###########################################################################
    # UPLOAD FILE API METHODS
    ###########################################################################

    @verify_file
    def upload_document(self, file_, tender):
        return self._upload_resource_file(
            '{}/{}/documents'.format(
                self.prefix_path,
                tender.data.id
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def upload_bid_document(self, file_, tender, bid_id, doc_type="documents"):
        return self._upload_resource_file(
            '{}/{}/bids/{}/{}'.format(
                self.prefix_path,
                tender.data.id,
                bid_id,
                doc_type
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def update_bid_document(self, file_, tender, bid_id, document_id, doc_type="documents"):
        # PUT replaces an existing document version.
        return self._upload_resource_file(
            '{}/{}/bids/{}/{}/{}'.format(
                self.prefix_path,
                tender.data.id,
                bid_id,
                doc_type,
                document_id
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')},
            method='put'
        )

    @verify_file
    def upload_cancellation_document(self, file_, tender, cancellation_id):
        return self._upload_resource_file(
            '{}/{}/cancellations/{}/documents'.format(
                self.prefix_path,
                tender.data.id,
                cancellation_id
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def update_cancellation_document(self, file_, tender, cancellation_id, document_id):
        # PUT replaces an existing document version.
        return self._upload_resource_file(
            '{}/{}/cancellations/{}/documents/{}'.format(
                self.prefix_path,
                tender.data.id,
                cancellation_id,
                document_id
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')},
            method='put'
        )

    @verify_file
    def upload_complaint_document(self, file_, tender, complaint_id):
        return self._upload_resource_file(
            '{}/{}/complaints/{}/documents'.format(
                self.prefix_path,
                tender.data.id,
                complaint_id),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def upload_award_complaint_document(self, file_, tender, award_id, complaint_id):
        return self._upload_resource_file(
            '{}/{}/awards/{}/complaints/{}/documents'.format(
                self.prefix_path,
                tender.data.id,
                award_id,
                complaint_id),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def upload_qualification_document(self, file_, tender, qualification_id):
        return self._upload_resource_file(
            '{}/{}/qualifications/{}/documents'.format(
                self.prefix_path,
                tender.data.id,
                qualification_id
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def upload_award_document(self, file_, tender, award_id, doc_type="documents"):
        return self._upload_resource_file(
            '{}/{}/awards/{}/{}'.format(
                self.prefix_path,
                tender.data.id,
                award_id,
                doc_type
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    @verify_file
    def upload_contract_document(self, file_, tender, contract_id, doc_type="documents"):
        # NOTE(review): the format string has only three placeholders, so
        # `doc_type` is silently ignored here (str.format drops extra
        # positional args) — confirm whether the URL should include it as
        # in upload_bid_document/upload_award_document.
        return self._upload_resource_file(
            '{}/{}/contracts/{}/documents'.format(
                self.prefix_path,
                tender.data.id,
                contract_id,
                doc_type
            ),
            data={"file": file_},
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )

    ###########################################################################
    # DELETE ITEMS LIST API METHODS
    ###########################################################################

    def delete_bid(self, tender, bid, access_token=None):
        # Accepts either a bid object (deprecated) or a bid id + token.
        logger.info("delete_lot is deprecated. In next update this function will takes bid_id and access_token instead bid.")
        if isinstance(bid, basestring):
            bid_id = bid
            access_token = access_token
        else:
            bid_id = bid.data.id
            access_token = getattr(getattr(bid, 'access', ''), 'token', '')
        return self._delete_resource_item(
            '{}/{}/bids/{}'.format(
                self.prefix_path,
                tender.data.id,
                bid_id
            ),
            headers={'X-Access-Token': access_token}
        )

    def delete_lot(self, tender, lot):
        # Accepts either a lot object (deprecated) or a lot id.
        logger.info("delete_lot is deprecated. In next update this function will takes lot_id instead lot.")
        if isinstance(lot, basestring):
            lot_id = lot
        else:
            lot_id = lot.data.id
        return self._delete_resource_item(
            '{}/{}/lots/{}'.format(
                self.prefix_path,
                tender.data.id,
                lot_id
            ),
            headers={'X-Access-Token':
                     getattr(getattr(tender, 'access', ''), 'token', '')}
        )
###########################################################################
class Client(TendersClient):
    """Deprecated alias of TendersClient, kept for backward compatibility."""
class TendersClientSync(TendersClient):
    """Tenders client tailored to iterating the `changes` feed."""

    def sync_tenders(self, params=None, extra_headers=None):
        """Fetch one page of the `changes` feed.

        Returns the munchified page (carrying `data` and `next_page`) on
        HTTP 200, otherwise None — matching the original behaviour.
        """
        # Copy before adding 'feed' so the caller's dict (and the old shared
        # `params={}` default) is never mutated.
        params = dict(params or {})
        params['feed'] = 'changes'
        self.headers.update(extra_headers or {})
        response = self.get(self.prefix_path, params_dict=params)
        if response.status_int == 200:
            tender_list = munchify(loads(response.body_string()))
            return tender_list

    @retry(stop_max_attempt_number=5)
    def get_tender(self, id, extra_headers=None):
        """Fetch a single tender, merging `extra_headers` into the session."""
        self.headers.update(extra_headers or {})
        return super(TendersClientSync, self).get_tender(id)
class EDRClient(Resource):
    """ Client for validate members by EDR

    Standalone restkit Resource (not an APIBaseClient) with its own
    basic-auth credentials and cookie handling.
    """
    def __init__(self, host_url, api_version, username, password, **kwargs):
        prefix_path = '{}/api/{}'.format(host_url, api_version)
        super(EDRClient, self).__init__(prefix_path,
                                        filters=[BasicAuth(username, password)],
                                        **kwargs)
        self.headers = {"Content-Type": "application/json"}

    def request(self, method, path=None, payload=None, headers=None,
                params_dict=None, **params):
        # Same session-cookie tracking as APIBaseClient.request: remember
        # Set-Cookie from both successful responses and 404s.
        _headers = dict(self.headers)
        _headers.update(headers or {})
        try:
            response = super(EDRClient, self).request(
                method, path=path, payload=payload, headers=_headers,
                params_dict=params_dict, **params
            )
            if 'Set-Cookie' in response.headers:
                self.headers['Cookie'] = response.headers['Set-Cookie']
            return response
        except ResourceNotFound as e:
            if 'Set-Cookie' in e.response.headers:
                self.headers['Cookie'] = e.response.headers['Set-Cookie']
            raise e

    def verify_member(self, edrpou, headers=None):
        """Look up a member by EDRPOU code; munchified body on 200."""
        response = self.request("GET", "/verify",
                                params_dict={'id': edrpou},
                                headers=headers)
        if response.status_int == 200:
            return munchify(loads(response.body_string()))
        raise InvalidResponse
|
<filename>src/visitpy/visit_flow/flow/tests/test_generator.py
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: test_parser.py
author: <NAME> <<EMAIL>>
created: 8/29/2012
description:
unittest test cases for parser front-end.
"""
import unittest
try:
    import numpy as npy
except ImportError:
    # Bug fix: the previous bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; only a genuinely missing numpy should be tolerated.
    pass
from visit_flow import *
from visit_flow.filters import npy_ops
from decorators import numpy_test, ply_test
# uncomment for detailed exe info
#import logging
#logging.basicConfig(level=logging.INFO)
class TestParser(unittest.TestCase):
    """Exercises Generator.parse_network and Workspace expression setup."""

    @ply_test
    def test_01_simple_expr_gen(self):
        # Parsing alone should succeed and produce a printable filter list.
        flt_list = Generator.parse_network("vel_mag = sqrt(vx^2 + vy^2 + vz^2)")
        print("")
        for flt in flt_list:
            print(flt)
        self.assertTrue(True)

    @ply_test
    def test_02_simple_expr_gen_context(self):
        # Build a workspace with two vectors and a scalar, parse an expression
        # against it, then compare execution with the direct numpy result.
        wspace = Workspace()
        wspace.register_filters(npy_ops)
        vec_a = npy.array(range(10), dtype=npy.double)
        vec_b = npy.array(range(10), dtype=npy.double)
        const_two = 2.0
        wspace.registry_add(":v_a", vec_a)
        wspace.registry_add(":v_b", vec_b)
        wspace.registry_add(":c_2", const_two)
        print("")
        expr = "res = (v_a + v_b)^c_2 + (v_a - v_b)^c_2"
        print("test_expr: " + expr)
        Generator.parse_network(expr, wspace)
        print("")
        print(wspace.graph)
        print(wspace.execution_plan())
        actual = wspace.execute()
        expected = npy.power((vec_a + vec_b), 2.0) + npy.power((vec_a - vec_b), 2.0)
        diff = npy.sum(actual - expected)
        print("Filter Graph Result: %s" % str(actual))
        print("Test Result: %s" % str(expected))
        print("Difference: %s" % str(diff))
        self.assertTrue(diff < 1e-6)

    @ply_test
    def test_03_simple_expr_gen_from_workspace(self):
        # Same as test_02 but routes the expression through
        # Workspace.setup_expression_network with literal exponents.
        wspace = Workspace()
        wspace.register_filters(npy_ops)
        vec_a = npy.array(range(10), dtype=npy.double)
        vec_b = npy.array(range(10), dtype=npy.double)
        wspace.registry_add(":v_a", vec_a)
        wspace.registry_add(":v_b", vec_b)
        print("")
        expr = "res = (v_a + v_b)^2 + (v_a - v_b)^2"
        print("test_expr: " + expr)
        wspace.setup_expression_network(expr)
        print("")
        print(wspace.graph)
        print(wspace.execution_plan())
        actual = wspace.execute()
        expected = npy.power((vec_a + vec_b), 2.0) + npy.power((vec_a - vec_b), 2.0)
        diff = npy.sum(actual - expected)
        print("Filter Graph Result: %s" % str(actual))
        print("Test Result: %s" % str(expected))
        print("Difference: %s" % str(diff))
        self.assertTrue(diff < 1e-6)

    @ply_test
    def test_04_simple_decomp_gen_context(self):
        # Exercises component decomposition syntax (v_b[0], v_b[1]).
        wspace = Workspace()
        wspace.register_filters(npy_ops)
        vec_a = npy.array(range(10), dtype=npy.double)
        vec_b = npy.ndarray((10, 2), dtype=npy.double)
        const_two = 2.0
        wspace.registry_add(":v_a", vec_a)
        wspace.registry_add(":v_b", vec_b)
        wspace.registry_add(":c_2", const_two)
        vec_b[:, 0] = range(10)
        vec_b[:, 1] = range(10, 20)
        expr = "res = (v_a + v_b[0])^c_2 + (v_a - v_b[1])^c_2"
        print("test_expr: " + expr)
        wspace.setup_expression_network(expr)
        print("")
        print(wspace.graph)
        print(wspace.execution_plan())
        actual = wspace.execute()
        expected = npy.power((vec_a + vec_b[:, 0]), 2.0) + npy.power((vec_a - vec_b[:, 1]), 2.0)
        diff = npy.sum(actual - expected)
        print("Filter Graph Result: %s" % str(actual))
        print("Test Result: %s" % str(expected))
        print("Difference: %s" % str(diff))
        self.assertTrue(diff < 1e-6)
if __name__ == '__main__':
    # Allow running this test module directly: ``python test_generator.py``.
    unittest.main()
|
import numpy as np
import pandas as pd
from TACT.computation.adjustments import Adjustments, empirical_stdAdjustment
from TACT.computation.ml import machine_learning_TI
def _ml_adjust_ti(adj, train_df, test_df, merge_base, current_result, results,
                  ref_col, rsd_col, sd_col, adj_col):
    """Random-forest TI adjustment for one reference/RSD column pair.

    Builds the train/test frames, predicts via ``machine_learning_TI`` ("RF")
    and left-merges the prediction column ``adj_col`` into ``merge_base``.
    When there is too little data (< 5 rows in both train and test) only null
    stats are recorded and ``current_result`` is returned unchanged.

    :return: (updated result frame, updated stats frame)
    """
    all_train = pd.DataFrame()
    all_train["y_train"] = train_df[ref_col].copy()
    all_train["x_train"] = train_df[rsd_col].copy()
    all_test = pd.DataFrame()
    all_test["y_test"] = test_df[ref_col].copy()
    all_test["x_test"] = test_df[rsd_col].copy()
    all_test["TI_test"] = test_df[rsd_col].copy()
    all_test["RSD_SD"] = test_df[sd_col].copy()
    all_train = all_train.dropna()
    # Bug fix: the Ht4 branch previously skipped this dropna.
    all_test = all_test.dropna()
    if len(all_train) < 5 and len(all_test) < 5:
        results = adj.post_adjustment_stats([None], results, ref_col, adj_col)
        return current_result, results
    TI_pred_RF = machine_learning_TI(
        all_train["x_train"],
        all_train["y_train"],
        all_test["x_test"],
        all_test["y_test"],
        "RF",
        all_test["TI_test"],
    )
    all_test[adj_col] = TI_pred_RF
    all_test[ref_col] = all_test["y_test"]
    merged = pd.merge(merge_base, all_test, how="left")
    results = adj.post_adjustment_stats(merged, results, ref_col, adj_col)
    return merged, results


def perform_SS_LTERRA_ML_adjustment(inputdata):
    """Site-specific LTERRA random-forest TI adjustment.

    Splits ``inputdata`` on its boolean ``split`` column, trains a random
    forest per available TI column pair (hub height plus Ht1..Ht4) and
    records post-adjustment statistics for each.

    :param inputdata: DataFrame with ``split``, ``Ref_TI``/``RSD_TI``/``RSD_SD``
        and optional ``Ane_TI_HtN``/``RSD_TI_HtN``/``RSD_SD_HtN`` columns
    :return: (adjusted test frame, stats frame, m, c); m and c are always NaN
        because the ML adjustment has no linear slope/offset
    """
    inputdata_test_result = pd.DataFrame()
    results = pd.DataFrame(
        columns=[
            "sensor",
            "height",
            "adjustment",
            "m",
            "c",
            "rsquared",
            "difference",
            "mse",
            "rmse",
        ]
    )
    inputdata_train = inputdata[inputdata["split"] == True].copy()
    inputdata_test = inputdata[inputdata["split"] == False].copy()
    adj = Adjustments()
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    m = np.nan
    c = np.nan
    if inputdata.empty or len(inputdata) < 2:
        # Not enough data at all: record null stats for every available column.
        results = adj.post_adjustment_stats([None], results, "Ref_TI", "adjTI_RSD_TI")
        for ht in range(1, 5):
            ane_col = "Ane_TI_Ht{}".format(ht)
            rsd_col = "RSD_TI_Ht{}".format(ht)
            if ane_col in inputdata.columns and rsd_col in inputdata.columns:
                results = adj.post_adjustment_stats(
                    [None], results, ane_col, "adjTI_RSD_TI_Ht{}".format(ht)
                )
        inputdata = False
    else:
        # Primary (hub-height) TI columns; merged onto the raw test split.
        inputdata_test_result, results = _ml_adjust_ti(
            adj, inputdata_train, inputdata_test, inputdata_test,
            inputdata_test_result, results,
            "Ref_TI", "RSD_TI", "RSD_SD", "adjTI_RSD_TI",
        )
        for ht in range(1, 5):
            ane_col = "Ane_TI_Ht{}".format(ht)
            rsd_col = "RSD_TI_Ht{}".format(ht)
            sd_col = "RSD_SD_Ht{}".format(ht)
            # Bug fix: the Ht4 branch previously tested for a misspelled
            # "RSD_Sd_Ht4" column, so that height was never adjusted.
            if (
                ane_col in inputdata.columns
                and rsd_col in inputdata.columns
                and sd_col in inputdata.columns
            ):
                inputdata_test_result, results = _ml_adjust_ti(
                    adj, inputdata_train, inputdata_test, inputdata_test_result,
                    inputdata_test_result, results,
                    ane_col, rsd_col, sd_col, "adjTI_RSD_TI_Ht{}".format(ht),
                )
    if inputdata_test_result.empty:
        inputdata_test_result = inputdata_test
    return inputdata_test_result, results, m, c
|
<gh_stars>0
import os
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, confusion_matrix
from sklearn.preprocessing import OneHotEncoder
sys.path.append('src')
from mappings import MAPPINGS
from models import KernelClassifier
# Define parameter names (AUs) and target label (EMOTIONS)
PARAM_NAMES = np.loadtxt('data/au_names_new.txt', dtype=str).tolist()
EMOTIONS = np.array(['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise'])
# One-hot encoder over the six emotion labels.
# NOTE(review): ``sparse=`` was renamed to ``sparse_output=`` in sklearn 1.2 --
# confirm the pinned scikit-learn version before upgrading.
ohe = OneHotEncoder(sparse=False)
ohe.fit(EMOTIONS[:, np.newaxis])
# Ablation deltas from a previous run; used below to tweak the AU config.
df_abl = pd.read_csv('results/scores_ablation.tsv', sep='\t', index_col=0)
df_abl = df_abl.drop(['sub', 'beta', 'kernel'], axis=1)
subs = [str(s).zfill(2) for s in range(1, 61)]
scores_all = []
cm_df = []
for mapp_name, mapp in MAPPINGS.items():
    print(mapp_name)
    model = KernelClassifier(au_cfg=mapp, param_names=PARAM_NAMES, kernel='cosine',
                             ktype='similarity', binarize_X=False, normalization='softmax', beta=1)
    # The JS mapping was fitted on half the subjects; evaluate on the other half.
    if mapp_name == 'JS':
        these_subs = subs[1::2]
    else:
        these_subs = subs
    model.fit(None, None)
    # Keep a pristine copy of the AU->emotion matrix so per-subject edits can
    # be rolled back.
    Z_orig = model.Z_.copy()
    # Initialize scores (one score per subject and per emotion)
    scores = np.zeros((len(these_subs), len(EMOTIONS)))
    scores[:] = np.nan
    cm_old, cm_new = np.zeros((6, 6)), np.zeros((6, 6))
    # Compute model performance per subject!
    for i, sub in enumerate(these_subs):
        data = pd.read_csv(f'data/ratings/sub-{sub}_ratings.tsv', sep='\t', index_col=0)
        data = data.query("emotion != 'other'")
        data = data.loc[data.index != 'empty', :]
        if mapp_name == 'JS':
            data = data.query("data_split == 'test'")
        X, y = data.iloc[:, :33], data.loc[:, 'emotion']
        # Predict data + compute performance (AUROC)
        y_pred = pd.DataFrame(model.predict_proba(X), index=X.index, columns=EMOTIONS)
        y_ohe = ohe.transform(y.to_numpy()[:, np.newaxis])
        # Score only emotions that actually occur for this subject.
        idx = y_ohe.sum(axis=0) != 0
        scores[i, idx] = roc_auc_score(y_ohe[:, idx], y_pred.to_numpy()[:, idx], average=None)
        cm_old += confusion_matrix(y_ohe.argmax(axis=1), y_pred.to_numpy().argmax(axis=1))
        # APPEND TO CONFIG
        # Turn on AUs whose ablation hurt the score, turn off those that helped.
        for emo in EMOTIONS:
            df_abl_emo = df_abl.query("ablated_from == @emo & emotion == @emo & score != 0")
            aus = df_abl_emo.groupby('ablated_au').mean().query("score < 0").index.tolist()
            for au in aus:
                delta = df_abl_emo.query("ablated_au == @au & mapping == @mapp_name")['score'].to_numpy()
                #if not len(delta):
                #    continue
                #from scipy.stats import ttest_1samp
                #t, p = ttest_1samp(delta, popmean=0, alternative='less')
                #if p < 0.05:
                model.Z_.loc[emo, au] = 1
            aus = df_abl_emo.groupby('ablated_au').mean().query("score > 0").index.tolist()
            for au in aus:
                #delta = df_abl_emo.query("ablated_au == @au & mapping == @mapp_name")['score'].to_numpy()
                #if not len(delta):
                #    continue
                #t, p = ttest_1samp(delta, popmean=0, alternative='greater')
                #if p < 0.05:
                model.Z_.loc[emo, au] = 0
        # Re-score with the optimised mapping; ``scores`` ends up holding the
        # AUROC *improvement* (new - old), not the absolute AUROC.
        y_pred = pd.DataFrame(model.predict_proba(X), index=X.index, columns=EMOTIONS)
        new = roc_auc_score(y_ohe[:, idx], y_pred.to_numpy()[:, idx], average=None)
        scores[i, idx] = (new - scores[i, idx])# / scores[i, idx]) * 100
        cm_new += confusion_matrix(y_ohe.argmax(axis=1), y_pred.to_numpy().argmax(axis=1))
        # Restore the original mapping before the next subject.
        model.Z_ = Z_orig.copy()
    cm_old = pd.DataFrame(cm_old, index=EMOTIONS, columns=EMOTIONS)
    cm_old['mapping'] = mapp_name
    cm_old['type'] = 'orig'
    cm_new = pd.DataFrame(cm_new, index=EMOTIONS, columns=EMOTIONS)
    cm_new['mapping'] = mapp_name
    cm_new['type'] = 'opt'
    cm_df.append(pd.concat((cm_old, cm_new), axis=0))
    # Store scores and raw predictions
    scores = pd.DataFrame(scores, columns=EMOTIONS, index=these_subs).reset_index()
    scores = pd.melt(scores, id_vars='index', value_name='score', var_name='emotion')
    scores = scores.rename({'index': 'sub'}, axis=1)
    scores['mapping'] = mapp_name
    scores_all.append(scores)
scores = pd.concat(scores_all, axis=0)
scores.to_csv('results/scores_optimal.tsv', sep='\t')
cm = pd.concat(cm_df, axis=0)
cm.to_csv('results/cm_optimal.tsv', sep='\t')
|
#!/usr/bin/env python3
import os
import argparse
import pandas as pd
from skbio.diversity import beta_diversity
from skbio.stats.ordination import pcoa
from skbio.stats.distance import anosim
import matplotlib.pyplot as plt
import warnings
#How to use:
#Standalone:
# navigate '/media/nls/Storage/HomeBU/Python/skbio' in teminal
# put transposed otu table in Data folder
# run 'py37' to activate python environment
# run 'python ./biodivEA.py --frac <fraction> --input_xl otus_all.xlsx
def make_pcoa(dist_mat, sample_meta, distance):
    """Ordinate *dist_mat* with PCoA and plot the samples coloured by type."""
    ordination = pcoa(dist_mat)
    plot_title = "Samples coloured by type, distance = " + distance
    return ordination.plot(sample_meta, "type",
                           axis_labels=("PC 1", "PC 2", "PC3"),
                           title=plot_title, cmap="jet", s=10)
def assign_meta(otu_tab):
    """Build per-sample metadata (type/area) from sample-name prefixes.

    Control samples are recognised by their (lower-cased) first character;
    environmental samples by a leading '4' (EA) or '3' (SEPA); anything else
    is classified as unknown.

    :param otu_tab: DataFrame whose index holds the sample names
    :return: DataFrame indexed by sample with 'type' and 'area' columns
    """
    # First character (lower-cased) -> [type, area] for control samples.
    control_types = {
        "b": ["blank", "control"],
        "p": ["positive", "control"],
        "n": ["ntc", "control"],
        "t": ["technical replicate", "control"],
        "g": ["gblock", "control"],
    }
    sample_metadata = {}
    for sample in otu_tab.index:
        name = str(sample)
        first_char = name[0].lower()
        if first_char in control_types:
            sample_metadata[sample] = control_types[first_char]
        elif name[0] == "4":
            sample_metadata[sample] = ["sample", "EA"]
        elif name[0] == "3":
            sample_metadata[sample] = ["sample", "SEPA"]
        else:
            sample_metadata[sample] = ["unknown", "unknown"]
    # Fix: the old ``try/except UnboundLocalError`` around the append was dead
    # code -- every branch assigns metadata -- so the dict is built directly.
    return pd.DataFrame.from_dict(
        sample_metadata, columns=["type", "area"], orient="index")
def import_otu_table(inputxl, directory, fraction):
    """Load the first sheet of an OTU Excel table and subsample its rows.

    :param inputxl: workbook filename (.xlsx)
    :param directory: directory holding the workbook
    :param fraction: fraction of rows to keep, parseable as float in (0, 1]
    :return: row-subsampled OTU DataFrame
    """
    xl_abspath = os.path.join(directory, inputxl)
    print("OTU input file: " + xl_abspath)
    print("Fraction of subsample: " + str(fraction))
    # Fix: dropped an unused ``os.path.abspath(directory)`` local.
    xl = pd.ExcelFile(xl_abspath)
    otu_tab = xl.parse(sheet_name=0, index_col=0)
    return otu_tab.sample(frac=float(fraction))
def get_args():
    """Collect the command-line options (all optional, with defaults)."""
    cli = argparse.ArgumentParser(description="Processes diatom data into regions.")
    cli.add_argument("--input_xl", required=False, default="otus_all.xlsx",
                     help="Semi-Optional: input excel file in .xlsx format.")
    cli.add_argument("--input_dir", required=False, default="Data",
                     help="Semi-Optional: input directory for all input files files.")
    cli.add_argument("--frac", required=False, default="1.0",
                     help="Semi-Optional: Fraction of total samples to use for analysis, must be a float.")
    return cli.parse_args()
def plot_data_from_otu(options):
    """Run the Bray-Curtis beta-diversity / ANOSIM / PCoA pipeline and plot it."""
    distance = "braycurtis"
    otu_tab = import_otu_table(options.input_xl, options.input_dir, options.frac)
    dist_matrix = beta_diversity(distance, otu_tab.values, list(otu_tab.index))
    metadata = assign_meta(otu_tab)
    print(anosim(dist_matrix, metadata, column="type", permutations=999))
    make_pcoa(dist_matrix, metadata, distance)
    plt.show()
if __name__ == '__main__':
    options = get_args()
    # Suppress library warnings (e.g. from skbio/matplotlib) for a clean run.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        plot_data_from_otu(options)
|
## Core App
# holds all of the central code that is important to the rest of the sub apps that we create in our system
# creates anything that is shared between 1 or more apps so things like the migrations and the database. (All in One Place)
# A universally unique identifier (UUID) is a 128-bit "Encrypted" label used for information in computer systems.
# this is the python 'uuid' package that lets us generate the 'uid'
import uuid
# this is used for 'os.path' to create a valid path for our file destination.
import os
from django.db import models
# these ar all things that are required to extend the Django user model while making use of some of the features that come with the django user model out of the box.
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """ Generate file path for new recipe image """
    # Keep the original extension but replace the name with a fresh UUID so
    # uploaded files can never collide with each other.
    extension = filename.rsplit('.', 1)[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    return os.path.join('uploads/recipe/', unique_name)
## User Manager Class:
# A class that provides the helper functions for creating a user or a super user
class UserManager(BaseUserManager):
    """Manager providing helpers to create regular users and superusers."""

    def create_user(self, email, password=None, **extra_fields):
        """ Creates and Saves a new User """
        # Bug fix: the default was the unparseable placeholder ``<PASSWORD>``;
        # ``None`` lets callers create a user without a usable password.
        if not email:
            raise ValueError('Users must have an email address')
        # ``normalize_email`` (from BaseUserManager) canonicalises the address;
        # extra_fields flow straight into the model so new user fields need no
        # changes here.
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        # "using=self._db" - supporting multiple databases (Good Practice)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """ Creates and Saves a new super user """
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """ Custom User Model that supports using email instead of username """
    # fields of our database model
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    # Attach the custom manager so ``User.objects.create_user`` works.
    objects = UserManager()

    # Authenticate with the email address instead of a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """ Tag to be used for a recipe """
    name = models.CharField(max_length=255)
    # Owning user; tags are deleted together with their user.
    # NOTE(review): unlike Ingredient, this FK allows null -- confirm that an
    # ownerless tag is intentional.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        null=True,
    )

    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """ Ingredient to be used in a recipe """
    name = models.CharField(max_length=255)
    # Owning user; ingredients are deleted together with their user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        return self.name
class Recipe(models.Model):
    """ Recipe object """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    time_minutes = models.IntegerField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    link = models.CharField(max_length=255, blank=True)
    # Related models are referenced by name (string) so the declaration order
    # of the classes in this module does not matter.
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # ``recipe_image_file_path`` is passed as a reference (not called); Django
    # invokes it at upload time to build the destination path.
    image = models.FileField(null=True, upload_to=recipe_image_file_path)

    def __str__(self):
        return self.title
|
from __future__ import print_function, absolute_import, division, unicode_literals
import os
from setuptools import find_packages, setup
# Long description for PyPI comes straight from the repository README.
_HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_HERE, 'README.md'), 'r') as f:
    long_desc = f.read()

setup(
    name='hyp3lib',
    # Version is derived from git tags by setuptools_scm.
    use_scm_version=True,
    description='Common library for HyP3 plugins',
    long_description=long_desc,
    long_description_content_type='text/markdown',

    url='https://github.com/ASFHyP3/hyp3-lib',

    author='ASF APD/Tools Team',
    author_email='<EMAIL>',

    license='BSD-3-Clause',
    include_package_data=True,

    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries',
        ],

    python_requires='~=3.6',

    install_requires=[
        'boto3',
        'gdal',
        'imageio',
        'importlib_metadata',
        'lxml',
        'matplotlib',
        'netCDF4',
        'numpy',
        'pillow',
        'pyproj~=2.0',
        'pyshp',
        'requests',
        'scipy',
        'six',
        'statsmodels',
        'urllib3',
    ],

    extras_require={
        'develop': [
            'botocore',
            'pytest',
            'pytest-cov',
            'pytest-console-scripts',
            'responses',
        ]
    },

    packages=find_packages(),

    # FIXME: this could/should be converted to python so it can be registered as an entrypoint
    scripts=['scripts/GC_map_mod'],

    # Each legacy ``*.py`` script is exposed as a console entrypoint mapping
    # to its module's ``main``.
    entry_points={'console_scripts': [
        'apply_wb_mask.py = hyp3lib.apply_wb_mask:main',
        'byteSigmaScale.py = hyp3lib.byteSigmaScale:main',
        'copy_metadata.py = hyp3lib.copy_metadata:main',
        'createAmp.py = hyp3lib.createAmp:main',
        'cutGeotiffsByLine.py = hyp3lib.cutGeotiffsByLine:main',
        'cutGeotiffs.py = hyp3lib.cutGeotiffs:main',
        'draw_polygon_on_raster.py = hyp3lib.draw_polygon_on_raster:main',
        'dem2isce.py = hyp3lib.dem2isce:main',
        'enh_lee_filter.py = hyp3lib.enh_lee_filter:main',
        'extendDateline.py = hyp3lib.extendDateline:main',
        'geotiff_lut.py = hyp3lib.geotiff_lut:main',
        'get_bounding.py = hyp3lib.get_bounding:main',
        'getDemFor.py = hyp3lib.getDemFor:main',
        'get_asf.py = hyp3lib.get_asf:main',
        'get_dem.py = hyp3lib.get_dem:main',
        'get_orb.py = hyp3lib.get_orb:main',
        'iscegeo2geotif.py = hyp3lib.iscegeo2geotif:main',
        'make_arc_thumb.py = hyp3lib.make_arc_thumb:main',
        'makeAsfBrowse.py = hyp3lib.makeAsfBrowse:main',
        'makeChangeBrowse.py = hyp3lib.makeChangeBrowse:main',
        'make_cogs.py = hyp3lib.make_cogs:main',
        'makeColorPhase.py = hyp3lib.makeColorPhase:main',
        'makeKml.py = hyp3lib.makeKml:main',
        'offset_xml.py = hyp3lib.offset_xml:main',
        'ps2dem.py = hyp3lib.ps2dem:main',
        'raster_boundary2shape.py = hyp3lib.raster_boundary2shape:main',
        'rasterMask.py = hyp3lib.rasterMask:main',
        'resample_geotiff.py = hyp3lib.resample_geotiff:main',
        'rtc2colordiff.py = hyp3lib.rtc2colordiff:main',
        'rtc2color.py = hyp3lib.rtc2color:main',
        'simplify_shapefile.py = hyp3lib.simplify_shapefile:main',
        'SLC_copy_S1_fullSW.py = hyp3lib.SLC_copy_S1_fullSW:main',
        'subset_geotiff_shape.py = hyp3lib.subset_geotiff_shape:main',
        'tileList2shape.py = hyp3lib.tileList2shape:main',
        'utm2dem.py = hyp3lib.utm2dem:main',
        'verify_opod.py = hyp3lib.verify_opod:main',
        ]
    },

    zip_safe=False,
)
|
<reponame>JonasZehn/ntopo
import os
import math
import json
import jsonpickle
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Model, Sequential, Input
from tensorflow.keras.layers import Layer, Dense, Concatenate, BatchNormalization, LeakyReLU
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.keras import __version__ as keras_version
from ntopo.utils import transform_minus11, load_file
class IdentityFeatures(Layer):
    """No-op feature layer: forwards its input unchanged (n_output == n_input)."""

    def __init__(self, n_input):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_input

    def get_config(self):
        return dict(n_input=self.n_input)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def call(self, inputs):
        return inputs
class ConcatSquareFeatures(Layer):
    """Feature map [x, x*x]: doubles the channel count along the last axis."""

    def __init__(self, n_input):
        super().__init__()
        self.n_input = n_input
        self.n_output = 2 * n_input

    def get_config(self):
        return dict(n_input=self.n_input)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def call(self, inputs):
        squared = inputs * inputs
        return tf.concat([inputs, squared], axis=-1)
class ConcatSineFeatures(Layer):
    """Feature map [x, sin(x)]: doubles the channel count along the last axis."""

    def __init__(self, n_input):
        super().__init__()
        self.n_input = n_input
        self.n_output = 2 * n_input

    def get_config(self):
        return dict(n_input=self.n_input)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def call(self, inputs):
        sine_part = tf.math.sin(inputs)
        return tf.concat([inputs, sine_part], axis=-1)
class DenseSIRENModel(Model):
    """Five-hidden-layer MLP with sine activations (SIREN-like) and two
    concatenation skip connections (raw inputs after layer 2; the combined
    features again after layer 4).
    """

    def __init__(self, n_input, n_output, n_hidden, last_layer_init_scale, omega0, use_omega_split = False):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_output
        self.n_hidden = n_hidden
        self.last_layer_init_scale = last_layer_init_scale
        self.omega0 = omega0
        # The first layer always applies the omega0 frequency inside sin;
        # its weights are drawn from U(-1/n_input, 1/n_input).
        sin_activation_first_layer = lambda x: K.sin(omega0*x)
        first_layer_initializer = tf.keras.initializers.RandomUniform(
            minval=-1.0/n_input, maxval=1.0/n_input)
        k_sqrt_first = np.sqrt(1.0/n_input)
        bias_initializer_first = tf.keras.initializers.RandomUniform(
            minval=-k_sqrt_first, maxval=k_sqrt_first)
        if use_omega_split:
            # omega0 stays inside the activation, compensated by weight and
            # bias ranges shrunk by 1/omega0.
            sin_activation = lambda x: K.sin(omega0*x)
            w_middle_deviation = np.sqrt(6.0 / n_hidden) / omega0
            k_sqrt_middle = np.sqrt(1.0/n_hidden) / omega0
        else:
            sin_activation = lambda x: K.sin(x)
            w_middle_deviation = np.sqrt(6.0 / n_hidden)
            k_sqrt_middle = np.sqrt(1.0/n_hidden)
        weight_initializer_middle_layer = tf.keras.initializers.RandomUniform(
            minval=-w_middle_deviation, maxval=w_middle_deviation)
        bias_initializer = tf.keras.initializers.RandomUniform(
            minval=-k_sqrt_middle, maxval=k_sqrt_middle)
        # Output layer range is scaled down by last_layer_init_scale.
        last_initializer = tf.keras.initializers.RandomUniform(
            minval=-np.sqrt(6 / n_hidden) * last_layer_init_scale, maxval=np.sqrt(6 / n_hidden) * last_layer_init_scale)
        self.dense0 = Dense(n_hidden, activation=sin_activation_first_layer, kernel_initializer=first_layer_initializer,
                            bias_initializer=bias_initializer_first)
        self.dense1 = Dense(n_hidden, activation=sin_activation, kernel_initializer=weight_initializer_middle_layer,
                            bias_initializer=bias_initializer)
        self.dense2 = Dense(n_hidden, activation=sin_activation, kernel_initializer=weight_initializer_middle_layer,
                            bias_initializer=bias_initializer)
        self.dense3 = Dense(n_hidden, activation=sin_activation, kernel_initializer=weight_initializer_middle_layer,
                            bias_initializer=bias_initializer)
        self.dense4 = Dense(n_hidden, activation=sin_activation, kernel_initializer=weight_initializer_middle_layer,
                            bias_initializer=bias_initializer)
        self.dense5 = Dense(n_output, kernel_initializer=last_initializer)

    def get_config(self):
        # NOTE(review): ``use_omega_split`` is not serialized here, so a model
        # restored via from_config silently falls back to False -- confirm
        # this is intended before relying on config round-trips.
        config = {
            'n_input': self.n_input,
            'n_output': self.n_output,
            'n_hidden': self.n_hidden,
            'last_layer_init_scale': self.last_layer_init_scale,
            'omega0': self.omega0
        }
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def call(self, inputs):
        l0 = self.dense0(inputs)
        l1 = self.dense1(l0)
        l2 = self.dense2(l1)
        # Skip connection: feed the raw inputs alongside the layer-2 features.
        l2_concate = Concatenate()([inputs, l2])
        l3 = self.dense3(l2_concate)
        l4 = self.dense4(l3)
        # Second skip: carry (inputs + l2) forward next to the layer-4 output.
        l4_concate = Concatenate()([l2_concate, l4])
        l5 = self.dense5(l4_concate)
        return l5
class FCBNLeakyReluModel(Model):
    """MLP of five hidden Dense layers, each preceded by BatchNormalization
    and followed by LeakyReLU, plus a batch-normalised linear output layer."""

    def __init__(self, n_input, n_output, n_hidden):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_output
        self.n_hidden = n_hidden
        # Assemble the same BN/Dense/LeakyReLU stack the hand-written version
        # listed out explicitly: 5 hidden triples, then BN + linear output.
        stack = [Input(n_input)]
        for _ in range(5):
            stack.extend([BatchNormalization(), Dense(n_hidden), LeakyReLU()])
        stack.extend([BatchNormalization(), Dense(n_output)])
        self.model = Sequential(stack)

    def get_config(self):
        return {
            'n_input': self.n_input,
            'n_output': self.n_output,
            'n_hidden': self.n_hidden,
        }

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def call(self, inputs):
        return self.model(inputs)
class ModelPartialQ(Model):
    """Wraps *model*, appending a fixed parameter value ``q`` (1x1 array)
    as an extra trailing column of every input row."""

    def __init__(self, model, q):
        super().__init__()
        self.model = model
        assert isinstance(q, np.ndarray), 'type of q is ' + str(type(q))
        assert q.shape[0] == 1 and q.shape[1] == 1
        self.q = tf.constant(q, dtype=tf.float32)

    def call(self, inputs):
        # Tile the 1x1 constant to one row per input sample, concatenate it as
        # an extra column, then delegate to the wrapped model.
        inputs_with_q = tf.concat(
            (inputs, tf.tile(self.q, multiples=tf.stack((tf.shape(inputs)[0], 1)))), axis=1)
        return self.model(inputs_with_q)
class DispModel(Model):
    """Displacement network: a feature mapping followed by a core model, with
    boundary conditions enforced multiplicatively via ``bc``."""

    def __init__(
        self,
        input_domain,
        dim,
        bc,
        features=None,
        model=None,
    ):
        super().__init__()
        # ``input_domain`` has two entries (min, max) per network input, so
        # the number of inputs is len(input_domain) // 2.
        self.input_domain = np.array(input_domain, dtype=np.float32)
        self.dim = dim
        # ``bc`` arrives either as a jsonpickle string (when restored from a
        # saved config) or as a live object (when built in code).
        if isinstance(bc, str):
            self.bc = jsonpickle.decode(bc)
        else:
            self.bc = bc
        # Sub-layers are rebuilt from {'class_name', 'config'} dicts by looking
        # the class up in this module's globals().
        self.features = getattr(
            globals()[features['class_name']], 'from_config')(features['config'])
        self.model = getattr(
            globals()[model['class_name']], 'from_config')(model['config'])
        assert self.features.n_output == self.model.n_input
        # call model with some stuff to initialize the shapes
        some_stuff = tf.ones((5, len(input_domain)//2))
        self.predict(some_stuff)

    def get_config(self):
        assert jsonpickle.encode(self.bc) != 'null', 'serializing bc failed'
        config = {
            'input_domain': self.input_domain,
            'dim': self.dim,
            'bc': jsonpickle.encode(self.bc),
            'features': {
                'class_name': self.features.__class__.__name__,
                'config': self.features.get_config(),
            },
            'model': {
                'class_name': self.model.__class__.__name__,
                'config': self.model.get_config(),
            },
        }
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    @classmethod
    def load_from_config_file(cls, filename):
        """Restore a DispModel from a JSON config file on disk."""
        return tf.keras.models.model_from_json(load_file(filename), custom_objects={cls.__name__: cls})

    def call(self, inputs, training=None):
        assert len(self.input_domain) // 2 == inputs.shape.as_list()[1]
        # Split out the spatial coordinate columns for the bc function.
        x = tf.gather(inputs, [0], axis=1)
        y = tf.gather(inputs, [1], axis=1)
        if self.dim == 3:
            z = tf.gather(inputs, [2], axis=1)
        # Map inputs into the [-1, 1] reference domain (transform_minus11)
        # before the feature mapping.
        inputs_m11 = transform_minus11(inputs, self.input_domain)
        features_output = self.features(inputs_m11)
        displacement = self.model(features_output)
        if self.dim == 2:
            bc_output = self.bc([x, y])
        else:
            assert self.dim == 3
            bc_output = self.bc([x, y, z])
        # bc_output scales the raw displacement -- presumably 0 on fixed
        # boundaries so constraints hold exactly; verify against the bc impl.
        displacement_fixed = bc_output * displacement
        return displacement_fixed

    def get_model_partial_q(self, q):
        return ModelPartialQ(self, q)

    def has_q(self):
        # Any input columns beyond the spatial dimension are treated as
        # extra parameter inputs ``q``.
        return len(self.input_domain) // 2 > self.dim
class DensityModel(Model):
    """Material-density model for topology optimization: feature transform +
    dense network, squashed through a shifted sigmoid so the initial mean
    density matches ``volume_ratio``, then passed through a constraint object.

    NOTE(review): relies on module-level ``transform_minus11``/``load_file``,
    ``jsonpickle``, and a Keras ``Model`` base defined elsewhere in this file.
    """
    def __init__(self,
                 input_domain,           # flat [min, max, ...]; len == 2 * n_inputs
                 dim,                    # spatial dimension: 2 or 3
                 volume_ratio,           # target mean density in (0, 1)
                 constraint=None,        # constraint object (direct construction)
                 constraint_config=None, # jsonpickle string (deserialization path)
                 features=None,          # {'class_name', 'config'} feature model
                 model=None,             # {'class_name', 'config'} dense model
                 volume_ratio_q_idx=-1   # q column holding volume_ratio; -1 = fixed
                 ):
        super().__init__()
        # Exactly one of constraint / constraint_config must be provided.
        if constraint_config is None:
            assert constraint is not None
            self.constraint = constraint
        else:
            self.constraint = jsonpickle.decode(constraint_config)
        self.input_domain = np.array(input_domain, dtype=np.float32)
        self.dim = dim
        self.volume_ratio = volume_ratio
        self.volume_ratio_q_idx = volume_ratio_q_idx
        # Sub-models are rebuilt by looking up the class by name in this module.
        self.features = getattr(
            globals()[features['class_name']], 'from_config')(features['config'])
        self.model = getattr(
            globals()[model['class_name']], 'from_config')(model['config'])
        assert self.features.n_output == self.model.n_input
        # throw some stuff at the model to initialize the shapes
        self.predict(tf.ones((5, len(input_domain)//2)))
    def get_config(self):
        """Serialize constructor kwargs; constraint goes through jsonpickle."""
        config = {
            'input_domain': self.input_domain,
            'dim': self.dim,
            'volume_ratio': self.volume_ratio,
            'volume_ratio_q_idx': self.volume_ratio_q_idx,
            'constraint_config': jsonpickle.encode(self.constraint),
            'features': {
                'class_name': self.features.__class__.__name__,
                'config': self.features.get_config(),
            },
            'model': {
                'class_name': self.model.__class__.__name__,
                'config': self.model.get_config(),
            }
        }
        return config
    @classmethod
    def from_config(cls, config):
        """Rebuild an instance from a get_config() dict."""
        return cls(**config)
    @classmethod
    def load_from_config_file(cls, filename):
        """Deserialize a model previously saved as Keras JSON."""
        return tf.keras.models.model_from_json(load_file(filename), custom_objects={cls.__name__: cls})
    def call(self, inputs, training=None):
        """Map coordinates (+ optional q columns) to constrained densities."""
        assert len(self.input_domain) // 2 == inputs.shape.as_list()[1]
        # volume_ratio is either fixed or read per-sample from a q column.
        if self.volume_ratio_q_idx == -1:
            volume_ratio = self.volume_ratio
        else:
            volume_ratio = tf.gather(
                inputs, [self.dim + self.volume_ratio_q_idx], axis=1)
            inputs_m11 = transform_minus11(inputs, self.input_domain)
        inputs_features = self.features(inputs_m11)
        model_output = self.model(inputs_features)
        # sigmoid is y = 1/(1 + exp(-x))
        # we want that initialization sigmoid(0 + offset) = volume_ratio, inverse of sigmoid x = ln(y/(1-y))
        # NOTE(review): offset diverges if volume_ratio is exactly 0 or 1 —
        # presumably callers guarantee (0, 1); confirm.
        alpha = 5.0
        offset = tf.math.log(volume_ratio / (1.0 - volume_ratio))
        densities = tf.math.sigmoid(alpha * model_output + offset)
        densities_constrained = self.constraint.apply(inputs, densities)
        return densities_constrained
    def get_model_partial_q(self, inputs):
        # NOTE(review): the parameter here is the fixed q value (cf. DispModel's
        # get_model_partial_q(q)); the name 'inputs' is kept for compatibility.
        return ModelPartialQ(self, inputs)
|
# repo: ledomone/kurs_django
"""
Django settings for biblio project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from configurations import Configuration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import hashlib
import os
import uuid
def get_secret_key(base_dir='.'):
    """Return the Django secret key stored in ``<base_dir>/.secret.key``,
    generating and persisting a fresh one when the file is missing or empty.

    The key is the 128-char hex SHA-512 digest of a random UUID, so it is
    stable across runs once written.

    :param base_dir: directory holding (or to receive) the key file
    :return: the secret key string
    """
    def gen_key(key_path):
        # Generate once, persist, and return so later runs reuse the same key.
        key = hashlib.sha512(str(uuid.uuid4()).encode('utf8')).hexdigest()
        with open(key_path, 'w') as key_file:
            key_file.write(key)
        return key
    path = os.path.join(base_dir, '.secret.key')
    try:
        # Context manager: the original leaked the file handle via open().read().
        with open(path) as key_file:
            secret_key = key_file.read()
        if not secret_key:
            # Explicit raise instead of `assert` (asserts vanish under -O);
            # IOError keeps the same except-clause behavior as before.
            raise IOError("Wrong secret key")
    except (IOError, AssertionError):
        secret_key = gen_key(path)
    return secret_key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Production(Configuration):
    """Django settings for the biblio project (production profile).

    See https://docs.djangoproject.com/en/1.7/topics/settings/ and the
    deployment checklist:
    https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
    """
    # SECURITY WARNING: keep the secret key used in production secret!
    # NOTE: evaluated at import time; may create BASE_DIR/.secret.key on disk.
    SECRET_KEY = get_secret_key(BASE_DIR)
    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = False
    TEMPLATE_DEBUG = False
    ALLOWED_HOSTS = ['127.0.0.1']
    # Application definition
    INSTALLED_APPS = (
        # project apps
        'shelf',
        'contact',
        'rental',
        'users',
        #########################################
        # third-party apps
        'allauth',
        'allauth.account',
        'allauth.socialaccount',
        # 'allauth.socialaccount.providers.facebook',
        'bootstrap3',
        'crispy_forms',
        'rest_framework',
        #########################################
        # django contrib apps
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.staticfiles',
        'django.contrib.sites',
    )
    MIDDLEWARE_CLASSES = (
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
    )
    ROOT_URLCONF = 'biblio.urls'
    WSGI_APPLICATION = 'biblio.wsgi.application'
    # Database
    # https://docs.djangoproject.com/en/1.7/ref/settings/#databases
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
    # Internationalization
    # https://docs.djangoproject.com/en/1.7/topics/i18n/
    LANGUAGE_CODE = 'pl'
    TIME_ZONE = 'Europe/Warsaw'
    USE_I18N = True  # internationalization
    USE_L10N = True  # localization
    USE_TZ = True
    AUTH_USER_MODEL = 'users.BiblioUser'
    # Static files (CSS, JavaScript, Images)
    # https://docs.djangoproject.com/en/1.7/howto/static-files/
    MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
    MEDIA_URL = '/media/'
    STATIC_ROOT = os.path.join(BASE_DIR, 'static')
    STATIC_URL = '/static/'
    # NOTE(review): the bare 'media' entry resolves relative to the process
    # cwd and duplicates the absolute entry — presumably leftover; confirm
    # before removing.
    STATICFILES_DIRS = [
        os.path.join(BASE_DIR, 'media'), 'media',
    ]
    # BUGFIX: was a set literal ({...}); Django expects an ordered sequence
    # here, since order defines template lookup precedence.
    TEMPLATE_DIRS = (
        os.path.join(BASE_DIR, 'templates'),
    )
    TEMPLATE_CONTEXT_PROCESSORS = (
        "django.contrib.auth.context_processors.auth",
        "django.core.context_processors.debug",
        "django.core.context_processors.i18n",
        "django.core.context_processors.media",
        "django.core.context_processors.static",
        "django.core.context_processors.tz",
        "django.contrib.messages.context_processors.messages",
        # Required by allauth template tags
        "django.core.context_processors.request",
        # allauth specific context processors
        "allauth.account.context_processors.account",
        "allauth.socialaccount.context_processors.socialaccount",
    )
    AUTHENTICATION_BACKENDS = (
        # Needed to login by username in Django admin, regardless of `allauth`
        "django.contrib.auth.backends.ModelBackend",
        # `allauth` specific authentication methods, such as login by e-mail
        "allauth.account.auth_backends.AuthenticationBackend",
    )
    SITE_ID = 1  # because of 'django.contrib.sites'
    LOGIN_URL = '/accounts/login/'
    LOGIN_REDIRECT_URL = "main-page"
    CRISPY_TEMPLATE_PACK = "bootstrap3"
    REST_FRAMEWORK = {
        # Use Django's standard `django.contrib.auth` permissions,
        # or allow read-only access for unauthenticated users.
        'DEFAULT_PERMISSION_CLASSES': [
            'rest_framework.permissions.DjangoModelPermissions'
        ]
    }
class Dev(Production):
    """Development overrides: debug on, e-mail printed to the console."""
    DEBUG = True
    TEMPLATE_DEBUG = True
    # Print outgoing e-mail to stdout instead of sending it.
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
# repo: LB-JakubSkorupka/o3de
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import copy
from typing import (Dict, List)
from unittest import TestCase
from unittest.mock import (MagicMock, mock_open, patch)
from utils import file_utils
from utils import json_utils
from model import constants
from model.resource_mapping_attributes import (ResourceMappingAttributes, ResourceMappingAttributesBuilder,
ResourceMappingAttributesStatus)
class TestJsonUtils(TestCase):
    """
    json utils unit test cases
    TODO: add test cases once error handling is ready
    """
    # Shared fixture values; mutable fixtures must be deep-copied per test.
    _expected_file_name: str = "dummy.json"
    _expected_key: str = "TestBucketKey"
    _expected_bucket_type: str = constants.AWS_RESOURCE_TYPES[constants.AWS_RESOURCE_S3_BUCKET_INDEX]
    _expected_bucket_name: str = "TestBucketName"
    _expected_account_id: str = "123456789012"
    _expected_invalid_account_id: str = "12345"
    _expected_region: str = "us-west-2"
    _expected_invalid_region: str = "dummy-region"
    _expected_bucket_resource_mapping: ResourceMappingAttributes = ResourceMappingAttributesBuilder() \
        .build_key_name(_expected_key) \
        .build_type(_expected_bucket_type) \
        .build_name_id(_expected_bucket_name) \
        .build_account_id(_expected_account_id) \
        .build_region(_expected_region) \
        .build()
    _expected_json_dict: Dict[str, any] = {
        json_utils._RESOURCE_MAPPING_JSON_KEY_NAME: {_expected_key: {
            json_utils._RESOURCE_MAPPING_TYPE_JSON_KEY_NAME: _expected_bucket_type,
            json_utils._RESOURCE_MAPPING_NAMEID_JSON_KEY_NAME: _expected_bucket_name
        }},
        json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME: _expected_account_id,
        json_utils._RESOURCE_MAPPING_REGION_JSON_KEY_NAME: _expected_region,
        json_utils._RESOURCE_MAPPING_VERSION_JSON_KEY_NAME: json_utils._RESOURCE_MAPPING_JSON_FORMAT_VERSION
    }

    def setUp(self) -> None:
        # Load the real schema, then patch open/json.dump/json.load so no
        # test touches the filesystem.
        schema_path: str = file_utils.join_path(file_utils.get_parent_directory_path(__file__, 4),
                                                'resource_mapping_schema.json')
        json_utils.load_resource_mapping_json_schema(schema_path)
        self._mock_open = mock_open()
        open_patcher: patch = patch("utils.json_utils.open", self._mock_open)
        self.addCleanup(open_patcher.stop)
        open_patcher.start()
        json_dump_patcher: patch = patch("json.dump")
        self.addCleanup(json_dump_patcher.stop)
        self._mock_json_dump: MagicMock = json_dump_patcher.start()
        json_load_patcher: patch = patch("json.load")
        self.addCleanup(json_load_patcher.stop)
        self._mock_json_load: MagicMock = json_load_patcher.start()

    def test_convert_resources_to_json_dict_return_expected_json_dict(self) -> None:
        old_json_dict: Dict[str, any] = {
            json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME: TestJsonUtils._expected_account_id,
            json_utils._RESOURCE_MAPPING_REGION_JSON_KEY_NAME: TestJsonUtils._expected_region
        }
        actual_json_dict: Dict[str, any] = \
            json_utils.convert_resources_to_json_dict([TestJsonUtils._expected_bucket_resource_mapping], old_json_dict)
        assert actual_json_dict == TestJsonUtils._expected_json_dict

    def test_convert_json_dict_to_resources_return_expected_resources(self) -> None:
        actual_resources: List[ResourceMappingAttributes] = \
            json_utils.convert_json_dict_to_resources(TestJsonUtils._expected_json_dict)
        assert actual_resources == [TestJsonUtils._expected_bucket_resource_mapping]

    def test_read_from_json_file_return_expected_json_dict(self) -> None:
        mocked_open: MagicMock = MagicMock()
        self._mock_open.return_value.__enter__.return_value = mocked_open
        expected_json_dict: Dict[str, any] = {}
        self._mock_json_load.return_value = expected_json_dict
        actual_json_dict: Dict[str, any] = json_utils.read_from_json_file(TestJsonUtils._expected_file_name)
        self._mock_open.assert_called_once_with(TestJsonUtils._expected_file_name, "r")
        self._mock_json_load.assert_called_once_with(mocked_open,
                                                     object_pairs_hook=json_utils._validate_json_dict_unique_keys)
        assert actual_json_dict == expected_json_dict

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_no_version(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict.pop(json_utils._RESOURCE_MAPPING_VERSION_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_no_resource_mappings(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict.pop(json_utils._RESOURCE_MAPPING_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_no_accountid(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict.pop(json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    # BUGFIX: renamed from "...raise_error_when..." — the body asserts that
    # validation PASSES for an empty account id (cf. the template-accountid
    # test below), so the old name contradicted the behavior under test.
    def test_validate_json_dict_according_to_json_schema_pass_when_json_dict_has_empty_accountid(self) -> None:
        valid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        valid_json_dict[json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME] = ''
        json_utils.validate_json_dict_according_to_json_schema(valid_json_dict)

    def test_validate_json_dict_according_to_json_schema_pass_when_json_dict_has_template_accountid(self) -> None:
        valid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        valid_json_dict[json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME] = \
            json_utils.RESOURCE_MAPPING_ACCOUNTID_TEMPLATE_VALUE
        json_utils.validate_json_dict_according_to_json_schema(valid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_invalid_accountid(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict[json_utils.RESOURCE_MAPPING_ACCOUNTID_JSON_KEY_NAME] = \
            TestJsonUtils._expected_invalid_account_id
        self.assertRaises(ValueError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_no_region(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict.pop(json_utils._RESOURCE_MAPPING_REGION_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_has_invalid_region(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict[json_utils._RESOURCE_MAPPING_REGION_JSON_KEY_NAME] = \
            TestJsonUtils._expected_invalid_region
        self.assertRaises(ValueError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_resource_has_no_type(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict[json_utils._RESOURCE_MAPPING_JSON_KEY_NAME][TestJsonUtils._expected_key].pop(
            json_utils._RESOURCE_MAPPING_TYPE_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_json_dict_according_to_json_schema_raise_error_when_json_dict_resource_has_no_nameid(self) -> None:
        invalid_json_dict: Dict[str, any] = copy.deepcopy(TestJsonUtils._expected_json_dict)
        invalid_json_dict[json_utils._RESOURCE_MAPPING_JSON_KEY_NAME][TestJsonUtils._expected_key].pop(
            json_utils._RESOURCE_MAPPING_NAMEID_JSON_KEY_NAME)
        self.assertRaises(KeyError, json_utils.validate_json_dict_according_to_json_schema, invalid_json_dict)

    def test_validate_resources_according_to_json_schema_return_expected_rows_when_resource_is_invalid(self) -> None:
        # BUGFIX: deep-copy before mutating; the original aliased the shared
        # class attribute and leaked key_name='' into later tests (order-
        # dependent pollution).
        invalid_bucket_resource_mapping: ResourceMappingAttributes = \
            copy.deepcopy(TestJsonUtils._expected_bucket_resource_mapping)
        invalid_bucket_resource_mapping.key_name = ""
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([invalid_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == [0]

    def test_validate_resources_according_to_json_schema_return_empty_when_resource_is_valid(self) -> None:
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([TestJsonUtils._expected_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == []

    def test_validate_resources_according_to_json_schema_return_expected_rows_when_non_succeed_resource_has_same_key_as_succeed_one(self) -> None:
        succeed_bucket_resource_mapping: ResourceMappingAttributes = \
            copy.deepcopy(TestJsonUtils._expected_bucket_resource_mapping)
        succeed_bucket_resource_mapping.status = \
            ResourceMappingAttributesStatus(ResourceMappingAttributesStatus.SUCCESS_STATUS_VALUE)
        non_succeed_bucket_resource_mapping: ResourceMappingAttributes = \
            copy.deepcopy(TestJsonUtils._expected_bucket_resource_mapping)
        non_succeed_bucket_resource_mapping.status = \
            ResourceMappingAttributesStatus(ResourceMappingAttributesStatus.MODIFIED_STATUS_VALUE)
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([
                succeed_bucket_resource_mapping, non_succeed_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == [1]

    def test_validate_resources_according_to_json_schema_return_expected_rows_when_non_succeed_resources_have_duplicated_key(self) -> None:
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([
                TestJsonUtils._expected_bucket_resource_mapping, TestJsonUtils._expected_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == [0, 1]

    def test_validate_resources_according_to_json_schema_return_expected_rows_when_resource_has_invalid_accountid(self) -> None:
        invalid_bucket_resource_mapping: ResourceMappingAttributes = \
            copy.deepcopy(TestJsonUtils._expected_bucket_resource_mapping)
        invalid_bucket_resource_mapping.account_id = TestJsonUtils._expected_invalid_account_id
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([invalid_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == [0]

    def test_validate_resources_according_to_json_schema_return_expected_rows_when_resource_has_invalid_region(self) -> None:
        invalid_bucket_resource_mapping: ResourceMappingAttributes = \
            copy.deepcopy(TestJsonUtils._expected_bucket_resource_mapping)
        invalid_bucket_resource_mapping.region = TestJsonUtils._expected_invalid_region
        actual_invalid_rows: Dict[int, List[str]] = \
            json_utils.validate_resources_according_to_json_schema([invalid_bucket_resource_mapping])
        assert list(actual_invalid_rows.keys()) == [0]

    def test_write_into_json_file_succeed(self) -> None:
        mocked_open: MagicMock = MagicMock()
        self._mock_open.return_value.__enter__.return_value = mocked_open
        json_utils.write_into_json_file(TestJsonUtils._expected_file_name, TestJsonUtils._expected_json_dict)
        self._mock_open.assert_called_once_with(TestJsonUtils._expected_file_name, "w")
        self._mock_json_dump.assert_called_once_with(
            TestJsonUtils._expected_json_dict, mocked_open, indent=4, sort_keys=True)
|
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import urllib.parse
import re
import random
from bs4 import BeautifulSoup
from PyQt5 import QtCore, QtWidgets
from player_functions import ccurl, send_notification
class LoginWidget(QtWidgets.QDialog):
    """Dialog collecting server address and credentials; on successful
    validation it stores the auth pair and URL on ``server`` and persists
    the URL to ``server.server_list``."""

    def __init__(self, parent=None, server=None):
        super(LoginWidget, self).__init__(parent)
        self.parent = parent
        self.server = server  # MyServer instance updated after login
        self.server_ip = QtWidgets.QLineEdit(self)
        self.text_name = QtWidgets.QLineEdit(self)
        self.text_pass = QtWidgets.QLineEdit(self)
        self.server_ip.setPlaceholderText('FULL IP ADDRESS OF SERVER')
        self.text_name.setPlaceholderText('USER')
        self.text_pass.setPlaceholderText('PASSWORD')
        # Pre-fill the last-used server address, if any.
        if self.server.server_name:
            self.server_ip.setText(self.server.server_name)
        self.text_pass.setEchoMode(QtWidgets.QLineEdit.Password)
        self.btn_login = QtWidgets.QPushButton('Login', self)
        self.btn_login.clicked.connect(self.handleLogin)
        self.setWindowTitle('Credentials Required')
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(self.server_ip)
        layout.addWidget(self.text_name)
        layout.addWidget(self.text_pass)
        layout.addWidget(self.btn_login)
        self.auth_info = ''  # 'user:password' pair built in handleLogin
        self.auth_64 = ''
        self.show()
        self.count = 0
        self.found = True

    def handleLogin(self):
        """Validate the entered credentials against the server; on success
        store them on ``self.server`` and persist the server URL."""
        self.hide()
        text_val = self.text_name.text()
        pass_val = self.text_pass.text()
        self.auth_info = text_val+':'+pass_val
        url = self.server_ip.text()
        if url:
            if not url.endswith('/'):
                url = url+'/'
            if not url.startswith('http'):
                send_notification('Enter full IP address starting with http/https properly')
            else:
                # Probe a known endpoint to verify the credentials work.
                content = ccurl(
                    '{0}get_all_category.htm#-c#{1}'.format(url, self.server.cookie_file),
                    user_auth=self.auth_info, verify_peer=False
                )
                print(content, '>>>>>')
                if ('Access Not Allowed, Authentication Failed' in content or
                        'You are not authorized to access the content' in content):
                    self.server.login_success = False
                    send_notification('Authentication Failed. Either Username or Password is incorrect')
                elif not content:
                    send_notification('Curl failure: may be server is not running or misconfigured')
                else:
                    # BUGFIX: store the validated 'user:password' pair on the
                    # server (the original statement was corrupted/incomplete).
                    self.server.passwd = self.auth_info
                    self.server.url = url
                    self.server.login_success = True
                    send_notification('Login Success. Now click on Login Again')
                    # Remember the server address for the next session.
                    with open(self.server.server_list, 'w') as f:
                        f.write(self.server.url)
                    self.server.server_name = url
        else:
            send_notification('Server IP Address Needed')
class MyServer:
    """Client for a private media server: handles login, category browsing,
    m3u playlist parsing, and final stream-URL resolution via ``ccurl``.

    NOTE(review): depends on module-level helpers ``ccurl`` and
    ``send_notification`` and on the ``LoginWidget`` dialog defined above.
    """

    def __init__(self, tmp):
        # ``tmp`` is a scratch directory for the cookie jar and saved server list.
        self.hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
        self.tmp_dir = tmp
        self.cookie_file = os.path.join(tmp, 'myserver.txt')
        self.site_dict = {}   # category -> list of sub-options
        self.site_arr = []    # ordered list of categories
        self.site = None      # currently selected category
        self.opt = None       # currently selected sub-option
        self.url = None       # base URL once logged in
        # 'user:password' HTTP auth pair; ':' means no credentials stored.
        self.passwd = ':'
        self.login_success = None
        self.login_widget = None
        self.server_list = os.path.join(tmp, 'server_list.txt')
        self.server_name = None

    def getOptions(self):
        """Return the top-level menu entries for this backend."""
        criteria = ['Login', 'Logout', 'Discover', 'History', 'newversion']
        return criteria

    def getFinalUrl(self, name, epn, mirror, quality):
        """Resolve the final playable URL for an episode entry.

        The entry format is 'title\\turl[\\t...]'; the url field is returned.
        """
        if self.url:
            # NOTE(review): no '?' separator before 'quality=' — presumably
            # the server tolerates this; confirm against its URL scheme.
            url = self.url+'quality='+quality
            content = ccurl(
                '{0}#-b#{1}'.format(url, self.cookie_file), verify_peer=False
            )
            print(content)
        final = epn
        if '\t' in epn:
            final = epn.split('\t')[1]
        return final

    def search(self, name):
        """Searching is not supported by this backend."""
        m = ['Not Available']
        return m

    def handle_login(self, server_name=None):
        """Restore the saved server address and either re-validate stored
        credentials or open the login dialog."""
        if os.path.isfile(self.server_list) and not server_name:
            with open(self.server_list, 'r') as f:
                self.server_name = f.read()
        elif server_name:
            self.server_name = server_name
        if not self.url:
            self.login_widget = LoginWidget(server=self)
            self.login_widget.show()
            #self.login_widget.setWindowModality(QtCore.Qt.WindowModal)
        else:
            content = ccurl(
                '{0}get_all_category.htm#-c#{1}'.format(self.url, self.cookie_file),
                user_auth=self.passwd, verify_peer=False
            )
            print(content, '>>>>>')
            if ('Access Not Allowed, Authentication Failed' in content
                    or 'You are not authorized to access the content' in content):
                # Stored credentials no longer valid: ask again.
                self.login_success = False
                self.login_widget = LoginWidget(server=self)
                self.login_widget.show()
            elif not content:
                send_notification('Curl failure: may be server is not running or misconfigured')
            else:
                self.login_success = True

    def getCompleteList(self, opt, genre_num=None, next_login=None):
        """Navigate the category tree for menu entry ``opt`` and return the
        list of items to display (a trailing int encodes the display mode)."""
        print(self.site, opt, '--')
        m = []
        if opt.lower() == 'login':
            self.handle_login()
            print(self.login_success, 'login_success')
            if self.login_success:
                content = ccurl(
                    '{0}get_all_category.htm#-b#{1}'.format(self.url, self.cookie_file),
                    user_auth=self.passwd, verify_peer=False
                )
                print(content, '>>>>>160')
                soup = BeautifulSoup(content, 'lxml')
                print(soup.prettify())
                link_text = soup.find('div', {'id':'site_option'})
                print(link_text)
                if link_text:
                    # Parse 'Category:option;Category:option;...' pairs into
                    # site_arr / site_dict.
                    link_text = link_text.text
                    arr = re.search('Video[^"]*', link_text).group()
                    arr_split = arr.split(';')
                    print(arr_split)
                    old_j = None
                    site_opt = []
                    for l,i in enumerate(arr_split):
                        if i:
                            j, k = i.split(':')
                            if old_j != j:
                                old_j = j
                                site_opt.clear()
                            if old_j not in self.site_arr:
                                self.site_arr.append(old_j)
                            site_opt.append(k)
                            if site_opt:
                                self.site_dict.update({old_j:site_opt.copy()})
                    print(self.site_dict)
                    try:
                        # The server lists itself as a category: drop it.
                        i = self.site_arr.index('MyServer')
                        del self.site_arr[i]
                        del self.site_dict['MyServer']
                    except Exception as err:
                        print(err, '--111---')
                    m = self.site_arr.copy()
                    m.append('<--')
                m.append(0)
        elif opt in self.site_arr:
            self.site = opt
            m = self.site_dict.get(opt).copy()
            if self.site.lower() == 'playlists':
                m.append(1)
            else:
                m.append('<----')
                m.append(0)
        elif opt == '<----' or opt == '<--':
            # Navigate one level up.
            self.site = None
            self.opt = None
            if opt == '<----':
                m = self.site_arr.copy()
                m.append('<--')
            else:
                m = ['Login', 'Logout', 'Discover', 'History']
            m.append(0)
        elif opt == 'History':
            if self.site is None:
                m.append(6)
            else:
                self.opt = opt
                url_new = self.url+urllib.parse.quote(
                    'site={0}&opt={1}'.format(self.site.lower(), self.opt.lower())
                )
                print(url_new)
                content = ccurl(url_new+'#'+'-b'+'#'+self.cookie_file, verify_peer=False)
                #print(content)
                m = content.split('\n')
                if self.site.lower() == 'video' or self.site.lower() == 'music':
                    m = [i.replace('::::', '\t', 1) for i in m]
                m.append(1)
        elif opt.lower() == 'discover':
            self.opt = opt
            m.append(4)
        elif opt.lower() == 'logout':
            url_new = self.url+'logout'
            content = ccurl(url_new+'#'+'-b'+'#'+self.cookie_file, verify_peer=False)
            self.opt = opt
            self.url = None
            # BUGFIX: reset to the credential-less default (':' as in __init__);
            # the original statement was corrupted/incomplete.
            self.passwd = ':'
            self.login_success = False
            self.site_arr.clear()
            self.site_dict.clear()
            if os.path.isfile(self.cookie_file):
                os.remove(self.cookie_file)
            m.append(5)
        else:
            self.opt = opt
            url_new = self.url+urllib.parse.quote(
                'site={0}&opt={1}'.format(self.site.lower(), self.opt.lower())
            )
            print(url_new)
            content = ccurl(url_new+'#'+'-b'+'#'+self.cookie_file, verify_peer=False)
            #print(content)
            m = content.split('\n')
            if self.site.lower() == 'video' or self.site.lower() == 'music':
                m = [i.replace('::::', '\t', 1) for i in m]
            m.append(1)
        if not m and opt.lower() == 'login':
            m.append(3)
        return m

    def get_playlist(self, content):
        """Parse .m3u ``content`` into 'title\\turl\\tNONE' entries."""
        lines = content.split('\n')
        length = len(lines)
        i = 0
        m = []
        while i < length:
            try:
                if 'EXTINF' in lines[i]:
                    # Title is everything after the first comma.
                    n_epn = (lines[i].strip()).split(',', 1)[1]
                    n_epn = n_epn.strip()
                    if n_epn.startswith('NONE - '):
                        n_epn = n_epn.replace('NONE - ', '', 1)
                    if n_epn.startswith('-'):
                        n_epn = n_epn.replace('-', '', 1)
                    if '/' in n_epn:
                        n_epn = n_epn.replace('/', '-')
                    n_epn = n_epn.strip()
                    if i+1 < length:
                        # The URL follows on the next line.
                        entry_epn = n_epn+'\t'+lines[i+1].strip()+'\t'+'NONE'
                        m.append(entry_epn)
                        i = i+2
                    else:
                        i = i+1
                else:
                    # BUGFIX: non-EXTINF lines (e.g. the leading '#EXTM3U'
                    # header) previously never advanced i, hanging forever.
                    i = i+1
            except Exception as e:
                print(e)
                # BUGFIX: also advance past malformed lines instead of
                # retrying the same index in an infinite loop.
                i = i+1
        return m

    def getEpnList(self, name, opt, depth_list, extra_info, siteName, category):
        """Fetch the episode list for ``name`` as playlist entries.

        Returns (entries, summary, picture, record_history, depth_list).
        """
        summary = 'None'
        picn = 'No.jpg'
        record_history = False
        print(self.site, self.opt, opt)
        if self.site:
            if self.site.lower() == 'playlists':
                opt_val = name
                name_val = ''
            else:
                opt_val = self.opt.lower()
                name_val = name
                if self.site.lower() == 'video' or self.site.lower() == 'music':
                    name_val = extra_info+'.hash'
            url_new = 'site={0}&opt={1}&s={2}&exact.m3u'.format(self.site.lower(), opt_val, name_val)
            url_new = urllib.parse.quote(url_new)
            url = self.url+url_new
            content = ccurl(url+'#'+'-b'+'#'+self.cookie_file, verify_peer=False)
            m = self.get_playlist(content)
            record_history = True
        elif self.opt == 'Discover':
            # 'Discover' reuses the name field as a server address to log into.
            self.handle_login(server_name=name)
            m = []
            record_history = False
        else:
            m = []
        return (m, summary, picn, record_history, depth_list)
|
from typing import (
TYPE_CHECKING,
Any,
Callable,
Coroutine,
)
from eth_utils.toolz import (
assoc,
)
from web3.exceptions import (
InvalidTransaction,
TransactionTypeMismatch,
)
from web3.types import (
BlockData,
RPCEndpoint,
RPCResponse,
TxParams,
Wei,
)
if TYPE_CHECKING:
from web3 import Web3 # noqa: F401
def validate_transaction_params(
    transaction: TxParams, latest_block: BlockData, generated_gas_price: Wei
) -> TxParams:
    """Validate and normalize the fee fields of an eth_sendTransaction payload.

    Applies the gas price strategy (legacy txs only), rejects mixed
    legacy/1559 fee fields, checks 1559 fee ordering, and derives a missing
    maxFeePerGas from the latest base fee.
    """
    def _as_int(value: Any) -> int:
        # Fee fields arrive hex-encoded; parse base 16.
        return int(str(value), 16)

    dynamic_keys = ('maxFeePerGas', 'maxPriorityFeePerGas')

    # gas price strategy explicitly set (legacy fees only):
    if (
        generated_gas_price is not None
        and 'gasPrice' not in transaction
        and not any(key in transaction for key in dynamic_keys)
    ):
        transaction = assoc(transaction, 'gasPrice', hex(generated_gas_price))

    has_legacy_fee = 'gasPrice' in transaction
    has_max_fee = 'maxFeePerGas' in transaction
    has_priority_fee = 'maxPriorityFeePerGas' in transaction

    if has_legacy_fee and (has_max_fee or has_priority_fee):
        # legacy and 1559 tx variables mixed
        raise TransactionTypeMismatch()
    elif has_max_fee and has_priority_fee:
        # canonical 1559 tx: sanity-check fee ordering
        if _as_int(transaction["maxFeePerGas"]) < _as_int(transaction["maxPriorityFeePerGas"]):
            raise InvalidTransaction("maxFeePerGas must be >= maxPriorityFeePerGas")
    elif has_priority_fee:
        # 1559 with no max fee: derive one from the latest base fee
        base_fee = latest_block['baseFeePerGas']
        max_fee_per_gas = _as_int(transaction['maxPriorityFeePerGas']) + 2 * base_fee
        transaction = assoc(transaction, 'maxFeePerGas', hex(max_fee_per_gas))
    elif has_max_fee:
        raise InvalidTransaction(
            "maxPriorityFeePerGas must be defined in a 1559 transaction."
        )
    # fully formed (legacy or 1559) tx, or no fee values were specified
    return transaction
def gas_price_strategy_middleware(
    make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
    """
    - Uses a gas price strategy if one is set. This is only supported for legacy transactions.
    It is recommended to send 1559 transactions whenever possible.
    - Validates transaction params against legacy and 1559 values.
    """
    def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
        # Only eth_sendTransaction payloads need fee normalization.
        if method != 'eth_sendTransaction':
            return make_request(method, params)
        transaction = params[0]
        gas_price = web3.eth.generate_gas_price(transaction)
        block = web3.eth.get_block('latest')
        transaction = validate_transaction_params(transaction, block, gas_price)
        return make_request(method, (transaction,))
    return middleware
async def async_gas_price_strategy_middleware(
    make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], Coroutine[Any, Any, RPCResponse]]:
    """
    - Uses a gas price strategy if one is set. This is only supported for legacy transactions.
    It is recommended to send 1559 transactions whenever possible.
    - Validates transaction params against legacy and 1559 values.
    """
    async def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
        # Only eth_sendTransaction payloads need fee normalization.
        if method != 'eth_sendTransaction':
            return await make_request(method, params)
        transaction = params[0]
        gas_price = await web3.eth.generate_gas_price(transaction)  # type: ignore
        block = await web3.eth.get_block('latest')  # type: ignore
        transaction = validate_transaction_params(transaction, block, gas_price)
        return await make_request(method, (transaction,))
    return middleware
|
# file: Segment/performance.py
import os
import time
import math
import pathlib
import cv2 as cv
import numpy as np
import pandas as pd
from tqdm import tqdm
from functools import reduce
import matplotlib.pyplot as plt
from collections import Counter
def get_path(file_dir):
    """List the entries of ``file_dir``, ordered by numeric filename stem.

    Returns (absolute-path strings, file names), both in the same order.
    Assumes every entry's stem parses as an integer.
    """
    entries = sorted(pathlib.Path(file_dir).iterdir(),
                     key=lambda entry: int(entry.stem))
    path_list = [str(entry) for entry in entries]
    name_list = [entry.name for entry in entries]
    return path_list, name_list
def prepro_image(img_path, img_resize, threshold=128):
    """Load an image as grayscale, resize it and binarise it to a 0/1 mask.

    :param img_path: path of the image file
    :param img_resize: (width, height) target size passed to cv.resize
    :param threshold: pixels > threshold become 1, the rest 0
    :return: int array of 0/1 values with the resized shape
    """
    image = cv.imread(img_path, 0)
    if len(image.shape) != 2:
        # defensive: imread with flag 0 should already yield 2-D grayscale
        image = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    image = cv.resize(image, img_resize)
    _, bin_image = cv.threshold(image, threshold, 255, cv.THRESH_BINARY)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype
    bin_image = np.array(bin_image / 255, dtype=int)
    return bin_image
def true_positive(pred, gt):
    """Count pixels that are foreground (non-zero) in both *pred* and *gt* (TP).

    Both arrays must have the same shape.
    """
    assert pred.shape == gt.shape
    # np.int was removed in NumPy 1.24; logical_and + sum needs no cast at all
    return int(np.sum(np.logical_and(pred, gt)))
def true_negative(pred, gt):
    """Count pixels that are background (zero) in both *pred* and *gt* (TN).

    Both arrays must have the same shape.
    """
    assert pred.shape == gt.shape
    # Invert both masks (background -> True) and count the overlap directly;
    # avoids the np.int alias removed in NumPy 1.24.
    return int(np.sum(np.logical_and(np.logical_not(pred), np.logical_not(gt))))
def false_positive(pred, gt):
    """Count pixels predicted foreground where *gt* is background (FP).

    Both arrays must have the same shape.
    """
    assert pred.shape == gt.shape
    # pred AND (NOT gt); avoids the np.int alias removed in NumPy 1.24.
    return int(np.sum(np.logical_and(pred, np.logical_not(gt))))
def false_negative(pred, gt):
    """Count pixels predicted background where *gt* is foreground (FN).

    Both arrays must have the same shape.
    """
    assert pred.shape == gt.shape
    # (NOT pred) AND gt; avoids the np.int alias removed in NumPy 1.24.
    return int(np.sum(np.logical_and(np.logical_not(pred), gt)))
def calc_preformance(pred_path, gt_path, img_resize, threshold=128):
    """Compare a predicted segmentation mask against its ground truth.

    Both images are binarised (see prepro_image) before the confusion
    counts and the derived metrics are computed. Grayscale images only.
    (NOTE(review): "preformance" is a typo, but the name is public API
    used by save_performace_to_csv, so it is kept.)

    :param pred_path: path of the predicted mask image
    :param gt_path: path of the ground-truth mask image
    :param img_resize: (width, height) both images are resized to
    :param threshold: binarisation threshold passed to prepro_image
    :return: TP, TN, FP, FN, accuracy, precision, recall, IOU, Dice,
             VOE, RVD, specificity
    """
    total_pix = reduce(lambda x, y: x * y, img_resize)
    pred_image = prepro_image(pred_path, img_resize, threshold)
    mask_image = prepro_image(gt_path, img_resize, threshold)
    # confusion-matrix counts (the unused timing variable was removed)
    tp = true_positive(pred_image, mask_image)
    tn = true_negative(pred_image, mask_image)
    fp = false_positive(pred_image, mask_image)
    fn = false_negative(pred_image, mask_image)
    # derived metrics; the 1e-10 terms guard against division by zero on empty masks
    accuracy = (tp + tn) / total_pix
    precision = tp / (tp + fp + 1e-10)
    recall = tp / (tp + fn + 1e-10)
    iou = tp / (tp + fp + fn + 1e-10)
    dice = 2 * tp / (fn + tp + tp + fp + 1e-10)
    voe = 1 - tp / (tp + fn + fp + 1e-10)
    rvd = (fp - fn) / (fn + tp + 1e-10)
    specificity = tn / (tn + fp + 1e-10)
    return tp, tn, fp, fn, accuracy, precision, recall, iou, dice, voe, rvd, specificity
def save_performace_to_csv(pred_dir, gt_dir, img_resize, csv_save_name, csv_save_path='', threshold=128):
    """Compute per-image segmentation metrics and save them (plus averages) to CSV.

    Writes ``<csv_save_name>.csv`` with one row per image pair and
    ``analysis_<csv_save_name>.csv`` with the column means.

    :param pred_dir: directory of predicted mask images
    :param gt_dir: directory of ground-truth mask images
    :param img_resize: tuple, images are resized to this before comparison
    :param csv_save_name: base name (without extension) of the output CSV
    :param csv_save_path: directory to write the CSVs into
    :param threshold: binarisation threshold forwarded to calc_preformance
    :return: mean dice, mean IOU, mean precision, mean recall
    """
    gt_paths, gt_names = get_path(gt_dir)
    pred_paths, pred_names = get_path(pred_dir)
    columns = [
        'pred_name', 'gt_name', 'TP', 'FP', 'FN', 'TN',
        'accuracy', 'precision', 'recall', 'IOU', 'DICE', 'VOE', 'RVD', 'specificity',
    ]
    # DataFrame.append was removed in pandas 2.0: collect plain dicts and
    # build the frame once at the end instead.
    rows = []
    total_file_nums = len(gt_paths)
    for file_index in tqdm(range(total_file_nums), total=total_file_nums):
        TP, TN, FP, FN, accuracy, precision, recall, IOU, DICE, VOE, RVD, specificity = calc_preformance(
            pred_paths[file_index], gt_paths[file_index], img_resize, threshold)
        rows.append({
            'pred_name': pred_names[file_index],
            'gt_name': gt_names[file_index],
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'specificity': specificity,
            'IOU': IOU,
            'DICE': DICE,
            'VOE': VOE,
            'RVD': RVD,
            'TP': TP, 'FP': FP, 'FN': FN, 'TN': TN
        })
    record_pd = pd.DataFrame(rows, columns=columns)
    record_pd.to_csv(
        os.path.join(csv_save_path, '{}.csv'.format(csv_save_name)), index=True, header=True)
    # compute and store the column means
    m_accuracy, m_precision, m_recall, m_iou, m_dice, m_voe, m_rvd, m_spec = analysis_performance(
        os.path.join(csv_save_path, '{}.csv'.format(csv_save_name)))
    analysis_pd = pd.DataFrame([{
        'm_accu': m_accuracy, 'm_prec': m_precision, 'm_recall': m_recall, 'm_iou': m_iou,
        'm_dice': m_dice, 'm_voe': m_voe, 'm_rvd': m_rvd, 'm_spec': m_spec,
    }], columns=[
        'm_accu', 'm_prec', 'm_recall', 'm_iou', 'm_dice', 'm_voe', 'm_rvd', 'm_spec'
    ])
    analysis_pd.to_csv(
        os.path.join(csv_save_path, 'analysis_{}.csv'.format(csv_save_name)), index=True, header=True)
    return m_dice, m_iou, m_precision, m_recall
def analysis_performance(csv_file_path):
    """Average each performance column of a CSV written by save_performace_to_csv.

    The file is read without a header, so row 0 is the header line and is
    skipped via ``loc[1:]``. Columns 7..14 hold accuracy, precision,
    recall, IOU, DICE, VOE, RVD and specificity (column 0 is the saved
    index, 1-2 the file names, 3-6 the raw TP/FP/FN/TN counts).

    :param csv_file_path: path of the per-image metrics CSV
    :return: the eight column means, in the order listed above
    """
    data_frame = pd.read_csv(csv_file_path, header=None)
    # One loop instead of eight copy-pasted statements.
    means = tuple(
        np.mean(np.array(data_frame.loc[1:, column], dtype=np.float32))
        for column in range(7, 15)
    )
    m_accuracy, m_precision, m_recall, m_iou, m_dice, m_voe, m_rvd, m_spec = means
    print(
        ' accuracy: {},\n precision: {},\n recall: {},\n iou: {},\n dice: {},\n voe: {},\n rvd: {},\n spec: {}.\n'.format(
            m_accuracy, m_precision, m_recall, m_iou, m_dice, m_voe, m_rvd, m_spec))
    return m_accuracy, m_precision, m_recall, m_iou, m_dice, m_voe, m_rvd, m_spec
def f1_score(precision, recall):
    """Return the F1 score (harmonic mean of *precision* and *recall*).

    Returns 0.0 when both inputs are zero instead of raising
    ZeroDivisionError, matching the usual convention for degenerate cases.
    """
    if precision + recall == 0:
        return 0.0
    return (2 * precision * recall) / (precision + recall)
if __name__ == '__main__':
    # Example batch evaluation kept for reference (local Windows paths):
    # pred_dir_ = r'E:\tookit_backup\毕业论文\程序\DWNET\invalid_pred_crop\epoch_1'
    # gt_dir_ = r'E:\tookit_backup\毕业论文\程序\data\invalid_mask'
    # save_performace_to_csv(pred_dir_, gt_dir_,
    #                        (256, 256),
    #                        'test', r'E:\tookit_backup\毕业论文\程序\DWNET\invalid_pred_crop')
    # Quick sanity check: binarise one predicted mask and print how many
    # pixels landed in each class (expects a local file; adjust the path).
    img_path_ = r'D:\Users\YingYing\Desktop\data\unet_bn_predict_mask\pred_mask\007582.jpg'
    bin_image = prepro_image(img_path_, (256, 256))
    print(Counter(np.reshape(bin_image, (-1,))))
|
<gh_stars>1-10
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import random
import time
from bubblesort import bubble_sort
from selectionsort import selection_sort
from insertionsort import insertion_sort
from mergesort import merge_sort
from quicksort import quick_sort
# Main application window.
root = Tk()
# NOTE(review): "Visulaiser" looks like a typo for "Visualiser" — window title only.
root.title("Sorting Algorithm Visulaiser")
root.maxsize(900,600)
# background color
root.config(bg="black")
# Currently selected algorithm name (bound to the combobox below).
selected_algo = StringVar()
# The list of values being visualised / sorted.
data = []
def drawData(data, color):
    """Redraw *data* as a bar chart on the global canvas.

    :param data: list of positive numbers to draw
    :param color: list of tkinter color names, one per bar
    """
    canvas.delete("all")
    c_height = 380
    c_width = 580
    # don't start from the border
    offset = 10
    # spacing between the bars
    spacing = 2
    # width of each bar
    x_width = (c_width - spacing * len(data)) / (len(data)) + 1
    # scale all values to [0, 1] relative to the current maximum
    normalisedData = [i / max(data) for i in data]
    prev = offset
    for i, height in enumerate(normalisedData):
        # top-left corner of the bar
        x0 = prev + spacing
        y0 = c_height - height * 340
        # bottom-right corner
        x1 = x0 + x_width
        y1 = c_height
        canvas.create_rectangle(x0, y0, x1, y1, fill=color[i])
        prev = x1
    root.update_idletasks()
def Generate():
    """Read size/min/max from the entry widgets and draw a fresh random data set.

    Invalid or empty entries fall back to defaults (min 3, max 1000,
    size 200); size is capped at 200 and max at 1000.
    """
    global data
    # int() on a non-numeric entry raises ValueError; don't swallow anything broader.
    try:
        minVal = max(int(minEntry.get()), 0)
    except ValueError:
        minVal = 3
    try:
        maxVal = min(int(maxEntry.get()), 1000)
    except ValueError:
        maxVal = 1000
    try:
        size = min(200, int(sizeEntry.get()))
        if size < 0:
            size = 200
    except ValueError:
        size = 200
    # generate the random list and a matching color per bar
    data = [random.randrange(minVal, maxVal + 1) for _ in range(size)]
    color = ['sky blue'] * size
    drawData(data, color)
def StartAlgorithm():
    """Run the algorithm selected in the combobox on the current data set."""
    global data
    # Scale position -> per-step delay in seconds; position 1 keeps its raw value.
    delay_for_position = {2: 0.5, 3: 0.1, 4: 0.05, 5: 0.01, 6: 0.005, 7: 0.001}
    speed = int(speedScale.get())
    speed = delay_for_position.get(speed, speed)
    algorithm = selected_algo.get()
    if algorithm == "Bubble Sort":
        bubble_sort(data, drawData, speed)
    elif algorithm == "Selection Sort":
        selection_sort(data, drawData, speed)
    elif algorithm == "Insertion Sort":
        insertion_sort(data, drawData, speed)
    elif algorithm == "Merge Sort":
        merge_sort(data, 0, len(data) - 1, drawData, speed)
        # repaint everything green once the merge sort has finished
        drawData(data, ['light green' for x in range(len(data))])
        time.sleep(speed)
    elif algorithm == "Quick Sort":
        quick_sort(data, drawData, speed)
def mabout():
    """Show the 'About Me' info dialog.

    Uses the public ``messagebox.showinfo`` instead of the private
    ``messagebox._show`` helper; the only visible difference is the
    standard info icon.
    """
    messagebox.showinfo(title="About Me", message=" Name: <NAME>\n Email: <EMAIL>\n Codechef Handle: diptayan\n Codeforces Handle: kakarotto_sama\n Ratings:\n - 6* at Codechef\n - Expert at Codeforces")
# frame / base layout: controls on top, drawing canvas below
UI_frame = Frame(root, width=600, height=200, bg='green')
UI_frame.grid(row=0,column=0, padx=0 ,pady=5)
canvas = Canvas(root, width=800, height=380, bg='white')
canvas.grid(row=1, column=0, padx=10, pady=5)
# user interface area
# row[0]: data-set parameters
# size of data
Label(UI_frame , text="Size of Data : " , bg='Green').grid(row=0, column=0, padx=5, pady=5, sticky=W)
sizeEntry = Entry(UI_frame)
sizeEntry.grid(row=0, column=1, padx=5, pady=5, sticky=W)
# minimum value of data
Label(UI_frame , text="Minimum Value: " , bg='Green').grid(row=0, column=2, padx=5, pady=5, sticky=W)
minEntry = Entry(UI_frame)
minEntry.grid(row=0, column=3, padx=5, pady=5, sticky=W)
# maximum value of data
Label(UI_frame , text="Maximum Value: " , bg='Green').grid(row=0, column=4, padx=5, pady=5, sticky=W)
maxEntry = Entry(UI_frame)
maxEntry.grid(row=0, column=5, padx=5, pady=5, sticky=W)
# Generate button: builds a new random data set from the entries above
Button(UI_frame, text="Generate", command=Generate, bg='yellow').grid(row=0, column=6, padx=5, pady=5)
# row[1]: algorithm selection and run controls
Label(UI_frame , text="Select Algorithm" , bg='Green').grid(row=1, column=0, padx=5, pady=5, sticky=W)
# Drop down menu for algorithm selection
algMenu=ttk.Combobox(UI_frame, textvariable=selected_algo, values=['Bubble Sort', 'Selection Sort', 'Insertion Sort', 'Merge Sort', 'Quick Sort'])
algMenu.grid(row=1, column=1, padx=5, pady=5)
# In case no algorithm is selected, the default value is first option
algMenu.current(0)
# speed scale: position 1-7, mapped to per-step delays in StartAlgorithm
speedScale = Scale(UI_frame,from_=1, to=7, length=200, digits=2, resolution=1, orient=HORIZONTAL, label="Select Speed :", bg="sky blue")
speedScale.grid(row=1, column=3, padx=5, pady=5)
# start button: runs the selected algorithm on the current data
Button(UI_frame, text="Start", command=StartAlgorithm, bg='red').grid(row=1, column=4, padx=5, pady=5)
# About button
Button(UI_frame, text="About Me", command=mabout, bg='red').grid(row=1, column=5, padx=5, pady=5)
root.mainloop()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Copyright (c) <NAME> 2016. All Rights Reserved.
# slipy/spectrum/spectrum.py
"""Spectrum class interface."""
from algorithms import *
from calibration import *
from core import *
from etc import *
from gui import *
from measurement import *
from operations import *
from astropy.utils.exceptions import AstropyUserWarning
class SpectrumError(Exception):
    """Exception raised for errors specific to the Spectrum object."""
class SpectrumWarning(AstropyUserWarning):
    """Warning category specific to the Spectrum object."""
class Spectrum:
    """Object-oriented facade for a one-dimensional spectrum.

    Every method delegates to a module-level ``_name`` implementation
    pulled in by the star-imports at the top of this file; this class
    only defines the public interface.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes
    instances unhashable; presumably intentional — confirm.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the spectrum (delegates to ``_init``)."""
        _init(self, *args, **kwargs)

    def copy(self):
        """Return a copy of this spectrum."""
        return _copy(self)

    def resample(self, *args, **kwargs):
        """Resample the spectrum onto a new wavelength grid."""
        return _resample(self, *args, **kwargs)

    def insert(self, *args, **kwargs):
        """Insert data into the spectrum."""
        return _insert(self, *args, **kwargs)

    def normalize(self, *args, **kwargs):
        """Normalize the spectrum."""
        return _normalize(self, *args, **kwargs)

    def remove_telluric(self, *args, **kwargs):
        """Remove telluric features from the spectrum."""
        return _remove_telluric(self, *args, **kwargs)

    def apply_velocity_correction(self, *args, **kwargs):
        """Apply a velocity correction to the wavelength axis."""
        return _apply_velocity_correction(self, *args, **kwargs)

    def draw(self, *args, **kwargs):
        """Draw the spectrum."""
        return _draw(self, *args, **kwargs)

    def select_line(self, *args, **kwargs):
        """Interactively select a spectral line."""
        return _select_line(self, *args, **kwargs)

    def extract_line(self, *args, **kwargs):
        """Extract a spectral line from the spectrum."""
        return _extract_line(self, *args, **kwargs)

    def auto_fit(self, *args, **kwargs):
        """Automatically fit the spectrum."""
        return _auto_fit(self, *args, **kwargs)

    def fit(self, *args, **kwargs):
        """Fit the spectrum."""
        return _fit(self, *args, **kwargs)

    def deblend(self, *args, **kwargs):
        """Deblend overlapping features."""
        return _deblend(self, *args, **kwargs)

    def optical_depth(self, *args, **kwargs):
        """Compute the optical depth."""
        return _optical_depth(self, *args, **kwargs)

    def equivalent_width(self, *args, **kwargs):
        """Compute the equivalent width."""
        return _equivalent_width(self, *args, **kwargs)

    def column_density(self, *args, **kwargs):
        """Compute the column density."""
        return _column_density(self, *args, **kwargs)

    def __getitem__(self, key):
        """Index or slice the spectrum."""
        # fixed: a stray trailing colon here was a SyntaxError
        return _getitem(self, key)

    def __str__(self):
        """Informal string representation."""
        return _str(self)

    def __repr__(self):
        """Formal string representation."""
        return _repr(self)

    def __len__(self):
        """Number of elements in the spectrum."""
        return _len(self)

    def __contains__(self, other):
        """Membership test."""
        return _contains(self, other)

    def __add__(self, other):
        """Addition."""
        return _add(self, other)

    def __sub__(self, other):
        """Subtraction."""
        return _sub(self, other)

    def __mul__(self, other):
        """Multiplication."""
        return _mul(self, other)

    def __truediv__(self, other):
        """Division."""
        return _truediv(self, other)

    def __iadd__(self, other):
        """In-place addition."""
        return _iadd(self, other)

    def __isub__(self, other):
        """In-place subtraction."""
        return _isub(self, other)

    def __imul__(self, other):
        """In-place multiplication."""
        return _imul(self, other)

    def __itruediv__(self, other):
        """In-place division."""
        return _itruediv(self, other)

    def __radd__(self, other):
        """Reflected addition."""
        return _radd(self, other)

    def __rsub__(self, other):
        """Reflected subtraction."""
        return _rsub(self, other)

    def __rmul__(self, other):
        """Reflected multiplication."""
        return _rmul(self, other)

    def __rtruediv__(self, other):
        """Reflected division."""
        return _rtruediv(self, other)

    def __lshift__(self, other):
        """Left-shift operator."""
        return _lshift(self, other)

    def __rshift__(self, other):
        """Right-shift operator."""
        return _rshift(self, other)

    def __eq__(self, other):
        """Equality comparison."""
        return _eq(self, other)

    def __ne__(self, other):
        """Inequality comparison."""
        return _ne(self, other)

    def __lt__(self, other):
        """Less-than comparison."""
        return _lt(self, other)

    def __gt__(self, other):
        """Greater-than comparison."""
        return _gt(self, other)

    def __le__(self, other):
        """Less-than-or-equal comparison."""
        return _le(self, other)

    def __ge__(self, other):
        """Greater-than-or-equal comparison."""
        return _ge(self, other)

    def __and__(self, other):
        """Bitwise/logical AND."""
        return _and(self, other)

    def __or__(self, other):
        """Bitwise/logical OR."""
        return _or(self, other)

    def __xor__(self, other):
        """Bitwise/logical XOR."""
        return _xor(self, other)

    def __rand__(self, other):
        """Reflected AND."""
        return _rand(self, other)

    def __ror__(self, other):
        """Reflected OR."""
        return _ror(self, other)

    def __rxor__(self, other):
        """Reflected XOR."""
        return _rxor(self, other)

    @classmethod
    def rms(cls, *args, **kwargs):
        """Compute the RMS (class-level operation)."""
        return _rms(cls, *args, **kwargs)

    @classmethod
    def xcorr(cls, *args, **kwargs):
        """Cross-correlate spectra (class-level operation)."""
        # NOTE(review): ``_xorr`` looks like a typo for ``_xcorr`` — the
        # implementation comes from a star-import, so it is kept as-is
        # until the defining module can be checked.
        return _xorr(cls, *args, **kwargs)
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2016-2017, <NAME> (www.karlsruhe.de)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Tests for the geoextract web app.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import contextlib
import multiprocessing
from bs4 import BeautifulSoup
import requests
from geoextract import __version__ as geoextract_version, Pipeline
from . import stop_process, wait_for_server
SERVER_URL = 'http://localhost:5000'
EXTRACT_URL = SERVER_URL + '/api/v1/extract'
class AppProcess(multiprocessing.Process):
    '''
    Runs a geoextract app server in a child process and can stop it again.
    '''
    def __init__(self, *args, **kwargs):
        '''
        Build the pipeline and its app.

        All arguments are forwarded to ``geoextract.Pipeline``.
        '''
        super(AppProcess, self).__init__()
        self.pipeline = Pipeline(*args, **kwargs)
        self.app = self.pipeline.create_app()

    def run(self):
        '''
        Entry point of the child process: serve the app.
        '''
        self.app.run()

    def stop(self):
        '''
        Terminate the server process.

        Raises ``RuntimeError`` if the process was never started.
        '''
        if self.pid is None:
            raise RuntimeError('Process is not running.')
        stop_process(self.pid, delay=10)
@contextlib.contextmanager
def app(locations=(), *args, **kwargs):
    '''
    Context manager that provides a running geoextract app.

    All arguments are forwarded to ``geoextract.Pipeline``. The server is
    stopped again when the context exits, even on error.
    '''
    server = AppProcess(locations, *args, **kwargs)
    server.start()
    try:
        wait_for_server(SERVER_URL)
        yield
    finally:
        server.stop()
def html2text(html):
    '''
    Return the plain text contained in a piece of HTML code.
    '''
    soup = BeautifulSoup(html, 'html.parser')
    return soup.get_text()
class TestApp(object):
    '''
    Tests for the web app.
    '''
    def test_extract_get(self):
        '''
        ``extract`` must reject GET requests.
        '''
        with app():
            response = requests.get(EXTRACT_URL)
            assert response.status_code == 405

    def test_extract_no_input(self):
        '''
        Calling ``extract`` without input is a client error.
        '''
        with app():
            response = requests.post(EXTRACT_URL)
            assert response.status_code == 400
            assert 'Missing "text" parameter' in html2text(response.text)

    def test_extract_no_utf8(self):
        '''
        ``extract`` rejects text that is not valid UTF-8.
        '''
        with app():
            not_utf8 = 'öäü'.encode('latin1')
            response = requests.post(EXTRACT_URL,
                                     files={'text': ('x.txt', not_utf8)})
            assert response.status_code == 400
            body = html2text(response.text)
            assert 'Decoding error' in body
            assert 'UTF-8' in body

    def test_extract_success(self):
        '''
        A well-formed ``extract`` call returns the matched locations.
        '''
        locations = [{'name': 'a-location'}]
        with app(locations):
            document = 'a-location is the place to be'
            response = requests.post(EXTRACT_URL,
                                     files={'text': ('x.txt', document)})
            assert response.status_code == 200
            assert response.json() == locations

    def test_root(self):
        '''
        The root document mentions the project and its version.
        '''
        with app():
            response = requests.get(SERVER_URL)
            body = html2text(response.text)
            assert 'GeoExtract' in body
            assert geoextract_version in body
|
<gh_stars>1-10
import asyncio
import aiohttp
import async_timeout
import copy
class tdCall(object):
    """
    Prepares arguments for making HTTP calls to TDAmeritrade API.
    Gets data from TDAmeritrade API.
    Processes the result from TDAmeritrade.

    NOTE(review): the endpoint templates (``_historical``, ``_quotes``,
    ``_options``) are mutable class attributes shared by all instances;
    ``set_single_config`` mutates them in place — confirm this sharing is
    intended before using multiple instances concurrently.
    """
    base_url = "https://api.tdameritrade.com/v1/"

    def __init__(self, key, symbols="SPY", configs=None):
        """
        :param key: string TDAmeritrade Access Key
        :param symbols: string or list, Stock Symbols
        :param configs: dict or list of dicts,
            required:
                route: string
                    "q" : qoutes endpoint
                    "h" : price history endpoint
                    "o" : options endpoint
            optional:
                Each endpoint has different parameters. See TDAmeritrade's developer website.
            Examples:
                This will get daily historical prices for the last three months.
                    {"route": "h", "periodType": "month", "period": 3, "frequencyType": "daily"}
                This will get vertical spreads for just calls, within one strike from at the money.
                    {"route": "o", "contractType": "CALL", "strikeCount": 3, "strategy": "VERTICAL"}
                This will get quotes and options with all other arguments using the api's default.
                    [{"route": "q"}, {"route": "o"}]
        """
        self.key = key
        self.raw_configs = configs
        # normalise a single symbol string to a one-element list
        if isinstance(symbols, str):
            self.symbols = [symbols]
        else:
            self.symbols = symbols

    def get_configs(self):
        """
        :return: prepared config objects capable of completely setting up HTTP get arguments
        """
        if not self.raw_configs:
            # no configuration: default to the quotes endpoint
            return [self.quotes]
        # A single dict is one configuration; a list holds several.
        # (Testing the type directly fixes the previous ``len(...) == 1 and
        # isinstance(...)`` check, which wrongly rejected any dict config
        # with more than one key and then iterated over its keys.)
        if isinstance(self.raw_configs, dict):
            return [self.set_single_config(self.raw_configs)]
        return [self.set_single_config(config) for config in self.raw_configs]

    def set_single_config(self, raw_config):
        """
        :param raw_config: dict The user provided configuration.
        :return: a single config object
        :raises ValueError: if the configuration is empty or lacks a route
        """
        if not raw_config:
            raise ValueError("Your configuration was empty. You don't have to include a configuration but if you do it must contain values.")
        elif 'route' not in raw_config.keys():
            raise ValueError('You specified a configuration without a route. All configurations must include a route.')
        elif raw_config['route'] == 'h':
            static_config = self.hist
        elif raw_config['route'] == 'o':
            static_config = self.opts
        else:
            # any other route falls back to quotes
            static_config = self.quotes
        # copy every user parameter (except the route selector) into the template
        for index, value in raw_config.items():
            if index == 'route':
                continue
            static_config['get_params']['param_vars'][index] = value
        return static_config

    def get_symbol_lists(self, symbol_file_name):
        """
        Given a file name will asynchronously parse a list of symbols into a list of lists of symbols with each member list being 10 symbols long. This function was created to allow making a get request for the 10 symbols, processing the results (for example saving locally), an then making another get request. This assists in the event there is an error in getting data, the entire list need not be retreived.
        :param symbol_file_name:
        :return: list of lists of symbols
        """
        coroutine = self.set_parse_symbols(symbol_file_name)
        symbols = self.wrap_schedule(coroutine)
        return symbols

    async def get_coroutines(self, configs, symbols):
        """
        Using the ClientSession it creates a list of functions ready to be executed. Then using gather asynchronous calls are made using the list.
        :param configs: list
        :param symbols: list
        :return: coroutine functions
        """
        async with aiohttp.ClientSession() as session:
            all_calls = []
            for config in configs:
                tasks = []
                if config['func'] == 'test':
                    return 1
                if config['func'] == 'options':
                    # options requests are made per-symbol; deep-copy so the
                    # shared template is not mutated between requests
                    for item in symbols:
                        config['get_params']['param_vars']['symbol'] = item
                        tasks.append(self.options(session, copy.deepcopy(config['get_params'])))
                elif config['func'] == 'multi_quote':
                    # join turns the list into a comma-separated string,
                    # which the quotes endpoint accepts for multiple symbols
                    tasks.append(self.multi_quote(session, ','.join(symbols), config['get_params']))
                else:
                    tasks = [self.historical_prices(session, item, config['get_params']) for item in symbols]
                single_endpoint = await asyncio.gather(*tasks)
                all_calls.append(single_endpoint)
            return all_calls

    async def historical_prices(self, session, symbol, get_params):
        """
        Make one GET call against the price-history endpoint.
        Only works as part of the get_coroutines flow.
        :param session: obj
        :param symbol: str
        :param get_params: dict
        :return: parsed JSON response
        """
        with async_timeout.timeout(100):
            async with session.get(self.base_url + get_params['path'][0] + "/" + symbol + "/" + get_params['path'][1],
                                   headers=get_params['headers'], params=get_params['param_vars']) as response:
                return await response.json()

    async def options(self, session, get_params):
        """
        Make one GET call against the option-chains endpoint.
        :param session: obj
        :param get_params: dict
        :return: parsed JSON response
        """
        with async_timeout.timeout(100):
            async with session.get(self.base_url + get_params['path'][0] + "/" + get_params['path'][1],
                                   headers=get_params['headers'], params=get_params['param_vars']) as response:
                return await response.json()

    async def multi_quote(self, session, symbols, get_params):
        """
        Make one GET call against the quotes endpoint for several symbols.
        :param session: obj
        :param symbols: str, comma-separated symbol list
        :param get_params: dict
        :return: parsed JSON response
        """
        with async_timeout.timeout(100):
            async with session.get(self.base_url + get_params['path'][0] + "/" + get_params['path'][1],
                                   headers=get_params['headers'], params={'symbol': symbols}) as response:
                return await response.json()

    def get_td_data(self):
        """
        Wrapper to get TDAmeritrade Data
        :return: list
        """
        configurations = self.get_configs()
        coroutines = self.get_coroutines(configurations, self.symbols)
        return self.wrap_schedule(coroutines)

    def flatten_response(self, list_of_lists_of_prices):
        """
        Flattens the response to a single list of dicts.
        :param list_of_lists_of_prices: list of lists
        :return: flat list
        """
        result = []
        for list_of_list_of_prices in list_of_lists_of_prices:
            # extend instead of a side-effect list comprehension
            result.extend(list_of_list_of_prices)
        return result

    def wrap_schedule(self, coroutines):
        """
        Scheduler for making asynchronous calls.
        :param coroutines: coroutine to run
        :return: the coroutine's result
        """
        loop = asyncio.get_event_loop()
        task = asyncio.ensure_future(coroutines)
        return loop.run_until_complete(task)

    async def set_parse_symbols(self, symbol_file_name):
        """
        :param symbol_file_name: string
        :return: list of lists
        """
        with open(symbol_file_name, 'r') as file:
            symbols = []
            for l in file:
                parsed_line = await self._parse_line(l)
                if all(isinstance(elem, list) for elem in parsed_line):
                    for p in parsed_line:
                        symbols.append(p)
                else:
                    symbols.append(parsed_line)
            return symbols

    async def _parse_line(self, line):
        """
        Wrap one input line in brackets and chunk it into pieces of 10.
        :param line: str (newline-terminated)
        :return: list of string chunks, or the wrapped string if short
        """
        parsed_line = "[" + line[:-1] + "]"
        if len(parsed_line) > 10:
            queue = [parsed_line[i:i + 10] for i in range(0, len(parsed_line), 10)]
        else:
            queue = parsed_line
        # yield control to the event loop between lines
        await asyncio.sleep(0)
        return queue

    # endpoint template for price history (see hist property)
    _historical = {
        "get_params": {
            "param_vars": {},
            "path": [
                "marketdata",
                "pricehistory"
            ],
            "headers": {
                "Authorization": None
            }
        },
        "func": "historical_prices"
    }

    @property
    def hist(self):
        """Price-history template with the bearer token filled in."""
        self._historical['get_params']['headers']['Authorization'] = "Bearer " + self.key
        return self._historical

    # endpoint template for quotes (see quotes property)
    _quotes = {
        "get_params": {
            "param_vars": {},
            "path": [
                "marketdata",
                "quotes"
            ],
            "headers": {
                "Authorization": None
            }
        },
        "func": "multi_quote"
    }

    @property
    def quotes(self):
        """Quotes template with the bearer token filled in."""
        self._quotes['get_params']['headers']['Authorization'] = "Bearer " + self.key
        return self._quotes

    # endpoint template for option chains (see opts property)
    _options = {
        "func": "options",
        "get_params":
            {
                "param_vars": {},
                "path": [
                    "marketdata",
                    "chains"
                ],
                "headers": {"Authorization": None}
            }
    }

    @property
    def opts(self):
        """Option-chains template with the bearer token filled in."""
        self._options['get_params']['headers']['Authorization'] = "Bearer " + self.key
        return self._options
<filename>src/interface/BaseHardwareInterface.py
from monotonic import monotonic
ENTER_AT_PEAK_MARGIN = 5 # closest that captured enter-at level can be to node peak RSSI
class BaseHardwareInterface(object):
    """Common base for race-timing hardware interfaces.

    Holds calibration settings, tracks race status and processes the
    lap/RSSI statistics reported by the nodes. Subclasses are expected to
    provide ``self.nodes`` and the various ``*_callback`` attributes
    (``hardware_log_callback``, ``node_crossing_callback``,
    ``pass_record_callback``, ``new_enter_or_exit_at_callback``) —
    NOTE(review): none of these are initialised here; ``log`` would raise
    AttributeError if ``hardware_log_callback`` was never assigned.
    """

    # lap source identifiers
    LAP_SOURCE_REALTIME = 0
    LAP_SOURCE_MANUAL = 1
    LAP_SOURCE_RECALC = 2

    # race status values
    RACE_STATUS_READY = 0
    RACE_STATUS_STAGING = 3
    RACE_STATUS_RACING = 1
    RACE_STATUS_DONE = 2

    def __init__(self):
        self.calibration_threshold = 20
        self.calibration_offset = 10
        self.trigger_threshold = 20
        self.start_time = 1000*monotonic() # millis
        self.filter_ratio = 50
        self.race_status = BaseHardwareInterface.RACE_STATUS_READY

    def milliseconds(self):
        """Return the elapsed milliseconds since the start of the program.

        Fixed: ``start_time`` is stored in milliseconds, so it must be
        subtracted *after* converting the current time to milliseconds
        (the old ``1000*(monotonic() - self.start_time)`` mixed units).
        """
        return 1000*monotonic() - self.start_time

    def log(self, message):
        '''Hardware log of messages.'''
        if callable(self.hardware_log_callback):
            string = 'Interface: {0}'.format(message)
            self.hardware_log_callback(string)

    def process_lap_stats(self, node, readtime, lap_id, ms_val, cross_flag, pn_history, cross_list, upd_list):
        """Process one node's lap/crossing report.

        Appends to *cross_list* / *upd_list* (consumed later by
        ``process_crossings`` / ``process_updates``) and updates the
        node's capture and history state in place.
        """
        if cross_flag is not None and cross_flag != node.crossing_flag:  # if 'crossing' status changed
            node.crossing_flag = cross_flag
            if callable(self.node_crossing_callback):
                cross_list.append(node)
        # calc lap timestamp
        if ms_val < 0 or ms_val > 9999999:
            ms_val = 0  # don't allow negative or too-large value
            node.lap_timestamp = 0
        else:
            node.lap_timestamp = readtime - (ms_val / 1000.0)
        # if new lap detected for node then append item to updates list
        if lap_id != node.last_lap_id:
            upd_list.append((node, lap_id, node.lap_timestamp))
        # check if capturing enter-at level for node
        if node.cap_enter_at_flag:
            node.cap_enter_at_total += node.current_rssi
            node.cap_enter_at_count += 1
            if self.milliseconds() >= node.cap_enter_at_millis:
                node.enter_at_level = int(round(node.cap_enter_at_total / node.cap_enter_at_count))
                node.cap_enter_at_flag = False
                # if too close node peak then set a bit below node-peak RSSI value:
                if node.node_peak_rssi > 0 and node.node_peak_rssi - node.enter_at_level < ENTER_AT_PEAK_MARGIN:
                    node.enter_at_level = node.node_peak_rssi - ENTER_AT_PEAK_MARGIN
                if callable(self.new_enter_or_exit_at_callback):
                    self.new_enter_or_exit_at_callback(node, True)
        # check if capturing exit-at level for node
        if node.cap_exit_at_flag:
            node.cap_exit_at_total += node.current_rssi
            node.cap_exit_at_count += 1
            if self.milliseconds() >= node.cap_exit_at_millis:
                node.exit_at_level = int(round(node.cap_exit_at_total / node.cap_exit_at_count))
                node.cap_exit_at_flag = False
                if callable(self.new_enter_or_exit_at_callback):
                    self.new_enter_or_exit_at_callback(node, False)
        # prune history data if race is not running (keep last 60s)
        # (== instead of 'is': these statuses are plain ints, identity
        # comparison only worked via CPython's small-int caching)
        if self.race_status == BaseHardwareInterface.RACE_STATUS_READY:
            if len(node.history_times):
                while node.history_times[0] < (monotonic() - 60):
                    node.history_values.pop(0)
                    node.history_times.pop(0)
                    if not len(node.history_times):  # prevent while from destroying itself
                        break
        if pn_history and self.race_status != BaseHardwareInterface.RACE_STATUS_DONE:
            # get and process history data (except when race is over)
            pn_history.addTo(readtime, node.history_values, node.history_times, self)

    def process_crossings(self, cross_list):
        """Invoke the crossing callback for every node whose flag changed."""
        if len(cross_list) > 0:
            for node in cross_list:
                self.node_crossing_callback(node)

    def process_updates(self, upd_list):
        """Report new laps, in lap-time order when there is more than one."""
        if len(upd_list) > 0:
            if len(upd_list) == 1:  # list contains single item
                item = upd_list[0]
                node = item[0]
                if node.last_lap_id != -1 and callable(self.pass_record_callback):
                    self.pass_record_callback(node, item[2], BaseHardwareInterface.LAP_SOURCE_REALTIME)  # (node, lap_time_absolute)
                node.last_lap_id = item[1]  # new_lap_id
            else:  # list contains multiple items; sort so processed in order by lap time
                upd_list.sort(key=lambda i: i[0].lap_timestamp)
                for item in upd_list:
                    node = item[0]
                    if node.last_lap_id != -1 and callable(self.pass_record_callback):
                        self.pass_record_callback(node, item[2], BaseHardwareInterface.LAP_SOURCE_REALTIME)  # (node, lap_time_absolute)
                    node.last_lap_id = item[1]  # new_lap_id

    #
    # External functions for setting data
    #

    def intf_simulate_lap(self, node_index, ms_val):
        """Inject a manual (simulated) lap for the given node."""
        node = self.nodes[node_index]
        node.lap_timestamp = monotonic() - (ms_val / 1000.0)
        self.pass_record_callback(node, node.lap_timestamp, BaseHardwareInterface.LAP_SOURCE_MANUAL)

    def set_race_status(self, race_status):
        """Set the current race status (one of the RACE_STATUS_* values)."""
        self.race_status = race_status

    #
    # Get Json Node Data Functions
    #

    def get_settings_json(self):
        return {
            'nodes': [node.get_settings_json() for node in self.nodes],
            'calibration_threshold': self.calibration_threshold,
            'calibration_offset': self.calibration_offset,
            'trigger_threshold': self.trigger_threshold,
            'filter_ratio': self.filter_ratio
        }

    def get_heartbeat_json(self):
        return {
            'current_rssi': [node.current_rssi for node in self.nodes],
            'loop_time': [node.loop_time for node in self.nodes],
            'crossing_flag': [node.crossing_flag for node in self.nodes]
        }

    def get_calibration_threshold_json(self):
        return {
            'calibration_threshold': self.calibration_threshold
        }

    def get_calibration_offset_json(self):
        return {
            'calibration_offset': self.calibration_offset
        }

    def get_trigger_threshold_json(self):
        return {
            'trigger_threshold': self.trigger_threshold
        }

    def get_filter_ratio_json(self):
        return {
            'filter_ratio': self.filter_ratio
        }

    def get_frequency_json(self, node_index):
        node = self.nodes[node_index]
        return {
            'node': node.index,
            'frequency': node.frequency
        }
class PeakNadirHistory:
    """Peak/nadir RSSI readings reported by a node for one read cycle.

    Attribute values (``peakRssi``, ``peakFirstTime``, ``peakLastTime``,
    ``nadirRssi``, ``nadirTime``) are assigned externally before
    ``addTo`` is called; the times are millisecond offsets measured
    backwards from ``readtime``.
    """

    def _append_peak(self, readtime, history_values, history_times, interface):
        """Append the peak reading(s); log instead when the peak times are inconsistent.

        This replaces three byte-identical copies of the same logic in the
        original ``addTo``.
        """
        if self.peakFirstTime > self.peakLastTime:
            # peak level was held for a while: record first and last moment
            history_values.append(self.peakRssi)
            history_times.append(readtime - (self.peakFirstTime / 1000.0))
            history_values.append(self.peakRssi)
            history_times.append(readtime - (self.peakLastTime / 1000.0))
        elif self.peakFirstTime == self.peakLastTime:
            history_values.append(self.peakRssi)
            history_times.append(readtime - (self.peakLastTime / 1000.0))
        else:
            interface.log('Ignoring corrupted peak history times ({0} < {1})'.format(self.peakFirstTime, self.peakLastTime))

    def _append_nadir(self, readtime, history_values, history_times):
        """Append the nadir reading."""
        history_values.append(self.nadirRssi)
        history_times.append(readtime - (self.nadirTime / 1000.0))

    def addTo(self, readtime, history_values, history_times, interface):
        """Append this cycle's peak/nadir data to the history lists.

        When both a peak and a nadir are present they are emitted oldest
        first (a larger offset back from *readtime* means older).
        """
        if self.peakRssi > 0:
            if self.nadirRssi > 0:
                # both present
                if self.peakLastTime > self.nadirTime:
                    # process peak first
                    self._append_peak(readtime, history_values, history_times, interface)
                    self._append_nadir(readtime, history_values, history_times)
                else:
                    # process nadir first
                    self._append_nadir(readtime, history_values, history_times)
                    self._append_peak(readtime, history_values, history_times, interface)
            else:
                # peak only
                self._append_peak(readtime, history_values, history_times, interface)
        elif self.nadirRssi > 0:
            # nadir only
            self._append_nadir(readtime, history_values, history_times)
|
<filename>onefile-example_douyin.py
# Example script: upload a demo video to Douyin via ytb_up's DouyinUpload.
# from ytb_up import *
from ytb_up.douyin import DouyinUpload
from datetime import datetime,date,timedelta
import asyncio
import json
profilepath = ''
# Exported browser cookies for the Douyin account (JSON file).
CHANNEL_COOKIES='assets/douyin-cookie.json'
videopath = 'assets/hello.mp4'
# NOTE(review): this is a single comma-joined string inside a list —
# possibly intended as three separate tags ['ba', 'baaa', 'bababa']; confirm
# against DouyinUpload's expected `tags` format.
tags = ['ba,baaa,bababa']
publish_date = ''
# If you use some kind of proxy to access the site, set it here...
proxy_option = "socks5://127.0.0.1:1080"
# ...the empty string below disables the proxy (it overrides the line above).
proxy_option=''
# Title setup (kept short to stay within the platform limit).
title = 'bababala'
# Truncate to 95 characters — presumably the Douyin title limit; verify.
title=title[:95]
username = "antivte"
password = ""
description = '========================'
thumbnail = './9-16.jpeg'
# Overrides the previous assignment; './hello.png' is the thumbnail used.
thumbnail = './hello.png'
# Video file format: please use mp4 or webm.
# Douyin title format:
# bababala #话题1 #话题2 @好友1 @好友2  (i.e. title, hashtags, @-mentions)
upload = DouyinUpload(
    # Use r"" for paths; this avoids escape-sequence errors such as "\n".
    root_profile_directory='',
    proxy_option=proxy_option,
    watcheveryuploadstep=True,
    # For silent background running, set watcheveryuploadstep to False.
    CHANNEL_COOKIES=CHANNEL_COOKIES,
    username=username,
    password=password,
    recordvideo=True
    # Recording enabled for test purposes, to check the upload step by step.
)
# Today's date; the schedule helpers below derive publish times from it.
today = date.today()
def instantpublish():
    """Upload the demo video and publish it immediately (publishpolicy '立即发布')."""
    upload_kwargs = dict(
        videopath=videopath,
        title='instant publish-test-005',
        description=description,
        thumbnail=thumbnail,
        tags=tags,
        closewhen100percentupload=True,
        location='深圳',
        miniprogram='',
        heji='heji1',
        hottopic='老公',
        up2toutiao=False,
        allow2save=True,
        allow2see='公开',
        publishpolicy='立即发布',
    )
    asyncio.run(upload.upload(**upload_kwargs))
def saveasprivatedraft():
    """Upload the demo video but keep it as a private draft (publishpolicy=0)."""
    draft_coro = upload.upload(
        videopath=videopath,
        title='private draft-test-004',
        description=description,
        thumbnail=thumbnail,
        tags=tags,
        closewhen100percentupload=True,
        publishpolicy=0,
    )
    asyncio.run(draft_coro)
def scheduletopublish_tomorrow():
    """Upload the demo video scheduled ('定时发布') for 10:15 tomorrow.

    Bug fix: the previous version built 10:15 of *today* and never added the
    one-day offset its own name and inline comment promised, so the video was
    scheduled for a time that could already be in the past.

    Scheduling modes supported by upload():
    - mode a: release_offset set, publish_date set -> publish_date is the
      starting date for incremental scheduling
    - mode b: release_offset unset, publish_date set -> schedule exactly then
    - mode c: both unset -> daily increment starting tomorrow
    - mode d: release_offset set, publish_date unset -> increment with the
      given offset starting tomorrow
    """
    publish_date = datetime(today.year, today.month, today.day, 10, 15)
    # Shift to tomorrow; change 1 to another number to start N days from now.
    publish_date += timedelta(days=1)
    asyncio.run(upload.upload(
        videopath=videopath,
        title='instant publish-test-005',
        description=description,
        thumbnail=thumbnail,
        tags=tags,
        closewhen100percentupload=True,
        location='深圳市',
        miniprogram='',
        heji='heji1',
        hottopic='老公',
        up2toutiao=False,
        allow2save=True,
        allow2see='公开',
        publishpolicy='定时发布',
        publish_date=publish_date
    ))
def scheduletopublish_7dayslater():
    """Upload the demo video scheduled for 10:15 one week from today.

    Uses release_offset together with publish_date (scheduling "mode a"):
    publish_date is the starting date for incremental scheduling.
    """
    # 10:15 today, pushed forward a full week in one expression.
    target = datetime(today.year, today.month, today.day, 10, 15) + timedelta(days=7)
    asyncio.run(upload.upload(
        videopath=videopath,
        title='7days later-test-003',
        description=description,
        thumbnail=thumbnail,
        tags=tags,
        release_offset='0-1',
        closewhen100percentupload=True,
        publishpolicy=2,
        publish_date=target
    ))
def scheduletopublish_specific_date():
    # Scheduling modes supported by upload():
    # mode a: release_offset set, publish_date set -> publish_date is the starting date to schedule videos
    # mode b: release_offset unset, publish_date set -> schedule to this specific date
    # mode c: release_offset unset, publish_date unset -> daily increment starting tomorrow
    # mode d: release_offset set, publish_date unset -> daily increment with the given offset starting tomorrow
    publish_date = datetime(today.year, today.month, today.day, 10, 15)
    # Offset of 3 days from today; change 3 to 1 for tomorrow, 7 for next week, etc.
    publish_date += timedelta(days=3)
    # publish_date = datetime.strftime(publish_date, "%Y-%m-%d %H:%M:%S")
    asyncio.run(upload.upload(
        videopath=videopath,
        title='four days later-test-002',
        description=description,
        thumbnail=thumbnail,
        tags=tags,
        closewhen100percentupload=True,
        publishpolicy=2,
        publish_date=publish_date
    ))
# Demo entry points — exactly one scenario is enabled at a time.
# scheduletopublish_specific_date()
# scheduletopublish_7dayslater()
# saveasprivatedraft()
# instantpublish()
scheduletopublish_tomorrow()
|
<reponame>JosephGarrone/uqcsbot-1
from typing import List
import re
from datetime import date, datetime, timedelta
from calendar import month_name, month_abbr, day_abbr
from icalendar import Calendar
import requests
from pytz import timezone, utc
from typing import Tuple, Optional
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import UsageSyntaxException, loading_status
from uqcsbot.utils.itee_seminar_utils import (get_seminars, HttpException, InvalidFormatException)
# Public ICS feed for the UQCS events Google Calendar.
CALENDAR_URL = ("https://calendar.google.com/calendar/ical/"
                + "q3n3pce86072n9knt3pt65fhio%40group.calendar.google.com/public/basic.ics")
# Accepted filter arguments: 'full'/'all', a plain number (event cap),
# '<N> week(s)', or any month-name prefix (jan..., feb..., ...).
FILTER_REGEX = re.compile('full|all|[0-9]+( weeks?)?|jan.*|feb.*|mar.*'
                          + '|apr.*|may.*|jun.*|jul.*|aug.*|sep.*|oct.*|nov.*|dec.*')
BRISBANE_TZ = timezone('Australia/Brisbane')
# month_abbr[0] is the empty string, so abbreviation keys line up with
# one-indexed month numbers ('jan' -> 1, ..., 'dec' -> 12).
MONTH_NUMBER = {month.lower(): index for index, month in enumerate(month_abbr)}
class EventFilter(object):
    """Parses and applies the user-supplied filter for the `!events` command."""

    def __init__(self, full=False, weeks=None, cap=None, month=None, is_valid=True):
        self.is_valid = is_valid
        self._full = full
        self._weeks = weeks
        self._cap = cap
        self._month = month

    @classmethod
    def from_argument(cls, argument: str):
        """Builds an EventFilter from the raw command argument.

        An empty argument defaults to a two-week window; an unrecognised one
        yields a filter with is_valid == False.
        """
        if not argument:
            return cls(weeks=2)
        match = re.match(FILTER_REGEX, argument.lower())
        if not match:
            return cls(is_valid=False)
        token = match.group(0)
        if token in ('full', 'all'):
            return cls(full=True)
        if 'week' in token:
            return cls(weeks=int(token.split()[0]))
        if token[:3] in MONTH_NUMBER:
            return cls(month=MONTH_NUMBER[token[:3]])
        return cls(cap=int(token))

    def filter_events(self, events: List['Event'], start_time: datetime):
        """Returns the subset of `events` selected by this filter."""
        if self._weeks is not None:
            cutoff = start_time + timedelta(weeks=self._weeks)
            return [event for event in events if event.start < cutoff]
        if self._month is not None:
            return [event for event in events if event.start.month == self._month]
        if self._cap is not None:
            return events[:self._cap]
        return events

    def get_header(self):
        """Returns the Slack heading line describing this filter."""
        if self._full:
            return "List of *all* upcoming events"
        if self._weeks is not None:
            return f"Events in the *next _{self._weeks}_ weeks*"
        if self._month is not None:
            return f"Events in *_{month_name[self._month]}_*"
        return f"The *next _{self._cap}_ events*"

    def get_no_result_msg(self):
        """Returns the message shown when no events survive the filter."""
        if self._weeks is not None:
            return f"There don't appear to be any events in the next *{self._weeks}* weeks"
        if self._month is not None:
            return f"There don't appear to be any events in *{month_name[self._month]}*"
        return "There don't appear to be any upcoming events..."
class Event(object):
    """One calendar event, from either the UQCS calendar or an ITEE seminar."""

    def __init__(self, start: datetime, end: datetime,
                 location: str, summary: str, link: Optional[str]):
        self.start = start        # event start (timezone-aware)
        self.end = end            # event end (timezone-aware)
        self.location = location
        self.summary = summary
        self.link = link          # optional URL (set for ITEE seminars only)

    @classmethod
    def encode_text(cls, text: str) -> str:
        """
        Encodes user-specified text so that it is not interpreted as command characters
        by Slack. Implementation as required by: https://api.slack.com/docs/message-formatting
        Note that this encoding process does not stop injection of text effects (bolding,
        underlining, etc.), or a malicious user breaking the text formatting in the events
        command. It should, however, prevent <, & and > being misinterpreted and including
        links where they should not.
        --
        :param text: The text to encode
        :return: The encoded text
        """
        # Bug fix: the replacements had degenerated into no-ops ("&" -> "&").
        # '&' must be escaped first so the entities added below are not re-escaped.
        return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")

    @classmethod
    def from_cal_event(cls, cal_event):
        """Builds an Event from an icalendar VEVENT subcomponent."""
        start = cal_event.get('dtstart').dt
        end = cal_event.get('dtend').dt
        # ical 'dt' properties are parsed as a 'DDD' (datetime, date, duration) type.
        # All-day events come back as plain dates; widen them to cover the whole
        # day (midnight start, end-of-day end) as aware datetimes.
        # NOTE(review): astimezone() on a naive datetime interprets it in the
        # system-local timezone before converting to UTC — confirm this is the
        # intended behaviour for all-day events.
        if isinstance(start, date) and not isinstance(start, datetime):
            start = datetime.combine(start, datetime.min.time()).astimezone(utc)
        if isinstance(end, date) and not isinstance(end, datetime):
            end = datetime.combine(end, datetime.max.time()).astimezone(utc)
        location = cal_event.get('location', 'TBA')
        summary = cal_event.get('summary')
        return cls(start, end, location, summary, None)

    @classmethod
    def from_seminar(cls, seminar_event: Tuple[str, str, datetime, str]):
        """Builds an Event from an ITEE seminar tuple (title, link, start, location)."""
        title, link, start, location = seminar_event
        # ITEE doesn't specify the length of seminars, but they are normally one hour.
        end = start + timedelta(hours=1)
        return cls(start, end, location, title, link)

    def __str__(self):
        # Render in Brisbane local time; omit the end date when it matches the start.
        d1 = self.start.astimezone(BRISBANE_TZ)
        d2 = self.end.astimezone(BRISBANE_TZ)
        start_str = (f"{day_abbr[d1.weekday()].upper()}"
                     + f" {month_abbr[d1.month].upper()} {d1.day} {d1.hour}:{d1.minute:02}")
        if (d1.month, d1.day) != (d2.month, d2.day):
            end_str = (f"{day_abbr[d2.weekday()].upper()}"
                       + f" {month_abbr[d2.month].upper()} {d2.day} {d2.hour}:{d2.minute:02}")
        else:
            end_str = f"{d2.hour}:{d2.minute:02}"
        # Encode user-provided text to prevent certain characters
        # being interpreted as slack commands.
        summary_str = Event.encode_text(self.summary)
        location_str = Event.encode_text(self.location)
        if self.link is None:
            return f"*{start_str} - {end_str}* - `{summary_str}` - _{location_str}_"
        else:
            return f"*{start_str} - {end_str}* - `<{self.link}|{summary_str}>` - _{location_str}_"
def get_current_time():
    """Return the current moment as a UTC-aware datetime.

    Exists as a standalone function purely so tests can mock it.
    """
    brisbane_now = datetime.now(tz=BRISBANE_TZ)
    return brisbane_now.astimezone(utc)
@bot.on_command('events')
@loading_status
def handle_events(command: Command):
    """
    `!events [full|all|NUM EVENTS|<NUM WEEKS> weeks] [uqcs|itee]`
    - Lists all the UQCS and/or ITEE events that are
    scheduled to occur within the given filter.
    If unspecified, will return the next 2 weeks of events.
    """
    argument = command.arg if command.has_arg() else ""
    # Pull optional source selectors ('uqcs'/'itee') out of the argument; the
    # remainder is the time/count filter. Selecting neither means both.
    source_get = {"uqcs": False, "itee": False}
    for k in source_get:
        if k in argument:
            source_get[k] = True
            argument = argument.replace(k, "")
    argument = argument.strip()
    if not any(source_get.values()):
        source_get = dict.fromkeys(source_get, True)
    event_filter = EventFilter.from_argument(argument)
    if not event_filter.is_valid:
        raise UsageSyntaxException()
    cal = Calendar.from_ical(get_calendar_file())
    current_time = get_current_time()
    events = []
    # Subcomponents are how icalendar returns the list of things in the calendar.
    if source_get["uqcs"]:
        for c in cal.subcomponents:
            # TODO: support recurring events
            # We are only interested in VEVENT components as those are events;
            # recurring events (RRULE) are currently filtered out as well.
            if c.name != 'VEVENT' or c.get('RRULE') is not None:
                continue
            # Convert to our own Event class.
            event = Event.from_cal_event(c)
            # Keep only events that start after the current time.
            if event.start > current_time:
                events.append(event)
    if source_get["itee"]:
        try:
            # Try to include events from the ITEE seminars page.
            seminars = get_seminars()
            for seminar in seminars:
                # The ITEE website only lists current events.
                event = Event.from_seminar(seminar)
                events.append(event)
        except (HttpException, InvalidFormatException) as e:
            # Best-effort: ITEE failures are logged, not surfaced to the user.
            bot.logger.error(e.message)
    # Apply the filter generated earlier, then sort chronologically.
    events = event_filter.filter_events(events, current_time)
    events = sorted(events, key=lambda e: e.start)
    # Post the result (or a "nothing found" message) back to the channel.
    if not events:
        message = (f"_{event_filter.get_no_result_msg()}_\r\n"
                   "For a full list of events, visit: https://uqcs.org.au/calendar.html"
                   + " and https://www.itee.uq.edu.au/seminar-list")
    else:
        message = f"{event_filter.get_header()}\r\n" + '\r\n'.join(str(e) for e in events)
    bot.post_message(command.channel_id, message)
def get_calendar_file() -> bytes:
    """Download the UQCS Events calendar .ics file from Google Calendar.

    This method is mocked by unit tests.

    :return: the raw bytes of the ics calendar file
    """
    return requests.get(CALENDAR_URL).content
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error and warning message support for Emboss.
This module exports the error, warn, and note functions, which return a _Message
representing the error, warning, or note, respectively. The format method of
the returned object can be used to render the message with source code snippets.
Throughout Emboss, messages are passed around as lists of lists of _Messages.
Each inner list represents a group of messages which should either all be
printed, or not printed; i.e., an error message and associated informational
messages. For example, to indicate both a duplicate definition error and a
warning that a field is a reserved word, one might return:
return [
[
error.error(file_name, location, "Duplicate definition"),
error.note(original_file_name, original_location,
"Original definition"),
],
[
error.warn(file_name, location, "Field name is a C reserved word.")
],
]
"""
from compiler.util import parser_types
# Error levels; represented by the strings that will be included in messages.
ERROR = "error"
WARNING = "warning"
NOTE = "note"
# Colors; represented by the terminal escape sequences used to switch to them.
# These work out-of-the-box on Unix derivatives (Linux, *BSD, Mac OS X), and
# work on Windows using colorify.  (ANSI SGR codes: 0 resets attributes,
# 1 selects bold/bright, 30-37 select the foreground color.)
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
YELLOW = "\033[0;33m"
BLUE = "\033[0;34m"
MAGENTA = "\033[0;35m"
CYAN = "\033[0;36m"
WHITE = "\033[0;37m"
BRIGHT_BLACK = "\033[0;1;30m"
BRIGHT_RED = "\033[0;1;31m"
BRIGHT_GREEN = "\033[0;1;32m"
BRIGHT_YELLOW = "\033[0;1;33m"
BRIGHT_BLUE = "\033[0;1;34m"
BRIGHT_MAGENTA = "\033[0;1;35m"
BRIGHT_CYAN = "\033[0;1;36m"
BRIGHT_WHITE = "\033[0;1;37m"
BOLD = "\033[0;1m"
RESET = "\033[0m"
def error(source_file, location, message):
    """Constructs a _Message with ERROR severity for the given source location."""
    error_message = _Message(source_file, location, ERROR, message)
    return error_message
def warn(source_file, location, message):
    """Constructs a _Message with WARNING severity for the given source location."""
    warning_message = _Message(source_file, location, WARNING, message)
    return warning_message
def note(source_file, location, message):
    """Returns an object representing an informational note."""
    return _Message(source_file, location, NOTE, message)
class _Message(object):
    """_Message holds a human-readable error/warning/note with its location."""
    # __slots__ keeps the many message instances small and catches typo'd attrs.
    __slots__ = ("location", "source_file", "severity", "message")

    def __init__(self, source_file, location, severity, message):
        self.location = location        # parser_types location (start/end/is_synthetic)
        self.source_file = source_file  # file name, or falsy for the prelude
        self.severity = severity        # one of ERROR, WARNING, NOTE
        self.message = message          # possibly multi-line human-readable text

    def format(self, source_code):
        """Formats the _Message for display.

        Arguments:
          source_code: A dict of file names to source texts.  This is used to
            render source snippets.

        Returns:
          A list of tuples.

          The first element of each tuple is an escape sequence used to put a Unix
          terminal into a particular color mode.  For use in non-Unix-terminal
          output, the string will match one of the color names exported by this
          module.

          The second element is a string containing text to show to the user.

          The text will not end with a newline character, nor will it include a
          RESET color element.

          To show non-colorized output, simply write the second element of each
          tuple, then a newline at the end.

          To show colorized output, write both the first and second element of each
          tuple, then a newline at the end.  Before exiting to the operating system,
          a RESET sequence should be emitted.
        """
        # TODO(bolms): Figure out how to get Vim, Emacs, etc. to parse Emboss error
        # messages.
        # (primary color, continuation color) per severity.
        severity_colors = {
            ERROR: (BRIGHT_RED, BOLD),
            WARNING: (BRIGHT_MAGENTA, BOLD),
            NOTE: (BRIGHT_BLACK, WHITE)
        }
        result = []
        # Synthetic locations have no real source position to format.
        if self.location.is_synthetic:
            pos = "[compiler bug]"
        else:
            pos = parser_types.format_position(self.location.start)
        source_name = self.source_file or "[prelude]"
        # Only show a snippet when we actually have the source text on hand.
        if not self.location.is_synthetic and self.source_file in source_code:
            source_lines = source_code[self.source_file].splitlines()
            source_line = source_lines[self.location.start.line - 1]
        else:
            source_line = ""
        lines = self.message.splitlines()
        for i in range(len(lines)):
            line = lines[i]
            # This is a little awkward, but we want to suppress the final newline in
            # the message.  This newline is final if and only if it is the last line
            # of the message and there is no source snippet.
            if i != len(lines) - 1 or source_line:
                line += "\n"
            result.append((BOLD, "{}:{}: ".format(source_name, pos)))
            # Only the first line carries the real severity label; continuation
            # lines are rendered as notes.
            if i == 0:
                severity = self.severity
            else:
                severity = NOTE
            result.append((severity_colors[severity][0], "{}: ".format(severity)))
            result.append((severity_colors[severity][1], line))
        if source_line:
            # Source snippet plus a caret line underlining the error span.
            result.append((WHITE, source_line + "\n"))
            indicator_indent = " " * (self.location.start.column - 1)
            if self.location.start.line == self.location.end.line:
                indicator_caret = "^" * max(
                    1, self.location.end.column - self.location.start.column)
            else:
                # Multi-line spans get a single caret at the start column.
                indicator_caret = "^"
            result.append((BRIGHT_GREEN, indicator_indent + indicator_caret))
        return result

    def __repr__(self):
        return ("Message({source_file!r}, make_location(({start_line!r}, "
                "{start_column!r}), ({end_line!r}, {end_column!r}), "
                "{is_synthetic!r}), {severity!r}, {message!r})").format(
                    source_file=self.source_file,
                    start_line=self.location.start.line,
                    start_column=self.location.start.column,
                    end_line=self.location.end.line,
                    end_column=self.location.end.column,
                    is_synthetic=self.location.is_synthetic,
                    severity=self.severity,
                    message=self.message)

    def __eq__(self, other):
        # Value equality over all four fields (class identity checked first).
        return (
            self.__class__ == other.__class__ and self.location == other.location
            and self.source_file == other.source_file and
            self.severity == other.severity and self.message == other.message)

    def __ne__(self, other):
        return not self == other
def split_errors(errors):
    """Splits error bundles into (user_errors, synthetic_errors).

    Arguments:
      errors: A list of lists of _Message — each inner list is a bundle of
        associated messages.

    Returns:
      (user_errors, synthetic_errors).  A bundle goes into synthetic_errors if
      any of its messages references a synthetic source location; all other
      bundles go into user_errors.  User errors are intended for end users;
      synthetic errors should generally be suppressed.
    """
    def _mentions_synthetic(bundle):
        return any(message.location.is_synthetic for message in bundle)

    synthetic_errors = [bundle for bundle in errors if _mentions_synthetic(bundle)]
    user_errors = [bundle for bundle in errors if not _mentions_synthetic(bundle)]
    return user_errors, synthetic_errors
def filter_errors(errors):
    """Returns only the bundles from `errors` with no synthetic locations."""
    user_errors, _ = split_errors(errors)
    return user_errors
def format_errors(errors, source_codes, use_color=False):
    """Renders error bundles (with source snippets) into one newline-joined string."""
    rendered_messages = []
    for bundle in errors:
        assert bundle, "Found empty error_group!"
        for message in bundle:
            pieces = message.format(source_codes)
            if use_color:
                text = "".join(color + body + RESET for color, body in pieces)
            else:
                text = "".join(body for _, body in pieces)
            rendered_messages.append(text)
    return "\n".join(rendered_messages)
def make_error_from_parse_error(file_name, parse_error):
    """Wraps a parser ParseError into a standard single-message error bundle."""
    token = parse_error.token
    message_text = "{code}\nFound {text!r} ({symbol}), expected {expected}.".format(
        code=parse_error.code or "Syntax error",
        text=token.text,
        symbol=token.symbol,
        expected=", ".join(parse_error.expected_tokens))
    return [error(file_name, token.source_location, message_text)]
|
<filename>src/data/bgg_weekly_crawler.py
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
from requests.compat import urljoin
from datetime import datetime
import re
def game_data():
    """Scrape the BGG browse listing and XML API for the top-ranked games.

    Walks browse pages 1-50 (100 games per page), fetches the corresponding
    XML API records in one batched request per page, and returns a flat list
    of per-game dicts.
    """
    # Batched "thing" endpoint; the per-page game ids are appended to `id=`.
    # NOTE(review): the URL carries page=1&pagesize=10 yet the loop expects 100
    # items per request — presumably the explicit id list overrides paging; confirm.
    xml_bs = "https://www.boardgamegeek.com/xmlapi2/thing?type=boardgame&stats=1&ratingcomments=1&page=1&pagesize=10&id="
    all_items = []
    for pg in range(1, 51):
        pg_items = []
        ct = 0
        soup_pg = browse_games(pg)
        pg_ids, pg_links = extract_game_ids(soup_pg)
        # Retry until the API returns exactly 100 items for this page.
        # NOTE(review): this loop has no attempt cap — it spins forever if the
        # API persistently returns a different count; consider bounding it.
        while len(pg_items) != 100:
            xml_fl = requests.get(f'{xml_bs}{",".join(pg_ids)}')
            soup_xml = BeautifulSoup(xml_fl.content, "xml")
            pg_items = extract_xml(soup_xml, pg_links)
            ct += 1
            if ct > 1:
                print(f"page number {pg} attempt number {ct}")
        all_items += pg_items
    return all_items
def extract_xml(soup, game_links):
    """Convert every <item> element in the XML response into a game dict.

    `game_links[i]` is paired with the i-th item, so the two sequences must be
    in the same order.
    """
    items = soup.find_all("item")
    return [extract_item(item, game_links[idx]) for idx, item in enumerate(items)]
def extract_item(game_item, game_url):
    """Flatten one BGG XML <item> element into a plain dict of game fields.

    Parameters
    ----------
    game_item : parsed XML <item> element (BeautifulSoup Tag)
    game_url : str
        The game's page URL.  NOTE(review): currently unused — either drop it
        from the call sites or store it in the returned dict; kept for
        interface compatibility.
    """
    game_dict = {"name": game_item.find("name")["value"], "game_id": game_item["id"]}
    # NOTE(review): despite the name, these are stored as the raw XML string
    # values, not ints — downstream CSV output is unaffected, but confirm
    # before converting.
    values_int = [
        "yearpublished",
        "minplayers",
        "maxplayers",
        "playingtime",
        "minplaytime",
        "maxplaytime",
        "minage",
    ]
    for vals in values_int:
        game_dict[vals] = game_item.find(vals)["value"]
    # Multi-valued <link type=...> entries become lists of their "value" attrs.
    link_categ = [
        "boardgamecategory",
        "boardgamemechanic",
        "boardgamefamily",
        "boardgameexpansion",
        "boardgameartist",
        "boardgamecompilation",
        "boardgameimplementation",
        "boardgamedesigner",
        "boardgamepublisher",
        "boardgameintegration",
    ]
    for categ in link_categ:
        game_dict[categ] = [
            x["value"] for x in game_item.find_all("link", {"type": categ})
        ]
    stats_float = ["average", "bayesaverage", "stddev", "median", "averageweight"]
    for stat in stats_float:
        game_dict[stat] = float(game_item.find(stat)["value"])
    stats_int = [
        "usersrated",
        "owned",
        "trading",
        "wanting",
        "wishing",
        "numcomments",
        "numweights",
    ]
    for stat in stats_int:
        game_dict[stat] = int(game_item.find(stat)["value"])
    # One column per rank category, named after the friendly name stripped of
    # non-word characters.  (Fixed: regex was "\W" in a plain string, an
    # invalid escape sequence that warns on modern Python; now a raw string.)
    # NOTE(review): unranked games report value "Not Ranked", which would make
    # int() raise here — confirm inputs are always ranked.
    for game_cat in game_item.find_all("rank"):
        cat_name = re.sub(r"\W", "", game_cat["friendlyname"])
        game_dict[cat_name] = int(game_cat["value"])
    return game_dict
def browse_games(page_num):
    """Fetch one page of the BGG top-games browse listing as parsed HTML."""
    listing_url = f"https://boardgamegeek.com/browse/boardgame/page/{page_num}"
    response = requests.get(listing_url)
    return BeautifulSoup(response.content, "html.parser")
def extract_game_ids(soup):
    """Pull the numeric game ids and absolute game-page URLs from a browse page.

    Returns (game_ids, game_pages), index-aligned.
    """
    site_root = "https://boardgamegeek.com/"
    name_cells = soup.find_all("td", {"class": "collection_objectname"})
    hrefs = [cell.find("a")["href"] for cell in name_cells]
    game_ids = [href.split("/")[-2] for href in hrefs]
    game_pages = [urljoin(site_root, href) for href in hrefs]
    return game_ids, game_pages
def export_csv(game_list: list):
    """Write the scraped games to a dated CSV and register it in the metadata."""
    snapshot_path = f"../../data/kaggle/{str(datetime.now().date())}_bgg_top{len(game_list)}.csv"
    pd.DataFrame(game_list).to_csv(snapshot_path, index=False)
    update_metadata(game_list)
def update_metadata(game_list: list):
    """Append the newly exported CSV as a resource in the Kaggle dataset metadata.

    Fixes: the description previously hard-coded "top 2000" regardless of the
    actual game count; it now uses len(game_list), matching the file path.
    The date string is also computed once, so the path and description cannot
    disagree if the call straddles midnight.
    """
    date_str = str(datetime.now().date())
    with open("../../data/kaggle/dataset-metadata.json", "rb") as f:
        meta_dict = json.load(f)
    meta_dict["resources"].append(
        {
            "path": f"{date_str}_bgg_top{len(game_list)}.csv",
            "description": f"Board Game Geek top {len(game_list)} games on {date_str}",
        }
    )
    # Write a backup copy first, then overwrite the canonical metadata file.
    with open("../../data/dataset-metadata_backup.json", "w") as fp:
        json.dump(meta_dict, fp)
    with open("../../data/kaggle/dataset-metadata.json", "w") as fp:
        json.dump(meta_dict, fp)
if __name__ == "__main__":
    # Crawl all browse pages and export the scraped games to a dated CSV.
    game_items = game_data()
    export_csv(game_items)
|
<reponame>compomics/rescore
"""Interface to MaxQuant msms.txt files."""
import logging
import os
import re
from functools import cmp_to_key
from typing import Dict, List, Optional, Tuple, Type, Union
import click
import numpy as np
import pandas as pd
from ms2rescore.peptide_record import PeptideRecord
from ms2rescore._exceptions import ModificationParsingError
logger = logging.getLogger(__name__)
@pd.api.extensions.register_dataframe_accessor("msms")
class MSMSAccessor:
"""Pandas extension for MaxQuant msms.txt files."""
    # Columns read from msms.txt; matched case-insensitively at load time
    # (see `_evaluate_columns`) and renamed back to these canonical spellings
    # (see `_fix_column_case`).
    default_columns = {
        "Raw file",
        "Scan number",
        "Charge",
        "Length",
        "Sequence",
        "Modified sequence",
        "Proteins",
        "Missed cleavages",
        "Mass",
        "Mass error [Da]",
        "Mass error [ppm]",
        "Reverse",
        "Retention time",
        "PEP",
        "Score",
        "Delta score",
        "Localization prob",
        "Matches",
        "Intensities",
        "Mass Deviations [Da]",
        "Mass Deviations [ppm]",
        "Intensity coverage",
        "id",
    }
    # Unit of the mass-error column ("Da" or "ppm"); set by `_set_mass_error_unit`.
    _mass_error_unit = None

    def __init__(self, pandas_obj) -> None:
        """Pandas extension for MaxQuant msms.txt files."""
        self._obj = pandas_obj
        self._set_mass_error_unit()
        # Amino-acid codes MS2PIP cannot handle; used by `remove_invalid_amino_acids`.
        self.invalid_amino_acids = r"[BJOUXZ]"
@classmethod
def _evaluate_columns(cls, column: str) -> bool:
"""Case insensitive column evaluation for Pandas.read_csv usecols argument."""
return column.lower() in [col.lower() for col in cls.default_columns]
@classmethod
def _fix_column_case(cls, columns: List[str]) -> Dict[str, str]:
"""
Create mapping for column names with the correct case.
Using `_evaluate_columns`, we can load required columns in a case-insensitive
manner. As a result, the column name case must be fixed for downstream usage.
"""
case_mapping = {col.lower(): col for col in cls.default_columns}
rename_mapping = {col: case_mapping[col.lower()] for col in columns}
return rename_mapping
    @classmethod
    def from_file(
        cls,
        path_to_msms: Union[str, os.PathLike],
        filter_rank1_psms: bool = True,
        validate_amino_acids: bool = True,
    ) -> pd.DataFrame:
        """
        Read msms.txt from file.

        Parameters
        ----------
        path_to_msms : str, os.Pathlike
            path to msms.txt file
        filter_rank1_psms : bool, optional
            filter for rank 1 PSMs
        validate_amino_acids : bool, optional
            remove PSMs where the sequence includes an invalid amino acid; required for
            MS2PIP compatibility

        Returns
        -------
        msms : pandas.DataFrame
            DataFrame with the `.msms` accessor methods available
        """
        # Only the (case-insensitively matched) default columns are loaded;
        # names are then renamed back to their canonical capitalization.
        msms_df = pd.read_csv(path_to_msms, sep="\t", usecols=cls._evaluate_columns)
        msms_df.rename(columns=cls._fix_column_case(msms_df.columns), inplace=True)
        # The `.msms` attribute re-enters this accessor on the new DataFrame.
        if filter_rank1_psms:
            msms_df = msms_df.msms.filter_rank1_psms()
        if validate_amino_acids:
            msms_df = msms_df.msms.remove_invalid_amino_acids()
        return msms_df
def _set_mass_error_unit(self) -> None:
"""Get mass error unit from DataFrame columns."""
if "Mass error [Da]" in self._obj.columns:
self._mass_error_unit = "Da"
elif "Mass error [ppm]" in self._obj.columns:
self._mass_error_unit = "ppm"
else:
raise NotImplementedError(f"MSMS.txt mass error unit not supported.")
    def filter_rank1_psms(self) -> pd.DataFrame:
        """Filter MSMS for rank 1 PSMs.

        Keeps, for each (Raw file, Scan number) spectrum, only the PSM with
        the highest Score; the filtered frame is returned and also replaces
        the wrapped DataFrame.
        """
        # Sort by Score so that `duplicated(keep="first")` marks everything
        # except the best-scoring PSM per spectrum.
        self._obj = self._obj.sort_values("Score", ascending=False)
        duplicate_indices = self._obj[
            self._obj.duplicated(["Raw file", "Scan number"], keep="first")
        ].index
        # NOTE(review): reset_index() (without drop=True) moves the old index
        # into an "index" column — confirm downstream code relies on it.
        self._obj = self._obj.drop(duplicate_indices).sort_index().reset_index()
        logger.debug(
            f"Found {len(self._obj)} rank 1 PSMs of which "
            f"{len(self._obj[self._obj['Reverse'] == '+']) / len(self._obj):.0%} are "
            "decoy hits."
        )
        if len(duplicate_indices) > 0:
            logger.warning(
                "Removed %i non-rank 1 PSMs.", len(duplicate_indices)
            )
        return self._obj
def remove_invalid_amino_acids(self) -> pd.DataFrame:
"""Remove invalid amino acids from MSMS."""
invalid_indices = self._obj[self._obj["Sequence"].str.contains(
self.invalid_amino_acids, regex=True
)].index
self._obj = self._obj.drop(index=invalid_indices).reset_index(drop=True)
if len(invalid_indices) > 0:
logger.warning(
"Removed %i PSMs with invalid amino acids.", len(invalid_indices)
)
return self._obj
def _get_spec_id(self) -> pd.Series:
"""Get PEPREC-style spec_id."""
return (
self._obj["Raw file"]
+ "."
+ self._obj["Scan number"].astype(str)
+ "."
+ self._obj["Scan number"].astype(str)
).rename("spec_id")
@staticmethod
def _minus_one_compare_fn(mod_1, mod_2):
"""Custom comparision function where `-1` is always larger."""
location_1 = mod_1[0]
location_2 = mod_2[0]
if location_1 == -1:
if location_2 == -1:
return 0
else:
return 1
elif location_2 == -1:
return -1
else:
return location_1 - location_2
@staticmethod
def _find_mods_recursively(
mod_seq, pattern_mapping, regex_pattern, mod_list=None
):
"""
Find modifications in MaxQuant modified sequence recursively.
Parameters
----------
mod_seq : string
MaxQuant modified sequence stripped of flanking amino acids and
underscores
pattern_mapping : dict[str, tuple]
Mapping of modification pattern to name (e.g. `"(ox)": "Oxidation"`)
regex_pattern : re.Pattern
Compiled regex pattern containing all modification labels, including
amino acid prefix (if not N/C-terminal)
mod_list : list, optional
List with modification positions and labels to recursively extend.
"""
if not mod_list:
mod_list = []
# Recursively find matches
match = re.search(regex_pattern, mod_seq)
if match:
pattern = match.group(0)
mod_name = pattern_mapping[pattern]
# Handle N/C-terminal modification locations
if match.start() == 0:
mod_location = 0
elif match.end() == len(mod_seq):
mod_location = -1
else:
mod_location = match.start()
mod_list.append((mod_location, mod_name))
# Remove current modification and recurse
mod_seq = re.sub(regex_pattern, "", mod_seq, count=1)
mod_list = MSMSAccessor._find_mods_recursively(
mod_seq, pattern_mapping, regex_pattern, mod_list
)
# Validate that all modifications are found
else:
if not re.fullmatch(r"[A-Z]+", mod_seq):
raise ModificationParsingError(
f"Coud not match remaining modification labels in sequence "
f"`{mod_seq}`. Ensure that all modifications are "
"configured in the MaxQuant `modification_mapping` setting."
)
return mod_list
@staticmethod
def _get_single_peprec_modification(
sequence, modified_sequence, modification_mapping, fixed_modifications
):
"""
Get single PEPREC-style modifications from MaxQuant modified sequence.
"""
# Prepare modifications regex pattern
pattern_mapping = {}
for label, name in modification_mapping.items():
pattern_mapping[f"({label})"] = name
regex_pattern = re.compile("|".join(
[re.escape(p) for p in pattern_mapping.keys()])
)
# Find variable modifications
mod_list = MSMSAccessor._find_mods_recursively(
modified_sequence, pattern_mapping, regex_pattern
)
# Add fixed modifications
for aa, name in fixed_modifications.items():
mod_list.extend(
[(m.start() + 1, name) for m in re.finditer(aa, sequence)]
)
# Sort and format mod_list
if mod_list:
mod_string = "|".join(
["|".join([str(x) for x in mod])
for mod
in sorted(
mod_list, key=cmp_to_key(MSMSAccessor._minus_one_compare_fn)
)]
)
else:
mod_string = "-"
return mod_string
    def get_peprec_modifications(
        self, modification_mapping=None, fixed_modifications=None
    ) -> List:
        """
        Get PEPREC-formatted modifications for the full MSMS DataFrame.

        Parameters
        ----------
        modification_mapping: dict
            Mapping used to convert the MaxQuant modification labels to
            PSI-MS modification names (e.g. `{"ox": "Oxidation"}` —
            presumably label-without-parentheses to name; confirm against
            `_get_single_peprec_modification`).
        fixed_modifications: dict
            Dictionary (`{aa: mod}`) with fixed modifications to be added to the
            peprec. E.g. `{'C': 'Carbamidomethyl'}`. MaxQuant output does not
            include modifications that were set as fixed during the search. The
            key is the one-letter amino acid code; the value is the full
            modification name, as listed in the MS²PIP configuration.

        Returns
        -------
        list of str
            One PEPREC modification string ("pos|name|..." or "-") per PSM.
        """
        if not modification_mapping:
            modification_mapping = {}
        if not fixed_modifications:
            fixed_modifications = {}
        # Remove surrounding underscores (only the first row is checked —
        # the msms.txt format is assumed to be consistent across rows).
        if "_" in self._obj["Modified sequence"].iloc[0]:
            mod_sequences = self._obj["Modified sequence"].str.extract(
                "_(.*)_",
                expand=False
            )
        else:
            mod_sequences = self._obj["Modified sequence"]
        # Apply over PSMs, pairing each plain sequence with its modified form.
        peprec_mods = []
        for seq, mod_seq in zip(
            self._obj["Sequence"].to_list(), mod_sequences.to_list()
        ):
            peprec_mods.append(self._get_single_peprec_modification(
                seq, mod_seq, modification_mapping, fixed_modifications
            ))
        return peprec_mods
@staticmethod
def _calculate_top7_peak_features(
intensities: List,
mass_errors: List
) -> Tuple[np.ndarray]:
"""
Calculate "top 7 peak"-related search engine features.
The following features are calculated:
- mean_error_top7: Mean of mass errors of the seven fragment ion peaks with the
highest intensities
- sq_mean_error_top7: Squared MeanErrorTop7
- stdev_error_top7: Standard deviation of mass errors of the seven fragment ion
peaks with the highest intensities
"""
if not (isinstance(intensities, list) and isinstance(mass_errors, list)):
return np.nan, np.nan, np.nan
else:
intensities = [float(i) for i in intensities]
mass_errors = [float(i) for i in mass_errors]
indices_most_intens = np.array(intensities).argsort()[-1:-8:-1]
mass_errors_top7 = [(mass_errors[i]) for i in indices_most_intens]
mean_error_top7 = np.mean(mass_errors_top7)
sq_mean_error_top7 = mean_error_top7 ** 2
stdev_error_top7 = np.std(mass_errors_top7)
return mean_error_top7, sq_mean_error_top7, stdev_error_top7
@staticmethod
def _calculate_ion_current_features(
    matches: List,
    intensities: List,
    intensity_coverage: List
) -> Tuple[np.ndarray]:
    """
    Calculate ion current related search engine features.

    The following features are calculated:
    - ln_explained_ion_current: Summed intensity of identified fragment ions,
      divided by that of all fragment ions, logged
    - ln_nterm_ion_current_ratio: Summed intensity of identified N-terminal
      fragments, divided by that of all identified fragments, logged
    - ln_cterm_ion_current_ratio: Summed intensity of identified C-terminal
      fragments, divided by that of all identified fragments, logged
    - ln_ms2_ion_current: Summed intensity of all observed fragment ions, logged

    NOTE(review): y-ions are C-terminal fragments, yet `y_int_ratio` feeds
    `ln_nterm_ion_current_ratio` below — confirm whether the N-/C-terminal
    naming is intentionally swapped.
    """
    # Avoids taking log(0) / dividing by zero for sparse PSMs
    pseudo_count = 0.00001
    if not isinstance(intensities, list):
        # Missing data (e.g. NaN instead of a list): all features are NaN
        return np.nan, np.nan, np.nan, np.nan
    else:
        ln_explained_ion_current = intensity_coverage + pseudo_count
        summed_intensities = sum([float(i) for i in intensities])
        # Calculate ratio between matched b- and y-ion intensities
        y_ion_int = sum([
            float(intensities[i])
            for i, m
            in enumerate(matches) if m.startswith("y")
        ])
        y_int_ratio = y_ion_int / summed_intensities
        ln_nterm_ion_current_ratio = (y_int_ratio + pseudo_count) * ln_explained_ion_current
        ln_cterm_ion_current_ratio = (1 - y_int_ratio + pseudo_count) * ln_explained_ion_current
        ln_ms2_ion_current = summed_intensities / ln_explained_ion_current
        out = [
            ln_explained_ion_current,
            ln_nterm_ion_current_ratio,
            ln_cterm_ion_current_ratio,
            ln_ms2_ion_current,
        ]
        # All four features are returned log-transformed
        return tuple([np.log(x) for x in out])
def to_peprec(
    self,
    modification_mapping=None,
    fixed_modifications=None,
) -> PeptideRecord:
    """
    Get PeptideRecord from MaxQuant msms.txt file.

    Parameters
    ----------
    modification_mapping: dict
        Mapping used to convert the two-letter MaxQuant modification labels to
        PSI-MS modification names.
    fixed_modifications: dict
        Dictionary ({aa: mod}) can contain fixed modifications to be added to the
        peprec. E.g. `{'C': 'Carbamidomethyl'}`, as the MaxQuant output does not
        include modifications that were set as fixed during the search. The first
        tuple element contains the one-letter amino acid code. The second tuple
        element contains the full modification name, as listed in the values of
        `modification_mapping`.
    """
    # Empty frame with the PEPREC column layout; columns are filled one by one
    peprec = pd.DataFrame(
        columns=[
            "spec_id",
            "peptide",
            "modifications",
            "charge",
            "protein_list",
            "psm_score",
            "observed_retention_time",
            "Label",
            "Raw file"
        ]
    )
    peprec["spec_id"] = self._get_spec_id()
    peprec["peptide"] = self._obj["Sequence"]
    peprec["modifications"] = self.get_peprec_modifications(
        modification_mapping, fixed_modifications
    )
    peprec["charge"] = self._obj["Charge"]
    # Fill NaN values in Proteins column for decoy PSMs
    # But first check that NaN Proteins only occur for decoy PSMs, if so:
    # fill these without the "REV_"
    peprec["protein_list"] = self._obj["Proteins"].str.split(";")
    if (peprec["protein_list"].isna() & self._obj["Reverse"].isna()).any():
        # Some *target* PSMs have NaN proteins: rebuild the column row by row.
        # NOTE(review): `type(x) == float` is used as a NaN check here, and
        # non-NaN rows receive the raw (unsplit) Proteins string rather than
        # the split list — confirm this is the intended behavior.
        req_cols = zip(self._obj["Proteins"], self._obj["Reverse"], self._obj["Modified sequence"])
        peprec["protein_list"] = [
            [modseq] if (type(rev) == float) & (type(prot) == float) else prot
            for prot, rev, modseq in req_cols
        ]
    # Remaining NaNs belong to decoy PSMs: use the REV_-prefixed sequence
    peprec["protein_list"] = peprec["protein_list"].fillna(
        "REV_" + self._obj["Modified sequence"]
    )
    peprec["psm_score"] = self._obj["Score"]
    peprec["observed_retention_time"] = self._obj["Retention time"]
    # Target PSMs (Reverse is NaN) get label 1; decoys get -1
    peprec["Label"] = self._obj["Reverse"].isna().apply(lambda x: 1 if x else -1)
    peprec["Raw file"] = self._obj["Raw file"]
    peprec.sort_values("spec_id", inplace=True)
    peprec.reset_index(drop=True, inplace=True)
    return PeptideRecord.from_dataframe(peprec)
def get_search_engine_features(self):
    """
    Get search engine features from MSMS for Percolator rescoring.

    Percolator features are derived from the MSGF2PIN script. See table 1 of
    Percolator-MSGF+ article (doi.org/10.1021/pr400937n).
    """
    logger.debug("Calculating search engine features...")

    mass_error_col = f"Mass error [{self._mass_error_unit}]"

    spec_id = self._get_spec_id()
    charge = self._obj["Charge"].rename("charge")

    # Features taken directly from msms.txt columns, renamed to the
    # Percolator feature names
    directly_copied = self._obj[[
        "Score",
        "Delta score",
        "Localization prob",
        "Charge",
        "Mass",
        "Length",
        mass_error_col,
        "Missed cleavages",
    ]].rename(columns={
        "Score": "RawScore",
        "Delta score": "RawDeltaScore",
        "Localization prob": "RawModLocProb",
        "Length": "PepLen",
        mass_error_col: "dM",
        "Charge": "ChargeN",
        "Missed cleavages": "enzInt",
    })

    absdM = self._obj[mass_error_col].abs().rename("absdM")
    charges_encoded = pd.get_dummies(self._obj["Charge"], prefix="Charge", prefix_sep='')

    # Per-PSM "top 7 peak" features from the semicolon-separated peak lists
    top7_rows = [
        self._calculate_top7_peak_features(intensity, mass_dev)
        for intensity, mass_dev in zip(
            self._obj["Intensities"].str.split(";"),
            self._obj["Mass Deviations [Da]"].str.split(";"),
        )
    ]
    top7_features = pd.DataFrame(
        top7_rows,
        columns=["MeanErrorTop7", "sqMeanErrorTop7", "StdevErrorTop7"],
    )

    # Per-PSM ion-current features
    ion_current_rows = [
        self._calculate_ion_current_features(match, intensity, coverage)
        for match, intensity, coverage in zip(
            self._obj["Matches"].str.split(";"),
            self._obj["Intensities"].str.split(";"),
            self._obj["Intensity coverage"],
        )
    ]
    ion_current_features = pd.DataFrame(
        ion_current_rows,
        columns=[
            "lnExplainedIonCurrent",
            "lnNTermIonCurrentRatio",
            "lnCTermIonCurrentRatio",
            "lnMS2IonCurrent",
        ],
    )

    combined = pd.concat(
        [
            spec_id,
            charge,
            directly_copied,
            absdM,
            charges_encoded,
            top7_features,
            ion_current_features,
        ],
        axis=1,
    )
    return combined.sort_values("spec_id").reset_index(drop=True)
@click.command()
@click.argument("input-msms")
@click.argument("output-peprec")
def main(**kwargs):
    """Convert msms.txt to PEPREC."""
    # click normalizes the "input-msms" argument name to the "input_msms"
    # kwargs key; the previous "input_psm_report" key raised a KeyError on
    # every invocation.
    msms_df = pd.DataFrame.msms.from_file(kwargs["input_msms"])
    peprec = msms_df.msms.to_peprec()
    peprec.to_csv(kwargs["output_peprec"])


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
from sys import stderr,exit,argv
import json,yaml,os,ipaddress,random,pynetbox
import requests as req
from pprint import pprint
from uuid import uuid4
# display error & bail out
def fail(*messages):
    """Report an error on stderr and abort the program with exit status 1."""
    print(*messages, file=stderr)
    exit(1)
def generate_mac():
    """Return a random MAC address in the 52:54:00 (QEMU/KVM) OUI range."""
    octets = [0x52, 0x54, 0x00] + [random.randint(0, 0xFF) for _ in range(3)]
    return ':'.join('%02x' % octet for octet in octets)
# Usage/help text printed by main() when too few arguments are given
doc = """
Add interface to existing device or vm.
## Usage:
%s FQDN IFACE_NAME (VLAN) (IP+n) (MAC)
""" % argv[0]
def main():
    """Create a new interface (and optionally an IP address) in NetBox.

    Positional arguments (see module-level ``doc``):
    FQDN IFACE_NAME (VLAN) (IP+n) (MAC)
    """
    # parse inputs
    if len(argv) < 3:
        fail("error, invalid number of args!\n%s" % doc)
    VLAN = None
    ADDR = None
    MAC = None
    FQDN = argv[1]
    IFACE = argv[2]
    if len(argv) >= 4:
        VLAN = int(argv[3])
    if len(argv) >= 5:
        ADDR = int(argv[4])
    if len(argv) >= 6:
        MAC = argv[5]

    nb = pynetbox.api(os.getenv('NETBOX_API_URL'), token=os.getenv('NETBOX_TOKEN'))

    # find device or vm
    vm = nb.virtualization.virtual_machines.get(name=FQDN)
    dev = nb.dcim.devices.get(name=FQDN)

    # make sure it exists
    if vm is None and dev is None:
        fail("no such vm or device")

    # make sure it does not have this interface already
    if vm is not None:
        test_iface = nb.virtualization.interfaces.get(virtual_machine=FQDN, name=IFACE)
    if dev is not None:
        test_iface = nb.dcim.interfaces.get(device=FQDN, name=IFACE)
    if test_iface is not None:
        fail("interface already exists")

    # validate VLAN
    vlan = None
    if VLAN is not None:
        vlan = nb.ipam.vlans.get(vid=VLAN)
        if vlan is None:
            # previously fell through and crashed on vlan.id further down
            fail("no such vlan")
        # TODO:
        # if this is a vm, make sure vlan is already allocated to a cluster

    # validate ip no and segment
    iface_addr_masked = None
    net = None
    if ADDR is not None:
        net = nb.ipam.prefixes.get(vlan_vid=VLAN)
        if net is None:
            # previously fell through and crashed on net.prefix below
            fail("no prefix found for vlan")
        # make sure this address belongs to the network
        net_addr = ipaddress.ip_network(net.prefix)
        if net_addr.num_addresses - 2 < ADDR:
            fail("network segment is too small")
        if ADDR <= 0:
            fail("can't assign network address")
        iface_addr = net_addr[ADDR]
        iface_addr_masked = "%s/%d" % (iface_addr, net_addr.prefixlen)
        # make sure this address is free
        test_addr = nb.ipam.ip_addresses.get(address=iface_addr)
        if test_addr is not None:
            fail("address already assigned")

    # TODO: make sure mac address is globally unique!!!
    if MAC is None:
        MAC = generate_mac()

    iface_data = {
        'name': IFACE,
        'type': "virtual",
        'mode': "access",
    }
    # bridge interfaces inherit their MAC from member interfaces
    if not IFACE.startswith('br'):
        iface_data['mac_address'] = MAC
    if vlan is not None:
        iface_data['untagged_vlan'] = vlan.id
        iface_data['description'] = vlan.name

    # create interface object
    if vm is not None:
        iface_data['virtual_machine'] = vm.id
        iface = nb.virtualization.interfaces.create(iface_data)
    if dev is not None:
        iface_data['device'] = dev.id
        iface = nb.dcim.interfaces.create(iface_data)

    # create ip address and assign to interface
    if iface_addr_masked is not None:
        ip_data = {
            'address': iface_addr_masked,
            'family': 4,
            'interface': iface.id,
            'dns_name': FQDN,
        }
        nb.ipam.ip_addresses.create(ip_data)


if __name__ == "__main__":
    main()
|
<reponame>PLM912/Keter
import time
import discord
import psutil
import os
from datetime import datetime
from discord.ext import commands
from evs import default
class Information_ja(commands.Cog):
    """Japanese-language information commands (ping, invite, source, about)."""

    def __init__(self, bot):
        self.bot = bot
        self.config = default.get("config.json")
        # handle to our own process; used for RAM usage in the 情報 command
        self.process = psutil.Process(os.getpid())

    @commands.command()
    async def ピン(self, ctx):
        """ Pong! """
        before = time.monotonic()
        before_ws = int(round(self.bot.latency * 1000, 1))
        message = await ctx.send("🏓 ポン")
        ping = (time.monotonic() - before) * 1000
        await message.edit(content=f"🏓 WS: {before_ws}ms | REST: {int(ping)}ms")

    @commands.command(aliases=['ボットを招待する', '招待', '参加'])
    async def 招待する(self, ctx):
        """ Invite me to your server """
        embed = discord.Embed(title="私をパーティーに招待させてください!", description=f"**{ctx.author.name}**, こちらのリンクを使ってください。\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)", color=0xeff0f1)
        await ctx.send(embed=embed)

    @commands.command()
    async def コード(self, ctx):
        """ Check out my source code <3 """
        await ctx.send(f"**{ctx.bot.user}** コードはこちらからご確認ください。:\nhttps://github.com/Shio7/Keter")

    @commands.command(aliases=['サーバ', 'サバー', 'サバ'])
    async def サーバー(self, ctx):
        """ Get an invite to our support server! """
        if isinstance(ctx.channel, discord.DMChannel) or ctx.guild.id != 749595288280498188:
            return await ctx.send(f"**こちらえ! {ctx.author.name} 🍻\n<{self.config.botserver}>**")
        await ctx.send(f"**{ctx.author.name}** ここが私のサーバーです~ :3")

    @commands.command(aliases=['常態'])
    async def 情報(self, ctx):
        """ About the bot """
        # Read the cached version string. The previous code referenced
        # `f.close` without calling it, leaking the file handle; a context
        # manager closes it deterministically.
        with open("./lib/cache/version.ccf", "r") as f:
            version = f.read()
        ramUsage = self.process.memory_full_info().rss / 1024**2
        avgmembers = round(len(self.bot.users) / len(self.bot.guilds))
        embed = discord.Embed(colour=0xeff0f1)
        embed.set_thumbnail(url=ctx.bot.user.avatar_url)
        embed.add_field(name="最後のリブート", value=default.timeago(datetime.now() - self.bot.uptime), inline=True)
        embed.add_field(
            name=f"開発陣{'' if len(self.config.owners) == 1 else 's'}",
            value=', '.join([str(self.bot.get_user(x)) for x in self.config.owners]),
            inline=True)
        embed.add_field(name="ライブラリ", value="discord.py", inline=True)
        embed.add_field(name="サーバー", value=f"{len(ctx.bot.guilds)} ( avg: {avgmembers} users/server )", inline=True)
        embed.add_field(name="コマンド", value=len([x.name for x in self.bot.commands]), inline=True)
        embed.add_field(name="ラム", value=f"{ramUsage:.2f} MB", inline=True)
        await ctx.send(content=f"ℹ About **{ctx.bot.user}** | **" + version + "**", embed=embed)
def setup(bot):
    # discord.py extension entry point: register the cog when the bot loads it
    bot.add_cog(Information_ja(bot))
|
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Conv2DTranspose, \
Reshape
from tensorflow.keras import Model
# Dimensionality of the autoencoder's latent space (also used by
# plot_latent_vectors as the number of label classes to plot)
LATENT_DIM = 10

# Shared training state: running epoch loss, optimizer, and reconstruction loss
train_loss_autoencoder = tf.keras.metrics.Mean(name='train_loss_ae')
optimizer = tf.keras.optimizers.Adam()
loss_obj = tf.keras.losses.MeanSquaredError()
class EncoderModel(Model):
    """Convolutional encoder mapping 28x28x1 images to 10-dim latent vectors."""

    def __init__(self):
        super(EncoderModel, self).__init__()
        # Layer attribute names are kept as-is: they determine checkpoint
        # variable names.
        self.conv1 = Conv2D(32, 3, activation=tf.nn.leaky_relu, strides=2,
                            padding='SAME')
        self.conv2 = Conv2D(64, 3, activation=tf.nn.leaky_relu, strides=2,
                            padding='SAME')
        self.flatten = Flatten()
        self.d1 = Dense(512, activation=tf.nn.leaky_relu)
        self.d2 = Dense(10, activation=tf.nn.leaky_relu)

    def call(self, x):
        # Plain feed-forward pipeline through all layers in order
        for layer in (self.conv1, self.conv2, self.flatten, self.d1, self.d2):
            x = layer(x)
        return x
class DecoderModel(Model):
    """Deconvolutional decoder mapping latent vectors back to 28x28x1 images."""

    def __init__(self):
        super(DecoderModel, self).__init__()
        # Layer attribute names are kept as-is: they determine checkpoint
        # variable names.
        self.d1 = Dense(512, activation=tf.nn.leaky_relu)
        self.d2 = Dense(7 * 7 * 64, activation=tf.nn.leaky_relu)
        self.resh = Reshape((7, 7, 64))
        self.conv1t = Conv2DTranspose(64, 3, strides=2,
                                      activation=tf.nn.leaky_relu,
                                      padding='SAME')
        self.conv2t = Conv2DTranspose(32, 3, strides=2,
                                      activation=tf.nn.leaky_relu,
                                      padding='SAME')
        self.conv3t = Conv2DTranspose(1, 3, strides=1,
                                      activation='sigmoid',
                                      padding='SAME')

    def call(self, x):
        # Dense expansion, reshape to a 7x7 feature map, then upsample to 28x28
        for layer in (self.d1, self.d2, self.resh,
                      self.conv1t, self.conv2t, self.conv3t):
            x = layer(x)
        return x
def add_gaussian_noise(image, sigma):
    """
    Adds random gaussian noise (mean 0, standard deviation `sigma`) to an image.

    :param image: a grayscale image with values in the [0, 1] range of type float64.
    :param sigma: a non-negative scalar; standard deviation of the gaussian noise.
    :return: Noisy image, quantized to 8-bit grey levels and clipped to [0, 1].
    """
    noise = np.random.normal(loc=0, scale=sigma, size=image.shape)
    noisy_image = image + noise
    # Quantize to the nearest 1/255 level, mimicking an 8-bit image
    noisy_image = np.around(noisy_image * 255) / 255
    return noisy_image.clip(0, 1)
def get_train_step_autoencoder():
    """ Wrapper for training step, needed if running more than one model
    per run

    :return: train step function (a tf.function taking encoder, decoder,
        images, targets)
    """
    @tf.function
    def train_step(encoder, decoder, images, targets):
        # Forward pass under the tape: encode, reconstruct, compute MSE loss
        with tf.GradientTape() as tape:
            latent_vectors = encoder(images)
            # print(f"Latent vectors: {latent_vectors.shape}")
            pred = decoder(latent_vectors)
            # print(f"Pred: {pred.shape}")
            loss = loss_obj(targets, pred)
        # Optimize encoder and decoder weights jointly
        trainable_vars = [*encoder.trainable_variables,
                          *decoder.trainable_variables]
        gradients = tape.gradient(loss, trainable_vars)
        optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Accumulate the running epoch-loss metric
        train_loss_autoencoder(loss)
    return train_step
def train_autoencoder(encoder, decoder, images, target_images, num_epochs,
                      batch_size):
    """ Trains an encoder/decoder pair on (input -> target) image pairs.

    :param encoder: EncoderModel mapping images to latent vectors
    :param decoder: DecoderModel reconstructing images from latent vectors
    :param images: array of 28x28 greyscale input images (e.g. noisy images)
    :param target_images: array of 28x28 greyscale reconstruction targets
    :param int num_epochs: Number of epochs to train with
    :param int batch_size: Batch size
    """
    # NOTE(review): passed to Dataset.shuffle as the *buffer size*, not a
    # random seed — the name is misleading
    shuffle_seed = 10000
    # Add a trailing channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
    train_set = images[..., tf.newaxis]
    target_set = target_images[..., tf.newaxis]
    train_ds = tf.data.Dataset.from_tensor_slices(
        (train_set, target_set)).shuffle(
        shuffle_seed).batch(batch_size)
    train_step = get_train_step_autoencoder()
    for epoch in range(num_epochs):
        for im_batch, target_batch in train_ds:
            train_step(encoder, decoder, im_batch, target_batch)
        print(
            f'Epoch {epoch + 1}, Train loss: {train_loss_autoencoder.result()}')
        # Reset the metrics for the next epoch
        train_loss_autoencoder.reset_states()
def plot_latent_vectors(encoder, dataset, labels, title, filename):
    """Project latent vectors to 2D with t-SNE and scatter-plot them by label.

    :param encoder: trained EncoderModel
    :param dataset: array of 28x28 greyscale images
    :param labels: integer class label per image, used to color the points
    :param title: plot title
    :param filename: path the figure is saved to
    """
    latent_vectors = encoder(dataset[..., tf.newaxis]).numpy()
    embeddings = np.array(TSNE().fit_transform(latent_vectors))
    # NOTE(review): iterates label classes 0..LATENT_DIM-1; this covers the 10
    # MNIST classes only because LATENT_DIM happens to equal 10 — confirm
    for i in range(LATENT_DIM):
        c = embeddings[labels == i]
        plt.scatter(c[..., 0], c[..., 1], s=.5)
    plt.title(title)
    plt.savefig(filename)
    plt.show()
|
#!/usr/bin/env python
#
# Set it up so we can painless run the python nose tests against the localhost 8983.
# Summary:
#
# ./run_solr_for_unit_tests.py [--debug] [--debug-port=9999] [--port=8983]
#
# Add the argument "--debug" to start the Jetty server in debug mode. Specify the port with "--debug-port=XXXX".
# Assumes you're running this on a *nix machine.
#
import sys, os, shutil, urllib, xml.dom.minidom, tarfile, getopt
# Defaults; getopt results (string values) override these keys below
args = {'--debug-port' : 9999, '--port' : 8983}
args.update(dict(getopt.getopt(sys.argv[1:], '', ['debug', 'debug-port=', 'port='])[0]))
# Build the plugin jar, then prepare a scratch directory for the Solr webapp
os.system("mvn clean package")
shutil.rmtree('target/webapp', ignore_errors=True)
os.mkdir('target/webapp')
def find_solr_version():
    """Extract the solr-core dependency version from pom.xml.

    Python 2 only: relies on `filter` returning a list (indexable, truthy).
    """
    pom = xml.dom.minidom.parse('pom.xml')
    def isSolrCoreDep(dep):
        # truthy iff this <dependency> element's artifactId is 'solr-core'
        return filter((lambda artifact : artifact.lastChild.data == 'solr-core'), dep.getElementsByTagName('artifactId'))
    solrCoreDep = filter(isSolrCoreDep, pom.getElementsByTagName('dependency'))[0]
    return solrCoreDep.getElementsByTagName('version')[0].lastChild.data
# compare major.minor.patch-style versions, e.g. 1.2 vs 1.2.1 vs 1.2.3 etc.
def version_compare(ver1, ver2):
def listify(str):
return map(lambda x:int(x), str.split('.'))
(list1, list2) = (listify(ver1), listify(ver2))
# lexicographical comparison
return -1 if list1 < list2 else (1 if list2 > list1 else 0)
solr_version = find_solr_version()
# they changed their naming convention in v 4.1.0
tgz_filename = ('solr-%s' if (version_compare(solr_version, '4.1.0') >= 0) else 'apache-solr-%s') % solr_version
local_filename = 'target/webapp/solr.tgz'
# path where a previous run may have cached the tarball via maven
mvn_filename = os.environ['HOME'] + \
    ('/.m2/repository/org/healthonnet/hon-lucene-synonyms-solrdep/%s/hon-lucene-synonyms-solrdep-%s.tgz' \
    % (solr_version, solr_version))
if not os.path.isfile(mvn_filename):
    # download the tgz file
    print "Downloading solr tgz file version %s (I'll only have to do this once)" % solr_version
    tgz_url = 'http://archive.apache.org/dist/lucene/solr/%s/%s.tgz' % (solr_version, tgz_filename)
    def reporthook(a,b,c):
        # download progress: a=blocks transferred, b=block size, c=total bytes
        print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
        sys.stdout.flush()
    urllib.urlretrieve(tgz_url, local_filename, reporthook)
    # use maven to store the file locally in the future
    install_cmd = """mvn install:install-file \
        -DgroupId=org.healthonnet \
        -DartifactId=hon-lucene-synonyms-solrdep \
        -Dversion=%s \
        -Dfile=%s \
        -Dpackaging=tgz""" % (solr_version, local_filename)
    os.system(install_cmd)
else:
    shutil.copy(mvn_filename, local_filename)
solrdir = 'target/webapp/' + tgz_filename
tar = tarfile.open(local_filename)
tar.extractall(path='target/webapp/')
# unpack the solr war, inject the plugin jar, and repack the war
os.mkdir(solrdir + '/example/myjar')
os.system('cd %s/example/myjar; jar -xf ../webapps/solr.war; cd -' % solrdir)
os.system('cp target/hon-lucene-synonyms-*.jar %s/example/myjar/WEB-INF/lib' % solrdir)
os.system('cd %s/example/myjar; jar -cf ../webapps/solr.war *; cd -' % solrdir)
# they changed the location of the example conf dir in solr 4.0.0
confdir = 'collection1/conf' if version_compare(solr_version, '4.0.0') >= 0 else 'conf'
shutil.copy('examples/example_synonym_file.txt', solrdir + '/example/solr/' + confdir)
# add the config to the config file
conf_to_add = open('examples/example_config.xml', 'r').read()
conf_filename = solrdir + '/example/solr/' + confdir + '/solrconfig.xml'
filein = open(conf_filename,'r')
filetext = filein.read()
filein.close()
# splice the example config in just before the closing </config> tag
fileout = open(conf_filename,'w')
fileout.write(filetext.replace('</config>', conf_to_add + '</config>'))
fileout.close()
# optionally enable JVM remote debugging on the configured port
debug = ('-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=%s' % args['--debug-port']) if '--debug' in args else ''
cmd = 'cd %(solrdir)s/example; java %(debug)s -Djetty.port=%(port)s -jar start.jar' % \
    {'debug' : debug, 'solrdir' : solrdir, 'port' : args['--port']}
print "Running jetty with command: " + cmd
os.system(cmd)
|
<filename>python/histogram.py
#!/usr/bin/python
######################################
# @file histogram.py
# @author <NAME>
# @date 5/29/07
# @brief outputs a histogram in lc format given input lc format & bin structure
######################################
from optparse import OptionParser
import os
import re
import sys
########################
#command line arguments
########################
# Command line interface (Python 2 optparse)
parser = OptionParser(description="Creates a histogram by reading & writing in lc format. Default read in and read out is via STDIN and STDOUT. If using STDIN, us lc to strip headers (-o option) and select only the column of interest.")
parser.add_option("-c", "--col", dest="col", help="Histogram variable", metavar="COL")
parser.add_option("-a", "--min", dest="min", help="Minimum value", metavar="VAL")
parser.add_option("-b", "--max", dest="max", help="Maximum value", metavar="VAL")
parser.add_option("-n", "--nbins", dest="nbins", help="Num Bins", metavar="VAL")
parser.add_option("-s", "--suppress", action="store_false", dest="display", help="Prevent histogram from being displayed", default=True)
parser.add_option("-d", "--device", dest="image_file", help="Output graph to this file (PS)", metavar="FILE")
parser.add_option("-i", "--infile", dest="infile", help="Input Catalog", metavar="FILE", default="STDIN")
parser.add_option("-o", "--outfile", dest="outfile", help="Output file", metavar="FILE", default="STDOUT")
(options, args) = parser.parse_args()
# min, max and nbins are mandatory
if (options.min == None or
        options.max == None or
        options.nbins == None):
    parser.print_help()
    sys.exit("Required options not specified")
# NOTE(review): these assignments shadow the builtins `min` and `max` for the
# remainder of the script
min = float(options.min)
max = float(options.max)
nbins = int(options.nbins)
use_outfile = (options.outfile != "STDOUT")
######################
#open pipes
######################
# Input: raw stdin, or `lc` extracting just the histogram column from a catalog
if options.infile == "STDIN":
    input = sys.stdin
else:
    input_command = "lc -o " + options.col + " < " + options.infile
    input = os.popen(input_command, 'r')
# Optional plotting pipe via plotcat
if options.display:
    plot_command = "plotcat x num -h"
    if options.col is not None:
        plot_command += " -l " + options.col + " num"
    if options.image_file != None:
        plot_command += " -d '" + options.image_file + "/ps'"
    print plot_command
    plot = os.popen(plot_command, 'w')
# lc formats the histogram rows into an lc-format table (Python 2 os.popen2)
output_command = "lc -C -n x -n num -n min -n max "
(output,feedback) = os.popen2(output_command,'t')
if use_outfile:
    outfile = open(options.outfile, 'w')
#####################
#create histo
#####################
# nbins+1 equally spaced bin edges from min to min + nbins*step
step = float(max - min)/nbins
divPoints = []
cur = min
for i in range(nbins):
    divPoints.append(cur)
    cur += step
divPoints.append(cur)
# xaxis holds bin centers; histo maps (lo, hi) edge pairs to counts
xaxis = []
histo = {}
for i in range(nbins):
    xaxis.append( (divPoints[i] + divPoints[i+1])/2 )
    histo[(divPoints[i],divPoints[i+1])] = 0
bins = histo.keys()
# Tally each input value into its (at most one) containing bin;
# '#' lines are comments
for curline in input:
    if curline[0] == "#":
        continue
    d = float(curline)
    def inBin(bin):
        # half-open bin: lo <= d < hi (shadows min/max locally on purpose)
        (min,max) = bin
        return min <= d < max
    pos = filter(inBin, bins)
    assert len(pos) <=1
    if len(pos) == 1:
        histo[pos[0]] += 1
# Python 2: keys() is a list, so it can be sorted in place
bins.sort()
counts = [histo[bin] for bin in bins]
#####################
#Output histogram
#####################
# rows: bin center, count, bin lower edge, bin upper edge
for i in range(nbins):
    (min,max) = bins[i]
    output.write( str(xaxis[i]) + '\t' + str(counts[i]) + '\t' + str(min) +
        '\t' + str(max) + '\n')
######################
# Cleanup
######################
# Closing output lets `lc` finish; its formatted table is read from feedback
input.close()
output.close()
lc_histogram = feedback.read()
feedback.close()
if options.display:
    plot.write(lc_histogram)
    plot.close()
if use_outfile:
    outfile.write(lc_histogram)
    outfile.close()
else:
    sys.stdout.write(lc_histogram)
|
from PIL import Image, ImageDraw, ImageFont
import math
from gauge import Gauge
class RPM(Gauge):
    """PIL-based circular RPM gauge renderer (scale hard-coded to 0-8000 rpm)."""

    def __init__(self, fontfile='Roboto-Regular.ttf', size=256):
        # NOTE(review): Gauge.__init__ is never called — confirm the base
        # class needs no initialization
        self.size = size
        self.fontfile = fontfile
        self.fontbig = ImageFont.truetype(self.fontfile, size=int(self.size/4))
        self.fontsmall = ImageFont.truetype(self.fontfile, size=int(self.size/6))
        # highest rpm seen so far; rendered as a persistent peak marker
        self.maxrpm = 0

    def _maxrpm(self, rpm):
        # remember the peak rpm across draw_meter() calls
        self.maxrpm = max(self.maxrpm, rpm)

    def _rangerpm(self, rpm):
        # map rpm in [0, 8000] onto the gauge arc [135, 405] degrees
        rpmang = 135 + int((rpm/8000)*270)
        return rpmang

    def draw_meter(self, rpm):
        """Render the gauge for the given rpm and return it as a PIL image."""
        self._maxrpm(rpm)
        im = Image.new('RGBA', (self.size, self.size), color=Gauge._transparent)
        linewidth = int(self.size*0.018)
        # concentric radii: r1 outer edge, r2 inner ring, r3 ring interior,
        # r4 needle/fill arc
        r1 = int(self.size/2 * 0.95)
        r2 = int(self.size/2 * 0.75)
        r3 = r2-linewidth
        r4 = int(self.size / 2 * 0.90)
        center = int(self.size/2)
        draw = ImageDraw.Draw(im)
        rpmang = self._rangerpm(rpm)
        rpmmax = self._rangerpm(self.maxrpm)
        # warning wedge over the last 1.5 scale divisions, hollowed out
        draw.pieslice([center-r1,center-r1,center+r1, center+r1], 405-int(1.5*33.75), 405, Gauge._warning)
        draw.pieslice([center-r2+1,center-r2+1,center+r2-1, center+r2-1], 405-int(1.5*33.75), 405, Gauge._transparent)
        # filled arc from scale start up to the current rpm, hollowed out
        draw.pieslice([center-r4,center-r4,center+r4, center+r4], 135, rpmang, Gauge._red)
        draw.pieslice([center-r2+1,center-r2+1,center+r2-1, center+r2-1], 135, rpmang, Gauge._transparent)
        # peak-rpm marker line at the highest rpm seen so far
        x = math.cos(math.radians(rpmmax))
        y = math.sin(math.radians(rpmmax))
        draw.line([int(x*(r1))+center, int(y*(r1))+center, int(x*r2)+center, int(y*r2)+center],
            Gauge._red, width=2)
        # nine tick marks, one every 33.75 degrees (i.e. every 1000 rpm)
        for marker in range(0, 9):
            ang = 135 + (marker*33.75)
            x = math.cos(math.radians(ang))
            y = math.sin(math.radians(ang))
            draw.line([int(x*(r1+10))+center, int(y*(r1+10))+center, int(x*r2)+center, int(y*r2)+center],
                Gauge._gray2, width=int(linewidth/2))
        # gauge ring: grey chord hollowed out, plus the inner arc outline
        draw.chord([center-r2, center-r2, center+r2, center+r2], 135, 405, Gauge._gray)
        draw.chord([center-r3, center-r3, center+r3, center+r3], 115, 425, Gauge._transparent)
        draw.arc([center-r4, center-r4, center+r4, center+r4], 135, 405, Gauge._gray2)
        # centered labels: unit text below, current value above
        maxtext = self.fontsmall.getsize('RPM')
        rpmtext = '{0:.0f}'.format(rpm)
        draw.text((center - int(maxtext[0]/2), int(center*1.4)), 'RPM', fill=Gauge._black, font=self.fontsmall)
        valtext = self.fontbig.getsize(rpmtext)
        draw.text((center - int(valtext[0]/2), int(center*1.2) - int(valtext[1]*1.1)),
            rpmtext, fill=Gauge._black, font=self.fontbig)
        return im
if __name__ == '__main__':
    # quick visual smoke test: draw a couple of meters, save one, show the last
    sa = RPM(size=350, fontfile='Roboto-Medium.ttf')
    i = sa.draw_meter(rpm=4200)
    i = sa.draw_meter(rpm=2000)
    # NOTE(review): save_png is presumably provided by Gauge — verify signature
    sa.save_png(name='test.png', rpm=999)
    i.show()
|
<reponame>peichman-umd/plastron<filename>plastron/handlers/ndnp.py
""" Classes for interpreting and loading metadata and files stored
according to the NDNP specification. """
import csv
import logging
import lxml
from lxml.etree import parse, XMLSyntaxError
import os
from plastron import pcdm
from plastron.exceptions import DataReadException
from plastron.namespaces import dcmitype, ndnp
from plastron.files import LocalFileSource
from plastron.models.newspaper import Article, Issue, IssueMetadata, MetadataFile, Page
# alias the rdflib Namespace
ns = ndnp
# ============================================================================
# METADATA MAPPING
# ============================================================================
# XPath expressions (with inlined namespace URIs) for pulling metadata out of
# the NDNP batch index file and the issue METS/MODS documents
XPATHMAP = {
    'batch': {
        'issues': "./{http://www.loc.gov/ndnp}issue",
        'reels': "./{http://www.loc.gov/ndnp}reel"
    },
    'issue': {
        'volume': (".//{http://www.loc.gov/mods/v3}detail[@type='volume']/"
                   "{http://www.loc.gov/mods/v3}number"
                   ),
        'issue': (".//{http://www.loc.gov/mods/v3}detail[@type='issue']/"
                  "{http://www.loc.gov/mods/v3}number"
                  ),
        'edition': (".//{http://www.loc.gov/mods/v3}detail[@type='edition']/"
                    "{http://www.loc.gov/mods/v3}number"
                    ),
        'article': (".//{http://www.loc.gov/METS/}div[@TYPE='article']"
                    ),
        'areas': (".//{http://www.loc.gov/METS/}area"
                  ),
    }
}

# Namespace prefix map passed to ElementTree/lxml find()/findall() calls
xmlns = {
    'METS': 'http://www.loc.gov/METS/',
    'mix': 'http://www.loc.gov/mix/',
    'MODS': 'http://www.loc.gov/mods/v3',
    'premis': 'http://www.loc.gov/standards/premis',
    'xlink': 'http://www.w3.org/1999/xlink',
}
# ============================================================================
# NDNP BATCH CLASS
# ============================================================================
class Batch:
    """Iterator over the issues of an NDNP batch, driven by the batch XML file."""

    def __init__(self, repo, config):
        self.logger = logging.getLogger(
            __name__ + '.' + self.__class__.__name__
        )
        # target collection, loaded from the repository's existing graph
        graph = repo.get_graph(config.collection_uri, include_server_managed=False)
        self.collection = pcdm.Collection.from_graph(graph, config.collection_uri)
        self.collection.created = True
        # column names for the per-reel aggregation CSVs created below
        self.fieldnames = ['aggregation', 'sequence', 'uri']
        try:
            tree = parse(config.batch_file)
        except OSError:
            raise DataReadException(f'Unable to read {config.batch_file}')
        except XMLSyntaxError:
            raise DataReadException(f'Unable to parse {config.batch_file} as XML')
        root = tree.getroot()
        m = XPATHMAP
        # read over the index XML file assembling a list of paths to the issues
        self.basepath = os.path.dirname(config.batch_file)
        self.issues = []
        for i in root.findall(m['batch']['issues']):
            # NOTE(review): drops the two characters preceding the 4-char
            # extension to derive the article-level filename — confirm this
            # matches the batch's naming convention
            sanitized_path = i.text[:-6] + i.text[-4:]
            self.issues.append(
                (os.path.join(self.basepath, i.text),
                 os.path.join(
                     self.basepath, "Article-Level", sanitized_path)
                 )
            )
        # set up a CSV file for each reel, skipping existing CSVs
        self.reels = set(
            [r.get('reelNumber') for r in root.findall(m['batch']['reels'])]
        )
        self.logger.info('Batch contains {0} reels'.format(len(self.reels)))
        self.path_to_reels = os.path.join(config.log_dir, 'reels')
        if not os.path.isdir(self.path_to_reels):
            os.makedirs(self.path_to_reels)
        for n, reel in enumerate(self.reels):
            reel_csv = '{0}/{1}.csv'.format(self.path_to_reels, reel)
            if not os.path.isfile(reel_csv):
                self.logger.info(
                    "{0}. Creating reel aggregation CSV in '{1}'".format(
                        n + 1, reel_csv)
                )
                with open(reel_csv, 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=self.fieldnames)
                    writer.writeheader()
            else:
                self.logger.info(
                    "{0}. Reel aggregation file '{1}' exists; skipping".format(
                        n + 1, reel_csv)
                )
        self.length = len(self.issues)
        self.num = 0
        self.logger.info("Batch contains {0} items.".format(self.length))

    def __iter__(self):
        return self

    def __next__(self):
        # yield one BatchItem per issue until the list is exhausted
        if self.num < self.length:
            issue_path, article_path = self.issues[self.num]
            item = BatchItem(self, issue_path, article_path)
            self.num += 1
            return item
        else:
            self.logger.info('Processing complete!')
            raise StopIteration()
# mapping from the USE attribute to a class representing that type of file
# (USE values come from the METS file entries read in create_page)
FILE_CLASS_FOR = {
    'master': pcdm.PreservationMasterFile,
    'service': pcdm.IntermediateFile,
    'derivative': pcdm.ServiceFile,
    'ocr': pcdm.ExtractedText
}
class BatchItem:
def __init__(self, batch, issue_path, article_path):
    # per-item logger, named after the module and class
    self.logger = logging.getLogger(
        __name__ + '.' + self.__class__.__name__
    )
    self.batch = batch
    # populated by read_data()
    self.issue = None
    # gather metadata
    self.dir = os.path.dirname(issue_path)
    self.path = issue_path
    self.article_path = article_path
    self.reel_csv_loc = batch.path_to_reels
def read_data(self):
    """Parse the issue and article METS XML and build the Issue object tree.

    Returns the populated Issue (also stored on self.issue). Raises
    DataReadException on unreadable/unparsable XML or missing metadata.
    """
    try:
        tree = parse(self.path)
    except OSError:
        raise DataReadException("Unable to read {0}".format(self.path))
    except XMLSyntaxError:
        raise DataReadException(
            "Unable to parse {0} as XML".format(self.path)
        )
    issue_mets = METSResource(tree)
    root = tree.getroot()
    m = XPATHMAP['issue']
    issue = Issue(member_of=self.batch.collection)
    # get required metadata elements
    try:
        issue.title = root.get('LABEL')
        issue.date = root.find('.//MODS:dateIssued', xmlns).text
        issue.sequence_attr = ('Page', 'number')
    except AttributeError:
        raise DataReadException("Missing metadata in {0}".format(self.path))
    # optional metadata elements
    if root.find(m['volume']) is not None:
        issue.volume = root.find(m['volume']).text
    if root.find(m['issue']) is not None:
        issue.issue = root.find(m['issue']).text
    if root.find(m['edition']) is not None:
        issue.edition = root.find(m['edition']).text
    # add the issue and article-level XML files as related objects
    issue.add_related(IssueMetadata(MetadataFile.from_source(
        LocalFileSource(self.path),
        title=f'{issue.title}, issue METS metadata'
    )))
    issue.add_related(IssueMetadata(MetadataFile.from_source(
        LocalFileSource(self.article_path),
        title=f'{issue.title}, article METS metadata'
    )))
    # create a page object for each page and append to list of pages
    for page_div in issue_mets.xpath('METS:structMap//METS:div[@TYPE="np:page"]'):
        # create a page and add to the list of members
        page = self.create_page(issue_mets, page_div, issue)
        issue.add_member(page)
        # create a proxy for the page in this issue and add it to the aggregation
        issue.append_proxy(page, title=f'Proxy for page {page.number} in {issue.title}')
        # add OCR text blocks as annotations
        issue.annotations.extend(page.textblocks())
    # iterate over the article XML and create objects for articles
    try:
        article_tree = parse(self.article_path)
    except OSError:
        raise DataReadException(
            "Unable to read {0}".format(self.article_path)
        )
    except XMLSyntaxError:
        raise DataReadException(
            "Unable to parse {0} as XML".format(self.article_path)
        )
    article_root = article_tree.getroot()
    for article in article_root.findall(m['article']):
        article_title = article.get('LABEL')
        article_pagenums = set()
        for area in article.findall(m['areas']):
            # the page number is encoded in the FILEID, e.g. "ocrFile3" -> 3
            pagenum = int(area.get('FILEID').replace('ocrFile', ''))
            article_pagenums.add(pagenum)
        article = Article(
            title=article_title,
            issue=issue,
            pages=sorted(list(article_pagenums))
        )
        issue.add_member(article)
    self.issue = issue
    return issue
def create_page(self, issue_mets, page_div, issue):
    """Build a Page object (with its files) from a METS page div.

    issue_mets: METSResource wrapping the issue-level METS document.
    page_div: the METS:div element (TYPE "np:page") describing this page.
    issue: the parent issue object the new page belongs to.

    Returns the populated Page with all of its files attached and its OCR
    parsed.

    Raises DataReadException when a file entry carries no ADMID, since the
    technical metadata cannot be located without it.
    """
    # descriptive (MODS) metadata section for this page
    dmdsec = issue_mets.dmdsec(page_div.get('DMDID'))
    number = dmdsec.find('.//MODS:start', xmlns).text
    # reel and frame identifiers are optional; left as None when absent
    reel = dmdsec.find('.//MODS:identifier[@type="reel number"]', xmlns)
    if reel is not None:
        reel = reel.text
    frame = dmdsec.find('.//MODS:identifier[@type="reel sequence number"]', xmlns)
    if frame is not None:
        frame = frame.text
    title = "{0}, page {1}".format(issue.title, number)
    # create Page object
    page = Page(issue=issue, reel=reel, number=number, title=title, frame=frame)
    # optionally generate a file object for each file in the XML snippet
    for fptr in page_div.findall('METS:fptr', xmlns):
        fileid = fptr.get('FILEID')
        filexml = issue_mets.file(fileid)
        if 'ADMID' not in filexml.attrib:
            raise DataReadException(f'No ADMID found for {fileid}, cannot lookup technical metadata')
        # get technical metadata by type, keyed by MDTYPE
        # (or OTHERMDTYPE when MDTYPE is "OTHER")
        techmd = {}
        for admid in filexml.get('ADMID').split():
            t = issue_mets.techmd(admid)
            for mdwrap in t.findall('METS:mdWrap', xmlns):
                mdtype = mdwrap.get('MDTYPE')
                if mdtype == 'OTHER':
                    mdtype = mdwrap.get('OTHERMDTYPE')
                techmd[mdtype] = t
        use = filexml.get('USE')
        file_locator = filexml.find('METS:FLocat', xmlns)
        href = file_locator.get('{http://www.w3.org/1999/xlink}href')
        # resolve the href to a file inside the local issue directory
        localpath = os.path.join(self.dir, os.path.basename(href))
        basename = os.path.basename(localpath)
        mimetype = techmd['PREMIS'].find('.//premis:formatName', xmlns).text
        # pick the file class for this USE value; KeyError here would mean an
        # unexpected USE — presumably the vocabulary is closed (TODO confirm)
        file_class = FILE_CLASS_FOR[use]
        file = file_class.from_source(
            LocalFileSource(localpath, mimetype=mimetype),
            title=f'{basename} ({use})'
        )
        file.use = use
        file.basename = basename
        file.dcmitype = dcmitype.Text
        # image dimensions and resolution come from the MIX (NISOIMG)
        # metadata, which this code only consults for TIFFs
        if mimetype == 'image/tiff':
            file.width = techmd['NISOIMG'].find('.//mix:ImageWidth', xmlns).text
            file.height = techmd['NISOIMG'].find('.//mix:ImageLength', xmlns).text
            file.resolution = (
                int(techmd['NISOIMG'].find('.//mix:XSamplingFrequency', xmlns).text),
                int(techmd['NISOIMG'].find('.//mix:YSamplingFrequency', xmlns).text)
            )
        else:
            file.width = None
            file.height = None
            file.resolution = None
        page.add_file(file)
    page.parse_ocr()
    return page
# actions to take upon successful creation of object in repository
def post_creation_hook(self):
    """Append one row per reel-bearing page to that reel's CSV file.

    For every page in the issue that has a frame attribute, a
    (aggregation, sequence, uri) row is appended to
    <reel_csv_loc>/<reel>.csv, preserving whatever column order the
    existing CSV header declares.

    Raises OSError if a reel CSV does not already exist.
    """
    for page in self.issue.ordered_components():
        if hasattr(page, 'frame'):
            row = {'aggregation': page.reel,
                   'sequence': page.frame,
                   'uri': page.uri
                   }
            csv_path = os.path.join(
                self.reel_csv_loc, '{0}.csv'.format(page.reel)
            )
            # read the header with the csv module so quoted fieldnames
            # containing commas are parsed correctly
            with open(csv_path, 'r', newline='') as f:
                fieldnames = next(csv.reader(f))
            # newline='' is required by the csv module; without it the
            # writer emits blank rows on platforms using \r\n endings
            with open(csv_path, 'a', newline='') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writerow(row)
    self.logger.info('Completed post-creation actions')
class METSResource(object):
    """Thin convenience wrapper around a parsed METS XML document.

    Exposes ID-based lookups for the descriptive (dmdSec), file (fileSec),
    and technical (techMD) metadata sections, raising DataReadException
    with a descriptive message when an element cannot be found.
    """

    def __init__(self, xmldoc):
        self.root = xmldoc.getroot()
        # precompiled XPath evaluator bound to the METS namespace map
        self.xpath = lxml.etree.XPathElementEvaluator(self.root, namespaces=xmlns,
                                                      smart_strings=False)

    def _first(self, expression, id, element_name):
        """Return the first match of *expression* for *id*; raise if none."""
        matches = self.xpath(expression, id=id)
        if not matches:
            raise DataReadException(f'Cannot find {element_name} element with ID "{id}"')
        return matches[0]

    def dmdsec(self, id):
        """Return the METS:dmdSec element with the given ID."""
        return self._first('METS:dmdSec[@ID=$id]', id, 'METS:dmdSec')

    def file(self, id):
        """Return the METS:file element with the given ID."""
        return self._first('METS:fileSec//METS:file[@ID=$id]', id, 'METS:file')

    def techmd(self, id):
        """Return the METS:techMD element with the given ID."""
        return self._first('METS:amdSec/METS:techMD[@ID=$id]', id, 'METS:techMD')
|
# 6.00 Problem Set 8
#
# Name:
# Collaborators:
# Time:
import numpy
import random
import matplotlib.pyplot as plt
from ps7 import *
#
# PROBLEM 1
#
class ResistantVirus(SimpleVirus):
    """A virus particle that can carry drug-resistance traits."""

    def __init__(self, maxBirthProb, clearProb, resistances, mutProb):
        """Save all parameters as attributes of the instance.

        maxBirthProb: maximum reproduction probability (float in [0, 1]).
        clearProb: maximum clearance probability (float in [0, 1]).
        resistances: dict mapping drug names (str) to this particle's
            resistance state (True/False) for each drug, e.g.
            {'guttagonol': False, 'grimpex': False} means the particle is
            resistant to neither guttagonol nor grimpex.
        mutProb: probability (float) that an offspring acquires or loses
            resistance to any given drug.
        """
        SimpleVirus.__init__(self, maxBirthProb, clearProb)
        self.resistances = resistances
        self.mutProb = mutProb

    def isResistantTo(self, drug):
        """Return True if this particle is resistant to *drug* (a string).

        Drugs absent from the resistances dict count as not resisted.
        Used by Patient.getResistPop() to census resistant particles.
        """
        return self.resistances.get(drug, False)

    def reproduce(self, popDensity, activeDrugs):
        """Stochastically reproduce this particle for one time step.

        A particle only reproduces if it is resistant to every drug in
        activeDrugs; it then reproduces with probability
        self.maxBirthProb * (1 - popDensity). Each resistance trait is
        inherited with probability 1 - mutProb and flipped with
        probability mutProb.

        popDensity: current population divided by the maximum population.
        activeDrugs: list of drug names (str) acting on this particle.
        returns: a new ResistantVirus offspring with the same maxBirthProb
            and clearProb as this particle.
        Raises NoChildException when the particle does not reproduce.
        """
        # no offspring unless every active drug is resisted
        if not all(self.isResistantTo(drug) for drug in activeDrugs):
            raise NoChildException
        # stochastic birth gate
        if random.random() > self.maxBirthProb * (1 - popDensity):
            raise NoChildException
        # each trait flips with probability mutProb, else is inherited
        inherited = {
            drug: (not state) if random.random() < self.mutProb else state
            for drug, state in self.resistances.items()
        }
        return ResistantVirus(self.maxBirthProb, self.clearProb,
                              inherited, self.mutProb)
class Patient(SimplePatient):
    """
    Representation of a patient. The patient is able to take drugs and his/her
    virus population can acquire resistance to the drugs he/she takes.
    """

    def __init__(self, viruses, maxPop):
        """Save the virus population and capacity; start with no drugs.

        viruses: the list representing the virus population (a list of
            SimpleVirus instances)
        maxPop: the maximum virus population for this patient (an integer)
        """
        SimplePatient.__init__(self, viruses, maxPop)
        self.drugs = []

    def addPrescription(self, newDrug):
        """Administer a drug; it acts on all subsequent time steps.

        Adding a drug that is already prescribed has no effect.
        newDrug: the name of the drug to administer (a string).
        """
        if newDrug not in self.drugs:
            self.drugs.append(newDrug)

    def getPrescriptions(self):
        """Return the list of drug names currently being administered."""
        return self.drugs

    def getResistPop(self, drugResist):
        """Return the number of viruses resistant to ALL listed drugs.

        drugResist: list of drug names, e.g. ['guttagonol'] or
            ['guttagonol', 'grimpex'].

        Fix: the original flag-based loop reset its flag on every
        iteration and returned 0 for an empty drugResist list; per the
        documented contract ("resistant to all drugs in the list") an
        empty list now vacuously counts the whole population.
        """
        return sum(
            1 for virus in self.viruses
            if all(virus.isResistantTo(drug) for drug in drugResist)
        )

    def update(self):
        """Advance the simulation one time step; return the population.

        Order of operations:
        1. remove viruses that clear this step (doesClear());
        2. reproduce survivors using the population density computed at
           the END of the previous update() call, honouring the active
           drug list;
        3. recompute popDensity for the next call.
        returns: the total virus population after the update (int).
        """
        density = self.popDensity
        # survival pass; slice-assignment keeps the same list object in
        # case other code holds a reference to it
        self.viruses[:] = [v for v in self.viruses if not v.doesClear()]
        # reproduction pass: collect offspring, then extend afterwards so
        # newborns are not themselves iterated this step
        active = self.getPrescriptions()
        children = []
        for virus in self.viruses:
            try:
                child = virus.reproduce(density, active)
            except NoChildException:
                continue
            children.append(child)
        self.viruses.extend(children)
        self.popDensity = float(self.getTotalPop()) / float(self.maxPop)
        return len(self.viruses)
#
# PROBLEM 2
#
def simulationWithDrug():
    """
    Runs simulations and plots graphs for problem 4.
    Instantiates a patient, runs a simulation for 150 timesteps, adds
    guttagonol, and runs the simulation for an additional 150 timesteps.
    Total virus population vs. time and guttagonol-resistant virus
    population vs. time are plotted, averaged over 50 trials.
    """
    numTrials = 50
    totalSums = {}
    resistSums = {}
    # accumulate per-step populations across all trials
    for _ in range(numTrials):
        trialTotal, trialResist = simOneTrial(0, 150, {__DRUG_GUTTAGONOL__: False})
        for step, pop in trialTotal.items():
            totalSums[step] = totalSums.get(step, 0) + pop
        for step, pop in trialResist.items():
            resistSums[step] = resistSums.get(step, 0) + pop
    # convert the sums to per-step trial averages
    avgTotal = {step: float(pop) / float(numTrials) for step, pop in totalSums.items()}
    avgResist = {step: float(pop) / float(numTrials) for step, pop in resistSums.items()}
    steps = range(1, 0 + 150 + 1)
    plt.plot(steps, [avgTotal[s] for s in steps], 'b', label='total population')
    plt.plot(steps, [avgResist[s] for s in steps], 'b:', label='resist population')
    plt.xlabel('times/steps')
    plt.ylabel('virus populaton')
    plt.suptitle('viruses population for steps')
    plt.legend()
    plt.show()
# Drug-name constants shared by the simulation helpers in this module.
# NOTE(review): dunder-style names are unconventional for module constants
# (UPPER_SNAKE_CASE is idiomatic), but renaming would touch every caller,
# so they are kept as-is.
__DRUG_GUTTAGONOL__ = 'guttagonol'
__DRUG_GRIMPEX__ = 'grimpex'
def simOneTrial(stepBeforeDrug, stepAfterDrug, resistances):
    """Run one single-drug trial and record per-step populations.

    Simulates a patient with 100 starting viruses for stepBeforeDrug
    steps, administers guttagonol, then runs stepAfterDrug further steps.

    resistances: initial resistance dict given to every starting virus.
    Returns (totalByStep, resistByStep): two dicts mapping 1-based step
    numbers to the total population and the guttagonol-resistant
    population at that step.
    """
    viruses = [ResistantVirus(0.1, 0.05, resistances, 0.005) for _ in range(100)]
    patient = Patient(viruses, 1000)
    popTotal = []
    popResist = []

    def record():
        # one simulation step plus its resistant-population census
        popTotal.append(patient.update())
        popResist.append(patient.getResistPop([__DRUG_GUTTAGONOL__]))

    for _ in range(stepBeforeDrug):
        record()
    patient.addPrescription(__DRUG_GUTTAGONOL__)
    for _ in range(stepAfterDrug):
        record()
    mPop = {step: pop for step, pop in enumerate(popTotal, start=1)}
    mResist = {step: pop for step, pop in enumerate(popResist, start=1)}
    return (mPop, mResist)
#
# PROBLEM 3
#
def simulationDelayedTreatment():
    """
    Runs simulations and make histograms for problem 5.
    Runs multiple simulations to show the relationship between delayed
    treatment and patient outcome. Histograms of final total virus
    populations are displayed for delays of 300, 150, 75, 0 timesteps
    (each followed by an additional 150 timesteps of simulation).
    """
    stepAfterDrug = 150
    numTrials = 100
    for plotIndex, stepBeforeDrug in enumerate([300, 150, 75, 0], start=1):
        finalPops = []
        for _ in range(numTrials):
            totalByStep, _resistByStep = simOneTrial(
                stepBeforeDrug, stepAfterDrug, {__DRUG_GUTTAGONOL__: False})
            # final total population for this trial
            finalPops.append(totalByStep[stepBeforeDrug + stepAfterDrug])
        plt.subplot(2, 2, plotIndex)
        plt.hist(finalPops)
        plt.suptitle(str(stepBeforeDrug) + ' steps delay drug administration')
        plt.legend()
    plt.show()
#
# PROBLEM 4
#
def simOneTrial2(stepBeforeDrug1, stepBeforeDrug2, stepAfterDrug, resistances):
    """Run one two-drug trial and return the final total virus population.

    A patient with 100 starting viruses is simulated for stepBeforeDrug1
    steps, then guttagonol is administered; after stepBeforeDrug2 further
    steps grimpex is added, and the simulation runs for stepAfterDrug
    more steps.

    resistances: initial resistance dict given to every starting virus.
    returns: total virus population (int) after the final step, or 0 when
        no steps are simulated at all.
    """
    # fix: removed unused locals (drugList, popTotal, popResist) that were
    # never read
    viruses = [ResistantVirus(0.1, 0.05, resistances, 0.005) for _ in range(100)]
    patient = Patient(viruses, 1000)
    numTotal = 0
    for _ in range(stepBeforeDrug1):
        numTotal = patient.update()
    patient.addPrescription(__DRUG_GUTTAGONOL__)
    for _ in range(stepBeforeDrug2):
        numTotal = patient.update()
    patient.addPrescription(__DRUG_GRIMPEX__)
    for _ in range(stepAfterDrug):
        numTotal = patient.update()
    return numTotal
def simulationTwoDrugsDelayedTreatment():
    """
    Runs simulations and make histograms for problem 6.
    Runs multiple simulations to show the relationship between the
    administration of multiple drugs and patient outcome. Histograms of
    final total virus populations are displayed for lag times of 300,
    150, 75, 0 timesteps between adding the two drugs (each followed by
    an additional 150 timesteps of simulation).
    """
    stepBeforeDrug1 = 150
    stepAfterDrug = 150
    numTrials = 100
    for plotIndex, stepBeforeDrug2 in enumerate([300, 150, 75, 0], start=1):
        finalPops = [
            simOneTrial2(stepBeforeDrug1, stepBeforeDrug2, stepAfterDrug,
                         {__DRUG_GUTTAGONOL__: False, __DRUG_GRIMPEX__: False})
            for _ in range(numTrials)
        ]
        plt.subplot(2, 2, plotIndex)
        plt.hist(finalPops)
        plt.legend()
    plt.show()
#
# PROBLEM 5
#
def simulationTwoDrugsVirusPopulations():
    """
    Run simulations and plot graphs examining the relationship between
    administration of multiple drugs and patient outcome.

    NOTE(review): only the simultaneous-administration case (0-step gap
    between the two drugs) is actually run here; the 300-step delayed
    case described in the assignment would be runSim(100, 150, 300, 150).
    """
    runSim(100, 150, 0, 150)
def runSim(numTrials, stepBeforeDrug1, stepBeforeDrug2, stepAfterDrug):
    """Average two-drug virus trajectories over numTrials and plot them.

    numTrials: number of independent patients to simulate.
    stepBeforeDrug1: steps simulated before guttagonol is administered.
    stepBeforeDrug2: further steps before grimpex is administered.
    stepAfterDrug: steps simulated after both drugs are active.

    Plots the total population and the populations resistant to
    guttagonol, to grimpex, and to both, averaged over all trials.
    """
    drugList = [__DRUG_GUTTAGONOL__, __DRUG_GRIMPEX__]
    resistances = {__DRUG_GRIMPEX__: False, __DRUG_GUTTAGONOL__: False}
    popTotal = {}
    popResistToGut = {}
    popResistToGrim = {}
    popResistBoth = {}

    def f():
        # accumulate this step's counts into the per-step running sums;
        # reads `count` and `patient` from the enclosing scope at call time
        popTotal[count] = popTotal.get(count, 0) + patient.update()
        popResistToGut[count] = popResistToGut.get(count, 0) + patient.getResistPop([__DRUG_GUTTAGONOL__])
        popResistToGrim[count] = popResistToGrim.get(count, 0) + patient.getResistPop([__DRUG_GRIMPEX__])
        popResistBoth[count] = popResistBoth.get(count, 0) + patient.getResistPop(drugList)

    for t in range(numTrials):
        viruses = []
        count = 1
        for i in range(100):
            viruses.append(ResistantVirus(0.1, 0.05, resistances, 0.005))
        patient = Patient(viruses, 1000)
        for i in range(stepBeforeDrug1):
            f()
            count += 1
        patient.addPrescription(__DRUG_GUTTAGONOL__)
        for i in range(stepBeforeDrug2):
            f()
            count += 1
        patient.addPrescription(__DRUG_GRIMPEX__)
        for i in range(stepAfterDrug):
            f()
            count += 1
    x = range(1, stepBeforeDrug1 + stepBeforeDrug2 + stepAfterDrug + 1)
    yTotal = [float(popTotal[i]) / float(numTrials) for i in x]
    yResistGut = [float(popResistToGut[i]) / float(numTrials) for i in x]
    yResistGrim = [float(popResistToGrim[i]) / float(numTrials) for i in x]
    yResistBoth = [float(popResistBoth[i]) / float(numTrials) for i in x]
    # bug fix: `print` is a function in Python 3, not a statement
    print(len(yTotal), len(yResistGut), len(yResistGrim), len(yResistBoth))
    plt.plot(x, yTotal, '1', label='population of total virus')
    plt.plot(x, yResistGut, '2', label='population of virus resistent to guttagonol')
    plt.plot(x, yResistGrim, '-', label='population of virus resistent to grimpex')
    plt.plot(x, yResistBoth, '.', label='poplulation fo virus resisten to both drug')
    plt.xlabel('time/step')
    plt.ylabel('population of virus')
    plt.suptitle('virus population dynamics with drug')
    plt.legend()
    plt.show()
# simulationDelayedTreatment()
# simulationWithDrug()
# simulationTwoDrugsDelayedTreatment()
simulationTwoDrugsVirusPopulations() |
# Inherited base configs: dataset, LR schedule, and default runtime.
# NOTE(review): the default_runtime path uses '../../_base_' while the other
# two use '../_base_' — confirm both relative depths are correct for this
# config's location in the tree.
_base_ = [
    '../_base_/datasets/dota.py',
    '../_base_/schedules/schedule_1x.py',
    '../../_base_/default_runtime.py'
]
# Oriented R-CNN detector with a Swin Transformer backbone (the
# depths/heads/embed_dims below match the Swin-Tiny variant).
model = dict(
    type='OrientedRCNN',
    backbone=dict(
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,  # stochastic depth
        patch_norm=True,
        out_indices=(0, 1, 2, 3),  # feed all four stages to the neck
        with_cp=False,  # gradient checkpointing disabled
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint='swin_tiny_patch4_window7_224.pth')),
    # FPN over the four backbone stages, plus one extra level (num_outs=5)
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    # oriented RPN producing rotated proposals via midpoint-offset encoding
    rpn_head=dict(
        type='OrientedRPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[6],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='MidpointOffsetCoder',
            target_means=[.0, .0, .0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0, 0.5, 0.5]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    # second stage: rotated RoIAlign + shared 2-FC bbox head over OBBs
    roi_head=dict(
        type='OBBStandardRoIHead',
        bbox_roi_extractor=dict(
            type='OBBSingleRoIExtractor',
            roi_layer=dict(type='RoIAlignRotated', out_size=7, sample_num=2),
            out_channels=256,
            extend_factor=(1.4, 1.2),
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='OBBShared2FCBBoxHead',
            start_bbox_type='obb',
            end_bbox_type='obb',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=37,
            bbox_coder=dict(
                type='OBB2OBBDeltaXYWHTCoder',
                target_means=[0., 0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2, 0.1]),
            reg_class_agnostic=True,
            loss_cls=dict(
                type='CrossEntropyLoss',
                use_sigmoid=False,
                loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                           loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
    # first-stage (RPN) assignment and sampling
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            match_low_quality=True,
            gpu_assign_thr=200,  # assign on CPU above this many GTs
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=1000,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    # proposal post-processing during training
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=5000,
        nms_post=5000,
        max_num=5000,
        nms_thr=0.8,
        min_bbox_size=0),
    # second-stage (RoI head) assignment and sampling on oriented boxes
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            match_low_quality=False,
            ignore_iof_thr=-1,
            iou_calculator=dict(type='OBBOverlaps')),
        sampler=dict(
            type='OBBRandomSampler',
            num=2000,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=5000,
        nms_post=5000,
        max_num=5000,
        nms_thr=0.8,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.005, nms=dict(type='obb_nms', iou_thr=0.1), max_per_img=2000))
# AdamW with weight decay disabled on norm layers and position embeddings,
# the usual recipe for Swin backbones; _delete_ drops the inherited optimizer
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
# step LR decay at epochs 9 and 11 (1x schedule)
lr_config = dict(warmup_iters=1000, step=[9, 11])
runner = dict(max_epochs=12) |
<gh_stars>0
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class PreEstablishedSrLsps(Base):
    """Pre-Established SR LSPs
    The PreEstablishedSrLsps class encapsulates a required preEstablishedSrLsps resource which will be retrieved from the server every time the property is accessed.
    """
    # no per-instance attributes beyond those defined by Base
    __slots__ = ()
    # server-side resource name for this SDM object
    _SDM_NAME = 'preEstablishedSrLsps'
    # maps the Python property names below to the REST attribute names
    # used on the server
    _SDM_ATT_MAP = {
        'Active': 'active',
        'ActiveDataTrafficEndpoint': 'activeDataTrafficEndpoint',
        'AssociationId': 'associationId',
        'Bandwidth': 'bandwidth',
        'BindingType': 'bindingType',
        'Bos': 'bos',
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'DestinationIpv4Address': 'destinationIpv4Address',
        'ExcludeAny': 'excludeAny',
        'HoldingPriority': 'holdingPriority',
        'IncludeAll': 'includeAll',
        'IncludeAny': 'includeAny',
        'IncludeBandwidth': 'includeBandwidth',
        'IncludeConfiguredERO': 'includeConfiguredERO',
        'IncludeEro': 'includeEro',
        'IncludeLsp': 'includeLsp',
        'IncludeLspa': 'includeLspa',
        'IncludeMetric': 'includeMetric',
        'IncludePpag': 'includePpag',
        'IncludeSrp': 'includeSrp',
        'IncludeSymbolicPathNameTlv': 'includeSymbolicPathNameTlv',
        'IncludeTEPathBindingTLV': 'includeTEPathBindingTLV',
        'InitialDelegation': 'initialDelegation',
        'InsertIpv6ExplicitNull': 'insertIpv6ExplicitNull',
        'LocalProtection': 'localProtection',
        'LspDelegationState': 'lspDelegationState',
        'MplsLabel': 'mplsLabel',
        'Name': 'name',
        'NumberOfEroSubObjects': 'numberOfEroSubObjects',
        'NumberOfMetricSubObject': 'numberOfMetricSubObject',
        'OverridePlspId': 'overridePlspId',
        'PlspId': 'plspId',
        'ProtectionLspBit': 'protectionLspBit',
        'ReDelegationTimerStatus': 'reDelegationTimerStatus',
        'RedelegationTimeoutInterval': 'redelegationTimeoutInterval',
        'SetupPriority': 'setupPriority',
        'SrcEndPointIpv4': 'srcEndPointIpv4',
        'SrcEndPointIpv6': 'srcEndPointIpv6',
        'Srv6SID': 'srv6SID',
        'StandbyLspBit': 'standbyLspBit',
        'SymbolicPathName': 'symbolicPathName',
        'Tc': 'tc',
        'Ttl': 'ttl',
    }
def __init__(self, parent):
    """Attach this resource to *parent* in the REST object hierarchy."""
    super(PreEstablishedSrLsps, self).__init__(parent)
# Child resource accessors. The imports are performed lazily inside each
# property — presumably to avoid circular imports in this generated SDK
# (TODO confirm against the code generator's conventions).
@property
def PcepEroSubObjectsList(self):
    """
    Returns
    -------
    - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pceperosubobjectslist_7ea27079d1a1d53cebc6e1e83b2ca0b4.PcepEroSubObjectsList): An instance of the PcepEroSubObjectsList class
    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pceperosubobjectslist_7ea27079d1a1d53cebc6e1e83b2ca0b4 import PcepEroSubObjectsList
    return PcepEroSubObjectsList(self)
@property
def PcepMetricSubObjectsList(self):
    """
    Returns
    -------
    - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pcepmetricsubobjectslist_b1398d82dd25e8e98d50662ebf5ba3d1.PcepMetricSubObjectsList): An instance of the PcepMetricSubObjectsList class
    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.pcepmetricsubobjectslist_b1398d82dd25e8e98d50662ebf5ba3d1 import PcepMetricSubObjectsList
    return PcepMetricSubObjectsList(self)
@property
def Tag(self):
    """
    Returns
    -------
    - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
    return Tag(self)
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def ActiveDataTrafficEndpoint(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Specifies whether that specific Data Traffic Endpoint will generate data traffic
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActiveDataTrafficEndpoint']))
@property
def AssociationId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The Association ID of this LSP.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AssociationId']))
@property
def Bandwidth(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Bandwidth (bits/sec)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bandwidth']))
@property
def BindingType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates the type of binding included in the TLV. Types are as follows: 20bit MPLS Label 32bit MPLS Label. SRv6 SID Default value is 20bit MPLS Label.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BindingType']))
@property
def Bos(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This bit is set to True for the last entry in the label stack i.e., for the bottom of the stack, and False for all other label stack entries. This control will be editable only if Binding Type is MPLS Label 32bit.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bos']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DestinationIpv4Address(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Destination IPv4 Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DestinationIpv4Address']))
@property
def ExcludeAny(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is a type of Resource Affinity Procedure that is used to validate a link. This control accepts a link only if the link carries all of the attributes in the set.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExcludeAny']))
@property
def HoldingPriority(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The priority of the LSP with respect to holding resources. The value 0 is the highest priority. Holding Priority is used in deciding whether this session can be preempted by another session.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldingPriority']))
@property
def IncludeAll(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is a type of Resource Affinity Procedure that is used to validate a link. This control excludes a link from consideration if the link carries any of the attributes in the set.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeAll']))
@property
def IncludeAny(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is a type of Resource Affinity Procedure that is used to validate a link. This control accepts a link if the link carries any of the attributes in the set.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeAny']))
@property
def IncludeBandwidth(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether Bandwidth will be included in a PCInitiate message. All other attributes in sub-tab-Bandwidth would be editable only if this checkbox is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeBandwidth']))
@property
def IncludeConfiguredERO(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If this is enabled, entire ERO will go out in packet even if there is Binding SID, which means no SR-ERO/SRv6-ERO validation will be done.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeConfiguredERO']))
@property
def IncludeEro(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Specifies whether ERO is active or inactive. All subsequent attributes of the sub-tab-ERO would be editable only if this is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeEro']))
@property
def IncludeLsp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether LSP will be included in a PCInitiate message. All other attributes in sub-tab-LSP would be editable only if this checkbox is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeLsp']))
@property
def IncludeLspa(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether LSPA will be included in a PCInitiate message. All other attributes in sub-tab-LSPA would be editable only if this checkbox is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeLspa']))
@property
def IncludeMetric(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether the PCInitiate message will have the metric list that is configured. All subsequent attributes of the sub-tab-Metric would be editable only if this is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeMetric']))
@property
def IncludePpag(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether Association will be included in a Sync PCReport message. All other attributes in sub-tab-PPAG would be editable only if this checkbox is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludePpag']))
@property
def IncludeSrp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates whether SRP object will be included in a PCInitiate message. All other attributes in sub-tab-SRP would be editable only if this checkbox is enabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeSrp']))
@property
def IncludeSymbolicPathNameTlv(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates if Symbolic-Path-Name TLV is to be included in PCInitiate message.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeSymbolicPathNameTlv']))
@property
def IncludeTEPathBindingTLV(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates if TE-PATH-BINDING TLV is to be included in PCC Sync LSP.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeTEPathBindingTLV']))
@property
def InitialDelegation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Initial Delegation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InitialDelegation']))
@property
def InsertIpv6ExplicitNull(self):
    """bool: insert an IPv6 Explicit Null MPLS header when the traffic type is IPv6."""
    key = self._SDM_ATT_MAP['InsertIpv6ExplicitNull']
    return self._get_attribute(key)
@InsertIpv6ExplicitNull.setter
def InsertIpv6ExplicitNull(self, value):
    key = self._SDM_ATT_MAP['InsertIpv6ExplicitNull']
    self._set_attribute(key, value)
@property
def LocalProtection(self):
    """Multivalue: when set, the path must include links protected with Fast Reroute."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['LocalProtection'])
    return Multivalue(self, raw)
@property
def LspDelegationState(self):
    """list(str): LSP delegation state, one of delegated | delegationConfirmed | delegationRejected | delegationReturned | delegationRevoked | nonDelegated | none."""
    key = self._SDM_ATT_MAP['LspDelegationState']
    return self._get_attribute(key)
@property
def MplsLabel(self):
    """Multivalue: 20-bit MPLS label value; editable only when Binding Type is 20-bit or 32-bit MPLS-Label."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['MplsLabel'])
    return Multivalue(self, raw)
@property
def Name(self):
    """str: name of the NGPF element, guaranteed to be unique within the Scenario."""
    key = self._SDM_ATT_MAP['Name']
    return self._get_attribute(key)
@Name.setter
def Name(self, value):
    key = self._SDM_ATT_MAP['Name']
    self._set_attribute(key, value)
@property
def NumberOfEroSubObjects(self):
    """number: how many ERO Sub Objects are to be configured."""
    key = self._SDM_ATT_MAP['NumberOfEroSubObjects']
    return self._get_attribute(key)
@NumberOfEroSubObjects.setter
def NumberOfEroSubObjects(self, value):
    key = self._SDM_ATT_MAP['NumberOfEroSubObjects']
    self._set_attribute(key, value)
@property
def NumberOfMetricSubObject(self):
    """number: how many Metric Objects are to be configured."""
    key = self._SDM_ATT_MAP['NumberOfMetricSubObject']
    return self._get_attribute(key)
@NumberOfMetricSubObject.setter
def NumberOfMetricSubObject(self, value):
    key = self._SDM_ATT_MAP['NumberOfMetricSubObject']
    self._set_attribute(key, value)
@property
def OverridePlspId(self):
    """bool: whether PLSP-ID is set by the user (True) or by the state machine (False)."""
    key = self._SDM_ATT_MAP['OverridePlspId']
    return self._get_attribute(key)
@OverridePlspId.setter
def OverridePlspId(self, value):
    key = self._SDM_ATT_MAP['OverridePlspId']
    self._set_attribute(key, value)
@property
def PlspId(self):
    """Multivalue: LSP identifier; a PCC creates a unique PLSP-ID per LSP, constant for the lifetime of a PCEP session and advertised on all sessions it maintains."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['PlspId'])
    return Multivalue(self, raw)
@property
def ProtectionLspBit(self):
    """Multivalue: whether the Protection LSP Bit is on."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['ProtectionLspBit'])
    return Multivalue(self, raw)
@property
def ReDelegationTimerStatus(self):
    """list(str): Re-Delegation Timer Status, one of expired | none | notStarted | running | stopped."""
    key = self._SDM_ATT_MAP['ReDelegationTimerStatus']
    return self._get_attribute(key)
@property
def RedelegationTimeoutInterval(self):
    """Multivalue: how long a PCC waits after a PCEP session terminates before revoking delegation and redelegating the affected LSPs to another PCE."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['RedelegationTimeoutInterval'])
    return Multivalue(self, raw)
@property
def SetupPriority(self):
    """Multivalue: LSP priority for taking resources (0 is highest); used to decide whether this session can preempt another."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['SetupPriority'])
    return Multivalue(self, raw)
@property
def SrcEndPointIpv4(self):
    """Multivalue: source IPv4 address."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['SrcEndPointIpv4'])
    return Multivalue(self, raw)
@property
def SrcEndPointIpv6(self):
    """Multivalue: source IPv6 address."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['SrcEndPointIpv6'])
    return Multivalue(self, raw)
@property
def Srv6SID(self):
    """Multivalue: SRv6 SID, formatted as a 16-byte IPv6 address."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['Srv6SID'])
    return Multivalue(self, raw)
@property
def StandbyLspBit(self):
    """Multivalue: whether the Standby LSP Bit is on."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['StandbyLspBit'])
    return Multivalue(self, raw)
@property
def SymbolicPathName(self):
    """Multivalue: symbolic name for the LSP, unique within the PCC and constant for the path's lifetime (may span multiple PCEP sessions and PCC restarts)."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['SymbolicPathName'])
    return Multivalue(self, raw)
@property
def Tc(self):
    """Multivalue: traffic-class bits; editable only when Binding Type is MPLS Label 32bit."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['Tc'])
    return Multivalue(self, raw)
@property
def Ttl(self):
    """Multivalue: time-to-live value; editable only when Binding Type is MPLS Label 32bit."""
    from uhd_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['Ttl'])
    return Multivalue(self, raw)
def update(self, InsertIpv6ExplicitNull=None, Name=None, NumberOfEroSubObjects=None, NumberOfMetricSubObject=None, OverridePlspId=None):
    """Update the preEstablishedSrLsps resource on the server.

    Only the arguments that are supplied are sent. Multivalue-typed
    attributes are documented on the Multivalue class.

    Raises:
        ServerError: the server encountered an uncategorized error condition.
    """
    # locals() must be captured before any additional local names exist.
    attrs = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._update(attrs)
def get_device_ids(self, PortNames=None, Active=None, ActiveDataTrafficEndpoint=None, AssociationId=None, Bandwidth=None, BindingType=None, Bos=None, DestinationIpv4Address=None, ExcludeAny=None, HoldingPriority=None, IncludeAll=None, IncludeAny=None, IncludeBandwidth=None, IncludeConfiguredERO=None, IncludeEro=None, IncludeLsp=None, IncludeLspa=None, IncludeMetric=None, IncludePpag=None, IncludeSrp=None, IncludeSymbolicPathNameTlv=None, IncludeTEPathBindingTLV=None, InitialDelegation=None, LocalProtection=None, MplsLabel=None, PlspId=None, ProtectionLspBit=None, RedelegationTimeoutInterval=None, SetupPriority=None, SrcEndPointIpv4=None, SrcEndPointIpv6=None, Srv6SID=None, StandbyLspBit=None, SymbolicPathName=None, Tc=None, Ttl=None):
    """Return the preEstablishedSrLsps device ids encapsulated by this object.

    Every argument is an optional regex (str) used to refine the returned
    set; each argument name matches the attribute of the same name on this
    class (PortNames filters on port names).

    Returns:
        list(int): device ids matching all supplied regex criteria.

    Raises:
        ServerError: the server encountered an uncategorized error condition.
    """
    # locals() forwards every supplied filter to the lookup; do not introduce
    # additional local names before this call.
    return self._get_ngpf_device_ids(locals())
def Delegate(self, *args, **kwargs):
    """Execute the delegate operation on the server.

    delegate(Arg2=list(number)) -> list(str)
        Arg2 lists indices into the protocol plugin (empty list means all
        instances); returns one async-invocation id per action.

    Raises:
        NotFoundError: the requested resource does not exist on the server.
        ServerError: the server encountered an uncategorized error condition.
    """
    payload = {"Arg1": self.href}
    # Positional arguments map to Arg2, Arg3, ... on the wire.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('delegate', payload=payload, response_object=None)
def RevokeDelegation(self, *args, **kwargs):
    """Execute the revokeDelegation operation on the server.

    revokeDelegation(Arg2=list(number)) -> list(str)
        Arg2 lists indices into the protocol plugin (empty list means all
        instances); returns one async-invocation id per action.

    Raises:
        NotFoundError: the requested resource does not exist on the server.
        ServerError: the server encountered an uncategorized error condition.
    """
    payload = {"Arg1": self.href}
    # Positional arguments map to Arg2, Arg3, ... on the wire.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    payload.update(kwargs)
    return self._execute('revokeDelegation', payload=payload, response_object=None)
# --- concatenation artifact removed: boundary before the Spack BerkeleyDb package ---
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class BerkeleyDb(AutotoolsPackage):
    """Oracle Berkeley DB"""

    homepage = "https://www.oracle.com/database/technologies/related/berkeleydb.html"
    # URL must remain http:// so Spack can bootstrap curl
    url = "http://download.oracle.com/berkeley-db/db-18.1.40.tar.gz"

    version("18.1.40", sha256="0cecb2ef0c67b166de93732769abdeba0555086d51de1090df325e18ee8da9c8")
    version('18.1.32', sha256='fa1fe7de9ba91ad472c25d026f931802597c29f28ae951960685cde487c8d654', deprecated=True)
    version('6.2.32', sha256='a9c5e2b004a5777aa03510cfe5cd766a4a3b777713406b02809c17c8e0e7a8fb')
    version('6.1.29', sha256='b3c18180e4160d97dd197ba1d37c19f6ea2ec91d31bbfaf8972d99ba097af17d')
    version('6.0.35', sha256='24421affa8ae436fe427ae4f5f2d1634da83d3d55a5ad6354a98eeedb825de55', deprecated=True)
    version('5.3.28', sha256='e0a992d740709892e81f9d93f06daf305cf73fb81b545afe72478043172c3628')

    # Fix: this variant was missing a description, unlike its siblings;
    # recent Spack releases reject variants declared without one.
    variant('docs', default=False, description='Build documentation')
    variant('cxx', default=False, description='Build with C++ API')
    variant('stl', default=False, description='Build with C++ STL API')

    configure_directory = 'dist'
    build_directory = 'build_unix'

    patch("drop-docs.patch", when='~docs')

    conflicts('%clang@7:', when='@5.3.28')
    conflicts('%gcc@8:', when='@5.3.28')
    conflicts('+stl', when='~cxx', msg='+stl implies +cxx')

    def patch(self):
        """Strip Makefile targets for docs missing from the 18.1.40 tarball."""
        # some of the docs are missing in 18.1.40
        if self.spec.satisfies("@18.1.40"):
            filter_file(r'bdb-sql', '', 'dist/Makefile.in')
            filter_file(r'gsg_db_server', '', 'dist/Makefile.in')

    def configure_args(self):
        """Assemble ./configure flags from the active spec/variants."""
        spec = self.spec
        config_args = [
            '--disable-static',
            '--enable-dbm',
            # compat with system berkeley-db on darwin
            "--enable-compat185",
            # SSL support requires OpenSSL, but OpenSSL depends on Perl, which
            # depends on Berkey DB, creating a circular dependency
            '--with-repmgr-ssl=no',
        ]
        config_args += self.enable_or_disable('cxx')
        config_args += self.enable_or_disable('stl')
        # The default glibc provided by CentOS 7 and Red Hat 8 does not provide
        # proper atomic support when using the NVIDIA compilers
        if (spec.satisfies('%nvhpc')
                and (spec.satisfies('os=centos7') or spec.satisfies('os=rhel8'))):
            config_args.append('--disable-atomicsupport')
        return config_args

    def test(self):
        """Perform smoke tests on the installed package binaries."""
        exes = [
            'db_checkpoint', 'db_deadlock', 'db_dump', 'db_load',
            'db_printlog', 'db_stat', 'db_upgrade', 'db_verify'
        ]
        for exe in exes:
            reason = 'test version of {0} is {1}'.format(exe, self.spec.version)
            self.run_test(exe, ['-V'], [self.spec.version.string],
                          installed=True, purpose=reason, skip_missing=True)
# --- concatenation artifact removed: boundary before check50 tests ---
# repo: agucova/cs42
import check50
@check50.check()
def muro_0():
    """muro_0"""
    wall = "V--V----V--P--VV---V------V--P--V--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("60", regex=False).exit(0)
@check50.check()
def muro_1():
    """muro_1"""
    wall = "V--P--V--P--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_2():
    """muro_2"""
    wall = "V-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_3():
    """muro_3"""
    wall = "V--P---P--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("30", regex=False).exit(0)
@check50.check()
def muro_4():
    """muro_4"""
    wall = "V---P--P-V-V-VP-V------V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("60", regex=False).exit(0)
@check50.check()
def muro_5():
    """muro_5"""
    wall = "V---P-P---V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("30", regex=False).exit(0)
@check50.check()
def muro_6():
    """muro_6"""
    wall = "V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_7():
    """muro_7"""
    wall = "VVVPPPVV-VVVPPPVVV"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_8():
    """muro_8"""
    wall = "V-P--V--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_9():
    """muro_9"""
    wall = "V---V--P-----P-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("50", regex=False).exit(0)
@check50.check()
def muro_10():
    """muro_10"""
    wall = "V-P--P--P-P--P--P---P--P-----------VVVV"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("110", regex=False).exit(0)
@check50.check()
def muro_11():
    """muro_11"""
    wall = "V--------------------V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("200", regex=False).exit(0)
@check50.check()
def muro_12():
    """muro_12"""
    wall = "V-P-VV-P-P--------------------VV-PVVP-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("200", regex=False).exit(0)
@check50.check()
def muro_13():
    """muro_13"""
    wall = "VV-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_14():
    """muro_14"""
    wall = "VV--P--P--V--V--P--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_15():
    """muro_15"""
    wall = "V--P---P--V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("30", regex=False).exit(0)
@check50.check()
def muro_16():
    """muro_16"""
    wall = "VVV--VVV--P-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_17():
    """muro_17"""
    wall = "PV----------PV--PVV-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("100", regex=False).exit(0)
@check50.check()
def muro_18():
    """muro_18"""
    wall = "V-V-V-V-V-V-V-P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_19():
    """muro_19"""
    wall = "VP-PV----------------PVPVPVV"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("160", regex=False).exit(0)
@check50.check()
def muro_20():
    """muro_20"""
    wall = "V-V--VVV-VV-P-----PV-PV-P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("50", regex=False).exit(0)
@check50.check()
def muro_21():
    """muro_21"""
    wall = "P-P-P-V-V-V-V-VV-----P-P-PV-PV"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("50", regex=False).exit(0)
@check50.check()
def muro_22():
    """muro_22"""
    wall = "V-V-V-V-V-VP-P-P-P-P-P-PV-PVP-P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_23():
    """muro_23"""
    wall = "P-P-P-P-VP-VP-V-V-V-PV-PV--PV-P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_24():
    """muro_24"""
    wall = "V-P-V-PV-P-P---V-V-V-V-V"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("30", regex=False).exit(0)
@check50.check()
def muro_25():
    """muro_25"""
    wall = "P-P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("10", regex=False).exit(0)
@check50.check()
def muro_26():
    """muro_26"""
    wall = "V-V-V-V-V-V-V-V-V-VV--V-VP"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("20", regex=False).exit(0)
@check50.check()
def muro_27():
    """muro_27"""
    wall = "V--------------------------P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("260", regex=False).exit(0)
@check50.check()
def muro_28():
    """muro_28"""
    wall = "P-------------V-----------P"
    check50.run("python3 costear.py").stdin(wall, prompt=False).stdout("130", regex=False).exit(0)
@check50.check()
def muro_29():
    """muro_29"""
    # Fix: this check ended with `.exit()` which only returns the exit code
    # without asserting it; every other check asserts `.exit(0)`.
    check50.run("python3 costear.py").stdin("V----V-----V----V----V---V---V--P", prompt=False).stdout("50", regex=False).exit(0)
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import randint, choice
import pytest
from packtype import Constant
from .common import random_str
def test_constant_default():
    """ Test a constant's default behaviour: no value, unsigned 32-bit, unnamed """
    var = Constant()
    # Fix: use identity comparisons for None and booleans (PEP 8) instead of
    # `== None` / `== False`.
    assert var.value is None
    assert var.width == 32
    assert var._pt_width == 32
    assert var.signed is False
    assert var._pt_signed is False
    assert var.name is None
    assert var._pt_name is None
    assert var.desc is None
    assert var._pt_desc is None
def test_constant_init():
    """ Create a constant and check the properties are registered """
    width = randint(1, 64)
    signed = choice((True, False))
    name = random_str(10)
    desc = random_str(30)
    # Pick a value within the representable range for the chosen signedness.
    if signed:
        value = randint(-(1 << (width - 1)), (1 << (width - 1)) - 1)
    else:
        value = randint(0, (1 << width) - 1)
    var = Constant(value=value, width=width, signed=signed, name=name, desc=desc)
    # Public properties and private _pt_* attributes must both reflect the
    # constructor arguments.
    assert var.value == value
    assert int(var) == value
    assert var.width == width
    assert var._pt_width == width
    assert var.signed == signed
    assert var._pt_signed == signed
    assert var.name == name
    assert var._pt_name == name
    assert var.desc == desc
    assert var._pt_desc == desc
def test_constant_alter_desc():
    """ Create a constant, then alter the description """
    name = random_str(10)
    desc = random_str(30)
    var = Constant(value=randint(0, 1000), name=name)
    # The first assignment is accepted...
    var._pt_desc = desc
    assert var._pt_desc == desc
    # ...but a second assignment must be rejected and leave the value intact.
    with pytest.raises(AssertionError) as excinfo:
        var._pt_desc = random_str(30)
    assert str(excinfo.value) == f"Trying to alter description of constant {name}"
    assert var._pt_desc == desc
def test_constant_bad_value():
    """ Try creating a constant with a bad value """
    # A string is not a legal value
    bad_str = random_str(10)
    with pytest.raises(AssertionError) as excinfo:
        Constant(value=bad_str)
    assert f"Value must be None or an integer: {bad_str}" in str(excinfo.value)
    # Negative value for the default unsigned 32-bit constant
    neg_val = -1 * randint(1, 1000)
    with pytest.raises(AssertionError) as excinfo:
        Constant(value=neg_val)
    assert f"Value {neg_val} is outside of an unsigned 32 bit range" in str(excinfo.value)
    # Unsigned value too wide for 4 bits
    wide_val = randint(16, 32)
    with pytest.raises(AssertionError) as excinfo:
        Constant(width=4, value=wide_val)
    assert f"Value {wide_val} is outside of an unsigned 4 bit range" in str(excinfo.value)
    # Signed 8-bit constant with positive overflow
    pos_val = randint(128, 256)
    with pytest.raises(AssertionError) as excinfo:
        Constant(width=8, signed=True, value=pos_val)
    assert f"Value {pos_val} is outside of a signed 8 bit range" in str(excinfo.value)
    # Signed 8-bit constant with negative overflow
    low_val = -1 * randint(129, 256)
    with pytest.raises(AssertionError) as excinfo:
        Constant(width=8, signed=True, value=low_val)
    assert f"Value {low_val} is outside of a signed 8 bit range" in str(excinfo.value)
def test_constant_bad_desc():
    """ Try setting a non-string description on a constant """
    # Fix: the docstring previously duplicated test_constant_alter_desc's and
    # did not describe what this test checks.
    name = random_str(10)
    var = Constant(value=randint(0, 1000), name=name)
    with pytest.raises(AssertionError) as excinfo:
        var._pt_desc = 1234
    assert "Description must be a string: 1234" == str(excinfo.value)
# --- concatenation artifact removed: boundary before VoteActionHandler module ---
# gh_stars: 1-10
from app.api.services.game_services.action_handlers.action_handler import (
ActionHandler
)
from app.schemas import game_schema
from app.schemas.game_schema import VoteCard, State, Positions
class VoteActionHandler(ActionHandler):
    """Applies a player's vote card to the game's pending voting action and
    resolves the poll once the last participating player has voted."""

    @property
    def activity_text(self):
        """Human-readable activity-feed line for this action."""
        return f"{self.player} voted"

    def execute(self):
        """Pop the chosen vote card from the player's hand, record it, and
        dispatch to the handler matching the pending action type."""
        last_action = self.game.last_action
        assert self.game.can_vote(self.player)
        vote_card = self.game.players_info.get(self.player).vote_cards.pop(
            self.payload.vote_card_index
        )
        last_action.action_data.participating_players.remove(
            self.player
        )
        self.game.votes.participated_players.append(self.player)
        self.game.votes.vote_cards.append(vote_card)
        if last_action.action_type == game_schema.Action.ActionType.CALL_FOR_AN_ATTACK:
            self.handle_call_for_attack_vote(vote_card)
        elif last_action.action_type == game_schema.Action.ActionType.CALL_FOR_BRAWL:
            self.handle_call_for_brawl_vote(vote_card)
        elif last_action.action_type == game_schema.Action.ActionType.CALL_FOR_A_MUTINY:
            self.handle_call_for_mutiny_vote(vote_card)

    def handle_call_for_attack_vote(self, vote_card: VoteCard):
        """Tally an attack vote; when all players have voted, add the undealt
        deck tallies and resolve the attack (fire > water with cannons wins)."""
        last_action = self.game.last_action
        self.game.votes.cannons += vote_card.cannon
        self.game.votes.fire += vote_card.fire
        self.game.votes.water += vote_card.water
        if len(last_action.action_data.participating_players) == 0:
            # Bug fix: the last voter's card was previously counted a second
            # time here. Match the brawl/mutiny handlers, which add the
            # remaining vote_deck tallies when voting closes.
            # NOTE(review): assumes vote_deck exposes cannon/fire/water like
            # britain/france and wheel/skull — confirm against the schema.
            self.game.votes.cannons += self.game.vote_deck.cannon
            self.game.votes.fire += self.game.vote_deck.fire
            self.game.votes.water += self.game.vote_deck.water
            if (
                self.game.votes.fire > self.game.votes.water and
                self.game.votes.cannons > 0
            ):
                last_action.action_data.state = game_schema.State.Success
                self.game.give_chest(
                    last_action.action_data.which_captain.username
                )
            else:
                last_action.action_data.state = game_schema.State.Failed
                self.game.next_turn()
            self.game.last_action.action_data.vote_results.extend(
                self.game.votes.vote_cards
            )
            self.game.end_voting()

    def handle_call_for_brawl_vote(self, vote_card):
        """Tally a brawl vote; on close, the winning side moves one chest
        between the French and British treasure piles."""
        last_action = self.game.last_action
        self.game.votes.britain += vote_card.britain
        self.game.votes.france += vote_card.france
        if len(last_action.action_data.participating_players) == 0:
            # Remaining (undealt) deck tallies count toward the result.
            self.game.votes.britain += self.game.vote_deck.britain
            self.game.votes.france += self.game.vote_deck.france
            last_action.action_data.state = game_schema.State.Failed
            if (
                self.game.votes.britain > self.game.votes.france and
                self.game.chests_position.tr_fr > 0
            ):
                self.game.chests_position.tr_fr -= 1
                self.game.chests_position.tr_en += 1
                last_action.action_data.state = game_schema.State.Success
            elif (
                self.game.votes.britain < self.game.votes.france and
                self.game.chests_position.tr_en > 0
            ):
                self.game.chests_position.tr_en -= 1
                self.game.chests_position.tr_fr += 1
                last_action.action_data.state = game_schema.State.Success
            self.game.last_action.action_data.vote_results.extend(
                self.game.votes.vote_cards
            )
            self.game.end_voting()
            self.game.next_turn()

    def handle_call_for_mutiny_vote(self, vote_card):
        """Tally a mutiny vote; on close, more skulls than wheels deposes the
        captain (moved to the TR position)."""
        last_action = self.game.last_action
        self.game.votes.wheel += vote_card.wheel
        self.game.votes.skull += vote_card.skull
        if len(last_action.action_data.participating_players) == 0:
            # Remaining (undealt) deck tallies count toward the result.
            self.game.votes.wheel += self.game.vote_deck.wheel
            self.game.votes.skull += self.game.vote_deck.skull
            if self.game.votes.skull > self.game.votes.wheel:
                self.game.last_action.action_data.state = State.Success
                self.game.set_position(
                    last_action.action_data.captain, Positions.TR
                )
            else:
                self.game.last_action.action_data.state = State.Failed
            self.game.last_action.action_data.vote_results.extend(
                self.game.votes.vote_cards
            )
            self.game.end_voting()
            self.game.next_turn()
# --- concatenation artifact removed: boundary before django_inventory module ---
# file: django_inventory/apps/inventory/__init__.py (gh_stars: 0)
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from common.api import register_links, register_menu
from .models import Location, ItemTemplate, Inventory, InventoryTransaction, Supplier
# Navigation link definitions for the common.api framework. Each dict holds:
# 'text' (translated label), 'view' (named URL), optional 'args' (attribute
# path resolved against the current object) and 'famfam' (icon name).

# Inventory links
inventory_list = {'text': _('View all inventories'), 'view': 'inventory_list', 'famfam': 'package_go'}
inventory_create = {'text': _('Create new inventory'), 'view': 'inventory_create', 'famfam': 'package_add'}
inventory_update = {'text': _(u'Edit'), 'view': 'inventory_update', 'args': 'object.id', 'famfam': 'package_green'}
inventory_delete = {'text': _(u'Delete'), 'view': 'inventory_delete', 'args': 'object.id', 'famfam': 'package_delete'}
inventory_create_transaction = {'text': _('Add transaction'), 'view': 'inventory_create_transaction', 'args': 'object.id', 'famfam': 'book_add'}
inventory_view = {'text': _(u'Details'), 'view': 'inventory_view', 'args': 'object.id', 'famfam': 'package_go'}
inventory_list_transactions = {'text': _(u'Inventory transactions'), 'view': 'inventory_list_transactions', 'args': 'object.id', 'famfam': 'book_go'}

# Inventory-transaction links
inventory_transaction_update = {'text': _(u'Edit'), 'view': 'inventory_transaction_update', 'args': 'object.id', 'famfam': 'book_add'}
inventory_transaction_delete = {'text': _(u'Delete'), 'view': 'inventory_transaction_delete', 'args': 'object.id', 'famfam': 'book_delete'}
inventory_transaction_view = {'text': _(u'Details'), 'view': 'inventory_transaction_view', 'args': 'object.id', 'famfam': 'book_go'}

# Location links
location_list = {'text': _('Locations'), 'view': 'location_list', 'famfam': 'map'}
location_create = {'text': _(u'Create new location'), 'view': 'location_create', 'famfam': 'map_add'}
location_update = {'text': _(u'Edit'), 'view': 'location_update', 'args': 'object.id', 'famfam': 'map_edit'}
location_delete = {'text': _(u'Delete'), 'view': 'location_delete', 'args': 'object.id', 'famfam': 'map_delete'}

# Supplier links
supplier_create = {'text': _('Create new supplier'), 'view': 'supplier_create', 'famfam': 'lorry_add'}
supplier_list = {'text': _('Suppliers'), 'view': 'supplier_list', 'famfam': 'lorry'}
supplier_update = {'text': _('Edit'), 'view': 'supplier_update', 'args': 'object.id', 'famfam': 'lorry'}
supplier_delete = {'text': _('Delete'), 'view': 'supplier_delete', 'args': 'object.id', 'famfam': 'lorry_delete'}
supplier_assign_itemtemplate = {'text': _(u'Assign templates'), 'view': 'supplier_assign_itemtemplates', 'args': 'object.id', 'famfam': 'page_go'}
supplier_purchase_orders = {'text': _(u'Related purchase orders'), 'view': 'supplier_purchase_orders', 'args': 'object.id', 'famfam': 'cart_go'}

# Item-template links
template_list = {'text': _('View all'), 'view': 'template_list', 'famfam': 'page_go'}
template_create = {'text': _('Create new template'), 'view': 'template_create', 'famfam': 'page_add'}
template_orphan_list = {'text': _('Orphans templates'), 'view': 'template_orphans_list'}
template_update = {'text': _(u'Edit'), 'view': 'template_update', 'args': 'object.id', 'famfam': 'page_edit'}
template_delete = {'text': _(u'Delete'), 'view': 'template_delete', 'args': 'object.id', 'famfam': 'page_delete'}
template_photos = {'text': _(u'Add / remove photos'), 'view': 'template_photos', 'args': 'object.id', 'famfam': 'picture_go'}
template_assets = {'text': _(u'Related assets'), 'view': 'template_items_list', 'args': 'object.id', 'famfam': 'computer_go'}
template_assign_supplies = {'text': _(u'Assign supplies'), 'view': 'template_assign_supply', 'args': 'object.id', 'famfam': 'monitor'}
template_assign_suppliers = {'text': _(u'Assign suppliers'), 'view': 'template_assign_suppliers', 'args': 'object.id', 'famfam': 'lorry_go'}

# Cross-object navigation links (resolved via related objects)
jump_to_template = {'text': _(u'Template'), 'view': 'template_view', 'args': 'object.supply.id', 'famfam': 'page_go'}
jump_to_inventory = {'text': _(u'Return to inventory'), 'view': 'inventory_view', 'args': 'object.inventory.id', 'famfam': 'package_go'}

# Menu groupings
template_menu_links = [template_list, template_orphan_list, supplier_list]
inventory_menu_links = [
inventory_list,
]

# Filter widget over all known locations
location_filter = {'name': 'Location', 'title': _(u'location'), 'queryset': Location.objects.all(), 'destination': 'location'}

# Registration: attach the link sets to views (sidebar) and to model instances.
register_links(['template_list', 'template_create', 'template_view', 'template_orphans_list', 'template_update', 'template_delete', 'template_photos', 'template_assign_supply', 'template_assign_suppliers'], [template_create], menu_name='sidebar')
register_links(ItemTemplate, [template_update, template_delete, template_photos, template_assets, template_assign_supplies, template_assign_suppliers])
register_links(['supplier_list', 'supplier_create', 'supplier_update', 'supplier_view', 'supplier_delete', 'supplier_assign_itemtemplates'], [supplier_create], menu_name='sidebar')
register_links(Supplier, [supplier_update, supplier_delete, supplier_assign_itemtemplate, supplier_purchase_orders])
register_links(['inventory_view', 'inventory_list', 'inventory_create', 'inventory_update', 'inventory_delete'], [inventory_create], menu_name='sidebar')
register_links(Inventory, [inventory_update, inventory_delete, inventory_list_transactions, inventory_create_transaction])
register_links(Inventory, [inventory_view], menu_name='sidebar')
register_links(['inventory_transaction_update', 'inventory_transaction_delete', 'inventory_transaction_view'], [inventory_create_transaction], menu_name='sidebar')
register_links(InventoryTransaction, [inventory_transaction_view, inventory_transaction_update, inventory_transaction_delete, jump_to_template])
register_links(InventoryTransaction, [jump_to_inventory], menu_name='sidebar')
register_links(['location_list', 'location_create', 'location_update', 'location_delete'], [location_create], menu_name='sidebar')
register_links(Location, [location_update, location_delete])
register_menu([
{'text': _('Templates'), 'view': 'template_list', 'links': template_menu_links, 'famfam': 'page', 'position': 1},
{'text': _('Inventories'), 'view': 'inventory_list', 'links': inventory_menu_links, 'famfam': 'package', 'position': 4},
])
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# <NAME>. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
"""
We want to model a chain attached to two supports and hanging in between. Let us discretise
it with N mass points connected by N-1 springs. Each mass i has position (yi,zi), i=1,...,N.
The equilibrium point of the system minimises the potential energy.
The potential energy of each spring is
Vi=D_i/2 * ((y_i-y_{i+1})^2 + (z_i-z_{i+1})^2)
The gravitational potential energy of each mass is
Vg_i = m_i*g0*z_i
The total potential energy is thus given by:
Vchain(y,z) = 1/2*sum{i=1,...,N-1} D_i ((y_i-y_{i+1})^2+(z_i-z_{i+1})^2) + g0 * sum{i=1,...,N} m_i * z_i
where y=[y_1,...,y_N] and z=[z_1,...,z_N]
We wish to solve
minimize{y,z} Vchain(y, z)
Subject to the piecewise linear ground constraints:
z_i >= zin
z_i - 0.1*y_i >= 0.5
"""
from casadi import *
# Constants
N = 40
m_i = 40.0/N
D_i = 70.0*N
g0 = 9.81
#zmin = -inf # unbounded
zmin = 0.5 # ground
# Objective function
Vchain = 0
# Variables
x = []
# Variable bounds
lbx = []
ubx = []
# Constraints
g = []
# Constraint bounds
lbg = []
ubg = []
# Loop over all chain elements
for i in range(1, N+1):
# Previous point
if i>1:
y_prev = y_i
z_prev = z_i
# Create variables for the (y_i, z_i) coordinates
y_i = SX.sym('y_' + str(i))
z_i = SX.sym('z_' + str(i))
# Add to the list of variables
x += [y_i, z_i]
if i==1:
lbx += [-2., 1.]
ubx += [-2., 1.]
elif i==N:
lbx += [ 2., 1.]
ubx += [ 2., 1.]
else:
lbx += [-inf, zmin]
ubx += [ inf, inf]
# Spring potential
if i>1:
Vchain += D_i/2*((y_prev-y_i)**2 + (z_prev-z_i)**2)
# Graviational potential
Vchain += g0 * m_i * z_i
# Slanted ground constraints
g.append(z_i - 0.1*y_i)
lbg.append( 0.5)
ubg.append( inf)
# Formulate QP
qp = {'x':vertcat(*x), 'f':Vchain, 'g':vertcat(*g)}
# Solve with IPOPT
solver = qpsol('solver', 'qpoases', qp, {'sparse':True})
#solver = qpsol('solver', 'gurobi', qp)
#solver = nlpsol('solver', 'ipopt', qp)
# Get the optimal solution
sol = solver(lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
x_opt = sol['x']
f_opt = sol['f']
print('f_opt = ', f_opt)
# Retrieve the result
Y0 = x_opt[0::2]
Z0 = x_opt[1::2]
# Plot the result
import matplotlib.pyplot as plt
plt.plot(Y0,Z0,'o-')
ys = linspace(-2.,2.,100)
zs = 0.5 + 0.1*ys
plt.plot(ys,zs,'--')
plt.xlabel('y [m]')
plt.ylabel('z [m]')
plt.title('hanging chain QP')
plt.grid(True)
plt.legend(['Chain','z - 0.1y >= 0.5'],loc=9)
plt.show()
|
<filename>src/unregulated_genes/models.py
import math
import random
import numpy
import pandas
from scipy.integrate import odeint
"""
This models the ODE of an unregulated genetic expression of a prokaryotic cell
k0 is the rate at which mRNA is being generated
k1 is the rate at which proteins are being translated from mRNA
dm and dp are the degradation constants of mRNA and proteins respectively
C1 and C2 are integration constants
m0 and p0 are the mRNA and protein concentrations in M
"""
class UnregulatedGeneExpression:
    """Deterministic model of unregulated prokaryotic gene expression.

    The two coupled ODEs are
        dm/dt = k0 - dm * m        (mRNA)
        dp/dt = k1 * m - dp * p    (protein)
    where k0 is the transcription rate, k1 the translation rate, and
    dm / dp the degradation constants of mRNA and protein respectively.
    """

    # t is a number
    # const is an array of numbers that represent the constants of the reactions
    def __init__(self, tmax=10, num_of_datapoints=1000, m0=0, p0=0, const=(1, 1, .1, .1)):
        """
        :param tmax: end of the simulated time span [0, tmax]
        :param num_of_datapoints: number of evenly spaced sample points
        :param m0: initial mRNA concentration
        :param p0: initial protein concentration
        :param const: reaction constants (k0, k1, dm, dp)
        """
        self.n = num_of_datapoints
        self.tmax = tmax
        self.m0 = m0
        self.p0 = p0
        self.k0 = const[0]
        self.k1 = const[1]
        self.dm = const[2]
        self.dp = const[3]
        self.t = numpy.linspace(0, self.tmax, self.n)  # time points

    def analytical_solution(self, t):
        """Return the closed-form (m_rna, protein) values at time ``t``.

        NOTE: the closed form divides by (dm - dp), so it is undefined when
        dm == dp (a ZeroDivisionError is raised in that degenerate case).
        """
        C1 = self.m0 - self.k0 / self.dm
        C2 = self.p0 - (self.k1 * self.k0) / (self.dm * self.dp) + C1 * self.k1 / (self.dm - self.dp)
        m_rna = C1 * math.exp(-self.dm * t) + self.k0 / self.dm
        protein = (self.k1 * self.k0) / (self.dm * self.dp) \
            - (C1 * self.k1 * math.exp(-self.dm * t)) / (self.dm - self.dp) \
            + C2 * math.exp(-self.dp * t)
        return m_rna, protein

    def _to_dataframes(self, m, p):
        """Package mRNA and protein trajectories into (dfm, dfp) DataFrames."""
        dfp = pandas.DataFrame({"Time": self.t, "Proteins": p})
        dfm = pandas.DataFrame({"Time": self.t, "mRNA": m})
        return dfm, dfp

    def analytical_sim(self):
        """Evaluate the analytical solution on the whole time grid.

        :return: (dfm, dfp) DataFrames with Time/mRNA and Time/Proteins columns
        """
        m = numpy.empty_like(self.t)
        p = numpy.empty_like(self.t)
        # record initial conditions
        m[0] = self.m0
        p[0] = self.p0
        for i in range(1, self.n):
            # BUG FIX: evaluate at the actual time point self.t[i], not at
            # the integer loop index i, so the values line up with "Time".
            m[i], p[i] = self.analytical_solution(self.t[i])
        return self._to_dataframes(m, p)

    # Use scipy.odeint to evaluate
    def numerical_solution(self, z, t):
        """ODE right-hand side for scipy.integrate.odeint; z = (m, p)."""
        m, p = z
        dmdt = self.k0 - self.dm * m
        dpdt = self.k1 * m - self.dp * p
        return dmdt, dpdt

    def numerical_sim(self):
        """Integrate the ODE system step by step with odeint.

        :return: (dfm, dfp), same layout as :meth:`analytical_sim`
        """
        m = numpy.empty_like(self.t)
        p = numpy.empty_like(self.t)
        # record initial conditions
        z0 = [self.m0, self.p0]
        m[0] = self.m0
        p[0] = self.p0
        for i in range(1, self.n):
            # integrate over the next time interval
            tspan = [self.t[i - 1], self.t[i]]
            z = odeint(self.numerical_solution, z0, tspan)
            m[i] = z[1][0]
            p[i] = z[1][1]
            z0 = z[1]  # next initial condition
        return self._to_dataframes(m, p)
class GillespieUnregulatedGeneExpression:
    """Stochastic (Gillespie SSA) simulation of unregulated gene expression.

    Four reactions are simulated on the copy numbers (Nm, Np):
        1) transcription:        Nm -> Nm + 1   (propensity k0)
        2) translation:          Np -> Np + 1   (propensity k1 * Nm)
        3) mRNA degradation:     Nm -> Nm - 1   (propensity dm * Nm)
        4) protein degradation:  Np -> Np - 1   (propensity dp * Np)
    """

    # const is an array of numbers that represent the constants of the reactions
    def __init__(self, tmax=10, m0=0, p0=0, const=None, num_cells=1):
        """
        :param tmax: simulated time horizon
        :param m0: initial mRNA copy number
        :param p0: initial protein copy number
        :param const: reaction constants (k0, k1, dm, dp); defaults to [1, 1, .1, .1]
        :param num_cells: number of independent cells to simulate
        """
        if const is None:
            const = [1, 1, .1, .1]
        self.tmax = tmax
        self.k0 = const[0]
        self.k1 = const[1]
        self.dm = const[2]
        self.dp = const[3]
        self.Nm = m0
        self.Np = p0
        self.num_cells = num_cells

    def initial_state(self):
        """Return the initial [mRNA, protein] copy numbers."""
        return [self.Nm, self.Np]

    def update_propensities(self, Nm, Np):
        """Return normalized reaction probabilities plus the total propensity.

        Result is [r1, r2, r3, r4, total] where r1..r4 sum to 1.
        NOTE(review): if every propensity is zero (k0 == 0 with an empty
        state) this divides by zero -- assumed not to happen with k0 > 0.
        """
        a1 = self.k0
        a2 = self.k1 * Nm
        a3 = self.dm * Nm
        a4 = self.dp * Np
        total = a1 + a2 + a3 + a4
        return [a1 / total, a2 / total, a3 / total, a4 / total, total]

    def next_reaction(self, r):
        """Pick the next reaction; return its [dNm, dNp] state change."""
        n = random.uniform(0, 1)
        # Create an mRNA
        if 0 <= n <= r[0]:
            return [1, 0]
        # Create a protein
        elif r[0] < n <= r[1] + r[0]:
            return [0, 1]
        # Delete an mRNA
        elif r[1] + r[0] < n <= r[2] + r[1] + r[0]:
            return [-1, 0]
        # Delete a protein
        elif r[2] + r[1] + r[0] < n <= r[3] + r[2] + r[1] + r[0]:
            return [0, -1]
        else:
            # BUG FIX: previously this case (n beyond the cumulative sum due
            # to floating-point rounding, or n == 1 exactly) fell through and
            # returned None, which crashed update_reaction_vector.
            # Treat it as "no reaction".
            return [0, 0]

    def time_to_next_rxn(self, a, t):
        """Advance time by an exponentially distributed waiting step (rate a[4])."""
        dt = -math.log(1 - random.uniform(0, 1)) / a[4]
        return t + dt

    def update_reaction_vector(self, current_state, update_vector):
        """Apply the state-change vector to the current [Nm, Np] state."""
        return numpy.array(current_state) + numpy.array(update_vector)

    def run_sim(self):
        """Run one SSA trajectory until t exceeds tmax.

        :return: (dfm, dfp) DataFrames with Time/mRNA and Time/Proteins columns
        """
        time = []
        protein = []
        mrna = []
        t = 0  # start time
        r0 = self.initial_state()
        mrna.append(r0[0])
        protein.append(r0[1])
        time.append(t)
        while t <= self.tmax:
            a = self.update_propensities(r0[0], r0[1])
            next_rxn = self.next_reaction(a)
            t = self.time_to_next_rxn(a, t)
            r = self.update_reaction_vector(r0, next_rxn)
            mrna.append(r[0])
            protein.append(r[1])
            time.append(t)
            r0 = r
        dfp = pandas.DataFrame()
        dfp["Time"] = time
        dfp["Proteins"] = protein
        dfm = pandas.DataFrame()
        dfm["Time"] = time
        dfm["mRNA"] = mrna
        return dfm, dfp

    def multiple_cells_sim(self):
        """Simulate num_cells independent cells; return merged (dfm, dfp).

        Runs have different lengths, so columns are aligned on the row index
        and truncated with dropna() to the shortest run before an "Average"
        column is appended to both the count and time frames.
        Returns None when num_cells < 1 (unchanged legacy behavior).
        """
        dfp_multiple = pandas.DataFrame()
        dfm_multiple = pandas.DataFrame()
        dfmt_multiple = pandas.DataFrame()
        dfpt_multiple = pandas.DataFrame()
        if self.num_cells == 1:
            return self.run_sim()
        elif self.num_cells > 1:
            for i in range(0, self.num_cells):
                mrna, prot = self.run_sim()
                dfm_multiple["Run{n}".format(n=i)] = mrna["mRNA"]
                dfp_multiple["Run{n}".format(n=i)] = prot["Proteins"]
                dfmt_multiple["mRNA_Run_time{t}".format(t=i)] = mrna["Time"]
                dfpt_multiple["prot_Run_time{t}".format(t=i)] = prot["Time"]
            dfm_multiple = dfm_multiple.dropna()
            dfp_multiple = dfp_multiple.dropna()
            dfmt_multiple = dfmt_multiple.dropna()
            dfpt_multiple = dfpt_multiple.dropna()
            # Add average col to dataframes
            dfm_multiple["Average"], dfmt_multiple["Average_Time"] = dfm_multiple.mean(axis=1), \
                dfmt_multiple.mean(axis=1)
            dfp_multiple["Average"], dfpt_multiple["Average_Time"] = dfp_multiple.mean(axis=1), \
                dfpt_multiple.mean(axis=1)
            # Join the count and time DataFrames side by side
            dfm_final = pandas.concat([dfm_multiple, dfmt_multiple], axis=1, sort=False)
            dfp_final = pandas.concat([dfp_multiple, dfpt_multiple], axis=1, sort=False)
            return dfm_final, dfp_final
|
<gh_stars>0
import numpy as np
import cv2
import pickle
nx = 9 # Num of corners per row
ny = 6 # Num of corners per column
def abs_sobel_threshold(img, orient, sobel_kernel, thresh):
    """Binary-threshold the scaled absolute Sobel gradient of an RGB image.

    :param img: RGB image (numpy array)
    :param orient: 'x' or 'y' -- direction of the Sobel derivative
    :param sobel_kernel: odd kernel size passed to cv2.Sobel
    :param thresh: (low, high) inclusive bounds on the 0-255 scaled gradient
    :return: array of 0/1 values, same shape as the grayscale image
    :raises ValueError: if orient is neither 'x' nor 'y'
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        # BUG FIX: an invalid orient previously left `sobel` unbound and
        # produced a confusing UnboundLocalError below.
        raise ValueError("orient must be 'x' or 'y', got {!r}".format(orient))
    absolute = np.absolute(sobel)
    # Rescale to 0-255 before applying the threshold window.
    scaled_sobel = np.uint8(255 * absolute / np.max(absolute))
    output = np.zeros_like(scaled_sobel)
    output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return output
def magnitude_threshold(img, sobel_kernel, mag_thresh):
    """Binary mask of pixels whose gradient magnitude falls in ``mag_thresh``.

    :param img: RGB image (numpy array)
    :param sobel_kernel: odd kernel size passed to cv2.Sobel
    :param mag_thresh: (low, high) inclusive bounds on the 0-255 magnitude
    :return: array of 0/1 values, same shape as the grayscale image
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean gradient magnitude, rescaled to 0-255 and cast to 8-bit.
    magnitude = np.sqrt(grad_x ** 2 + grad_y ** 2)
    magnitude = (magnitude / (np.max(magnitude) / 255)).astype(np.uint8)
    mask = np.zeros_like(magnitude)
    mask[(magnitude >= mag_thresh[0]) & (magnitude <= mag_thresh[1])] = 1
    return mask
def array_threshold(img, sobel_kernel, thresh):
    """Binary mask of pixels whose gradient direction falls in ``thresh``.

    :param img: RGB image (numpy array)
    :param sobel_kernel: odd kernel size passed to cv2.Sobel
    :param thresh: (low, high) exclusive bounds on the angle in radians
    :return: 0/1 mask (float array, matching the angle array's dtype)
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute-value gradients restrict the direction to [0, pi/2].
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    mask = np.zeros_like(direction)
    mask[(direction > thresh[0]) & (direction < thresh[1])] = 1
    return mask
def vet_threshold(img):
    """Combine Lab b-channel and LUV L-channel thresholds into one binary mask.

    A pixel is set when its LUV L value is in [215, 255] or its Lab b value
    is in [145, 200].
    NOTE(review): the hard-coded windows look tuned for lane markings
    (bright white via L, yellow via b) -- confirm against calling code.

    :param img: RGB image (numpy array)
    :return: 0/1 mask with the shape of a single channel
    """
    # Extract the b channel from Lab and the L channel from LUV.
    b_channel = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 2]
    l_channel = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)[:, :, 0]
    l_mask = np.zeros_like(l_channel)
    l_mask[(l_channel >= 215) & (l_channel <= 255)] = 1
    b_mask = np.zeros_like(b_channel)
    b_mask[(b_channel >= 145) & (b_channel <= 200)] = 1
    # A pixel passing either channel test is kept.
    combined = np.zeros_like(b_mask)
    combined[(l_mask == 1) | (b_mask == 1)] = 1
    return combined
def warper(img, nx, ny, mtx, dist):
    """
    receives image, number of x and y points,
    camera matrix and distortion coefficients
    :param img: input (binary) image to warp to a bird's-eye view
    :param nx: number of x corner points -- currently unused in the body
    :param ny: number of y corner points -- currently unused in the body
    :param mtx: camera matrix -- currently unused in the body
    :param dist: distortion coefficients -- currently unused in the body
    :return: (warped image, 3x3 perspective transform matrix)
    """
    # get the image shape
    img_size = (img.shape[1], img.shape[0])
    # get detected corners (hard-coded source quadrilateral)
    # NOTE(review): dst shares the bottom two points with src, so the warp
    # only remaps the top edge -- confirm this trapezoid is intended.
    src = np.float32([[490, 482],[810, 482],
                      [1250, 720],[40, 720]])
    dst = np.float32([[0, 0], [1280, 0],
                      [1250, 720],[40, 720]])
    # calculate perspective transform
    m = cv2.getPerspectiveTransform(src, dst)
    # warp image
    output = cv2.warpPerspective(img, m, img_size)
    return output, m
def build(file):
    """Detect lane lines in an RGB road image and return an annotated copy.

    Pipeline: color threshold (vet_threshold) -> perspective warp (warper)
    -> histogram + sliding-window lane-pixel search -> second-order
    polynomial fit per lane -> curvature / vehicle-offset annotation ->
    unwarp the lane polygon and overlay it on the input.

    :param file: input RGB image (numpy array)
    :return: input image with the lane overlay and text annotations
    """
    from main import combined
    # Fallback pixels for frames where no right-lane pixels are found.
    # NOTE(review): these globals are reset to None on every call below, so
    # the fallback can only ever supply None -- confirm this is intended.
    global prev_right_y
    global prev_right_x
    with open("camera_cal/wide_dist_pickle.p", mode='rb') as f:
        camera_calib = pickle.load(f)
    mtx = camera_calib["mtx"]
    dist = camera_calib["dist"]
    prev_right_y = None
    prev_right_x = None
    # Threshold the input and warp it to a bird's-eye view.
    test_binary = vet_threshold(file)
    top_down, perspective_m = warper(test_binary, nx, ny, mtx, dist)
    binary_warped = top_down
    # histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # produce output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # determine peak of the left and right halves of the histogram
    # this will be the starting point for the left and right lines
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is the documented replacement.
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # number of sliding windows
    nwindows = 9
    # height of windows
    window_height = int(binary_warped.shape[0] / nwindows)
    # x and y positions of all nonzero pixels
    nonzero = binary_warped.nonzero()
    nonzero_y = np.array(nonzero[0])
    nonzero_x = np.array(nonzero[1])
    # current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # select the width of the windows +/- margin
    margin = 100
    # minimum number of pixels needed to recenter a window
    minpix = 50
    # per-window lists of lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
                      (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
                      (0, 255, 0), 2)
        # nonzero pixels in x and y within the window
        good_left_inds = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                          (nonzero_x >= win_xleft_low) & (nonzero_x < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                           (nonzero_x >= win_xright_low) & (nonzero_x < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzero_x[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzero_x[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # extract left and right pixels
    left_x = nonzero_x[left_lane_inds]
    left_y = nonzero_y[left_lane_inds]
    right_x = nonzero_x[right_lane_inds]
    right_y = nonzero_y[right_lane_inds]
    if right_y.size == 0:
        right_y = prev_right_y
        right_x = prev_right_x
    # fit a second order polynomial to each lane
    left_fit = np.polyfit(left_y, left_x, 2)
    right_fit = np.polyfit(right_y, right_x, 2)
    prev_right_y = right_y
    prev_right_x = right_x
    # refine: keep only pixels within +/- margin of the fitted polynomials
    nonzero = binary_warped.nonzero()
    nonzero_y = np.array(nonzero[0])
    nonzero_x = np.array(nonzero[1])
    margin = 100
    left_lane_inds = ((nonzero_x > (left_fit[0] * (nonzero_y ** 2) + left_fit[1] * nonzero_y +
                                    left_fit[2] - margin)) &
                      (nonzero_x < (left_fit[0] * (nonzero_y ** 2) +
                                    left_fit[1] * nonzero_y + left_fit[2] + margin)))
    right_lane_inds = ((nonzero_x > (right_fit[0] * (nonzero_y ** 2) + right_fit[1] * nonzero_y +
                                     right_fit[2] - margin)) &
                       (nonzero_x < (right_fit[0] * (nonzero_y ** 2) +
                                     right_fit[1] * nonzero_y + right_fit[2] + margin)))
    # extract left and right line pixels
    left_x = nonzero_x[left_lane_inds]
    left_y = nonzero_y[left_lane_inds]
    right_x = nonzero_x[right_lane_inds]
    right_y = nonzero_y[right_lane_inds]
    # refit a second order polynomial on the refined pixel sets
    left_fit = np.polyfit(left_y, left_x, 2)
    right_fit = np.polyfit(right_y, right_x, 2)
    # generate x and y values for plotting
    plot_y = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * plot_y ** 2 + left_fit[1] * plot_y + left_fit[2]
    right_fitx = right_fit[0] * plot_y ** 2 + right_fit[1] * plot_y + right_fit[2]
    # create image to draw the lines on
    src = np.float32([[490, 482], [810, 482],
                      [1250, 720], [40, 720]])
    dst = np.float32([[0, 0], [1280, 0],
                      [1250, 720], [40, 720]])
    warped = combined
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    print(color_warp.shape)  # debug output, left in place
    # x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, plot_y]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, plot_y])))])
    pts = np.hstack((pts_left, pts_right))
    # draw the lane (using warped image)
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    Minv = cv2.getPerspectiveTransform(dst, src)
    # warp the blank back to original image space
    new_warp = cv2.warpPerspective(color_warp, Minv, (file.shape[1], file.shape[0]))
    # combine results
    result_output = cv2.addWeighted(file, 1, new_warp, 0.3, 0)
    # use the fitted polylines (not raw pixels) for curvature / position
    left_x = left_fitx
    right_x = right_fitx
    # curvature is evaluated at the bottom row (closest to the vehicle)
    y_eval = np.max(plot_y)
    ym_per_pix = 30 / 720  # meters per pixel y axis
    xm_per_pix = 3.7 / 700  # meters per pixel x axis
    # refit in world (meter) space
    left_fit_cr = np.polyfit(plot_y * ym_per_pix, left_x * xm_per_pix, 2)
    right_fit_cr = np.polyfit(plot_y * ym_per_pix, right_x * xm_per_pix, 2)
    # calc. new radii of curvature
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * right_fit_cr[0])
    # annotate the average curvature radius
    curvature = (left_curverad + right_curverad) / 2
    cv2.putText(result_output, 'Curvature Radius {}(m)'.format(curvature), (120, 140), fontFace=16, fontScale=2, color=(255, 255, 255))
    # lane-center offset relative to the image center (x = 640)
    center = (right_x[0] + left_x[0]) / 2
    dist_center = abs((640 - center) * 3.7 / 700)
    if center > 640:
        cv2.putText(result_output, 'Vehicle is {:.2f}m left of center'.format(dist_center), (100, 80), fontFace=16, fontScale=2, color=(255, 255, 255))
    else:
        cv2.putText(result_output, 'Vehicle is {:.2f}m right of center'.format(dist_center), (100, 80), fontFace=16, fontScale=2, color=(255, 255, 255))
    return result_output
<gh_stars>100-1000
import torch
from torch.nn.functional import softmax
from .generator.jacobian import Jacobian
from .layercollection import LayerCollection
def FIM_MonteCarlo(model,
                   loader,
                   representation,
                   variant='classif_logits',
                   trials=1,
                   device='cpu',
                   function=None,
                   layer_collection=None):
    """
    Helper that creates a matrix computing the Fisher Information
    Matrix using a Monte-Carlo estimate of y|x with `trials` samples per
    example
    Parameters
    ----------
    model : torch.nn.Module
        The model that contains all parameters of the function
    loader : torch.utils.data.DataLoader
        DataLoader for computing expectation over the input space
    representation : class
        The parameter matrix representation that will be used to store
        the matrix
    variant : string, optional (default='classif_logits')
        Variant to use depending on how you interpret your function.
        Possible choices are:
        - 'classif_logits' when using logits for classification
        - 'classif_logsoftmax' when using log_softmax values for classification
        - 'segmentation_logits' when using logits in a segmentation task
    trials : int, optional (default=1)
        Number of trials for Monte Carlo sampling
    device : string, optional (default='cpu')
        Target device for the returned matrix
    function : function, optional (default=None)
        An optional function if different from `model(input)`. If
        it is different from None, it will override the device
        parameter.
    layer_collection : layercollection.LayerCollection, optional
        (default=None)
        An optional layer collection
    Raises
    ------
    NotImplementedError
        If `variant` is not one of the supported choices above.
    """
    if function is None:
        # Default: forward the first element of each batch through the model.
        def function(*d):
            return model(d[0].to(device))
    if layer_collection is None:
        layer_collection = LayerCollection.from_model(model)
    if variant == 'classif_logits':
        def fim_function(*d):
            log_softmax = torch.log_softmax(function(*d), dim=1)
            probabilities = torch.exp(log_softmax)
            # Sample `trials` class labels per example from p(y|x); the
            # trials**-0.5 factor averages the outer products in the FIM.
            sampled_targets = torch.multinomial(probabilities, trials,
                                                replacement=True)
            return trials ** -.5 * torch.gather(log_softmax, 1,
                                                sampled_targets)
    elif variant == 'classif_logsoftmax':
        def fim_function(*d):
            # Here `function` is assumed to already return log-probabilities.
            log_softmax = function(*d)
            probabilities = torch.exp(log_softmax)
            sampled_targets = torch.multinomial(probabilities, trials,
                                                replacement=True)
            return trials ** -.5 * torch.gather(log_softmax, 1,
                                                sampled_targets)
    elif variant == 'segmentation_logits':
        def fim_function(*d):
            log_softmax = torch.log_softmax(function(*d), dim=1)
            s_mb, s_c, s_h, s_w = log_softmax.size()
            # Flatten spatial positions so each pixel is treated as an
            # independent classification example when sampling.
            log_softmax = log_softmax.permute(0, 2, 3, 1).contiguous() \
                .view(s_mb * s_h * s_w, s_c)
            probabilities = torch.exp(log_softmax)
            sampled_indices = torch.multinomial(probabilities, trials,
                                                replacement=True)
            sampled_targets = torch.gather(log_softmax, 1,
                                           sampled_indices)
            sampled_targets = sampled_targets.view(s_mb, s_h * s_w, trials) \
                .sum(dim=1)
            return trials ** -.5 * sampled_targets
    else:
        raise NotImplementedError
    generator = Jacobian(layer_collection=layer_collection,
                         model=model,
                         function=fim_function,
                         n_output=trials)
    return representation(generator=generator, examples=loader)
def FIM(model,
        loader,
        representation,
        n_output,
        variant='classif_logits',
        device='cpu',
        function=None,
        layer_collection=None):
    """
    Helper that creates a matrix computing the Fisher Information
    Matrix using closed form expressions for the expectation y|x
    as described in (Pascanu and Bengio, 2013)
    Parameters
    ----------
    model : torch.nn.Module
        The model that contains all parameters of the function
    loader : torch.utils.data.DataLoader
        DataLoader for computing expectation over the input space
    representation : class
        The parameter matrix representation that will be used to store
        the matrix
    n_output : int
        Number of outputs of the model
    variant : string, optional (default='classif_logits')
        How to interpret the function's output; one of:
        - 'classif_logits' when using logits for classification
        - 'regression' when using a gaussian regression model
    device : string, optional (default='cpu')
        Target device for the returned matrix
    function : function, optional (default=None)
        An optional function if different from `model(input)`. If
        it is different from None, it will override the device
        parameter.
    layer_collection : layercollection.LayerCollection, optional
        (default=None)
        An optional layer collection
    Raises
    ------
    NotImplementedError
        If `variant` is not one of the supported choices above.
    """
    if function is None:
        # Default: forward the first element of each batch through the model.
        def function(*d):
            return model(d[0].to(device))
    if layer_collection is None:
        layer_collection = LayerCollection.from_model(model)
    if variant == 'classif_logits':
        def closed_form_fn(*d):
            # Weight log-probabilities by sqrt(p); their Jacobian outer
            # product then yields the classification FIM in closed form.
            log_probs = torch.log_softmax(function(*d), dim=1)
            probs = torch.exp(log_probs).detach()
            return log_probs * probs ** .5
    elif variant == 'regression':
        def closed_form_fn(*d):
            # Gaussian regression: the model output itself is used directly.
            return function(*d)
    else:
        raise NotImplementedError
    generator = Jacobian(layer_collection=layer_collection,
                         model=model,
                         function=closed_form_fn,
                         n_output=n_output)
    return representation(generator=generator, examples=loader)
|
<filename>program/program/CalibrationResults.py<gh_stars>1-10
import json
import os
from datetime import datetime
import numpy
def get_current_time():
    """Return the current local time formatted as 'YYYY-MM-DD-at-HH-MM'."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d-at-%H-%M")
class CalibrationImportError(RuntimeError):
    """Raised when calibration results cannot be imported from a JSON file."""
    pass
def import_json(target, json_file):
    '''Maps imported json to variables of the instance.

    Replaces ``target.__dict__`` wholesale with the decoded JSON object, so
    every top-level JSON key becomes an attribute of ``target``.

    :param target: object whose attributes are populated from the file
    :param json_file: path to the JSON file to load
    :raises CalibrationImportError: if the file cannot be read or parsed
    '''
    try:
        with open(json_file, 'r') as i:
            target.__dict__ = json.load(i)
    except (OSError, ValueError) as err:
        # BUG FIX: a bare `except:` swallowed every exception (including
        # KeyboardInterrupt) and discarded the cause. Catch only file and
        # parse errors (json.JSONDecodeError is a ValueError) and chain.
        raise CalibrationImportError(str(err)) from err
def save_to_json(source, path):
    """Serialize ``source`` as JSON at ``path``, creating parent directories."""
    parent = os.path.dirname(path)
    os.makedirs(parent, exist_ok=True)
    with open(path, 'w') as output:
        json.dump(source, output)
class MonoCameraCalibrationResults(object):
    '''Class to store all mono calibration results'''

    def __init__(self, camera_matrix=None, distortion_coeffs=None, rotation_vecs=None, translation_vecs=None,
                 json_file=None):
        # Either hydrate from a previously saved JSON file, or take the
        # values straight from the calibration routine.
        if json_file is not None:
            import_json(self, json_file)
            # JSON stores plain lists; restore numpy arrays for these two.
            self.camera_matrix = numpy.array(self.camera_matrix)
            self.distortion_coeffs = numpy.array(self.distortion_coeffs)
        else:
            self.camera_matrix = camera_matrix
            self.distortion_coeffs = distortion_coeffs
            self.rotation_vec = rotation_vecs
            self.translation_vec = translation_vecs

    def __str__(self):
        parts = ("Camera matrix", self.camera_matrix,
                 "Distortion coefficients", self.distortion_coeffs,
                 "Rotation matrix", self.rotation_vec,
                 "Translation vector", self.translation_vec)
        return "".join("{}\n".format(part) for part in parts)

    def save(self, camera_index):
        """Saves the mono calibration results to a json"""
        # NOTE(review): list-valued attributes are serialized as None here
        # (only array-likes with .tolist() are kept) -- confirm intended.
        result = {}
        for key, value in self.__dict__.items():
            if isinstance(value, list) or value is None:
                result[key] = None
            else:
                result[key] = value.tolist()
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(curr_dir, "calib_results", str(camera_index + 1), '{}.json'.format(get_current_time()))
        save_to_json(result, filename)
class StereoCameraCalibrationResults(object):
    '''Class for storing stereo calibration results'''

    def __init__(self, rotation_matrix=None, translation_vector=None, essential_matrix=None, fundamental_matrix=None,
                 reprojection_error=None, json_file=None):
        # Hydrate from JSON when a file is given, otherwise store the
        # freshly computed calibration outputs.
        if json_file is not None:
            import_json(self, json_file)
            # JSON stores plain lists; restore numpy arrays for the matrices.
            self.rotation_matrix = numpy.array(self.rotation_matrix)
            self.translation_vector = numpy.array(self.translation_vector)
            self.fundamental_matrix = numpy.array(self.fundamental_matrix)
            self.essential_matrix = numpy.array(self.essential_matrix)
        else:
            self.rotation_matrix = rotation_matrix
            self.translation_vector = translation_vector
            self.essential_matrix = essential_matrix
            self.fundamental_matrix = fundamental_matrix
            self.reprojection_error = reprojection_error

    def camera_distance(self):
        '''Computes the norm of the translation vector -- distance between the cameras'''
        return numpy.linalg.norm(self.translation_vector)

    def save(self):
        """Persist the stereo results to a timestamped JSON file."""
        serialized = {}
        for key, value in self.__dict__.items():
            serialized[key] = value.tolist() if isinstance(value, numpy.ndarray) else value
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(curr_dir, "calib_results", "stereo_calib_results", '{}.json'.format(get_current_time()))
        save_to_json(serialized, filename)

    def __str__(self):
        labeled = [("Rotation matrix", self.rotation_matrix),
                   ("Translation vector", self.translation_vector),
                   ("Essential matrix", self.essential_matrix),
                   ("Fundamental matrix", self.fundamental_matrix)]
        body = "".join("{}\n{}\n".format(name, value) for name, value in labeled)
        return body + "Reprojection error {}\n".format(self.reprojection_error)
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (C) 2020, 2021 University of Rochester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import glob
import os
import statistics
import sys
#
# Path to the root directory of whole project.
#
root = os.path.abspath(os.path.dirname(sys.argv[0]) + '/..')
#
# Path to the debug directory where we put generated binaries.
#
debug_dir = root + '/debug'
#
# Path to the experiment data directory.
#
data_dir = root + '/data'
#
# List of configurations. The order here defines the CSV column order
# produced by write_data(); 'baseline' must be present for relative numbers.
#
configurations = [
    'baseline',
    'ss',
    'sp',
    'cfi',
    'silhouette',
    'invert',
    'sfifull',
]
#
# List of benchmark suites.
#
benchmarks = [
    'beebs',
    'coremark',
    'coremark-pro',
    'pinlock',
    'stm32apps',
]
###############################################################################
#
# Write extracted data to an output file.
#
# @data: the data collection.
# @relative: whether to generate relative numbers for non-baseline
# configurations.
# @output: path to the output CSV file.
#
def write_data(data, relative, output):
    """Write the collected numbers in ``data`` to the CSV file ``output``.

    ``data`` maps program name -> {configuration -> number or [avg, stdev]}.
    NOTE: ``data`` is modified in place (relative conversion and float
    formatting below), so callers must not reuse it afterwards.
    """
    # Do we have any program that uses average + stdev?
    has_stdev = False
    for prog in data:
        for conf in data[prog]:
            if isinstance(data[prog][conf], list):
                # NOTE(review): assert is stripped under `python -O`.
                assert len(data[prog][conf]) == 2, 'Not average + stdev list?'
                has_stdev = True
    # Post-process: convert to relative numbers for non-baseline
    if relative:
        for prog in data:
            assert 'baseline' in data[prog], 'Relative to no baseline?'
            baseline = data[prog]['baseline']
            if isinstance(baseline, list):
                baseline = baseline[0]
            baseline = float(baseline)
            for conf in configurations:
                if conf == 'baseline':
                    continue
                if conf in data[prog]:
                    if isinstance(data[prog][conf], list):
                        data[prog][conf][0] /= baseline
                        data[prog][conf][1] /= baseline
                    else:
                        data[prog][conf] /= baseline
    # Post-process: limit floating-point numbers to 3 digits
    for prog in data:
        for conf in data[prog]:
            if isinstance(data[prog][conf], list):
                if isinstance(data[prog][conf][0], float):
                    data[prog][conf][0] = '{0:.3f}'.format(data[prog][conf][0])
                if isinstance(data[prog][conf][1], float):
                    data[prog][conf][1] = '{0:.3f}'.format(data[prog][conf][1])
            elif isinstance(data[prog][conf], float):
                data[prog][conf] = '{0:.3f}'.format(data[prog][conf])
    # Now write to a CSV file
    with open(output, mode='w') as csv_file:
        writer = csv.writer(csv_file)
        # Construct and write the header row; when any program carries
        # [avg, stdev] pairs, every configuration occupies two columns.
        row = ['#Program']
        for conf in configurations:
            row.append(conf)
            if has_stdev:
                row.append('stdev')
        writer.writerow(row)
        # Construct and write a row for each program
        for prog in data:
            row = [prog]
            for conf in configurations:
                if conf in data[prog]:
                    if isinstance(data[prog][conf], list):
                        row.extend(data[prog][conf])
                    elif has_stdev:
                        # Single number padded with a '0' stdev column.
                        row.extend([data[prog][conf], '0'])
                    else:
                        row.append(data[prog][conf])
                elif has_stdev:
                    row.extend(['', ''])
                else:
                    row.append('')
            writer.writerow(row)
#
# Generate a code size CSV file for a specified benchmark suite, assuming
# @debug_dir already contains all the generate binaries needed.
#
# @benchmark: name of the benchmark suite.
# @relative: whether to generate relative numbers for non-baseline
# configurations.
# @output: path to the output CSV file.
#
def gen_csv_codesize(benchmark, relative, output):
    """Generate a code size CSV file for a benchmark suite.

    Assumes @debug_dir already contains all the generated binaries needed.

    @benchmark: name of the benchmark suite.
    @relative: whether to generate relative numbers for non-baseline
               configurations.
    @output: path to the output CSV file.
    """
    data = {}
    for conf in configurations:
        new_debug_dir = debug_dir + '/' + benchmark + '-' + conf
        for f in sorted(glob.glob(new_debug_dir + '/*.elf')):
            prog = os.path.splitext(os.path.basename(f))[0]
            prog = prog.replace(conf + '-', '', 1)
            # Sum the sizes of all .text sections reported by size(1).
            number = 0
            # Use a context manager so the pipe is closed deterministically;
            # the original stream was never closed (resource leak).
            with os.popen('size -A -d ' + f) as size_output:
                for line in size_output:
                    if '.text' in line:
                        number += int(line.split()[1])
            if number != 0:
                data.setdefault(prog, {})[conf] = number
    # Write data to CSV
    write_data(data, relative, output)
#
# Generate a performance CSV file for a specified benchmark suite, assuming
# @data_dir already contains all the experiment data needed.
#
# @benchmark: name of the benchmark suite.
# @relative: whether to generate relative numbers for non-baseline
# configurations.
# @output: path to the output CSV file.
#
def _parse_perf_number(path):
    """Extract the performance number from one experiment output file.

    Recognizes the output formats of BEEBS, CoreMark, CoreMark-Pro, and
    PinLock/STM32apps; returns the first number found as an int, or None
    if no recognizable line is present.
    """
    # Context manager closes the file promptly; the original left the
    # file object to the garbage collector.
    with open(path) as stat_file:
        for line in stat_file:
            # BEEBS
            if 'Finished' in line:
                return int(line.split(' ')[2].lstrip())
            # CoreMark
            elif 'Total ticks' in line:
                return int(line.split(':')[-1].lstrip())
            # CoreMark-Pro
            elif 'time(ns)' in line:
                return int(line.split('=')[-1].lstrip())
            # PinLock and STM32apps
            elif 'Elapsed time' in line:
                return int(line.split(' ')[-2].lstrip())
    return None


def gen_csv_perf(benchmark, relative, output):
    """Generate a performance CSV file for a benchmark suite.

    Assumes @data_dir already contains all the experiment data needed.

    @benchmark: name of the benchmark suite.
    @relative: whether to generate relative numbers for non-baseline
               configurations.
    @output: path to the output CSV file.
    """
    data = {}
    for conf in configurations:
        new_data_dir = data_dir + '/' + benchmark + '-' + conf
        # Process single-number data as is
        for f in sorted(glob.glob(new_data_dir + '/*.stat')):
            prog = os.path.splitext(os.path.basename(f))[0]
            prog = prog.replace(conf + '-', '', 1)
            number = _parse_perf_number(f)
            if number is not None:
                data.setdefault(prog, {})[conf] = number
        # Process multi-number data as average and stdev
        for f in sorted(glob.glob(new_data_dir + '/*-stat')):
            prog = os.path.splitext(os.path.basename(f))[0]
            prog = prog.replace(conf + '-', '', 1)
            number = _parse_perf_number(f)
            if number is not None:
                data.setdefault(prog, {}).setdefault(conf, []).append(number)
        # Collapse the per-run samples gathered for this configuration.
        for prog in data:
            if conf in data[prog] and isinstance(data[prog][conf], list):
                samples = data[prog][conf]
                average = float(sum(samples)) / len(samples)
                # NOTE: statistics.stdev requires at least two samples and
                # raises StatisticsError otherwise (unchanged behavior).
                stdev = statistics.stdev(samples)
                data[prog][conf] = [average, stdev]
    # Write data to CSV
    write_data(data, relative, output)
#
# The main function.
#
def main():
    """Parse the command line and generate the requested CSV file."""
    parser = argparse.ArgumentParser(description='Generate CSV files.')
    parser.add_argument('-b', '--benchmark', choices=benchmarks,
                        default='beebs', metavar='BENCH',
                        help='Name of the benchmark suite')
    parser.add_argument('-t', '--type', choices=['codesize', 'perf'],
                        default='perf', metavar='TYPE',
                        help='Type of the CSV file to generate')
    parser.add_argument('-r', '--relative', action='store_true',
                        help='Generate non-baseline numbers relative to baseline')
    parser.add_argument('-o', '--output', metavar='FILE',
                        help='Path to the output CSV file')
    args = parser.parse_args()

    # Default output name is derived from the CSV type and benchmark name.
    output = args.output
    if output is None:
        output = args.type + '-' + args.benchmark + '.csv'

    # Dispatch to the generator matching the requested CSV type; argparse
    # restricts --type to exactly these two values.
    generators = {
        'perf': gen_csv_perf,
        'codesize': gen_csv_codesize,
    }
    generators[args.type](args.benchmark, args.relative, output)
#
# Entry point of this script.
#
if __name__ == '__main__':
    main()
|
from .testcase import DatatableViewTestCase
from .test_app import models
from .. import utils
def get_structure(columns, opts):
    """Build a datatable structure for ExampleModel with the given columns merged into opts."""
    options = dict(opts, columns=columns)
    return utils.get_datatable_structure('/', options, model=models.ExampleModel)
class UtilsTests(DatatableViewTestCase):
    """Unit tests for the helper functions and classes in ``utils``."""

    def test_get_first_orm_bit(self):
        """get_first_orm_bit returns the first ORM path segment of a column definition."""
        self.assertEqual(utils.get_first_orm_bit('field'), 'field')
        self.assertEqual(utils.get_first_orm_bit('field__otherfield'), 'field')
        self.assertEqual(utils.get_first_orm_bit(["Pretty Name", 'field']), 'field')
        self.assertEqual(utils.get_first_orm_bit(["Pretty Name", 'field', "callback"]), 'field')
        self.assertEqual(utils.get_first_orm_bit(["Pretty Name", 'field__otherfield']), 'field')
        self.assertEqual(utils.get_first_orm_bit(["Pretty Name", 'field__otherfield', "callback"]), 'field')

    def test_resolve_orm_path_local(self):
        """ Verifies that references to a local field on a model are returned. """
        field = utils.resolve_orm_path(models.ExampleModel, 'name')
        self.assertEqual(field, models.ExampleModel._meta.get_field('name'))

    def test_resolve_orm_path_fk(self):
        """ Verify that ExampleModel->RelatedModel.name == RelatedModel.name """
        remote_field = utils.resolve_orm_path(models.ExampleModel, 'related__name')
        self.assertEqual(remote_field, models.RelatedModel._meta.get_field('name'))

    def test_resolve_orm_path_reverse_fk(self):
        """ Verify that ExampleModel->>>ReverseRelatedModel.name == ReverseRelatedModel.name """
        remote_field = utils.resolve_orm_path(models.ExampleModel, 'reverserelatedmodel__name')
        self.assertEqual(remote_field, models.ReverseRelatedModel._meta.get_field('name'))

    def test_resolve_orm_path_m2m(self):
        """ Verify that ExampleModel->>>RelatedM2MModel.name == RelatedM2MModel.name """
        remote_field = utils.resolve_orm_path(models.ExampleModel, 'relateds__name')
        self.assertEqual(remote_field, models.RelatedM2MModel._meta.get_field('name'))

    def test_split_real_fields(self):
        """ Verifies that the first non-real field causes a break in the field list. """
        model = models.ExampleModel
        # All-real fields
        real, fake = utils.split_real_fields(model, ['name', 'date_created'])
        self.assertEqual(real, ['name', 'date_created'])
        self.assertEqual(fake, [])
        # No real fields
        real, fake = utils.split_real_fields(model, ['fake1', 'fake2'])
        self.assertEqual(real, [])
        self.assertEqual(fake, ['fake1', 'fake2'])
        # Real first, fake last
        real, fake = utils.split_real_fields(model, ['name', 'fake'])
        self.assertEqual(real, ['name'])
        self.assertEqual(fake, ['fake'])
        # Fake first, real last
        real, fake = utils.split_real_fields(model, ['fake', 'name'])
        self.assertEqual(real, [])
        self.assertEqual(fake, ['fake', 'name'])

    def test_filter_real_fields(self):
        """ Verifies that filter_real_fields separates DB-backed from virtual column definitions. """
        model = models.ExampleModel
        # Every definition shape that names a real model field.
        fields = [
            'name',
            ('name',),
            ("Pretty Name", 'name'),
            ("Pretty Name", 'name', 'callback'),
        ]
        # Definitions that do not resolve to a real model field.
        fakes = [
            'fake',
            ("Pretty Name", 'fake'),
            ("Pretty Name", 'fake', 'callback'),
            None,
            ("Pretty Name", None),
            ("Pretty Name", None, 'callback'),
        ]
        db_fields, virtual_fields = utils.filter_real_fields(model, fields + fakes,
                                                            key=utils.get_first_orm_bit)
        self.assertEqual(db_fields, fields)
        self.assertEqual(virtual_fields, fakes)

    def test_structure_ordering(self):
        """ Verifies that the structural object correctly maps configuration values. """
        # Verify basic ordering
        columns = [
            'name',
        ]
        structure = get_structure(columns, {'ordering': ['name']})
        self.assertEqual(structure.ordering['name'].direction, 'asc')
        structure = get_structure(columns, {'ordering': ['+name']})
        self.assertEqual(structure.ordering['name'].direction, 'asc')
        structure = get_structure(columns, {'ordering': ['-name']})
        self.assertEqual(structure.ordering['name'].direction, 'desc')
        # Verify compound ordering is preserved
        columns = [
            'id',
            'name',
        ]
        structure = get_structure(columns, {'ordering': ['name', 'id']})
        self.assertEqual(structure.ordering['name'].order, 0)
        self.assertEqual(structure.ordering['id'].order, 1)
        # Verify non-real field ordering is recognized when column is defined
        columns = [
            'fake',
        ]
        structure = get_structure(columns, {'ordering': ['fake']})
        self.assertEqual(structure.ordering['fake'].direction, 'asc')
        structure = get_structure(columns, {'ordering': ['+fake']})
        self.assertEqual(structure.ordering['fake'].direction, 'asc')
        structure = get_structure(columns, {'ordering': ['-fake']})
        self.assertEqual(structure.ordering['fake'].direction, 'desc')
        # Verify invalid ordering names are not included
        columns = [
            'name',
        ]
        structure = get_structure(columns, {'ordering': ['fake', 'name']})
        self.assertIn('name', structure.ordering)
        self.assertNotIn('fake', structure.ordering)

    def test_structure_data_api(self):
        """
        Verifies that unsortable_columns, hidden_columns, and ordering all add expected data-* API
        attributes
        """
        columns = [
            'id',
            'name',
        ]
        structure = get_structure(columns, {})
        self.assertEqual(structure.get_column_attributes('name')['data-visible'], 'true')
        self.assertEqual(structure.get_column_attributes('name')['data-sortable'], 'true')
        structure = get_structure(columns, {'hidden_columns': ['name']})
        self.assertEqual(structure.get_column_attributes('name')['data-visible'], 'false')
        self.assertEqual(structure.get_column_attributes('name')['data-sortable'], 'true')
        structure = get_structure(columns, {'unsortable_columns': ['name']})
        self.assertEqual(structure.get_column_attributes('name')['data-visible'], 'true')
        self.assertEqual(structure.get_column_attributes('name')['data-sortable'], 'false')
        structure = get_structure(columns, {'hidden_columns': ['name'], 'unsortable_columns': ['name']})
        self.assertEqual(structure.get_column_attributes('name')['data-visible'], 'false')
        self.assertEqual(structure.get_column_attributes('name')['data-sortable'], 'false')
        structure = get_structure(columns, {'ordering': ['-name', 'id']})
        self.assertEqual(structure.get_column_attributes('id')['data-sorting'], '1,0,asc')
        self.assertEqual(structure.get_column_attributes('name')['data-sorting'], '0,1,desc')

    def test_structure_automatic_pretty_names(self):
        """ Verify columns missing Pretty Names receive one based on their field name. """
        columns = [
            ('Primary Key', 'id'),
            'name',
        ]
        structure = get_structure(columns, {})
        column_info = structure.get_column_info()
        self.assertEqual(column_info[0].pretty_name, "Primary Key")
        name_field = models.ExampleModel._meta.get_field('name')
        self.assertEqual(column_info[1].pretty_name, name_field.name)

    def test_structure_is_iterable(self):
        """ Verify structure object can be iterated for each column definition. """
        columns = [
            'id',
            'name',
            'fake',
        ]
        structure = get_structure(columns, {})
        self.assertEqual(len(list(structure)), len(columns))

    def test_options_use_default_to_local_fields(self):
        """ Verifies that no columns specified in options means showing all local fields. """
        opts = {}
        options = utils.DatatableOptions(models.ExampleModel, {}, **opts)
        local_field_names = [(f.verbose_name, f.name) for f in models.ExampleModel._meta.local_fields]
        self.assertEqual(options['columns'], local_field_names)

    def test_options_use_defaults(self):
        """ Verifies that no options normalizes to the default set. """
        options = utils.DatatableOptions(models.ExampleModel, {})
        self.assertEqual(options, dict(utils.DEFAULT_OPTIONS, columns=options['columns']))

    def test_options_normalize_values(self):
        """ Verifies that the options object fixes bad values. """
        model = models.ExampleModel
        # None-valued list options normalize to empty lists.
        opts = {
            'search_fields': None,
            'unsortable_columns': None,
            'hidden_columns': None,
        }
        options = utils.DatatableOptions(model, {}, **opts)
        self.assertEqual(options['search_fields'], [])
        self.assertEqual(options['unsortable_columns'], [])
        self.assertEqual(options['hidden_columns'], [])
        # Negative or non-numeric start offsets clamp to 0.
        data = {utils.OPTION_NAME_MAP['start_offset']: -5}
        options = utils.DatatableOptions(model, data)
        self.assertEqual(options['start_offset'], 0)
        data = {utils.OPTION_NAME_MAP['start_offset']: 'not a number'}
        options = utils.DatatableOptions(model, data)
        self.assertEqual(options['start_offset'], 0)
        # Negative page lengths clamp to the configured minimum...
        data = {utils.OPTION_NAME_MAP['page_length']: -5}
        options = utils.DatatableOptions(model, data)
        self.assertEqual(options['page_length'], utils.MINIMUM_PAGE_LENGTH)
        data = {utils.OPTION_NAME_MAP['page_length']: -1}  # special case for dataTables.js
        options = utils.DatatableOptions(model, data)
        self.assertEqual(options['page_length'], -1)
        # ...and non-numeric page lengths fall back to the default.
        data = {utils.OPTION_NAME_MAP['page_length']: 'not a number'}
        options = utils.DatatableOptions(model, data)
        self.assertEqual(options['page_length'], utils.DEFAULT_OPTIONS['page_length'])

    def test_options_sorting_validation(self):
        """ Verifies that sorting options respect configuration. """
        model = models.ExampleModel
        opts = {
            'columns': [
                'id',
                'date_created',
                'name',
            ],
            'ordering': ['name', 'id'],
            'unsortable_columns': ['date_created'],
        }
        # Invalid sort number means use default sorting
        data = {utils.OPTION_NAME_MAP['num_sorting_columns']: 'not a number'}
        options = utils.DatatableOptions(model, data, **opts)
        self.assertEqual(options['ordering'], ['name', 'id'])
        # Invalid sort index means no sorting for that sorting priority
        data = {
            utils.OPTION_NAME_MAP['num_sorting_columns']: '2',
            (utils.OPTION_NAME_MAP['sort_column'] % 0): '999',  # bad column index
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 0): 'asc',
            (utils.OPTION_NAME_MAP['sort_column'] % 1): '2',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 1): 'asc',
        }
        options = utils.DatatableOptions(model, data, **opts)
        self.assertEqual(options['ordering'], ['name'])
        # Sort requested for unsortable column rejects sorting
        data = {
            utils.OPTION_NAME_MAP['num_sorting_columns']: '2',
            (utils.OPTION_NAME_MAP['sort_column'] % 0): '1',  # unsortable column index
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 0): 'asc',
            (utils.OPTION_NAME_MAP['sort_column'] % 1): '0',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 1): 'asc',
        }
        options = utils.DatatableOptions(model, data, **opts)
        self.assertEqual(options['ordering'], ['id'])
        # Invalid sort direction rejects sorting
        data = {
            utils.OPTION_NAME_MAP['num_sorting_columns']: '2',
            (utils.OPTION_NAME_MAP['sort_column'] % 0): '2',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 0): 'bad direction',  # unusable value
            (utils.OPTION_NAME_MAP['sort_column'] % 1): '0',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 1): 'asc',
        }
        options = utils.DatatableOptions(model, data, **opts)
        self.assertEqual(options['ordering'], ['id'])

    def test_options_normalize_virtual_columns_to_special_names(self):
        """
        Verifies that virtual field receives index-based synonym to denote special conditions that
        will affect sorting/searching behavior.
        """
        model = models.ExampleModel
        opts = {
            'columns': [
                'id',
                'pk',  # 'pk' is technically not a real field, so it registers as fake!
                'fake',
            ],
        }
        data = {
            utils.OPTION_NAME_MAP['num_sorting_columns']: '3',
            (utils.OPTION_NAME_MAP['sort_column'] % 0): '0',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 0): 'desc',
            (utils.OPTION_NAME_MAP['sort_column'] % 1): '1',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 1): 'desc',
            (utils.OPTION_NAME_MAP['sort_column'] % 2): '2',
            (utils.OPTION_NAME_MAP['sort_column_direction'] % 2): 'asc',
        }
        options = utils.DatatableOptions(model, data, **opts)
        # Virtual columns are referenced as "!<index>" in the ordering list.
        self.assertEqual(options['ordering'], ['-id', '-!1', '!2'])
|
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
#
# Steps included in experimental are for internal experimental use only.
#
import logging
import os
import subprocess
import requests
from curie.exception import CurieTestException
from curie.steps._base_step import BaseStep
log = logging.getLogger(__name__)
class WaitOplogEmpty(BaseStep):
    """Wait for the cluster's oplog to be empty.

    Note: The firewall on the CVMs in the cluster must allow access to port
    2009 to query /h/vars.

    Args:
        scenario (Scenario): Scenario this step belongs to.
        timeout (int): Seconds to wait for oplog to drain.
    """

    def __init__(self, scenario, timeout=1200):
        super(WaitOplogEmpty, self).__init__(scenario, annotate=False)
        self.description = "Waiting for oplog to drain"
        # Upper bound in seconds passed to scenario.wait_for in _run.
        self.timeout = timeout

    def _run(self):
        """Poll every CVM's Stargate vars page until all oplogs report 0 bytes."""
        # IPs of every CVM in the cluster.
        all_cvms = set()
        # CVMs already observed with zero oplog bytes; once verified empty a
        # CVM is not polled again.
        verified_empty_cvms = set()
        for vm in self.scenario.cluster.vms():
            if vm.is_cvm():
                all_cvms.add(vm.vm_ip())
        if len(all_cvms) == 0:
            log.warning("No CVMs found to check for oplog empty.")
            return

        def is_cluster_oplog_empty():
            # Only poll the CVMs not yet verified empty.
            to_check = all_cvms - verified_empty_cvms
            for cvm_ip in to_check:
                url = "http://%s:2009/h/vars" % cvm_ip
                try:
                    response = requests.get(
                        url,
                        params={
                            "format": "text",
                            "regex": "stargate/vdisk/total/oplog_bytes$"
                        })
                    response.raise_for_status()
                except requests.exceptions.ConnectionError:
                    # Transient connectivity issues: retry on the next poll.
                    log.warning("Couldn't connect to CVM at %s to get oplog bytes.",
                                cvm_ip)
                    continue
                try:
                    # Expect response to look like:
                    # stargate/vdisk/total/oplog_bytes 27018567680\n
                    # NOTE(review): on Python 3, response.content is bytes, so
                    # this str comparison would always fail — confirm this code
                    # runs under Python 2 or decode the response.
                    metric_name, value = response.content.strip().split()
                    assert metric_name == "stargate/vdisk/total/oplog_bytes"
                    oplog_bytes = int(value)
                except BaseException:
                    # Malformed responses are logged and retried on the next poll.
                    log.exception("Received unexpected response while waiting for oplog "
                                  "to be empty at URL '%s': %r", url, response.content)
                    continue
                else:
                    if oplog_bytes == 0:
                        verified_empty_cvms.add(cvm_ip)
            # Empty only once every CVM has been verified.
            return True if len(all_cvms - verified_empty_cvms) == 0 else False

        rval = self.scenario.wait_for(func=is_cluster_oplog_empty,
                                      msg="Oplog on cluster to be empty",
                                      timeout_secs=self.timeout)
        return rval
class Shell(BaseStep):
    """Run a shell command from the X-Ray VM.

    Args:
        scenario (Scenario): Scenario this step belongs to.
        cmd (str): Shell command to run.
    """

    def __init__(self, scenario, cmd, annotate=False):
        super(Shell, self).__init__(scenario, annotate=annotate)
        self.description = "Executing '%s'" % cmd
        self.cmd = cmd

    def _run(self):
        """Execute the command, restoring the original working directory afterwards."""
        original_dir = os.getcwd()
        try:
            target_dir = self.scenario.output_directory
            if target_dir:
                os.chdir(target_dir)
            return_code = subprocess.check_call(self.cmd, shell=True)
        except subprocess.CalledProcessError as err:
            raise CurieTestException(
                cause=
                "Received non-zero return code %d from shell command '%s': %s" %
                (err.returncode, self.cmd, err),
                impact="The command did not complete successfully.",
                corrective_action=
                "Please check the syntax of the command requested in the %s step." %
                self.name)
        else:
            # Annotate only on success, then report the (zero) return code.
            self.create_annotation("%s" % self.cmd)
            return return_code
        finally:
            os.chdir(original_dir)
|
<filename>soundspaces/tasks/nav.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from typing import Any, Type, Union
import logging
import numpy as np
import torch
import cv2
import librosa
from gym import spaces
from skimage.measure import block_reduce
from habitat.config import Config
from habitat.core.dataset import Episode
from habitat.tasks.nav.nav import DistanceToGoal, Measure, EmbodiedTask, Success
from habitat.core.registry import registry
from habitat.core.simulator import (
Sensor,
SensorTypes,
Simulator,
)
from habitat.utils.geometry_utils import (
quaternion_from_coeff,
quaternion_rotate_vector,
)
from habitat.tasks.utils import cartesian_to_polar
from soundspaces.mp3d_utils import CATEGORY_INDEX_MAPPING
from soundspaces.utils import convert_semantic_object_to_rgb
from soundspaces.mp3d_utils import HouseReader
@registry.register_sensor
class AudioGoalSensor(Sensor):
    """Sensor exposing the simulator's current two-channel audiogoal waveform."""

    def __init__(self, *args: Any, sim: Simulator, config: Config, **kwargs: Any):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "audiogoal"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        # Two channels with RIR_SAMPLING_RATE samples each.
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=(2, self._sim.config.AUDIO.RIR_SAMPLING_RATE),
            dtype=np.float32,
        )

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any):
        # Delegate to the simulator for the current audiogoal rendering.
        return self._sim.get_current_audiogoal_observation()
@registry.register_sensor
class SpectrogramSensor(Sensor):
    """Sensor exposing a two-channel log-magnitude spectrogram of the goal audio."""
    cls_uuid: str = "spectrogram"

    def __init__(self, *args: Any, sim: Simulator, config: Config, **kwargs: Any):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        # Return the class-level uuid (same value as before) so the
        # identifier is defined in one place, consistent with the other
        # sensors in this module that declare cls_uuid.
        return self.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        # Probe the output shape with a dummy two-channel signal of
        # RIR_SAMPLING_RATE samples per channel.
        spectrogram = self.compute_spectrogram(np.ones((2, self._sim.config.AUDIO.RIR_SAMPLING_RATE)))
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=spectrogram.shape,
            dtype=np.float32,
        )

    @staticmethod
    def compute_spectrogram(audio_data):
        """Compute a log-magnitude STFT spectrogram for a (2, num_samples) signal.

        Each channel is transformed independently; magnitudes are 4x4
        mean-pooled, log1p-compressed, and stacked on the last axis.
        """
        def compute_stft(signal):
            n_fft = 512
            hop_length = 160
            win_length = 400
            stft = np.abs(librosa.stft(signal, n_fft=n_fft, hop_length=hop_length, win_length=win_length))
            stft = block_reduce(stft, block_size=(4, 4), func=np.mean)
            return stft

        channel1_magnitude = np.log1p(compute_stft(audio_data[0]))
        channel2_magnitude = np.log1p(compute_stft(audio_data[1]))
        spectrogram = np.stack([channel1_magnitude, channel2_magnitude], axis=-1)
        return spectrogram

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any):
        spectrogram = self._sim.get_current_spectrogram_observation(self.compute_spectrogram)
        return spectrogram
@registry.register_measure
class NormalizedDistanceToGoal(Measure):
    r"""Distance to goal at the end of the episode, normalized by the
    episode's starting geodesic distance.
    """

    def __init__(self, *args: Any, sim: Simulator, config: Config, **kwargs: Any):
        self._start_end_episode_distance = None
        self._sim = sim
        self._config = config
        super().__init__()

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "normalized_distance_to_goal"

    def reset_metric(self, *args: Any, episode, **kwargs: Any):
        # Geodesic start-to-goal distance recorded in the episode info.
        self._start_end_episode_distance = episode.info["geodesic_distance"]
        self._metric = None

    def update_metric(self, *args: Any, episode, action, task: EmbodiedTask, **kwargs: Any):
        current_distance = task.measurements.measures[DistanceToGoal.cls_uuid].get_metric()
        self._metric = current_distance / self._start_end_episode_distance
@registry.register_sensor(name="Collision")
class Collision(Sensor):
    """Boolean sensor reporting whether the previous simulator step collided."""

    def __init__(self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any):
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "collision"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        # A single boolean flag.
        return spaces.Box(low=0, high=1, shape=(1,), dtype=bool)

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any) -> object:
        return [self._sim.previous_step_collided]
@registry.register_measure
class SNA(Measure):
    r"""SNA (Success weighted by Number of Actions).

    Analogous to SPL but computed over action counts instead of path
    lengths: success * oracle_actions / max(oracle_actions, agent_actions).
    (The original docstring mislabeled this as SPL.)
    ref: On Evaluation of Embodied Agents - Anderson et. al
    https://arxiv.org/pdf/1807.06757.pdf
    """
    def __init__(
        self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
    ):
        # Reference number of actions, read from episode info at reset.
        self._start_end_num_action = None
        # Actions taken by the agent so far in the current episode.
        self._agent_num_action = None
        self._sim = sim
        self._config = config
        super().__init__()

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "sna"

    def reset_metric(self, *args: Any, episode, **kwargs: Any):
        self._start_end_num_action = episode.info["num_action"]
        self._agent_num_action = 0
        self._metric = None

    def update_metric(
        self, *args: Any, episode, action, task: EmbodiedTask, **kwargs: Any
    ):
        ep_success = task.measurements.measures[Success.cls_uuid].get_metric()
        self._agent_num_action += 1
        # success * reference_actions / max(reference_actions, agent_actions)
        self._metric = ep_success * (
            self._start_end_num_action
            / max(
                self._start_end_num_action, self._agent_num_action
            )
        )
@registry.register_measure
class NA(Measure):
    r"""Number of actions taken by the agent during the episode.
    ref: On Evaluation of Embodied Agents - Anderson et. al
    https://arxiv.org/pdf/1807.06757.pdf
    """

    def __init__(self, *args: Any, sim: Simulator, config: Config, **kwargs: Any):
        self._agent_num_action = None
        self._sim = sim
        self._config = config
        super().__init__()

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "na"

    def reset_metric(self, *args: Any, episode, **kwargs: Any):
        # Restart the action count for the new episode.
        self._agent_num_action = 0
        self._metric = None

    def update_metric(self, *args: Any, episode, action, task: EmbodiedTask, **kwargs: Any):
        self._agent_num_action += 1
        self._metric = self._agent_num_action
@registry.register_sensor(name="EgoMap")
class EgoMap(Sensor):
    r"""Estimates the top-down occupancy based on current depth-map.

    Args:
        sim: reference to the simulator for calculating task observations.
        config: contains the MAP_RESOLUTION, MAP_SIZE, HEIGHT_THRESH fields to
            decide grid-size, extents of the projection, and the thresholds
            for determining obstacles and explored space.
    """

    def __init__(
        self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any
    ):
        self._sim = sim
        super().__init__(config=config)
        # Map statistics
        self.map_size = self.config.MAP_SIZE
        self.map_res = self.config.MAP_RESOLUTION
        # Agent height for pointcloud transformation
        self.sensor_height = self.config.POSITION[1]
        # Compute intrinsic matrix
        hfov = float(self._sim.config.DEPTH_SENSOR.HFOV) * np.pi / 180
        self.intrinsic_matrix = np.array([[1 / np.tan(hfov / 2.), 0., 0., 0.],
                                          [0., 1 / np.tan(hfov / 2.), 0., 0.],
                                          [0., 0., 1, 0],
                                          [0., 0., 0, 1]])
        self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)
        # Height thresholds for obstacles
        self.height_thresh = self.config.HEIGHT_THRESH
        # Depth processing
        self.min_depth = float(self._sim.config.DEPTH_SENSOR.MIN_DEPTH)
        self.max_depth = float(self._sim.config.DEPTH_SENSOR.MAX_DEPTH)
        # Pre-compute a grid of locations for depth projection
        W = self._sim.config.DEPTH_SENSOR.WIDTH
        H = self._sim.config.DEPTH_SENSOR.HEIGHT
        self.proj_xs, self.proj_ys = np.meshgrid(
            np.linspace(-1, 1, W),
            np.linspace(1, -1, H)
        )

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "ego_map"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        # Two channels: obstacle mask and explored-space mask.
        sensor_shape = (self.config.MAP_SIZE, self.config.MAP_SIZE, 2)
        return spaces.Box(
            low=0,
            high=1,
            shape=sensor_shape,
            dtype=np.uint8,
        )

    def convert_to_pointcloud(self, depth):
        """
        Inputs:
            depth = (H, W, 1) numpy array
        Returns:
            xyz_camera = (N, 3) numpy array for (X, Y, Z) in egocentric world coordinates
        """
        depth_float = depth.astype(np.float32)[..., 0]
        # =========== Convert to camera coordinates ============
        # (The original had an unused local `W = depth.shape[1]` here; removed.)
        xs = self.proj_xs.reshape(-1)
        ys = self.proj_ys.reshape(-1)
        depth_float = depth_float.reshape(-1)
        # Filter out invalid depths
        max_forward_range = self.map_size * self.map_res
        valid_depths = (depth_float != 0.0) & (depth_float <= max_forward_range)
        xs = xs[valid_depths]
        ys = ys[valid_depths]
        depth_float = depth_float[valid_depths]
        # Unproject
        # negate depth as the camera looks along -Z
        xys = np.vstack((xs * depth_float,
                         ys * depth_float,
                         -depth_float, np.ones(depth_float.shape)))
        inv_K = self.inverse_intrinsic_matrix
        xyz_camera = np.matmul(inv_K, xys).T  # XYZ in the camera coordinate system
        xyz_camera = xyz_camera[:, :3] / xyz_camera[:, 3][:, np.newaxis]
        return xyz_camera

    def safe_assign(self, im_map, x_idx, y_idx, value):
        """Assign value at (x_idx, y_idx); on IndexError retry with only in-bounds indices."""
        try:
            im_map[x_idx, y_idx] = value
        except IndexError:
            valid_idx1 = np.logical_and(x_idx >= 0, x_idx < im_map.shape[0])
            valid_idx2 = np.logical_and(y_idx >= 0, y_idx < im_map.shape[1])
            valid_idx = np.logical_and(valid_idx1, valid_idx2)
            im_map[x_idx[valid_idx], y_idx[valid_idx]] = value

    def _get_depth_projection(self, sim_depth):
        """
        Project pixels visible in depth-map to ground-plane
        """
        # Undo depth normalization if the sensor is configured to normalize.
        if self._sim.config.DEPTH_SENSOR.NORMALIZE_DEPTH:
            depth = sim_depth * (self.max_depth - self.min_depth) + self.min_depth
        else:
            depth = sim_depth

        XYZ_ego = self.convert_to_pointcloud(depth)
        # Adding agent's height to the point cloud
        XYZ_ego[:, 1] += self.sensor_height
        # Convert to grid coordinate system
        V = self.map_size
        Vby2 = V // 2
        points = XYZ_ego
        grid_x = (points[:, 0] / self.map_res) + Vby2
        grid_y = (points[:, 2] / self.map_res) + V
        # Filter out invalid points
        valid_idx = (grid_x >= 0) & (grid_x <= V-1) & (grid_y >= 0) & (grid_y <= V-1)
        points = points[valid_idx, :]
        grid_x = grid_x[valid_idx].astype(int)
        grid_y = grid_y[valid_idx].astype(int)
        # Create empty maps for the two channels
        obstacle_mat = np.zeros((self.map_size, self.map_size), np.uint8)
        explore_mat = np.zeros((self.map_size, self.map_size), np.uint8)
        # Compute obstacle locations
        high_filter_idx = points[:, 1] < self.height_thresh[1]
        low_filter_idx = points[:, 1] > self.height_thresh[0]
        obstacle_idx = np.logical_and(low_filter_idx, high_filter_idx)
        self.safe_assign(obstacle_mat, grid_y[obstacle_idx], grid_x[obstacle_idx], 1)
        # Compute explored locations
        explored_idx = high_filter_idx
        self.safe_assign(explore_mat, grid_y[explored_idx], grid_x[explored_idx], 1)
        # Smoothen the maps
        kernel = np.ones((3, 3), np.uint8)
        obstacle_mat = cv2.morphologyEx(obstacle_mat, cv2.MORPH_CLOSE, kernel)
        explore_mat = cv2.morphologyEx(explore_mat, cv2.MORPH_CLOSE, kernel)
        # Ensure all expanded regions in obstacle_mat are accounted for in explored_mat
        explore_mat = np.logical_or(explore_mat, obstacle_mat)
        return np.stack([obstacle_mat, explore_mat], axis=2)

    def get_observation(
        self, *args: Any, observations, episode: Episode, **kwargs: Any
    ) -> object:
        # Use the simulator's cached egomap if available; otherwise project
        # the current depth observation and cache the result.
        ego_map_gt = self._sim.get_egomap_observation()
        if ego_map_gt is None:
            sim_depth = asnumpy(observations['depth'])
            ego_map_gt = self._get_depth_projection(sim_depth)
            self._sim.cache_egomap_observation(ego_map_gt)
        return ego_map_gt
def asnumpy(v):
    """Coerce a torch tensor or numpy array to a numpy array; reject anything else."""
    if torch.is_tensor(v):
        return v.cpu().numpy()
    if isinstance(v, np.ndarray):
        return v
    raise ValueError('Invalid input')
@registry.register_sensor(name="Category")
class Category(Sensor):
    """One-hot encoding of the episode's target object category."""
    cls_uuid: str = "category"

    def __init__(self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any):
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return self.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        num_categories = len(CATEGORY_INDEX_MAPPING.keys())
        return spaces.Box(low=0, high=1, shape=(num_categories,), dtype=bool)

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any) -> object:
        # One hot slot for the episode's target category.
        onehot = np.zeros(len(CATEGORY_INDEX_MAPPING.keys()))
        onehot[CATEGORY_INDEX_MAPPING[episode.object_category]] = 1
        return onehot
@registry.register_sensor(name="CategoryBelief")
class CategoryBelief(Sensor):
    """Zero-initialized belief vector over objects-of-interest and regions.

    The vector length is read from the MP3D objects-of-interest metadata
    file; the metadata is loaded lazily and cached on the instance.
    """
    cls_uuid: str = "category_belief"

    def __init__(self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any):
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return self.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _load_metadata(self):
        """Load and cache the objects/regions id->name tables (idempotent).

        Previously this was done only inside _get_observation_space, so a
        get_observation call that preceded it raised AttributeError, and
        every space query re-read the file.
        """
        if hasattr(self, 'num_objects') and hasattr(self, 'num_regions'):
            return
        mp3d_objects_of_interest_filepath = r"data/metadata/mp3d_objects_of_interest_data.bin"
        # NOTE: pickle is used on a data file shipped with the project; do
        # not point this path at untrusted input.
        with open(mp3d_objects_of_interest_filepath, 'rb') as bin_file:
            self.ooi_objects_id_name = pickle.load(bin_file)
            self.ooi_regions_id_name = pickle.load(bin_file)
        self.num_objects = len(self.ooi_objects_id_name)
        self.num_regions = len(self.ooi_regions_id_name)

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        self._load_metadata()
        return spaces.Box(
            low=0,
            high=1,
            shape=(self.num_objects + self.num_regions,),
            dtype=bool
        )

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any) -> object:
        self._load_metadata()
        belief = np.zeros(self.num_objects + self.num_regions)
        return belief
@registry.register_sensor(name="LocationBelief")
class LocationBelief(Sensor):
    """Zero-initialized two-dimensional location belief vector."""
    cls_uuid: str = "location_belief"

    def __init__(self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any):
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return self.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(low=0, high=1, shape=(2,), dtype=bool)

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any) -> object:
        # Always a fresh zero vector.
        return np.zeros(2)
@registry.register_sensor(name="MPCAT40Index")
class MPCAT40Index(Sensor):
    """Exposes the episode's target object category as its mpcat40 index."""
    def __init__(
        self, sim: Union[Simulator, Config], config: Config, *args: Any, **kwargs: Any
    ):
        self.config = config
        # Category name -> mpcat40 integer index.
        self._category_mapping = {
            'chair': 3,
            'table': 5,
            'picture': 6,
            'cabinet': 7,
            'cushion': 8,
            'sofa': 10,
            'bed': 11,
            'chest_of_drawers': 13,
            'plant': 14,
            'sink': 15,
            'toilet': 18,
            'stool': 19,
            'towel': 20,
            'tv_monitor': 22,
            'shower': 23,
            'bathtub': 25,
            'counter': 26,
            'fireplace': 27,
            'gym_equipment': 33,
            'seating': 34,
            'clothes': 38
        }
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "mpcat40_index"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        # NOTE(review): the declared space is a (1,) bool Box with high=1,
        # but get_observation returns indices up to 38 — the space does not
        # actually contain the observations. Confirm consumers ignore the
        # declared bounds before relying on them.
        return spaces.Box(
            low=0,
            high=1,
            shape=(1,),
            dtype=bool
        )

    def get_observation(
        self, *args: Any, observations, episode: Episode, **kwargs: Any
    ) -> object:
        # Raises KeyError for categories outside the mapping above.
        index = self._category_mapping[episode.object_category]
        encoding = np.array([index])
        return encoding
@registry.register_sensor(name="SemanticObjectSensor")
class SemanticObjectSensor(Sensor):
    r"""Lists the object categories for each pixel location.

    Converts the simulator's instance-id semantic frame to task category
    ids in two steps: instance id -> MP3D category id (from the scene's
    .house file) -> task category id (via a fixed lookup table).  MP3D ids
    with no task mapping are sent to one extra "unknown" category.
    Optionally the result is rendered as an RGB image.

    Args:
        sim: reference to the simulator for calculating task observations.
    """

    cls_uuid: str = "semantic_object"

    def __init__(
        self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
    ):
        self._sim = sim
        # Id of the last processed episode; used to rebuild the per-scene
        # instance-id mapping only when the episode changes.
        self._current_episode_id = None
        self.mapping = None
        self._initialize_category_mappings()
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return self.cls_uuid

    def _initialize_category_mappings(self):
        """Build the MP3D-id -> task-id lookup table used per frame."""
        # Task-internal category indices (0..20).
        self.category_to_task_category_id = {
            'chair': 0,
            'table': 1,
            'picture': 2,
            'cabinet': 3,
            'cushion': 4,
            'sofa': 5,
            'bed': 6,
            'chest_of_drawers': 7,
            'plant': 8,
            'sink': 9,
            'toilet': 10,
            'stool': 11,
            'towel': 12,
            'tv_monitor': 13,
            'shower': 14,
            'bathtub': 15,
            'counter': 16,
            'fireplace': 17,
            'gym_equipment': 18,
            'seating': 19,
            'clothes': 20
        }
        # MPCAT40 ids for the same 21 category names.
        self.category_to_mp3d_category_id = {
            'chair': 3,
            'table': 5,
            'picture': 6,
            'cabinet': 7,
            'cushion': 8,
            'sofa': 10,
            'bed': 11,
            'chest_of_drawers': 13,
            'plant': 14,
            'sink': 15,
            'toilet': 18,
            'stool': 19,
            'towel': 20,
            'tv_monitor': 22,
            'shower': 23,
            'bathtub': 25,
            'counter': 26,
            'fireplace': 27,
            'gym_equipment': 33,
            'seating': 34,
            'clothes': 38
        }
        self.num_task_categories = np.max(
            list(self.category_to_task_category_id.values())
        ) + 1
        # Dense lookup table indexed by MP3D id; -1 marks "no task mapping".
        # Size 200 gives headroom over the MPCAT40 id range used above.
        self.mp3d_id_to_task_id = np.ones((200, ), dtype=np.int64) * -1
        for k in self.category_to_task_category_id.keys():
            v1 = self.category_to_task_category_id[k]
            v2 = self.category_to_mp3d_category_id[k]
            self.mp3d_id_to_task_id[v2] = v1
        # Map unknown classes to a new category
        self.mp3d_id_to_task_id[
            self.mp3d_id_to_task_id == -1
        ] = self.num_task_categories

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        if self.config.CONVERT_TO_RGB:
            observation_space = spaces.Box(
                low=0,
                high=255,
                shape=(self.config.HEIGHT, self.config.WIDTH, 3),
                dtype=np.uint8,
            )
        else:
            observation_space = spaces.Box(
                low=np.iinfo(np.uint32).min,
                high=np.iinfo(np.uint32).max,
                shape=(self.config.HEIGHT, self.config.WIDTH),
                dtype=np.uint32,
            )
        return observation_space

    def get_observation(
        self, *args: Any, observations, episode, **kwargs: Any
    ):
        """Map the raw semantic frame to task category ids (or RGB)."""
        episode_uniq_id = f"{episode.scene_id} {episode.episode_id}"
        if self._current_episode_id != episode_uniq_id:
            self._current_episode_id = episode_uniq_id
            # Rebuild the instance-id -> MP3D-id table from the scene's
            # .house file (path derived from the current .glb scene).
            reader = HouseReader(self._sim._current_scene.replace('.glb', '.house'))
            instance_id_to_mp3d_id = reader.compute_object_to_category_index_mapping()
            self.instance_id_to_mp3d_id = np.array([instance_id_to_mp3d_id[i] for i in range(len(instance_id_to_mp3d_id))])

        # Pre-process semantic observations to remove invalid values
        semantic = np.copy(observations["semantic"])
        semantic[semantic >= self.instance_id_to_mp3d_id.shape[0]] = 0
        # Map from instance id to semantic id
        semantic_object = np.take(self.instance_id_to_mp3d_id, semantic)
        # Map from semantic id to task id
        semantic_object = np.take(self.mp3d_id_to_task_id, semantic_object)
        if self.config.CONVERT_TO_RGB:
            semantic_object = SemanticObjectSensor.convert_semantic_map_to_rgb(
                semantic_object
            )
        return semantic_object

    @staticmethod
    def convert_semantic_map_to_rgb(semantic_map):
        """Render a category-id map as an RGB image (delegates to helper)."""
        return convert_semantic_object_to_rgb(semantic_map)
@registry.register_sensor(name="PoseSensor")
class PoseSensor(Sensor):
    r"""The agents current location and heading in the coordinate frame defined by the
    episode, i.e. the axis it faces along and the origin is defined by its state at
    t=0. Additionally contains the time-step of the episode.

    Args:
        sim: reference to the simulator for calculating task observations.
        config: Contains the DIMENSIONALITY field for the number of dimensions to express the agents position
    Attributes:
        _dimensionality: number of dimensions used to specify the agents position
    """

    cls_uuid: str = "pose"

    def __init__(
        self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
    ):
        self._sim = sim
        # Steps elapsed in the current episode; reset on episode change.
        self._episode_time = 0
        self._current_episode_id = None
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.POSITION

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(4,),
            dtype=np.float32,
        )

    def _quat_to_xy_heading(self, quat):
        """Return the heading angle of *quat* as a shape-(1,) float32 array."""
        direction_vector = np.array([0, 0, -1])
        heading_vector = quaternion_rotate_vector(quat, direction_vector)
        phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
        return np.array([phi], dtype=np.float32)

    def get_observation(
        self, observations, episode, *args: Any, **kwargs: Any
    ):
        """Return [forward offset, lateral offset, heading, step count] in the
        episode's start frame."""
        episode_uniq_id = f"{episode.scene_id} {episode.episode_id}"
        if episode_uniq_id != self._current_episode_id:
            self._episode_time = 0.0
            self._current_episode_id = episode_uniq_id

        agent_state = self._sim.get_agent_state()

        # Express the agent's position in the episode-start frame.
        origin = np.array(episode.start_position, dtype=np.float32)
        rotation_world_start = quaternion_from_coeff(episode.start_rotation)
        agent_position_xyz = agent_state.position
        rotation_world_agent = agent_state.rotation
        agent_position_xyz = quaternion_rotate_vector(
            rotation_world_start.inverse(), agent_position_xyz - origin
        )

        agent_heading = self._quat_to_xy_heading(
            rotation_world_agent.inverse() * rotation_world_start
        )

        ep_time = self._episode_time
        self._episode_time += 1.0

        # Fix: unwrap the shape-(1,) heading array to a scalar.  Mixing a
        # 1-element array with scalars in np.array(...) relies on ragged
        # sequence coercion, which raises on NumPy >= 1.24.
        return np.array(
            [-agent_position_xyz[2], agent_position_xyz[0], agent_heading[0], ep_time],
            dtype=np.float32,
        )
@registry.register_sensor
class ProximitySensor(Sensor):
    r"""Sensor for observing the distance to the closest obstacle

    Args:
        sim: reference to the simulator for calculating task observations.
        config: config for the sensor.
    """

    cls_uuid: str = "proximity"

    def __init__(self, sim, config, *args: Any, **kwargs: Any):
        self._sim = sim
        # Distances are clipped to this radius by the simulator query.
        self._max_detection_radius = getattr(
            config, "MAX_DETECTION_RADIUS", 2.0
        )
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return ProximitySensor.cls_uuid

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.TACTILE

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(
            low=0.0,
            high=self._max_detection_radius,
            shape=(1,),
            dtype=np.float32,
        )

    def get_observation(
        self, observations, *args: Any, episode, **kwargs: Any
    ):
        position = self._sim.get_agent_state().position
        distance = self._sim.distance_to_closest_obstacle(
            position, self._max_detection_radius
        )
        return np.array([distance], dtype=np.float32)
@registry.register_sensor
class OracleActionSensor(Sensor):
    """Exposes the oracle action provided by the simulator."""

    def __init__(self, *args: Any, sim: Simulator, config: Config, **kwargs: Any):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "oracle_action_sensor"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=(1,),
            dtype=np.float32,
        )

    def get_observation(self, *args: Any, observations, episode: Episode, **kwargs: Any):
        # Delegate entirely to the simulator.
        return self._sim.get_oracle_action()
|
<reponame>steemfans/steem-lightdb<filename>transfer/user_relation.py
#!/usr/bin/python3
#encoding:UTF-8
import json, os, sys, time
import utils.TransferTasks as tasks
import utils.utils as utils
from utils.BlockProcess import BlockProcess as BlockProcess
import asyncio, aiomysql
from multiprocessing import Pool
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
from contextlib import suppress
task_type = 'user_relation'
class UserRelationProcess(BlockProcess):
    """Block processor extracting follow relations from custom_json ops.

    For each ``custom_json`` op with id ``follow``, the follower/following
    usernames are resolved against the ``users`` table and queued for the
    ``user_relations`` table; ops that cannot be parsed or resolved are
    queued for the ``undo_op`` table instead.
    """

    def __init__(self, loop, data_type):
        super().__init__(loop, data_type)

    async def process(self, block_num, block_time, trans_id, ops):
        """Sort one transaction's ops into 'data' (resolved relations) and
        'undo' (unparseable/unresolvable ops) buckets."""
        global task_type
        db = self.db
        # print('process %i blcok\'s ops' % block_num, ops)
        self.processed_data = {
            'data': [],
            'undo': []}
        for op_idx, op in enumerate(ops):
            op_type = op[0]
            op_detail = op[1]
            if op_type == 'custom_json' and 'id' in op_detail and op_detail['id'] == 'follow':
                if op_detail['json'] == '':
                    continue
                try:
                    json_data = json.loads(op_detail['json'])
                    follower = None
                    following = None
                    what = None
                    if isinstance(json_data, dict):
                        if 'follower' in json_data:
                            follower = json_data['follower']
                        if 'following' in json_data:
                            following = json_data['following']
                        # 'what' carries the relation type; only its first
                        # entry is used.
                        if 'what' in json_data and isinstance(json_data['what'], list) and len(json_data['what']) > 0:
                            what = json_data['what'][0]
                    #elif isinstance(json_data, list):
                    #    if len(json_data) >= 2 and json_data[0] == 'follow':
                    #        if 'follower' in json_data[1]:
                    #            follower = json_data[1]['follower']
                    #        if 'following' in json_data[1]:
                    #            following = json_data[1]['following']
                    #        if 'what' in json_data[1] and len(json_data[1]['what']) > 0:
                    #            what = json_data[1]['what'][0]
                    #    else:
                    #        continue
                    else:
                        continue
                    # Skip ops that carry no usable payload at all.
                    if follower == None and following == None and (what == None or what == ''):
                        print('follow_data_error', block_num, trans_id, follower, following, what, op)
                        continue
                    sql = '''
                        select id, username from users
                        where username = %s or username = %s'''
                    cur = await db.cursor()
                    await cur.execute(sql, (follower, following))
                    user_data = await cur.fetchall()
                    await cur.close()
                    if len(user_data) == 2:
                        # Both accounts exist; map usernames back to ids.
                        for user in user_data:
                            if user[1] == follower:
                                follower_id = user[0]
                            if user[1] == following:
                                following_id = user[0]
                        self.processed_data['data'].append((follower_id, following_id, what, block_time, ))
                    else:
                        # At least one account is unknown; record the raw op
                        # in undo_op for later inspection/replay.
                        self.processed_data['undo'].append((block_num, trans_id, op_idx, json.dumps(op), tasks.getTypeId(task_type), block_time))
                except Exception as e:
                    self.processed_data['undo'].append((block_num, trans_id, op_idx, json.dumps(op), tasks.getTypeId(task_type), block_time))
                    utils.PrintException([block_num, trans_id, op_idx])
            else:
                # print('unknown type:', op_type, block_num, trans_id, ops, op_idx)
                continue
        # print('processed:', self.processed_data)
        return self.processed_data

    async def insertData(self):
        """Flush buffered rows and mark the task finished in one commit."""
        db = self.db
        try:
            cur = await db.cursor()
            if self.prepared_data['data'] != []:
                sql_main_data = '''
                    insert ignore into user_relations
                    (follower_id, following_id, what, created_at)
                    values
                    (%s, %s, %s, %s)'''
                await cur.executemany(sql_main_data, self.prepared_data['data'])
            if self.prepared_data['undo'] != []:
                sql_undo_data = '''
                    insert ignore into undo_op
                    (block_num, transaction_id, op_index, op, task_type, block_time)
                    values
                    (%s, %s, %s, %s, %s, %s)'''
                await cur.executemany(sql_undo_data, self.prepared_data['undo'])
            sql_update_task = '''
                update multi_tasks set is_finished = 1
                where id = %s'''
            # NOTE(review): (self.task_id) is a scalar, not a tuple; pymysql
            # accepts a scalar arg, but (self.task_id,) is the canonical form.
            await cur.execute(sql_update_task, (self.task_id))
            await db.commit()
            await cur.close()
        except Exception as e:
            # NOTE(review): if db.cursor() itself raised, 'cur' is unbound
            # here and cur.close() would raise NameError -- confirm.
            await db.rollback()
            await cur.close()
            print('insert_data_failed', 'task_id:', self.task_id, e)
def processor(all_tasks):
    """Run one batch of tasks on this worker process's event loop."""
    global task_type
    if not all_tasks:
        return
    loop = asyncio.get_event_loop()
    try:
        futures = [
            asyncio.ensure_future(
                UserRelationProcess(loop, task_type).doMultiTasks(one_task)
            )
            for one_task in all_tasks
        ]
        loop.run_until_complete(asyncio.wait(futures))
    except KeyboardInterrupt:
        # Cancel whatever is still pending before tearing the loop down.
        for pending in asyncio.Task.all_tasks():
            pending.cancel()
        loop.stop()
    finally:
        loop.close()
def mainMultiProcess():
    """Poll for pending tasks forever, fanning batches out to worker processes."""
    global task_type
    config = utils.get_config()
    while True:
        pending = tasks.splitTasks(tasks.get(task_type), config['slice_step'])
        if pending:
            # A fresh pool per polling round; shutdown() blocks until done.
            pool = ProcessPoolExecutor(config['worker'])
            for batch in pending:
                pool.submit(processor, batch)
            pool.shutdown()
        time.sleep(3)
if __name__ == '__main__':
    # suppress() swallows Ctrl-C so the process exits without a traceback.
    with suppress(KeyboardInterrupt):
        mainMultiProcess()
|
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2018 Datadog, Inc.
from datetime import datetime
import json
import logging
import os
import sys
import tornado
from utils.hostname import get_hostname, HostnameException
from utils.api import validate_api_key
from collector import CheckLoader, WheelLoader
log = logging.getLogger(__name__)
class AgentStatusHandler(tornado.web.RequestHandler):
    """Tornado handler serving the agent's status document as JSON.

    GET returns per-component stats snapshots plus process-level metadata
    (uptime, pid, python version, paths, hostname, redacted API key and its
    validation status).
    """

    LOADERS = [CheckLoader, WheelLoader]

    def initialize(self, config, started, status):
        """Tornado per-request init: stash config, start time and stats map."""
        self._config = config
        self._status = status
        self._started = started

    def get(self):
        """Assemble the status dict and write it out as JSON."""
        status = {}
        for component, stats in self._status.items():
            log.debug("adding component %s to stats", component)
            stats_snap, info_snap = stats.snapshot()
            # Agent/collector info snapshots get post-processed into a
            # friendlier structure; other components pass through as-is.
            if component == 'agent':
                info_snap = self.process_agent_info(info_snap)
            elif component == 'collector':
                info_snap = self.process_collector_info(info_snap)
            status[component] = {
                'stats': stats_snap,
                'info': info_snap,
            }
        now = datetime.utcnow()
        status['uptime'] = (now - self._started).total_seconds()
        status['utc_time'] = now.strftime("%a, %d %b %Y %H:%M:%S.%f %Z")
        status['pid'] = os.getpid()
        status['python_version'] = "{major}.{minor}.{bugfix}".format(
            major=sys.version_info[0],
            minor=sys.version_info[1],
            bugfix=sys.version_info[2]
        )
        status['agent_log_path'] = self._config.get('logging', {}).get('agent_log_file')
        status['agent_config_path'] = self._config.get_loaded_config()
        status['log_level'] = self._config.get('log_level', 'INFO').upper()
        try:
            status['hostname'] = get_hostname()
            status['hostname_native'] = get_hostname(config_override=False)
        except HostnameException:
            # Keep a hostname resolved before the exception, if any.
            status['hostname'] = '' if 'hostname' not in status else status['hostname']
            status['hostname_native'] = ''
        # Only the last 5 characters of the API key are exposed.
        # NOTE(review): assumes 'api_key' is configured; a None from .get()
        # would raise TypeError here -- confirm upstream validation.
        status['redacted_api'] = '*'*20 + self._config.get('api_key')[-5:]
        status['api_status'] = validate_api_key(self._config)
        try:
            log.debug('status response to render: %s', status)
            self.write(json.dumps(status))
        except TypeError as e:
            log.error("unable to handle status request: %s", e)

    def process_agent_info(self, info):
        """Group agent metric sources by check name.

        ``info['sources']`` maps a signature tuple (check name first) to a
        list of metric values; sources sharing a check name have their metric
        lists merged.
        """
        processed = {}
        for signature, values in info.get('sources', {}).items():
            log.debug("processing %s, %s", signature, values)
            check = signature[0]
            if check in processed:
                # Fix: was processed['sources'][check]['merics'] += values,
                # which raised KeyError ('sources' never exists in processed)
                # and misspelled the 'metrics' key.
                processed[check]['metrics'] += values
            else:
                processed[check] = {'metrics': values}
        return {'checks': processed}

    def process_collector_info(self, info):
        """Summarize collector loader and runtime errors per check."""
        processed = {
            'loader': {},
            'runtime': {},
        }
        check_classes = info.get('check_classes', {})
        loader_errors = info.get('loader_errors', {})
        runtime_errors = info.get('runtime_errors', {})
        for check, errors in loader_errors.items():
            if check in check_classes:  # check eventually loaded
                continue
            processed['loader'][check] = {}
            for loader, error in errors.items():
                if loader == CheckLoader.__name__:
                    # CheckLoader reports one error per searched path.
                    for place, err in error.items():
                        processed['loader'][check][loader] = '{path}: {err}'.format(path=place, err=err['error'])
                elif loader == WheelLoader.__name__:
                    processed['loader'][check][loader] = str(error['error'])
        for check, errors in runtime_errors.items():
            processed['runtime'][check] = {}
            for instance, error in errors.items():
                # Instance keys are ints; rendered as hex strings for JSON.
                processed['runtime'][check][hex(instance)] = error
        return {'errors': processed}
|
import torch
torch.backends.cudnn.benchmark = True
from trainer.base_audio_trainer import BaseAudioTrainer
from logger.new_callbacks import Callbacks
from torch.utils.data import DataLoader
from dataloader.audio_dataset import AudioDataset
from torch.optim.lr_scheduler import CosineAnnealingLR
from models.audio_models.model_dcase import ConvNet
from models.audio_models.model_m1 import Classifier_M2, Classifier_M3
from models.audio_models.model_m0 import Classifier
from utils_helper.mixup import *
import numpy as np
import cProfile
try:
from apex import amp
except:
pass
try:
import wandb
except:
pass
class AudioTrainer(BaseAudioTrainer):
    """Training harness for a binary audio classifier.

    Wires together dataloaders, model, loss, optimizer/scheduler, optional
    mixup/cutmix augmentation, optional apex AMP, multi-GPU DataParallel and
    wandb logging; the train/validation loop itself lives in
    BaseAudioTrainer.
    """

    def __init__(self, hparams, train_length=None, valid_length=None):
        # --- copy hyperparameters onto the instance ---
        self.mixup = hparams.mixup
        self.cutmix = hparams.cutmix
        self.batch_size = hparams.batch_size
        self.num_workers = hparams.num_workers
        self.train_dir = hparams.train_dir
        self.train_meta_file = hparams.train_meta_file
        self.valid_dir = hparams.valid_dir
        self.valid_meta_file = hparams.valid_meta_file
        self.epochs = hparams.epochs
        self.save_dir = hparams.save_dir
        self.checkpoint_dir = hparams.checkpoint_dir
        self.grad_acc_num = hparams.grad_acc_num
        self.lr = hparams.lr
        self.network_name = hparams.network_name
        self.optimizer_name = hparams.optimizer_name
        self.scheduler_name = hparams.scheduler_name
        self.project_name = hparams.project_name
        self.run_name = hparams.run_name
        self.criterion_name = hparams.criterion_name
        self.use_amp = hparams.use_amp
        self.device = hparams.device
        self.load_model_only = hparams.load_model_only
        self.tuning_type = hparams.tuning_type
        self.pos_weight_factor = hparams.pos_weight_factor

        self.cb = Callbacks(log_every=10, save_dir=self.save_dir)

        # --- build components (model must exist before the optimizer) ---
        self.init_train_dataloader(length=train_length)
        self.init_valid_dataloader(length = valid_length)
        self.init_criterion()
        self.init_model()
        self.set_tuning_parameters()
        self.init_optimizer()
        self.init_scheduler()

        # wandb logging is enabled only when a project name is given.
        if hparams.project_name is not None:
            self.cb.init_wandb(hparams.project_name, hparams, hparams.run_name)
            wandb.watch(self.model)

        if torch.cuda.device_count() > 1 and self.device == 'cuda':
            print("Using Multiple GPUs")
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
        self.model.to(self.device)

        # NOTE(review): amp.initialize runs after DataParallel wrapping;
        # apex docs recommend initializing before wrapping -- confirm.
        if self.use_amp:
            self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O1")
        self.load_checkpoint(self.checkpoint_dir, is_model_only=self.load_model_only)

    def init_criterion(self):
        """Build losses; BCEWithLogits for training, BCE for validation."""
        # TODO: self.criterion_name is currently ignored -- the criterion
        # is hard-coded below.
        self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.pos_weight_factor))
        self.log_loss_criterion = torch.nn.BCELoss()
        self.valid_criterion = torch.nn.BCELoss()

    def init_model(self):
        """Instantiate the classifier named by self.network_name."""
        model_dict = {
            "m0": Classifier,
            "m2": Classifier_M2,
            "m3": Classifier_M3,
            "dcase": ConvNet,
        }
        self.model = model_dict[self.network_name](num_classes=1)

    def set_tuning_parameters(self):
        """Apply fine-tuning mode; currently only 'freeze_bn' is recognized."""
        if self.tuning_type=="freeze_bn":
            self.model.freeze_bn = True
            self.model.freeze_bn_affine = True

    def init_optimizer(self, lr=None):
        """Build the optimizer; an explicit lr overrides the hparams value."""
        # TODO: self.optimizer_name is currently ignored -- AdamW is
        # hard-coded below.
        if lr is not None:
            self.lr = lr
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.lr, amsgrad=False)

    def init_scheduler(self):
        """Build the LR scheduler ('cosine') or disable scheduling (None)."""
        if self.scheduler_name == "cosine":
            self.scheduler = CosineAnnealingLR(self.optimizer, T_max=10, eta_min=1e-5)
        else:
            self.scheduler = None

    '''
    1.1.1. batch process
    '''
    def batch_process(self, batch, index=None, isTraining=True):
        """Unpack a batch; during training optionally apply mixup/cutmix.

        Returns (x, y_a, y_b, lam) when mixing was applied, otherwise (x, y).
        """
        self.cb.on_batch_process_start()
        source_filenames, x_batch, y_batch, video_original_filenames = batch
        y_batch = y_batch.float()
        if isTraining:
            # Mixing is applied to roughly half the batches.
            r = np.random.rand(1)
            if (self.mixup or self.cutmix) and r < 0.5:
                if self.mixup and (not self.cutmix):
                    # NOTE(review): unlike the branches below, no device
                    # argument is passed to mixup_data here -- confirm.
                    x_batch, y_batch_a, y_batch_b, lam = mixup_data(x_batch, y_batch)
                elif self.cutmix and (not self.mixup):
                    x_batch, y_batch_a, y_batch_b, lam = cutmix_data(x_batch, y_batch, device=self.device)
                else:
                    # Both enabled: pick one augmentation at random per batch.
                    x_batch, y_batch_a, y_batch_b, lam = cutmix_data(x_batch, y_batch, device=self.device) if np.random.rand() > 0.5 else mixup_data(x_batch, y_batch, device=self.device)
                y_batch_b = y_batch_b.unsqueeze(1)
                y_batch_a = y_batch_a.unsqueeze(1)
                self.cb.on_batch_process_end()
                return x_batch, y_batch_a, y_batch_b, lam
            else:
                y_batch = y_batch.unsqueeze(1)
                self.cb.on_batch_process_end()
                return x_batch, y_batch
        else:
            y_batch = y_batch.unsqueeze(1)
            self.cb.on_batch_process_end()
            return x_batch, y_batch

    '''
    1.1.2. batch train
    '''
    def batch_train_step(self, batch, index):
        """Forward pass + loss for one training batch.

        A 4-tuple batch signals mixed (mixup/cutmix) labels.
        """
        self.cb.on_batch_train_step_start()
        # NOTE(review): 'r' is unused here -- leftover from batch_process?
        r = np.random.rand(1)
        if (len(batch)==4):
            x_batch, y_batch_a, y_batch_b, lam = batch
            preds = self.model(x_batch.to(self.device))
            # lam-weighted combination of the two label sets.
            loss = mixup_criterion(self.criterion, preds, y_batch_a.to(self.device), y_batch_b.to(self.device), lam)
        else:
            x_batch, y_batch = batch
            preds = self.model(x_batch.to(self.device))
            loss = self.criterion(preds, y_batch.to(self.device))
        dict_metrics = {"train_batch_loss":loss.item()}
        if self.scheduler is not None:
            dict_metrics["lr"] = self.optimizer.param_groups[0]['lr']
        self.cb.on_batch_train_step_end(dict_metrics)
        return loss

    '''
    2.1.2. batch valid
    '''
    def batch_valid_step(self, batch, index):
        """Evaluate one validation batch, sample by sample.

        Predictions over a sample's clips are sigmoid-averaged; for the log
        loss, averaged probabilities are floored at 0.5 before BCE.
        """
        self.cb.on_batch_valid_step_start()
        with torch.no_grad():
            for idx, (x_batch, y_batch) in enumerate(zip(*batch)):
                x_batch = x_batch
                y_batch = y_batch.unsqueeze(0)
                predicted = self.model(x_batch.to(self.device))
                loss_original = self.criterion(predicted.mean(axis=0).unsqueeze(0), y_batch.to(self.device))
                predicted2 = torch.sigmoid(predicted).mean(axis=0)
                # Clamp low probabilities to 0.5 to bound the log loss.
                predicted2[predicted2<0.5] = 0.5
                log_loss = self.log_loss_criterion(predicted2, y_batch.to(self.device))
                self.cb.on_batch_valid_step_end({"predicted":torch.sigmoid(predicted).mean(axis=0).item(), "actual":y_batch[0].item(),"num_above":(predicted2>0.5).sum().item(),"valid_batch_loss":loss_original.item(), "valid_log_loss": log_loss.item(), "valid_original_loss":loss_original.item()})

    def init_train_dataloader(self, length = None):
        """Build the (balanced, shuffled) training dataloader."""
        train_dataset = AudioDataset(self.train_dir, self.train_meta_file, spec_aug=False, isBalanced=True, isValid=False)
        if length is not None:
            train_dataset.length = length
        self.trainloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers= self.num_workers, collate_fn= train_dataset.collate_fn, pin_memory= True, drop_last = True, worker_init_fn=train_dataset.init_workers_fn)

    def init_valid_dataloader(self, length = None):
        """Build the (unbalanced, ordered) validation dataloader."""
        valid_dataset = AudioDataset(self.valid_dir, self.valid_meta_file, spec_aug=False, isBalanced=False, isValid=True)
        if length is not None:
            valid_dataset.length = length
        self.validloader = DataLoader(valid_dataset, batch_size=self.batch_size, shuffle=False, num_workers= self.num_workers,pin_memory= True, collate_fn= valid_dataset.collate_fn, drop_last = False, worker_init_fn=valid_dataset.init_workers_fn)
|
<reponame>seth586/lndmanage<filename>lndmanage/lib/report.py
"""
Creates reports for forwardings, channel opens/closings, onchain activity.
"""
import logging
from datetime import datetime
from lndmanage.lib.ln_utilities import (
convert_channel_id_to_short_channel_id,
height_to_timestamp
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# number of bins for histograms
NUMBER_OF_BINS = 48
# define character scale for histogram representation
# these are Braille characters, which are growing from zero to eight dots per
# character
CHARACTER_SCALE = [
u'\u2800',
u'\u2840',
u'\u28C0',
u'\u28C4',
u'\u28E4',
u'\u28E6',
u'\u28F6',
u'\u28F7',
u'\u28FF',
]
def print_histogram(histogram_bar, unit, max_scale):
    """
    Prints a one-line histogram rendered with Braille unicode characters.

    :param histogram_bar: histogram string
    :type histogram_bar: str
    :param unit: represented y unit of histogram
    :type unit: str
    :param max_scale: the maximal y size of a single bin in the histogram
    :type max_scale: int
    """
    # The last scale character (eight dots) represents a full bin.
    full_bin_char = CHARACTER_SCALE[8]
    logger.info(
        " activity (" + full_bin_char +
        " represents %s %s):", max_scale, unit)
    logger.info("\n %s\n", histogram_bar)
class Report(object):
    """Builds and logs a node activity report (forwardings, fees, amounts,
    channel closings and openings) over a unix-timestamp window, using
    TimeSeries histograms for each subreport."""

    def __init__(self, node, time_start, time_end):
        """
        :param node: LND node interface
        :type node: lndmanage.lib.node.LndNode
        :param time_start: unix timestamp for beginning of analysis
        :type time_start: int
        :param time_end: unix timestamp for end of analysis
        :type time_end: int
        """
        if time_start > time_end:
            raise ValueError("starting time must be earlier than end time")
        self.node = node
        self.time_start = int(time_start)
        self.time_end = int(time_end)
        # Fetch forwardings reaching back far enough to cover the window.
        offset_days = (self.time_end - self.time_start) // 3600 // 24
        self.forwarding_events = self.node.get_forwarding_events(
            offset_days=offset_days)
        self.channel_closings = self.node.get_closed_channels()
        self.channels = self.node.get_all_channels()

    def report(self):
        """
        Prints different subreports on forwardings and channel events.
        """
        date_start = datetime.fromtimestamp(
            self.time_start).strftime('%Y-%m-%d %H:%M')
        date_end = datetime.fromtimestamp(
            self.time_end).strftime('%Y-%m-%d %H:%M')
        logger.info("\nReport from %s to %s\n", date_start, date_end)
        self.report_forwarding_events()
        logger.info("")
        self.report_forwarding_fees()
        logger.info("")
        self.report_forwarding_amounts()
        logger.info("")
        self.report_channel_closings()
        logger.info("")
        self.report_channel_openings()

    def report_forwarding_events(self):
        """
        Reports forwarding events.
        """
        series = self.get_forwarding_event_series()
        time_series = TimeSeries(series, self.time_start, self.time_end)
        histogram_bar, max_scale = time_series.histogram_bar()
        logger.info("Forwardings:")
        if time_series.total_counts:
            print_histogram(histogram_bar, "forwardings", max_scale)
            logger.info("   total forwardings: %s", time_series.total_values)
            logger.info(
                "   forwardings per day: %d",
                time_series.total_values /
                ((self.time_end - self.time_start) / (24 * 3600)))
            logger.info("\n   channels with most outgoing forwardings:")
            # Rank channels by number of forwardings, descending.
            sorted_events_by_key = sorted(
                time_series.events_by_key.items(),
                key=lambda x: x[1]['counts'],
                reverse=True
            )
            for c in sorted_events_by_key[:5]:
                logger.info(f"   {c[0]}: {c[1]['values']}")
        else:
            logger.info("   No forwardings during this time frame.")

    def get_forwarding_event_series(self):
        """
        Fetches forwarding events in the format to be used by TimeSeries.

        :return: time series of forwarding events
        :rtype: list[dict]
        """
        # quantity 1 so each event simply counts as one forwarding.
        series = [
            {
                'timestamp': event['timestamp'],
                'key': event['chan_id_out'],
                'quantity': 1
            }
            for event in self.forwarding_events]
        return series

    def report_forwarding_fees(self):
        """
        Reports on forwarding fees.
        """
        series = self.get_forwarding_fees_series()
        time_series = TimeSeries(series, self.time_start, self.time_end)
        histogram_bar, max_scale = time_series.histogram_bar()
        logger.info("Forwarding fees:")
        if time_series.total_counts:
            print_histogram(histogram_bar, "msat fees", max_scale)
            logger.info(
                "   total forwarding fees: %s msat", time_series.total_values)
            logger.info(
                "   fees per forwarding: %d msat",
                time_series.total_values / sum(time_series.bins_counts))
            logger.info("\n   channels with most fees collected:")
            # Rank channels by accumulated fees, descending.
            sorted_events_by_key = sorted(
                time_series.events_by_key.items(),
                key=lambda x: x[1]['values'],
                reverse=True
            )
            for c in sorted_events_by_key[:5]:
                logger.info(f"   {c[0]}: {c[1]['values']} msat")
        else:
            logger.info("   No forwardings during this time frame.")

    def get_forwarding_fees_series(self):
        """
        Fetches forwarding fee series to be used by TimeSeries.

        :return: forwarding fee series
        :rtype: list[dict]
        """
        series = [
            {
                'timestamp': event['timestamp'],
                'key': event['chan_id_out'],
                'quantity': event['fee_msat']
            }
            for event in self.forwarding_events]
        return series

    def report_forwarding_amounts(self):
        """
        Reports forwarding amounts.
        """
        series = self.get_forwarding_amounts_series()
        time_series = TimeSeries(series, self.time_start, self.time_end)
        histogram_bar, max_scale = time_series.histogram_bar()
        logger.info("Forwarding amount:")
        if time_series.total_counts:
            print_histogram(histogram_bar, "sat", max_scale)
            logger.info("   total forwarded: %s sat", time_series.total_values)
            logger.info(
                "   amount per forwarding: %d sat",
                time_series.total_values / sum(time_series.bins_counts))
            logger.info("\n   channels with most forwarding amounts:")
            # Rank channels by accumulated forwarded amount, descending.
            sorted_events_by_key = sorted(
                time_series.events_by_key.items(),
                key=lambda x: x[1]['values'],
                reverse=True
            )
            for c in sorted_events_by_key[:5]:
                logger.info(f"   {c[0]}: {c[1]['values']} sat")
        else:
            logger.info("   No forwardings during this time frame.")

    def get_forwarding_amounts_series(self):
        """
        Fetches forwarding amount series to be used by TimeSeries.

        :return: forwarding amount series
        :rtype: list[dict]
        """
        series = [
            {
                'timestamp': event['timestamp'],
                'key': event['chan_id_out'],
                'quantity': event['amt_out']
            }
            for event in self.forwarding_events]
        return series

    def report_channel_closings(self):
        """
        Reports channel closings.
        """
        logger.info("Channel closings:")
        series = self.get_channel_closings_series()
        time_series = TimeSeries(series, self.time_start, self.time_end)
        histogram_bar, max_scale = time_series.histogram_bar()
        if time_series.total_counts:
            print_histogram(histogram_bar, "sat", max_scale)
            logger.info("   total closings: %s", sum(time_series.bins_counts))
            logger.info("   freed funds: %s sat", sum(time_series.bins_values))
            logger.info("\n   closed channels:")
            for c in time_series.events_by_key.items():
                logger.info(f"   {c[0]}: {c[1]['values']} sat freed")
        else:
            logger.info("   No channel closings during this time frame.")

    def get_channel_closings_series(self):
        """
        Fetches forwarding amount series to be used by TimeSeries.

        :return: channel closing series
        :rtype: list[dict]
        """
        series = [
            {
                # calculate back, when approximately the channel was closed
                'timestamp': height_to_timestamp(self.node,
                                                 event['close_height']),
                'key': event_key,
                'quantity': event['settled_balance']
            }
            for event_key, event in self.channel_closings.items()]
        return series

    def report_channel_openings(self):
        """
        Reports channel openings.
        """
        logger.info("Channel openings (of current channels):")
        series = self.get_channel_openings_series()
        time_series = TimeSeries(series, self.time_start, self.time_end)
        histogram_bar, max_scale = time_series.histogram_bar()
        if time_series.total_counts:
            print_histogram(
                histogram_bar, "capacity added in sat", max_scale)
            logger.info(
                "   total openings: %s", sum(time_series.bins_counts))
            logger.info(
                "   total capacity added: %s sat",
                sum(time_series.bins_values))
            logger.info("\n   opened channels:")
            # Sort by channel id (i.e. roughly by opening order).
            sorted_events_by_key = sorted(
                time_series.events_by_key.items(),
                key=lambda x: x[0]
            )
            for c in sorted_events_by_key:
                logger.info(f"   {c[0]}: {c[1]['values']} sat of new capacity")
        else:
            logger.info("   No channel openings during this time frame.")

    def get_channel_openings_series(self):
        """
        Fetches channel opening series to be used by TimeSeries.

        :return: channel opening series
        :rtype: list[dict]
        """
        series = []
        for chan_id, channel_values in self.channels.items():
            # The opening blockheight is encoded in the short channel id.
            blockheight = convert_channel_id_to_short_channel_id(chan_id)[0]
            series.append({
                'timestamp': height_to_timestamp(self.node, blockheight),
                'key': chan_id,
                'quantity': channel_values['capacity']
            })
        return series
class TimeSeries(object):
    """
    Object to calculate time series histograms on data with special format,
    look at the above get_series methods.
    """

    def __init__(self, series, time_start, time_end):
        """
        :param series: data series
        :type series: list[dict]
        :param time_start: unix timestamp
        :type time_start: int
        :param time_end: unix timestamp
        :type time_end: int
        """
        self.series = series
        self.time_start = time_start
        self.time_end = time_end
        # Width of one histogram bin in seconds (integer division).
        # NOTE(review): a window shorter than NUMBER_OF_BINS seconds makes
        # this zero, and create_binned_series would then divide by zero --
        # confirm callers always pass wider ranges.
        self.time_interval_sec = (time_end - time_start) // NUMBER_OF_BINS
        self.binned_series = []
        self.events_by_key = {}
        self.binned_series, self.events_by_key = self.create_binned_series()
        # Per-bin summed quantities and per-bin event counts.
        self.bins_values = list(
            map(lambda x: self.sum_data(x['data']), self.binned_series))
        self.bins_counts = list(
            map(lambda x: len(x['data']), self.binned_series))
        self.total_values = sum(self.bins_values)
        self.total_counts = sum(self.bins_counts)

    def create_binned_series(self):
        """
        Creates the histogram and analyzes the data by keys.

        :return: binned series, events by key
        :rtype: (list[dict], dict)
        """
        binned_series = []
        events_by_key = {}
        # initialize bins
        for d in range(NUMBER_OF_BINS):
            binned_series.append({
                'time_start': self.time_start + d * self.time_interval_sec,
                'time_end': self.time_start + (d + 1) * self.time_interval_sec,
                'data': []
            })
        # fill bins with key and quantity
        for s in self.series:
            _bin = (s['timestamp'] - self.time_start) // self.time_interval_sec
            # Events outside the window are ignored entirely.
            if 0 <= _bin < NUMBER_OF_BINS:
                binned_series[_bin]['data'].append((s['key'], s['quantity']))
                # accumulate keyed values to have additional statistics
                if s['key'] in events_by_key:
                    events_by_key[s['key']]['counts'] += 1
                    events_by_key[s['key']]['values'] += s['quantity']
                else:
                    events_by_key[s['key']] = {
                        'counts': 1, 'values': s['quantity']}
        return binned_series, events_by_key

    def sum_data(self, data):
        """
        Sums up data in data.

        :param data: contains list of events with first index timestamp and
            second index the value of the event
        :type data: list[tuple]
        :return: data sum
        :rtype: int
        """
        s = 0
        for d in data:
            s += d[1]
        return s

    def histogram_bar(self):
        """
        Creates the histogram string from the binned data using Braille chars.

        :return: the histogram string and the maximal value of the bins
        :rtype: (str, int)
        """
        bar = '|'
        # NOTE: the bar is scaled by summed bin *values*, not event counts.
        max_count = max(self.bins_values)
        # take 8 as the default max scale (Braille chars have eight dots)
        if max_count <= 8:
            max_count = 8
        # normalize to ints of maximal size of eight
        normalized_counts = [
            int(round(8 * c / max_count, 0)) for c in self.bins_values]
        # map ints to Braille characters
        for s in normalized_counts:
            bar += CHARACTER_SCALE[s]
        bar += '|'
        return bar, max_count
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.