text stringlengths 957 885k |
|---|
import time
import matplotlib.pyplot as plt
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
def _str2bool(value):
    """Parse a command-line boolean flag ('true'/'1'/'yes', case-insensitive)."""
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')

parser = argparse.ArgumentParser()
parser.add_argument('--tol', type=float, default=1e-3)
# was type=eval: eval() on a CLI argument executes arbitrary code; a plain
# boolean parser accepts the same 'True'/'False' values safely.
parser.add_argument('--adjoint', type=_str2bool, default=False)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--extra_dim', type=int, default=1)
parser.add_argument('--experiment_no', type=int, default=1)
args = parser.parse_args()
from torchdiffeq import odeint_adjoint as odeint
# Angular frequency of the forcing signal (radians per unit time).
omega = np.pi / 5

def c(t):
    """Sinusoidal forcing term cos(omega * t); ``t`` is a torch scalar/tensor."""
    return torch.cos(t * omega)
class init_aug(nn.Module):
    """Augment an initial state with a learned extra block.

    The output is ``[x0, fc(x0)]``, i.e. the input followed by a linear
    transform of it, doubling the state dimension.
    """

    def __init__(self, dim):
        super(init_aug, self).__init__()
        self.fc = nn.Linear(dim, dim)

    def forward(self, x0):
        extra = self.fc(x0)
        return torch.cat((x0, extra))
class ODEfunc(nn.Module):
    """Right-hand side f(t, z) of the augmented ODE.

    Splits the state ``z`` into the data part and the augmented part,
    appends the scalar forcing ``c(t)``, and applies a linear layer.
    Relies on the module-level globals ``data_dim`` and ``c``.
    """

    def __init__(self, dim):
        super(ODEfunc, self).__init__()
        self.fc = nn.Linear(dim + 1, dim)
        self.nfe = 0  # number of function evaluations (solver-cost metric)

    def forward(self, t, z):
        self.nfe += 1
        x, a = z[:data_dim], z[data_dim:]
        forcing = torch.tensor([c(t)]).float()
        return self.fc(torch.cat((x, a, forcing)))
class ODEBlock(nn.Module):
    """Integrate ``odefunc`` over ``integration_times`` and gather columns.

    Uses the module-level ``odeint`` solver with tolerances from ``args``.
    """

    def __init__(self, odefunc, integration_times, indices):
        super(ODEBlock, self).__init__()
        self.odefunc = odefunc
        self.integration_times = integration_times
        self.indices = indices

    def forward(self, x):
        trajectory = odeint(self.odefunc, x, self.integration_times,
                            rtol=args.tol, atol=args.tol)
        return trajectory.gather(1, self.indices)

    @property
    def nfe(self):
        """Function-evaluation counter, proxied from the wrapped ODE function."""
        return self.odefunc.nfe

    @nfe.setter
    def nfe(self, value):
        self.odefunc.nfe = value
def count_parameters(model):
    """Return the total number of trainable parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
if __name__ == '__main__':
    # NOTE(review): the '.' before '/' in 'anode(1)./1./' and 'data./' looks
    # like a typo for 'anode(1)/1/' and 'data/' -- confirm against the
    # directory layout produced by the training script before changing.
    filename = 'anode('+str(args.extra_dim)+')./'+str(args.experiment_no)+'./'
    device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
    data_dim = 1  # dimensionality of the observed state
    dim = data_dim
    #dim does not equal data_dim for ANODEs where they are augmented with extra zeros
    # model
    # making sampled data to fit
    full_z = torch.load('data./position_data.pt')    # ground-truth trajectory
    full_ts = torch.load('data./test_time_data.pt')  # evaluation time stamps
    z0 = full_z[0].to(device)                        # initial condition
    # test
    # model is expected to be a pair: [init_aug module, ODEBlock]
    model = torch.load(filename+'model.pth')
    y0 = model[0](z0)                                # augment the initial state
    pred_z = odeint(model[1].odefunc, y0, full_ts)   # integrate over test times
    ids = torch.arange(data_dim)
    ids = ids.repeat(len(full_ts), 1)
    # keep only the data dimensions of the augmented trajectory
    pred_z_test = pred_z.gather(1, ids)
    pred_z_test = pred_z_test.detach().numpy().reshape(len(full_ts))
    np.save(filename+'learnt_trajectory.npy', pred_z_test)
|
<reponame>gradiuscypher/advent-of-code
#!/usr/bin/env python3
import copy
from pprint import pprint
import traceback
from sys import argv
# Input file path comes from the first command-line argument.
filename = argv[1]
# 1000x1000 grid of vent-overlap counts, indexed as sea[x][y].
sea = [[0] * 1000 for _ in range(1000)]
def line_iterator(start, end):
    """Return every grid point on the segment from ``start`` to ``end``.

    Horizontal and vertical segments are walked point by point, endpoints
    included.  Anything else is treated as a diagonal and walked with a
    combined +/-1 step on both axes until either coordinate reaches its
    target (endpoints included).
    """
    # Horizontal segment: only x changes.
    if start[0] != end[0] and start[1] == end[1]:
        step = 1 if start[0] < end[0] else -1
        return [[x, start[1]] for x in range(start[0], end[0] + step, step)]
    # Vertical segment: only y changes.
    if start[1] != end[1] and start[0] == end[0]:
        step = 1 if start[1] < end[1] else -1
        return [[start[0], y] for y in range(start[1], end[1] + step, step)]
    # Diagonal: step both coordinates toward the target simultaneously.
    xmod = -1 if start[0] > end[0] else 1
    ymod = -1 if start[1] > end[1] else 1
    pos = [start[0], start[1]]
    path = [list(pos)]
    while pos[0] != end[0] and pos[1] != end[1]:
        pos[0] += xmod
        pos[1] += ymod
        path.append(list(pos))
    return path
def count_bad():
    """Count grid cells crossed by at least two vent lines."""
    return sum(1 for row in sea for cell in row if cell >= 2)
def fill_map():
    """Parse the input file, mark every vent point on the grid, and print
    the number of cells covered by two or more lines.

    Reads the module-level ``filename`` and mutates the module-level ``sea``.
    """
    # 'infile' instead of 'input': the original name shadowed the builtin.
    with open(filename) as infile:
        for line in infile:
            data = line.strip().split(' -> ')
            start_point = [int(n) for n in data[0].split(',')]
            end_point = [int(n) for n in data[1].split(',')]
            path_points = line_iterator(start_point, end_point)
            # iterate over every path point and increase value on map
            for point in path_points:
                x, y = point
                sea[x][y] += 1
    print(f"answer: {count_bad()}")
if __name__ == '__main__':
    # Entry point: build the vent map and report the overlap count.
    fill_map()
|
<gh_stars>0
from mpi4pyve import MPI
import mpiunittest as unittest
class TestErrorCode(unittest.TestCase):
    """Exercise the MPI error-code / error-class / error-string API."""

    # Every MPI.ERR_* constant, with SUCCESS prepended (class 0) and
    # ERR_LASTCODE removed (it is an upper bound, not a real error class).
    errorclasses = [item[1] for item in vars(MPI).items()
                    if item[0].startswith('ERR_')]
    errorclasses.insert(0, MPI.SUCCESS)
    errorclasses.remove(MPI.ERR_LASTCODE)

    def testGetErrorClass(self):
        """Each predefined error class must map to itself."""
        self.assertEqual(self.errorclasses[0], 0)
        for ierr in self.errorclasses:
            errcls = MPI.Get_error_class(ierr)
            self.assertTrue(errcls >= MPI.SUCCESS)
            self.assertTrue(errcls <= MPI.ERR_LASTCODE)
            self.assertEqual(errcls, ierr)

    def testGetErrorStrings(self):
        """Every error class must yield an error string without raising."""
        for ierr in self.errorclasses:
            MPI.Get_error_string(ierr)

    def testException(self):
        """MPI.Exception must behave like its wrapped integer error code."""
        # (The old Python < 2.5 version guard was removed: it is always
        # true on any interpreter capable of running this file.)
        success = MPI.Exception(MPI.SUCCESS)
        lasterr = MPI.Exception(MPI.ERR_LASTCODE)
        for ierr in self.errorclasses:
            errstr = MPI.Get_error_string(ierr)
            errcls = MPI.Get_error_class(ierr)
            errexc = MPI.Exception(ierr)
            self.assertEqual(errexc.error_code, ierr)
            self.assertEqual(errexc.error_class, ierr)
            self.assertEqual(errexc.error_string, errstr)
            self.assertEqual(repr(errexc), "MPI.Exception(%d)" % ierr)
            self.assertEqual(str(errexc), errstr)
            self.assertEqual(int(errexc), ierr)
            self.assertEqual(hash(errexc), hash(errexc.error_code))
            self.assertTrue(errexc == ierr)
            self.assertTrue(errexc == errexc)
            self.assertFalse(errexc != ierr)
            self.assertFalse(errexc != errexc)
            self.assertTrue(success <= ierr <= lasterr)
            self.assertTrue(success <= errexc <= lasterr)
            self.assertTrue(errexc >= ierr)
            self.assertTrue(errexc >= success)
            self.assertTrue(lasterr >= ierr)
            self.assertTrue(lasterr >= errexc)
            # An exception wrapping SUCCESS is falsy; real errors are truthy.
            if errexc == success:
                self.assertFalse(errexc)
            else:
                self.assertTrue(errexc)
                self.assertTrue(errexc > success)
                self.assertTrue(success < errexc)
        # Out-of-range codes must normalize to ERR_UNKNOWN.  The original
        # used assertTrue(exc, MPI.ERR_UNKNOWN), whose second argument is
        # only the failure *message*, so nothing was actually compared.
        exc = MPI.Exception(MPI.SUCCESS-1)
        self.assertEqual(exc, MPI.ERR_UNKNOWN)
        exc = MPI.Exception(MPI.ERR_LASTCODE+1)
        self.assertEqual(exc, MPI.ERR_UNKNOWN)

    @unittest.skipMPI('openmpi(<1.10.0)')
    def testAddErrorClass(self):
        """A newly added error class must sort above ERR_LASTCODE."""
        try:
            errclass = MPI.Add_error_class()
        except NotImplementedError:
            self.skipTest('mpi-add_error_class')
        self.assertTrue(errclass >= MPI.ERR_LASTCODE)

    @unittest.skip('necmpi')
    @unittest.skipMPI('openmpi(<1.10.0)')
    def testAddErrorClassCodeString(self):
        """Add_error_class/code/string must round-trip through the getters."""
        try:
            errclass = MPI.Add_error_class()
        except NotImplementedError:
            self.skipTest('mpi-add_error_class')
        lastused = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE)
        self.assertTrue(errclass == lastused)
        errstr = MPI.Get_error_string(errclass)
        self.assertEqual(errstr, "")
        MPI.Add_error_string(errclass, "error class")
        self.assertEqual(MPI.Get_error_string(errclass), "error class")
        errcode1 = MPI.Add_error_code(errclass)
        errstr = MPI.Get_error_string(errcode1)
        self.assertEqual(errstr, "")
        MPI.Add_error_string(errcode1, "error code 1")
        self.assertEqual(MPI.Get_error_class(errcode1), errclass)
        self.assertEqual(MPI.Get_error_string(errcode1), "error code 1")
        errcode2 = MPI.Add_error_code(errclass)
        errstr = MPI.Get_error_string(errcode2)
        self.assertEqual(errstr, "")
        MPI.Add_error_string(errcode2, "error code 2")
        self.assertEqual(MPI.Get_error_class(errcode2), errclass)
        self.assertEqual(MPI.Get_error_string(errcode2), "error code 2")
if __name__ == '__main__':
    # Run under the project's MPI-aware unittest wrapper.
    unittest.main()
|
<reponame>JTarball/tetherbox
import datetime
from django.core import mail
from django.core.handlers.wsgi import WSGIRequest
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.models import Site
from django.test import TestCase, Client, RequestFactory
from django.conf import settings
from django_nose.tools import assert_redirects, assert_template_used
from accounts.backend import AccountsBackend
from accounts.models import RegistrationProfile
from accounts.views import Activate, Register
class _MockRequestClient(Client):
    """
    A ``django.test.Client`` subclass which can return mock ``HttpRequest`` objects.
    """

    def request(self, **request):
        """
        Construct and return an ``HttpRequest`` object directly instead of
        issuing a request and returning the response.
        """
        base = {
            'HTTP_COOKIE': self.cookies,
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REMOTE_ADDR': '127.0.0.1',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
        }
        wsgi = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'http',
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
            'wsgi.input': None,
        }
        environ = dict(base, **wsgi)
        environ.update(self.defaults)
        environ.update(request)
        mock_request = WSGIRequest(environ)
        # Attach a session by hand, since the middleware chain is bypassed.
        session_middleware = SessionMiddleware()
        session_middleware.process_request(mock_request)
        return mock_request
def _mock_request():
    """
    Build and return a throwaway mock ``HttpRequest``; used when testing
    backend methods that expect an ``HttpRequest`` but are not being
    called from views.
    """
    client = _MockRequestClient()
    return client.request()
class DefaultRegistrationBackendTests(TestCase):
    """
    Test the default reg backend.

    Running these tests successfully will require two templates to be
    created for the sending of activation emails; details on these
    templates and their contexts may be found in the documentation for
    the default backend.

    NOTE(review): several names used below (``signals``, ``RegistrationAdmin``,
    ``admin``, ``User``) are not imported in this chunk of the file --
    presumably ``from accounts import signals``, ``from django.contrib import
    admin`` etc. exist elsewhere in the module; confirm before running.
    """
    # Shared by all test methods; the backend object itself holds no state.
    backend = AccountsBackend()
    client = Client()

    def setUp(self):
        """
        Create an instance of the default backend for use in testing,
        and set ``ACCOUNT_ACTIVATION_DAYS`` if it's not set already.
        """
        self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
        if self.old_activation is None:
            settings.ACCOUNT_ACTIVATION_DAYS = 7 # pragma: no cover

    def tearDown(self):
        """
        Yank out ``ACCOUNT_ACTIVATION_DAYS`` back out if it wasn't
        originally set.
        """
        # NOTE(review): this assigns None rather than deleting the setting,
        # so ACCOUNT_ACTIVATION_DAYS stays defined (as None) after the test
        # -- confirm whether ``delattr`` was intended here.
        if self.old_activation is None:
            settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation # pragma: no cover

    def test_reg(self):
        """
        Test the reg process: reg creates a new
        inactive account and a new profile with activation key,
        populates the correct account data and sends an activation
        email.
        """
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='<EMAIL>',
                                         password1='<PASSWORD>')
        # Details of the returned user must match what went in.
        self.assertEqual(new_user.username, 'bob')
        # NOTE(review): checks 'secret' while registering with the
        # '<PASSWORD>' placeholder -- the dump scrubbed the original
        # literals; restore matching values before running.
        self.failUnless(new_user.check_password('secret'))
        self.assertEqual(new_user.email, '<EMAIL>')
        # New user must not be active.
        self.failIf(new_user.is_active)
        # A reg profile was created, and an activation email was sent.
        self.assertEqual(RegistrationProfile.objects.count(), 1)
        self.assertEqual(len(mail.outbox), 1)

    def test_reg_no_sites(self):
        """
        Test that reg still functions properly when
        ``django.contrib.sites`` is not installed; the fallback will
        be a ``RequestSite`` instance.
        """
        # NOTE(review): Site._meta.installed is a legacy toggle; confirm it
        # still has an effect on the Django version in use.
        Site._meta.installed = False
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='<EMAIL>',
                                         password1='<PASSWORD>')
        self.assertEqual(new_user.username, 'bob')
        self.failUnless(new_user.check_password('<PASSWORD>'))
        self.assertEqual(new_user.email, '<EMAIL>')
        self.failIf(new_user.is_active)
        self.assertEqual(RegistrationProfile.objects.count(), 1)
        self.assertEqual(len(mail.outbox), 1)
        Site._meta.installed = True

    def test_valid_activation(self):
        """
        Test the activation process: activating within the permitted window
        sets the account's ``is_active`` field to ``True`` and resets the activation key.
        """
        valid_user = self.backend.register(_mock_request(),
                                           username='alice',
                                           email='<EMAIL>',
                                           password1='<PASSWORD>')
        valid_profile = RegistrationProfile.objects.get(user=valid_user)
        activated = self.backend.activate(_mock_request(), valid_profile.activation_key)
        self.assertEqual(activated.username, valid_user.username)
        self.failUnless(activated.is_active)
        # Fetch the profile again to verify its activation key has been reset.
        valid_profile = RegistrationProfile.objects.get(user=valid_user)
        self.assertEqual(valid_profile.activation_key, RegistrationProfile.ACTIVATED)

    def test_invalid_activation(self):
        """
        Test the activation process: trying to activate outside the
        permitted window fails, and leaves the account inactive.
        """
        expired_user = self.backend.register(_mock_request(),
                                             username='bob',
                                             email='<EMAIL>',
                                             password1='<PASSWORD>')
        # Back-date the join date by exactly the activation window; assumes
        # expiry is inclusive of the boundary day -- confirm against
        # RegistrationProfile.activation_key_expired().
        expired_user.date_joined = expired_user.date_joined - datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
        expired_user.save()
        expired_profile = RegistrationProfile.objects.get(user=expired_user)
        self.failIf(self.backend.activate(_mock_request(),
                                          expired_profile.activation_key))
        self.failUnless(expired_profile.activation_key_expired())

    def test_allow(self):
        """
        Test that the setting ``REGISTRATION_OPEN`` appropriately
        controls whether reg is permitted.
        """
        old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
        settings.REGISTRATION_OPEN = True
        self.failUnless(self.backend.registration_allowed(_mock_request()))
        settings.REGISTRATION_OPEN = False
        self.failIf(self.backend.registration_allowed(_mock_request()))
        settings.REGISTRATION_OPEN = old_allowed

    def test_post_reg_redirect_success(self):
        """
        Test that view redirect to successful url page and the correct template is used if activation successful.
        """
        valid_user = self.backend.register(_mock_request(),
                                           username='alice',
                                           email='<EMAIL>',
                                           password1='<PASSWORD>')
        valid_profile = RegistrationProfile.objects.get(user=valid_user)
        # NOTE(review): 'activate/$' looks like a URL *pattern*, not a path,
        # and test Client.get() takes query params, not an activation_key
        # kwarg -- verify this request actually hits the Activate view.
        response = self.client.get('activate/$', activation_key=valid_profile.activation_key)
        assert_redirects(response, self.backend.post_activation_successful_redirect_url, status_code=301, target_status_code=200)

    def test_reg_signal(self):
        """
        Test that registering a user sends the ``user_registered``
        signal.
        """
        def receiver(sender, **kwargs):
            # Runs inside the signal dispatch; records what was received.
            self.failUnless('user' in kwargs)
            self.assertEqual(kwargs['user'].username, 'bob')
            self.failUnless('request' in kwargs)
            self.failUnless(isinstance(kwargs['request'], WSGIRequest))
            received_signals.append(kwargs.get('signal'))
        received_signals = []
        # NOTE(review): ``signals`` is not imported in this chunk -- confirm.
        signals.user_registered.connect(receiver, sender=self.backend.__class__)
        self.backend.register(_mock_request(),
                              username='bob',
                              email='<EMAIL>',
                              password1='<PASSWORD>')
        self.assertEqual(len(received_signals), 1)
        self.assertEqual(received_signals, [signals.user_registered])

    def test_activation_signal_success(self):
        """
        Test that successfully activating a user sends the
        ``user_activated`` signal.
        """
        def receiver(sender, **kwargs):
            self.failUnless('user' in kwargs)
            self.assertEqual(kwargs['user'].username, 'bob')
            self.failUnless('request' in kwargs)
            self.failUnless(isinstance(kwargs['request'], WSGIRequest))
            received_signals.append(kwargs.get('signal'))
        received_signals = []
        signals.user_activated.connect(receiver, sender=self.backend.__class__)
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='<EMAIL>',
                                         password1='<PASSWORD>')
        profile = RegistrationProfile.objects.get(user=new_user)
        self.backend.activate(_mock_request(), profile.activation_key)
        self.assertEqual(len(received_signals), 1)
        self.assertEqual(received_signals, [signals.user_activated])

    def test_activation_signal_failure(self):
        """
        Test that an unsuccessful activation attempt does not send the
        ``user_activated`` signal.
        """
        receiver = lambda sender, **kwargs: received_signals.append(kwargs.get('signal'))
        received_signals = []
        signals.user_activated.connect(receiver, sender=self.backend.__class__)
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='<EMAIL>',
                                         password1='<PASSWORD>')
        # Back-date past the window (+1 day, unlike the boundary case above).
        new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        new_user.save()
        profile = RegistrationProfile.objects.get(user=new_user)
        self.backend.activate(_mock_request(), profile.activation_key)
        self.assertEqual(len(received_signals), 0)

    def test_email_send_action(self):
        """
        Test re-sending of activation emails via admin action.
        """
        # NOTE(review): RegistrationAdmin and admin are not imported in this
        # chunk -- confirm the imports exist elsewhere in the module.
        admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
        alice = self.backend.register(_mock_request(),
                                      username='alice',
                                      email='<EMAIL>',
                                      password1='<PASSWORD>')
        admin_class.resend_activation_email(_mock_request(),
                                            RegistrationProfile.objects.all())
        self.assertEqual(len(mail.outbox), 2) # One on registering, one more on the resend.
        RegistrationProfile.objects.filter(user=alice).update(activation_key=RegistrationProfile.ACTIVATED)
        admin_class.resend_activation_email(_mock_request(),
                                            RegistrationProfile.objects.all())
        self.assertEqual(len(mail.outbox), 2) # No additional email because the account has activated.

    def test_email_send_action_no_sites(self):
        """
        Test re-sending of activation emails via admin action when
        ``django.contrib.sites`` is not installed; the fallback will
        be a ``RequestSite`` instance.
        """
        Site._meta.installed = False
        admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
        alice = self.backend.register(_mock_request(),
                                      username='alice',
                                      email='<EMAIL>',
                                      password1='<PASSWORD>')
        admin_class.resend_activation_email(_mock_request(),
                                            RegistrationProfile.objects.all())
        self.assertEqual(len(mail.outbox), 2) # One on registering, one more on the resend.
        RegistrationProfile.objects.filter(user=alice).update(activation_key=RegistrationProfile.ACTIVATED)
        admin_class.resend_activation_email(_mock_request(),
                                            RegistrationProfile.objects.all())
        self.assertEqual(len(mail.outbox), 2) # No additional email because the account has activated.
        Site._meta.installed = True

    def test_activation_action(self):
        """
        Test manual activation of users view admin action.
        """
        admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
        alice = self.backend.register(_mock_request(),
                                      username='alice',
                                      email='<EMAIL>',
                                      password1='<PASSWORD>')
        admin_class.activate_users(_mock_request(),
                                   RegistrationProfile.objects.all())
        # NOTE(review): ``User`` is not imported in this chunk -- presumably
        # django.contrib.auth.models.User; confirm.
        self.failUnless(User.objects.get(username='alice').is_active)
|
<filename>src/sage/combinat/species/characteristic_species.py
"""
Characteristic Species
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from species import GenericCombinatorialSpecies
from generating_series import factorial_stream
from structure import GenericSpeciesStructure
from set_species import SetSpecies
from sage.misc.cachefunc import cached_function
from sage.structure.unique_representation import UniqueRepresentation
class CharacteristicSpeciesStructure(GenericSpeciesStructure):
    """The unique structure of a characteristic species on a label set of
    the admissible size; it prints as the set of labels itself."""

    def __repr__(self):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(3)
            sage: a = F.structures([1, 2, 3]).random_element(); a
            {1, 2, 3}
            sage: F = species.SingletonSpecies()
            sage: F.structures([1]).list()
            [1]
            sage: F = species.EmptySetSpecies()
            sage: F.structures([]).list()
            [{}]
        """
        s = GenericSpeciesStructure.__repr__(self)
        # A singleton structure prints its one label bare, without braces.
        if self.parent()._n == 1:
            return s[1:-1]
        else:
            return "{" + s[1:-1] + "}"

    def canonical_label(self):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(3)
            sage: a = F.structures(["a", "b", "c"]).random_element(); a
            {'a', 'b', 'c'}
            sage: a.canonical_label()
            {'a', 'b', 'c'}
        """
        # There is only one structure, so canonicalizing just relabels
        # with 1..n.
        P = self.parent()
        rng = range(1, P._n+1)
        return CharacteristicSpeciesStructure(P, self._labels, rng)

    def transport(self, perm):
        """
        Returns the transport of this structure along the permutation
        perm.

        EXAMPLES::

            sage: F = species.CharacteristicSpecies(3)
            sage: a = F.structures(["a", "b", "c"]).random_element(); a
            {'a', 'b', 'c'}
            sage: p = PermutationGroupElement((1,2))
            sage: a.transport(p)
            {'a', 'b', 'c'}
        """
        # The unique structure is fixed by every permutation.
        return self

    def automorphism_group(self):
        """
        Returns the group of permutations whose action on this structure
        leave it fixed. For the characteristic species, there is only one
        structure, so every permutation is in its automorphism group.

        EXAMPLES::

            sage: F = species.CharacteristicSpecies(3)
            sage: a = F.structures(["a", "b", "c"]).random_element(); a
            {'a', 'b', 'c'}
            sage: a.automorphism_group()
            Symmetric group of order 3! as a permutation group
        """
        from sage.groups.all import SymmetricGroup
        return SymmetricGroup(len(self._labels))
class CharacteristicSpecies(GenericCombinatorialSpecies, UniqueRepresentation):
    def __init__(self, n, min=None, max=None, weight=None):
        """
        Returns the characteristic species of order `n`.
        This species has exactly one structure on a set of of size `n`
        and no structures of on sets of any other size.

        EXAMPLES::

            sage: X = species.CharacteristicSpecies(1)
            sage: X.structures([1]).list()
            [1]
            sage: X.structures([1,2]).list()
            []
            sage: X.generating_series().coefficients(4)
            [0, 1, 0, 0]
            sage: X.isotype_generating_series().coefficients(4)
            [0, 1, 0, 0]
            sage: X.cycle_index_series().coefficients(4)
            [0, p[1], 0, 0]
            sage: F = species.CharacteristicSpecies(3)
            sage: c = F.generating_series().coefficients(4)
            sage: F._check()
            True
            sage: F == loads(dumps(F))
            True

        TESTS::

            sage: S1 = species.CharacteristicSpecies(1)
            sage: S2 = species.CharacteristicSpecies(1)
            sage: S3 = species.CharacteristicSpecies(2)
            sage: S4 = species.CharacteristicSpecies(2, weight=2)
            sage: S1 is S2
            True
            sage: S1 == S3
            False
        """
        self._n = n
        self._name = "Characteristic species of order %s"%n
        # _state_info is used by the pickling/unique-representation machinery.
        self._state_info = [n]
        GenericCombinatorialSpecies.__init__(self, min=min, max=max, weight=weight)

    # Structures of this species are represented by the class above.
    _default_structure_class = CharacteristicSpeciesStructure

    def _structures(self, structure_class, labels):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(2)
            sage: l = [1, 2, 3]
            sage: F.structures(l).list()
            []
            sage: F = species.CharacteristicSpecies(3)
            sage: F.structures(l).list()
            [{1, 2, 3}]
        """
        # Exactly one structure, and only when the label set has size n.
        if len(labels) == self._n:
            yield structure_class(self, labels, range(1,self._n+1))

    # Up to isomorphism the structures are the same as the labelled ones.
    _isotypes = _structures

    def _gs_term(self, base_ring):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(2)
            sage: F.generating_series().coefficients(5)
            [0, 0, 1/2, 0, 0]
            sage: F.generating_series().count(2)
            1
        """
        # Exponential generating series term: weight * x^n / n!.
        return base_ring(self._weight)/base_ring(factorial_stream[self._n])

    def _order(self):
        """
        Returns the order of the generating series.

        EXAMPLES::

            sage: F = species.CharacteristicSpecies(2)
            sage: F._order()
            2
        """
        return self._n

    def _itgs_term(self, base_ring):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(2)
            sage: F.isotype_generating_series().coefficients(5)
            [0, 0, 1, 0, 0]

        Here we test out weighting each structure by q.

        ::

            sage: R.<q> = ZZ[]
            sage: Fq = species.CharacteristicSpecies(2, weight=q)
            sage: Fq.isotype_generating_series().coefficients(5)
            [0, 0, q, 0, 0]
        """
        return base_ring(self._weight)

    def _cis_term(self, base_ring):
        """
        EXAMPLES::

            sage: F = species.CharacteristicSpecies(2)
            sage: g = F.cycle_index_series()
            sage: g.coefficients(5)
            [0, 0, 1/2*p[1, 1] + 1/2*p[2], 0, 0]
        """
        # The cycle index is the degree-n coefficient of the set species.
        cis = SetSpecies(weight=self._weight).cycle_index_series(base_ring)
        return cis.coefficient(self._n)

    def _equation(self, var_mapping):
        """
        Returns the right hand side of an algebraic equation satisfied by
        this species. This is a utility function called by the
        algebraic_equation_system method.

        EXAMPLES::

            sage: C = species.CharacteristicSpecies(2)
            sage: Qz = QQ['z']
            sage: R.<node0> = Qz[]
            sage: var_mapping = {'z':Qz.gen(), 'node0':R.gen()}
            sage: C._equation(var_mapping)
            z^2
        """
        return var_mapping['z']**(self._n)

#Backward compatibility
CharacteristicSpecies_class = CharacteristicSpecies
class EmptySetSpecies(CharacteristicSpecies):
    def __init__(self, min=None, max=None, weight=None):
        """
        Returns the empty set species.
        This species has exactly one structure on the empty set. It is
        the same (and is implemented) as ``CharacteristicSpecies(0)``.

        EXAMPLES::

            sage: X = species.EmptySetSpecies()
            sage: X.structures([]).list()
            [{}]
            sage: X.structures([1,2]).list()
            []
            sage: X.generating_series().coefficients(4)
            [1, 0, 0, 0]
            sage: X.isotype_generating_series().coefficients(4)
            [1, 0, 0, 0]
            sage: X.cycle_index_series().coefficients(4)
            [p[], 0, 0, 0]

        TESTS::

            sage: E1 = species.EmptySetSpecies()
            sage: E2 = species.EmptySetSpecies()
            sage: E1 is E2
            True
            sage: E = species.EmptySetSpecies()
            sage: E._check()
            True
            sage: E == loads(dumps(E))
            True
        """
        CharacteristicSpecies_class.__init__(self, 0, min=min, max=max, weight=weight)
        self._name = "Empty set species"
        # No extra state beyond the class itself (n is fixed at 0).
        self._state_info = []

#Backward compatibility
# NOTE(review): this also sets _cached_constructor on the class; confirm the
# chained assignment is intentional and not a leftover.
EmptySetSpecies_class = EmptySetSpecies._cached_constructor = EmptySetSpecies
class SingletonSpecies(CharacteristicSpecies):
    def __init__(self, min=None, max=None, weight=None):
        """
        Returns the species of singletons.
        This species has exactly one structure on a set of size `1`. It
        is the same (and is implemented) as ``CharacteristicSpecies(1)``.

        EXAMPLES::

            sage: X = species.SingletonSpecies()
            sage: X.structures([1]).list()
            [1]
            sage: X.structures([1,2]).list()
            []
            sage: X.generating_series().coefficients(4)
            [0, 1, 0, 0]
            sage: X.isotype_generating_series().coefficients(4)
            [0, 1, 0, 0]
            sage: X.cycle_index_series().coefficients(4)
            [0, p[1], 0, 0]

        TESTS::

            sage: S1 = species.SingletonSpecies()
            sage: S2 = species.SingletonSpecies()
            sage: S1 is S2
            True
            sage: S = species.SingletonSpecies()
            sage: S._check()
            True
            sage: S == loads(dumps(S))
            True
        """
        CharacteristicSpecies_class.__init__(self, 1, min=min, max=max, weight=weight)
        self._name = "Singleton species"
        # No extra state beyond the class itself (n is fixed at 1).
        self._state_info = []

#Backward compatibility
SingletonSpecies_class = SingletonSpecies
|
import json
from urllib.parse import urlencode

import requests
from django.shortcuts import render, reverse, redirect

from SNI.check import check_tokens
from SNI.error import render_error
from SNI.esi import post_universe_names, get_corporations_corporation_id, ESI_SCOPES
from SNI.lib import global_headers
from utils import SNI_URL
GLOBAL_URL = SNI_URL + "corporation"
@check_tokens(1)
def home(request):
    """
    Corporation home
    """
    response = requests.get(GLOBAL_URL, headers=global_headers(request))
    if response.status_code != 200:
        return render_error(response)
    context = {"corporations": response.json()}
    return render(request, "corporation/home.html", context)
@check_tokens(1)
def sheet(request, corp_id):
    """
    Information sheet on a corporation
    """
    request_corp = requests.get(GLOBAL_URL+f"/{corp_id}", headers=global_headers(request))
    if request_corp.status_code != 200:
        return render_error(request_corp)
    esi_request_corp = get_corporations_corporation_id(corp_id)
    if esi_request_corp.status_code != 200:
        return render_error(esi_request_corp)
    # Parse the ESI body once and reuse it (it was previously parsed twice).
    esi_corp = esi_request_corp.json()
    esi_corp["tax_rate"] *= 100  # ESI reports a fraction; template expects percent
    return render(request, "corporation/sheet.html", {
        "corporation": request_corp.json(),
        "esi": esi_corp,
        "corporation_id": corp_id,
        "corporation_name": esi_corp["name"],
        "scopes": ESI_SCOPES,
        "changed_scopes": request.GET.get("changed_scopes"),
    })
@check_tokens(1)
def tracking(request, corp_id):
    """
    Tracking of the corp members' tokens
    """
    track_response = requests.get(GLOBAL_URL + f"/{corp_id}/tracking",
                                  headers=global_headers(request))
    if track_response.status_code != 200:
        return render_error(track_response)
    corp_response = get_corporations_corporation_id(corp_id)
    if corp_response.status_code != 200:
        return render_error(corp_response)
    context = {
        "tracking": track_response.json(),
        "corporation_id": corp_id,
        "corporation_name": corp_response.json()["name"],
    }
    return render(request, "corporation/tracking.html", context)
@check_tokens(2)
def change_scopes(request, corp_id):
    """
    Changing corporation mandatory scopes with a specific list
    """
    # Keep only POSTed keys that are known ESI scopes.
    scopes = [key for key in request.POST if key in ESI_SCOPES]
    # Serialize with json.dumps instead of hand-splicing the JSON string,
    # which would break if a scope name ever contained quotes/backslashes.
    # An empty list still yields {"mandatory_esi_scopes": []} as before.
    data = json.dumps({"mandatory_esi_scopes": scopes})
    request_change = requests.put(
        GLOBAL_URL + f"/{corp_id}",
        headers=global_headers(request, {"Content-type": "application/json"}),
        data=data,
    )
    if request_change.status_code != 200:
        return render_error(request_change)
    params = urlencode({"changed_scopes": "true"})
    return_url = reverse("corporation-sheet", args=[corp_id]) + "?" + params
    return redirect(return_url)
@check_tokens(2)
def change_scopes_all(request, corp_id):
    """
    Changing corporation mandatory scopes by applying them all
    """
    # Serialize with json.dumps instead of hand-splicing the JSON string.
    data = json.dumps({"mandatory_esi_scopes": list(ESI_SCOPES)})
    request_change = requests.put(
        GLOBAL_URL + f"/{corp_id}",
        headers=global_headers(request, {"Content-type": "application/json"}),
        data=data,
    )
    if request_change.status_code != 200:
        return render_error(request_change)
    params = urlencode({"changed_scopes": "true"})
    return_url = reverse("corporation-sheet", args=[corp_id]) + "?" + params
    return redirect(return_url)
@check_tokens(2)
def change_scopes_none(request, corp_id):
    """
    Changing corporation mandatory scopes by removing them all
    """
    # Serialize with json.dumps for consistency with the other scope views.
    data = json.dumps({"mandatory_esi_scopes": []})
    request_change = requests.put(
        GLOBAL_URL + f"/{corp_id}",
        headers=global_headers(request, {"Content-type": "application/json"}),
        data=data,
    )
    if request_change.status_code != 200:
        return render_error(request_change)
    params = urlencode({"changed_scopes": "true"})
    return_url = reverse("corporation-sheet", args=[corp_id]) + "?" + params
    return redirect(return_url)
@check_tokens(1)
def guest(request, corp_id):
    """
    Displays the guest users of the corporation
    """
    guest_response = requests.get(GLOBAL_URL + f"/{corp_id}/guest",
                                  headers=global_headers(request))
    if guest_response.status_code != 200:
        return render_error(guest_response)
    name_response = post_universe_names(corp_id)
    if name_response.status_code != 200:
        return render_error(name_response)
    context = {
        "guests": guest_response.json(),
        "corporation_id": corp_id,
        "corporation_name": name_response.json()[0]["name"],
        "state_code": request.GET.get("state_code")
    }
    return render(request, "corporation/guest.html", context)
@check_tokens(1)
def guest_new(request, corp_id):
    """
    Will issue a state code that can be used to authenticate and be
    recognized as a guest of the corporation
    """
    request_code = requests.post(GLOBAL_URL+f"/{corp_id}/guest", headers=global_headers(request))
    if request_code.status_code != 200:
        return render_error(request_code)
    # Removed leftover debug print() of the state code: it leaked the code
    # to stdout/server logs; the code is already shown via the redirect.
    params = urlencode({"state_code": request_code.json()["state_code"]})
    return_url = reverse("corporation-guest", args=[corp_id]) + "?" + params
    return redirect(return_url)
@check_tokens(1)
def guest_delete(request, corp_id, user_id):
    """
    Delete a user from the guests of a corporation
    """
    delete_response = requests.delete(GLOBAL_URL + f"/{corp_id}/guest/{user_id}",
                                      headers=global_headers(request))
    if delete_response.status_code != 200:
        return render_error(delete_response)
    params = urlencode({"delete_guest": "true"})
    return redirect(reverse("corporation-guest", args=[corp_id]) + "?" + params)
|
<gh_stars>1-10
import logging
import src.ServiceTools
from src.DebtorsRegister import DebtorsRegister
from src.EntrepreneursRegister import EntrepreneursRegister
from src.LegalEntitiesRegister import LegalEntitiesRegister
from src.LustratedPersonsRegister import LustratedPersonsRegister
from src.MissingPersonsRegister import MissingPersonsRegister
from src.WantedPersonsRegister import WantedPersonsRegister
def search():
    """Prompt for a query and run it against every register collection."""
    search_string = str(input('Search query: '))
    logging.info('The search string: ' + search_string)
    service.clear_results_dir()
    # Run the query against every register, in the same order as before.
    registers = (
        missingPersons,
        wantedPersons,
        debtors,
        legalEntities,
        entrepreneurs,
        lustrated,
    )
    for register in registers:
        register.search_into_collection(search_string)
def setup_datasets():
    """Download and (re)load every register dataset into its collection."""
    # Information about missing citizens (JSON)
    missingPersons.setup_dataset()
    # Information about persons hiding from the authorities (JSON)
    wantedPersons.setup_dataset()
    # Unified register of debtors (CSV inside a ZIP)
    debtors.setup_dataset()
    # Unified state register of legal entities, individual entrepreneurs and
    # public organizations (XMLs inside a ZIP)
    legalEntities.delete_collection_index()
    legalEntities.clear_collection()
    entrepreneurs.delete_collection_index()
    entrepreneurs.clear_collection()
    # NOTE(review): one ZIP appears to hold both the legal-entity and the
    # entrepreneur data, so save_dataset() presumably populates both
    # collections — confirm against LegalEntitiesRegister.
    entrepreneurs_dataset_zip_url = legalEntities.get_dataset()
    legalEntities.save_dataset(entrepreneurs_dataset_zip_url)
    legalEntities.update_metadata()
    entrepreneurs.update_metadata()
    legalEntities.create_collection_index()
    entrepreneurs.create_collection_index()
    # Unified state register of persons subject to the "lustration" law
    # (XML inside a ZIP)
    lustrated.setup_dataset()
# Top-level menu: option number -> label shown to the user.
menu_options = {
    1: 'Search',
    2: 'Refresh datasets',
    3: 'Exit'
}


def print_menu():
    """Print the numbered menu, one option per line."""
    # Idiom fix: iterate items() instead of keys() + per-key indexing.
    for key, label in menu_options.items():
        print(key, '--', label)
if __name__ == '__main__':
    # Set up logging: append to a shared log file with fixed-width fields.
    # NOTE(review): the `encoding` argument requires Python 3.9+.
    logging.basicConfig(filename='logs/searchmydata.log', filemode='a',
                        format='%(asctime)s %(levelname)10s:%(filename)28s:%(message)s', datefmt='%d/%m/%Y %H:%M:%S',
                        level=logging.DEBUG, encoding='utf-8')
    logging.info('The application started')
    # create instances: the service helper and one object per register
    service = src.ServiceTools.ServiceTools()
    missingPersons = MissingPersonsRegister()
    wantedPersons = WantedPersonsRegister()
    debtors = DebtorsRegister()
    legalEntities = LegalEntitiesRegister()
    entrepreneurs = EntrepreneursRegister()
    lustrated = LustratedPersonsRegister()
    service.clear_console()
    # main loop: show register status, print the menu, dispatch the choice
    while True:
        service.get_registers_info()
        service.check_is_expired()
        print_menu()
        option = ''
        try:
            option = int(input('Enter your choice: '))
        except ValueError:
            # Non-numeric input: report it and fall through to the
            # `else` branch below (option is still '').
            logging.error('The wrong input type of menu item choice')
            print('Wrong input. Please enter a number ...')
        # Check what choice was entered and act accordingly
        if option == 1:
            # NOTE(review): ordinary menu choices are logged at WARNING
            # level — INFO looks intended; confirm before changing.
            logging.warning('The "Search" menu item chosen')
            search()
        elif option == 2:
            logging.warning('The "Refresh datasets" menu item chosen')
            setup_datasets()
        elif option == 3:
            logging.warning('The "Exit" menu item chosen')
            logging.info('The application closed')
            print('Quitting...')
            exit()
        else:
            logging.error('The wrong menu item choice')
            print('Invalid option. Please enter a number between 1 and 3.')
|
# -*-coding:utf8-*-
#
# @author:<EMAIL>
# ACO algorithm for K shortest path
#
# 研究问题:
# 针对北京市轨道交通的部分网络图,求解出O(起点站)-D(终点站)的K短路问题。本研究采用蚁群算法求解K短路的方案,
# 不仅考虑到蚁群算法的各种优越性,更在于其能很好解决该问题,并为诸多相关的问题研究提供一种可行的解决方案
# 考虑到所处理的问题的特殊性,为增强算法效率,本设计对地图进行了简化,但仍能保证正确的表述问题。
#
# 总体算法概述:
# 蚁群算法:
# 其是一种用来在图中寻找优化路径的机率型算法,一种模拟进化算法,具有一种新的模拟进化优化方法的有效性和应用价值。
# 生物学告诉我们,蚂蚁寻找食物的过程中,会向周围环境中释放一定的信息素,以吸引其他蚂蚁一起找食物,慢慢的越来越多的
# 蚂蚁找到食物,如果某个时刻的某条食物路线短,可以预见的是往返于这条路的蚂蚁就多,那么散发的激素也就多,最终越来越多
# 的蚂蚁会到这条线路上来,也就是我们期望的大部分蚂蚁最终重复着这条路线,也即最短路。那么我们在计算机世界里如何利用这样
# 一种智能呢?其实,大多数看似复杂的事物均由简单的规则组成,只是数量的不同而已。所以不必为此担心,来看看这些简单的规则:
# a)范围:蚂蚁观察到的范围是一个方格世界(square),假定为3*3,即真实世界中的八个方向,那么蚂蚁移动的范围也就确定了
# b)环境:蚂蚁所处的环境被模拟为一个世界(world),其中有障碍,其他蚂蚁,信息素(食物和窝),并且蚂蚁仅能感知其移动范围内
# 的信息,显然这符合客观事实,当然环境会以一定的速率是信息素挥发掉。
# c)觅食(窝)规则:find_food_nest_rule,在蚂蚁感知的范围内寻找是否有食物,有则直接找到,否则检查是否有信息素,有则比较信息素量,
# 并朝大的方向走。我们必须让蚂蚁以小概率犯错而选择非最大的点,这符合客观事实,不然蚂蚁就丧失了创新性,探索性,这不利于其发展,当然也
# 不利于算法的完备性。另找窝和食物规则基本一致,不再赘述
# d)移动规则:move_rule,每只蚂蚁都朝信息素最多的方向移动(这与觅食规则有所重复,本算法做了适当优化),当周围没有信息素指引的时候,蚂蚁则
# 惯性的按原来的方向运动,并且在此方向上会有随机扰动,这可以理解为环境因素的作用,毕竟方向感总有不正确的时候。那么有一个问题,,蚂蚁很可能
# 会在原地打转,如何解决呢?我们采用简单的记忆策略,即蚂蚁会记录下最近走过的地方,不再重复走,而是尽量避开。
# e)避障规则:avoid_obstacle_rule,如果蚂蚁要移动的方向有障碍物挡住,那么它会随机选择一个方向,有信息素指引则按照觅食规则(这里也有重复行为),如果
# 找不到可运动方向,则认为该蚂蚁走入死胡同(当然仅在本研究问题出现)。
# f)释放信息素规则:spread_pheromone_rule,不难想象,蚂蚁找到食物的时候释放的食物信息素最多,找到窝的时候释放窝信息素最多,并随着其走远,释放会减少。
# 综合以上规则(可以有更多规则,但超过本研究范围,不在讨论),可以看到,蚂蚁之间并没有必然联系,而是与环境发生信息交互,正式通过这个客观想象,使得蚂蚁关联起来。
# 举个例子,当一只蚂蚁找到食物时,并没有也无法告知其他蚂蚁哪里有食物,而是向环境释放信息素,当其他蚂蚁靠近时,就会跟随过来,从而也找到了食物。
# 关于某些问题的解释(算法部分参数):在没有任何信息素指引的情况下,蚂蚁是如何相对有效的找到食物的呢?答案是移动规则,这样保证了蚂蚁尽量向前,这就避免的原地打转
# 如果只是一味的向前,不太符合基本常理,所以算法使用PERTURBATION扰动参数来干扰蚂蚁的运动方向,这样就有了一定的目的性,保持原来的方向,又一定的试探,尤其是
# 遇到障碍的时候回立即改变方向。当然一只蚂蚁找到食物时,其他的会跟着找到食物。至于如何找到最短路的,上面已经介绍,归结于信息素,相同的时间内往返的蚂蚁多,释放的信息素
# 多,而且越来越多,趋于稳定。那么这样的规则下自然会产生局部最短和全局最短的问题,其实这个要归功于蚂蚁会选择错误的信息素点,这是一种创新,而一旦当这样创新找到的
# 路径更短时,那么经过充分的时间,这条路线上的蚂蚁一定会多起来,经过这样一系列的寻找,探索,最短路便被找到了。
# 本设计为增加扩展性,算法采用面向对象编程,方便扩展,包括以下几部分
# 设置类:主要用于所要求解问题的输入参数设置,以及蚁群算法的参数设置
# 蚂蚁类:主要用于表示蚂蚁个体,包括蚂蚁所具备的属性,以及特有生理现象
# 食物类:如果单针对本研究问题,食物类不是必须的,但考虑到扩展性可应用性,增加食物类,即世界中可以有不同种类的不同大小的食物,蚂蚁会做适当的选择,决定哪种更应先搬回窝。
# 蚁巢类:同样,蚁巢类不是必须的,但世界中可能有多个蚁巢,这是一个仿生环境,已不局限于所要解决所研究的问题
# 激素类:蚂蚁信息素也不是必须类,但为了更能表现蚂蚁世界的多样性,也考虑到有其他生物加入的可能性,激素类对增加算法的通用性是必须的
# 范围类:即蚂蚁移动的感知的范围,更确切的说与蚂蚁无关,它完全可以是其他生物的范围,所以为其不应携带蚂蚁信息。包括坐标信息,食物,窝,甚至可以包括敌窝等信息,它表征的是世界组成信息
# 世界类:显然这是模拟世界,里面不仅可以有蚂蚁,同样可以由各种生物,当然也可以理解为蚂蚁的世界,部落,世界之外还有世界等信息。其又很多范围组成。
#
# K短路求解算法:
# 对于K短路的求解,本设计采用了比较简单的策略,即在蚂蚁搜索食物的过程中,对于返回的路径进行记录,并保存当前以及以前找到的最短路径,在搜索的过程中实时的更新这个值,那么最终得到
# 基本可以保证是最短路径(经过有效的迭代次数以后),这在上面已经论述过。在迭代的过程中,还做了以下操作来求解K短路:排除记录表中已经存在的路线,排除路径损耗大于当前最短路线10分钟
# 的路线,将得到的路线按顺序插入路径表,显然这个操作是非常高效的,最重要的一点是这样最终得到的表中的路径正是我们需要的路径顺序,最短,第2,3,4...短路。当然为了最终排除大于最短路径10分钟
# 的路线,还需做最后一步操作,即去除表中大于最短路10分钟的路线以及以后的所有路线,这个非常容易实现。这样经过上述步骤,我们就获得了所有K短路线了。
# 算法函数:加载地图,执行初始化操作,以及最重要的求K短路的算法
#
# 算法详细步骤:
# 1)算法开始
# 2)初始化各个变量参数,初始化蚂蚁,世界等,重要参数为记录路线表k_paths,最短路线best_route,最短损耗shortest_len
# 3)执行算法,对于所有OD对,执行3)
# 4)迭代。在最大迭代次数内,对于每一只蚂蚁执行蚁群算法的各种规则,直到找到食物,返回该路线,并转为回窝操作,规则基本和觅食一致,当找到窝时,返回路线
# (本问题不使用该路线,所有排除,但考虑到算法适用性,仍然给出,而相应的窝激素和食物激素权重设为1,即互不影响,显然当二者影响时,可能会得到更佳的效果,但同样会增加控制的难度)
# 5)对于3)中,返回的每条路线,执行K算法求解策略,记录路线,更新最短路线等。另对于那些死亡的蚂蚁,则置为死亡状态,不再处理其运动操作。
# 6)迭代完毕,获得路线表,输出所有K短路
# 7)算法结束
#
# 算法总结以及分析:
# 从以上的分析可见,由于蚁群算法具有一定的随机性,针对本问题则显示出了一定的局限性,由于站点的不可重复性和不可回头性,蚂蚁容易走入死胡同,对蚂蚁的各项参数要求较高。
# 而且迭代次数无法预知,蚁群算法非一定从最大值向最小值收敛,不过基本可以保证最短路径的有效性,第K短路则不能保证解的完备性,这与蚁群算法本身特性相关。不过总体来看,
# 本解决方案在很好的解决所研究问题的同时,增加了算法的适用性,为更多的应用领域做了准备。当然算法不可能做到完美,更多的性能有待探讨和发现,限于水平,不再对蚁群算法以及相关领域做深入探讨
#
import random
import math
class Settings(object): # 设置类
ROUTE_LINE_1 = ["b", "t", "n", "e", "v", "k", "u"]
ROUTE_LINE_2 = ["d", "i", "j", "k", "l", "m", "f", "n"] # 2号线站点表
ROUTE_LINE_4 = ["a", "c", "d", "e", "f", "g", "h"] # 4号线
ROUTE_LINE_13 = ["d", "o", "p", "q", "r", "s", "j"] # 13号线
STATIONS_ROUTE_TIME = {
"ac": 13,
"ca": 13,
"cd": 12,
"dc": 12,
"de": 10,
"ed": 10,
"ef": 2,
"fe": 2,
"fg": 6,
"gf": 6,
"gh": 5,
"hg": 5,
"di": 9,
"id": 9,
"ij": 4,
"ji": 4,
"jk": 7,
"kj": 7,
"kl": 2,
"lk": 2,
"lm": 6,
"ml": 6,
"mf": 4,
"fm": 4,
"fn": 4,
"nf": 4,
"nd": 7,
"dn": 7,
"do": 5,
"od": 5,
"op": 12,
"po": 12,
"pq": 7,
"qp": 7,
"qr": 6,
"rq": 6,
"rs": 13,
"sr": 13,
"sj": 6,
"js": 6,
"bt": 23,
"tb": 23,
"tn": 6,
"nt": 6,
"ne": 3,
"en": 3,
"ev": 6,
"ve": 6,
"vk": 5,
"kv": 5,
"ku": 13,
"uk": 13,
} # 各站点间耗时
# #######修OD结构,必须填写完整的OD################################
O_D = {
"ac": 1480,
"ad": 2432,
"ae": 1704,
"af": 816,
"ag": 928,
"ah": 472,
"ai": 968,
"aj": 848,
"ak": 1328,
"al": 1456,
"am": 512,
"an": 272,
"ao": 808,
"ap": 744,
"aq": 512,
"ar": 536,
"as": 424,
"ab": 576,
"at": 1696,
"av": 664,
"au": 432,
"ca": 1056,
"cd": 1320,
"ce": 1432,
"cf": 896,
"cg": 1040,
"ch": 672,
"ci": 1488,
"cj": 1608,
"ck": 1904,
"cl": 1040,
"cm": 760,
"cn": 848,
"co": 864,
"cp": 1072,
"cq": 752,
"cr": 960,
"cs": 680,
"cb": 184,
"ct": 1104,
"cv": 1704,
"cu": 1080,
"da": 1392,
"dc": 1344,
"de": 832,
"df": 2520,
"dg": 2432,
"dh": 1712,
"di": 2136,
"dj": 2352,
"dk": 2824,
"dl": 1976,
"dm": 1278,
"dn": 960,
"do": 1128,
"dp": 1784,
"dq": 1656,
"dr": 1888,
"ds": 1696,
"db": 1104,
"dt": 2144,
"dv": 2432,
"du": 2208,
"ea": 2144,
"ec": 2920,
"ed": 984,
"ef": 608,
"eg": 1136,
"eh": 712,
"ei": 808,
"ej": 1088,
"ek": 1624,
"el": 1872,
"em": 1384,
"en": 312,
"eo": 968,
"ep": 1048,
"eq": 832,
"er": 872,
"es": 1040,
"eb": 2496,
"et": 3088,
"ev": 1232,
"eu": 3208,
"fa": 472,
"fc": 920,
"fd": 2296,
"fe": 128,
"fg": 360,
"fh": 448,
"fi": 1144,
"fj": 1008,
"fk": 1928,
"fl": 816,
"fm": 360,
"fn": 448,
"fo": 992,
"fp": 816,
"fq": 224,
"fr": 344,
"fs": 464,
"fb": 1336,
"ft": 1544,
"fv": 984,
"fu": 1520,
"ga": 624,
"gc": 1008,
"gd": 1472,
"ge": 848,
"gf": 464,
"gh": 248,
"gi": 1744,
"gj": 840,
"gk": 1184,
"gl": 448,
"gm": 360,
"gn": 688,
"go": 1264,
"gp": 984,
"gq": 848,
"gr": 192,
"gs": 824,
"gb": 864,
"gt": 1552,
"gv": 1680,
"gu": 1008,
"ha": 752,
"hc": 1264,
"hd": 824,
"he": 1168,
"hf": 1080,
"hg": 408,
"hi": 1632,
"hj": 992,
"hk": 1736,
"hl": 1016,
"hm": 832,
"hn": 1312,
"ho": 992,
"hp": 544,
"hq": 672,
"hr": 560,
"hs": 864,
"hb": 672,
"ht": 1728,
"hv": 1288,
"hu": 1472,
"ia": 432,
"ic": 552,
"id": 784,
"ie": 1912,
"if": 1920,
"ig": 1272,
"ih": 448,
"ij": 328,
"ik": 1392,
"il": 544,
"im": 1296,
"in": 832,
"io": 1136,
"ip": 256,
"iq": 464,
"ir": 248,
"is": 544,
"ib": 288,
"it": 944,
"iv": 1248,
"iu": 1968,
"ja": 624,
"jc": 1472,
"jd": 1704,
"je": 1344,
"jf": 1448,
"jg": 1824,
"jh": 808,
"ji": 328,
"jk": 808,
"jl": 568,
"jm": 712,
"jn": 1232,
"jo": 1128,
"jp": 168,
"jq": 280,
"jr": 216,
"js": 512,
"jb": 808,
"jt": 944,
"jv": 1248,
"ju": 1792,
"ka": 904,
"kc": 3064,
"kd": 2848,
"ke": 944,
"kf": 2192,
"kg": 2408,
"kh": 1248,
"ki": 1880,
"kj": 2016,
"kl": 1264,
"km": 1312,
"kn": 2032,
"ko": 1368,
"kp": 2928,
"kq": 1704,
"kr": 1880,
"ks": 1840,
"kb": 912,
"kt": 1968,
"kv": 816,
"ku": 2024,
"la": 840,
"lc": 1488,
"ld": 2688,
"le": 1536,
"lf": 1160,
"lg": 992,
"lh": 1048,
"li": 680,
"lj": 184,
"lk": 904,
"lm": 448,
"ln": 696,
"lo": 720,
"lp": 1248,
"lq": 808,
"lr": 1008,
"ls": 848,
"lb": 992,
"lt": 2280,
"lv": 960,
"lu": 1680,
"ma": 536,
"mc": 1104,
"md": 2776,
"me": 992,
"mf": 800,
"mg": 1136,
"mh": 696,
"mi": 808,
"mj": 984,
"mk": 1472,
"ml": 960,
"mn": 1080,
"mo": 904,
"mp": 592,
"mq": 856,
"mr": 840,
"ms": 808,
"mb": 752,
"mt": 1496,
"mv": 1000,
"mu": 1024,
"na": 432,
"nc": 1312,
"nd": 1704,
"ne": 328,
"nf": 272,
"ng": 1128,
"nh": 1072,
"ni": 1424,
"nj": 1312,
"nk": 2608,
"nl": 1424,
"nm": 1712,
"no": 1072,
"np": 808,
"nq": 992,
"nr": 504,
"ns": 784,
"nb": 1472,
"nt": 992,
"nv": 1440,
"nu": 1728,
"oa": 816,
"oc": 904,
"od": 1432,
"oe": 1264,
"of": 1136,
"og": 1048,
"oh": 368,
"oi": 1232,
"oj": 1088,
"ok": 1304,
"ol": 1968,
"om": 1112,
"on": 1040,
"op": 216,
"oq": 504,
"or": 408,
"os": 456,
"ob": 1336,
"ot": 2008,
"ov": 1280,
"ou": 992,
"pa": 592,
"pc": 368,
"pd": 1128,
"pe": 968,
"pf": 1832,
"pg": 808,
"ph": 752,
"pi": 1344,
"pj": 1704,
"pk": 1312,
"pl": 1176,
"pm": 304,
"pn": 320,
"po": 456,
"pq": 272,
"pr": 544,
"ps": 1136,
"pb": 280,
"pt": 776,
"pv": 816,
"pu": 1360,
"qa": 840,
"qc": 904,
"qd": 856,
"qe": 512,
"qf": 1088,
"qg": 904,
"qh": 696,
"qi": 776,
"qj": 856,
"qk": 1768,
"ql": 1264,
"qm": 2240,
"qn": 864,
"qo": 1000,
"qp": 688,
"qr": 320,
"qs": 800,
"qb": 584,
"qt": 1616,
"qv": 912,
"qu": 1312,
"ra": 352,
"rc": 392,
"rd": 592,
"re": 1728,
"rf": 808,
"rg": 768,
"rh": 480,
"ri": 864,
"rj": 1160,
"rk": 1528,
"rl": 1256,
"rm": 1312,
"rn": 2048,
"ro": 1424,
"rp": 960,
"rq": 456,
"rs": 1072,
"rb": 432,
"rt": 1808,
"rv": 832,
"ru": 1392,
"sa": 512,
"sc": 952,
"sd": 1112,
"se": 1296,
"sf": 1144,
"sg": 864,
"sh": 904,
"si": 936,
"sj": 648,
"sk": 1608,
"sl": 1472,
"sm": 1344,
"sn": 824,
"so": 1256,
"sp": 1192,
"sq": 1056,
"sr": 872,
"sb": 320,
"st": 1144,
"sv": 968,
"su": 1472,
"ta": 912,
"tc": 1584,
"td": 2432,
"te": 1126,
"tf": 1312,
"tg": 1096,
"th": 992,
"ti": 1432,
"tj": 1736,
"tk": 2576,
"tl": 1312,
"tm": 1192,
"tn": 408,
"to": 992,
"tp": 280,
"tq": 696,
"tr": 752,
"ts": 1016,
"tb": 856,
"tv": 992,
"tu": 1192,
"va": 448,
"vc": 1216,
"vd": 2048,
"ve": 544,
"vf": 1312,
"vg": 1008,
"vh": 344,
"vi": 1176,
"vj": 1528,
"vk": 896,
"vl": 832,
"vm": 2800,
"vn": 1472,
"vo": 1120,
"vp": 672,
"vq": 2304,
"vr": 936,
"vs": 536,
"vb": 608,
"vt": 1664,
"vu": 2512,
"ua": 368,
"uc": 1008,
"ud": 3264,
"ue": 2760,
"uf": 952,
"ug": 1560,
"uh": 1656,
"ui": 1432,
"uj": 1496,
"uk": 3472,
"ul": 984,
"um": 1576,
"un": 1080,
"uo": 1728,
"up": 2048,
"uq": 1960,
"ur": 2512,
"us": 2064,
"ub": 368,
"ut": 1048,
"uv": 1048,
"ba": 1192,
"bc": 1496,
"bd": 2136,
"be": 1336,
"bf": 1072,
"bg": 968,
"bh": 752,
"bi": 1072,
"bj": 1256,
"bk": 3400,
"bl": 1664,
"bm": 1272,
"bn": 1496,
"bo": 776,
"bp": 856,
"bq": 1024,
"br": 1168,
"bs": 832,
"bt": 1744,
"bu": 1968,
"bv": 1072,
}
# 需求解的OD对以及总人数
# ###所有相邻的两点,这里必须填上所有的相邻点,初始值都是0############
ALL_TWO = {
"ac": 0,
"cd": 0,
"de": 0,
"ef": 0,
"fg": 0,
"gh": 0,
"di": 0,
"ij": 0,
"jk": 0,
"kl": 0,
"lm": 0,
"mf": 0,
"fn": 0,
"nd": 0,
"do": 0,
"op": 0,
"pq": 0,
"qr": 0,
"rs": 0,
"sj": 0,
"bt": 0,
"tn": 0,
"ne": 0,
"ev": 0,
"vk": 0,
"ku": 0,
"ca": 0,
"dc": 0,
"ed": 0,
"fe": 0,
"gf": 0,
"hg": 0,
"id": 0,
"ji": 0,
"kj": 0,
"lk": 0,
"ml": 0,
"fm": 0,
"nf": 0,
"dn": 0,
"od": 0,
"po": 0,
"qp": 0,
"rq": 0,
"sr": 0,
"js": 0,
"tb": 0,
"nt": 0,
"en": 0,
"ve": 0,
"kv": 0,
"uk": 0,
}
D_CHANGE_ROUTE = {
"4->13": 14.5,
"4->2": 7.5,
"13->2": 15,
"13->4": 12.75,
"2->4": 8.25,
"2->13": 14.25,
} # D站点换乘耗时
F_CHANGE_ROUTE = {"4->2": 7.5, "2->4": 9.75} # F
J_CHANGE_ROUTE = {"13->2": 9, "2->13": 11.25} # J
N_CHANGE_ROUTE = {"1->2": 7.5, "2->1": 7.5} # J
E_CHANGE_ROUTE = {"1->4": 7.5, "4->1": 6} # J
K_CHANGE_ROUTE = {"1->2": 9, "2->1": 7.5} # J
MAX_NC = 100 # 最大迭代次数
ANTS_NUM = 50 # 蚂蚁个数
PHEROMONE_WEIGHT = 1.0 # 表征信息素重要程度的参数(这里指食物和蚁巢素所占权重)
MISTAKE_RATE = 1.0 / ANTS_NUM # 表征启发式因子重要程度的参数
PERTURBATION = 1.0 # 表征移动时的扰动参数,由于这里的线路限制,扰动为100%出现,可避免大量死蚂蚁
MEMORY_ABILITY = 8 # 表征蚂蚁记忆力参数
RHO = 1.0 / MAX_NC # 信息素蒸发系数
PHEROMONE = 100 # 信息素总量参数
class Ant(object): # 蚂蚁类
def __init__(self, world, square, ant_id):
self.world = world # 蚂蚁所处的世界
self.square = square # 蚂蚁所在方格
self.behavior = "FOOD" # 当前蚂蚁行为,初始化为觅食
self.prev_direction = -1 # 0-7分别为八个方向
self.total_food = 0 # 所搬的食物数
self.ant_id = ant_id # 蚂蚁的编号
self.pos_memory = [] # 蚂蚁记忆最近走过的路
self.total_route_len = 1 # 总路线长度
self.food_route = [] # 觅食路线
self.nest_route = [] # 寻巢路线
self.route = [] # 路线
self.is_dead = False # 是否已经进入死角
self.scope = self.get_scope(square) # 蚂蚁当前能观察到的范围
# ########以下为扩展############
def get_square(self):
return self.square
def set_square(self, square):
self.square = square
def get_total_food(self):
return self.total_food
def set_total_food(self):
pass
def get_ant_id(self):
return self.ant_id
def set_ant_id(self, ant_id):
self.ant_id = ant_id
# #####扩展结束###############
def get_scope(self, square): # 获取当前蚂蚁的【范围】
x, y = square.x, square.y
self.route.append(square.label) #
self.food_route.append(square) #
self.pos_memory.append(square) #
scope = [
self.world.SQUARES[y][x - 1],
self.world.SQUARES[y + 1][x - 1],
self.world.SQUARES[y + 1][x],
self.world.SQUARES[y + 1][x + 1],
self.world.SQUARES[y][x + 1],
self.world.SQUARES[y - 1][x + 1],
self.world.SQUARES[y - 1][x],
self.world.SQUARES[y - 1][x - 1],
]
return scope
# 蚂蚁所具备的本能【前进->觅食或寻窝】
def go_next_square(self):
next_direction = -1
square = None
route_line = ""
def change_route_cost(): # 计算换乘代价
change_cost = 0
# 获得当前换乘情况
if self.prev_direction >= 4:
tmp = self.prev_direction - 4
else:
tmp = self.prev_direction + 4
prev_station = self.scope[tmp].label
cur_station = self.square.label
next_station = square.label
if cur_station == "d":
if prev_station in Settings.ROUTE_LINE_4 and next_station in Settings.ROUTE_LINE_13:
change_cost = Settings.D_CHANGE_ROUTE["4->13"]
elif (
prev_station in Settings.ROUTE_LINE_4 and next_station in Settings.ROUTE_LINE_2
):
change_cost = Settings.D_CHANGE_ROUTE["4->2"]
elif (
prev_station in Settings.ROUTE_LINE_13 and next_station in Settings.ROUTE_LINE_2
):
change_cost = Settings.D_CHANGE_ROUTE["13->2"]
elif (
prev_station in Settings.ROUTE_LINE_13 and next_station in Settings.ROUTE_LINE_4
):
change_cost = Settings.D_CHANGE_ROUTE["13->4"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_4
):
change_cost = Settings.D_CHANGE_ROUTE["2->4"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_13
):
change_cost = Settings.D_CHANGE_ROUTE["2->13"]
elif cur_station == "f":
if prev_station in Settings.ROUTE_LINE_4 and next_station in Settings.ROUTE_LINE_2:
change_cost = Settings.F_CHANGE_ROUTE["4->2"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_4
):
change_cost = Settings.F_CHANGE_ROUTE["2->4"]
elif cur_station == "j":
if prev_station in Settings.ROUTE_LINE_13 and next_station in Settings.ROUTE_LINE_2:
change_cost = Settings.J_CHANGE_ROUTE["13->2"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_13
):
change_cost = Settings.J_CHANGE_ROUTE["2->13"]
elif cur_station == "n":
if prev_station in Settings.ROUTE_LINE_1 and next_station in Settings.ROUTE_LINE_2:
change_cost = Settings.N_CHANGE_ROUTE["1->2"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_1
):
change_cost = Settings.N_CHANGE_ROUTE["2->1"]
elif cur_station == "e":
if prev_station in Settings.ROUTE_LINE_1 and next_station in Settings.ROUTE_LINE_4:
change_cost = Settings.E_CHANGE_ROUTE["1->4"]
elif (
prev_station in Settings.ROUTE_LINE_4 and next_station in Settings.ROUTE_LINE_1
):
change_cost = Settings.E_CHANGE_ROUTE["4->1"]
elif cur_station == "k":
if prev_station in Settings.ROUTE_LINE_1 and next_station in Settings.ROUTE_LINE_2:
change_cost = Settings.K_CHANGE_ROUTE["1->2"]
elif (
prev_station in Settings.ROUTE_LINE_2 and next_station in Settings.ROUTE_LINE_1
):
change_cost = Settings.K_CHANGE_ROUTE["2->1"]
return change_cost
if self.is_dead: # 如果蚂蚁状态为死亡,则不再移动
return (None, 10000)
(next_direction, square, route_line, find_food_nest) = self.find_food_nest_rule() # 执行觅食规则
if find_food_nest: # 找到食物或者窝
if self.behavior == "FOOD":
self.food_route.append(square)
self.route.append(square.label)
route = self.route
self.behavior = "NEST"
self.nest_route = []
self.food_route = []
self.nest_route.append(square)
else:
self.nest_route.append(square)
self.route.append(square.label)
route = self.route
self.behavior = "FOOD"
self.total_food += 1
self.nest_route = []
self.food_route = []
self.food_route.append(square)
# 计算最终路线耗时
self.total_route_len += Settings.STATIONS_ROUTE_TIME[route_line]
if (
self.square.label == "d"
or self.square.label == "f"
or self.square.label == "j"
or self.square.label == "n"
or self.square.label == "e"
or self.square.label == "k"
) and self.prev_direction != -1:
self.total_route_len += change_route_cost()
total_route_len = self.total_route_len - 1
# 置位所有参数,进行相反的行为(觅食寻窝转换)
self.square = square
self.prev_direction = -1
self.total_route_len = 1
self.pos_memory = []
self.route = []
self.pos_memory.append(square) # 蚂蚁记忆最近走过的路
self.route.append(square.label) # 路线
x, y = self.square.x, self.square.y
self.scope = [
self.world.SQUARES[y][x - 1],
self.world.SQUARES[y + 1][x - 1],
self.world.SQUARES[y + 1][x],
self.world.SQUARES[y + 1][x + 1],
self.world.SQUARES[y][x + 1],
self.world.SQUARES[y - 1][x + 1],
self.world.SQUARES[y - 1][x],
self.world.SQUARES[y - 1][x - 1],
]
self.spread_pheromone_rule(self.square.x, self.square.y) # 执行播撒信息素规则
return (route, total_route_len) # 返回获得的路线
elif square is None: # 如果觅食规则无效,则执行移动规则(无效即无信息素指引)
(next_direction, square, route_line) = self.move_rule() # 执行移动规则
if square is None: # 如果移动规则无效(即撞墙),则执行避障规则
(next_direction, square, route_line) = self.avoid_obstacle_rule() # 执行避障规则
if self.is_dead: # 检查避障规则后蚂蚁是否死亡,是则直接返回
return (None, 10000)
# 计算当前移动后,路径耗时
self.total_route_len += Settings.STATIONS_ROUTE_TIME[route_line]
if (
self.square.label == "d"
or self.square.label == "f"
or self.square.label == "j"
or self.square.label == "n"
or self.square.label == "e"
or self.square.label == "k"
) and self.prev_direction != -1:
self.total_route_len += change_route_cost()
# 执行移动
self.square = square
self.prev_direction = next_direction
x, y = self.square.x, self.square.y
self.scope = [
self.world.SQUARES[y][x - 1],
self.world.SQUARES[y + 1][x - 1],
self.world.SQUARES[y + 1][x],
self.world.SQUARES[y + 1][x + 1],
self.world.SQUARES[y][x + 1],
self.world.SQUARES[y - 1][x + 1],
self.world.SQUARES[y - 1][x],
self.world.SQUARES[y - 1][x - 1],
]
memory_len = len(self.pos_memory) - 1 # 检查当前已记忆值
if memory_len >= Settings.MEMORY_ABILITY:
self.pos_memory[Settings.MEMORY_ABILITY - 1] = self.square
else:
self.pos_memory.append(self.square)
# 根据当前行为记录当前路径到对应表
if self.behavior == "FOOD":
self.food_route.append(self.square)
if self.behavior == "NEST":
self.nest_route.append(self.square)
# 记录该位置到路径表
self.route.append(self.square.label)
self.spread_pheromone_rule(self.square.x, self.square.y) # 执行播撒信息素规则
return (None, 10000)
def move_rule(self):
"""
移动规则:每只蚂蚁都朝向外激素最多的方向移,并且,当周围没有外激素指引的时候,蚂蚁会按照自己原来
运动的方向惯性的运动下去,并且,在运动的方向有一个随机的小的扰动。为了防止蚂蚁原地转圈,
它会记住最近刚走过了哪些点,如果发现要走的下一点已经在最近走过了,它就会尽量避开。
"""
station_routes = Settings.STATIONS_ROUTE_TIME.keys()
routes = []
square = None
next_direction = -1
line = ""
def search_route_line(): # 查找上次所在的线路,获得惯性方向
next_station = None
route_lines = [
Settings.ROUTE_LINE_1,
Settings.ROUTE_LINE_2,
Settings.ROUTE_LINE_4,
Settings.ROUTE_LINE_13,
]
next_line = None
if self.prev_direction >= 4:
tmp = self.prev_direction - 4
else:
tmp = self.prev_direction + 4
for i in range(4):
if self.scope[tmp].label in route_lines[i] and self.square.label in route_lines[i]:
next_line = i # 哪一条线路
index_p = route_lines[i].index(self.scope[tmp].label)
index_c = route_lines[i].index(self.square.label)
if index_p == 0 and index_c == len(route_lines[i]) - 1:
# 0-->last-->
next_station = index_c - 1
elif index_c == 0 and index_p == len(route_lines[i]) - 1:
# last-->0-->
next_station = index_c + 1
elif index_p > index_c:
# <----
if index_c == 0:
next_station = len(route_lines[i]) - 1
else:
next_station = index_c - 1
elif index_p < index_c:
# --->
if index_c == len(route_lines[i]) - 1:
next_station = 0
else:
next_station = index_c + 1
break
for i in range(len(routes)):
if routes[i].label == route_lines[next_line][next_station]:
return (self.scope.index(routes[i]), routes[i])
return (-1, None)
for i in range(8): # 搜索所有连通当前站点且未走过的站点
if (
self.scope[i] is None
or self.scope[i] in self.food_route
or self.scope[i] in self.nest_route
):
continue
if self.square.label + self.scope[i].label in station_routes:
routes.append(self.scope[i])
routes_num = len(routes)
if routes_num == 0: # 没有可选线路,直接返回
return (-1, None, "")
if self.prev_direction != -1:
(next_direction, square) = search_route_line()
else:
square = random.choice(routes)
next_direction = self.scope.index(square)
if random.random() <= Settings.PERTURBATION: # 扰动规则,符合则进行随机扰动
mistake = random.randint(0, 7)
square = self.scope[mistake]
for i in range(8):
if square is not None and square not in routes:
square = None
next_direction = mistake
if square is None:
line = ""
else:
line = self.square.label + square.label
return (next_direction, square, line)
def find_food_nest_rule(self):
"""
觅食寻巢规则:在每只蚂蚁能感知的范围内寻找是否有食物,如果有就直接过去。否则看是否有外激素,
并且比较在能感知的范围内哪一点的外激素最多,这样,它就朝外激素多的地方走,并且每只蚂蚁
多会以小概率犯错误,从而并不是往外激素最多的点移动。蚂蚁找窝的规则和上面一样,只不过
它对窝的外激素做出反应,而对食物外激素没反应。
"""
def calculate_pheromone(square, behavior): # 计算信息素量
if behavior == "FOOD":
return (
square.pheromone.food_pheromone * Settings.PHEROMONE_WEIGHT
+ square.pheromone.nest_pheromone * (1 - Settings.PHEROMONE_WEIGHT)
)
if behavior == "NEST":
return (
square.pheromone.food_pheromone * (1 - Settings.PHEROMONE_WEIGHT)
+ square.pheromone.nest_pheromone * Settings.PHEROMONE_WEIGHT
)
station_routes = Settings.STATIONS_ROUTE_TIME.keys()
routes = []
bool_pheromone = False
square = None
next_direction = -1
for i in range(8): # 搜索所有连通当前站点且未走过的站点
if (
self.scope[i] is None
or self.scope[i] in self.food_route
or self.scope[i] in self.nest_route
):
continue
if self.square.label + self.scope[i].label in station_routes:
routes.append(self.scope[i])
routes_num = len(routes)
if routes_num == 0: # 没有可选线路,直接返回
return (-1, None, "", False)
for i in range(routes_num):
if (
routes[i].square_food.food_type is not None and self.behavior == "FOOD"
): # 如果有食物,直接返回,不再考查激素
return (i, routes[i], self.square.label + routes[i].label, True)
if (
routes[i].square_nest.nest_type is not None and self.behavior == "NEST"
): # 如果有蚁巢,直接返回,不再考查激素
return (i, routes[i], self.square.label + routes[i].label, True) # 方向,方格,线路,是否找到
if (routes[i].pheromone.food_pheromone != 0 and self.behavior == "FOOD") or (
routes[i].pheromone.nest_pheromone != 0 and self.behavior == "NEST"
):
bool_pheromone = True
if not bool_pheromone: # 如果没有激素,则返回执行后面的规则
return (-1, None, "", False)
else: # 有激素,则获取所有可选方向
pheromone_values = []
pheromone_current_best_value = 0
pheromone_value = 0
line = ""
for i in range(routes_num):
pheromone_value = calculate_pheromone(routes[i], self.behavior) # 计算方格值
pheromone_values.append((pheromone_value, i))
if pheromone_value > pheromone_current_best_value:
pheromone_current_best_value = pheromone_value
square = routes[i]
next_direction = self.scope.index(square)
next_index = i
line = self.square.label + square.label
if len(pheromone_values) >= 2: # 少于2个,不具有选错性
if random.random() <= Settings.MISTAKE_RATE: # 执行错误选择信息素方向规则,满足则去除最大值,从剩下的随机选取一个移动
values = [v[1] for v in pheromone_values if v[1] != next_index]
next_direction = random.choice(values)
(next_direction, square) = (
self.scope.index(routes[next_direction]),
routes[next_direction],
)
line = self.square.label + square.label
return (next_direction, square, line, False)
def avoid_obstacle_rule(self):
"""
避障规则:如果蚂蚁要移动的方向有障碍物挡住,它会随机的选择另一个方向,并且有外激素指引的话,
它会按照觅食的规则行为
"""
station_routes = Settings.STATIONS_ROUTE_TIME.keys()
routes = []
for i in range(8): # 搜索所有连通当前站点且未走过的站点
if (
self.scope[i] is None
or self.scope[i] in self.food_route
or self.scope[i] in self.nest_route
):
continue
if self.square.label + self.scope[i].label in station_routes:
routes.append((i, self.scope[i], self.square.label + self.scope[i].label))
routes_num = len(routes)
if routes_num == 0: # 没有可选线路,直接返回,设蚂蚁为死亡
self.is_dead = True
return (-1, None, "")
else:
return random.choice(routes)
def spread_pheromone_rule(self, x, y):
"""
播撒外激素规则:每只蚂蚁在刚找到食物或者窝的时候撒发的外激素最多,并随着它走远的距离,
播撒的外激素越来越少。
"""
# 释放信息素采用线性规则,即蚂蚁携带的激素总量除以当前路径损耗,可满足释放激素规则
if self.behavior == "NEST":
self.world.SQUARES[y][x].pheromone.food_pheromone += (
1.0 * Settings.PHEROMONE / self.total_route_len
)
if self.behavior == "FOOD":
self.world.SQUARES[y][x].pheromone.nest_pheromone += (
1.0 * Settings.PHEROMONE / self.total_route_len
)
# ####扩展类开始#############
class Food(object):
    """A piece of food placed on a square.

    Both attributes are None for an empty square; set_food() in World
    assigns them when a station becomes the destination.
    """

    def __init__(self, food_size, food_type):
        self.food_size, self.food_type = food_size, food_type

    def get_food_size(self):
        """Return the food size marker (e.g. "BIG") or None."""
        return self.food_size

    def set_food_size(self, food_size):
        """Overwrite the food size marker."""
        self.food_size = food_size

    def get_food_type(self):
        """Return the food type marker (e.g. "FOOD") or None."""
        return self.food_type

    def set_food_type(self, food_type):
        """Overwrite the food type marker."""
        self.food_type = food_type
class Nest(object):
    """An ant nest placed on a square; `nest_type` is None while unset."""

    def __init__(self, nest_type):
        self.nest_type = nest_type

    def get_nest_type(self):
        """Return the nest marker (e.g. "NEST") or None."""
        return self.nest_type

    def set_nest_type(self, nest_type):
        """Overwrite the nest marker."""
        self.nest_type = nest_type
class Pheromone(object):
    """Pheromone amounts deposited on one square.

    Two independent channels: food pheromone (read when an ant's behavior
    is "FOOD") and nest pheromone (read when it is "NEST").
    """

    def __init__(self, food_pheromone, nest_pheromone):
        self.food_pheromone, self.nest_pheromone = food_pheromone, nest_pheromone

    def get_food_pheromone(self):
        """Return the food-pheromone amount."""
        return self.food_pheromone

    def set_food_pheromone(self, food_pheromone):
        """Overwrite the food-pheromone amount."""
        self.food_pheromone = food_pheromone

    def get_nest_pheromone(self):
        """Return the nest-pheromone amount."""
        return self.nest_pheromone

    def set_nest_pheromone(self, nest_pheromone):
        """Overwrite the nest-pheromone amount."""
        self.nest_pheromone = nest_pheromone
# ###########扩展类结束################
class Square(object):  # one cell ("square") of the world grid
    def __init__(self, x, y, label):
        # A square starts empty: no food, no nest, zero pheromone on
        # both channels.
        self.square_food = Food(None, None)
        self.square_nest = Nest(None)
        self.pheromone = Pheromone(0.0, 0.0)
        self.x = x  # column index into World.SQUARES[y]
        self.y = y  # row index into World.SQUARES
        self.label = label  # station letter taken from the map file
class World(object):  # the simulated world: a 2-D grid of squares
    def __init__(self, world_map):
        # [x, y] coordinates of the food/nest squares, filled by
        # set_food() / set_nest().
        self.food_pos = []
        self.nest_pos = []
        self.SQUARES = self.get_world_map_squares(world_map)

    def get_world_map_squares(self, world_map):
        """Build the grid of Square objects from the parsed text map.

        Cells equal to "1" are obstacles and stay None; every other cell
        becomes a Square labelled with the map character.
        """
        width = len(world_map[0])
        height = len(world_map)
        SQUARES = [[None for x in range(width)] for y in range(height)]
        for y in range(height):
            for x in range(width):
                if world_map[y][x] == "1":
                    continue
                else:
                    SQUARES[y][x] = Square(x, y, world_map[y][x])
        return SQUARES

    def set_nest(self, label, *position):  # mark the square labelled `label` as the nest
        # (x,y)=position
        # self.SQUARES[x][y].square_nest.nest_type='NEST'
        #
        # NOTE(review): if the label is absent from the map this returns
        # None, and callers that unpack `(x, y) = world.set_nest(...)`
        # would raise TypeError — confirm labels always exist.
        for y in range(len(self.SQUARES)):
            for x in range(len(self.SQUARES[0])):
                if self.SQUARES[y][x] is None:
                    continue
                if self.SQUARES[y][x].label == label:
                    self.SQUARES[y][x].square_nest.nest_type = "NEST"
                    self.nest_pos = [x, y]
                    return (x, y)

    def set_food(self, label, *position):  # mark the square labelled `label` as food
        # (x,y)=position
        # self.SQUARES[x][y].square_food.food_type='FOOD'
        # self.SQUARES[x][y].square_food.food_size='BIG'
        for y in range(len(self.SQUARES)):
            for x in range(len(self.SQUARES[0])):
                if self.SQUARES[y][x] is None:
                    continue
                if self.SQUARES[y][x].label == label:
                    self.SQUARES[y][x].square_food.food_type = "FOOD"
                    self.SQUARES[y][x].square_food.food_size = "BIG"
                    self.food_pos = [x, y]
                    return (x, y)

    def update_pheromone(self):
        """Evaporate pheromone: scale both channels of every square
        by (1 - Settings.RHO) once per iteration."""
        for y in range(len(self.SQUARES)):
            for x in range(len(self.SQUARES[0])):
                if self.SQUARES[y][x] is not None:
                    self.SQUARES[y][x].pheromone.food_pheromone = (1 - Settings.RHO) * self.SQUARES[
                        y
                    ][x].pheromone.food_pheromone
                    self.SQUARES[y][x].pheromone.nest_pheromone = (1 - Settings.RHO) * self.SQUARES[
                        y
                    ][x].pheromone.nest_pheromone
def Init_ACO_K_ShortRoute():  # main driver: K shortest paths for every OD pair
    # Load the map: each text line is a comma-separated row of cell labels.
    world_map = []
    with open("map.txt") as f:
        for line in f:
            world_map.append("".join(line.split("\n")).split(","))
    # Solve each origin-destination pair independently.
    for od in Settings.O_D.keys():
        best_route = []  # shortest route found so far
        shortest_len = 10000  # its cost in minutes; 10000 acts as "infinity"
        k_paths = []  # K shortest routes, kept sorted by cost
        world = World(world_map)
        (x, y) = world.set_nest(od[0])  # origin station becomes the nest
        world.set_food(od[1])  # destination station becomes the food
        ants = [Ant(world, world.SQUARES[y][x], i) for i in range(Settings.ANTS_NUM)]
        # Iterate
        for nc in range(Settings.MAX_NC):
            # each ant
            for ant_id in range(Settings.ANTS_NUM):
                # One step of foraging/homing; a completed trip returns the
                # route and its cost, otherwise (None, 10000).
                (route, route_len) = ants[ant_id].go_next_square()
                # Skip invalid routes (none, or not starting at the origin).
                if route is None or route[0] != od[0]:
                    continue
                # Discard routes more than 10 minutes above the current best.
                if route_len - shortest_len > 10:
                    continue
                # Record/update the shortest route.
                if route_len < shortest_len:
                    shortest_len = route_len
                    best_route = route
                # First K-route: insert directly and move on.
                if len(k_paths) == 0:
                    k_paths.insert(0, (route, route_len))
                    continue
                # Ignore routes already present in the K-route table.
                if route in [k[0] for k in k_paths]:
                    continue
                else:
                    # Reject routes with more than three transfer stations.
                    total_change = 0
                    for s in ["d", "f", "j", "n", "e", "k"]:
                        for r in route:
                            if s == r:
                                total_change += 1
                    if total_change > 3:
                        continue
                    # Insert the route at its sorted position, or append it
                    # at the end if it is the most expensive so far.
                    tmp = len(k_paths)
                    for i in range(tmp):
                        if k_paths[i][1] >= route_len:
                            k_paths.insert(i, (route, route_len))
                            break
                        elif i == tmp - 1:
                            k_paths.append((route, route_len))
            # Evaporate pheromone before the next iteration.
            world.update_pheromone()
        # ############### end of iterations ##############
        # Report the routes found for this OD pair.
        k_path_od = []  # ((route, cost), Sx weight) per accepted route
        if len(best_route) == 0:
            print("search K-path for OD:" + od + " fail,please run again any way.")
        else:
            # Keep only routes within 10 minutes of the shortest one
            # (k_paths is sorted, so we can stop at the first overrun).
            for k_path in range(len(k_paths)):
                if k_paths[k_path][1] - shortest_len > 10:
                    break
                else:
                    print(
                        "The "
                        + str(k_path + 1)
                        + " K-path for OD: "
                        + od
                        + "->"
                        + "-".join(k_paths[k_path][0])
                        + " time:"
                        + str(k_paths[k_path][1])
                        + " min"
                    )
                    k_path_od.append((k_paths[k_path], Sx(k_paths[k_path][1], shortest_len)))
            # Cross-section (segment) flow assignment.
            # Single route: the whole OD demand is assigned to it.
            if len(k_path_od) == 1:
                # print("只有一条路")
                for i in range(len(k_path_od[0][0][0]) - 1):
                    Settings.ALL_TWO[k_path_od[0][0][0][i] + k_path_od[0][0][0][i + 1]] += Settings.O_D[
                        od
                    ]
            else:
                # Several routes: split the demand with the Pk share.
                for k_path in range(len(k_path_od)):
                    # NOTE(review): k_two collects route COSTS (index
                    # [i][0][1]); the Sx weight stored at index [i][1]
                    # looks like the intended value for Pk — confirm.
                    k_two = [k_path_od[i][0][1] for i in range(len(k_path_od))]
                    k_two.insert(0, k_path_od[k_path][0][1])
                    for i in range(len(k_path_od[k_path][0][0]) - 1):
                        Settings.ALL_TWO[
                            k_path_od[k_path][0][0][i] + k_path_od[k_path][0][0][i + 1]
                        ] += Settings.O_D[od] * Pk(k_two)
# 配送相关处理,求断面流量
def Sx(Ck, Cmin):
    """Gaussian attractiveness score of path k based on its travel-time excess.

    :param Ck: travel time of path k (minutes)
    :param Cmin: shortest travel time found so far (minutes)
    :return: exp(-2 * (Ck - Cmin)^2 / 25); equals 1.0 when Ck == Cmin
    """
    excess = Ck - Cmin
    return math.exp(-2 * (excess ** 2 / 25.0))
def Pk(Sk):
    """Share of the first score relative to the sum of the remaining scores.

    :param Sk: sequence whose head is the score of the path of interest and
               whose tail holds the scores of all candidate paths
    :return: Sk[0] / sum(Sk[1:]) as a float
    """
    head, *rest = Sk
    return 1.0 * head / sum(rest)
# 启动算法
if __name__ == "__main__":
    # Entry point: run the ant-colony K-shortest-route search, then report
    # the accumulated section flows.
    print("please waiting...")
    Init_ACO_K_ShortRoute()
    al = Settings.ALL_TWO.keys()
    for i in al:
        # Non-zero fractional part -> round the flow up before printing.
        # NOTE(review): sections whose flow is an exact integer are never
        # printed — presumably intentional (only fractional flows shown);
        # confirm against the original requirements.
        if int(Settings.ALL_TWO[i]) - Settings.ALL_TWO[i]:
            print(i, int(Settings.ALL_TWO[i]) + 1)
|
<filename>services/web/project/models.py<gh_stars>0
"""Database models."""
from . import db
from flask_login import UserMixin, _compat
from flask_login._compat import text_type
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, ForeignKey, String, Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
"""User Object"""
class User(db.Model):
    """User account model backed by the ``users`` table."""

    __tablename__ = 'users'

    # Surrogate primary key.
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    name = db.Column(
        db.String(100),
        unique=False,
        nullable=False
    )
    # Role discriminator (free-form string; values defined by the application).
    user_type = db.Column(
        db.String(40),
        unique=False,
        nullable=False
    )
    email = db.Column(
        db.String(40),
        unique=True,
        nullable=False
    )
    # Holds the salted hash written by set_password(), never plaintext.
    password = db.Column(
        db.String(200),
        primary_key=False,
        unique=False,
        nullable=False
    )
    organization = db.Column(
        db.String(60),
        index=False,
        unique=False,
        nullable=True
    )
    created_on = db.Column(
        db.DateTime,
        index=False,
        unique=False,
        nullable=True
    )
    last_login = db.Column(
        db.DateTime,
        index=False,
        unique=False,
        nullable=True
    )

    # Back-reference to the Retention association rows linking this user
    # to documents.
    documents = relationship(
        'Retention',
        back_populates='user'
    )

    # --- UserMixin-style members required by flask-login ---

    @property
    def is_active(self):
        """All accounts are treated as active."""
        return True

    @property
    def is_authenticated(self):
        """Any loaded User instance counts as authenticated."""
        return True

    @property
    def is_anonymous(self):
        """Real accounts are never anonymous."""
        return False

    def get_id(self):
        """Return the unicode id flask-login stores in the session."""
        try:
            return text_type(self.id)
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')

    # --- Password handling ---

    def set_password(self, password):
        """Hash *password* and store the hash on the model.

        Bug fix: the original assignment called a mangled name
        (``<PASSWORD>_password_hash``); the intended callee is werkzeug's
        ``generate_password_hash`` imported at the top of this module.
        """
        self.password = generate_password_hash(
            password,
            method='sha256'
        )

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password, password)
"""Document Object"""
class Document(db.Model):
    """Document model."""
    """Describes table which includes documents."""
    __tablename__ = 'documents'
    # Surrogate primary key.
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    document_name = db.Column(
        db.String(100),
        unique=False,
        nullable=True
    )
    # Full document content, capped at 1000 characters by the column type.
    document_body = db.Column(
        db.String(1000),
        unique=False,
        nullable=True
    )
    created_on = db.Column(
        db.DateTime,
        index=False,
        unique=False,
        nullable=True
    )
    """backreferences User class on retentions table"""
    # Association rows (Retention) that link users to this document.
    users = relationship(
        'Retention',
        back_populates='document'
    )
"""Association Object - User Retentions of Documents"""
class Retention(db.Model):
    """Model for who retains which document"""
    """Associate database."""
    __tablename__ = 'retentions'
    # Part of a composite primary key together with sponsor_id/document_id.
    id = db.Column(
        db.Integer,
        primary_key=True,
        autoincrement=True
    )
    # FK to users.id: the sponsoring user of this retention.
    sponsor_id = db.Column(
        db.Integer,
        db.ForeignKey('users.id'),
        primary_key=True,
        unique=False,
        nullable=True
    )
    # NOTE(review): editor_id has no ForeignKey and is not part of the PK —
    # presumably it should also reference users.id; confirm intent.
    editor_id = db.Column(
        db.Integer,
        unique=False,
        nullable=True
    )
    # FK to documents.id: the retained document.
    document_id = db.Column(
        db.Integer,
        db.ForeignKey('documents.id'),
        primary_key=True,
        unique=False,
        nullable=True
    )
    """backreferences to user and document tables"""
    user = db.relationship(
        'User',
        back_populates='documents'
    )
    document = db.relationship(
        'Document',
        back_populates='users'
    )
<filename>tests/test_production.py
import unittest
from generative.lsystem.grammar import RuleMapping, Token
from generative.lsystem.production import RuleParser
class RuleParsingParser(unittest.TestCase):
    """Tests for RuleParser._parse: raw rule strings -> intermediate dicts.

    Covers both tokenization modes: the default single-character mode and
    delimited mode (RuleParser(True)), where commas/whitespace separate
    multi-character tokens.
    """

    def test_simple(self):
        # Default mode: every character of the RHS is its own token.
        parser = RuleParser()
        rule = "a -> ab"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])
        # You can still use commas and whitespace to separate tokens.
        rule = "a -> a,b"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])
        rule = "a -> a b"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])

    def test_simple_delimited(self):
        # Delimited mode: "ab" with no separator stays one token.
        parser = RuleParser(True)
        rule = "a -> a,b"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])
        rule = "a -> ab"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["ab"])
        rule = "a -> a b"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])
        # Mixed whitespace (tabs/newlines) also acts as a delimiter.
        rule = "a -> a\t\t \nb"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["a", "b"])

    def test_probability(self):
        # "lhs: p -> rhs" attaches an application probability to the rule.
        parser = RuleParser()
        rule = "a: 0.5 -> b"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertEqual(result["probability"], 0.5)
        self.assertSequenceEqual(result["rhs"], ["b"])

    def test_left_context(self):
        # "a<b" means: b rewrites only when preceded by a.
        parser = RuleParser()
        rule = "a<b -> cde"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "b")
        self.assertSequenceEqual(result["rhs"], ["c", "d", "e"])
        self.assertEqual(result["left_context"], "a")

    def test_left_context_delimited(self):
        parser = RuleParser(True)
        rule = "a<b -> cd,e"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "b")
        self.assertSequenceEqual(result["rhs"], ["cd", "e"])
        self.assertEqual(result["left_context"], "a")

    def test_right_context(self):
        # "a>b" means: a rewrites only when followed by b.
        parser = RuleParser()
        rule = "a>b -> c"
        result = parser._parse(rule)
        self.assertEqual(result["lhs"], "a")
        self.assertEqual(result["right_context"], "b")
        self.assertSequenceEqual(result["rhs"], ["c"])

    def test_both_context(self):
        # Left and right context may be combined: "l<a>r".
        parser = RuleParser()
        rule = "l<a>r -> b"
        result = parser._parse(rule)
        self.assertEqual(result["left_context"], "l")
        self.assertEqual(result["right_context"], "r")

    def test_context_roll(self):
        # The context markers themselves can appear as context symbols.
        parser = RuleParser()
        rule = "<<a -> b"
        result = parser._parse(rule)
        self.assertEqual(result["left_context"], "<")
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["b"])
        rule = "><a -> b"
        result = parser._parse(rule)
        self.assertEqual(result["left_context"], ">")
        self.assertEqual(result["lhs"], "a")
        self.assertSequenceEqual(result["rhs"], ["b"])

    def test_ignore(self):
        # "#ignore" directives accept ":", whitespace, and comma separators.
        parser = RuleParser()
        rule = "#ignore:ab"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore ab"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore: a,b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore: a b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])

    def test_ignore_delimited(self):
        parser = RuleParser(True)
        rule = "#ignore a,b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore:a,b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore: a b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])
        rule = "#ignore: a, b"
        result = parser._parse(rule)
        self.assertSequenceEqual(result["ignore"], ["a", "b"])

    def test_fractal_plant(self):
        # Classic L-system plant rule: each RHS character is one token,
        # so the parsed RHS compares equal to the raw RHS string.
        rule = "G -> F-[[G]+G]+F[+FG]-G"
        parser = RuleParser()
        result = parser._parse(rule)
        self.assertSequenceEqual(result["rhs"], rule.split()[-1])
        # You can still use delimiters in single character mode.
        rule2 = "G -> F,-,[ [ G\t \n],+,G,]+F[+FG]- G"
        result = parser._parse(rule2)
        self.assertSequenceEqual(result["rhs"], rule.split()[-1])

    def test_fractal_plant_delimited(self):
        # Same rule spelled with explicit delimiters in delimited mode.
        rule = "G -> F-[[G]+G]+F[+FG]-G"
        rule2 = "G -> F,-,[,[,G,]\n+, G,\t\n ],+,F,[,+,F,G,],-,G"
        parser = RuleParser(True)
        result = parser._parse(rule2)
        self.assertSequenceEqual(result["rhs"], rule.split()[-1].replace(",", ""))
def tokenize(s: str):
    """Wrap each character of *s* in a Token and return them as a tuple."""
    return tuple(map(Token, s))
class RuleParsingMappings(unittest.TestCase):
    """Tests for RuleParser.parse: rule strings -> (lhs Token, RuleMapping)."""

    def test_simple(self):
        parser = RuleParser()
        rule = "a -> ab"
        lhs, mapping = parser.parse(rule)
        self.assertEqual(lhs, Token("a"))
        self.assertEqual(mapping, RuleMapping(tokenize("ab")))

    def test_simple_delimited(self):
        parser = RuleParser(True)
        rule = "a -> a, b"
        lhs, mapping = parser.parse(rule)
        self.assertEqual(lhs, Token("a"))
        self.assertEqual(mapping, RuleMapping(tokenize("ab")))

    def test_probability(self):
        # Probability from "lhs: p -> rhs" ends up on the RuleMapping.
        parser = RuleParser()
        rule = "a: 0.33 -> b"
        lhs, mapping = parser.parse(rule)
        self.assertEqual(lhs, Token("a"))
        self.assertEqual(mapping, RuleMapping(tokenize("b"), probability=0.33))

    def test_context_delimited(self):
        # Everything at once: contexts, probability, delimited multi-char tokens.
        parser = RuleParser(True)
        rule = "left < tok>right:0.2->prod,uct"
        lhs, mapping = parser.parse(rule)
        self.assertEqual(lhs, Token("tok"))
        self.assertEqual(
            mapping,
            RuleMapping(
                (Token("prod"), Token("uct")),
                probability=0.2,
                left_context=Token("left"),
                right_context=Token("right"),
            ),
        )

    def test_ignore_delimited(self):
        # "#ignore" directives return None and mutate parser.ignore instead.
        parser = RuleParser(True)
        rule = "#ignore a,b"
        result = parser.parse(rule)
        self.assertIsNone(result)
        self.assertIn("a", parser.ignore)
        self.assertIn("b", parser.ignore)

    def test_fractal_plant(self):
        # TODO(review): placeholder — no assertions implemented yet.
        pass
|
<gh_stars>100-1000
import errno
import functools
import logging
import sys
import webbrowser
import os
import click
from flask_compress import Compress
from flask_cors import CORS
from server.default_config import default_config
from server.app.app import Server
from server.common.config.app_config import AppConfig
from server.common.errors import DatasetAccessError, ConfigurationError
from server.common.utils.utils import sort_options
DEFAULT_CONFIG = AppConfig()
def annotation_args(func):
    """Decorator attaching the user-annotation CLI options to *func*.

    Defaults come from DEFAULT_CONFIG so CLI help always reflects the
    shipped configuration.
    """
    @click.option(
        "--disable-annotations",
        is_flag=True,
        default=not DEFAULT_CONFIG.dataset_config.user_annotations__enable,
        show_default=True,
        help="Disable user annotation of data.",
    )
    @click.option(
        "--annotations-file",
        default=DEFAULT_CONFIG.dataset_config.user_annotations__local_file_csv__file,
        show_default=True,
        multiple=False,
        metavar="<path>",
        help="CSV file to initialize editing of existing annotations; will be altered in-place. "
        "Incompatible with --user-generated-data-dir.",
    )
    @click.option(
        "--user-generated-data-dir",
        "--annotations-dir",
        default=DEFAULT_CONFIG.dataset_config.user_annotations__local_file_csv__directory,
        show_default=False,
        multiple=False,
        metavar="<directory path>",
        help="Directory of where to save output annotations; filename will be specified in the application. "
        "Incompatible with --annotations-file and --gene-sets-file.",
    )
    @click.option(
        "--disable-gene-sets-save",
        is_flag=True,
        default=DEFAULT_CONFIG.dataset_config.user_annotations__gene_sets__readonly,
        show_default=False,
        help="Disable saving gene sets. If disabled, users will be able to make changes to gene sets but all "
        "changes will be lost on browser refresh.",
    )
    @click.option(
        "--gene-sets-file",
        default=DEFAULT_CONFIG.dataset_config.user_annotations__local_file_csv__gene_sets_file,
        show_default=True,
        multiple=False,
        metavar="<path>",
        help="CSV file to initialize editing of gene sets; will be altered in-place. Incompatible with "
        "--user-generated-data-dir.",
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def config_args(func):
    """Decorator attaching presentation/diffexp/embedding CLI options to *func*."""
    @click.option(
        "--max-category-items",
        default=DEFAULT_CONFIG.dataset_config.presentation__max_categories,
        metavar="<integer>",
        show_default=True,
        help="Will not display categories with more distinct values than specified.",
    )
    @click.option(
        "--disable-custom-colors",
        is_flag=True,
        default=False,
        show_default=False,
        help="Disable user-defined category-label colors drawn from source data file.",
    )
    @click.option(
        "--diffexp-lfc-cutoff",
        "-de",
        default=DEFAULT_CONFIG.dataset_config.diffexp__lfc_cutoff,
        show_default=True,
        metavar="<float>",
        help="Minimum log fold change threshold for differential expression.",
    )
    @click.option(
        "--disable-diffexp",
        is_flag=True,
        default=not DEFAULT_CONFIG.dataset_config.diffexp__enable,
        show_default=False,
        help="Disable on-demand differential expression.",
    )
    @click.option(
        "--embedding",
        "-e",
        default=DEFAULT_CONFIG.dataset_config.embeddings__names,
        multiple=True,
        show_default=False,
        metavar="<text>",
        help="Embedding name, eg, 'umap'. Repeat option for multiple embeddings. Defaults to all.",
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def dataset_args(func):
    """Decorator attaching dataset-identification CLI options to *func*."""
    @click.option(
        "--obs-names",
        "-obs",
        default=DEFAULT_CONFIG.server_config.single_dataset__obs_names,
        metavar="<text>",
        help="Name of annotation field to use for observations. If not specified cellxgene will use the the obs index.",
    )
    @click.option(
        "--var-names",
        "-var",
        default=DEFAULT_CONFIG.server_config.single_dataset__var_names,
        metavar="<text>",
        help="Name of annotation to use for variables. If not specified cellxgene will use the the var index.",
    )
    @click.option(
        "--backed",
        "-b",
        is_flag=True,
        default=DEFAULT_CONFIG.server_config.adaptor__anndata_adaptor__backed,
        show_default=False,
        help="Load anndata in file-backed mode. " "This may save memory, but may result in slower overall performance.",
    )
    @click.option(
        "--title",
        "-t",
        default=DEFAULT_CONFIG.server_config.single_dataset__title,
        metavar="<text>",
        help="Title to display. If omitted will use file name.",
    )
    @click.option(
        "--about",
        default=DEFAULT_CONFIG.server_config.single_dataset__about,
        metavar="<URL>",
        help="URL providing more information about the dataset (hint: must be a fully specified absolute URL).",
    )
    @click.option(
        "--X-approximate-distribution",
        default=DEFAULT_CONFIG.dataset_config.X_approximate_distribution,
        show_default=True,
        type=click.Choice(["auto", "normal", "count"], case_sensitive=False),
        help="Specify the approximate distribution of X matrix values. 'auto' will use a heuristic "
        "to determine the approximate distribution. Mode 'auto' is incompatible with --backed.",
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def server_args(func):
    """Decorator attaching server/network CLI options to *func*."""
    @click.option(
        "--debug",
        "-d",
        is_flag=True,
        default=DEFAULT_CONFIG.server_config.app__debug,
        show_default=True,
        help="Run in debug mode. This is helpful for cellxgene developers, "
        "or when you want more information about an error condition.",
    )
    @click.option(
        "--verbose",
        "-v",
        is_flag=True,
        default=DEFAULT_CONFIG.server_config.app__verbose,
        show_default=True,
        help="Provide verbose output, including warnings and all server requests.",
    )
    @click.option(
        "--port",
        "-p",
        metavar="<port>",
        default=DEFAULT_CONFIG.server_config.app__port,
        type=int,
        show_default=True,
        help="Port to run server on. If not specified cellxgene will find an available port.",
    )
    @click.option(
        "--host",
        metavar="<IP address>",
        default=DEFAULT_CONFIG.server_config.app__host,
        show_default=False,
        help="Host IP address. By default cellxgene will use localhost (e.g. 127.0.0.1).",
    )
    @click.option(
        "--scripts",
        "-s",
        default=DEFAULT_CONFIG.dataset_config.app__scripts,
        multiple=True,
        metavar="<text>",
        help="Additional script files to include in HTML page. If not specified, "
        "no additional script files will be included.",
        show_default=False,
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def launch_args(func):
    """Aggregate decorator: stacks all option groups plus launch-only options.

    Applies annotation_args/config_args/dataset_args/server_args and adds the
    datapath argument, browser/config-file/dump-config options, and --help.
    """
    @annotation_args
    @config_args
    @dataset_args
    @server_args
    @click.argument("datapath", required=False, metavar="<path to data file>")
    @click.option(
        "--open",
        "-o",
        "open_browser",
        is_flag=True,
        default=DEFAULT_CONFIG.server_config.app__open_browser,
        show_default=True,
        help="Open web browser after launch.",
    )
    @click.option(
        "--config-file",
        "-c",
        "config_file",
        default=None,
        show_default=True,
        help="Location to yaml file with configuration settings",
    )
    @click.option(
        "--dump-default-config",
        "dump_default_config",
        is_flag=True,
        default=False,
        show_default=True,
        help="Print default configuration settings and exit",
    )
    @click.help_option("--help", "-h", help="Show this message and exit.")
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def handle_scripts(scripts):
    """Warn about the security implications of --scripts and require
    interactive confirmation; aborts the CLI if the user declines.
    """
    if scripts:
        click.echo(
            r"""
 / / /\ \ \__ _ _ __ _ __ (_)_ __ __ _
 \ \/ \/ / _` | '__| '_ \| | '_ \ / _` |
 \ /\ / (_| | | | | | | | | | | (_| |
 \/ \/ \__,_|_| |_| |_|_|_| |_|\__, |
 |___/
 The --scripts flag is intended for developers to include google analytics etc. You could be opening yourself to a
 security risk by including the --scripts flag. Make sure you trust the scripts that you are including.
 """
        )
        scripts_pretty = ", ".join(scripts)
        # abort=True raises click.Abort on "no", stopping the launch.
        click.confirm(f"Are you sure you want to inject these scripts: {scripts_pretty}?", abort=True)
class CliLaunchServer(Server):
    """
    the CLI runs a local web server, and needs to enable a few more features.
    """
    def __init__(self, app_config):
        super().__init__(app_config)

    @staticmethod
    def _before_adding_routes(app, app_config):
        # Enable gzip compression for the common text/JSON response types.
        app.config["COMPRESS_MIMETYPES"] = [
            "text/html",
            "text/css",
            "text/xml",
            "application/json",
            "application/javascript",
            "application/octet-stream",
        ]
        Compress(app)
        # Cross-origin requests (with credentials) are only allowed when
        # running in debug mode.
        if app_config.server_config.app__debug:
            CORS(app, supports_credentials=True)
@sort_options
@click.command(
    short_help="Launch the cellxgene data viewer. " "Run `cellxgene launch --help` for more information.",
    options_metavar="<options>",
)
@launch_args
def launch(
    datapath,
    verbose,
    debug,
    open_browser,
    port,
    host,
    embedding,
    obs_names,
    var_names,
    max_category_items,
    disable_custom_colors,
    diffexp_lfc_cutoff,
    title,
    scripts,
    about,
    disable_annotations,
    annotations_file,
    user_generated_data_dir,
    gene_sets_file,
    disable_gene_sets_save,
    backed,
    disable_diffexp,
    config_file,
    dump_default_config,
    # NOTE: bound by click from the --X-approximate-distribution option
    # (option names are lower-cased into parameter names).
    x_approximate_distribution,
):
    """Launch the cellxgene data viewer.
    This web app lets you explore single-cell expression data.
    Data must be in a format that cellxgene expects.
    Read the "getting started" guide to learn more:
    https://github.com/chanzuckerberg/cellxgene-documentation/blob/main/README.md

    Examples:

    > cellxgene launch example-dataset/pbmc3k.h5ad --title pbmc3k

    > cellxgene launch <your data file> --title <your title>

    > cellxgene launch <url>"""
    # --dump-default-config short-circuits everything else.
    if dump_default_config:
        print(default_config)
        sys.exit(0)
    # Startup message
    click.echo("[cellxgene] Starting the CLI...")
    # app config
    app_config = AppConfig()
    server_config = app_config.server_config
    try:
        # Config file (if any) is applied first; CLI flags override it below.
        if config_file:
            app_config.update_from_config_file(config_file)
        # Determine which config options were give on the command line.
        # Those will override the ones provided in the config file (if provided).
        cli_config = AppConfig()
        cli_config.update_server_config(
            app__verbose=verbose,
            app__debug=debug,
            app__host=host,
            app__port=port,
            app__open_browser=open_browser,
            single_dataset__datapath=datapath,
            single_dataset__title=title,
            single_dataset__about=about,
            single_dataset__obs_names=obs_names,
            single_dataset__var_names=var_names,
            adaptor__anndata_adaptor__backed=backed,
        )
        cli_config.update_dataset_config(
            app__scripts=scripts,
            user_annotations__enable=not disable_annotations,
            user_annotations__local_file_csv__file=annotations_file,
            user_annotations__local_file_csv__directory=user_generated_data_dir,
            user_annotations__local_file_csv__gene_sets_file=gene_sets_file,
            user_annotations__gene_sets__readonly=disable_gene_sets_save,
            presentation__max_categories=max_category_items,
            presentation__custom_colors=not disable_custom_colors,
            embeddings__names=embedding,
            diffexp__enable=not disable_diffexp,
            diffexp__lfc_cutoff=diffexp_lfc_cutoff,
            X_approximate_distribution=x_approximate_distribution,
        )
        # Only copy over settings that differ from the defaults, so the
        # config file keeps precedence for anything left at its default.
        diff = cli_config.server_config.changes_from_default()
        changes = {key: val for key, val, _ in diff}
        app_config.update_server_config(**changes)
        diff = cli_config.dataset_config.changes_from_default()
        changes = {key: val for key, val, _ in diff}
        app_config.update_dataset_config(**changes)
        # process the configuration
        # any errors will be thrown as an exception.
        # any info messages will be passed to the messagefn function.
        def messagefn(message):
            click.echo("[cellxgene] " + message)
        # Use a default secret if one is not provided
        # SECURITY NOTE(review): hardcoded fallback session secret — fine for
        # a local single-user viewer, unsafe for any shared deployment.
        if not server_config.app__flask_secret_key:
            app_config.update_server_config(app__flask_secret_key="SparkleAndShine")
        app_config.complete_config(messagefn)
    except (ConfigurationError, DatasetAccessError) as e:
        raise click.ClickException(e)
    # Confirm (interactively) any injected scripts before serving them.
    handle_scripts(scripts)
    # create the server
    server = CliLaunchServer(app_config)
    if not server_config.app__verbose:
        log = logging.getLogger("werkzeug")
        log.setLevel(logging.ERROR)
    cellxgene_url = f"http://{app_config.server_config.app__host}:{app_config.server_config.app__port}"
    if server_config.app__open_browser:
        click.echo(f"[cellxgene] Launching! Opening your browser to {cellxgene_url} now.")
        webbrowser.open(cellxgene_url)
    else:
        click.echo(f"[cellxgene] Launching! Please go to {cellxgene_url} in your browser.")
    click.echo("[cellxgene] Type CTRL-C at any time to exit.")
    # Quiet mode: discard everything Flask prints to stdout from here on.
    if not server_config.app__verbose:
        f = open(os.devnull, "w")
        sys.stdout = f
    try:
        server.app.run(
            host=server_config.app__host,
            debug=server_config.app__debug,
            port=server_config.app__port,
            threaded=not server_config.app__debug,
            use_debugger=False,
            use_reloader=False,
        )
    except OSError as e:
        if e.errno == errno.EADDRINUSE:
            raise click.ClickException("Port is in use, please specify an open port using the --port flag.") from e
        raise
|
load(
"//AvrToolchain:cc_toolchain/third_party.bzl",
"add_compiler_option_if_supported",
"get_cxx_inc_directories",
)
def _get_treat_warnings_as_errors_flags(repository_ctx, gcc):
    """Return the subset of hard-error warning flags that `gcc` supports."""

    # These warnings almost always indicate genuine coding errors, so they
    # are promoted to errors when the compiler understands them.
    candidate_flags = [
        "-Werror=null-dereference",
        "-Werror=return-type",
        "-Werror=incompatible-pointer-types",
        "-Werror=int-conversion",
    ]
    supported = []
    for candidate in candidate_flags:
        supported += add_compiler_option_if_supported(
            repository_ctx,
            gcc,
            candidate,
        )
    return supported
def create_cc_toolchain_config_rule(repository_ctx, gcc):
    """Instantiate cc_toolchain_config.bzl from its template.

    Substitutes the probed warnings-as-errors flag list (stringified) for the
    @warnings_as_errors@ placeholder.
    """
    repository_ctx.template(
        "cc_toolchain/cc_toolchain_config.bzl",
        repository_ctx.path(Label("@EmbeddedSystemsBuildScripts//AvrToolchain:cc_toolchain/cc_toolchain_config.bzl.tpl")),
        substitutions = {
            "@warnings_as_errors@": "{}".format(_get_treat_warnings_as_errors_flags(repository_ctx, gcc)),
        },
    )
def create_toolchain_definitions(mcus, repository_ctx):
    """Build the BUILD-file text for the AVR cc_toolchain package.

    Emits one shared header (filegroups) plus, per MCU, a
    cc_toolchain_config / cc_toolchain / toolchain triple.
    Returns the concatenated BUILD file contents as a string.
    """
    # Shared header: visibility plus the file groups every toolchain uses.
    cc_toolchain_template = """load("@AvrToolchain//cc_toolchain:cc_toolchain_config.bzl",
"cc_toolchain_config")

package(default_visibility = ["//visibility:public"])

filegroup(
name = "empty",
srcs = [],
)

filegroup(
name = "all_files",
srcs = [
":avr-gcc.sh",
"@avr-binutils//:bin",
"@avr-gcc-unwrapped//:bin",
"@avr-libc//:include",
"@avr-libc//:lib",
"@avr-gcc-unwrapped//:include",
],
)
"""
    # Per-MCU stanza; {mcu} and {host_system_name} are filled in below.
    mcu_specific = """
cc_toolchain_config(
name = "avr_cc_toolchain_config_{mcu}",
host_system_name = "{host_system_name}",
mcu = "{mcu}",
target_system_name = "avr-{mcu}",
)

cc_toolchain(
name = "avr_cc_toolchain_{mcu}",
all_files = ":all_files",
compiler_files = ":all_files",
dwp_files = ":empty",
linker_files = ":all_files",
objcopy_files = ":empty",
strip_files = ":empty",
toolchain_config = ":avr_cc_toolchain_config_{mcu}",
toolchain_identifier = "avr-toolchain-{mcu}",
)

toolchain(
name = "cc-toolchain-avr-{mcu}",
target_compatible_with = [
"@AvrToolchain//platforms/mcu:{mcu}",
"@AvrToolchain//platforms:avr",
],
exec_compatible_with = [
],
toolchain = ":avr_cc_toolchain_{mcu}",
toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
)
"""
    # Append one stanza per requested MCU.
    for mcu in mcus:
        cc_toolchain_template += mcu_specific.format(
            mcu = mcu,
            host_system_name = repository_ctx.os.name,
        )
    return cc_toolchain_template
def create_cc_toolchain_package(repository_ctx, paths):
    """Materialize the cc_toolchain package: BUILD file, wrapper script,
    and the toolchain config rule file.

    :param repository_ctx: repository rule context
    :param paths: mapping from label strings to resolved template paths
    """
    mcu_list = repository_ctx.attr.mcu_list
    repository_ctx.file(
        "cc_toolchain/BUILD",
        create_toolchain_definitions(
            mcu_list,
            repository_ctx,
        ),
    )
    # Copy the avr-gcc wrapper script verbatim (no substitutions given, so
    # repository_ctx.template's defaults apply).
    repository_ctx.template(
        "cc_toolchain/avr-gcc.sh",
        paths["@EmbeddedSystemsBuildScripts//AvrToolchain:cc_toolchain/avr-gcc.sh"],
    )
    # NOTE(review): the wrapper script's *label string* is passed as the `gcc`
    # argument here; add_compiler_option_if_supported presumably expects an
    # executable path — verify this is intentional.
    create_cc_toolchain_config_rule(repository_ctx, "@EmbeddedSystemsBuildScripts//AvrToolchain:cc_toolchain/avr-gcc.sh")
|
from __future__ import annotations
import numpy as np
from numpy import ndarray as Array
from functools import total_ordering
from typing import List, Dict, Tuple, Iterable, Callable, NamedTuple, Union, Optional
class Point(NamedTuple):
    """An (x, y) grid position supporting component-wise arithmetic.

    Arithmetic accepts either another Point or any indexable pair, and
    always yields a new Point.
    """

    x: int = 0
    y: int = 0

    def __add__(self, other: Union[Point, Tuple[int, int]]) -> Point:
        """Component-wise sum of this point and *other*."""
        dx, dy = other[0], other[1]
        return Point(self.x + dx, self.y + dy)

    def __sub__(self, other: Union[Point, Tuple[int, int]]) -> Point:
        """Component-wise difference of this point and *other*."""
        dx, dy = other[0], other[1]
        return Point(self.x - dx, self.y - dy)

    def __mod__(self, other: Union[Point, Tuple[int, int]]) -> Point:
        """Component-wise modulo of this point by *other*."""
        mx, my = other[0], other[1]
        return Point(self.x % mx, self.y % my)

    @staticmethod
    def manhattan_distance(a: Point, b: Point) -> Point:
        """Per-axis absolute differences between *a* and *b*.

        Returned as a Point so callers can unpack the x and y distances
        separately.
        """
        return Point(abs(a.x - b.x), abs(a.y - b.y))
class State(NamedTuple):
    """
    Board state, consisting of cost and associated board
    """
    cost: int    # cost of the move that produced this board
    target: int  # tile value moved by this state's move — NOTE(review):
                 # _check_target builds States with both the moved tile and a
                 # literal 0 here; confirm which semantics consumers rely on
    board: Board  # resulting board after the move
@total_ordering
class Board:
    """
    Chi-Puzzle board
    """
    def __init__(self, array: Array, zero: Point, goals: Tuple[Array, ...], heuristic: Optional[Callable[[Board], int]], sort_g: bool, cost: int = 0, parent: Optional[State] = None) -> None:
        """
        Creates a new board with the specified parameters
        :param array: Board array
        :param zero: Position of the zero
        :param goals: Tuple of goal states
        :param heuristic: Heuristic function used
        :param sort_g: If True, order boards by f = g + h (A*-style); if
                       False, order by h alone (greedy best-first)
        :param cost: Cost to reach this board
        :param parent: Parent board
        """
        self.height: int
        self.width: int
        self.height, self.width = array.shape
        self.array = array
        self.zero = zero
        self.goals = goals
        self._heuristic = heuristic
        self._sort_g = sort_g
        self.g = cost
        self.parent = parent
        # Hash is computed lazily on first use (see __hash__).
        self._hash: Optional[int] = None
        # A board is a goal if it matches any of the provided goal arrays.
        self.is_goal = any(np.array_equal(self.array, goal) for goal in self.goals)
        # Calculate heuristic
        self.h = heuristic(self) if heuristic is not None else 0
        # f drives the ordering used by __lt__ below.
        self.f = self.g + self.h if self._sort_g else self.h

    # region Ordering
    def __hash__(self) -> int:
        # Hash only the tile layout, so equal layouts collide regardless of g/h.
        if self._hash is None:
            self._hash = hash(tuple(self.array.flat))
        return self._hash

    def __eq__(self, other: Board) -> bool:
        # Equality is layout equality; cost and parent are ignored.
        return np.array_equal(self.array, other.array)

    def __lt__(self, other: Board) -> bool:
        # total_ordering derives the remaining comparisons from __eq__/__lt__.
        return self.f < other.f

    def __str__(self) -> str:
        return " ".join(map(str, self.array.flat))
    # endregion

    # region Move generation
    def generate_moves(self) -> Iterable[State]:
        """
        Generates all possible moves from the current board state
        :return: Iterable of all the possible moves
        """
        # Deduplicated by target square; cheaper move wins (see _check_target).
        targets: Dict[Point, State] = {}
        # Check cardinal direction moves
        self._check_cardinal(self.zero + (1, 0), targets)
        self._check_cardinal(self.zero - (1, 0), targets)
        self._check_cardinal(self.zero + (0, 1), targets)
        self._check_cardinal(self.zero - (0, 1), targets)
        # Check the corner moves
        max_x = self.height - 1
        max_y = self.width - 1
        if self.zero in ((0, 0), (max_x, max_y)):
            # Top left/bottom right corners
            self._check_corner(self.zero + (1, 1), targets)
            self._check_corner(self.zero - (1, 1), targets)
        elif self.zero in ((max_x, 0), (0, max_y)):
            # Bottom left/top right corners
            self._check_corner(self.zero + (1, -1), targets)
            self._check_corner(self.zero - (1, -1), targets)
        return targets.values()

    def _check_cardinal(self, target: Point, targets: Dict[Point, State]) -> None:
        """
        Checks for wrapping on a cardinal move and adjusts the position and cost
        :param target: Target move
        :param targets: Known targets so far
        """
        # Check if we're wrapping with this move
        if target.x in (-1, self.height) or target.y in (-1, self.width):
            # Wrapping moves cost 2 instead of 1.
            cost = 2
            target %= self.array.shape
        else:
            cost = 1
        self._check_target(target, cost, targets)

    def _check_corner(self, target: Point, targets: Dict[Point, State]) -> None:
        """
        Adjusts the wrapping on corner moves and sets the correct cost
        :param target: Target move
        :param targets: Known targets so far
        """
        # Adjust wrapping bounds
        target %= self.array.shape
        # Corner (diagonal) moves always cost 3.
        self._check_target(target, 3, targets)

    def _check_target(self, target: Point, cost: int, targets: Dict[Point, State]) -> None:
        """
        Validates the target move and adds it to or adjusts known targets if possible
        :param target: Target move
        :param cost: Move cost
        :param targets: Known targets so far
        """
        # Check if not in targets
        if target not in targets:
            # Copy array, then apply the move
            a = self.array.copy()
            t = a[target]
            # Swap the zero tile with the target tile.
            a[self.zero], a[target] = t, a[self.zero]
            board = Board(a, target, self.goals, self._heuristic, self._sort_g, self.g + cost, State(cost, t, self))
        else:
            # Check if we have a lower cost
            state = targets[target]
            if cost >= state.cost:
                # If not do nothing
                return
            # Reuse the same board if possible
            board = state.board
            board.g = self.g + cost
        # Create state
        # NOTE(review): the parent State above records the moved tile value
        # in its `target` field, while the State stored here uses a literal 0
        # — confirm the inconsistency is intentional.
        targets[target] = State(cost, 0, board)
    # endregion

    # region Heuristics
    @staticmethod
    def h0(self: Board) -> int:
        """
        Heuristic 0 - A naive heuristic base one the position of 0
        :param self: The board to calculate the heuristic for
        :return: The value of the heuristic
        """
        # 0 if the blank is already in the bottom-right cell, else 1.
        return 0 if self.array[(self.height - 1, self.width - 1)] == 0 else 1

    @staticmethod
    def h1(self: Board) -> int:
        """
        Heuristic 1 - Hamming Distance
        :param self: The board to calculate the heuristic for
        :return: The value of the heuristic
        """
        # Find the lowest heuristic over all goal states, return 0 if a goal state
        return min(map(self._heuristic_hamming, self.goals)) if not self.is_goal else 0

    @staticmethod
    def h2(self: Board) -> int:
        """
        Heuristic 2 - Wrapped Manhattan Distance
        We are using regular Manhattan Distance, and accounting for wraps.
        If a wrap is the shorter path, one is also added to account for the more expensive move.
        :param self: The board to calculate the heuristic for
        :return: The value of the heuristic
        """
        # Find the lowest heuristic over all goal states, return 0 if a goal state
        return min(map(self._heuristic_manhattan, self.goals)) if not self.is_goal else 0

    def _heuristic_hamming(self, goal: Array) -> int:
        """
        Hamming Distance heuristic
        :param goal: Goal state the calculate the heuristic from
        :return: The Hamming Distance from the given goal state
        """
        # Running total
        total = 0
        for index in np.ndindex(goal.shape):
            i = goal[index]
            if i == 0:
                # Skip zero since it's out "empty" position
                continue
            # If the spots do not match, add one
            if i != self.array[index]:
                total += 1
        return total

    def _heuristic_manhattan(self, goal: Array) -> int:
        """
        Manhattan Distance heuristic
        :param goal: Goal state the calculate the heuristic from
        :return: The Manhattan Distance from the given goal state
        """
        # Running total
        total = 0
        for index in np.ndindex(goal.shape):
            i = goal[index]
            if i == 0:
                # Skip zero since it's out "empty" position
                continue
            g = Point(*index)
            t = self._find_point(self.array, i)
            x, y = Point.manhattan_distance(g, t)
            # Take care of wrapping
            wraps = 0
            if x > self.height // 2:
                # Wrapping vertically is shorter.
                x = self.height - x
                wraps = 1
            if y > self.width // 2:
                # Wrapping horizontally is shorter.
                y = self.width - y
                wraps = 1
            # Make sure we don't add two wrapping penalties
            total += x + y + wraps
        return total
    # endregion

    # region Static methods
    @staticmethod
    def _find_point(array: Array, value: int) -> Point:
        # First (row, col) where array == value, as a Point.
        return Point(*np.asarray(np.where(array == value)).T[0])

    @staticmethod
    def from_list(data: List[int], shape: Tuple[int, int], heuristic: Optional[Callable[[Board], int]] = None, sort_g: bool = True, dtype: Optional[object] = np.int16) -> Board:
        """
        Creates a new board from a list and a specified size
        :param data: List to create the board from
        :param shape: Shape of the board (height, width)
        :param heuristic: Heuristic function
        :param sort_g: If the sorting should account g(n)
        :param dtype: Type used within the Numpy arrays
        :return: The created board
        """
        # Create the board array
        array: Array = np.array(data, dtype=dtype).reshape(shape)
        # Find the location of the zero
        zero = Board._find_point(array, 0)
        # Create both solution boards
        # g1: 1..n-1 followed by 0 (row-major); g2: same values column-major.
        g1: Array = np.roll(np.arange(array.size, dtype=dtype), -1).reshape(shape)
        g2: Array = g1.T.reshape(shape, order='F')
        return Board(array, zero, (g1, g2), heuristic, sort_g)
    # endregion
|
<gh_stars>1-10
# Created for aenea using libraries from the Dictation Toolbox
# https://github.com/dictation-toolbox/dragonfly-scripts
#
# Commands for interacting with Vim
#
# Author: <NAME>
# modified by: <NAME> # # Licensed under LGPL
from utility.vim_logic import lineJuggle_logic
from dragonfly import (
Dictation,
Grammar,
Key,
MappingRule,
Function,
IntegerRef,
Text,
Choice,
AppContext,
)
from utility.substitute_phrase import translate_spokenform_to_queryform
# Toggle for platform-specific key bindings below.
IS_WINDOWS = True
#vim_context = AppContext(executable="devenv", title="Microsoft visual studio")
#grammar = Grammar('vim', context=vim_context)
# Characters that can be spoken to wrap a word (used with vim-surround).
surroundCharsMap = {
    'quotes': '"',
    'parens': "(",
    'brackets': "[",
    'braces': "{",
}
# Key spec used to return Vim to normal mode.  On non-Windows systems
# Ctrl-\ Ctrl-n is used, which also leaves terminal-job mode.
key = 'escape,'
if not IS_WINDOWS:
    key = 'c-backslash, c-n'
goto_normal_mode_keys = key
goto_normal_mode = Key(key)
def goto_line_absolute(n):
    """Jump to absolute line *n*: type each digit, then press G."""
    for digit in str(n):
        Key(digit).execute()
    Key("G").execute()
def just_goto_line(n, dir):
    """Drop the ' mark, then move *n* lines in direction *dir* ('j' or 'k').

    A non-positive *n* only sets the mark and moves nowhere.
    """
    Key('m, squote').execute()
    if n > 0:
        for digit in str(n):
            Key(digit).execute()
        Key(dir).execute()
def lineJuggle(n1, n2, operation, linePrefix):
    """Build an ex command for a line-range operation and run it in Vim."""
    goto_normal_mode.execute()
    command = lineJuggle_logic(n1, n2, operation, linePrefix)
    Text(command).execute()
    Key("enter").execute()
# Spoken-phrase -> dragonfly action mapping for Vim.
basics_mapping = {
    'vim': Text("vim"),
    # Moving between splits
    # 'split-side': Key(goto_normal_mode_keys + "semicolon, v"),
    # 'split-down': Key(goto_normal_mode_keys + "semicolon, s"),
    'split-close': Key(goto_normal_mode_keys + "Z, Q"),
    'open [in] split': Key("s"),
    # 'open [in] tab': Key(goto_normal_mode_keys + "semicolon, t"),
    'visual block': Key("c-v"),
    'inner block': Key("i, b"),
    'paragraph': Key("a, p"),
    'visual line': Key("s-v"),
    'comment': Key("g, c, c"),
    'surround word [with] <surroundChar>': Key("s, a, W") + Text("%(surroundChar)s"),
    'stern <surroundChar>': Key("s, a, W") + Text("%(surroundChar)s"),
    # Moving viewport
    'set number': Key(goto_normal_mode_keys + "comma, dot"),
    'screen center': Key(goto_normal_mode_keys + "z, dot"),
    'screen top': Key(goto_normal_mode_keys + "z, t"),
    'screen bottom': Key(goto_normal_mode_keys + "z, b"),
    # Append to line
    'noop [<n0>]': goto_normal_mode + Function(lambda n0: just_goto_line(n0, 'j')) + Key("o"),
    'nope [<n0>]': goto_normal_mode + Function(lambda n0: just_goto_line(n0, 'j')) + Key("A"),
    'noop up [<n0>]': goto_normal_mode + Function(lambda n0: just_goto_line(n0, 'k')) + Key("o"),
    'nope up [<n0>]': goto_normal_mode + Function(lambda n0: just_goto_line(n0, 'k')) + Key("A"),
    'prepend': Key(goto_normal_mode_keys + "I"),
    'insert below': Key(goto_normal_mode_keys + "o"),
    'insert above': Key(goto_normal_mode_keys + "O"),
    'regret': Key(goto_normal_mode_keys + "u"),
    'read': Key(goto_normal_mode_keys + "c-r"),
    'escape': Key("escape"),
    'filename': Key(goto_normal_mode_keys + "c-g"),
    'save and quit': Key(goto_normal_mode_keys + "colon, w, q, enter"),
    'save': Key(goto_normal_mode_keys + "colon, w, enter"),
    'quit all': Key(goto_normal_mode_keys + "colon, q, a, enter"),
    'discard': Key(goto_normal_mode_keys + "colon, q, exclamation"),
    '(vim|vic) tab <n>': Key(goto_normal_mode_keys + "comma, %(n)d"),
    'comma': Key("comma"),
    '(rook|Brook|rock)': Key("right, colon, space"),
    'ghin front': Key(goto_normal_mode_keys + "zero, w"),
    'ghin back': Key(goto_normal_mode_keys + "dollar"),
    'auto format': Key(goto_normal_mode_keys + "colon, A, u, t, o, f, o, r, m, a, t, enter"),
    'quick (prev|previous)': Key("lbracket, q"),
    'quick next': Key(goto_normal_mode_keys + "rbracket, q"),
    'location (prev|previous)': Key(goto_normal_mode_keys + "lbracket, l"),
    'location next': Key(goto_normal_mode_keys + "rbracket, l"),
    # Finding text
    #'find <text>': Key(goto_normal_mode_keys + "slash") + Text("%(text)s"),
    'jump <text>': Key("escape, slash") + Function(translate_spokenform_to_queryform),
    'next': Key(goto_normal_mode_keys + "n"),
    'prev|previous': Key(goto_normal_mode_keys + "N"),
    'clear search': Key(goto_normal_mode_keys + "colon, n, o, h, enter"),
    'change [the] big word': Key(goto_normal_mode_keys + "c, a, W"),
    # BUGFIX: a later duplicate 'change [the] word' entry bound to "c, a, W"
    # used to silently shadow this small-word binding; it has been removed so
    # the word/big-word pair matches the 'remove' pair below.
    'change [the] word': Key(goto_normal_mode_keys + "c, a, w"),
    '(Sea|See) world': Key(goto_normal_mode_keys + "c, a, w"),
    '(Sea|See) inner block': Key(goto_normal_mode_keys + "c, i, b"),
    '(Sea|See) inner quote': Key(goto_normal_mode_keys + "c, i, quote"),
    '(Sea|See) inner sing': Key(goto_normal_mode_keys + "c, i, squote"),
    'dine inner block': Key(goto_normal_mode_keys + "d, i, b"),
    'dine inner quote': Key(goto_normal_mode_keys + "d, i, quote"),
    'dine inner sing': Key(goto_normal_mode_keys + "d, i, squote"),
    '(Sea|see) inner quote': Key(goto_normal_mode_keys + "c, i, quote"),
    'yank inner block': Key(goto_normal_mode_keys + "y, i, b"),
    'yank line': Key(goto_normal_mode_keys + "y, y"),
    '(pseudo|sudo|pseudo-) save': goto_normal_mode + Text(":w !sudo tee > /dev/null %%") + Key("enter"),
    'remove [the] word': Key(goto_normal_mode_keys + "d, a, w"),
    'remove [the] big word': Key(goto_normal_mode_keys + "d, a, W"),
    'gargle': Key(goto_normal_mode_keys + "D"),
    'behind [<n>]': Key(goto_normal_mode_keys + "e:%(n)d"),
    'ass [<n>]': Key(goto_normal_mode_keys + "E:%(n)d"),
    # Character operations
    'dart': Key("x"),
    'dart <n>': Key("x:%(n)d"),
    'replace letter': Key("r"),
    'replace mode': Key("R"),
    'change case': Key(goto_normal_mode_keys + "s-backtick"),
    # Window movement
    'window': Key("c-w"),
    #'remove [the] buffer': Key(goto_normal_mode_keys + "semicolon, d"),
    # Word operations
    'sword <n>': Key(goto_normal_mode_keys + "%(n)d, w"),
    'forward <n>': Key(goto_normal_mode_keys + "%(n)d, w"),
    'backward <n>': Key(goto_normal_mode_keys + "%(n)d, b"),
    'start': goto_normal_mode + Text("^"),
    'finish': goto_normal_mode + Text("$"),
    'quick run': goto_normal_mode + Key("comma, r"),
    'command mode': goto_normal_mode + Key("colon"),
    # Line operations
    'dine': goto_normal_mode + Function(lambda: lineJuggle(0, 0, "d", "+")),
    'dine <n>': goto_normal_mode + Function(lambda n: lineJuggle(n, n, "d", "+")),
    'dine up <n>': goto_normal_mode + Function(lambda n: lineJuggle(n, n, "d", "-")),
    'dine <n> (thru|through|to) <n2>': goto_normal_mode + Function(lambda n1, n2: lineJuggle(n1, n2, "d", "+")),
    'dine up <n> (thru|through|to) <n2>': goto_normal_mode + Function(lambda n1, n2: lineJuggle(n1, n2, "d", "-")),
    'yank ': goto_normal_mode + Function(lambda: lineJuggle(0, 0, "y", "+")),
    # BUGFIX: the four yank commands below previously ran the delete
    # operation ("d") and/or referenced an undefined name `n` inside the
    # range lambdas (raising NameError when spoken).
    'yank [down] <n>': goto_normal_mode + Function(lambda n: lineJuggle(n, n, "y", "+")),
    'yank up <n>': goto_normal_mode + Function(lambda n: lineJuggle(n, n, "y", "-")),
    'yank <n> (thru|through|to) <n2>': goto_normal_mode + Function(lambda n1, n2: lineJuggle(n1, n2, "y", "+")),
    'yank up <n> (thru|through|to) <n2>': goto_normal_mode + Function(lambda n1, n2: lineJuggle(n1, n2, "y", "-")),
    'select until <text>': Key(goto_normal_mode_keys + "v, t, slash") + Function(translate_spokenform_to_queryform),
    'select including <text>': Key(goto_normal_mode_keys + "v, f, slash") + Function(translate_spokenform_to_queryform),
    'dine until <text>': Key(goto_normal_mode_keys + "d, t") + Function(translate_spokenform_to_queryform),
    'dine including <text>': Key(goto_normal_mode_keys + "d, f") + Function(translate_spokenform_to_queryform),
    '(see|sea) until <text>': Key(goto_normal_mode_keys + "c, t") + Function(translate_spokenform_to_queryform),
    '(see|sea) including <text>': Key(goto_normal_mode_keys + "c, f") + Function(translate_spokenform_to_queryform),
    # The zs and ze denote pattern start and pattern end
    # I use this to move the cursor right after the character we are looking for
    # e.g. "next block" looks for a lparen, so I move to the character after it
    'next block': goto_normal_mode + Text("?(\\zs[^ ]") + Key("enter, N, N"),
    'pre block': goto_normal_mode + Text("?(\\zs[^ ]") + Key("enter"),
    'next quote': goto_normal_mode + Text("?\\v('|\")\\zs.\\ze.{-\\}('|\")") + Key("enter, N, N"),
    'pre quote': goto_normal_mode + Text("?\\v('|\")\\zs.\\ze.{-\\}('|\")") + Key("enter"),
    # Fancy operations
    'clay': Key(goto_normal_mode_keys + "c, i, b"),
    # Movement
    'go [down] [to] [line] <n>': goto_normal_mode + Function(lambda n: just_goto_line(n, 'j')),
    'go up [to] [line] <n>': goto_normal_mode + Function(lambda n: just_goto_line(n, 'k')),
    'go absolute to [line] <n>': goto_normal_mode + Function(goto_line_absolute),
    'matching': Key(goto_normal_mode_keys + "percent"),
    'rash': Key(goto_normal_mode_keys + "down, s-a"),
    'back': Key(goto_normal_mode_keys + "c-o"),
    'next tab': Key("g, t"),
    'previous tab': Key("g, s-t"),
    'new tab': goto_normal_mode + Text(":tabe | FZFMru") + Key("enter"),
    # Plug-ins
    'file explorer': Key(goto_normal_mode_keys + "colon") + Text("VimFilerExplorer") + Key("enter"),
}
class VimRule(MappingRule):
    """Dragonfly mapping rule binding the spoken commands in
    ``basics_mapping`` to Vim actions."""
    mapping = basics_mapping
    # Named elements referenced inside the spoken forms (<text>, <n>, ...).
    extras = [
        Dictation('text'),
        IntegerRef('n0', 0, 999),
        IntegerRef('n', 0, 999),
        IntegerRef('n2', 0, 999),
        Choice("surroundChar", surroundCharsMap),
    ]
    defaults = {
        "n0": 0,  # Default repeat count.
        "n": 1,  # Default repeat count.
    }
# Instantiate the rule so it can be registered with a Grammar.
# (A trailing " |" artifact that broke the statement has been removed.)
rules = VimRule()
<filename>dammit/app.py
# Copyright (C) 2015-2018 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import argparse
import glob
import logging
import os
import sys
from dammit import annotate
from dammit import databases
from dammit import log
from dammit import utils
from dammit import ui
from dammit.meta import __version__, __authors__, __description__, __date__, get_config
from dammit.annotate import (build_quick_pipeline,
build_default_pipeline,
build_full_pipeline,
build_nr_pipeline)
class DammitApp(object):
    """Command-line front end for dammit: builds the argument parser,
    merges configuration sources, and dispatches to the selected
    subcommand handler."""

    def __init__(self, arg_src=None):
        """Parse *arg_src* (default: ``sys.argv[1:]``) and assemble the
        effective configuration.

        Precedence (lowest to highest): packaged config, optional JSON
        ``--config-file``, command-line arguments.
        """
        # NOTE: the default was previously `arg_src=sys.argv[1:]`, which is
        # evaluated once at import time; resolve it at call time instead.
        if arg_src is None:
            arg_src = sys.argv[1:]
        self.logger = logging.getLogger(self.__class__.__name__)
        self.config_d, self.databases_d = get_config()
        self.parser = self.get_parser()
        self.args = self.parser.parse_args(arg_src)
        if hasattr(self.args, 'config_file') and self.args.config_file is not None:
            # BUGFIX: `json` was used here but never imported at module
            # level, so any run with --config-file raised NameError.
            import json
            with open(self.args.config_file) as fp:
                self.config_d.update(json.load(fp))
        self.config_d.update(vars(self.args))

    def run(self):
        """Print the banner and invoke the selected subcommand handler."""
        print(ui.header('dammit'))
        print(ui.header(__description__, level=2))
        about = '\nby {0}\n\n**v{1}**, {2}\n'.format(', '.join(__authors__),
                                                     __version__, __date__)
        print(about)
        # `func` defaults to parser.print_help when no subcommand was given.
        return self.args.func()

    def description(self):
        """Return the formatted program description for the parser."""
        return ui.header('dammit: ' + __description__)

    def epilog(self):
        """Return the parser epilog listing the available BUSCO groups."""
        return 'Available BUSCO groups are: '\
               '{0}'.format(', '.join(sorted(self.databases_d['BUSCO'].keys())))

    def get_parser(self):
        '''
        Build the main parser.
        '''
        parser = argparse.ArgumentParser(
            description=self.description(),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(func=parser.print_help)
        parser.add_argument('--debug', action='store_true', default=False)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + __version__)
        subparsers = parser.add_subparsers(title='dammit subcommands')

        def add_common_args(parser):
            ''' Add shared options to a parser.
            Shared options are added this way instead of to the main parser
            because I'd rather they come after the subcommand name.
            Args:
                parser (object): The parser to which arguments will be added.
            '''
            parser.add_argument('--database-dir',
                                default=databases.default_database_dir(self.logger),
                                help='Directory to store databases. Existing'\
                                     ' databases will not be overwritten.'\
                                     ' By default, the database directory is'\
                                     ' $HOME/.dammit/databases.'
                                )
            parser.add_argument('--busco-group',
                                default='metazoa',
                                metavar='[metazoa, eukaryota, vertebrata, ...]',
                                choices=list(self.databases_d['BUSCO'].keys()),
                                help='Which BUSCO group to use. Should be chosen'\
                                     ' based on the organism being annotated.'\
                                     ' Full list of options is below.'
                                )
            parser.add_argument('--n_threads',
                                type=int,
                                default=1,
                                help='For annotate, number of threads to pass to '\
                                     'programs supporting multithreading. For '\
                                     'databases, number of simultaneous tasks '\
                                     'to execute.'
                                )
            parser.add_argument('--config-file',
                                help='A JSON file providing values to override'\
                                     ' built-in config. Advanced use only!'
                                )
            parser.add_argument('--busco-config-file',
                                help='Path to an alternative BUSCO config'\
                                     ' file; otherwise, BUSCO will attempt'\
                                     ' to use its default installation'\
                                     ' which will likely only work on'\
                                     ' bioconda. Advanced use only!')
            parser.add_argument('--verbosity',
                                default=0,
                                type=int,
                                choices=[0,1,2],
                                help='Verbosity level for doit tasks.'
                                )
            parser.add_argument('--profile',
                                default=False,
                                action='store_true',
                                help='Profile task execution.')
            parser.add_argument('--force',
                                default=False,
                                action='store_true',
                                help='Ignore missing database tasks.')
            # --full / --nr / --quick select mutually exclusive pipelines.
            pgroup = parser.add_mutually_exclusive_group()
            pgroup.add_argument('--full',
                                action='store_true',
                                default=False,
                                help='Run a "complete" annotation; includes'\
                                     ' uniref90, which is left out of the'\
                                     ' default pipeline because it is huge'\
                                     ' and homology searches take a long'\
                                     ' time.'
                                )
            pgroup.add_argument('--nr',
                                action='store_true',
                                default=False,
                                help='Also include annotation to NR database, which'\
                                     ' is left out of the default and "full"'\
                                     ' pipelines because it is huge and'\
                                     ' homology searches take a long time.'
                                )
            pgroup.add_argument('--quick',
                                default=False,
                                action='store_true',
                                help='Run a "quick" annotation; excludes'\
                                     ' the Infernal Rfam tasks, the HMMER'\
                                     ' Pfam tasks, and the LAST OrthoDB'\
                                     ' and uniref90 tasks. Best for users'\
                                     ' just looking to get basic stats'\
                                     ' and conditional reciprocal best'\
                                     ' LAST from a protein database.')

        # Add the migrate subcommand.
        migrate_parser = subparsers.add_parser('migrate')
        migrate_parser.add_argument('--destructive', default=False,
                                    action='store_true')
        add_common_args(migrate_parser)
        migrate_parser.set_defaults(func=self.handle_migrate)

        # Add the databases subcommand.
        desc = '''Check for databases and optionally download and prepare them
               for use. By default, only check their status.'''
        databases_parser = subparsers.add_parser(
                               'databases',
                                description=desc,
                                epilog=self.epilog(),
                                help=desc,
                                formatter_class=argparse.ArgumentDefaultsHelpFormatter
                            )
        databases_parser.add_argument('--install',
                                      action='store_true',
                                      default=False,
                                      help='Install missing databases. Downloads'
                                           ' and preps where necessary'
                                      )
        add_common_args(databases_parser)
        databases_parser.set_defaults(func=self.handle_databases)

        # Add the annotation subcommand.
        desc = '''The main annotation pipeline. Calculates assembly stats;
               runs BUSCO; runs LAST against OrthoDB (and optionally uniref90),
               HMMER against Pfam, Inferal against Rfam, and Conditional Reciprocal
               Best-hit Blast against user databases; and aggregates all results in
               a properly formatted GFF3 file.'''
        annotate_parser = subparsers.add_parser(
                              'annotate',
                              usage='%(prog)s <transcriptome> [OPTIONS]',
                              description=desc,
                              epilog=self.epilog(),
                              help=desc,
                              formatter_class=argparse.ArgumentDefaultsHelpFormatter
                          )
        annotate_parser.add_argument('transcriptome',
                                     help='FASTA file with the transcripts to be'\
                                          ' annotated.'
                                     )
        annotate_parser.add_argument('-n', '--name',
                                     default='Transcript',
                                     help='Base name to use for renaming the'\
                                          ' input transcripts. The new names'\
                                          ' will be of the form <name>_<X>.'\
                                          ' It should not have spaces, pipes,'\
                                          ' ampersands, or other characters'\
                                          ' with special meaning to BASH.'
                                     )
        annotate_parser.add_argument('-e', '--evalue',
                                     default=1e-5,
                                     type=float,
                                     help='e-value cutoff for similarity'\
                                          ' searches.'
                                     )
        annotate_parser.add_argument('-o', '--output-dir',
                                     default=None,
                                     help='Output directory. By default this will'\
                                          ' be the name of the transcriptome file'\
                                          ' with `.dammit` appended'
                                     )
        annotate_parser.add_argument('--user-databases',
                                     nargs='+',
                                     default=[],
                                     help='Optional additional protein databases. '\
                                          ' These will be searched with CRB-blast.'
                                     )
        annotate_parser.add_argument('--sshloginfile',
                                     default=None,
                                     help='Distribute execution across the specified nodes.')
        add_common_args(annotate_parser)
        annotate_parser.set_defaults(func=self.handle_annotate)

        return parser

    def handle_migrate(self):
        """Rename (or symlink, when non-destructive) legacy OrthoDB database
        files by dropping the '.db' infix from their names."""
        with utils.Move(self.args.database_dir):
            odb_files = glob.glob('aa_seq_euk.fasta.db.*')
            for fn in odb_files:
                pre, _, suf = fn.partition('.db')
                newfn = pre + suf
                if self.args.destructive:
                    os.rename(fn, newfn)
                else:
                    os.symlink(fn, newfn)

    def handle_databases(self):
        """Check database status; install them when --install was given."""
        log.start_logging()
        print(ui.header('submodule: databases', level=2))
        handler = databases.get_handler(self.config_d)
        if self.args.quick:
            databases.build_quick_pipeline(handler,
                                           self.config_d,
                                           self.databases_d)
        else:
            databases.build_default_pipeline(handler,
                                             self.config_d,
                                             self.databases_d,
                                             with_uniref=self.args.full,
                                             with_nr=self.args.nr)
        if self.args.install:
            return databases.install(handler)
        else:
            databases.check_or_fail(handler)

    def handle_annotate(self):
        """Verify (or force past) database readiness, build the selected
        annotation pipeline, and run it."""
        log.start_logging()
        print(ui.header('submodule: annotate', level=2))
        db_handler = databases.get_handler(self.config_d)
        if self.args.quick:
            databases.build_quick_pipeline(db_handler,
                                           self.config_d,
                                           self.databases_d)
        else:
            databases.build_default_pipeline(db_handler,
                                             self.config_d,
                                             self.databases_d,
                                             with_uniref=self.args.full,
                                             with_nr=self.args.nr)
        if self.config_d['force'] is True:
            # With --force, report database status but do not abort.
            utd_msg = '*All database tasks up-to-date.*'
            ood_msg = '*Some database tasks out-of-date; '\
                      'FORCE is True, ignoring!'
            uptodate, statuses = db_handler.print_statuses(uptodate_msg=utd_msg,
                                                           outofdate_msg=ood_msg)
        else:
            databases.check_or_fail(db_handler)

        annotate_handler = annotate.get_handler(self.config_d, db_handler.files)
        if self.args.quick:
            build_quick_pipeline(annotate_handler,
                                 self.config_d,
                                 db_handler.files)
        elif self.args.full:
            build_full_pipeline(annotate_handler,
                                self.config_d,
                                db_handler.files)
        elif self.args.nr:
            build_nr_pipeline(annotate_handler,
                              self.config_d,
                              db_handler.files)
        else:
            build_default_pipeline(annotate_handler,
                                   self.config_d,
                                   db_handler.files)
        return annotate.run_annotation(annotate_handler)
|
<filename>AssignmentCode/RedBlackBST_Starter.py
class RedBlackBST:
    """ A Python Implementation of a Red-Black Binary Search Tree

    Left-leaning red-black BST: red links lean left, no node carries two red
    links, and every root-to-leaf path crosses the same number of black links.
    The iterative insertion maintains parent pointers so fix-ups can walk
    back toward the root without recursion.
    """

    class RedBlackNode:
        """Basic node representing the Key/Value and color of a link/node.
        """
        def __init__(self, key, value):
            """Returns a newly created `RedBlackNode` initiated to be a "Red" link.
            :param key: The unique, comparable object by which to retrieve the desired value.
            :param value: The value in which to store in the `RedBlackBST` object.
            """
            self.key = key
            self.value = value
            self.left = None
            self.right = None
            self.parent = None
            self.is_red = True  # NEW NODES ARE ALWAYS RED IN THIS IMPLEMENTATION TO DEFAULT THEM TO BE SO.

        def __str__(self):
            """Returns a string representation of a node, including the ids and colors of its left and right links.
            The pattern used is: `(left.key)<-[Red|Black]--(node.key)--[Red|Black]->(right.key)
            If either left or right nodes are blank, the key is `None` and the link color defaults to `Black`.
            :return: String representation of the desired node.
            """
            l_node = "None" if self.left is None else self.left.key
            l_link = "Black" if self.left is None or not self.left.is_red else " Red "
            r_node = "None" if self.right is None else self.right.key
            r_link = "Black" if self.right is None or not self.right.is_red else " Red "
            p_node = "None" if self.parent is None else self.parent.key
            p_link = " Red " if self.is_red else "Black"
            return f"({l_node})<--[{l_link}]--({self.key})--[{r_link}]-->({r_node}) [Parent: ({p_node})]"

    def __init__(self):
        """Creates an empty `RedBlackBST` (Red-Black Binary Search Tree)
        """
        self.root = None

    def insert_i(self, key, value):
        """Insert the proper value using an iterative method of traversal.
        Assumes the key provided is a comparable object, and assumes uniqueness. If the `Key` already exists in the
        structure, the provided value will overwrite the previous value for this key.
        :param key: The unique, comparable object by which to retrieve the desired value.
        :param value: The value in which to store in the `RedBlackBST`
        :return: `None`
        """
        insert_node = RedBlackBST.RedBlackNode(key, value)
        # SPECIAL CASE ROOT IS EMPTY.
        if self.root is None:
            self.root = insert_node
            self.root.is_red = False
            return
        # FIND WHERE TO INSERT (TRAVERSING LEFT AND RIGHT).
        curr = self.root
        while True:
            if key == curr.key:
                # Existing key: overwrite the value; no structural change.
                curr.value = value
                return
            elif key < curr.key:
                if curr.left is None:
                    curr.left = insert_node
                    insert_node.parent = curr
                    break
                curr = curr.left
            else:
                if curr.right is None:
                    curr.right = insert_node
                    insert_node.parent = curr
                    break
                curr = curr.right
        # ONCE INSERTED, TRAVERSE UP CURR.PARENT applying the same three
        # LLRB fix-ups the recursive _put performs at each level.
        curr = insert_node.parent
        while curr is not None:
            if self._right_is_red(curr) and not self._left_is_red(curr):
                curr = self._rotate_left_i(curr)
            if self._left_left_is_red(curr):
                curr = self._rotate_right_i(curr)
            if self._left_is_red(curr) and self._right_is_red(curr):
                self._flip_colors(curr)
            curr = curr.parent
        # The root link is always black.
        self.root.is_red = False

    def _rotate_left_i(self, node):
        """Perform a `rotation_left` around the node provided. Return the new root of newly rotated local cluster.
        Unlike the recursive variant, this maintains parent links and updates
        `self.root` when the rotation happens at the tree root.
        :param node: The node around which to rotate.
        :return: The new root that exists as a result of the rotation.
        """
        x = node.right
        node.right = x.left
        if x.left is not None:
            x.left.parent = node
        # Re-attach the rotated cluster to node's former parent.
        x.parent = node.parent
        if node.parent is None:
            self.root = x
        elif node.parent.left is node:
            node.parent.left = x
        else:
            node.parent.right = x
        x.left = node
        node.parent = x
        x.is_red = node.is_red
        node.is_red = True
        return x

    def _rotate_right_i(self, node):
        """Perform a `rotation_right` around the node provided. Return the new root of newly rotated local cluster.
        Maintains parent links and updates `self.root` when rotating the root.
        :param node: The node around which to rotate.
        :return: The new root that exists as a result of the rotation.
        """
        x = node.left
        node.left = x.right
        if x.right is not None:
            x.right.parent = node
        x.parent = node.parent
        if node.parent is None:
            self.root = x
        elif node.parent.left is node:
            node.parent.left = x
        else:
            node.parent.right = x
        x.right = node
        node.parent = x
        x.is_red = node.is_red
        node.is_red = True
        return x

    ########### THE BELOW METHODS ARE FOR STUDENT USE AND CAN BE USED AS IS IN THE INTERATIVE IMPLEMENTATION
    def _flip_colors(self, node):
        """Using the provided `node`, set both child links to black, and set the parent link to `Red`.
        :param node: The node for which the child colors and parent link should have their colors flipped.
        :return: None
        """
        node.is_red = True
        node.left.is_red = False
        node.right.is_red = False

    def _right_is_red(self, node):
        """Indicates whether the link to the right of the provided node is currently Red.
        :param node: The node of which the right link is viewed for redness.
        :return: `True` if `node.right` is red, `False` otherwise.
        """
        if node.right is None:
            return False
        else:
            return node.right.is_red

    def _left_is_red(self, node):
        """Indicates whether the link to the left of the provided node is currently Red.
        :param node: The node of which the left link is viewed for redness.
        :return: `True` if `node.left` is red, `False` otherwise.
        """
        if node.left is None:
            return False
        else:
            return node.left.is_red

    def _left_left_is_red(self, node):
        """Indicates whether there exists to consecutive left red links from the given node.
        :param node: The node from which to interrogate the left and left.left nodes for redness.
        :return: `True` if `node.left` is red and 'node.left.left` is red. `False` otherwise.
        """
        if node is None:
            return False
        else:
            return self._left_is_red(node) and self._left_is_red(node.left)

    def search(self, key):
        """Search for the desired Key.
        Uses binary search to locate and return the Value at the provided Key. If the Key is not found, `search` will
        return `None`, otherwise will return the Value stored at the key provided.
        :param key: The unique key by which to retrieve the desired value. Must be comparable.
        :return: The Value at the Key provided, if the Key is not found, `search` will return `None`
        """
        n = self._node_search(key)
        return n.value if n is not None else None

    def _node_search(self, key):
        """ Searches for the desired key and returns the `RedBlackNode` associated to that key.
        :param key: The unique key by which to retrieve the desired value. Must be comparable.
        :return: The `RedBlackNode` at the Key provided, if the Key is not found, `_node_search` will return `None`
        """
        curr = self.root
        while True:
            if curr is None:
                return None
            elif curr.key == key:
                return curr
            elif curr.key > key:
                curr = curr.left
            else:
                curr = curr.right
        # (A trailing unreachable `return curr` was removed: the loop always
        # returns from within.)

    ########### THE BELOW SECTION IS ONLY FOR REFERENCE AS A FUNCTIONING RECURSIVE IMPLEMENTATION
    def insert_r(self, key, value):
        """Insert the provided `value` at the provided `key` in the `RedBlackBST` using a recursive method `_put()`.
        Assumes the key provided is a comparable object, and assumes uniqueness. If the `Key` already exists in the
        structure, the provided value will overwrite the previous value for this key.
        :param key: The unique, comparable object by which to retrieve the desired value.
        :param value: The value in which to store in the `RedBlackBST`
        :return: `None`
        """
        self.root = self._put(self.root, key, value)
        self.root.is_red = False

    def _put(self, node, key, value):
        """A recursive call to insert a new value into the structure using the standard Red-Black insertion rules.
        Base Case: The Node provided is None, in which case, create a new `RedBlackNode` and return.
        Recursive Case: If the insertion key is equal to node.key. replace the value and return (special case). If the
        insertion key is less than node.key, resurcively _put into node.left, otherwise recursively _put into node.right
        After the base case if found, recursively check for necessary rotations and color flips.
        :param node: The `RedBlackNode` into which a _put is attempted.
        :param key: The desired key to insert into the `RedBlackBST`
        :param value: The desired value to store at the provided `key`.
        :return: Returns the parent node from the level of recursion that has been executed.
        """
        if node is None:
            return RedBlackBST.RedBlackNode(key, value)
        if key < node.key:
            node.left = self._put(node.left, key, value)
        elif key > node.key:
            node.right = self._put(node.right, key, value)
        else:
            node.value = value
        if self._right_is_red(node) and not self._left_is_red(node):
            node = self._rotate_left_r(node)
        if self._left_left_is_red(node):
            node = self._rotate_right_r(node)
        if self._left_is_red(node) and self._right_is_red(node):
            self._flip_colors(node)
        return node

    def _rotate_left_r(self, node):
        """Perform a `rotation_left` around the node provided. Return the new root of newly rotated local cluster.
        :param node: The node around which to rotate. Does NOT manage parent links and cannot be used for iterative
        insertion method
        :return: The new root that exists as a result of the rotation.
        """
        x = node.right
        node.right = x.left
        x.left = node
        x.is_red = node.is_red
        node.is_red = True
        return x

    def _rotate_right_r(self, node):
        """Perform a `rotation_right` around the node provided. Return the new root of newly rotated local cluster.
        :param node: The node around which to rotate.
        :return: The new root that exists as a result of the rotation.
        """
        x = node.left
        node.left = x.right
        x.right = node
        x.is_red = node.is_red
        node.is_red = True
        return x
    ########### END RECURSIVE SECTION
def test_bst(bst):
    """Smoke-test *bst* (a RedBlackBST): iterative insert, search, and
    repeat-key overwrite.  Prints PASSED/FAILED per check rather than
    raising, so every case runs."""
    # Single insert should be retrievable immediately.
    bst.insert_i(1, 'one')
    r = bst.search(1)
    result = "PASSED" if r == 'one' else f"FAILED, expected 'one', received {r}"
    print(f"Test Inserting Single Value...{result}")
    # Sequential ascending keys exercise the rotation/fix-up path.
    tests = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for i in tests:
        try:
            bst.insert_i(i, i)
            print(f"Insertion of {i} passed (no exception thrown).")
        except Exception as e:
            print(f"Insertion of {i} failed. Exception thrown: {e}")
    for i in tests:
        try:
            r = bst.search(i)
            result = "PASSED" if r == i else f"FAILED, expected {i}, received {r}"
            print(f"Search for {i}: {result}")
        except Exception as e:
            print(f"Search for {i} failed. Exception thrown: {e}")
    # Re-inserting an existing key must overwrite the stored value.
    bst.insert_i(100, 'one-hundred')
    bst.insert_i(100, 'one hundred')
    r = bst.search(100)
    result = "PASSED" if r == 'one hundred' else f"Failed, expected 'one hundred', received {r}"
    print(f"Test repeat Keys: {result}")
if __name__ == "__main__":
    # Script entry point: run the smoke tests against a fresh tree.
    # (A trailing " |" artifact that broke the final statement was removed.)
    bst = RedBlackBST()
    test_bst(bst)
<filename>tracing_tool/debug_traces.py
import ast
import os
import sys
import glob
from colors import prGreen,prCyan,prRed
TRACES_DIR = './.fpchecker/traces'
TRACES_FILES = TRACES_DIR+'/'+'trace'
class TraceBackCommand:
    """Reconstructs, from recorded strace output under TRACES_DIR, the chain
    of processes (command tree) that produced a given build command."""

    def __init__(self):
        pass

    @staticmethod
    def getFile(rawCommand):
        """Return the first trace file whose contents contain *rawCommand*,
        or None when no file matches.

        BUGFIX: the original `break` only exited the inner loop, so a later
        file could silently overwrite an earlier match.
        """
        files = glob.glob(TRACES_DIR+'/trace.*')
        for f in files:
            with open(f) as fd:
                for line in fd:
                    if rawCommand in line:
                        return f
        return None

    @staticmethod
    def checkDirectory():
        """Abort with a clear message when the traces directory is missing or empty."""
        if not os.path.isdir(TRACES_DIR):
            raise SystemExit('Error: traces directory does not exist: '+TRACES_DIR)
        if not os.listdir(TRACES_DIR):
            raise SystemExit('Error: '+TRACES_DIR+' is empty')

    @staticmethod
    def printTrace(lineNumber: int):
        """Print the low-level command recorded at 1-based *lineNumber* of
        raw_traces.txt, then print its process ancestry."""
        TraceBackCommand.checkDirectory()
        if int(lineNumber) < 1:
            raise SystemExit('Error: command ID must be >= 1')
        lineNumber = int(lineNumber)
        prGreen('Recreating traces for command: ' + str(lineNumber))
        tracesFile = TRACES_DIR+'/raw_traces.txt'
        if not os.path.isfile(tracesFile):
            raise SystemExit('Error '+ tracesFile + ' does not exist. Please record build traces again.')
        trace = None
        with open(tracesFile, 'r') as fd:
            # Each line is a literal tuple; index 1 holds the command string.
            for i, line in enumerate(fd, start=1):
                if i == lineNumber:
                    trace = ast.literal_eval(line)[1]
                    break  # no need to scan the rest of the file
        if trace:
            print('Low-level command:')
            print(trace.strip()+'\n')
            print('It may take a few minutes...')
            fileName = TraceBackCommand.getFile(trace)
            #print(fileName)
            if fileName:
                TraceBackCommand.printTree(fileName)
            else:
                raise SystemExit('Error: could not find the command in the trace file')

    @staticmethod
    def getParent(fileName):
        """Return the trace file of the process that forked/cloned the PID
        encoded in *fileName* (trace.<PID>); exits when no parent exists,
        i.e. the root of the process tree was reached."""
        base = os.path.split(fileName)[1]
        PID = base.split('.')[1]
        files = glob.glob(TRACES_DIR+'/trace.*')
        for f in files:
            with open(f) as fd:
                for line in fd:
                    # A parent's fork/clone call returns the child's PID.
                    if ('fork(' in line or 'clone(' in line) and line.endswith('= '+PID+'\n'):
                        return f
        # No parent found: terminate the recursive tree walk here.
        sys.exit()

    @staticmethod
    def printTree(fileName):
        """Print the successful execve of *fileName*'s process, then recurse
        up through its ancestors."""
        if fileName is None:
            return
        print('<Command>')
        execCmd = None
        with open(fileName, 'r') as fd:
            for line in fd:
                if 'execve(' in line:
                    # Keep only the execve that actually succeeded (returned 0).
                    returnValue = line.split('=')[-1:][0].strip()
                    if returnValue == '0':
                        execCmd = line
        print('\t'+execCmd.strip())
        print('\t'+fileName)
        parentFile = TraceBackCommand.getParent(fileName)
        TraceBackCommand.printTree(parentFile)

    @staticmethod
    def getSomeCommands():
        """Collect up to maxCount bin2c execve lines from the trace files and
        write them to new_traces.txt.

        BUGFIX: the original `break` only exited the inner loop and the
        counter was compared with `==`, so once the limit was crossed the
        scan never stopped and the cap was not enforced across files.
        """
        files = glob.glob(TRACES_DIR+'/trace.*')
        maxCount = 13
        traces = []
        i = 0
        for f in files:
            if i >= maxCount:
                break
            with open(f) as fd:
                for line in fd:
                    if 'execve(' in line and 'bin2c' in line:
                        traces.append(('', line))
                        i += 1
                        print('i:', i)
                        if i >= maxCount:
                            break
        with open('new_traces.txt', 'w') as fd:
            for l in traces:
                fd.write(str(l)+'\n')
if __name__ == '__main__':
    # Script entry point.  The interactive path (print the ancestry of a
    # given command ID) is currently disabled in favor of a bulk extract.
    #cmdId = sys.argv[1]
    #TraceBackCommand.printTrace(cmdId)
    TraceBackCommand.getSomeCommands()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import print_function
import base64
import os.path
from contextlib import contextmanager
from threading import Thread
import colors
import pytest
from pex.cli.testing import run_pex3
from pex.common import safe_rmtree
from pex.compatibility import PY2
from pex.testing import IntegResults, make_env, run_pex_command
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Iterator
import attr # vendor:skip
else:
from pex.third_party import attr
@attr.s(frozen=True)
class Address(object):
    """Immutable host/port pair for the local test HTTP server."""
    host = attr.ib()  # type: str
    port = attr.ib()  # type: int
@contextmanager
def serve_authenticated(username, password, find_links):
    """Serve the *find_links* directory over HTTP behind Basic auth.

    Yields the server's bound :class:`Address`.  The server runs on a daemon
    thread and is shut down on exit.  NOTE: the process working directory is
    changed to *find_links* for the duration, because
    SimpleHTTPRequestHandler serves files relative to the cwd.
    """
    # Pre-compute the exact Authorization header value we will accept.
    expected_authorization = "Basic {}".format(
        base64.b64encode(
            "{username}:{password}".format(username=username, password=password).encode("utf8")
        ).decode("utf-8")
    )
    # Import the PY2 or PY3 spelling of the stdlib HTTP server classes.
    if PY2:
        from BaseHTTPServer import HTTPServer as HTTPServer
        from SimpleHTTPServer import SimpleHTTPRequestHandler as SimpleHTTPRequestHandler
    else:
        from http.server import HTTPServer as HTTPServer
        from http.server import SimpleHTTPRequestHandler as SimpleHTTPRequestHandler
    class BasicHTTPAuthHandler(SimpleHTTPRequestHandler):
        # Answer GETs only when the client presents the expected credentials.
        def do_GET(self):
            authorization = self.headers.get("Authorization")
            if expected_authorization == authorization:
                SimpleHTTPRequestHandler.do_GET(self)
            else:
                # 401 plus a challenge so clients may retry with credentials.
                self.send_response(401)
                self.send_header("WWW-Authenticate", 'Basic realm="Foo"')
                self.end_headers()
    server = HTTPServer(("", 0), BasicHTTPAuthHandler)  # port 0 -> pick a free port
    server_dispatch_thread = Thread(target=server.serve_forever)
    server_dispatch_thread.daemon = True
    cwd = os.getcwd()
    try:
        os.chdir(find_links)
        server_dispatch_thread.start()
        host, port = server.server_address
        yield Address(host=host, port=port)
    finally:
        server.shutdown()
        server_dispatch_thread.join()
        os.chdir(cwd)
@pytest.fixture(scope="module")
def ansicolors_find_links_directory(tmpdir_factory):
    # type: (Any) -> str
    """Build a local find links repo containing only ansicolors 1.1.8.

    Module-scoped: the repo is created once and shared by all tests here.
    """
    find_links = str(tmpdir_factory.mktemp("find_links"))
    extract_command = ["ansicolors==1.1.8", "--include-tools"]
    extract_command.extend(["--", "repository", "extract", "--find-links", find_links])
    result = run_pex_command(args=extract_command, env=make_env(PEX_TOOLS=1))
    result.assert_success()
    return find_links
@attr.s(frozen=True)
class SecuredLock(object):
    """A lock file plus the details of the basic-auth repo it was built against."""

    repo_address = attr.ib()  # type: Address
    repo_username = attr.ib()  # type: str
    repo_password = attr.ib()  # type: str
    lock = attr.ib()  # type: str
    pex_root = attr.ib()  # type: str

    @property
    def repo_url(self):
        # type: () -> str
        """The find links repo URL without any credential material."""
        return "http://%s:%d" % (self.repo_address.host, self.repo_address.port)

    @property
    def repo_url_with_credentials(self):
        # type: () -> str
        """The find links repo URL with basic-auth credentials embedded."""
        return "http://%s:%s@%s:%d" % (
            self.repo_username,
            self.repo_password,
            self.repo_address.host,
            self.repo_address.port,
        )
@pytest.fixture
def secured_ansicolors_lock(
    ansicolors_find_links_directory,  # type: str
    tmpdir,  # type: Any
):
    # type: (...) -> Iterator[SecuredLock]
    """Yield a lock file created against a basic-auth protected find links repo."""
    username = "joe"
    # NOTE(review): "<PASSWORD>" looks like a redacted placeholder; any
    # non-empty value works since the same value is used both to serve and to
    # authenticate -- confirm original value is not load-bearing.
    password = "<PASSWORD>"
    with serve_authenticated(
        username=username,
        password=password,
        find_links=ansicolors_find_links_directory,
    ) as address:
        lock = os.path.join(str(tmpdir), "lock")
        pex_root = os.path.join(str(tmpdir), "pex_root")
        secured_lock = SecuredLock(
            repo_address=address,
            repo_username=username,
            repo_password=password,
            lock=lock,
            pex_root=pex_root,
        )
        # Create the lock via the credential-embedding URL so lock creation
        # itself can authenticate against the secured repo.
        run_pex3(
            "lock",
            "create",
            "--pex-root",
            pex_root,
            "--no-pypi",
            "--find-links",
            secured_lock.repo_url_with_credentials,
            "ansicolors",
            "--indent",
            "2",
            "-o",
            lock,
        ).assert_success()
        yield secured_lock
def assert_unauthorized(
    secured_ansicolors_lock,  # type: SecuredLock
    result,  # type: IntegResults
):
    # type: (...) -> IntegResults
    """Assert ``result`` failed with a 401 against the secured repo.

    Returns ``result`` so callers can chain further assertions.
    """
    result.assert_failure()
    assert (
        "There was 1 error downloading required artifacts:\n"
        "1. ansicolors 1.1.8 from {repo_url}/ansicolors-1.1.8-py2.py3-none-any.whl\n"
        "    HTTP Error 401: Unauthorized".format(repo_url=secured_ansicolors_lock.repo_url)
    ) in result.error
    return result
def test_authenticated_lock_url_issue_1753(
    tmpdir,  # type: Any
    secured_ansicolors_lock,  # type: SecuredLock
):
    # type: (...) -> None
    """Locked resolves must honor credentials embedded in the find links URL."""
    use_lock_command_unauthenticated = [
        "--pex-root",
        secured_ansicolors_lock.pex_root,
        "--lock",
        secured_ansicolors_lock.lock,
        "--",
        "-c",
        "import colors; print(colors.red('Authenticated'))",
    ]
    def assert_success(result):
        # type: (IntegResults) -> None
        result.assert_success()
        assert colors.red("Authenticated") == result.output.strip()
    # N.B.: Since we created the lock locally, the Pex cache will contain the artifacts needed
    # and no fetches will need to be performed; so, even though we're running without
    # credentials, we should succeed anyhow.
    assert_success(run_pex_command(args=use_lock_command_unauthenticated))
    # But with the Pex caches cleared, fetches should be forced and they should fail without
    # credentials.
    safe_rmtree(secured_ansicolors_lock.pex_root)
    assert_unauthorized(
        secured_ansicolors_lock, run_pex_command(args=use_lock_command_unauthenticated)
    )
    # The find links repo URL without embedded credentials shouldn't help.
    assert_unauthorized(
        secured_ansicolors_lock,
        run_pex_command(
            args=[
                "--find-links",
                secured_ansicolors_lock.repo_url,
            ]
            + use_lock_command_unauthenticated
        ),
    )
    # With credentials embedded in the find links URL the forced fetches succeed.
    assert_success(
        run_pex_command(
            args=[
                "--find-links",
                secured_ansicolors_lock.repo_url_with_credentials,
            ]
            + use_lock_command_unauthenticated
        )
    )
def test_authenticated_lock_netrc_issue_1753(
    tmpdir,  # type: Any
    secured_ansicolors_lock,  # type: SecuredLock
):
    # type: (...) -> None
    """Credentials from ~/.netrc must be honored when resolving from a lock.

    Exercises the three netrc entry forms: ``machine <host>:<port>``,
    ``machine <url>`` and ``default``.
    """
    # We don't expect the ambient ~/.netrc, if present, will have the right credentials for an
    # ephemeral port server.
    use_lock_command = [
        "--pex-root",
        secured_ansicolors_lock.pex_root,
        "--lock",
        secured_ansicolors_lock.lock,
        "--",
        "-c",
        "import colors; print(colors.blue('Login Successful'))",
    ]
    safe_rmtree(secured_ansicolors_lock.pex_root)
    assert_unauthorized(secured_ansicolors_lock, run_pex_command(args=use_lock_command))
    # This explicitly controlled ~/.netrc definitely doesn't have the right credentials.
    home = os.path.join(str(tmpdir), "home")
    os.mkdir(home)
    with open(os.path.join(home, ".netrc"), "w") as fp:
        print("machine foo login bar password baz", file=fp)
    assert_unauthorized(
        secured_ansicolors_lock, run_pex_command(args=use_lock_command, env=make_env(HOME=home))
    )
    def assert_authorized(result):
        # type: (IntegResults) -> None
        result.assert_success()
        assert colors.blue("Login Successful") == result.output.strip()
    # A `machine <host>:<port>` entry with the right credentials should authorize fetches.
    with open(os.path.join(home, ".netrc"), "a") as fp:
        print(
            "machine {host}:{port} login {username} password {password}".format(
                host=secured_ansicolors_lock.repo_address.host,
                port=secured_ansicolors_lock.repo_address.port,
                username=secured_ansicolors_lock.repo_username,
                # Fix: was a bare `<PASSWORD>` redaction placeholder (a syntax error).
                password=secured_ansicolors_lock.repo_password,
            ),
            file=fp,
        )
    assert_authorized(run_pex_command(args=use_lock_command, env=make_env(HOME=home)))
    # A `machine <url>` entry should work equally well.
    with open(os.path.join(home, ".netrc"), "w") as fp:
        print(
            "machine {url} login {username} password {password}".format(
                url=secured_ansicolors_lock.repo_url,
                username=secured_ansicolors_lock.repo_username,
                # Fix: was a bare `<PASSWORD>` redaction placeholder (a syntax error).
                password=secured_ansicolors_lock.repo_password,
            ),
            file=fp,
        )
    safe_rmtree(secured_ansicolors_lock.pex_root)
    assert_authorized(run_pex_command(args=use_lock_command, env=make_env(HOME=home)))
    # Finally, a bare `default` entry should also authorize the fetches.
    with open(os.path.join(home, ".netrc"), "w") as fp:
        print(
            "default login {username} password {password}".format(
                username=secured_ansicolors_lock.repo_username,
                # Fix: was `<PASSWORD>.repo_password` (a syntax error).
                password=secured_ansicolors_lock.repo_password,
            ),
            file=fp,
        )
    safe_rmtree(secured_ansicolors_lock.pex_root)
    assert_authorized(run_pex_command(args=use_lock_command, env=make_env(HOME=home)))
def test_bad_netrc_issue_1762(
    tmpdir,  # type: Any
    secured_ansicolors_lock,  # type: SecuredLock
):
    # type: (...) -> None
    """A malformed ~/.netrc must be skipped with a warning, not crash the run."""
    use_lock_command = [
        "--pex-root",
        secured_ansicolors_lock.pex_root,
        "--lock",
        secured_ansicolors_lock.lock,
        "--",
        "-c",
        "import colors; print(colors.yellow('Welcome'))",
    ]
    safe_rmtree(secured_ansicolors_lock.pex_root)
    home = os.path.join(str(tmpdir), "home")
    os.mkdir(home)
    netrc_path = os.path.join(home, ".netrc")
    with open(netrc_path, "w") as fp:
        print("default login foo password <PASSWORD>", file=fp)
        print("machine foo login bar password <PASSWORD>", file=fp)
        # The trailing `protocol http` is not valid netrc syntax and should
        # trip the parse error asserted below (on line 3).
        print(
            "machine {host}:{port} login {username} password {password} protocol http".format(
                host=secured_ansicolors_lock.repo_address.host,
                port=secured_ansicolors_lock.repo_address.port,
                username=secured_ansicolors_lock.repo_username,
                # Fix: was a bare `<PASSWORD>` redaction placeholder (a syntax error).
                password=secured_ansicolors_lock.repo_password,
            ),
            file=fp,
        )
    def assert_netrc_skipped(result):
        # type: (IntegResults) -> None
        assert (
            "Failed to load netrc credentials: bad follower token 'protocol' ({netrc}, line 3)\n"
            "Continuing without netrc credentials.".format(netrc=netrc_path)
        ) in result.error
    # With only the bad netrc available the fetch is unauthorized, but the
    # netrc failure itself is just a warning.
    result = assert_unauthorized(
        secured_ansicolors_lock,
        run_pex_command(args=use_lock_command, env=make_env(HOME=home), quiet=True),
    )
    assert_netrc_skipped(result)
    # Explicit credentials still work, and the bad netrc is still only warned about.
    result = run_pex_command(
        args=["--find-links", secured_ansicolors_lock.repo_url_with_credentials] + use_lock_command,
        env=make_env(HOME=home),
        quiet=True,
    )
    result.assert_success()
    assert_netrc_skipped(result)
    assert colors.yellow("Welcome") == result.output.strip()
|
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jamf.api_client import ApiClient
from jamf.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SelfServiceBrandingPreviewApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def self_service_branding_configurations_get(self, **kwargs): # noqa: E501
"""Search for sorted and paged branding configurations # noqa: E501
Search for sorted and paged branding configurations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.self_service_branding_configurations_get(async_req=True)
>>> result = thread.get()
:param page:
:type page: int
:param pagesize:
:type pagesize: int
:param size:
:type size: int
:param page_size:
:type page_size: int
:param sort: Specifies the attribute to sort by. Defaults to ascending order. Prefix the attribute name with a minus (-) symbol for descending order.
:type sort: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BrandingSearchResults
"""
kwargs['_return_http_data_only'] = True
return self.self_service_branding_configurations_get_with_http_info(**kwargs) # noqa: E501
    def self_service_branding_configurations_get_with_http_info(self, **kwargs):  # noqa: E501
        """Search for sorted and paged branding configurations  # noqa: E501
        Search for sorted and paged branding configurations  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.self_service_branding_configurations_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param page:
        :type page: int
        :param pagesize:
        :type pagesize: int
        :param size:
        :type size: int
        :param page_size:
        :type page_size: int
        :param sort: Specifies the attribute to sort by. Defaults to ascending order. Prefix the attribute name with a minus (-) symbol for descending order.
        :type sort: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(BrandingSearchResults, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is captured first so it holds exactly the declared
        # parameters plus 'kwargs'; validated kwargs are merged in below and
        # 'kwargs' itself is then dropped.
        local_var_params = locals()
        all_params = [
            'page',
            'pagesize',
            'size',
            'page_size',
            'sort'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument this endpoint does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method self_service_branding_configurations_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'pagesize' in local_var_params and local_var_params['pagesize'] is not None:  # noqa: E501
            query_params.append(('pagesize', local_var_params['pagesize']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        # NOTE: the python kwarg is page_size but the wire parameter is
        # hyphenated ('page-size').
        if 'page_size' in local_var_params and local_var_params['page_size'] is not None:  # noqa: E501
            query_params.append(('page-size', local_var_params['page_size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting: none declared in the spec for this endpoint.
        auth_settings = []  # noqa: E501
        response_types_map = {
            200: "BrandingSearchResults",
        }
        return self.api_client.call_api(
            '/self-service/branding/configurations', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def self_service_branding_configurations_id_delete(self, id, **kwargs): # noqa: E501
"""Delete the Self Service branding configuration indicated by the provided id # noqa: E501
Delete the Self Service branding configuration indicated by the provided id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.self_service_branding_configurations_id_delete(id, async_req=True)
>>> result = thread.get()
:param id: id of branding configuration (required)
:type id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.self_service_branding_configurations_id_delete_with_http_info(id, **kwargs) # noqa: E501
    def self_service_branding_configurations_id_delete_with_http_info(self, id, **kwargs):  # noqa: E501
        """Delete the Self Service branding configuration indicated by the provided id  # noqa: E501
        Delete the Self Service branding configuration indicated by the provided id.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.self_service_branding_configurations_id_delete_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param id: id of branding configuration (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() is captured first so it holds exactly (self, id,
        # kwargs); validated kwargs are merged in below and 'kwargs' dropped.
        local_var_params = locals()
        all_params = [
            'id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument this endpoint does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method self_service_branding_configurations_id_delete" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                        local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `self_service_branding_configurations_id_delete`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting: none declared in the spec for this endpoint.
        auth_settings = []  # noqa: E501
        response_types_map = {}
        return self.api_client.call_api(
            '/self-service/branding/configurations/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def self_service_branding_configurations_id_get(self, id, **kwargs): # noqa: E501
"""Read a single Self Service branding configuration indicated by the provided id # noqa: E501
Read a single Self Service branding configuration indicated by the provided id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.self_service_branding_configurations_id_get(id, async_req=True)
>>> result = thread.get()
:param id: id of branding configuration (required)
:type id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BrandingConfiguration
"""
kwargs['_return_http_data_only'] = True
return self.self_service_branding_configurations_id_get_with_http_info(id, **kwargs) # noqa: E501
    def self_service_branding_configurations_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
        """Read a single Self Service branding configuration indicated by the provided id  # noqa: E501
        Read a single Self Service branding configuration indicated by the provided id.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.self_service_branding_configurations_id_get_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param id: id of branding configuration (required)
        :type id: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(BrandingConfiguration, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is captured first so it holds exactly (self, id,
        # kwargs); validated kwargs are merged in below and 'kwargs' dropped.
        local_var_params = locals()
        all_params = [
            'id'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument this endpoint does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method self_service_branding_configurations_id_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                        local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `self_service_branding_configurations_id_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`: this endpoint can also serve branding payloads.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'iOS Branding', 'macOS Branding'])  # noqa: E501
        # Authentication setting: none declared in the spec for this endpoint.
        auth_settings = []  # noqa: E501
        response_types_map = {
            200: "BrandingConfiguration",
        }
        return self.api_client.call_api(
            '/self-service/branding/configurations/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def self_service_branding_configurations_id_put(self, id, **kwargs): # noqa: E501
"""Update a Self Service branding configuration with the supplied details # noqa: E501
Update a Self Service branding configuration with the supplied details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.self_service_branding_configurations_id_put(id, async_req=True)
>>> result = thread.get()
:param id: id of branding configuration (required)
:type id: int
:param branding_configuration: The branding configuration values to update
:type branding_configuration: BrandingConfiguration
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BrandingConfiguration
"""
kwargs['_return_http_data_only'] = True
return self.self_service_branding_configurations_id_put_with_http_info(id, **kwargs) # noqa: E501
    def self_service_branding_configurations_id_put_with_http_info(self, id, **kwargs):  # noqa: E501
        """Update a Self Service branding configuration with the supplied details  # noqa: E501
        Update a Self Service branding configuration with the supplied details  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.self_service_branding_configurations_id_put_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param id: id of branding configuration (required)
        :type id: int
        :param branding_configuration: The branding configuration values to update
        :type branding_configuration: BrandingConfiguration
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(BrandingConfiguration, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is captured first so it holds exactly (self, id,
        # kwargs); validated kwargs are merged in below and 'kwargs' dropped.
        local_var_params = locals()
        all_params = [
            'id',
            'branding_configuration'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument this endpoint does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method self_service_branding_configurations_id_put" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                        local_var_params['id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `id` when calling `self_service_branding_configurations_id_put`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The request body is the branding configuration being written.
        if 'branding_configuration' in local_var_params:
            body_params = local_var_params['branding_configuration']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting: none declared in the spec for this endpoint.
        auth_settings = []  # noqa: E501
        response_types_map = {
            200: "BrandingConfiguration",
            404: "ApiError",
        }
        return self.api_client.call_api(
            '/self-service/branding/configurations/{id}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def self_service_branding_configurations_post(self, **kwargs): # noqa: E501
"""Create a Self Service branding configuration with the supplied # noqa: E501
Create a Self Service branding configuration with the supplied details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.self_service_branding_configurations_post(async_req=True)
>>> result = thread.get()
:param branding_configuration: The branding configuration to create
:type branding_configuration: BrandingConfiguration
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BrandingConfiguration
"""
kwargs['_return_http_data_only'] = True
return self.self_service_branding_configurations_post_with_http_info(**kwargs) # noqa: E501
    def self_service_branding_configurations_post_with_http_info(self, **kwargs):  # noqa: E501
        """Create a Self Service branding configuration with the supplied  # noqa: E501
        Create a Self Service branding configuration with the supplied details  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.self_service_branding_configurations_post_with_http_info(async_req=True)
        >>> result = thread.get()
        :param branding_configuration: The branding configuration to create
        :type branding_configuration: BrandingConfiguration
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(BrandingConfiguration, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is captured first so it holds exactly (self, kwargs);
        # validated kwargs are merged in below and 'kwargs' itself dropped.
        local_var_params = locals()
        all_params = [
            'branding_configuration'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject any keyword argument this endpoint does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method self_service_branding_configurations_post" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The request body is the branding configuration being created.
        if 'branding_configuration' in local_var_params:
            body_params = local_var_params['branding_configuration']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting: none declared in the spec for this endpoint.
        auth_settings = []  # noqa: E501
        response_types_map = {
            200: "BrandingConfiguration",
        }
        return self.api_client.call_api(
            '/self-service/branding/configurations', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def self_service_branding_images_post(self, file, **kwargs):  # noqa: E501
    """Upload an image # noqa: E501

    Convenience wrapper around
    ``self_service_branding_images_post_with_http_info`` that returns only
    the deserialized response body (the status code and headers are
    dropped).  A synchronous HTTP request is made by default; pass
    ``async_req=True`` to get a request thread instead:

    >>> thread = api.self_service_branding_images_post(file, async_req=True)
    >>> result = thread.get()

    :param file: The file to upload (required)
    :type file: file
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: BrandingImageUrl
    """
    # Force body-only return; the *_with_http_info variant does the work.
    kwargs['_return_http_data_only'] = True
    response = self.self_service_branding_images_post_with_http_info(file, **kwargs)  # noqa: E501
    return response
def self_service_branding_images_post_with_http_info(self, file, **kwargs):  # noqa: E501
    """Upload an image # noqa: E501

    Uploads an image # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.self_service_branding_images_post_with_http_info(file, async_req=True)
    >>> result = thread.get()

    :param file: The file to upload (required)
    :type file: file
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(BrandingImageUrl, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: locals() is captured first, while the frame contains only the
    # declared parameters ('self', 'file', 'kwargs'); validated kwargs are
    # merged into this dict below so all options can be read uniformly.
    local_var_params = locals()

    # Accepted keyword arguments: the operation's own parameter plus the
    # framework-level request options.
    all_params = [
        'file'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the known ones into
    # local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method self_service_branding_images_post" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'file' is set
    if self.api_client.client_side_validation and ('file' not in local_var_params or  # noqa: E501
                                                   local_var_params['file'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `file` when calling `self_service_branding_images_post`")  # noqa: E501

    collection_formats = {}

    # This endpoint takes no path or query parameters; the image travels
    # as a multipart form file.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}
    if 'file' in local_var_params:
        local_var_files['file'] = local_var_params['file']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # Map of HTTP status code -> response model for deserialization.
    response_types_map = {
        201: "BrandingImageUrl",
    }

    return self.api_client.call_api(
        '/self-service/branding/images', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
NbrOfNodes = 35
key200 = ' TIME: GANDRC STEP: 180.000 FRAME: 1.000'


def _read_gain_file(path, key_t200, nbr_of_nodes=NbrOfNodes):
    """Parse one .rsn results file.

    The file is a sequence of 'TIME ...' header lines, each followed by
    `nbr_of_nodes` whitespace-separated rows of 7 numbers.

    Returns a tuple (blocks, first_rows, t200_rows) where
      blocks     -- dict mapping each header line to its (nbr_of_nodes x 7)
                    float array of node rows,
      first_rows -- array with the first node row of every time step,
                    sorted by column 0 (time),
      t200_rows  -- the full node block for the header `key_t200`.

    Fixes over the original inline code:
      * files are closed (with-statement; the originals were never closed),
      * the dead `np.array(tmp_df.values, dtype=float)` statements and the
        quadratic `np.append`-in-a-loop accumulation are gone,
      * the t200 block is returned once — the original appended it once per
        time step (the inner loop never used the outer key), which only
        re-plotted identical points.
    """
    with open(path, 'r') as fh:
        series = pd.Series(fh.readlines())
    # Strip every line-ending variant.
    for pattern in (r'\n', r'\r\n', r'\r'):
        series = series.replace(pattern, '', regex=True)

    header_lines = series[series.str.contains('TIME', case=False, regex=False)]

    blocks = {}
    for idx in header_lines.index.values:
        rows = series[idx + 1: idx + 1 + nbr_of_nodes].str.strip()
        rows = rows.str.split(' ', expand=True)
        blocks[series[idx]] = np.array(rows.values, dtype=float)

    first_rows = np.array([blocks[key][0, :] for key in sorted(blocks.keys())],
                          dtype=float).reshape(-1, 7)
    first_rows = first_rows[np.argsort(first_rows[:, 0])]

    t200_rows = np.asarray(blocks[key_t200], dtype=float).reshape(-1, 7)
    return blocks, first_rows, t200_rows


#--------------------------------------------------------------------------
# Load the three runs (gain parameters 01..03); the module-level names are
# kept identical to the original script for the plotting code below.
#--------------------------------------------------------------------------
G01, Data_g01, Data_g01_t200 = _read_gain_file('stent_p7.rsn', key200)
G02, Data_g02, Data_g02_t200 = _read_gain_file('stent_ref.rsn', key200)
G03, Data_g03, Data_g03_t200 = _read_gain_file('stent_p9.rsn', key200)
#--------------------------------------------------------------------------
# ---- Figure: 2x3 comparison of the three penalty parameters -------------
fig = plt.figure()
plt.rcParams.update({'font.size': 8})
plt.rc('text', usetex=False)

# (data, colour, legend label) triples for the three penalty values.
_HISTORIES = ((Data_g01, 'b', 'Penalty=1*10^7'),
              (Data_g02, 'r', 'Penalty=1*10^5'),
              (Data_g03, 'g', 'Penalty=1*10^9'))
_SNAPSHOTS = ((Data_g01_t200, 'b', 'Penalty=1*10^7'),
              (Data_g02_t200, 'r', 'Penalty=1*10^5'),
              (Data_g03_t200, 'g', 'Penalty=1*10^9'))


def _panel(pos, datasets, xcol, xscale, ycol, yscale,
           xlabel, ylabel, letter, limits, legend=False):
    """Draw one subplot: one curve per dataset, axis labels and the panel
    letter in the top-right corner (replaces six copies of identical code)."""
    plt.subplot(2, 3, pos)
    for data, colour, label in datasets:
        plt.plot(data[:, xcol]*xscale, data[:, ycol]*yscale, colour,
                 label=label, linewidth=1.5, markersize=10)
    plt.xlabel(xlabel, {'color': 'k', 'fontsize': 10})
    plt.text(0.05, 0.5, ylabel, {'color': 'k', 'fontsize': 10,},
             ha='left', va='center', rotation=90, clip_on=False,
             transform=plt.gca().transAxes)
    plt.text(0.95, 0.95, letter, {'color': 'k', 'fontsize': 10,
             'bbox': dict(boxstyle="round", fc="w", ec="k", pad=0.2)},
             ha='right', va='top', transform=plt.gca().transAxes)
    if legend:
        plt.legend(loc='lower right')
    plt.axis(limits)


# Top row: evolution over time; bottom row: axial profile at step 180.
_panel(1, _HISTORIES, 0, 1.0, 4, 1e-3, r'Time [months]',
       r'Axial Stress [kPa]', r'a', [0, 180, 0, 100], legend=True)
_panel(2, _HISTORIES, 0, 1.0, 5, 1e-3, r'Time [months]',
       r'Circumferential Stress [kPa]', r'b', [0, 180, 0, 320])
_panel(3, _HISTORIES, 0, 1.0, 3, 1e3, r'Time [months]',
       r'Radius [mm]', r'c', [0, 180, 9.5, 11])
_panel(4, _SNAPSHOTS, 2, 1e3, 4, 1e-3, r'Axial position [mm]',
       r'Axial Stress [kPa]', r'd', [0, 100, 0, 100])
_panel(5, _SNAPSHOTS, 2, 1e3, 5, 1e-3, r'Axial position [mm]',
       r'Circumferential Stress [kPa]', r'e', [0, 100, 0, 320])
_panel(6, _SNAPSHOTS, 2, 1e3, 3, 1e3, r'Axial position [mm]',
       r'Radius [mm]', r'f', [0, 100, 9.5, 11])

fig.tight_layout()
plt.show()  # BUG FIX: `plt.show` without parentheses never displayed anything

FIGURENAME = 'penalty.eps'
# BUG FIX: the figure was saved twice, the second time with the deprecated
# (and since removed) `frameon=` keyword; one plain save is sufficient.
plt.savefig(FIGURENAME)
plt.close('all')
#--------------------------------------------------------------------------
# NOTE(review): the block below is an alternative bar-chart figure of the
# final radii, disabled by wrapping it in a bare string literal.  Kept as-is
# for reference; delete it or re-enable it deliberately.
"""
radii = (Data_g02[-1,3]*1000.0, Data_g01[-1,3]*1000.0, Data_g03[-1,3]*1000.0)
fig, ax = plt.subplots()
index = np.arange(3)
bar_width = 0.45
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, radii, bar_width,
alpha=opacity, color='b',
error_kw=error_config, label='Penalty')
ax.set_xlabel('Penalty')
ax.set_ylabel('Radius [mm]')
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(('1e5', '1e7', '1e9'))
plt.axis([-0.25,2.7,0,20])
fig.tight_layout()
plt.show
FIGURENAME = 'sensitivity_penalty.eps'
plt.savefig(FIGURENAME)
plt.savefig(fname=FIGURENAME,
dpi=None,
facecolor='w',
edgecolor='w',
orientation='portrait',
format=None,
transparent=False,
bbox_inches=None,
pad_inches=0.1,
frameon=None,
metadata=None)
plt.close('all')
"""
#--------------------------------------------------------------------------
|
<reponame>JenDobson/blog<gh_stars>0
from sqlalchemy import Column, ForeignKey, Integer, Date, String
from sqlalchemy.orm import declarative_base, relationship, sessionmaker
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import select, update
Base = declarative_base()
'''
Last step: buy transaction and sell transaction are subclasses of transaction. i
Use Single Table Inheritance pattern (https://docs.sqlalchemy.org/en/14/orm/inheritance.html?highlight=single%20table%20inheritance#single-table-inheritance)
'''
class Transaction(Base):
    # Root of a single-table-inheritance hierarchy: BuyTransaction and
    # SellTransaction share this one table, discriminated by `type`.
    __tablename__ = 'transaction'

    transaction_id = Column(Integer, primary_key=True)  # surrogate key
    date = Column(Date,nullable=False)                  # when the trade happened
    type = Column(String(20))                           # polymorphic discriminator
    #
    __mapper_args__ = {
        'polymorphic_on':type
    }
class BuyTransaction(Transaction):
    # Purchase side of a trade.  At most one Lot points back at a buy
    # (uselist=False) through lot.buy_transaction_id.
    purchased_lot = relationship("Lot",back_populates='buy_transaction',
                                 foreign_keys='lot.c.buy_transaction_id',
                                 uselist=False)
    __mapper_args__ = {
        'polymorphic_identity': 'buy_transaction'
    }
@event.listens_for(BuyTransaction, "after_insert")
def after_insert(mapper,connection,instance):
    # Every newly inserted BuyTransaction opens a Lot keyed by its
    # transaction id, written on the same connection during the flush.
    connection.execute(instance.metadata.tables['lot'].insert(),
                       {"buy_transaction_id":instance.transaction_id})
class SellTransaction(Transaction):
    # Sale side of a trade.  At most one Lot points back at a sale
    # (uselist=False) through lot.sell_transaction_id.
    sold_lot = relationship("Lot",back_populates='sell_transaction',
                            foreign_keys='lot.c.sell_transaction_id',
                            uselist=False)
    __mapper_args__ = {
        'polymorphic_identity': 'sell_transaction'
    }
@event.listens_for(SellTransaction, "after_insert")
def after_insert(mapper, connection, instance):
    """Match a newly inserted SellTransaction to the oldest open Lot.

    FIFO matching: pick the Lot whose buy transaction has the earliest
    date and whose sell_transaction_id is still NULL, then stamp it with
    this sale's id.
    """
    tables = instance.metadata.tables
    lot_table = tables['lot']
    transaction_table = tables['transaction']
    # 'lot' carries two foreign keys into 'transaction', so the join needs
    # an explicit onclause (otherwise sqlalchemy.exc.AmbiguousForeignKeysError:
    # "Can't determine join between 'lot' and 'transaction'").
    qstmt = select(lot_table.c.buy_transaction_id). \
        where(lot_table.c.sell_transaction_id == None). \
        join_from(lot_table, transaction_table,
                  onclause=lot_table.c.buy_transaction_id == transaction_table.c.transaction_id). \
        order_by(transaction_table.c.date)
    res = connection.execute(qstmt).first()
    # BUG FIX: the original indexed res[0] unconditionally; when no open
    # lot exists, first() returns None and the handler crashed with a
    # bare TypeError.  Fail with an explicit, diagnosable error instead.
    if res is None:
        raise ValueError(
            "No open Lot available to match SellTransaction id={0}".format(
                instance.transaction_id))
    update_stmt = update(lot_table).where(lot_table.c.buy_transaction_id == res[0]). \
        values(sell_transaction_id=instance.transaction_id)
    connection.execute(update_stmt)
''' Lot is like an edge connecting two transactions'''
class Lot(Base):
    """Association row linking one BuyTransaction to at most one
    SellTransaction.

    Each lot has exactly one non-null buy transaction (its primary key) and
    zero or one sell transactions; inserting a BuyTransaction creates the
    lot via the after_insert listener above.
    """
    __tablename__ = 'lot'

    buy_transaction_id = Column(Integer,
                                ForeignKey('transaction.transaction_id'),
                                primary_key=True)
    sell_transaction_id = Column(Integer,
                                 ForeignKey('transaction.transaction_id'))

    # Both relationships must name their FK column explicitly because the
    # table has two foreign keys into 'transaction'.
    buy_transaction = relationship(BuyTransaction,
                                   back_populates='purchased_lot',
                                   foreign_keys=buy_transaction_id)
    sell_transaction = relationship(SellTransaction,
                                    back_populates='sold_lot',
                                    foreign_keys=sell_transaction_id)

    def __repr__(self):
        # BUG FIX: the closing parenthesis was missing from the format
        # string ("...{sell}>" instead of "...{sell})>").
        return "<Lot(buy_transaction={buy},sell_transaction={sell})>".format(
            buy=self.buy_transaction, sell=self.sell_transaction)
# Demo run against a throwaway in-memory SQLite database.
engine = create_engine('sqlite+pysqlite:///:memory:')
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()

import datetime
# One buy followed by one sell; the after_insert listeners create and
# then close the Lot.
b = BuyTransaction(date=datetime.date(2021,9,1))
s = SellTransaction(date=datetime.date(2021,9,8))
# Before the commit nothing has been flushed, so no Lot exists yet.
l = b.purchased_lot
print('\n\nb: ',b,'\n')
print('s: ',s,'\n')
print('l: ',l,'\n')
print('\n==================ADDING AND COMMITTING TO DATABASE===============\n')
session.add_all([b,s])
session.commit()
# After the commit the listeners have run; the lot is now linked.
l = b.purchased_lot
print('l: ',l,'\n')
|
<filename>floodsystem/stationdata.py
'''
This module provides interface for extracting station data from
JSON objects fetched from the Internet and
'''
# pylint: disable=relative-beyond-top-level
from itertools import groupby
try:
from .datafetcher import \
fetch_stationdata, fetch_latest_water_level_data, fetch_gauge_data, fetch_latest_rainfall_data
from .station import MonitoringStation, RainfallGauge
except ImportError:
from datafetcher import \
fetch_stationdata, fetch_latest_water_level_data, fetch_gauge_data, fetch_latest_rainfall_data
from station import MonitoringStation, RainfallGauge
def build_station_list(use_cache: bool = True, return_numbers: bool = False) -> list[MonitoringStation]:
    '''
    Build and return a list of all river level monitoring stations
    based on data fetched from the Environment agency. Each station is
    represented as a MonitoringStation object.

    The available data for some station is incomplete or not
    available.

    #### Arguments

    `use_cache` (bool, default = True): whether to try fetching station data from a local cache
    `return_numbers` (bool, default = False): whether to additionally return a second value,
    the numbers of river-level, tidal and groundwater stations found respectively

    #### Returns

    list[MonitoringStation]: station objects built from obtained data
    dict[str, int] (optional): a mapping of the number of types of station found, in the form
    {'River Level': #, 'Tidal': #, 'Groundwater': #}
    '''
    river_data, coastal_data, groundwater_data = fetch_stationdata(use_cache=use_cache)

    # Station ids present in the tidal / groundwater datasets, used below
    # to tag each station's type.
    coastal_ids = {s['@id'] for s in coastal_data['items']}
    groundwater_ids = {s['@id'] for s in groundwater_data['items']}

    stations = []

    for e in river_data["items"] + coastal_data["items"] + groundwater_data["items"]:
        station_id = e.get('@id', None)
        measures = e.get('measures', None)
        # BUG FIX: the original condition was
        #     `measures is not None or measures in {[], [{}], [None], {None}}`
        # which raised TypeError whenever measures was None (the set literal
        # contains unhashable lists and is only evaluated on that branch),
        # and would in any case have *accepted* the empty placeholder values
        # it listed, crashing on `measures[-1]['@id']`.  Proceed only for a
        # non-empty, well-formed measures list.
        if measures is not None and measures not in ([], [{}], [None]):
            measure_id = measures[-1]['@id']
        else:
            continue  # essential field: no measure means nothing to get data from, so skip

        label = e['label']
        lat = e.get('lat', None)
        long = e.get('long', None)
        if lat is None or long is None:
            coord = None
        else:
            coord = (float(lat), float(long))
        town = e.get('town', None)
        river = e.get('riverName', None)
        url_id = e.get('RLOIid', '')

        # Type flags derived from which dataset(s) list this station id.
        is_tidal = station_id in coastal_ids
        is_groundwater = station_id in groundwater_ids

        # Typical and on-record ranges live under 'stageScale' when present;
        # some stations carry a bare string there instead of a dict.
        stage_scale = e.get('stageScale')
        if stage_scale is not None and not isinstance(stage_scale, str):
            typical_range_low = stage_scale.get('typicalRangeLow', None)
            typical_range_high = stage_scale.get('typicalRangeHigh', None)
            if typical_range_low is None or typical_range_high is None:
                typical_range = None
            else:
                typical_range = (float(typical_range_low), float(typical_range_high))
            min_on_record = stage_scale.get('minOnRecord', None)
            max_on_record = stage_scale.get('maxOnRecord', None)
            if min_on_record is not None:
                min_on_record = min_on_record.get('value', None)
            if max_on_record is not None:
                max_on_record = max_on_record.get('value', None)
            if min_on_record is None or max_on_record is None:
                record_range = None
            else:
                record_range = (float(min_on_record), float(max_on_record))
        else:
            typical_range = record_range = None

        extra = {'station_id': station_id, 'river': river, 'town': town,
                 'url_id': url_id, 'is_tidal': is_tidal, 'is_groundwater': is_groundwater,
                 'record_range': record_range}

        s = MonitoringStation(measure_id, label, coord, typical_range, **extra)
        stations.append(s)

    if return_numbers:
        # BUG FIX (latent): itertools.groupby only merges *consecutive*
        # equal keys, so a station list not fully ordered by type would
        # have produced wrong counts; Counter is order-independent.
        from collections import Counter
        nums = dict(Counter(s.station_type for s in stations))
        return stations, nums
    else:
        return stations
def build_rainfall_gauge_list(use_cache: bool = True) -> list[RainfallGauge]:
    '''
    Build and return a list of all rainfall gauges based on data fetched from
    the Environment agency. Each gauge is represented as a RainfallGauge object.

    The available data for some gauges is incomplete or not available.

    #### Arguments

    `use_cache` (bool, default = True): whether to try fetching gauge data from a local cache

    #### Returns

    list[RainfallGauge]: gauge objects built from obtained data
    '''
    # NOTE: return annotation corrected from list[MonitoringStation] to
    # list[RainfallGauge] — this function only ever builds RainfallGauge
    # objects (see `g = RainfallGauge(...)` below).
    data = fetch_gauge_data(use_cache=use_cache)

    gauges = []

    for e in data['items']:
        gauge_id = e.get('@id', None)
        lat = e.get('lat', None)
        long = e.get('long', None)
        # A gauge may list no measures at all; take the first when present.
        if (measures := e['measures']) != []:
            measure_id = measures[0].get('@id', None)
            period = measures[0].get('period', None)
        else:
            measure_id = None
            period = None

        if lat is None or long is None or measure_id is None:
            # essential fields, so skip if not available
            continue
        else:
            coord = (float(lat), float(long))

        gauge_number = e.get('stationReference', None)

        extra = {'period': period, 'gauge_number': gauge_number, 'gauge_id': gauge_id}

        g = RainfallGauge(measure_id, coord, **extra)
        gauges.append(g)

    return gauges
def update_water_levels(stations: list[MonitoringStation]):
    '''
    Fetch the latest river-level readings over the internet and attach
    each one to its station (as `latest_level` / `latest_recorded_datetime`).

    #### Arguments

    `stations` (list[MonitoringStation]): list of input stations
    '''
    measure_data = fetch_latest_water_level_data()

    # Map each measure id to its most recent (value, timestamp) pair.
    latest = {}
    for item in measure_data['items']:
        if 'latestReading' in item:
            reading = item['latestReading']
            latest[reading['measure']] = (reading['value'], reading['dateTime'])

    # Attach the reading to every station; stations without a reading get
    # (None, None) placeholders.
    for station in stations:
        station.latest_level, station.latest_recorded_datetime = \
            latest.get(station.measure_id, (None, None))
def update_rainfall_levels(gauges: list[RainfallGauge]):
    '''
    Fetch the latest rainfall readings over the internet and attach each
    one to its gauge (as `latest_level` / `latest_recorded_datetime`).

    #### Arguments

    `gauges` (list[RainfallGauges]): list of input stations
    '''
    measure_data = fetch_latest_rainfall_data()

    # Latest (value, timestamp) per measure id, skipping entries with no
    # 'latestReading' field.
    readings = (m['latestReading'] for m in measure_data['items']
                if 'latestReading' in m)
    latest = {r['measure']: (r['value'], r['dateTime']) for r in readings}

    # Attach the reading, or (None, None) when this gauge has none.
    for gauge in gauges:
        if gauge.measure_id in latest:
            gauge.latest_level, gauge.latest_recorded_datetime = latest[gauge.measure_id]
        else:
            gauge.latest_level, gauge.latest_recorded_datetime = None, None
|
"""
Script to determine focal mechanism of the InSight station.
Important: ssh-copy-id -i .ssh/id_rsa <EMAIL> (once)
"""
__author__ = "<NAME>"
import argparse
import toml
import instaseis
from os.path import join as pjoin
from os.path import exists as exist
import SS_MTI
def define_arguments():
    """Parse the command line: one positional argument, the input .toml file."""
    parser = argparse.ArgumentParser(
        description="Determine focal mechanisms of Marsquake")
    parser.add_argument("input_file", help="Input toml file")
    return parser.parse_args()
if __name__ == "__main__":
args = define_arguments()
## Step 1:
""" Read input file """
f_in = toml.load(args.input_file, _dict=dict)
# If you want to save your catalog file for only the events that you want to use in your inversion:
cat_save_name = args.input_file.split("/")[-1].strip(".toml") # None if you dont want to save
## Step 2:
""" Get the observed data """
## Variables that look if the event file is already saved from previous runs:
inv = SS_MTI.DataGetter.read_inv(inv_path=f_in["DATA"]["inventory_filepath"]) # Inventory file
cat = SS_MTI.DataGetter.read_cat(cat_path=f_in["DATA"]["catalog_filepath"]) # Catalog file
events = SS_MTI.DataGetter.read_events_from_cat(
event_params=f_in["EVENTS"],
cat=cat,
inv=inv,
local_folder=f_in["DATA"]["waveform_filepath"],
host_name=f_in["SERVER"]["host_name"],
user_name=f_in["SERVER"]["username"],
remote_folder=f_in["SERVER"]["remote_folder"],
save_file_name=cat_save_name,
)
event = event[0]
rec = instaseis.Receiver(
latitude=f_in["PARAMETERS"]["RECEIVER"]["la_r"],
longitude=f_in["PARAMETERS"]["RECEIVER"]["lon_r"],
)
## Step 3:
""" Define forward modeler """
forward_method = f_in["FORWARD"]["METHOD"]
forward_dict = f_in["FORWARD"][forward_method]
if forward_method == "INSTASEIS":
fwd = SS_MTI.Forward.Instaseis(
instaseis_db=instaseis.open_db(forward_dict["VELOC"]),
taup_model=forward_dict["VELOC_taup"],
or_time=event.origin_time,
dt=event.delta,
start_cut=f_in["PARAMETERS"]["start_cut"],
end_cut=f_in["PARAMETERS"]["end_cut"],
)
elif forward_method == "REFLECTIVITY":
fwd = SS_MTI.Forward.reflectivity()
else:
raise ValueError(
"forward_method can be either INSTASEIS or REFLECTIVITY in [FORWARD] of .toml file"
)
## Step 3:
""" Define misfit """
misfit_method = f_in["MISFIT"]["METHOD"]
misfit_dict = f_in["MISFIT"][misfit_method]
if method == "L2":
misfit = SS_MTI.Misfit.L2()
elif method == "CC":
misfit = SS_MTI.Misfit.CC()
elif method == "POL":
misfit = SS_MTI.Misfit.POL()
else:
raise ValueError("misfit can be L2, CC or POL in [MISFIT] of .toml file")
## step 4:
""" Start inversion """
inv_methods = f_in["INVERSION"]["METHOD"]
for inv_method in inv_methods:
print("Start {} inversion".format(inv_method))
for event in OBS.events:
if inv_method == "GS":
invs.Grid_Search(event=event, depths=[10], strikes=[10], dips=[10], rakes=[10])
|
<filename>main.py
from PyQt5 import uic, QtWidgets
class UI_Window(QtWidgets.QMainWindow):
    """Main application window.

    Loads the Qt Designer layout from 'base.ui' and exposes one navigation
    slot per page of its QStackedWidget; the page indices must match the
    order in which the screens are inserted by the launcher script.
    """

    def __init__(self, parent=None):
        super(UI_Window, self).__init__(parent)
        uic.loadUi('base.ui', self)
        # Wire the menu actions to their target pages.
        self.actionLogin.triggered.connect(self.back_to_Login)
        self.actionCadastrar_Usuario.triggered.connect(self.goto_CadastrarUsuario)
        self.actionMenu_Adm.triggered.connect(self.goto_Adm)
        self.actionAdm_Usu_rio.triggered.connect(self.goto_adm_user)
        self.actionMenu_usuario.triggered.connect(self.goto_common_user)
        self.show()

    def _show_page(self, index):
        """Switch the stacked widget to the page at *index*."""
        self.stackedWidget.setCurrentIndex(index)

    def goto_CadastrarLivro(self):
        self._show_page(8)

    def goto_edt_livro(self):
        self._show_page(7)

    def goto_edt_cadastro(self):
        self._show_page(6)

    def goto_adm_livros(self):
        self._show_page(5)

    def goto_adm_user(self):
        self._show_page(4)

    def goto_common_user(self):
        self._show_page(3)

    def goto_Adm(self):
        self._show_page(2)

    def goto_CadastrarUsuario(self):
        self._show_page(1)

    def back_to_Login(self):
        self._show_page(0)
if __name__ == "__main__":
import sys
from gui import *
app = QtWidgets.QApplication(sys.argv)
window = UI_Window()
login = LoginUI(mainWindow=window)
login.pushButton_cadastrar.clicked.connect(window.goto_CadastrarUsuario)
cadastrarUsuario = CadastrarUsuario(mainWindow=window)
cadastrarUsuario.pushButton_cancelar.clicked.connect(window.back_to_Login)
menuAdm = MenuAdm()
menuAdm.pushButton_admUsuarios.clicked.connect(window.goto_adm_user)
menuAdm.pushButton_AdmLivros.clicked.connect(window.goto_adm_livros)
menuUsuario = MenuUsuario()
adm_usuario = AdmUsuarios(mainWindow=window)
adm_usuario.pushButton_adicionar.clicked.connect(window.goto_CadastrarUsuario)
adm_usuario.pushButton_cancelar.clicked.connect(window.goto_Adm)
AdmLivros = AdmLivros(mainWindow=window)
AdmLivros.pushButton_adicionar.clicked.connect(window.goto_CadastrarLivro)
AdmLivros.pushButton_cancelar.clicked.connect(window.goto_Adm)
CadastrarLivro = CadastrarLivro()
CadastrarLivro.pushButton_cancelar.clicked.connect(window.goto_Adm)
EditarCadastro = EditarCadastro(mainWindow=window)
EditarCadastro.pushButton_alterar.clicked.connect(window.goto_Adm)
EditarCadastro.pushButton_Excluir.clicked.connect(window.goto_Adm)
EditarCadastro.pushButton_cancelar.clicked.connect(window.goto_adm_user)
EditarLivro = EditarLivro(mainWindow=window)
EditarLivro.pushButton_confirmar.clicked.connect(window.goto_Adm)
EditarLivro.pushButton_Excluir.clicked.connect(window.goto_Adm)
EditarLivro.pushButton_cancelar.clicked.connect(window.goto_adm_livros)
window.stackedWidget.insertWidget(0, login)
window.stackedWidget.insertWidget(1, cadastrarUsuario)
window.stackedWidget.insertWidget(2, menuAdm)
window.stackedWidget.insertWidget(3, menuUsuario)
window.stackedWidget.insertWidget(4, adm_usuario)
window.stackedWidget.insertWidget(5, AdmLivros)
window.stackedWidget.insertWidget(6, EditarCadastro)
window.stackedWidget.insertWidget(7, EditarLivro)
window.stackedWidget.insertWidget(8, CadastrarLivro)
#window.show()
sys.exit(app.exec_()) |
#!/usr/bin/env python
try:
import json
except ImportError:
import simplejson as json
import urllib2
import urllib
import base64
from pagerduty.version import *
__version__ = VERSION
class SchedulesError(urllib2.HTTPError):
    """HTTP error raised by the Pagerduty Schedules API.

    Wraps a urllib2.HTTPError and, when the response body is a JSON error
    document, extracts its code, error list and message.
    """

    def __init__(self, http_error):
        urllib2.HTTPError.__init__(self, http_error.filename, http_error.code, http_error.msg, http_error.hdrs, http_error.fp)
        # BUG FIX: give the attributes sane defaults first.  The original
        # set them only inside the try-block, so a non-JSON/empty body left
        # them unset and __repr__/__str__ then raised AttributeError.
        self.statuscode = http_error.code
        self.statusdesc = ''
        self.errormessage = http_error.msg
        try:
            data = self.read()
            j = json.loads(data)
            error = j['error']
            self.statuscode = error['code']
            self.statusdesc = ' | '.join(error.get('errors', []))
            self.errormessage = error['message']
        except Exception:
            # Body was not a JSON error document; keep the defaults.
            # (The original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass

    def __repr__(self):
        return 'Pagerduty Schedules Error: HTTP {0} {1} returned with message, "{2}"'.format(self.statuscode, self.statusdesc, self.errormessage)

    def __str__(self):
        return self.__repr__()
class SchedulesRequest(urllib2.Request):
    def __init__(self, connection, resource, params):
        """Representation of a Pagerduty Schedules API HTTP request.
        :type connection: :class:`Schedules`
        :param connection: Schedules connection object populated with a username, password and base URL
        :type resource: string
        :param resource: Pagerduty resource to query (lowercase)
        :type params: dict
        :param params: Params to be sent with a GET request
        """
        encoded_params = urllib.urlencode(params)
        url = connection.base_url + resource + '?' + encoded_params
        urllib2.Request.__init__(self, url)
        # Add HTTP basic auth header; encodestring appends newlines, strip them.
        base64string = base64.encodestring('%s:%s' % (connection.username, connection.password)).replace('\n','')
        self.add_header("Authorization", "Basic %s" % base64string)
    def __repr__(self):
        # Bug fix: original applied the '%' operator to a str.format-style
        # template ('{0} {1}'), which raises TypeError. Use .format().
        return 'SchedulesRequest: {0} {1}'.format(self.get_method(), self.get_full_url())
    def fetch(self):
        """Execute the request; return a SchedulesResponse or raise SchedulesError."""
        try:
            response = urllib2.urlopen(self)
        except urllib2.HTTPError as e:
            # 'as' syntax is valid from Python 2.6 on (forward-compatible).
            raise SchedulesError(e)
        else:
            return SchedulesResponse(response)
class SchedulesResponse(object):
    def __init__(self, response):
        """Representation of a Pagerduty Schedules API HTTP response.

        :param response: a file-like HTTP response object (read once here).
        """
        # Raw body and headers straight off the urllib2 response object.
        self.data = response.read()
        self.headers = response.headers
        # Parsed JSON payload; raises ValueError if the body is not JSON.
        self.content = json.loads(self.data)
        if 'error' in self.content:
            # NOTE(review): SchedulesError.__init__ expects an
            # urllib2.HTTPError, not a dict -- if this branch is ever taken
            # the raise itself will fail with AttributeError. Confirm the
            # intended argument before relying on this path.
            raise SchedulesError(self.content)
    def __repr__(self):
        return 'SchedulesResponse: {0}'.format(self.content.items())
class Schedules(object):
    """Interface to the PagerDuty Schedules API for one schedule.

    Holds the credentials and base URL used by SchedulesRequest.
    """
    def __init__(self, subdomain, schedule_id, username, password):
        self.username = username
        self.password = password
        self.base_url = 'https://{0}.pagerduty.com/api/v1/schedules/{1}/'.format(subdomain, schedule_id)
    def entries(self, since, until, overflow=False):
        """Query schedule entries between two dates.

        The maximum range queryable at once is three months; the API raises
        an error when that is violated.

        :type since: string
        :param since: ISO 8601 date; the time element is optional
                      (e.g. '2011-05-06' is understood as midnight)
        :type until: string
        :param until: ISO 8601 date; the time element is optional
                      (e.g. '2011-05-06' is understood as midnight)
        :type overflow: boolean
        :param overflow: if True, on-call schedules are returned as entered;
                         if False, only their overlaps with [since, until]
                         are returned
        """
        params = {'since': since, 'until': until}
        if overflow:
            params['overflow'] = True
        return SchedulesRequest(self, 'entries', params).fetch().content['entries']
class PagerDutyException(Exception):
    """Raised when the PagerDuty events API reports a non-success status.

    Carries the API status string, the human-readable message and the list
    of individual error strings returned by the service.
    """
    def __init__(self, status, message, errors):
        super(PagerDutyException, self).__init__(message)
        self.msg = message
        self.status = status
        self.errors = errors
    def __repr__(self):
        return "%s(%r, %r, %r)" % (self.__class__.__name__, self.status, self.msg, self.errors)
    def __str__(self):
        lines = ["%s: %s" % (self.status, self.msg)]
        # One bullet per individual error, appended only when any exist.
        lines.extend("* %s" % err for err in (self.errors or ()))
        return "\n".join(lines)
class PagerDuty(object):
    """Client for the PagerDuty generic events (integration) API."""
    def __init__(self, service_key, https=True, timeout=15):
        self.service_key = service_key
        # Select scheme by the boolean flag; endpoint is fixed by the API.
        self.api_endpoint = ("http", "https")[https] + "://events.pagerduty.com/generic/2010-04-15/create_event.json"
        self.timeout = timeout
    def trigger(self, description, incident_key=None, details=None):
        """Open a new incident (or de-duplicate onto incident_key). Returns the incident key."""
        return self._request("trigger", description=description, incident_key=incident_key, details=details)
    def acknowledge(self, incident_key, description=None, details=None):
        """Acknowledge the incident identified by incident_key."""
        return self._request("acknowledge", description=description, incident_key=incident_key, details=details)
    def resolve(self, incident_key, description=None, details=None):
        """Resolve the incident identified by incident_key."""
        return self._request("resolve", description=description, incident_key=incident_key, details=details)
    def _request(self, event_type, **kwargs):
        """POST one event; raise PagerDutyException on a non-success reply."""
        event = {
            "service_key": self.service_key,
            "event_type": event_type,
        }
        # Only include fields the caller actually provided.
        for k, v in kwargs.items():
            if v is not None:
                event[k] = v
        encoded_event = json.dumps(event)
        try:
            res = urllib2.urlopen(self.api_endpoint, encoded_event, self.timeout)
        except urllib2.HTTPError as exc:
            # A 400 response still carries the JSON error document we want
            # to parse below; anything else is propagated unchanged.
            if exc.code != 400:
                raise
            res = exc
        result = json.loads(res.read())
        if result['status'] != "success":
            # Bug fix: 'errors' is not always present in the reply; fall
            # back to an empty list instead of raising KeyError here.
            raise PagerDutyException(result['status'], result['message'], result.get('errors', []))
        return result.get('incident_key')
class IncidentsError(urllib2.HTTPError):
    """HTTP error raised by the Incidents API.

    Wraps an ``urllib2.HTTPError`` and, when the response body is the usual
    PagerDuty JSON error document, extracts code/description/message from it.
    """
    def __init__(self, http_error):
        urllib2.HTTPError.__init__(self, http_error.filename, http_error.code, http_error.msg, http_error.hdrs, http_error.fp)
        # Defaults used when the body below cannot be parsed.
        self.statuscode = http_error.code
        self.statusdesc = http_error.msg
        self.errormessage = ''
        try:
            data = self.read()
            j = json.loads(data)
            error = j['error']
            self.statuscode = error['code']
            self.statusdesc = ' | '.join(error.get('errors', []))
            self.errormessage = error['message']
        except Exception:
            # Bug fix: narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are no longer swallowed; parsing is
            # best-effort and the defaults above remain in place.
            pass
    def __repr__(self):
        return 'Pagerduty Incidents Error: HTTP {0} {1} returned with message, "{2}"'.format(self.statuscode, self.statusdesc, self.errormessage)
    def __str__(self):
        return self.__repr__()
class IncidentsRequest(urllib2.Request):
    def __init__(self, connection, params):
        """Representation of a Pagerduty Incidents API HTTP request.
        :type connection: :class:`Incidents`
        :param connection: Incidents connection object populated with a username, password and base URL
        :type params: dict
        :param params: Params to be sent with a GET request
        """
        encoded_params = urllib.urlencode(params)
        url = connection.base_url + '?' + encoded_params
        urllib2.Request.__init__(self, url)
        # Add HTTP basic auth header; encodestring appends newlines, strip them.
        base64string = base64.encodestring('%s:%s' % (connection.username, connection.password)).replace('\n','')
        self.add_header("Authorization", "Basic %s" % base64string)
    def __repr__(self):
        # Bug fix: original applied the '%' operator to a str.format-style
        # template ('{0} {1}'), which raises TypeError. Use .format().
        return 'IncidentsRequest: {0} {1}'.format(self.get_method(), self.get_full_url())
    def fetch(self):
        """Execute the request; return an IncidentsResponse or raise IncidentsError."""
        try:
            response = urllib2.urlopen(self)
        except urllib2.HTTPError as e:
            # 'as' syntax is valid from Python 2.6 on (forward-compatible).
            raise IncidentsError(e)
        else:
            return IncidentsResponse(response)
class IncidentsResponse(object):
    def __init__(self, response):
        """Representation of a Pagerduty Incidents API HTTP response.

        :param response: a file-like HTTP response object (read once here).
        """
        # Raw body and headers straight off the urllib2 response object.
        self.data = response.read()
        self.headers = response.headers
        # Parsed JSON payload; raises ValueError if the body is not JSON.
        self.content = json.loads(self.data)
        if 'error' in self.content:
            # NOTE(review): IncidentsError.__init__ expects an
            # urllib2.HTTPError, not a dict -- if this branch is ever taken
            # the raise itself will fail with AttributeError. Confirm the
            # intended argument before relying on this path.
            raise IncidentsError(self.content)
    def __repr__(self):
        return 'IncidentsResponse: {0}'.format(self.content.items())
class Incidents(object):
    """ Interface to Pagerduty Incident API.
    """
    def __init__(self, subdomain, username, password):
        self.username = username
        self.password = password
        self.base_url = 'https://{0}.pagerduty.com/api/v1/incidents'.format(subdomain)
    def _make_request(self, since, until, limit, offset):
        """Fetch one page; return (total matching incidents, incidents on this page)."""
        params = {'since' : since, 'until' : until, 'limit' : limit, 'offset' : offset}
        request = IncidentsRequest(self, params)
        response = request.fetch()
        return response.content['total'], response.content['incidents']
    def all(self, since, until):
        """ Query incidents.
        The maximum range queryable at once is 100 incidents.
        This function returns an iterator that handles pagination for you.
        :type since: string
        :param since: date in ISO 8601 format, the time element is optional
         (ie. '2011-05-06' is understood as at midnight )
        :type until: string
        :param until: date in ISO 8601 format, the time element is optional
         (ie. '2011-05-06' is understood as at midnight )
        """
        limit = 100
        total, incidents = self._make_request(since, until, limit, 0)
        for i in incidents:
            yield i
        if total > limit:
            # Bug fix: floor division. Under Python 3 (or with true division
            # enabled) 'total / limit' yields a float and range() would fail.
            num_pages = (total // limit) + 1
            for page in range(1, num_pages):
                offset = page * limit
                total, incidents = self._make_request(since, until, limit, offset)
                for i in incidents:
                    yield i
#!/usr/bin/env python
import copy
import os
from collections import defaultdict
from collections import OrderedDict

from intervaltree import IntervalTree

from ragoo_utilities.PAFReader import PAFReader
from ragoo_utilities.SeqReader import SeqReader
from ragoo_utilities.ReadCoverage import ReadCoverage
from ragoo_utilities.ContigAlignment import ContigAlignment
from ragoo_utilities.ContigAlignment import UniqueContigAlignment
from ragoo_utilities.ContigAlignment import LongestContigAlignment
from ragoo_utilities.GFFReader import GFFReader
from ragoo_utilities.utilities import run, log, reverse_complement, read_contigs, read_gz_contigs
from ragoo_utilities.break_chimera import get_ref_parts, cluster_contig_alns, avoid_gff_intervals, update_gff, break_contig, get_intra_contigs
def update_misasm_features(features, breaks, contig, ctg_len):
    """Redistribute GFF features of `contig` onto its misassembly-broken pieces.

    Each piece is keyed '<contig>_misasm_break:<start>-<end>' and feature
    coordinates are shifted to be relative to the piece start. Returns the
    updated features dict.
    """
    def piece_key(border):
        # Header used for one broken piece of the contig.
        return contig + '_misasm_break:' + str(border[0]) + '-' + str(border[1])

    positions = [0] + sorted(breaks) + [ctg_len]
    borders = [(positions[k], positions[k + 1]) for k in range(len(positions) - 1)]
    # Remove the original header and seed one empty feature list per piece.
    contig_feats = features.pop(contig)
    for border in borders:
        features[piece_key(border)] = []
    # Interval tree mapping piece spans back to their border tuples.
    lookup = IntervalTree()
    for border in borders:
        lookup[border[0]:border[1]] = border
    for feat in contig_feats:
        hits = lookup[feat.start]
        assert len(hits) == 1
        hit = list(hits)[0]
        new_key = piece_key((hit.begin, hit.end))
        feat.seqname = new_key
        # Re-express coordinates relative to the start of the piece.
        feat.start = feat.start - hit.begin
        feat.end = feat.end - hit.begin
        features[new_key].append(feat)
    return features
def remove_gff_breaks(gff_ins, breaks):
    """
    Drop candidate break points that fall within the interval of any gff
    feature. Should be called once per contig.
    :param gff_ins: List of GFFLines
    :param breaks: candidate break points
    :return: break points not covered by any feature
    """
    feature_tree = IntervalTree()
    for feature in gff_ins:
        # One-bp intervals cannot be stored in the tree; skip them.
        if feature.start == feature.end:
            continue
        feature_tree[feature.start:feature.end] = (feature.start, feature.end)
    return [bp for bp in breaks if not feature_tree[bp]]
def write_misasm_broken_ctgs(contigs_file, breaks, out_prefix, in_gff=None, in_gff_name=None):
    """Write '<out_prefix>.misasm.break.fa' inside ./ctg_alignments, splitting
    any contig listed in `breaks` at its break positions; optionally also
    writes the lifted-over GFF.

    :param contigs_file: contigs FASTA path, relative two directories up
    :param breaks: dict of contig header -> list of break positions
    :param out_prefix: prefix for the output FASTA
    :param in_gff: optional dict of seqname -> GFF lines to write out
    :param in_gff_name: file name for the optional GFF output
    """
    current_path = os.getcwd()
    os.chdir('ctg_alignments')
    if in_gff and in_gff_name:
        with open(in_gff_name, 'w') as f:
            for i in in_gff.keys():
                for j in in_gff[i]:
                    f.write(str(j) + '\n')
    x = SeqReader("../../" + contigs_file)
    # Bug fix: the output handle was never closed; a context manager
    # guarantees it is flushed and closed even if parsing raises.
    with open(out_prefix + ".misasm.break.fa", 'w') as f:
        for header, seq in x.parse_fasta():
            header = header[1:]
            if header not in breaks:
                f.write(">" + header + "\n")
                f.write(seq + "\n")
            else:
                # Break the contig at each break point.
                ctg_len = len(seq)
                break_list = [0] + sorted(breaks[header]) + [ctg_len]
                for i in range(len(break_list) - 1):
                    f.write(">" + header + "_misasm_break:" + str(break_list[i]) + "-" + str(break_list[i+1]) + "\n")
                    f.write(seq[break_list[i]:break_list[i+1]] + "\n")
    os.chdir(current_path)
def align_misasm_broken(out_prefix):
    """Align the misassembly-broken contigs back to the reference with minimap2.

    Relies on the module-level globals ``minimap_path``, ``t`` and
    ``reference_file`` that are set in ``__main__``. Skips the alignment if
    the output PAF already exists.
    """
    start_dir = os.getcwd()
    os.chdir('ctg_alignments')
    broken_fasta = out_prefix + ".misasm.break.fa"
    cmd = ('{} -k19 -w19 -t{} ../../{} {} '
           '> contigs_brk_against_ref.paf 2> contigs_brk_against_ref.paf.log').format(
        minimap_path, t, reference_file, broken_fasta)
    if not os.path.isfile('contigs_brk_against_ref.paf'):
        run(cmd)
    os.chdir(start_dir)
def write_contig_clusters(unique_dict, thresh, skip_list):
    """Write one './groupings/<chrom>_contigs.txt' file per reference
    chromosome, listing each contig whose grouping confidence exceeds
    `thresh` and which is not in `skip_list`.
    """
    all_chroms = set(unique_dict[ctg].ref_chrom for ctg in unique_dict.keys())
    start_dir = os.getcwd()
    groupings_dir = start_dir + '/groupings'
    if not os.path.exists(groupings_dir):
        os.makedirs(groupings_dir)
    os.chdir('groupings')
    # Truncate any files left over from a previous run.
    for chrom in all_chroms:
        open(chrom + '_contigs.txt', 'w').close()
    for ctg in unique_dict.keys():
        chrom = unique_dict[ctg].ref_chrom
        confidence = unique_dict[ctg].confidence
        if confidence > thresh and ctg not in skip_list:
            with open(str(chrom) + '_contigs.txt', 'a') as out:
                out.write(ctg + '\t' + str(confidence) + '\n')
    os.chdir(start_dir)
def clean_alignments(in_alns, l=10000, in_exclude_file='', uniq_anchor_filter=False, merge=False):
    """Filter alignments in place and return the pruned dict.

    Removes alignments to excluded reference headers and those shorter than
    `l`; optionally applies unique-anchor filtering and merging. Contigs
    left with no alignments are dropped from the dict.
    """
    excluded_refs = []
    if in_exclude_file:
        with open('../' + in_exclude_file) as f:
            for line in f:
                excluded_refs.append(line.rstrip().replace('>', '').split()[0])
    now_empty = []
    for ctg in in_alns.keys():
        aln = in_alns[ctg]
        aln.exclude_ref_chroms(excluded_refs)
        aln.filter_lengths(l)
        if uniq_anchor_filter:
            aln.unique_anchor_filter()
        if merge:
            aln.merge_alns()
        # Filtering may have removed every alignment for this contig.
        if not aln.ref_headers:
            now_empty.append(ctg)
    for ctg in now_empty:
        del in_alns[ctg]
    return in_alns
def read_paf_alignments(in_paf):
    """Parse a PAF file into a dict of {contig header: ContigAlignment}."""
    alns = dict()
    reader = PAFReader(in_paf)
    for rec in reader.parse_paf():
        # Create the per-contig accumulator on first sight, then append.
        if rec.contig not in alns:
            alns[rec.contig] = ContigAlignment(rec.contig)
        alns[rec.contig].add_alignment(rec)
    return alns
def get_contigs_from_groupings(in_file):
    """Return the contig names (first tab-separated field of each line)
    listed in a groupings file."""
    with open(in_file) as fh:
        return [row.split('\t')[0] for row in fh]
def get_location_confidence(in_ctg_alns):
    """Return the fraction of the contig's reference span covered by its
    alignments (0 when no alignments overlap the span).

    The span is [min(ref_starts), max(ref_ends)]; overlapping alignment
    intervals are clamped to that span and their union length is divided by
    the span length.
    """
    min_pos = min(in_ctg_alns.ref_starts)
    max_pos = max(in_ctg_alns.ref_ends)
    t = IntervalTree()
    # Put the reference start and end position for every alignment into the tree
    for i in range(len(in_ctg_alns.ref_headers)):
        t[in_ctg_alns.ref_starts[i]:in_ctg_alns.ref_ends[i]] = (in_ctg_alns.ref_starts[i], in_ctg_alns.ref_ends[i])
    overlaps = t[min_pos:max_pos]
    if not overlaps:
        return 0
    # Clamp every interval to [min_pos, max_pos]. Bug fix: the original
    # assigned into the stored tuples (i[0] = min_pos), which would raise
    # TypeError if those branches were ever reached; build new tuples instead.
    bounded_list = [(max(s, min_pos), min(e, max_pos)) for s, e in (iv.data for iv in overlaps)]
    # Sweep the intervals left to right, summing only newly covered territory.
    ovlp_range = 0
    max_end = -1
    for start, end in sorted(bounded_list, key=lambda tup: tup[0]):
        start_new_terr = max(start, max_end)
        ovlp_range += max(0, end - start_new_terr)
        max_end = max(max_end, end)
    return ovlp_range / (max_pos - min_pos)
def order_orient_contigs(in_unique_contigs, in_alns):
    """Order and orient contigs along their assigned reference chromosomes.

    Writes one 'orderings/<chrom>_orderings.txt' per chromosome with columns:
    contig, orientation (+/-), location confidence, orientation confidence.

    :param in_unique_contigs: dict of contig header -> UniqueContigAlignment
    :param in_alns: dict of contig header -> ContigAlignment
    """
    current_path = os.getcwd()
    output_path = current_path + '/orderings'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Get longest alignments
    longest_contigs = dict()
    for i in in_alns.keys():
        # Only consider alignments to the assigned chromosome
        uniq_aln = UniqueContigAlignment(in_alns[i])
        best_header = uniq_aln.ref_chrom
        ctg_alns = copy.deepcopy(in_alns[i])
        ctg_alns.filter_ref_chroms([best_header])
        longest_contigs[i] = LongestContigAlignment(ctg_alns)
    # Save the orientations
    final_orientations = dict()
    for i in longest_contigs.keys():
        final_orientations[i] = longest_contigs[i].strand
    # Get the location and orientation confidence scores
    orientation_confidence = dict()
    location_confidence = dict()
    forward_bp = 0
    reverse_bp = 0
    for i in in_alns.keys():
        uniq_aln = UniqueContigAlignment(in_alns[i])
        best_header = uniq_aln.ref_chrom
        ctg_alns = copy.deepcopy(in_alns[i])
        ctg_alns.filter_ref_chroms([best_header])
        # Orientation confidence scores
        # Every base pair votes for the orientation of the alignment in which it belongs
        # Score is # votes for the assigned orientation over all votes
        for j in range(len(ctg_alns.ref_headers)):
            if ctg_alns.strands[j] == '+':
                forward_bp += ctg_alns.aln_lens[j]
            else:
                reverse_bp += ctg_alns.aln_lens[j]
        if final_orientations[i] == '+':
            orientation_confidence[i] = forward_bp / (forward_bp + reverse_bp)
        else:
            orientation_confidence[i] = reverse_bp / (forward_bp + reverse_bp)
        # Reset the vote counters for the next contig.
        forward_bp = 0
        reverse_bp = 0
        # Location confidence
        location_confidence[i] = get_location_confidence(ctg_alns)
    all_chroms = set([in_unique_contigs[i].ref_chrom for i in in_unique_contigs.keys()])
    for this_chrom in all_chroms:
        # Intialize the list of start and end positions w.r.t the query
        ref_pos = []
        groupings_file = 'groupings/' + this_chrom + '_contigs.txt'
        contigs_list = get_contigs_from_groupings(groupings_file)
        for i in range(len(contigs_list)):
            # There is a scope issue here. Pass this (longest_contigs) to the method explicitly.
            ref_pos.append((longest_contigs[contigs_list[i]].ref_start, longest_contigs[contigs_list[i]].ref_end, i))
        # Sort contigs by the reference start of their longest alignment.
        final_order = [contigs_list[i[2]] for i in sorted(ref_pos)]
        # Get ordering confidence
        # To do this, get the max and min alignments to this reference chromosome
        # Then within that region, what percent of bp are covered
        with open('orderings/' + this_chrom + '_orderings.txt', 'w') as out_file:
            for i in final_order:
                # Also have a scope issue here.
                out_file.write(i + '\t' + final_orientations[i] + '\t' + str(location_confidence[i]) + '\t' + str(orientation_confidence[i]) + '\n')
def get_orderings(in_orderings_file):
    """Read an orderings file; return a list of (contig, orientation) tuples."""
    with open(in_orderings_file) as fh:
        return [(fields[0], fields[1].rstrip())
                for fields in (row.split('\t') for row in fh)]
def create_pseudomolecules(in_contigs_file, in_unique_contigs, gap_size, chr0=True):
    """
    Concatenate ordered/oriented contigs into per-chromosome pseudomolecules
    and write them to 'ragoo.fasta'.

    Need to make a translation table for easy lift-over.
    :param in_contigs_file: contigs FASTA path, relative one directory up
    :param in_unique_contigs: dict of contig header -> UniqueContigAlignment
    :param gap_size: number of 'N' characters inserted between contigs
    :param chr0: if True, unlocalized contigs are concatenated into 'Chr0';
                 otherwise each is written out under its own header
    :return:
    """
    # First, read all of the contigs into memory
    remaining_contig_headers = []
    all_seqs = OrderedDict()
    x = SeqReader('../' + in_contigs_file)
    if in_contigs_file.endswith(".gz"):
        for header, seq in x.parse_gzip_fasta():
            remaining_contig_headers.append(header.split(' ')[0])
            all_seqs[header.split(' ')[0]] = seq
    else:
        for header, seq in x.parse_fasta():
            remaining_contig_headers.append(header.split(' ')[0])
            all_seqs[header.split(' ')[0]] = seq
    # Get all reference chromosomes
    all_chroms = sorted(list(set([in_unique_contigs[i].ref_chrom for i in in_unique_contigs.keys()])))
    # Iterate through each orderings file and store sequence in a dictionary
    all_pms = dict()
    # Gap padding placed between consecutive contigs.
    pad = ''.join('N' for i in range(gap_size))
    for this_chrom in all_chroms:
        orderings_file = 'orderings/' + this_chrom + '_orderings.txt'
        orderings = get_orderings(orderings_file)
        if orderings:
            seq_list = []
            for line in orderings:
                # Mark that we have seen this contig
                remaining_contig_headers.pop(remaining_contig_headers.index('>' + line[0]))
                if line[1] == '+':
                    seq_list.append(all_seqs['>' + line[0]])
                else:
                    assert line[1] == '-'
                    seq_list.append(reverse_complement(all_seqs['>' + line[0]]))
            all_pms[this_chrom] = pad.join(seq_list)
            all_pms[this_chrom] += '\n'
    # Get unincorporated sequences and place them in Chr0
    if remaining_contig_headers:
        if chr0:
            chr0_headers = []
            chr0_seq_list = []
            for header in remaining_contig_headers:
                chr0_headers.append(header)
                chr0_seq_list.append(all_seqs[header])
            all_pms['Chr0'] = pad.join(chr0_seq_list)
            all_pms['Chr0'] += '\n'
            # Write out the list of chr0 headers
            f_chr0_g = open('groupings/Chr0_contigs.txt', 'w')
            f_chr0_o = open('orderings/Chr0_orderings.txt', 'w')
            for i in chr0_headers:
                f_chr0_g.write(i[1:] + "\t" + "0" + '\n')
                f_chr0_o.write(i[1:] + '\t' + "+" + '\t' + "0" + '\t' + "0" + '\n')
            f_chr0_g.close()
            f_chr0_o.close()
        else:
            # Instead of making a chromosome 0, add the unplaced sequences as is.
            for header in remaining_contig_headers:
                all_pms[header[1:]] = all_seqs[header] + "\n"
                f_chr0_g = open('groupings/' + header[1:] + '_contigs.txt', 'w')
                f_chr0_o = open('orderings/' + header[1:] + '_orderings.txt', 'w')
                f_chr0_g.write(header[1:] + "\t" + "0" + '\n')
                f_chr0_o.write(header[1:] + '\t' + "+" + '\t' + "0" + '\t' + "0" + '\n')
                f_chr0_g.close()
                f_chr0_o.close()
    # Write the final sequences out to a file
    with open('ragoo.fasta', 'w') as f:
        for out_header in all_pms:
            f.write(">" + out_header + "_RaGOO\n")
            f.write(all_pms[out_header])
def write_broken_files(in_contigs, in_contigs_name, in_gff=None, in_gff_name=None):
    """Write chimera-broken contigs (and optionally their lifted-over GFF)
    into the ./chimera_break directory, creating it if needed."""
    start_dir = os.getcwd()
    break_dir = start_dir + '/chimera_break'
    if not os.path.exists(break_dir):
        os.makedirs(break_dir)
    os.chdir('chimera_break')
    if in_gff and in_gff_name:
        with open(in_gff_name, 'w') as gff_out:
            for seqname in in_gff.keys():
                for feature in in_gff[seqname]:
                    gff_out.write(str(feature) + '\n')
    with open(in_contigs_name, 'w') as fasta_out:
        for header in in_contigs.keys():
            fasta_out.write('>' + header + '\n')
            fasta_out.write(in_contigs[header] + '\n')
    os.chdir(start_dir)
def align_breaks(break_type, m_path, in_reference_file, in_contigs_file, in_num_threads):
    """Align chimera-broken contigs against the reference with minimap2.

    :param break_type: 'inter' for interchromosomal breaks; anything else is
                       treated as intrachromosomal ('intra'), matching the
                       original two-branch behavior
    :param m_path: minimap2 executable path
    :param in_reference_file: reference FASTA, relative two directories up
    :param in_contigs_file: broken contigs FASTA inside ./chimera_break
    :param in_num_threads: minimap2 thread count
    """
    current_path = os.getcwd()
    os.chdir('chimera_break')
    # The two original branches differed only in the output file prefix.
    prefix = 'inter' if break_type == 'inter' else 'intra'
    paf = prefix + '_contigs_against_ref.paf'
    cmd = '{} -k19 -w19 -t{} ../../{} {} > {} 2> {}.log'.format(
        m_path, in_num_threads, in_reference_file, in_contigs_file, paf, paf)
    # Skip the (expensive) alignment if the PAF already exists.
    if not os.path.isfile(paf):
        run(cmd)
    os.chdir(current_path)
def align_pms(m_path, num_threads, in_reference_file):
    """Align the RaGOO pseudomolecules ('../ragoo.fasta') against the
    reference with minimap2 (asm5 preset, SAM output) inside ./pm_alignments."""
    start_dir = os.getcwd()
    pm_dir = start_dir + '/pm_alignments'
    if not os.path.exists(pm_dir):
        os.makedirs(pm_dir)
    os.chdir('pm_alignments')
    cmd = ('{} -ax asm5 --cs -t{} ../../{} {} '
           '> pm_against_ref.sam 2> pm_contigs_against_ref.sam.log').format(
        m_path, num_threads, in_reference_file, '../ragoo.fasta')
    if not os.path.isfile('pm_against_ref.sam'):
        run(cmd)
    os.chdir(start_dir)
def get_SVs(sv_min, sv_max, in_ref_file):
    """Call structural variants from the pseudomolecule alignments via the
    Assemblytics pipeline, inside ./pm_alignments.

    :param sv_min: minimum SV size passed to Assemblytics
    :param sv_max: maximum SV size passed to Assemblytics
    :param in_ref_file: reference FASTA, relative two directories up

    Each pipeline step is skipped if its output file already exists.
    """
    current_path = os.getcwd()
    os.chdir('pm_alignments')
    # Change this when setup.py is ready. Just call script directly
    cmd = 'sam2delta.py pm_against_ref.sam'
    if not os.path.isfile('pm_against_ref.sam.delta'):
        run(cmd)
    cmd_2 = 'Assemblytics_uniq_anchor.py --delta pm_against_ref.sam.delta --unique-length 10000 --out assemblytics_out --keep-small-uniques'
    if not os.path.isfile('assemblytics_out.Assemblytics.unique_length_filtered_l10000.delta'):
        run(cmd_2)
    cmd_3 = 'Assemblytics_between_alignments.pl assemblytics_out.coords.tab %r %r all-chromosomes exclude-longrange bed > assemblytics_out.variants_between_alignments.bed' %(sv_min, sv_max)
    if not os.path.isfile('assemblytics_out.variants_between_alignments.bed'):
        run(cmd_3)
    cmd_4 = 'Assemblytics_within_alignment.py --delta assemblytics_out.Assemblytics.unique_length_filtered_l10000.delta --min %r > assemblytics_out.variants_within_alignments.bed' %(sv_min)
    if not os.path.isfile('assemblytics_out.variants_within_alignments.bed'):
        run(cmd_4)
    # Concatenate the two variant BEDs under a single header line.
    header = "reference\tref_start\tref_stop\tID\tsize\tstrand\ttype\tref_gap_size\tquery_gap_size\tquery_coordinates\tmethod\n"
    with open('assemblytics_out.variants_between_alignments.bed', 'r')as f1:
        b1 = f1.read()
    with open('assemblytics_out.variants_within_alignments.bed', 'r') as f2:
        b2 = f2.read()
    with open('assemblytics_out.Assemblytics_structural_variants.bed', 'w') as f:
        f.write(header)
        # Might need to add newlines here
        f.write(b1)
        f.write(b2)
    # Filter out SVs caused by gaps
    cmd_5 = 'filter_gap_SVs.py ../../%s' %(in_ref_file)
    run(cmd_5)
    os.chdir(current_path)
def align_reads(m_path, num_threads, in_ctg_file, reads, tech='ont'):
    """Align raw reads to the contigs with minimap2 inside ./ctg_alignments.

    :param m_path: minimap2 executable path
    :param num_threads: minimap2 thread count
    :param in_ctg_file: contigs FASTA, relative two directories up
    :param reads: reads file (fasta/fastq, optionally gzipped), relative two directories up
    :param tech: read type; only 'sr' (short reads, -x sr) and 'corr'
                 (corrected long reads, -x asm10) are accepted. NOTE: the
                 historical default 'ont' is not accepted and raises.
    :raises ValueError: if tech is not 'sr' or 'corr'
    """
    # minimap2 preset per supported read technology.
    presets = {'sr': 'sr', 'corr': 'asm10'}
    # Bug fix: validate BEFORE creating/entering the output directory, so a
    # bad 'tech' no longer leaves the process with a changed cwd.
    if tech not in presets:
        raise ValueError("Only 'sr' or 'corr' are accepted for read type.")
    current_path = os.getcwd()
    output_path = current_path + '/ctg_alignments'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    os.chdir('ctg_alignments')
    cmd = '{} -x {} -t{} ../../{} ../../{} > reads_against_ctg.paf 2> reads_against_ctg.paf.log'.format(
        m_path, presets[tech], num_threads, in_ctg_file, reads)
    if not os.path.isfile('reads_against_ctg.paf'):
        run(cmd)
    os.chdir(current_path)
if __name__ == "__main__":
    import os
    import argparse

    # ---------------------------------------------------------------
    # Command-line interface. Several of these names (minimap_path, t,
    # reference_file, ...) are read as module-level globals by functions
    # defined above.
    # ---------------------------------------------------------------
    parser = argparse.ArgumentParser(description='order and orient contigs according to minimap2 alignments to a reference (v1.1)')
    parser.add_argument("contigs", metavar="<contigs.fasta>", type=str, help="fasta file with contigs to be ordered and oriented (gzipped allowed)")
    parser.add_argument("reference", metavar="<reference.fasta>", type=str, help="reference fasta file (gzipped allowed)")
    #parser.add_argument("-o", metavar="PATH", type=str, default="ragoo_output", help="output directory name")
    parser.add_argument("-e", metavar="<exclude.txt>", type=str, default="", help="single column text file of reference headers to ignore")
    parser.add_argument("-gff", metavar="<annotations.gff>", type=str, default='', help="lift-over gff features to chimera-broken contigs")
    parser.add_argument("-m", metavar="PATH", type=str, default="minimap2", help='path to minimap2 executable')
    parser.add_argument("-b", action='store_true', default=False, help="Break chimeric contigs")
    parser.add_argument("-R", metavar="<reads.fasta>", type=str, default="", help="Turns on misassembly correction. Align provided reads to the contigs to aid misassembly correction. fastq or fasta allowed. Gzipped files allowed. Turns off '-b'.")
    parser.add_argument("-T", metavar="sr", type=str, default="", help="Type of reads provided by '-R'. 'sr' and 'corr' accepted for short reads and error corrected long reads respectively.")
    parser.add_argument("-p", metavar="5", type=int, default=5, help=argparse.SUPPRESS)
    parser.add_argument("-l", metavar="10000", type=int, default=10000, help=argparse.SUPPRESS)
    parser.add_argument("-r", metavar="100000", type=int, default=100000, help="(with -b) this many bp of >1 reference sequence must be covered for a contig to be considered an interchromosomal chimera.")
    parser.add_argument("-c", metavar="1000000", type=int, default=1000000, help="(with -b) distance threshold between consecutive alignments with respect to the contig.")
    parser.add_argument("-d", metavar="2000000", type=int, default=2000000, help="(with -b) distance threshold between consecutive alignments with respect to the reference.")
    parser.add_argument("-t", metavar="3", type=int, default=3, help="Number of threads when running minimap.")
    parser.add_argument("-g", metavar="100", type=int, default=100, help="Gap size for padding in pseudomolecules.")
    parser.add_argument("-s", action='store_true', default=False, help="Call structural variants")
    parser.add_argument("-a", metavar="50", type=int, default=50, help=argparse.SUPPRESS)
    parser.add_argument("-f", metavar="10000", type=int, default=10000, help=argparse.SUPPRESS)
    parser.add_argument("-i", metavar="0.2", type=float, default=0.2, help="Minimum grouping confidence score needed to be localized.")
    parser.add_argument("-j", metavar="<skip.txt>", type=str, default="", help="List of contigs to automatically put in chr0.")
    parser.add_argument("-C", action='store_true', default=False, help="Write unplaced contigs individually instead of making a chr0")

    # Get the command line arguments
    args = parser.parse_args()
    contigs_file = args.contigs
    reference_file = args.reference
    #output_path = args.o
    exclude_file = args.e
    minimap_path = args.m
    break_chimeras = args.b
    gff_file = args.gff
    min_break_pct = args.p
    min_len = args.l
    min_range = args.r
    intra_wrt_ref_min = args.d
    intra_wrt_ctg_min = args.c
    t = args.t
    g = args.g
    call_svs = args.s
    min_assemblytics = args.a
    max_assemblytics = args.f
    group_score_thresh = args.i
    skip_file = args.j
    corr_reads = args.R
    corr_reads_tech = args.T
    make_chr0 = not args.C

    # Misassembly correction (-R) and chimera breaking (-b) are mutually
    # exclusive; -R wins.
    if corr_reads:
        log("Misassembly correction has been turned on. This automatically inactivates chimeric contig correction.")
        break_chimeras = False

    # Make sure that if -R, -T has been specified
    if corr_reads and not corr_reads_tech:
        raise ValueError("'-T' must be provided when using -R.")

    # Contigs listed in the skip file are forced into chr0.
    skip_ctg = []
    if skip_file:
        with open(skip_file) as f:
            for line in f:
                skip_ctg.append(line.rstrip())

    # All intermediate and final files live under ./ragoo_output.
    current_path = os.getcwd()
    output_path = current_path + '/ragoo_output'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    os.chdir(output_path)

    # Run minimap2
    cmd = '{} -k19 -w19 -t{} ../{} ../{} ' \
          '> contigs_against_ref.paf 2> contigs_against_ref.paf.log'.format(minimap_path, t, reference_file, contigs_file)

    if not os.path.isfile('contigs_against_ref.paf'):
        run(cmd)

    # Read in the minimap2 alignments just generated
    log('Reading alignments')
    alns = read_paf_alignments('contigs_against_ref.paf')
    alns = clean_alignments(alns, l=1000, in_exclude_file=exclude_file)

    # Process the gff file
    if gff_file:
        log('Getting gff features')
        features = defaultdict(list)
        z = GFFReader('../' + gff_file)
        for i in z.parse_gff():
            features[i.seqname].append(i)

    # Break chimeras if desired
    if break_chimeras:
        # Record how many contigs are broken
        total_inter_broken = 0
        total_intra_broken = 0

        alns = clean_alignments(alns, l=10000, in_exclude_file=exclude_file, uniq_anchor_filter=True)
        # Process contigs
        log('Getting contigs')
        if contigs_file.endswith(".gz"):
            contigs_dict = read_gz_contigs('../' + contigs_file)
        else:
            contigs_dict = read_contigs('../' + contigs_file)

        log('Finding interchromosomally chimeric contigs')
        all_chimeras = dict()
        for i in alns.keys():
            ref_parts = get_ref_parts(alns[i], min_len, min_break_pct, min_range)
            if len(ref_parts) > 1:
                all_chimeras[i] = ref_parts

        log('Finding break points and breaking interchromosomally chimeric contigs')
        break_intervals = dict()
        for i in all_chimeras.keys():
            break_intervals[i] = cluster_contig_alns(i, alns, all_chimeras[i], min_len)

            # If its just going to break it into the same thing, skip it.
            if len(break_intervals[i]) <= 1:
                continue

            if gff_file:
                # If desired, ensure that breakpoints don't disrupt any gff intervals
                break_intervals[i] = avoid_gff_intervals(break_intervals[i], features[i])
                features = update_gff(features, break_intervals[i], i)

            # Break contigs according to the final break points
            contigs_dict = break_contig(contigs_dict, i, break_intervals[i])
            total_inter_broken += 1

        # Next, need to re-align before finding intrachromosomal chimeras
        # First, write out the interchromosomal chimera broken fasta
        out_inter_fasta = contigs_file[:contigs_file.rfind('.')] + '.inter.chimera.broken.fa'
        if gff_file:
            out_gff = gff_file[:gff_file.rfind('.')] + '.inter.chimera_broken.gff'
            write_broken_files(contigs_dict, out_inter_fasta, features, out_gff)
        else:
            write_broken_files(contigs_dict, out_inter_fasta)

        # Next, realign the chimera broken contigs
        align_breaks('inter', minimap_path, reference_file, out_inter_fasta, t)

        # Now, use those new alignments for intrachromosomal chimeras
        log('Reading interchromosomal chimera broken alignments')
        inter_alns = read_paf_alignments('chimera_break/inter_contigs_against_ref.paf')
        inter_alns = clean_alignments(inter_alns, l=1000, in_exclude_file=exclude_file)

        log('Finding intrachromosomally chimeric contigs')
        # Find intrachromosomally chimeric contigs
        for i in inter_alns.keys():
            intra = get_intra_contigs(inter_alns[i], 15000, intra_wrt_ref_min, intra_wrt_ctg_min)
            if intra:
                if gff_file:
                    intra_break_intervals = avoid_gff_intervals(intra[1], features[intra[0]])
                else:
                    intra_break_intervals = intra[1]
                # Check if the avoidance of gff intervals pushed the break point to the end of the contig.
                if intra_break_intervals[-1][0] == intra_break_intervals[-1][1]:
                    continue

                # break the contigs and update features if desired
                contigs_dict = break_contig(contigs_dict, intra[0], intra_break_intervals)
                total_intra_broken += 1

                if gff_file:
                    features = update_gff(features, intra_break_intervals, intra[0])

        # Write out the intrachromosomal information
        out_intra_fasta = contigs_file[:contigs_file.rfind('.')] + '.intra.chimera.broken.fa'
        if gff_file:
            out_intra_gff = gff_file[:gff_file.rfind('.')] + '.intra.chimera_broken.gff'
            write_broken_files(contigs_dict, out_intra_fasta, features, out_intra_gff)
        else:
            write_broken_files(contigs_dict, out_intra_fasta)

        # Re align the contigs
        # Next, realign the chimera broken contigs
        align_breaks('intra', minimap_path, reference_file, out_intra_fasta, t)

        # Read in alignments of intrachromosomal chimeras and proceed with ordering and orientation
        log('Reading intrachromosomal chimera broken alignments')
        alns = read_paf_alignments('chimera_break/intra_contigs_against_ref.paf')
        alns = clean_alignments(alns, l=1000, in_exclude_file=exclude_file)
        # Downstream steps now operate on the broken contigs.
        contigs_file = '/ragoo_output/chimera_break/' + out_intra_fasta
        log('The total number of interchromasomally chimeric contigs broken is %r' % total_inter_broken)
        log('The total number of intrachromasomally chimeric contigs broken is %r' % total_intra_broken)

    # Check if misassembly correction is turned on. This is mutually exclusive with chimeric contig correction
    if corr_reads:
        # Align the raw reads to the assembly.
        log('Aligning raw reads to contigs')
        align_reads(minimap_path, t, contigs_file, corr_reads, corr_reads_tech)
        log('Computing contig coverage')
        cov_map = ReadCoverage('ctg_alignments/reads_against_ctg.paf')

        alns = clean_alignments(alns, l=10000, in_exclude_file=exclude_file, uniq_anchor_filter=True, merge=True)

        # Get the initial candidate break points.
        candidate_breaks = dict()
        for i in alns:
            candidates = alns[i].get_break_candidates()
            if candidates:
                candidate_breaks[i] = candidates

        # Validate each breakpoint by checking for excessively high or low coverage
        # Also, if a gff is provided, check to ensure that we don't break within a gff feature interval
        val_candidate_breaks = dict()
        for i in candidate_breaks:
            candidates = cov_map.check_break_cov(i, candidate_breaks[i])
            if gff_file:
                candidates = remove_gff_breaks(features[i], candidates)
            if candidates:
                val_candidate_breaks[i] = list(set(candidates))
                if gff_file:
                    features = update_misasm_features(features, val_candidate_breaks[i], i, cov_map.ctg_lens[i])

        # Break the contigs
        if gff_file:
            out_misasm_gff = gff_file[:gff_file.rfind('.')] + '.misasm.broken.gff'
            write_misasm_broken_ctgs(contigs_file, val_candidate_breaks, contigs_file[:contigs_file.rfind('.')], in_gff=features, in_gff_name=out_misasm_gff)
        else:
            write_misasm_broken_ctgs(contigs_file, val_candidate_breaks, contigs_file[:contigs_file.rfind('.')])

        # Align the broken contigs back to the reference
        align_misasm_broken(contigs_file[:contigs_file.rfind('.')])
        alns = read_paf_alignments('ctg_alignments/contigs_brk_against_ref.paf')
        alns = clean_alignments(alns, l=1000, in_exclude_file=exclude_file)
        # Downstream steps now operate on the misassembly-broken contigs.
        contigs_file = '/ragoo_output/ctg_alignments/' + contigs_file[:contigs_file.rfind('.')] + ".misasm.break.fa"

    # Assign each contig to a corresponding reference chromosome.
    log('Assigning contigs')
    all_unique_contigs = dict()
    for i in alns.keys():
        all_unique_contigs[i] = UniqueContigAlignment(alns[i])

    # Add to this the list of headers that did not make it
    write_contig_clusters(all_unique_contigs, group_score_thresh, skip_ctg)

    log('Ordering and orienting contigs')
    order_orient_contigs(all_unique_contigs, alns)

    log('Creating pseudomolecules')
    create_pseudomolecules(contigs_file, all_unique_contigs, g, make_chr0)

    if call_svs:
        log('Aligning pseudomolecules to reference')
        align_pms(minimap_path, t, reference_file)
        log('Getting structural variants')
        get_SVs(min_assemblytics, max_assemblytics, reference_file)

    log('goodbye')
# tfutils/model_tool_old.py
from __future__ import absolute_import, division, print_function
import inspect
from functools import wraps
from collections import OrderedDict
from contextlib import contextmanager
import copy
import tensorflow as tf
from tfutils.crossdevice_batchnorm import crossgpu_batch_norm, CRTPUBatchNormalization
import numpy as np
def initializer(kind='xavier', *args, **kwargs):
    """Build a TF weight initializer from a short name.

    'xavier' -> tf.contrib Xavier initializer; 'normal' -> the module-level
    `normal_initializer` callable (returned uninstantiated); any other name
    is resolved as tf.<kind>_initializer(*args, **kwargs).
    """
    if kind == 'xavier':
        return tf.contrib.layers.xavier_initializer(*args, **kwargs)
    if kind == 'normal':
        return normal_initializer
    return getattr(tf, kind + '_initializer')(*args, **kwargs)
def normal_initializer(shape, dtype=None, partition_info=None):
    '''
    He-style normal initializer used for EfficientNets.

    Samples N(0, sqrt(2 / fan_out)) where fan_out = H * W * C_out for a
    conv kernel of shape [H, W, C_in, C_out].
    '''
    kernel_h, kernel_w, _, channels_out = shape
    fan_out = int(kernel_h * kernel_w * channels_out)
    stddev = np.sqrt(2.0 / fan_out)
    return tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=dtype)
def groupnorm(inputs, G=32, data_format='channels_last', weight_decay=0.0, epsilon=1e-5, trainable=True, gamma_init=1, beta_init=0):
    '''
    Group Normalization for 4-D conv activations.

    Like LayerNorm, z-scores features along the channel dimension only.
    However, it only normalizes within G groups of C/G channels each.
    Optionally applies learnable scale/shift parameters.

    Args:
        inputs: 4-D tensor (NHWC or NCHW per `data_format`).
        G: number of channel groups; must divide C evenly.
        data_format: 'channels_last' or 'channels_first'.
        weight_decay: currently unused — the regularizer kwargs below are
            commented out.
        epsilon: stability constant added to the variance.
        trainable: create learnable gamma/beta variables if True, otherwise
            use fixed constants.
        gamma_init, beta_init: initial scale / shift values.

    Returns:
        Tensor with the same shape and layout as `inputs`.
    '''
    assert len(inputs.shape.as_list()) == 4, "Applies only to conv2D layers"
    # Internally always work in NHWC; transpose back at the end if needed.
    if data_format == 'channels_first':
        inputs = tf.transpose(inputs, [0,2,3,1])
    elif data_format == 'channels_last':
        pass
    else:
        raise ValueError("data_format must be 'channels_first' or 'channels_last'")
    B,H,W,C = inputs.shape.as_list()
    assert C % G == 0, "num groups G must divide C"
    CpG = C // G
    # Split channels into G groups of CpG channels each, then normalize over
    # the spatial dims and the within-group channel dim (axes 1,2,3).
    inputs = tf.reshape(inputs, [B,H,W,CpG,G])
    mean, var = tf.nn.moments(inputs, axes=[1,2,3], keep_dims=True)
    inputs = tf.div(inputs - mean, tf.sqrt(var + epsilon))
    inputs = tf.reshape(inputs, [B,H,W,C])
    if trainable:
        # Per-channel learnable scale/shift, broadcast over B, H, W.
        gamma = tf.get_variable("groupnorm_scale", shape=[1,1,1,C], dtype=tf.float32,
                                initializer=initializer("constant", float(gamma_init)))
        # regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        beta = tf.get_variable("groupnorm_shift", shape=[1,1,1,C], dtype=tf.float32,
                               initializer=initializer("constant", float(beta_init)))
        # regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
    else:
        gamma = tf.constant(gamma_init, dtype=tf.float32)
        beta = tf.constant(beta_init, dtype=tf.float32)
    inputs = gamma*inputs + beta
    if data_format == 'channels_first':
        inputs = tf.transpose(inputs, [0,3,1,2])
    print("applied group norm to", inputs.name.split('/')[:-1])
    return inputs
def batchnorm_corr(inputs, is_training, data_format='channels_last',
                   decay = 0.9, epsilon = 1e-5, init_zero=None, constant_init=None,
                   activation=None, time_suffix=None, bn_trainable=True,
                   use_crossgpu_bn=False, num_dev=None, use_crtpu_bn=False):
    '''
    Batch normalization with a choice of backend.

    Dispatches to one of three implementations: cross-GPU batch norm,
    cross-replica TPU batch norm, or the standard fused
    tf.layers.batch_normalization.

    Args:
        inputs: conv output tensor to normalize.
        is_training: whether to use batch statistics (vs. moving averages).
        decay: moving-average momentum.
        epsilon: variance stability constant.
        init_zero / constant_init: control the gamma initializer; an explicit
            constant_init wins, otherwise init_zero decides zeros vs. ones
            (defaulting to zeros when no activation follows).
        activation: only used to pick the default gamma init; not applied.
        time_suffix: when set, per-timestep BN variables are created under
            a suffixed op name with AUTO_REUSE.
        bn_trainable: whether gamma/beta are trainable.
        use_crossgpu_bn / num_dev: use crossgpu_batch_norm over num_dev devices.
        use_crtpu_bn: use CRTPUBatchNormalization instead.

    Returns:
        The normalized tensor.
    '''
    if time_suffix is not None:
        bn_op_name = "post_conv_BN_" + time_suffix
        reuse_flag = tf.AUTO_REUSE # create bn variables per timestep if they do not exist
    else:
        bn_op_name = "post_conv_BN"
        reuse_flag = None
    # if activation is none, should use zeros; else ones
    if constant_init is None:
        if init_zero is None:
            init_zero = True if activation is None else False
        if init_zero:
            gamma_init = tf.zeros_initializer()
        else:
            gamma_init = tf.ones_initializer()
    else:
        gamma_init = tf.constant_initializer(constant_init)
    if use_crossgpu_bn:
        output = crossgpu_batch_norm(inputs=inputs,
                                     decay=decay,
                                     epsilon=epsilon,
                                     is_training=is_training,
                                     data_format=data_format,
                                     trainable=bn_trainable,
                                     gamma_initializer=gamma_init,
                                     scope=bn_op_name,
                                     reuse=reuse_flag,
                                     num_dev=num_dev)
    elif use_crtpu_bn:
        axis = 1 if data_format == 'channels_first' else 3
        crtpu_bn_func = CRTPUBatchNormalization(axis=axis,
                                                momentum=decay,
                                                epsilon=epsilon,
                                                center=True,
                                                scale=True,
                                                trainable=bn_trainable,
                                                gamma_initializer=gamma_init,
                                                name=bn_op_name,
                                                _reuse=reuse_flag,
                                                _scope=bn_op_name)
        output = crtpu_bn_func(inputs, training=is_training)
    else:
        axis = 1 if data_format == 'channels_first' else 3
        output = tf.layers.batch_normalization(inputs=inputs,
                                               axis=axis,
                                               momentum=decay,
                                               epsilon=epsilon,
                                               center=True,
                                               scale=True,
                                               training=is_training,
                                               trainable=bn_trainable,
                                               fused=True,
                                               gamma_initializer=gamma_init,
                                               name=bn_op_name,
                                               reuse=reuse_flag)
    return output
def conv(inp,
         out_depth,
         ksize=[3,3],
         strides=[1,1,1,1],
         data_format='channels_last',
         padding='SAME',
         kernel_init='xavier',
         kernel_init_kwargs=None,
         use_bias=True,
         bias=0,
         weight_decay=None,
         activation='relu',
         batch_norm=False,
         group_norm=False,
         num_groups=32,
         is_training=False,
         batch_norm_decay=0.9,
         batch_norm_epsilon=1e-5,
         batch_norm_gamma_init=None,
         init_zero=None,
         dropout=None,
         dropout_seed=0,
         time_sep=False,
         time_suffix=None,
         bn_trainable=True,
         crossdevice_bn_kwargs={},
         name='conv'
         ):
    '''
    2-D convolution layer: optional input dropout, conv2d, optional bias,
    optional batch/group norm, optional activation (in that order).

    Args:
        inp: 4-D input tensor; the last dimension is read as channels.
        out_depth: number of output channels; None means "same as input".
        ksize: int or [kh, kw] kernel size.
        strides: int or 4-element strides list for tf.nn.conv2d.
        use_bias / bias: add a learnable bias with this initial value
            (bias is forced off when a norm layer is used, since the norm
            has its own shift).
        weight_decay: L2 regularization scale for kernel/bias (None -> 0).
        activation: name of a tf.nn activation (e.g. 'relu'), or None.
        batch_norm / group_norm: apply batchnorm_corr / groupnorm after the
            conv; batch_norm takes precedence when both are set.
        dropout: keep probability for input dropout (None disables it).
        time_sep / time_suffix: create per-timestep BN variables when set.
        crossdevice_bn_kwargs: forwarded to batchnorm_corr.

    Returns:
        The layer output tensor.
    '''
    # NOTE(review): ksize/strides/crossdevice_bn_kwargs are mutable default
    # arguments; safe only as long as callers never mutate them in place.
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None
    # A norm layer provides its own shift parameter, so a conv bias is redundant.
    if batch_norm or group_norm:
        use_bias = False
    if weight_decay is None:
        weight_decay = 0.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]
    if out_depth is None:
        out_depth = in_depth
    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    if use_bias:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
    # ops
    # Dropout is applied to the *input* of the convolution.
    if dropout is not None:
        inp = tf.nn.dropout(inp, keep_prob=dropout, seed=dropout_seed, name='dropout')
    conv = tf.nn.conv2d(inp, kernel,
                        strides=strides,
                        padding=padding)
    if use_bias:
        output = tf.nn.bias_add(conv, biases, name=name)
    else:
        output = tf.identity(conv, name=name)
    if batch_norm:
        output = batchnorm_corr(inputs=output,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                constant_init=batch_norm_gamma_init,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    elif group_norm:
        output = groupnorm(inputs=output,
                           G=num_groups,
                           data_format=data_format,
                           weight_decay=weight_decay,
                           gamma_init=(0.0 if init_zero else 1.0),
                           epsilon=batch_norm_epsilon)
    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def conv_bnf(inp,
             out_depth,
             ksize=[3,3],
             strides=[1,1,1,1],
             padding='SAME',
             kernel_init='xavier',
             kernel_init_kwargs=None,
             bias=0,
             weight_decay=None,
             activation='relu6',
             batch_norm=True,
             is_training=True,
             batch_norm_decay=0.9,
             batch_norm_epsilon=1e-5,
             init_zero=None,
             data_format='channels_last',
             time_sep=False,
             time_suffix=None,
             bn_trainable=True,
             crossdevice_bn_kwargs={},
             name='conv_bnf'
             ):
    '''
    2-D convolution followed by batch norm (the "conv + BN" block used by
    the MobileNet-style layers), with an optional activation at the end.
    When batch_norm is False, a learnable bias is added instead of BN.

    Args:
        inp: 4-D input tensor; the last dimension is read as channels.
        out_depth: number of output channels.
        ksize: int or [kh, kw] kernel size.
        strides: int or 4-element strides list for tf.nn.conv2d.
        bias: initial bias value (only used when batch_norm is False).
        weight_decay: L2 regularization scale for the kernel (None -> 0).
        activation: name of a tf.nn activation (e.g. 'relu6'), or None.
        batch_norm: apply batchnorm_corr to the convolution output.
        time_sep / time_suffix: create per-timestep BN variables when set.
        crossdevice_bn_kwargs: forwarded to batchnorm_corr.

    Returns:
        The layer output tensor.
    '''
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None
    if weight_decay is None:
        weight_decay = 0.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]
    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    # ops
    conv = tf.nn.conv2d(inp, kernel,
                        strides=strides,
                        padding=padding)
    if batch_norm:
        # if activation is none, should use zeros; else ones
        # BUG FIX: this previously passed `inputs=output`, but `output` is not
        # defined yet on this path, raising NameError whenever batch_norm=True
        # (the default). The batch norm must consume the convolution result.
        output = batchnorm_corr(inputs=conv,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    else:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
        output = tf.nn.bias_add(conv, biases, name=name)
    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def depthsep_conv(inp,
                  out_depth,
                  multiplier=1,
                  ksize=3,
                  strides=1,
                  dep_padding='SAME',
                  sep_padding='SAME',
                  batch_norm = True,
                  is_training=True,
                  name='depthsep_conv',
                  *args,
                  **kwargs
                  ):
    """Depthwise-separable convolution: a depthwise conv followed by a 1x1
    pointwise conv (conv_bnf), each built in its own variable scope.
    Batch norm is applied after each stage, following the MobileNet paper.
    """
    with tf.variable_scope('depthwise_conv'):
        depthwise_out = depth_conv(inp,
                                   multiplier=multiplier,
                                   ksize=ksize,
                                   strides=strides,
                                   padding=dep_padding,
                                   batch_norm=batch_norm,
                                   is_training=is_training,
                                   *args, **kwargs)
    with tf.variable_scope('pointwise_conv'):
        # we batch norm first according to mobilenet paper
        pointwise_out = conv_bnf(depthwise_out,
                                 out_depth=out_depth,
                                 ksize=1,
                                 strides=1,
                                 padding=sep_padding,
                                 batch_norm=batch_norm,
                                 is_training=is_training,
                                 *args, **kwargs)
    return pointwise_out
def depth_conv(inp,
               multiplier=1,
               out_depth=None,
               ksize=3,
               strides=1,
               padding='SAME',
               kernel_init='xavier',
               kernel_init_kwargs=None,
               activation='relu6',
               weight_decay=None,
               batch_norm = False,
               group_norm=False,
               num_groups=32,
               use_bias=False,
               is_training=True,
               batch_norm_decay=0.9,
               batch_norm_epsilon=1e-5,
               batch_norm_gamma_init=None,
               init_zero=None,
               data_format='channels_last',
               time_sep=False,
               time_suffix=None,
               bn_trainable=True,
               crossdevice_bn_kwargs={},
               name='depth_conv'
               ):
    '''
    Depthwise 2-D convolution with optional batch/group norm or bias, then
    an optional activation.

    Args:
        inp: 4-D input tensor; the last dimension is read as channels.
        multiplier: channel multiplier for tf.nn.depthwise_conv2d; the
            output has in_depth * multiplier channels.
        out_depth: NOTE(review): this parameter is ignored — it is
            unconditionally recomputed below as multiplier * in_depth (the
            channel count depthwise_conv2d actually produces) and only used
            to shape the optional bias. Confirm whether the parameter should
            be removed from the signature.
        ksize: int or [kh, kw] kernel size.
        strides: int or 4-element strides list.
        activation: name of a tf.nn activation (e.g. 'relu6'), or None.
        batch_norm / group_norm / use_bias: mutually exclusive post-conv
            options, checked in that order of precedence.
        crossdevice_bn_kwargs: forwarded to batchnorm_corr.

    Returns:
        The layer output tensor.
    '''
    # assert out_shape is not None
    if time_sep:
        assert time_suffix is not None
    if weight_decay is None:
        weight_decay = 0.
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(strides, int):
        strides = [1, strides, strides, 1]
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    in_depth = inp.get_shape().as_list()[-1]
    # Depthwise conv always emits in_depth * multiplier channels.
    out_depth = multiplier * in_depth
    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[ksize[0], ksize[1], in_depth, multiplier],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    output = tf.nn.depthwise_conv2d(inp, kernel,
                                    strides=strides,
                                    padding=padding)
    if batch_norm:
        output = batchnorm_corr(inputs=output,
                                is_training=is_training,
                                data_format=data_format,
                                decay = batch_norm_decay,
                                epsilon = batch_norm_epsilon,
                                constant_init=batch_norm_gamma_init,
                                init_zero=init_zero,
                                activation=activation,
                                time_suffix=time_suffix,
                                bn_trainable=bn_trainable,
                                **crossdevice_bn_kwargs)
    elif group_norm:
        output = groupnorm(inputs=output,
                           G=num_groups,
                           data_format=data_format,
                           weight_decay=weight_decay,
                           gamma_init=(0.0 if init_zero else 1.0),
                           epsilon=batch_norm_epsilon)
    elif use_bias:
        # NOTE(review): bias is initialized to 1.0 here (not the 0 default
        # used by conv()) — confirm this asymmetry is intentional.
        init = initializer(kind='constant', value=1.0)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
        output = tf.nn.bias_add(output, biases, name=name)
    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    return output
def fc(inp,
       out_depth,
       kernel_init='xavier',
       kernel_init_kwargs=None,
       use_bias=True,
       bias=1,
       weight_decay=None,
       activation='relu',
       batch_norm=False,
       is_training=False,
       batch_norm_decay=0.9,
       batch_norm_epsilon=1e-5,
       init_zero=None,
       dropout=None,
       dropout_seed=0,
       time_sep=False,
       time_suffix=None,
       bn_trainable=True,
       crossdevice_bn_kwargs={},
       name='fc'):
    '''
    Fully-connected layer: flatten the input, optional dropout, matmul,
    optional bias, optional activation, then optional batch norm.

    Args:
        inp: input tensor; flattened to [batch, -1] before the matmul.
        out_depth: number of output units.
        use_bias / bias: add a learnable bias with this initial value
            (forced off when batch_norm is True).
        weight_decay: L2 regularization scale for kernel/bias (None -> 0).
        activation: name of a tf.nn activation, or None.
        batch_norm: apply batch normalization (after the activation, unlike
            conv(), which normalizes before it).
        dropout: keep probability for dropout on the flattened input.
        time_sep / time_suffix: create per-timestep BN variables when set.
        crossdevice_bn_kwargs: selects/configures the cross-GPU or TPU BN
            backends via 'use_crossgpu_bn' / 'use_crtpu_bn' keys.

    Returns:
        The layer output tensor.
    '''
    if batch_norm:
        # Batch norm supplies its own shift, so a bias is redundant.
        use_bias = False
    if weight_decay is None:
        weight_decay = 0.
    # assert out_shape is not None
    if kernel_init_kwargs is None:
        kernel_init_kwargs = {}
    # Flatten everything but the batch dimension.
    resh = tf.reshape(inp, [inp.get_shape().as_list()[0], -1], name='reshape')
    in_depth = resh.get_shape().as_list()[-1]
    # weights
    init = initializer(kernel_init, **kernel_init_kwargs)
    kernel = tf.get_variable(initializer=init,
                             shape=[in_depth, out_depth],
                             dtype=tf.float32,
                             regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                             name='weights')
    if use_bias:
        init = initializer(kind='constant', value=bias)
        biases = tf.get_variable(initializer=init,
                                 shape=[out_depth],
                                 dtype=tf.float32,
                                 regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                                 name='bias')
    # ops
    if dropout is not None:
        resh = tf.nn.dropout(resh, keep_prob=dropout, seed=dropout_seed, name='dropout')
    fcm = tf.matmul(resh, kernel)
    if use_bias:
        output = tf.nn.bias_add(fcm, biases, name=name)
    else:
        output = tf.identity(fcm, name=name)
    if activation is not None:
        output = getattr(tf.nn, activation)(output, name=activation)
    if batch_norm:
        # if activation is none, should use zeros; else ones
        if init_zero is None:
            init_zero = True if activation is None else False
        if init_zero:
            gamma_init = tf.zeros_initializer()
        else:
            gamma_init = tf.ones_initializer()
        if time_suffix is not None:
            bn_op_name = "post_conv_BN_" + time_suffix
            reuse_flag = tf.AUTO_REUSE # create bn variables per timestep if they do not exist
        else:
            bn_op_name = "post_conv_BN"
            reuse_flag = None
        use_crossgpu_bn = crossdevice_bn_kwargs.get('use_crossgpu_bn', False)
        use_crtpu_bn = crossdevice_bn_kwargs.get('use_crtpu_bn', False)
        if use_crossgpu_bn:
            # Strip the selector keys; the rest are passed straight through.
            cg_bn_kw = copy.deepcopy(crossdevice_bn_kwargs)
            cg_bn_kw.pop('use_crossgpu_bn', False)
            cg_bn_kw.pop('use_crtpu_bn', False)
            # BUG FIX: this previously passed `inputs=inputs`, but no name
            # `inputs` exists in this function (NameError on this branch).
            # The batch norm must consume the layer output computed above.
            # NOTE(review): batchnorm_corr calls this backend with
            # `is_training=`, while here `training=` is used — confirm which
            # keyword crossgpu_batch_norm actually accepts.
            output = crossgpu_batch_norm(inputs=output,
                                         decay=batch_norm_decay,
                                         epsilon=batch_norm_epsilon,
                                         training=is_training,
                                         trainable=bn_trainable,
                                         gamma_initializer=gamma_init,
                                         scope=bn_op_name,
                                         reuse=reuse_flag,
                                         **cg_bn_kw)
        elif use_crtpu_bn:
            crtpu_bn_func = CRTPUBatchNormalization(axis=-1,
                                                    momentum=batch_norm_decay,
                                                    epsilon=batch_norm_epsilon,
                                                    center=True,
                                                    scale=True,
                                                    trainable=bn_trainable,
                                                    gamma_initializer=gamma_init,
                                                    name=bn_op_name,
                                                    _reuse=reuse_flag,
                                                    _scope=bn_op_name)
            output = crtpu_bn_func(output, training=is_training)
        else:
            output = tf.layers.batch_normalization(inputs=output,
                                                   axis=-1,
                                                   momentum=batch_norm_decay,
                                                   epsilon=batch_norm_epsilon,
                                                   center=True,
                                                   scale=True,
                                                   training=is_training,
                                                   trainable=bn_trainable,
                                                   fused=True,
                                                   gamma_initializer=gamma_init,
                                                   name=bn_op_name,
                                                   reuse=reuse_flag)
    return output
def global_pool(inp, kind='avg', keep_dims=False, name=None):
    """Pool each feature map of an NHWC tensor down to a single value.

    Args:
        inp: 4-D input tensor.
        kind: 'avg' or 'max'.
        keep_dims: if True return a [B, 1, 1, C] tensor, else flatten
            to [B, C].
        name: output op name (defaults to 'global_<kind>_pool').

    Returns:
        The pooled tensor.
    """
    if kind not in ('max', 'avg'):
        raise ValueError('Only global avg or max pool is allowed, but'
                         'you requested {}.'.format(kind))
    if name is None:
        name = 'global_{}_pool'.format(kind)
    # Pool over the full spatial extent in one op.
    height, width = inp.get_shape().as_list()[1:3]
    pool_fn = getattr(tf.nn, kind + '_pool')
    pooled = pool_fn(inp,
                     ksize=[1, height, width, 1],
                     strides=[1, 1, 1, 1],
                     padding='VALID')
    if keep_dims:
        return tf.identity(pooled, name=name)
    batch = pooled.get_shape().as_list()[0]
    return tf.reshape(pooled, [batch, -1], name=name)
def avg_pool2d(inp, kernel_size, stride=2, padding='VALID', name=None):
    """Thin wrapper around tf.contrib.layers.avg_pool2d.

    Args:
        inp: 4-D input tensor.
        kernel_size: pooling window size.
        stride: pooling stride.
        padding: 'VALID' or 'SAME'.
        name: NOTE(review): computed but never passed to the underlying op,
            so it currently has no effect — confirm whether it should be
            forwarded (e.g. as the contrib layer's scope).

    Returns:
        The pooled tensor.
    """
    if name is None:
        name = 'avg_pool2d'
    output = tf.contrib.layers.avg_pool2d(inp, kernel_size=kernel_size, stride=stride, padding=padding)
    return output
class ConvNet(object):
    """Lazy builder for simple convolutional networks.

    Attribute access is intercepted (see `__getattribute__`): calling e.g.
    `net.conv(64, 3, layer='conv1')` does not build any graph — it records
    the call (function name + kwargs) under the current layer in
    `self.params`. Calling the ConvNet instance on an input tensor then
    replays the recorded calls in order inside per-layer / per-op variable
    scopes (see `__call__`).
    """
    # Methods resolved normally instead of being wrapped and recorded.
    INTERNAL_FUNC = ['arg_scope', '_func_wrapper', '_val2list', 'layer',
                     '_reuse_scope_name', '__call__', '_get_func']
    # Layer functions defined in this module; anything else resolves to tf.nn.
    CUSTOM_FUNC = [conv, fc, global_pool, conv_bnf, depthsep_conv, depth_conv, avg_pool2d]

    def __init__(self, defaults=None, name=None):
        """
        A quick convolutional neural network constructor
        This is wrapper over many tf.nn functions for a quick construction of
        a standard convolutional neural network that uses 2d convolutions, pooling
        and fully-connected layers, and most other tf.nn methods.
        It also stores layers and their parameters easily accessible per
        tfutils' approach of saving everything.
        Kwargs:
            - defaults
                Default kwargs values for functions. Complimentary to `arg_scope
            - name (default: '')
                If '', then the existing scope is used.
        """
        self._defaults = defaults if defaults is not None else {}
        self.name = name
        self.state = None
        self.output = None
        self._layer = None          # current layer name, set via `layer()`
        self.layers = OrderedDict() # layer name -> output tensor (after build)
        self.params = OrderedDict() # layer name -> OrderedDict(op name -> kwargs)
        self._scope_initialized = False

    def __getattribute__(self, attr):
        """Return real attributes/internal methods as-is; wrap anything else
        in the recording wrapper so `net.<op>(...)` is captured lazily."""
        attrs = object.__getattribute__(self, '__dict__')
        internal_func = object.__getattribute__(self, 'INTERNAL_FUNC')
        if attr in attrs:  # is it an attribute?
            return attrs[attr]
        elif attr in internal_func:  # is it one of the internal functions?
            return object.__getattribute__(self, attr)
        else:
            func = self._get_func(attr)
            return self._func_wrapper(func)

    def _get_func(self, attr):
        """Resolve `attr` to one of this module's layer functions, falling
        back to the tf.nn namespace."""
        custom_func = object.__getattribute__(self, 'CUSTOM_FUNC')
        custom_func_names = [f.__name__ for f in custom_func]
        if attr in custom_func_names:  # is it one of the custom functions?
            func = custom_func[custom_func_names.index(attr)]
        else:
            func = getattr(tf.nn, attr)  # ok, so it is a tf.nn function
        return func

    def _func_wrapper(self, func):
        """
        A wrapper on top of *any* function that is called.
        - Pops `inp` and `layer` from kwargs,
        - All args are turned into kwargs
        - Default values from arg_scope are set
        - Sets the name in kwargs to func.__name__ if not specified
        - Expands `strides` from an int or list inputs for
          all functions and expands `ksize` for pool functions.
        If `layer` is not None, a new scope is created, else the existing scope
        is reused.
        Finally, all params are stored.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            kwargs['func_name'] = func.__name__
            # convert args to kwargs
            varnames = inspect.getargspec(func).args
            for i, arg in enumerate(args):
                kwargs[varnames[i+1]] = arg  # skip the first (inputs)
            layer = kwargs.pop('layer', self._layer)
            if layer not in self.params:
                self.params[layer] = OrderedDict()
            # update kwargs with default values defined by user
            if func.__name__ in self._defaults:
                kwargs.update(self._defaults[func.__name__])
            if 'name' not in kwargs:
                fname = func.__name__
                # BUG FIX: this uniquifying check was duplicated as two
                # identical nested `if` statements; one check suffices.
                if fname in self.params[layer]:
                    i = 1
                    while fname + '_{}'.format(i) in self.params[layer]:
                        i += 1
                    fname += '_{}'.format(i)
                kwargs['name'] = fname
            # Normalize int/pair sizes to 4-element NHWC lists.
            spec = ['avg_pool', 'max_pool', 'max_pool_with_argmax']
            if 'ksize' in kwargs and func.__name__ in spec:
                kwargs['ksize'] = self._val2list(kwargs['ksize'])
            if 'strides' in kwargs:
                kwargs['strides'] = self._val2list(kwargs['strides'])
            # Record the call; graph construction happens in __call__.
            self.params[layer][kwargs['name']] = kwargs
        return wrapper

    def __call__(self, inp=None):
        """Build the recorded network on `inp` and return the final output.

        Each recorded op may override its input via a stored 'inp' kwarg;
        otherwise it consumes the previous op's output.
        """
        output = inp
        for layer, params in self.params.items():
            with tf.variable_scope(layer):
                for func_name, kwargs in params.items():
                    with tf.variable_scope(func_name):
                        output = kwargs.get('inp', output)
                        if output is None:
                            raise ValueError('Layer {} function {} got None as input'.format(layer, func_name))
                        kw = {k: v for k, v in kwargs.items() if k not in ['func_name', 'inp']}
                        func = self._get_func(kwargs['func_name'])
                        output = tf.identity(func(output, **kw), name='output')
                self.layers[layer] = tf.identity(output, name='output')
        self.output = output
        return output

    def _val2list(self, value):
        """Expand an int or [h, w] pair into a 4-element NHWC size list."""
        if isinstance(value, int):
            out = [1, value, value, 1]
        elif len(value) == 2:
            out = [1, value[0], value[1], 1]
        else:
            out = value
        return out

    @contextmanager
    def arg_scope(self, defaults):
        """
        Sets the arg_scope.
        Pass a dict of {<func_name>: {<arg_name>: <arg_value>, ...}, ...}. These
        values will then override the default values for the specified functions
        whenever that function is called.
        """
        self._defaults = defaults
        yield
        self._defaults = {}

    @contextmanager
    def layer(self, name):
        """
        Sets the scope. Can be used with `with`.
        """
        if name is None or name == '':
            raise ValueError('Layer name cannot be None or an empty string')
        self._layer = name
        yield

    def _reuse_scope_name(self, name):
        """Prefix `name` with the current graph name stack so an
        already-created scope is reused."""
        graph = tf.get_default_graph()
        if graph._name_stack is not None and graph._name_stack != '':
            name = graph._name_stack + '/' + name + '/'  # this will reuse the already-created scope
        else:
            name += '/'
        return name
def mnist(train=True, seed=0):
    """Describe a three-layer MLP for MNIST (128 -> 32 -> 10 logits).

    Returns a ConvNet instance; build the graph by calling it on an input
    tensor. The `train` flag is currently unused (dropout is disabled).
    """
    net = ConvNet()
    fc_defaults = {'fc': {'kernel_init': 'truncated_normal',
                          'kernel_init_kwargs': {'stddev': .01, 'seed': seed},
                          'dropout': None, 'batch_norm': False}}
    with net.arg_scope(fc_defaults):
        net.fc(128, layer='hidden1')
        net.fc(32, layer='hidden2')
        net.fc(10, activation=None, layer='softmax_linear')
    return net
def alexnet(train=True, norm=True, seed=0, **kwargs):
    """Describe AlexNet with the ConvNet builder.

    Args:
        train: enable dropout (keep prob .5) on fc6/fc7 when True.
        norm: insert local response normalization after conv1 and conv2.
        seed: seed for the kernel initializers.

    Returns:
        A ConvNet instance; build the graph by calling it on an input tensor.
    """
    defaults = {'conv': {'batch_norm': False,
                         'kernel_init': 'xavier',
                         'kernel_init_kwargs': {'seed': seed},
                         # BUG FIX: 'weight_decay' previously sat one level
                         # up, as a sibling of the 'conv' entry. ConvNet
                         # defaults are looked up per function name, so a
                         # top-level 'weight_decay' key was dead config; it
                         # belongs inside the conv defaults (mirroring the
                         # fc defaults below).
                         'weight_decay': .0005},
                'max_pool': {'padding': 'SAME'},
                'fc': {'batch_norm': False,
                       'kernel_init': 'truncated_normal',
                       'kernel_init_kwargs': {'stddev': .01, 'seed': seed},
                       'weight_decay': .0005,
                       'dropout_seed': 0}}
    m = ConvNet(defaults=defaults)
    dropout = .5 if train else None

    m.conv(96, 11, 4, padding='VALID', layer='conv1')
    if norm:
        m.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv1')
    m.max_pool(3, 2, layer='conv1')

    m.conv(256, 5, 1, layer='conv2')
    if norm:
        m.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv2')
    m.max_pool(3, 2, layer='conv2')

    m.conv(384, 3, 1, layer='conv3')
    m.conv(384, 3, 1, layer='conv4')
    m.conv(256, 3, 1, layer='conv5')
    m.max_pool(3, 2, layer='conv5')

    m.fc(4096, dropout=dropout, bias=.1, layer='fc6')
    m.fc(4096, dropout=dropout, bias=.1, layer='fc7')
    m.fc(1000, activation=None, dropout=None, bias=0, layer='fc8')
    return m
def mnist_tfutils(inputs, train=True, **kwargs):
    """tfutils entry point: build the MNIST MLP on inputs['images'] and
    return (output tensor, recorded layer params)."""
    model = mnist(train=train)
    output = model(inputs['images'])
    return output, model.params
def alexnet_tfutils(inputs, **kwargs):
    """tfutils entry point: build AlexNet on inputs['images'] and return
    (output tensor, recorded layer params)."""
    model = alexnet(**kwargs)
    output = model(inputs['images'])
    return output, model.params
|
<gh_stars>10-100
import torch
import torch.nn as nn
from lib.modules.layers import FullyConnectedLayer
from network import Network
class FullyConnectedNetwork(Network):
    """
    Fully-connected neural network, i.e. multi-layered perceptron.
    Args:
        network_config (dict): dictionary containing network configuration parameters,
                               keys should include n_in, n_units, n_layers,
                               non_linearity, connection_type, batch_norm,
                               weight_norm, dropout
    """
    def __init__(self, network_config):
        super(FullyConnectedNetwork, self).__init__(network_config)
        self._construct(network_config)

    def _construct(self, network_config):
        """
        Method to construct the network from the network_config dictionary parameters.

        Builds `n_layers` FullyConnectedLayer modules plus, for 'highway',
        one sigmoid gate per layer and, for 'residual'/'highway', an initial
        embedding layer. Sets `self.n_out` to the width of the final output.
        """
        self.layers = nn.ModuleList([])
        self.gates = nn.ModuleList([])
        if 'connection_type' in network_config:
            connection_types = ['sequential', 'residual', 'highway', 'concat_input', 'concat']
            assert network_config['connection_type'] in connection_types, 'Connection type not found.'
            self.connection_type = network_config['connection_type']
        else:
            self.connection_type = 'sequential'
        n_in = network_config['n_in']
        # Keep the original input width around for 'concat_input', where each
        # layer's input is [original input, previous layer output].
        n_in_orig = network_config['n_in']
        n_units = network_config['n_units']
        batch_norm = False
        if 'batch_norm' in network_config:
            if network_config['batch_norm']:
                batch_norm = True
        weight_norm = False
        if 'weight_norm' in network_config:
            if network_config['weight_norm']:
                weight_norm = True
        non_linearity = 'linear'
        if 'non_linearity' in network_config:
            non_linearity = network_config['non_linearity']
        dropout = None
        if 'dropout' in network_config:
            dropout = network_config['dropout']
        output_size = 0
        if self.connection_type in ['residual', 'highway']:
            # intial linear layer to embed to correct size
            self.initial_fc = FullyConnectedLayer({'n_in': n_in,
                                                   'n_out': n_units,
                                                   'batch_norm': batch_norm,
                                                   'weight_norm': weight_norm})
        for _ in range(network_config['n_layers']):
            layer = FullyConnectedLayer({'n_in': n_in, 'n_out': n_units,
                                         'non_linearity': non_linearity,
                                         'batch_norm': batch_norm,
                                         'weight_norm': weight_norm,
                                         'dropout': dropout})
            self.layers.append(layer)
            if self.connection_type == 'highway':
                gate = FullyConnectedLayer({'n_in': n_in, 'n_out': n_units,
                                            'non_linearity': 'sigmoid',
                                            'batch_norm': batch_norm,
                                            'weight_norm': weight_norm})
                self.gates.append(gate)
            # Track the width that feeds the *next* layer; growth depends on
            # the connection type (concat variants widen every layer).
            if self.connection_type in ['sequential', 'residual', 'highway']:
                n_in = n_units
            elif self.connection_type == 'concat_input':
                n_in = n_units + n_in_orig
            elif self.connection_type == 'concat':
                n_in += n_units
            output_size = n_in
        self.n_out = output_size

    def forward(self, input):
        """
        Method for forward computation.

        Routes the input through the layers according to connection_type:
        plain chaining, residual sums, gated highway mixing, or the two
        concatenation schemes.
        """
        input_orig = input.clone()
        for layer_num, layer in enumerate(self.layers):
            if self.connection_type == 'sequential':
                input = layer(input)
            elif self.connection_type == 'residual':
                if layer_num == 0:
                    # First hop embeds the input to n_units before adding.
                    input = self.initial_fc(input) + layer(input)
                else:
                    input = input + layer(input)
            elif self.connection_type == 'highway':
                gate = self.gates[layer_num]
                if layer_num == 0:
                    input = gate(input) * self.initial_fc(input) + (1 - gate(input)) * layer(input)
                else:
                    input = gate(input) * input + (1 - gate(input)) * layer(input)
            elif self.connection_type == 'concat_input':
                input = torch.cat((input_orig, layer(input)), dim=1)
            elif self.connection_type == 'concat':
                input = torch.cat((input, layer(input)), dim=1)
        return input
|
<gh_stars>10-100
#
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import serializers
from django.conf import settings as django_settings
import re
from datetime import datetime
import six
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.component.models import ReleaseComponentType, GlobalComponent
from pdc.apps.componentbranch.models import (
ComponentBranch, SLA, SLAToComponentBranch)
from pdc.apps.common.serializers import StrictSerializerMixin
def is_branch_active(branch):
    """
    Checks to see if the branch is active by seeing if there are valid SLAs
    tied to the branch
    :param branch: a ComponentBranch object
    :return: a boolean
    """
    today = datetime.utcnow().date()
    # A branch stays active as long as at least one of its SLAs has not
    # reached its EOL date yet.
    return any(sla.eol >= today for sla in branch.slas.all())
class BranchNameField(serializers.Field):
    """
    A serializer field that verifies the branch's name matches policy
    """
    doc_format = "string"

    @staticmethod
    def bad_branch_name(branch_name):
        """Return a truthy value when the name hits the configured blacklist
        regex; an unset blacklist means no name is rejected."""
        blacklist = django_settings.COMPONENT_BRANCH_NAME_BLACKLIST_REGEX
        return blacklist and re.match(blacklist, branch_name)

    def to_representation(self, obj):
        """Branch names are plain strings and serialize as-is."""
        return obj

    def to_internal_value(self, data):
        """Validate an incoming branch name: must be a text string, at most
        300 characters, and must not match the blacklist regex."""
        error = None
        if not isinstance(data, six.text_type):
            error = ('A string was not supplied. The type was "{0}".'
                     .format(type(data).__name__))
        elif len(data) > 300:
            error = 'The string must be less than 300 characters'
        elif self.bad_branch_name(data):
            error = ('The branch name is not allowed based on the regex "{0}"'
                     .format(django_settings.COMPONENT_BRANCH_NAME_BLACKLIST_REGEX))
        if error is not None:
            raise serializers.ValidationError(error)
        return data
class SLASerializer(StrictSerializerMixin,
                    serializers.ModelSerializer):
    """
    Serializer for the SLA model
    """
    class Meta:
        model = SLA
        fields = ('id', 'name', 'description')

    def update(self, instance, validated_data):
        """Delegate to the parent update, but reject any attempt to rename
        the SLA — names are immutable by policy."""
        requested_name = validated_data.get('name', instance.name)
        if requested_name != instance.name:
            error_msg = 'You may not modify the SLA\'s name due to policy'
            raise serializers.ValidationError({'name': [error_msg]})
        return super(SLASerializer, self).update(instance, validated_data)
class SLAToComponentBranchSerializerForComponentBranch(
        serializers.ModelSerializer):
    """
    A serializer for the SLAToComponentBranch model to be used in the
    ComponentBranch serializer

    Read-only: used purely to embed each branch's SLA/EOL pairs in the
    ComponentBranch representation.
    """
    # Render the SLA by its unique name rather than its primary key.
    sla = ChoiceSlugField(slug_field='name', read_only=True)
    # End-of-life date of this SLA for the branch.
    eol = serializers.DateField(read_only=True)

    class Meta:
        model = SLAToComponentBranch
        fields = ('id', 'sla', 'eol')
class ComponentBranchSerializer(StrictSerializerMixin,
                                serializers.ModelSerializer):
    """
    A serializer for the ComponentBranch model
    """
    # Branch name validated against the policy blacklist (see BranchNameField).
    name = BranchNameField()
    # Related objects are referenced by their unique names, not PKs.
    global_component = serializers.SlugRelatedField(
        slug_field='name', queryset=GlobalComponent.objects.all())
    type = ChoiceSlugField(
        slug_field='name', queryset=ReleaseComponentType.objects.all())
    critical_path = serializers.BooleanField(default=False)
    # Embedded, read-only list of the branch's SLA/EOL pairs.
    slas = SLAToComponentBranchSerializerForComponentBranch(
        many=True, read_only=True)
    # Computed field: True while any SLA has not reached EOL.
    active = serializers.SerializerMethodField('is_active')

    def is_active(self, branch):
        """
        Calls the is_branch_active function to determine if the branch is still
        active
        :param branch: a ComponentBranch object
        :return: a boolean
        """
        return is_branch_active(branch)

    class Meta:
        model = ComponentBranch
        fields = ('id', 'global_component', 'name', 'slas', 'type', 'active',
                  'critical_path')

    def update(self, instance, validated_data):
        """
        Override the update function to not allow a user to modify the branch
        name
        """
        if 'name' in validated_data and instance.name != validated_data['name']:
            raise serializers.ValidationError({
                'name': ['You may not modify the branch\'s name due to policy']
            })
        return super(ComponentBranchSerializer, self).update(
            instance, validated_data)
class ComponentBranchSerializerWithoutSLA(serializers.Serializer):
    """
    A serializer for the ComponentBranch model to be used in the
    SLAToComponentBranch serializer

    Mirrors ComponentBranchSerializer minus the embedded `slas` field, to
    avoid recursion when nested inside SLAToComponentBranchSerializer.
    """
    id = serializers.IntegerField(read_only=True)
    # Branch name validated against the policy blacklist (see BranchNameField).
    name = BranchNameField()
    # Related objects are referenced by their unique names, not PKs.
    global_component = serializers.SlugRelatedField(
        slug_field='name', queryset=GlobalComponent.objects.all())
    type = ChoiceSlugField(
        slug_field='name', queryset=ReleaseComponentType.objects.all())
    # Optional here (unlike the default=False on ComponentBranchSerializer);
    # the create path supplies the default when the branch is new.
    critical_path = serializers.BooleanField(required=False)
    # Computed field: True while any SLA has not reached EOL.
    active = serializers.SerializerMethodField('is_active')

    def is_active(self, branch):
        """
        Calls the is_branch_active function to determine if the branch is still
        active
        :param branch: a ComponentBranch object
        :return: a boolean
        """
        return is_branch_active(branch)
class SLAToComponentBranchSerializer(StrictSerializerMixin,
                                     serializers.Serializer):
    """
    A serializer for the SLAToComponentBranch model that allows branch creation
    """
    id = serializers.IntegerField(read_only=True)
    sla = ChoiceSlugField(slug_field='name', queryset=SLA.objects.all())
    # Nested branch (no SLA list, to avoid recursion); may describe a branch
    # that does not exist yet — create() will create it on the fly.
    branch = ComponentBranchSerializerWithoutSLA()
    eol = serializers.DateField()

    def create(self, validated_data):
        """
        Creates the SLAToComponentBranch entry based on the serialized data

        Resolves the referenced type/component/SLA by name (raising a
        field-specific ValidationError when missing), reuses an existing
        branch when one matches, otherwise creates it, and finally creates
        the SLA-to-branch mapping.
        """
        branch_component_type_name = validated_data['branch']['type']
        component_type = ReleaseComponentType.objects.filter(
            name=branch_component_type_name).first()
        if not component_type:
            error_msg = (
                'The specified ReleaseComponentType "{0}" does not exist'
                .format(branch_component_type_name))
            raise serializers.ValidationError({'branch.type': [error_msg]})
        branch_global_component_name = \
            validated_data['branch']['global_component']
        branch_global_component = GlobalComponent.objects.filter(
            name=branch_global_component_name).first()
        if not branch_global_component:
            error_msg = ('The specified GlobalComponent "{0}" does not exist'
                         .format(branch_global_component_name))
            raise serializers.ValidationError(
                {'branch.global_component': [error_msg]})
        branch_name = validated_data['branch']['name']
        branch_critical_path = validated_data['branch'].get('critical_path')
        branch = ComponentBranch.objects.filter(
            name=branch_name,
            type=component_type.id,
            global_component=branch_global_component.id).first()
        if branch:
            # The critical_path field is optional, but if it was supplied and it
            # doesn't match the found branch's critical_path field, raise an
            # error
            if branch_critical_path is not None and \
                    branch.critical_path != branch_critical_path:
                error_msg = ('The found branch\'s critical_path field did not '
                             'match the supplied value')
                raise serializers.ValidationError(
                    {'branch.critical_path': [error_msg]})
        else:
            # Set the default for this optional value when creating
            if branch_critical_path is None:
                branch_critical_path = False
            # Instantiated but not yet saved — saved below only after all
            # remaining validation passes.
            branch = ComponentBranch(
                name=branch_name,
                type=component_type,
                global_component=branch_global_component,
                critical_path=branch_critical_path,
            )
        sla_name = validated_data['sla']
        sla = SLA.objects.filter(name=sla_name).first()
        if not sla:
            error_msg = 'The specified SLA "{0}" does not exist'.format(
                sla_name)
            raise serializers.ValidationError({'sla': [error_msg]})
        # Reject duplicates of the same SLA/branch pairing.
        if SLAToComponentBranch.objects.filter(sla=sla.id, branch=branch.id).exists():
            error_msg = (
                'The SLA "{0}" tied to the component "{1}" and branch "{2}" '
                'already exists').format(sla.name, branch.global_component.name,
                                         branch.name)
            raise serializers.ValidationError({'branch': [error_msg]})
        # This tells us if the branch object was created or not
        if branch._state.adding:
            branch.save()
        eol = validated_data['eol']
        return SLAToComponentBranch.objects.create(
            sla=sla, branch=branch, eol=eol)

    def update(self, instance, validated_data):
        """
        Updates the SLAToComponentBranch entry based on the serialized data

        Only the SLA and EOL may change; any attempt to alter the nested
        branch is rejected.
        """
        branch = validated_data.get('branch', {})
        branch_name = branch.get('name')
        component_type = branch.get('type')
        global_component = branch.get('global_component')
        critical_path = branch.get('critical_path', None)
        if branch:
            if instance.branch.name != branch_name \
                    or instance.branch.type != component_type \
                    or instance.branch.global_component != global_component \
                    or (critical_path is not None and
                        instance.branch.critical_path is not critical_path):
                raise serializers.ValidationError({
                    'branch': ['The branch cannot be modified using this API']})
        # TODO: Should we not allow this value to change?
        instance.sla = validated_data.get('sla', instance.sla)
        instance.eol = validated_data.get('eol', instance.eol)
        instance.save()
        return instance
|
"""
Example "Arcade" library code.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.decorator_drawing_example
"""
# Library imports
import arcade
import random
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Drawing With Decorators Example"
window = arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
bird_list = []
def setup():
    """Create the birds, start the 60 Hz update timer, and enter the
    arcade event loop (this call blocks until the window closes)."""
    create_birds()
    # Schedule update() to run roughly every 1/60th of a second.
    arcade.schedule(update, 1 / 60)
    arcade.run()
def create_birds():
    """Populate the module-level bird_list with 10 birds.

    Each bird is a mutable [x, y] pair placed at a random position in the
    top half of the screen so that update() can move it later.
    """
    for _ in range(10):
        x = random.randrange(SCREEN_WIDTH)
        # random.randrange requires integer bounds; SCREEN_HEIGHT / 2 is a
        # float and raises at runtime, so use floor division instead.
        y = random.randrange(SCREEN_HEIGHT // 2, SCREEN_HEIGHT)
        bird_list.append([x, y])
def update(_delta_time):
    """
    This is run every 1/60 of a second or so. Do not draw anything
    in this function.

    Moves every bird to the right and wraps it to the left edge once it
    leaves the screen.
    """
    # Horizontal speed in pixels per tick.  bird[0] is the x coordinate,
    # so the original name "change_y" was misleading.
    change_x = 0.3
    for bird in bird_list:
        bird[0] += change_x
        # Wrap around: re-enter from just off the left edge.
        if bird[0] > SCREEN_WIDTH + 20:
            bird[0] = -20
@window.event
def on_draw():
    """
    This is called every time we need to update our screen. About 60
    times per second.
    Just draw things in this function, don't update where they are.
    """
    # Call our drawing functions.  Order matters: the background must be
    # drawn first so the birds and trees appear on top of it.
    draw_background()
    draw_birds()
    draw_trees()
def draw_background():
    """
    Draw the background: a sky-blue upper two thirds and a green lower third.
    """
    mid_x = SCREEN_WIDTH / 2
    two_thirds = SCREEN_HEIGHT * 2 / 3
    # Sky: rectangle centred at 2/3 height, spanning 2/3 of the height.
    arcade.draw_rectangle_filled(mid_x, two_thirds,
                                 SCREEN_WIDTH - 1, two_thirds,
                                 arcade.color.SKY_BLUE)
    # Ground: rectangle centred at 1/6 height, spanning 1/3 of the height.
    arcade.draw_rectangle_filled(mid_x, SCREEN_HEIGHT / 6,
                                 SCREEN_WIDTH - 1, SCREEN_HEIGHT / 3,
                                 arcade.color.DARK_SPRING_GREEN)
def draw_birds():
    """Render every bird currently stored in bird_list."""
    for bird_x, bird_y in bird_list:
        draw_bird(bird_x, bird_y)
def draw_bird(x, y):
    """
    Draw a bird using a couple arcs.
    """
    # Two 20x20 arc outlines, side by side, form the classic "m" bird shape.
    for x_offset, start_angle, end_angle in ((0, 0, 90), (40, 90, 180)):
        arcade.draw_arc_outline(x + x_offset, y, 20, 20,
                                arcade.color.BLACK, start_angle, end_angle)
def draw_trees():
    """Draw two horizontally staggered rows of pine trees."""
    top_row_y = SCREEN_HEIGHT / 3
    # Rows start at different x offsets (45 vs 65) so the trees interleave;
    # the lower row sits 120 pixels below the upper one.
    for first_x, row_y in ((45, top_row_y), (65, top_row_y - 120)):
        for x in range(first_x, SCREEN_WIDTH, 90):
            draw_pine_tree(x, row_y)
def draw_pine_tree(center_x, center_y):
    """
    Draw a pine tree at the specified location.

    Args:
        :center_x: x position of the tree center.
        :center_y: y position of the tree trunk center.
    """
    # Brown rectangular trunk centred on (center_x, center_y).
    arcade.draw_rectangle_filled(center_x, center_y, 20, 40,
                                 arcade.color.DARK_BROWN)
    # Green triangular canopy resting on top of the trunk.
    canopy_base_y = center_y + 20
    canopy = ((center_x - 40, canopy_base_y),
              (center_x, canopy_base_y + 100),
              (center_x + 40, canopy_base_y))
    arcade.draw_polygon_filled(canopy, arcade.color.DARK_GREEN)
# Run the example only when executed as a script, not when imported.
if __name__ == "__main__":
    setup()
|
<reponame>sdrobert/pydrobert-speech
# pylint: skip-file
import os
from math import erf
import numpy as np
import pytest
from pydrobert.speech import util
@pytest.mark.parametrize("shift", [0, 1, 100, -100])
@pytest.mark.parametrize("dft_size", [1, 2, 51, 1000], ids=["l1", "l2", "l51", "l1000"])
@pytest.mark.parametrize("copy", [True, False], ids=["copy", "keep"])
@pytest.mark.parametrize("start_idx", [0, 1, -1], ids=["at", "after", "before"])
def test_circshift_fourier(shift, dft_size, start_idx, copy):
    """A circular shift applied in the Fourier domain must equal np.roll
    applied in the time domain after an inverse DFT."""
    start_idx %= dft_size
    # Truncate the spectrum by a random number of (implicitly zero) bins
    # to check that dft_size can exceed the stored length.
    zeros = np.random.randint(dft_size)
    X = 10 * np.random.random(dft_size - zeros) + 10j * np.random.random(
        dft_size - zeros
    )
    Xs = util.circshift_fourier(
        X.copy(), shift, start_idx=start_idx, dft_size=dft_size, copy=copy
    )
    # Re-pad both spectra to dft_size and realign them at start_idx so the
    # inverse transforms are directly comparable.
    X = np.roll(np.pad(X, (0, zeros), "constant"), start_idx)
    Xs = np.roll(np.pad(Xs, (0, zeros), mode="constant"), start_idx)
    assert len(X) == len(Xs)
    x = np.fft.ifft(X)
    xs = np.fft.ifft(Xs)
    # Shifting in frequency == rolling in time.
    assert np.allclose(np.roll(x, shift), xs)
@pytest.mark.parametrize("mu", [0, -1, 100])
@pytest.mark.parametrize("std", [0.1, 1, 10])
@pytest.mark.parametrize("do_scipy", [True, False])
def test_gauss_quant(mu, std, do_scipy):
    """gauss_quant must invert the Gaussian CDF: for each sample x, feeding
    its CDF value p back through the quantile function recovers x."""
    X = np.arange(1000, dtype=float) / 1000 - 0.5
    X /= X.std()
    X *= std / 2
    X += mu
    for x in X:
        # Gaussian CDF evaluated at x.
        p = 0.5 * (1 + erf((x - mu) / std / np.sqrt(2)))
        if do_scipy:
            # Skip the scipy-backed branch when scipy is unavailable.
            # "scipy.norm" is not an importable module (norm lives in
            # scipy.stats), so the original importorskip always skipped.
            pytest.importorskip("scipy.stats")
            x2 = util.gauss_quant(p, mu=mu, std=std)
        else:
            # because we don't give access to this if scipy is
            # installed, we have to access the private function
            x2 = util._gauss_quant_odeh_evans(p, mu=mu, std=std)
        assert np.isclose(x, x2, atol=1e-5)
@pytest.mark.parametrize("key", [True, False])
def test_read_kaldi(temp_dir, key):
    """read_signal on a Kaldi archive: with key= it fetches that entry,
    without it it returns the first entry in the table."""
    kaldi = pytest.importorskip("pydrobert.kaldi.io")
    rxfilename = "ark:{}".format(os.path.join(temp_dir, "foo.ark"))
    key_1 = "lions"
    key_2 = "tigers"
    buff_1 = np.random.random((100, 10))
    buff_2 = np.random.random((1000, 2))
    # "dm" is the Kaldi double-matrix table dtype.
    with kaldi.open(rxfilename, "dm", "w") as table:
        table.write(key_1, buff_1)
        table.write(key_2, buff_2)
    if key:
        buff_3 = util.read_signal(rxfilename, dtype="dm", key=key_2)
        assert np.allclose(buff_2, buff_3)
    else:
        # No key: read_signal should return the first entry written.
        buff_3 = util.read_signal(rxfilename, dtype="dm")
        assert np.allclose(buff_1, buff_3)
@pytest.mark.parametrize("use_scipy", [True, False])
@pytest.mark.parametrize("channels", [1, 2], ids=["mono", "stereo"])
@pytest.mark.parametrize("sampwidth", [2, 4])
def test_read_wave(temp_dir, use_scipy, channels, sampwidth):
    """Both WAV backends (scipy.io and stdlib wave) must round-trip PCM
    samples of 2- and 4-byte width, mono and stereo."""
    import wave
    rfilename = os.path.join(temp_dir, "foo.wav")
    if channels > 1:
        wave_buffer_1 = np.random.random((1000, channels)) * 1000
    else:
        wave_buffer_1 = np.random.random(1000) * 1000
    # Little-endian signed integers of the requested sample width, matching
    # the PCM layout wave.writeframes expects.
    wave_buffer_1 = wave_buffer_1.astype("<i{}".format(sampwidth))
    # C order interleaves the channels frame by frame.
    wave_bytes = wave_buffer_1.tobytes("C")
    wave_file = wave.open(rfilename, "wb")
    wave_file.setnchannels(channels)
    wave_file.setsampwidth(sampwidth)
    wave_file.setframerate(8000)
    wave_file.writeframes(wave_bytes)
    wave_file.close()
    if use_scipy:
        pytest.importorskip("scipy")
        # Private backends are exercised directly so each is covered even
        # though read_signal would normally pick one automatically.
        wave_buffer_2 = util._scipy_io_read_signal(rfilename, None, None)
    else:
        wave_buffer_2 = util._wave_read_signal(rfilename, None, None)
    assert np.allclose(wave_buffer_1, wave_buffer_2)
@pytest.mark.parametrize(
    "name",
    [
        "123_1pcbe",
        "123_1pcle",
        "123_1ulaw",
        "123_2alaw",
        "123_2pcbe",
        "123_2pcle",
        "123_2ulaw",
    ],
)
def test_read_sphere(name):
    """Reading a NIST SPHERE fixture must yield the same samples as the
    reference WAV fixture of the same name."""
    audio_dir = os.path.join(os.path.dirname(__file__), "audio")
    # a-law fixtures are stored plain; the rest are shorten-compressed.
    suffix = ".sph" if name.endswith("alaw") else "_shn.sph"
    sph_file = os.path.join(audio_dir, name + suffix)
    wav_file = os.path.join(audio_dir, name + ".wav")
    for path in (sph_file, wav_file):
        assert os.path.isfile(path)
    expected = util.read_signal(wav_file, dtype=np.int32)
    actual = util.read_signal(sph_file, dtype=np.int32)
    assert np.all(actual == expected)
@pytest.mark.parametrize("env_var,suffix", [("WSJ_DIR", ".wv1"), ("TIMIT_DIR", ".sph")])
def test_read_sphere_corpus(temp_dir, env_var, suffix):
    """Cross-check our SPHERE reader against sph2pipe on real corpus files.

    Skips unless the corpus directory (WSJ_DIR/TIMIT_DIR) and the SPH2PIPE
    binary path are provided via environment variables.
    """
    # Limit the comparison to the first 50 files found.
    num_utts = 50
    env_dir = os.environ.get(env_var, None)
    if env_dir is None:
        pytest.skip("Corpus dir not set")
    sph2pipe_path = os.environ.get("SPH2PIPE", None)
    if sph2pipe_path is None:
        pytest.skip("SPH2PIPE dir not set")
    try:
        import pathlib
    except ImportError:
        # Python 2 fallback: use the pathlib2 backport if available.
        pathlib = pytest.importorskip("pathlib2")
    # zip against range(num_utts) truncates the (lazy) glob to num_utts files.
    sphere_files = [
        str(x)
        for (x, _) in zip(
            pathlib.Path(env_dir).glob("**/*" + suffix), list(range(num_utts))
        )
    ]
    import subprocess
    wav_files = []
    # Convert each SPHERE file to WAV with the reference tool; check_call-style
    # assert: sph2pipe returns 0 on success.
    for utt_idx, src in enumerate(sphere_files):
        wav_file = os.path.join(temp_dir, "{}.wav".format(utt_idx))
        wav_files.append(wav_file)
        assert not subprocess.call([sph2pipe_path, "-f", "wav", src, wav_file])
    for wav_path, sph_path in zip(wav_files, sphere_files):
        wav = util.read_signal(wav_path, dtype=np.int32)
        # force_as="sph" because .wv1 files lack a recognized extension.
        sph = util.read_signal(sph_path, dtype=np.int32, force_as="sph")
        assert (wav == sph).all()
@pytest.mark.parametrize("key", [True, False])
def test_read_hdf5(temp_dir, key):
    """read_signal on HDF5: with key= it fetches that dataset, without it
    it finds the first dataset while traversing the group hierarchy."""
    h5py = pytest.importorskip("h5py")
    rfilename = os.path.join(temp_dir, "foo.hdf5")
    nested = np.random.random((1000, 2000))
    flat = (np.random.random(10) * 1000).astype(int)
    with h5py.File(rfilename, "w") as h5py_file:
        # Empty groups force read_signal to traverse the hierarchy.
        h5py_file.create_group("a/b/c")
        h5py_file.create_group("a/b/d/e")
        h5py_file.create_dataset("a/b/d/f", (1000, 2000), data=nested)
        h5py_file.create_dataset("g", (10,), data=flat)
    if key:
        assert np.allclose(flat, util.read_signal(rfilename, key="g"))
    else:
        assert np.allclose(nested, util.read_signal(rfilename))
def test_read_torch(temp_dir):
    """read_signal must load a tensor saved with torch.save as an array."""
    torch = pytest.importorskip("torch")
    torch.manual_seed(10)
    rfilename = os.path.join(temp_dir, "foo.pt")
    tensor = torch.randn(10, 4)
    torch.save(tensor, rfilename)
    loaded = util.read_signal(rfilename)
    assert np.allclose(tensor.numpy(), loaded)
@pytest.mark.parametrize(
    "allow_pickle", [True, False], ids=["picklable", "notpicklable"]
)
@pytest.mark.parametrize("fix_imports", [True, False], ids=["fix", "nofix"])
def test_read_numpy_binary(temp_dir, allow_pickle, fix_imports):
    """read_signal must round-trip .npy files for every np.save flag combo."""
    rfilename = os.path.join(temp_dir, "foo.npy")
    expected = np.random.random((1000, 10, 5))
    np.save(rfilename, expected, allow_pickle=allow_pickle, fix_imports=fix_imports)
    assert np.allclose(expected, util.read_signal(rfilename))
@pytest.mark.parametrize(
    "compressed", [True, False], ids=["compressed", "uncompressed"]
)
@pytest.mark.parametrize("key", [True, False], ids=["withkey", "withoutkey"])
def test_read_numpy_archive(temp_dir, compressed, key):
    """read_signal on .npz archives: keyed and positional entries, with and
    without compression, should all return the first/named array."""
    rfilename = os.path.join(temp_dir, "foo.npz")
    expected = np.random.random((5, 1, 2))
    other = np.random.random((1,))
    saver = np.savez_compressed if compressed else np.savez
    if key:
        saver(rfilename, a=expected, b=other)
        actual = util.read_signal(rfilename, key="a")
    else:
        # Positional save: read_signal should pick the first array.
        saver(rfilename, expected, other)
        actual = util.read_signal(rfilename)
    assert np.allclose(expected, actual)
@pytest.mark.parametrize("text", [True, False])
def test_read_numpy_fromfile(temp_dir, text):
    """read_signal must handle both text and raw-binary ndarray.tofile output."""
    rfilename = os.path.join(temp_dir, "foo")
    expected = np.random.random(1000)
    # An empty sep means raw binary; a comma means text mode.
    sep = "," if text else ""
    expected.tofile(rfilename, sep=sep)
    assert np.allclose(expected, util.read_signal(rfilename, sep=sep))
|
"""
created matt_dumont
on: 6/02/22
"""
import datetime
import time
import pandas as pd
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
import numpy as np
from matplotlib.cm import get_cmap
from api_support.get_data import get_afk_data, get_window_watcher_data, get_manual, get_labels_from_unix, \
add_manual_data
import pyqtgraph as pg
from pyqtgraph.dockarea import DockArea, Dock
class AwQtManual(QtGui.QMainWindow):
    """Qt main window for reviewing ActivityWatch data and adding manual tags.

    Shows three stacked bar tracks on a shared time axis (window-watcher,
    AFK status, manual tags), a duration-summary table, legends, and
    controls for tagging or deleting the time range selected with a
    draggable LinearRegionItem.  The display refreshes on a 500 ms timer.
    """
    # Maps the data-selector combo box text to the key in self.data.
    data_mapper = {
        'sum by: afk': 'afk_data',
        'sum by: app': 'ww_data',
        'sum by: tag': 'manual_data',
    }
    # Column to group by when summing durations for each selector choice.
    sum_col = {
        'sum by: afk': 'status',
        'sum by: app': 'app',
        'sum by: tag': 'tag',
    }
    def __init__(self, start_day: str):
        """Build the dock layout, load data for *start_day* (an ISO date
        string, e.g. '2022-02-06'), and show the window."""
        QtWidgets.QMainWindow.__init__(self)
        self.bar_plots = []  # to keep track of the barplots that need to be removed
        self.resize(1900, 900)
        area = DockArea()
        self.setCentralWidget(area)
        self.dock1 = Dock("main plot", size=(1200, 900), hideTitle=True)
        self.dock2 = Dock("tag data", size=(300, 300), hideTitle=True)
        self.dock5 = Dock("legend", size=(200, 900), hideTitle=True)
        self.dock4 = Dock("legend", size=(200, 900), hideTitle=True)
        self.dock3 = Dock('table_data', size=(300, 600), hideTitle=True)
        # Per-track mapping of label -> color, rebuilt by the add_* methods.
        self.legend = {'afk_data': {},
                       'ww_data': {},
                       'manual_data': {}}
        self.legend_widgets = []
        area.addDock(self.dock1, 'left')
        area.addDock(self.dock4, 'right', self.dock1)
        area.addDock(self.dock5, 'right', self.dock4)
        area.addDock(self.dock2, 'right', self.dock5)
        area.addDock(self.dock3, 'bottom', self.dock2)
        self.day = start_day
        self.day_dt = datetime.datetime.fromisoformat(start_day)
        self.inialize_plot_window()
        self.dock1.addWidget(self.plot_window)
        # add plot data
        self.data = {}
        self.data['afk_data'] = self.add_afk()
        self.data['ww_data'] = self.add_windowwatcher()
        self.data['manual_data'] = self.add_manual_data()
        self.initialize_datatable()
        self.initialize_button_section()
        self.timer = QtCore.QTimer()
        self.add_legend()
        # Refresh the plot twice a second.
        self.timer.timeout.connect(self.update_plot_data)
        self.timer.start(500)
        self.show()
    def inialize_plot_window(self):
        """Create the plot layout: three hover-info labels, the main time
        plot with a date x-axis and named y-tracks, and the selection region.

        NOTE(review): method name is a typo of "initialize" but is kept,
        since callers reference it by this name.
        """
        self.plot_window = pg.GraphicsLayoutWidget(show=False)
        self.plot_label1 = pg.LabelItem(justify='left')
        self.plot_label2 = pg.LabelItem(justify='left')
        self.plot_label3 = pg.LabelItem(justify='left')
        self.plot_window.addItem(self.plot_label1)
        self.plot_window.nextRow()
        self.plot_window.addItem(self.plot_label2)
        self.plot_window.nextRow()
        self.plot_window.addItem(self.plot_label3)
        self.plot_window.nextRow()
        # y-axis shows track names instead of numbers; tracks occupy
        # y ranges [0,1) window-watcher, [1,2) afk, [2,3) tags.
        self.plot_yaxis = pg.AxisItem(orientation='left')
        self.plot_yaxis.setTicks([[(0.5, 'Win-watch'), (1.5, 'Afk'), (2.5, 'Tag')]])
        self.data_plot = self.plot_window.addPlot(axisItems={'bottom': pg.DateAxisItem(), 'left': self.plot_yaxis})
        self.data_plot.setYRange(0, 3)
        self.data_plot.setMouseEnabled(x=True, y=False)
        self.data_plot.resize(1000, 600)
        self.data_plot.setWindowTitle('pyqtgraph example: Plotting')
        self.selection = pg.LinearRegionItem([self.day_dt.timestamp() + 3600 * 9,
                                              self.day_dt.timestamp() + 3600 * 10])  # set window to 9 and 10am
        self.selection.setZValue(-10)
        self.data_plot.addItem(self.selection)
        # hover over info
        self.vb = self.data_plot.vb
    def initialize_button_section(self):
        """Populate dock2 with the delete button, date picker, summary
        selector, tag text field, overlap policy selector, and tag button."""
        # delete button
        self.delete_button = QtGui.QPushButton('Delete tags in selected time')
        self.delete_button.clicked.connect(self.delete_events)
        self.dock2.addWidget(self.delete_button)
        self.date_edit = QtWidgets.QDateEdit(calendarPopup=True)
        self.date_edit.setDateTime(self.day_dt)
        self.date_edit.dateChanged.connect(self.change_date)
        self.dock2.addWidget(self.date_edit)
        self.data_selector = QtGui.QComboBox()
        self.data_selector.addItem('sum by: afk')  # afk_data
        self.data_selector.addItem('sum by: app')  # ww_data
        self.data_selector.addItem('sum by: tag')  # manual_data
        self.dock2.addWidget(self.data_selector)
        self.update_datatable(1)
        self.data_selector.currentIndexChanged.connect(self.update_datatable)
        # tag text area
        self.tag = QtGui.QLineEdit('Tag:')
        self.dock2.addWidget(self.tag)
        # overlap option
        self.overlap_option = QtGui.QComboBox()
        self.overlap_option.addItem('overwrite')  # afk_data
        self.overlap_option.addItem('underwrite')  # ww_data
        self.overlap_option.addItem('raise')  # manual_data
        self.dock2.addWidget(self.overlap_option)
        self.overlap_option.currentIndexChanged.connect(self.overlap_sel_change)
        self.overlap_sel_change(1)
        # tag button
        self.tag_button = QtGui.QPushButton('Tag selected Time')
        self.tag_button.clicked.connect(self.tag_time)
        self.dock2.addWidget(self.tag_button)
    def initialize_datatable(self):
        """Create the duration-summary table widget in dock3."""
        self.datatable = pg.TableWidget()
        self.datatable.resize(500, 500)
        self.dock3.addWidget(self.datatable)
    def tag_time(self):
        """Tag the currently selected time range with the text of the tag
        field (minus the 'Tag:' prefix), then refresh plot/table/legend."""
        low, high = self.selection.getRegion()
        # NOTE(review): region values are unix timestamps; they are passed
        # as a UTC start plus a duration in seconds -- confirm the backend
        # expects UTC here.
        add_manual_data(start=datetime.datetime.fromtimestamp(low, datetime.timezone.utc),
                        duration=high - low, tag=self.tag.text().replace('Tag:', ''),
                        overlap=self.overlap)
        self.update_plot_data()
        self.update_datatable(1)
        self.update_legend()
    def change_date(self):
        """Reload everything for the date newly picked in the date widget."""
        self.day_dt = self.date_edit.date().toPyDate()
        # Round-trip through ISO format to turn the date into a datetime.
        self.day = self.day_dt.isoformat()
        self.day_dt = datetime.datetime.fromisoformat(self.day)
        self.update_plot_data()
        self.update_datatable(1)
        # auto update area
        xs = self.get_databounds()
        self.data_plot.setXRange(*xs, padding=0)
        # set the selector
        self.selection.setRegion((self.day_dt.timestamp() + 3600 * 9,
                                  self.day_dt.timestamp() + 3600 * 10))
        self.update_legend()
    def add_legend(self):
        """Build colored labels for every legend entry: afk/manual entries
        go into dock4, window-watcher entries into dock5."""
        self.legend_widgets = []
        self.legend_font = QtGui.QFont()
        self.legend_font.setBold(True)
        for lgroup in ['afk_data', 'manual_data']:
            litems = self.legend[lgroup]
            over_label = QtGui.QLabel(lgroup, self)
            over_label.setFont(self.legend_font)
            self.dock4.addWidget(over_label)
            self.legend_widgets.append(over_label)
            for k, c in litems.items():
                leg_lab = QtGui.QLabel(k, self)
                leg_lab.setFont(self.legend_font)
                # Colors may be stored as names (str) or QColor objects.
                use_c = c if isinstance(c, str) else c.name()
                # setting up background color and border
                leg_lab.setStyleSheet(f"background-color: {use_c}; border: 1px solid black;")
                self.dock4.addWidget(leg_lab)
                self.legend_widgets.append(leg_lab)
        for lgroup in ['ww_data']:
            litems = self.legend[lgroup]
            over_label = QtGui.QLabel(lgroup, self)
            over_label.setFont(self.legend_font)
            self.dock5.addWidget(over_label)
            self.legend_widgets.append(over_label)
            for k, c in litems.items():
                leg_lab = QtGui.QLabel(k, self)
                leg_lab.setFont(self.legend_font)
                use_c = c if isinstance(c, str) else c.name()
                # setting up background color and border
                leg_lab.setStyleSheet(f"background-color: {use_c}; border: 1px solid black;")
                self.dock5.addWidget(leg_lab)
                self.legend_widgets.append(leg_lab)
    def delete_legend(self):
        """Detach and destroy every legend label created by add_legend."""
        while len(self.legend_widgets) > 0:
            li = self.legend_widgets.pop()
            li.setParent(None)
            self.layout().removeWidget(li)
            li.deleteLater()
            li = None
    def get_databounds(self):
        """Return (min, max) unix time across all loaded tracks, falling
        back to the whole selected day when no data is loaded."""
        data = []
        for v in self.data.values():
            if v is None:
                continue
            data.append(v.start_unix.min())
            data.append(v.stop_unix.max())
        if len(data) == 0:
            return self.day_dt.timestamp(), (self.day_dt + datetime.timedelta(days=1)).timestamp()
        else:
            return min(data), max(data)
    def overlap_sel_change(self, i):
        """Remember the currently selected overlap policy string."""
        self.overlap = self.overlap_option.currentText()
    def update_legend(self):
        """Rebuild the legend widgets from the current self.legend contents."""
        self.delete_legend()
        self.add_legend()
    def delete_events(self):
        """Delete manual tags inside the selected time range (overlap mode
        'delete'), then refresh plot/table/legend."""
        self.delete_legend()
        low, high = self.selection.getRegion()
        add_manual_data(start=datetime.datetime.fromtimestamp(low, datetime.timezone.utc),
                        duration=high - low, tag=self.tag.text().replace('Tag:', ''),
                        overlap='delete')
        self.update_plot_data()
        self.update_datatable(1)
        self.update_legend()
    def update_datatable(self, i):
        """Refill the summary table with per-group duration totals for the
        track currently chosen in the data selector.

        :param i: combo-box index (unused; the current text is queried).
        """
        j = self.data_selector.currentText()
        data = self.data[self.data_mapper[j]]
        if data is not None:
            df = data.groupby(self.sum_col[j]).sum().loc[:, ['duration_min']]
            df.loc['total'] = data.loc[:, 'duration_min'].sum()
            # Format minutes as HH:MM for display.
            df.loc[:, 'duration_format'] = [f'{int(e // 60):02d}:{int(e % 60):02d}' for e in df.duration_min]
            temp = df.loc[:, ['duration_format']].to_records()
            self.show_data = temp
        else:
            # No data for this track: show an empty table with headers.
            data = pd.DataFrame(columns=['duration min'])
            data.index.name = self.sum_col[j]
            self.show_data = data.to_records()
        self.datatable.setData(self.show_data)
    def mouseMoved(self, evt):
        """Update the three info labels with tag/afk/app/window details at
        the mouse's time position (connected via a SignalProxy)."""
        pos = evt[0]  ## using signal proxy turns original arguments into a tuple
        if self.data_plot.sceneBoundingRect().contains(pos):
            mousePoint = self.vb.mapSceneToView(pos)
            # x in view coordinates is a unix timestamp.
            index = mousePoint.x()
            tag, tag_dur, afk, afk_dur, cur_app, window, ww_dur = get_labels_from_unix(
                index,
                self.data['afk_data'],
                self.data['ww_data'],
                self.data['manual_data'])
            low, high = self.selection.getRegion()
            selected = (high - low) / 60
            text = (f"<span style='font-size: 12pt'>"
                    f"selection: {int(selected // 60):02d}:{int(selected % 60):02d};"
                    f" Tag: {tag}; {int(tag_dur // 60):02d}:{int(tag_dur % 60):02d}")
            self.plot_label1.setText(text)
            text2 = (f"<span style='font-size: 12pt'>AKF:{afk}; {int(afk_dur // 60):02d}:{int(afk_dur % 60):02d} "
                     f"app:{cur_app}; {int(ww_dur // 60):02d}:{int(ww_dur % 60):02d}")
            self.plot_label2.setText(text2)
            text3 = f"<span style='font-size: 12pt'> Window: {window}"
            self.plot_label3.setText(text3)
    def update_plot_data(self):
        """Timer callback: remove all bar items and re-fetch/redraw the
        three data tracks."""
        self.data_plot.enableAutoRange('xy', False)  ## stop auto-scaling after the first data set is plotted
        for bg in self.bar_plots:
            self.data_plot.removeItem(bg)
        self.data['manual_data'] = self.add_manual_data()
        self.data['afk_data'] = self.add_afk()
        self.data['ww_data'] = self.add_windowwatcher()
    def add_windowwatcher(self):
        """Fetch window-watcher data for the current day, draw one colored
        bar series per app on track y=[0,1), and return the dataframe
        (or None if there is no data)."""
        start = datetime.datetime.fromisoformat(self.day)
        data = get_window_watcher_data(start.isoformat(), (start + datetime.timedelta(days=1)).isoformat())
        legend = {}
        if data is not None:
            apps = pd.unique(data.loc[:, 'app'])
            cm = pg.colormap.get('gist_earth', 'matplotlib')
            # +1 so no app gets the extreme end of the colormap.
            n_scens = len(apps) + 1
            colors = [cm[(e + 1) / n_scens] for e in range(n_scens)]
            for k, c in zip(apps, colors):
                legend[k] = c
                idx = data.loc[:, 'app'] == k
                bg = pg.BarGraphItem(x0=data.loc[idx, 'start_unix'], x1=data.loc[idx, 'stop_unix'], y0=0, y1=1,
                                     brush=c)
                self.bar_plots.append(bg)
                self.data_plot.addItem(bg)
        self.legend['ww_data'] = legend
        return data
    def add_afk(self):
        """Fetch AFK data for the current day, draw red (afk) and green
        (not-afk) bars on track y=[1,2), and return the dataframe
        (or None if there is no data)."""
        start = datetime.datetime.fromisoformat(self.day)
        data = get_afk_data(start.isoformat(), (start + datetime.timedelta(days=1)).isoformat())
        r = QtGui.QColor(255, 0, 0)
        g = QtGui.QColor(0, 225, 0)
        legend = {'not-afk': g, 'afk': r}
        if data is not None:
            idx = data.status == 'afk'
            bg2 = pg.BarGraphItem(x0=data.loc[idx, 'start_unix'], x1=data.loc[idx, 'stop_unix'], y0=1, y1=2,
                                  brush=r)
            idx = data.status != 'afk'
            bg1 = pg.BarGraphItem(x0=data.loc[idx, 'start_unix'], x1=data.loc[idx, 'stop_unix'], y0=1, y1=2,
                                  brush=g)
            self.data_plot.addItem(bg1)
            self.data_plot.addItem(bg2)
            self.bar_plots.append(bg1)
            self.bar_plots.append(bg2)
        self.legend['afk_data'] = legend
        return data
    def add_manual_data(self):
        """Fetch manual tags for the current day, draw one colored bar
        series per tag on track y=[2,3), and return the dataframe
        (or None if there is no data)."""
        start = datetime.datetime.fromisoformat(self.day)
        data = get_manual(start.isoformat(), (start + datetime.timedelta(days=1)).isoformat())
        legend = {}
        if data is not None:
            apps = pd.unique(data.loc[:, 'tag'])
            cm = pg.colormap.get('gist_earth', 'matplotlib')
            n_scens = len(apps) + 1
            colors = [cm[(e + 1) / n_scens] for e in range(n_scens)]
            for k, c in zip(apps, colors):
                legend[k] = c
                idx = data.loc[:, 'tag'] == k
                bg = pg.BarGraphItem(x0=data.loc[idx, 'start_unix'], x1=data.loc[idx, 'stop_unix'], y0=2, y1=3,
                                     brush=c)
                self.data_plot.addItem(bg)
                self.bar_plots.append(bg)
        self.legend['manual_data'] = legend
        return data
def main():
    """Open the manual-tagging window for today and run the Qt event loop."""
    qt_app = pg.mkQApp()
    viewer = AwQtManual(datetime.date.today().isoformat())
    # Keep a reference to the proxy so the rate-limited mouse-move
    # connection is not garbage collected while the event loop runs.
    mouse_proxy = pg.SignalProxy(viewer.data_plot.scene().sigMouseMoved,
                                 rateLimit=60, slot=viewer.mouseMoved)
    pg.exec()
|
import subprocess
import sys
import matplotlib.pyplot as plt
import os
import pandas as pd
def get_file_bucket(file):
    """Return a coarse content-type bucket for *file*.

    Runs the ``file`` utility and keeps the first two words of its type
    description (the part after the path, before the first comma), e.g.
    "ASCII text" or "ELF 64-bit".

    Args:
        file: path of the file to classify.

    Returns:
        str: the first (up to) two words of the ``file`` description.
    """
    # Pass an argument list (shell=False) so characters in the path are not
    # interpreted by a shell, and decode() the output instead of str()-ing
    # the bytes object -- the original b'...' repr artifacts forced the
    # trailing-quote stripping hack.
    output = subprocess.check_output(["file", file]).decode().strip()
    description = output.split(":")[1].split(",")[0].strip()
    return " ".join(description.split(" ")[:2])
def main(seeds_dir: str):
    """Summarize the seed files below *seeds_dir*.

    Every non-empty subdirectory of *seeds_dir* is a seed group whose file
    ending is the part of the directory name before the first "_".  Each
    seed file is counted into a size bucket (decimal KB boundaries) both
    per file ending and per ``file``-reported content type; the results
    are written to two CSVs and the content-type table is shown as a
    stacked bar chart.

    Args:
        seeds_dir: directory containing the seed subdirectories.
    """
    size_buckets = ["<1KB", "<=500KB", "<=1000KB", ">1000KB"]

    def _size_bucket(size: int) -> str:
        # Boundaries use decimal kilobytes (1000 bytes), as before.
        if size <= 1 * 1000:
            return "<1KB"
        if size <= 500 * 1000:
            return "<=500KB"
        if size <= 1000 * 1000:
            return "<=1000KB"
        return ">1000KB"

    def _seed_dirs():
        # Yield (dir name, full path) for each non-empty subdirectory.
        for file_dir in os.listdir(seeds_dir):
            full_path = os.path.join(seeds_dir, file_dir)
            if os.path.isdir(full_path) and os.listdir(full_path):
                yield file_dir, full_path

    df = pd.DataFrame([], columns=["id", "filetype", "size_bucket"])
    filetypes = [file_dir.split("_")[0] for file_dir, _ in _seed_dirs()]
    # Start from an all-zero table instead of the original chained-indexing
    # zero fill (plot_df.loc[ft]["<1KB"] = 0), which writes to a temporary
    # and may silently be lost (pandas SettingWithCopy).
    plot_df = pd.DataFrame(0, columns=size_buckets, index=filetypes)
    plot_bucket_dict = {}
    count = 0
    for file_dir, filedir_full_path in _seed_dirs():
        filetype = file_dir.split("_")[0]
        for file in os.listdir(filedir_full_path):
            print("Processing file {0}".format(file))
            file_full_path = os.path.join(filedir_full_path, file)
            bucket = _size_bucket(os.path.getsize(file_full_path))
            df.loc[count] = [file, filetype, bucket]
            count += 1
            # .loc[row, col] (not chained indexing) guarantees the
            # increment writes back into plot_df.
            plot_df.loc[filetype, bucket] += 1
            file_bucket = get_file_bucket(file_full_path)
            # Zero-initialize the per-content-type counters on first sight;
            # this replaces the original three-way if/elif/else.
            counts = plot_bucket_dict.setdefault(
                file_bucket, dict.fromkeys(size_buckets, 0))
            counts[bucket] += 1
    plot_bucket_df = pd.DataFrame(
        [], columns=size_buckets, index=plot_bucket_dict.keys())
    for file_bucket, counts in plot_bucket_dict.items():
        for bucket in size_buckets:
            plot_bucket_df.loc[file_bucket, bucket] = counts[bucket]
    plot_bucket_df.plot.barh(stacked=True, figsize=(80, 80))
    plot_bucket_df.to_csv("bucket_seeds_eval.csv")
    plot_df.to_csv("fileendings_seeds_eval.csv")
    plt.show()
# Usage: python <script> <seeds_dir>
if __name__ == "__main__":
    main(sys.argv[1])
|
## This file is part of Scapy
## Copyright (C) 2007, 2008, 2009 <NAME>
## 2015, 2016, 2017 <NAME>
## This program is published under a GPLv2 license
"""
TLS server automaton. This makes for a primitive TLS stack.
Obviously you need rights for network access.
We support versions SSLv2 to TLS 1.2, along with many features.
There is no session resumption mechanism for now.
In order to run a server listening on tcp/4433:
> from scapy.all import *
> t = TLSServerAutomaton(mycert='<cert.pem>', mykey='<key.pem>')
> t.run()
"""
from __future__ import print_function
import socket
from scapy.pton_ntop import inet_pton
from scapy.utils import randstring, repr_hex
from scapy.automaton import ATMT
from scapy.layers.tls.automaton import _TLSAutomaton
from scapy.layers.tls.cert import PrivKeyRSA, PrivKeyECDSA
from scapy.layers.tls.basefields import _tls_version
from scapy.layers.tls.session import tlsSession
from scapy.layers.tls.handshake import *
from scapy.layers.tls.handshake_sslv2 import *
from scapy.layers.tls.record import (TLS, TLSAlert, TLSChangeCipherSpec,
TLSApplicationData)
from scapy.layers.tls.crypto.suites import (_tls_cipher_suites_cls,
get_usable_ciphersuites)
class TLSServerAutomaton(_TLSAutomaton):
"""
A simple TLS test server automaton. Try to overload some states or
conditions and see what happens on the other side.
Because of socket and automaton limitations, for now, the best way to
interrupt the server is by sending him 'stop_server'. Interruptions with
Ctrl-Z should work, but this might leave a loose listening socket behind.
In case the server receives a TLSAlert (whatever its type), or a 'goodbye'
message in a SSLv2 version, he will close the client session with a
similar message, and start waiting for new client connections.
_'mycert' and 'mykey' may be provided as filenames. They are needed for any
server authenticated handshake.
_'preferred_ciphersuite' allows the automaton to choose a cipher suite when
offered in the ClientHello. If absent, another one will be chosen.
_'client_auth' means the client has to provide a certificate.
_'is_echo_server' means that everything received will be sent back.
_'max_client_idle_time' is the maximum silence duration from the client.
Once this limit has been reached, the client (if still here) is dropped,
and we wait for a new connection.
"""
def parse_args(self, server="127.0.0.1", sport=4433,
mycert=None, mykey=None,
preferred_ciphersuite=None,
client_auth=False,
is_echo_server=True,
max_client_idle_time=60,
**kargs):
super(TLSServerAutomaton, self).parse_args(mycert=mycert,
mykey=mykey,
**kargs)
try:
if ':' in server:
inet_pton(socket.AF_INET6, server)
else:
inet_pton(socket.AF_INET, server)
tmp = socket.getaddrinfo(server, sport)
except:
tmp = socket.getaddrinfo(socket.getfqdn(server), sport)
self.serversocket = None
self.ip_family = tmp[0][0]
self.local_ip = tmp[0][4][0]
self.local_port = sport
self.remote_ip = None
self.remote_port = None
self.preferred_ciphersuite = preferred_ciphersuite
self.client_auth = client_auth
self.is_echo_server = is_echo_server
self.max_client_idle_time = max_client_idle_time
    def vprint_sessioninfo(self):
        """In verbose mode, print the negotiated TLS version, cipher suite,
        master secret, and (if any) the client's certificate chain."""
        if self.verbose:
            s = self.cur_session
            v = _tls_version[s.tls_version]
            self.vprint("Version       : %s" % v)
            cs = s.wcs.ciphersuite.name
            self.vprint("Cipher suite  : %s" % cs)
            ms = s.master_secret
            self.vprint("Master secret : %s" % repr_hex(ms))
            if s.client_certs:
                self.vprint("Client certificate chain: %r" % s.client_certs)
            self.vprint()
def http_sessioninfo(self):
header = "HTTP/1.1 200 OK\r\n"
header += "Server: Scapy TLS Extension\r\n"
header += "Content-type: text/html\r\n"
header += "Content-length: %d\r\n\r\n"
s = "----- Scapy TLS Server Automaton -----\n\n"
s += "Information on current TLS session:\n\n"
s += "Local end : %s:%d\n" % (self.local_ip, self.local_port)
s += "Remote end : %s:%d\n" % (self.remote_ip, self.remote_port)
v = _tls_version[self.cur_session.tls_version]
s += "Version : %s\n" % v
cs = self.cur_session.wcs.ciphersuite.name
s += "Cipher suite : %s\n" % cs
ms = self.cur_session.master_secret
s += "Master secret : %s\n" % repr_hex(ms)
body = "<html><body><pre>%s</pre></body></html>\r\n\r\n" % s
answer = (header+body) % len(body)
return answer
    @ATMT.state(initial=True)
    def INITIAL(self):
        """Entry state of the automaton: print usage hints, then move to
        BIND (ATMT states transition by raising the next state)."""
        self.vprint("Starting TLS server automaton.")
        self.vprint("Receiving 'stop_server' will cause a graceful exit.")
        self.vprint("Interrupting with Ctrl-Z might leave a loose socket hanging.")
        raise self.BIND()
@ATMT.state()
def BIND(self):
s = socket.socket(self.ip_family, socket.SOCK_STREAM)
self.serversocket = s
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind((self.local_ip, self.local_port))
s.listen(1)
except:
m = "Unable to bind on %s:%d!" % (self.local_ip, self.local_port)
self.vprint()
self.vprint(m)
self.vprint("Maybe some server is already listening there?")
self.vprint()
raise self.FINAL()
raise self.WAITING_CLIENT()
    @ATMT.state()
    def WAITING_CLIENT(self):
        """Block on accept() until a client connects, record its address,
        then start a fresh TLS session."""
        self.vprint()
        self.vprint("Waiting for a new client on %s:%d" % (self.local_ip,
                                                           self.local_port))
        self.socket, addr = self.serversocket.accept()
        # Some socket families may not return a plain (ip, port) tuple;
        # normalize the peer address to exactly two elements.
        if not isinstance(addr, tuple):
            addr = self.socket.getpeername()
        if len(addr) > 2:
            addr = (addr[0], addr[1])
        self.remote_ip, self.remote_port = addr
        self.vprint("Accepted connection from %s:%d" % (self.remote_ip,
                                                        self.remote_port))
        self.vprint()
        raise self.INIT_TLS_SESSION()
    @ATMT.state()
    def INIT_TLS_SESSION(self):
        """
        Create a fresh server-side tlsSession seeded with our certificate
        chain and private key, then wait for the client's first flight.

        XXX We should offer the right key according to the client's suites. For
        now server_rsa_key is only used for RSAkx, but we should try to replace
        every server_key with both server_rsa_key and server_ecdsa_key.
        """
        self.cur_session = tlsSession(connection_end="server")
        self.cur_session.server_certs = [self.mycert]
        self.cur_session.server_key = self.mykey
        if isinstance(self.mykey, PrivKeyRSA):
            self.cur_session.server_rsa_key = self.mykey
        #elif isinstance(self.mykey, PrivKeyECDSA):
        # self.cur_session.server_ecdsa_key = self.mykey
        raise self.WAITING_CLIENTFLIGHT1()
    @ATMT.state()
    def WAITING_CLIENTFLIGHT1(self):
        # Blocks until the next record(s) from the client have been read.
        self.get_next_msg()
        raise self.RECEIVED_CLIENTFLIGHT1()
    @ATMT.state()
    def RECEIVED_CLIENTFLIGHT1(self):
        # Message dispatch happens in the ATMT conditions attached to this
        # state (should_handle_ClientHello / missing_ClientHello below).
        pass
########################### TLS handshake #################################
@ATMT.condition(RECEIVED_CLIENTFLIGHT1, prio=1)
def should_handle_ClientHello(self):
self.raise_on_packet(TLSClientHello,
self.HANDLED_CLIENTHELLO)
@ATMT.state()
def HANDLED_CLIENTHELLO(self):
raise self.PREPARE_SERVERFLIGHT1()
@ATMT.condition(HANDLED_CLIENTHELLO)
def should_check_ciphersuites(self):
"""
We extract cipher suites candidates from the client's proposition.
"""
if isinstance(self.mykey, PrivKeyRSA):
kx = "RSA"
elif isinstance(self.mykey, PrivKeyECDSA):
kx = "ECDSA"
if get_usable_ciphersuites(self.cur_pkt.ciphers, kx):
return
raise self.NO_USABLE_CIPHERSUITE()
@ATMT.state()
def NO_USABLE_CIPHERSUITE(self):
self.vprint("No usable cipher suite!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(RECEIVED_CLIENTFLIGHT1, prio=3)
def missing_ClientHello(self):
raise self.MISSING_CLIENTHELLO()
@ATMT.state(final=True)
def MISSING_CLIENTHELLO(self):
self.vprint("Missing ClientHello message!")
raise self.CLOSE_NOTIFY()
@ATMT.state()
def PREPARE_SERVERFLIGHT1(self):
self.add_record()
@ATMT.condition(PREPARE_SERVERFLIGHT1)
def should_add_ServerHello(self):
    """
    Selecting a cipher suite should be no trouble as we already caught
    the None case previously.
    Also, we do not manage extensions at all.
    """
    if isinstance(self.mykey, PrivKeyRSA):
        kx = "RSA"
    elif isinstance(self.mykey, PrivKeyECDSA):
        kx = "ECDSA"
    else:
        # Guard: an unsupported key type used to leave ``kx`` unbound
        # and raise NameError below. Bail out gracefully instead.
        raise self.NO_USABLE_CIPHERSUITE()
    usable_suites = get_usable_ciphersuites(self.cur_pkt.ciphers, kx)
    # Default to the first usable suite, but honor the configured
    # preference when the client offered it.
    c = usable_suites[0]
    if self.preferred_ciphersuite in usable_suites:
        c = self.preferred_ciphersuite
    self.add_msg(TLSServerHello(cipher=c))
    raise self.ADDED_SERVERHELLO()
@ATMT.state()
def ADDED_SERVERHELLO(self):
pass
@ATMT.condition(ADDED_SERVERHELLO)
def should_add_Certificate(self):
c = self.buffer_out[-1].msg[0].cipher
if not _tls_cipher_suites_cls[c].kx_alg.anonymous:
self.add_msg(TLSCertificate(certs=self.cur_session.server_certs))
raise self.ADDED_CERTIFICATE()
@ATMT.state()
def ADDED_CERTIFICATE(self):
pass
@ATMT.condition(ADDED_CERTIFICATE)
def should_add_ServerKeyExchange(self):
c = self.buffer_out[-1].msg[0].cipher
if not _tls_cipher_suites_cls[c].kx_alg.no_ske:
self.add_msg(TLSServerKeyExchange())
raise self.ADDED_SERVERKEYEXCHANGE()
@ATMT.state()
def ADDED_SERVERKEYEXCHANGE(self):
pass
@ATMT.condition(ADDED_SERVERKEYEXCHANGE)
def should_add_CertificateRequest(self):
if self.client_auth:
self.add_msg(TLSCertificateRequest())
raise self.ADDED_CERTIFICATEREQUEST()
@ATMT.state()
def ADDED_CERTIFICATEREQUEST(self):
pass
@ATMT.condition(ADDED_CERTIFICATEREQUEST)
def should_add_ServerHelloDone(self):
self.add_msg(TLSServerHelloDone())
raise self.ADDED_SERVERHELLODONE()
@ATMT.state()
def ADDED_SERVERHELLODONE(self):
pass
@ATMT.condition(ADDED_SERVERHELLODONE)
def should_send_ServerFlight1(self):
self.flush_records()
raise self.WAITING_CLIENTFLIGHT2()
@ATMT.state()
def WAITING_CLIENTFLIGHT2(self):
self.get_next_msg()
raise self.RECEIVED_CLIENTFLIGHT2()
@ATMT.state()
def RECEIVED_CLIENTFLIGHT2(self):
pass
@ATMT.condition(RECEIVED_CLIENTFLIGHT2, prio=1)
def should_handle_ClientCertificate(self):
self.raise_on_packet(TLSCertificate,
self.HANDLED_CLIENTCERTIFICATE)
@ATMT.condition(RECEIVED_CLIENTFLIGHT2, prio=2)
def no_ClientCertificate(self):
if self.client_auth:
raise self.MISSING_CLIENTCERTIFICATE()
raise self.HANDLED_CLIENTCERTIFICATE()
@ATMT.state()
def MISSING_CLIENTCERTIFICATE(self):
self.vprint("Missing ClientCertificate!")
raise self.CLOSE_NOTIFY()
@ATMT.state()
def HANDLED_CLIENTCERTIFICATE(self):
if self.client_auth:
self.vprint("Received client certificate chain...")
@ATMT.condition(HANDLED_CLIENTCERTIFICATE, prio=1)
def should_handle_ClientKeyExchange(self):
self.raise_on_packet(TLSClientKeyExchange,
self.HANDLED_CLIENTKEYEXCHANGE)
@ATMT.state()
def HANDLED_CLIENTKEYEXCHANGE(self):
pass
@ATMT.condition(HANDLED_CLIENTCERTIFICATE, prio=2)
def should_handle_Alert_from_ClientCertificate(self):
self.raise_on_packet(TLSAlert,
self.HANDLED_ALERT_FROM_CLIENTCERTIFICATE)
@ATMT.state()
def HANDLED_ALERT_FROM_CLIENTCERTIFICATE(self):
self.vprint("Received Alert message instead of ClientKeyExchange!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_CLIENTCERTIFICATE, prio=3)
def missing_ClientKeyExchange(self):
raise self.MISSING_CLIENTKEYEXCHANGE()
@ATMT.state()
def MISSING_CLIENTKEYEXCHANGE(self):
self.vprint("Missing ClientKeyExchange!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_CLIENTKEYEXCHANGE, prio=1)
def should_handle_CertificateVerify(self):
self.raise_on_packet(TLSCertificateVerify,
self.HANDLED_CERTIFICATEVERIFY)
@ATMT.condition(HANDLED_CLIENTKEYEXCHANGE, prio=2)
def no_CertificateVerify(self):
if self.client_auth:
raise self.MISSING_CERTIFICATEVERIFY()
raise self.HANDLED_CERTIFICATEVERIFY()
@ATMT.state()
def MISSING_CERTIFICATEVERIFY(self):
self.vprint("Missing CertificateVerify!")
raise self.CLOSE_NOTIFY()
@ATMT.state()
def HANDLED_CERTIFICATEVERIFY(self):
pass
@ATMT.condition(HANDLED_CERTIFICATEVERIFY, prio=1)
def should_handle_ChangeCipherSpec(self):
self.raise_on_packet(TLSChangeCipherSpec,
self.HANDLED_CHANGECIPHERSPEC)
@ATMT.state()
def HANDLED_CHANGECIPHERSPEC(self):
pass
@ATMT.condition(HANDLED_CERTIFICATEVERIFY, prio=2)
def should_handle_Alert_from_ClientKeyExchange(self):
self.raise_on_packet(TLSAlert,
self.HANDLED_ALERT_FROM_CLIENTKEYEXCHANGE)
@ATMT.state()
def HANDLED_ALERT_FROM_CLIENTKEYEXCHANGE(self):
self.vprint("Received Alert message instead of ChangeCipherSpec!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_CERTIFICATEVERIFY, prio=3)
def missing_ChangeCipherSpec(self):
raise self.MISSING_CHANGECIPHERSPEC()
@ATMT.state()
def MISSING_CHANGECIPHERSPEC(self):
self.vprint("Missing ChangeCipherSpec!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_CHANGECIPHERSPEC, prio=1)
def should_handle_ClientFinished(self):
self.raise_on_packet(TLSFinished,
self.HANDLED_CLIENTFINISHED)
@ATMT.state()
def HANDLED_CLIENTFINISHED(self):
raise self.PREPARE_SERVERFLIGHT2()
@ATMT.condition(HANDLED_CHANGECIPHERSPEC, prio=2)
def should_handle_Alert_from_ClientFinished(self):
self.raise_on_packet(TLSAlert,
self.HANDLED_ALERT_FROM_CHANGECIPHERSPEC)
@ATMT.state()
def HANDLED_ALERT_FROM_CHANGECIPHERSPEC(self):
self.vprint("Received Alert message instead of Finished!")
raise self.CLOSE_NOTIFY()
@ATMT.condition(HANDLED_CHANGECIPHERSPEC, prio=3)
def missing_ClientFinished(self):
raise self.MISSING_CLIENTFINISHED()
@ATMT.state()
def MISSING_CLIENTFINISHED(self):
self.vprint("Missing Finished!")
raise self.CLOSE_NOTIFY()
@ATMT.state()
def PREPARE_SERVERFLIGHT2(self):
self.add_record()
@ATMT.condition(PREPARE_SERVERFLIGHT2)
def should_add_ChangeCipherSpec(self):
self.add_msg(TLSChangeCipherSpec())
raise self.ADDED_CHANGECIPHERSPEC()
@ATMT.state()
def ADDED_CHANGECIPHERSPEC(self):
pass
@ATMT.condition(ADDED_CHANGECIPHERSPEC)
def should_add_ServerFinished(self):
self.add_record()
self.add_msg(TLSFinished())
raise self.ADDED_SERVERFINISHED()
@ATMT.state()
def ADDED_SERVERFINISHED(self):
pass
@ATMT.condition(ADDED_SERVERFINISHED)
def should_send_ServerFlight2(self):
self.flush_records()
raise self.SENT_SERVERFLIGHT2()
@ATMT.state()
def SENT_SERVERFLIGHT2(self):
self.vprint("TLS handshake completed!")
self.vprint_sessioninfo()
if self.is_echo_server:
self.vprint("Will now act as a simple echo server.")
raise self.WAITING_CLIENTDATA()
####################### end of TLS handshake ##############################
@ATMT.state()
def WAITING_CLIENTDATA(self):
self.get_next_msg(self.max_client_idle_time, 1)
raise self.RECEIVED_CLIENTDATA()
@ATMT.state()
def RECEIVED_CLIENTDATA(self):
pass
@ATMT.condition(RECEIVED_CLIENTDATA)
def should_handle_ClientData(self):
    # Consume one buffered message from the client and decide whether to
    # answer (echo / minimal HTTP page), close the session, or keep waiting.
    if not self.buffer_in:
        # get_next_msg() timed out without producing anything.
        self.vprint("Client idle time maxed out.")
        raise self.CLOSE_NOTIFY()
    p = self.buffer_in[0]
    self.buffer_in = self.buffer_in[1:]
    recv_data = b""
    if isinstance(p, TLSApplicationData):
        print("> Received: %r" % p.data)
        recv_data = p.data
        lines = recv_data.split(b"\n")
        for l in lines:
            if l.startswith(b"stop_server"):
                # Remote kill switch: terminate the automaton for good.
                raise self.CLOSE_NOTIFY_FINAL()
    elif isinstance(p, TLSAlert):
        print("> Received: %r" % p)
        raise self.CLOSE_NOTIFY()
    else:
        print("> Received: %r" % p)
    if recv_data.startswith(b"GET / HTTP/1.1"):
        # Serve a minimal HTTP response carrying session information.
        p = TLSApplicationData(data=self.http_sessioninfo())
    if self.is_echo_server or recv_data.startswith(b"GET / HTTP/1.1"):
        self.add_record()
        self.add_msg(p)
        raise self.ADDED_SERVERDATA()
    raise self.HANDLED_CLIENTDATA()
@ATMT.state()
def HANDLED_CLIENTDATA(self):
raise self.WAITING_CLIENTDATA()
@ATMT.state()
def ADDED_SERVERDATA(self):
pass
@ATMT.condition(ADDED_SERVERDATA)
def should_send_ServerData(self):
self.flush_records()
raise self.SENT_SERVERDATA()
@ATMT.state()
def SENT_SERVERDATA(self):
raise self.WAITING_CLIENTDATA()
@ATMT.state()
def CLOSE_NOTIFY(self):
self.vprint()
self.vprint("Sending a TLSAlert to the client...")
@ATMT.condition(CLOSE_NOTIFY)
def close_session(self):
    """Send a close_notify Alert, then go back to waiting for a client."""
    self.add_record()
    # level=1 (warning), descr=0 (close_notify).
    self.add_msg(TLSAlert(level=1, descr=0))
    try:
        self.flush_records()
    except Exception:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit; only network/protocol failures are expected here.
        self.vprint("Could not send termination Alert, maybe the client left?")
        self.buffer_out = []
    self.socket.close()
    raise self.WAITING_CLIENT()
@ATMT.state()
def CLOSE_NOTIFY_FINAL(self):
self.vprint()
self.vprint("Sending a TLSAlert to the client...")
@ATMT.condition(CLOSE_NOTIFY_FINAL)
def close_session_final(self):
    """Send a close_notify Alert, close the socket and end the automaton."""
    self.add_record()
    # level=1 (warning), descr=0 (close_notify).
    self.add_msg(TLSAlert(level=1, descr=0))
    try:
        self.flush_records()
    except Exception:
        # Was a bare ``except:``; narrow it so KeyboardInterrupt/SystemExit
        # still propagate.
        self.vprint("Could not send termination Alert, maybe the client left?")
    # We might call shutdown, but unit tests with s_client fail with this.
    #self.socket.shutdown(1)
    self.socket.close()
    raise self.FINAL()
########################## SSLv2 handshake ################################
@ATMT.condition(RECEIVED_CLIENTFLIGHT1, prio=2)
def sslv2_should_handle_ClientHello(self):
self.raise_on_packet(SSLv2ClientHello,
self.SSLv2_HANDLED_CLIENTHELLO)
@ATMT.state()
def SSLv2_HANDLED_CLIENTHELLO(self):
pass
@ATMT.condition(SSLv2_HANDLED_CLIENTHELLO)
def sslv2_should_add_ServerHello(self):
self.add_record(is_sslv2=True)
cert = self.mycert
ciphers = [0x010080, 0x020080, 0x030080, 0x040080,
0x050080, 0x060040, 0x0700C0]
connection_id = randstring(16)
p = SSLv2ServerHello(cert=cert,
ciphers=ciphers,
connection_id=connection_id)
self.add_msg(p)
raise self.SSLv2_ADDED_SERVERHELLO()
@ATMT.state()
def SSLv2_ADDED_SERVERHELLO(self):
pass
@ATMT.condition(SSLv2_ADDED_SERVERHELLO)
def sslv2_should_send_ServerHello(self):
self.flush_records()
raise self.SSLv2_SENT_SERVERHELLO()
@ATMT.state()
def SSLv2_SENT_SERVERHELLO(self):
raise self.SSLv2_WAITING_CLIENTMASTERKEY()
@ATMT.state()
def SSLv2_WAITING_CLIENTMASTERKEY(self):
self.get_next_msg()
raise self.SSLv2_RECEIVED_CLIENTMASTERKEY()
@ATMT.state()
def SSLv2_RECEIVED_CLIENTMASTERKEY(self):
pass
@ATMT.condition(SSLv2_RECEIVED_CLIENTMASTERKEY, prio=1)
def sslv2_should_handle_ClientMasterKey(self):
self.raise_on_packet(SSLv2ClientMasterKey,
self.SSLv2_HANDLED_CLIENTMASTERKEY)
@ATMT.condition(SSLv2_RECEIVED_CLIENTMASTERKEY, prio=2)
def missing_ClientMasterKey(self):
raise self.SSLv2_MISSING_CLIENTMASTERKEY()
@ATMT.state()
def SSLv2_MISSING_CLIENTMASTERKEY(self):
self.vprint("Missing SSLv2 ClientMasterKey!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.state()
def SSLv2_HANDLED_CLIENTMASTERKEY(self):
raise self.SSLv2_RECEIVED_CLIENTFINISHED()
@ATMT.state()
def SSLv2_RECEIVED_CLIENTFINISHED(self):
pass
@ATMT.condition(SSLv2_RECEIVED_CLIENTFINISHED, prio=1)
def sslv2_should_handle_ClientFinished(self):
self.raise_on_packet(SSLv2ClientFinished,
self.SSLv2_HANDLED_CLIENTFINISHED)
@ATMT.state()
def SSLv2_HANDLED_CLIENTFINISHED(self):
pass
@ATMT.condition(SSLv2_HANDLED_CLIENTFINISHED, prio=1)
def sslv2_should_add_ServerVerify_from_ClientFinished(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if SSLv2ServerVerify in hs_msg:
return
self.add_record(is_sslv2=True)
p = SSLv2ServerVerify(challenge=self.cur_session.sslv2_challenge)
self.add_msg(p)
raise self.SSLv2_ADDED_SERVERVERIFY()
@ATMT.condition(SSLv2_RECEIVED_CLIENTFINISHED, prio=2)
def sslv2_should_add_ServerVerify_from_NoClientFinished(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if SSLv2ServerVerify in hs_msg:
return
self.add_record(is_sslv2=True)
p = SSLv2ServerVerify(challenge=self.cur_session.sslv2_challenge)
self.add_msg(p)
raise self.SSLv2_ADDED_SERVERVERIFY()
@ATMT.condition(SSLv2_RECEIVED_CLIENTFINISHED, prio=3)
def sslv2_missing_ClientFinished(self):
raise self.SSLv2_MISSING_CLIENTFINISHED()
@ATMT.state()
def SSLv2_MISSING_CLIENTFINISHED(self):
self.vprint("Missing SSLv2 ClientFinished!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.state()
def SSLv2_ADDED_SERVERVERIFY(self):
pass
@ATMT.condition(SSLv2_ADDED_SERVERVERIFY)
def sslv2_should_send_ServerVerify(self):
self.flush_records()
raise self.SSLv2_SENT_SERVERVERIFY()
@ATMT.state()
def SSLv2_SENT_SERVERVERIFY(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if SSLv2ClientFinished in hs_msg:
raise self.SSLv2_HANDLED_CLIENTFINISHED()
else:
raise self.SSLv2_RECEIVED_CLIENTFINISHED()
####################### SSLv2 client authentication #######################
@ATMT.condition(SSLv2_HANDLED_CLIENTFINISHED, prio=2)
def sslv2_should_add_RequestCertificate(self):
hs_msg = [type(m) for m in self.cur_session.handshake_messages_parsed]
if not self.client_auth or SSLv2RequestCertificate in hs_msg:
return
self.add_record(is_sslv2=True)
self.add_msg(SSLv2RequestCertificate(challenge=randstring(16)))
raise self.SSLv2_ADDED_REQUESTCERTIFICATE()
@ATMT.state()
def SSLv2_ADDED_REQUESTCERTIFICATE(self):
pass
@ATMT.condition(SSLv2_ADDED_REQUESTCERTIFICATE)
def sslv2_should_send_RequestCertificate(self):
self.flush_records()
raise self.SSLv2_SENT_REQUESTCERTIFICATE()
@ATMT.state()
def SSLv2_SENT_REQUESTCERTIFICATE(self):
raise self.SSLv2_WAITING_CLIENTCERTIFICATE()
@ATMT.state()
def SSLv2_WAITING_CLIENTCERTIFICATE(self):
self.get_next_msg()
raise self.SSLv2_RECEIVED_CLIENTCERTIFICATE()
@ATMT.state()
def SSLv2_RECEIVED_CLIENTCERTIFICATE(self):
pass
@ATMT.condition(SSLv2_RECEIVED_CLIENTCERTIFICATE, prio=1)
def sslv2_should_handle_ClientCertificate(self):
self.raise_on_packet(SSLv2ClientCertificate,
self.SSLv2_HANDLED_CLIENTCERTIFICATE)
@ATMT.condition(SSLv2_RECEIVED_CLIENTCERTIFICATE, prio=2)
def sslv2_missing_ClientCertificate(self):
raise self.SSLv2_MISSING_CLIENTCERTIFICATE()
@ATMT.state()
def SSLv2_MISSING_CLIENTCERTIFICATE(self):
self.vprint("Missing SSLv2 ClientCertificate!")
raise self.SSLv2_CLOSE_NOTIFY()
@ATMT.state()
def SSLv2_HANDLED_CLIENTCERTIFICATE(self):
    """Client certificate received; resume the SSLv2 handshake."""
    # Fixed typo: ``selv.vprint`` raised NameError whenever client
    # authentication actually completed.
    self.vprint("Received client certificate...")
    # We could care about the client CA, but we don't.
    raise self.SSLv2_HANDLED_CLIENTFINISHED()
################### end of SSLv2 client authentication ####################
@ATMT.condition(SSLv2_HANDLED_CLIENTFINISHED, prio=3)
def sslv2_should_add_ServerFinished(self):
self.add_record(is_sslv2=True)
self.add_msg(SSLv2ServerFinished(sid=randstring(16)))
raise self.SSLv2_ADDED_SERVERFINISHED()
@ATMT.state()
def SSLv2_ADDED_SERVERFINISHED(self):
pass
@ATMT.condition(SSLv2_ADDED_SERVERFINISHED)
def sslv2_should_send_ServerFinished(self):
self.flush_records()
raise self.SSLv2_SENT_SERVERFINISHED()
@ATMT.state()
def SSLv2_SENT_SERVERFINISHED(self):
self.vprint("SSLv2 handshake completed!")
self.vprint_sessioninfo()
if self.is_echo_server:
self.vprint("Will now act as a simple echo server.")
raise self.SSLv2_WAITING_CLIENTDATA()
######################## end of SSLv2 handshake ###########################
@ATMT.state()
def SSLv2_WAITING_CLIENTDATA(self):
self.get_next_msg(self.max_client_idle_time, 1)
raise self.SSLv2_RECEIVED_CLIENTDATA()
@ATMT.state()
def SSLv2_RECEIVED_CLIENTDATA(self):
pass
@ATMT.condition(SSLv2_RECEIVED_CLIENTDATA)
def sslv2_should_handle_ClientData(self):
    """
    Consume one buffered SSLv2 application-data message and decide whether
    to answer (echo / minimal HTTP page), stop the server, or keep waiting.
    """
    if not self.buffer_in:
        self.vprint("Client idle time maxed out.")
        raise self.SSLv2_CLOSE_NOTIFY()
    p = self.buffer_in[0]
    self.buffer_in = self.buffer_in[1:]
    if hasattr(p, "load"):
        cli_data = p.load
        print("> Received: %r" % cli_data)
        if cli_data.startswith(b"goodbye"):
            self.vprint()
            self.vprint("Seems like the client left...")
            raise self.WAITING_CLIENT()
    else:
        # NOTE(review): ``str(p)`` yields text while the checks below use
        # bytes patterns; presumably this branch never carries real client
        # data -- confirm before relying on it.
        cli_data = str(p)
        print("> Received: %r" % p)
    lines = cli_data.split(b"\n")
    for l in lines:
        if l.startswith(b"stop_server"):
            raise self.SSLv2_CLOSE_NOTIFY_FINAL()
    if cli_data.startswith(b"GET / HTTP/1.1"):
        # Serve a minimal HTTP response carrying session information.
        p = Raw(self.http_sessioninfo())
    # Fixed: this test previously read the undefined name ``recv_data``
    # (copied from the TLS handler) and raised NameError. Also dropped the
    # unused ``answer = b""`` local.
    if self.is_echo_server or cli_data.startswith(b"GET / HTTP/1.1"):
        self.add_record(is_sslv2=True)
        self.add_msg(p)
        raise self.SSLv2_ADDED_SERVERDATA()
    raise self.SSLv2_HANDLED_CLIENTDATA()
@ATMT.state()
def SSLv2_HANDLED_CLIENTDATA(self):
raise self.SSLv2_WAITING_CLIENTDATA()
@ATMT.state()
def SSLv2_ADDED_SERVERDATA(self):
pass
@ATMT.condition(SSLv2_ADDED_SERVERDATA)
def sslv2_should_send_ServerData(self):
self.flush_records()
raise self.SSLv2_SENT_SERVERDATA()
@ATMT.state()
def SSLv2_SENT_SERVERDATA(self):
raise self.SSLv2_WAITING_CLIENTDATA()
@ATMT.state()
def SSLv2_CLOSE_NOTIFY(self):
"""
There is no proper way to end an SSLv2 session.
We try and send a 'goodbye' message as a substitute.
"""
self.vprint()
self.vprint("Trying to send 'goodbye' to the client...")
@ATMT.condition(SSLv2_CLOSE_NOTIFY)
def sslv2_close_session(self):
    """Try to say 'goodbye', then go back to waiting for a client."""
    self.add_record()
    self.add_msg(Raw('goodbye'))
    try:
        self.flush_records()
    except Exception:
        # Was a bare ``except:``; narrow it so KeyboardInterrupt/SystemExit
        # still propagate.
        self.vprint("Could not send our goodbye. The client probably left.")
        self.buffer_out = []
    self.socket.close()
    raise self.WAITING_CLIENT()
@ATMT.state()
def SSLv2_CLOSE_NOTIFY_FINAL(self):
"""
There is no proper way to end an SSLv2 session.
We try and send a 'goodbye' message as a substitute.
"""
self.vprint()
self.vprint("Trying to send 'goodbye' to the client...")
@ATMT.condition(SSLv2_CLOSE_NOTIFY_FINAL)
def sslv2_close_session_final(self):
    """Try to say 'goodbye', close the socket and end the automaton."""
    self.add_record()
    self.add_msg(Raw('goodbye'))
    try:
        self.flush_records()
    except Exception:
        # Was a bare ``except:``; narrow it so KeyboardInterrupt/SystemExit
        # still propagate.
        self.vprint("Could not send our goodbye. The client probably left.")
    self.socket.close()
    raise self.FINAL()
@ATMT.state(final=True)
def FINAL(self):
self.vprint("Closing server socket...")
self.serversocket.close()
self.vprint("Ending TLS server automaton.")
|
import csv
import logging
import typing as t
from collections import defaultdict
import discord
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Categories, Channels, Emojis, Roles
log = logging.getLogger(__name__)
MAX_CHANNELS = 50
CATEGORY_NAME = "Code Jam"
TEAM_LEADERS_COLOUR = 0x11806a
class CodeJams(commands.Cog):
    """Manages the code-jam related parts of our server."""

    def __init__(self, bot: Bot):
        # Bot instance; also provides the shared HTTP session used by
        # ``create`` to fetch a CSV from a link.
        self.bot = bot

    @commands.group()
    @commands.has_any_role(Roles.admins)
    async def codejam(self, ctx: commands.Context) -> None:
        """A Group of commands for managing Code Jams."""
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @codejam.command()
    async def create(self, ctx: commands.Context, csv_file: t.Optional[str]) -> None:
        """
        Create code-jam teams from a CSV file or a link to one, specifying the team names, leaders and members.

        The CSV file must have 3 columns: 'Team Name', 'Team Member Discord ID', and 'Team Leader'.
        This will create the text channels for the teams, and give the team leaders their roles.
        """
        async with ctx.typing():
            if csv_file:
                # A URL was given: download its body and use it as CSV text.
                async with self.bot.http_session.get(csv_file) as response:
                    if response.status != 200:
                        await ctx.send(f"Got a bad response from the URL: {response.status}")
                        return
                    csv_file = await response.text()
            elif ctx.message.attachments:
                # No URL: fall back to the first attachment on the message.
                csv_file = (await ctx.message.attachments[0].read()).decode("utf8")
            else:
                raise commands.BadArgument("You must include either a CSV file or a link to one.")

            # Maps team name -> list of (member, is_leader) tuples.
            teams = defaultdict(list)
            reader = csv.DictReader(csv_file.splitlines())
            for row in reader:
                member = ctx.guild.get_member(int(row["Team Member Discord ID"]))
                if member is None:
                    # Skip IDs that don't resolve to a current guild member.
                    log.trace(f"Got an invalid member ID: {row['Team Member Discord ID']}")
                    continue
                teams[row["Team Name"]].append((member, row["Team Leader"].upper() == "Y"))

            # One shared role for all team leaders, created fresh per jam.
            team_leaders = await ctx.guild.create_role(name="Code Jam Team Leaders", colour=TEAM_LEADERS_COLOUR)
            for team_name, members in teams.items():
                await self.create_team_channel(ctx.guild, team_name, members, team_leaders)
            await self.create_team_leader_channel(ctx.guild, team_leaders)
            await ctx.send(f"{Emojis.check_mark} Created Code Jam with {len(teams)} teams.")

    async def get_category(self, guild: discord.Guild) -> discord.CategoryChannel:
        """
        Return a code jam category.

        If all categories are full or none exist, create a new category.
        """
        for category in guild.categories:
            # Reuse an existing "Code Jam" category with room to spare.
            if category.name == CATEGORY_NAME and len(category.channels) < MAX_CHANNELS:
                return category
        return await self.create_category(guild)

    async def create_category(self, guild: discord.Guild) -> discord.CategoryChannel:
        """Create a new code jam category and return it."""
        log.info("Creating a new code jam category.")
        # Hidden from @everyone; visible to the bot itself.
        category_overwrites = {
            guild.default_role: discord.PermissionOverwrite(read_messages=False),
            guild.me: discord.PermissionOverwrite(read_messages=True)
        }
        category = await guild.create_category_channel(
            CATEGORY_NAME,
            overwrites=category_overwrites,
            reason="It's code jam time!"
        )
        await self.send_status_update(
            guild, f"Created a new category with the ID {category.id} for this Code Jam's team channels."
        )
        return category

    @staticmethod
    def get_overwrites(
        members: list[tuple[discord.Member, bool]],
        guild: discord.Guild,
    ) -> dict[t.Union[discord.Member, discord.Role], discord.PermissionOverwrite]:
        """Get code jam team channels permission overwrites."""
        # Hidden from @everyone; the event team and each team member can read.
        team_channel_overwrites = {
            guild.default_role: discord.PermissionOverwrite(read_messages=False),
            guild.get_role(Roles.code_jam_event_team): discord.PermissionOverwrite(read_messages=True)
        }
        for member, _ in members:
            team_channel_overwrites[member] = discord.PermissionOverwrite(
                read_messages=True
            )
        return team_channel_overwrites

    async def create_team_channel(
        self,
        guild: discord.Guild,
        team_name: str,
        members: list[tuple[discord.Member, bool]],
        team_leaders: discord.Role
    ) -> None:
        """Create the team's text channel."""
        await self.add_team_leader_roles(members, team_leaders)
        # Get permission overwrites and category
        team_channel_overwrites = self.get_overwrites(members, guild)
        code_jam_category = await self.get_category(guild)
        # Create a text channel for the team
        await code_jam_category.create_text_channel(
            team_name,
            overwrites=team_channel_overwrites,
        )

    async def create_team_leader_channel(self, guild: discord.Guild, team_leaders: discord.Role) -> None:
        """Create the Team Leader Chat channel for the Code Jam team leaders."""
        category: discord.CategoryChannel = guild.get_channel(Categories.summer_code_jam)
        team_leaders_chat = await category.create_text_channel(
            name="team-leaders-chat",
            overwrites={
                guild.default_role: discord.PermissionOverwrite(read_messages=False),
                team_leaders: discord.PermissionOverwrite(read_messages=True)
            }
        )
        await self.send_status_update(guild, f"Created {team_leaders_chat.mention} in the {category} category.")

    async def send_status_update(self, guild: discord.Guild, message: str) -> None:
        """Inform the events lead with a status update when the command is ran."""
        channel: discord.TextChannel = guild.get_channel(Channels.code_jam_planning)
        await channel.send(f"<@&{Roles.events_lead}>\n\n{message}")

    @staticmethod
    async def add_team_leader_roles(members: list[tuple[discord.Member, bool]], team_leaders: discord.Role) -> None:
        """Give the team leader role to each member flagged as a leader."""
        for member, is_leader in members:
            if is_leader:
                await member.add_roles(team_leaders)
def setup(bot: Bot) -> None:
    """Entry point used by discord.py to register the CodeJams cog."""
    cog = CodeJams(bot)
    bot.add_cog(cog)
|
<filename>src/python/bezier/_plot_helpers.py<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting utilities."""
import numpy as np
from bezier import _helpers
def new_axis():
    """Create a fresh matplotlib figure and return its current axis.

    Returns:
        matplotlib.artist.Artist: A newly created axis.
    """
    # NOTE: The plotting library is imported lazily so that users who only
    # want to compute never pay the (non-trivial) matplotlib import cost.
    import matplotlib.pyplot as plt  # pylint: disable=import-outside-toplevel

    return plt.figure().gca()
def add_plot_boundary(ax, padding=0.125):
    """Add a buffer of empty space around a plot boundary.

    .. note::

       This only uses ``line`` data from the axis. It **could**
       use ``patch`` data, but doesn't at this time.

    Args:
        ax (matplotlib.artist.Artist): A matplotlib axis.
        padding (Optional[float]): Amount (as a fraction of width and height)
            of padding to add around data. Defaults to ``0.125``.
    """
    # Stack all line vertices into a single 2 x N Fortran-ordered array.
    stacked = np.vstack([line.get_xydata() for line in ax.lines])
    nodes = np.asfortranarray(stacked.T)
    left, right, bottom, top = _helpers.bbox(nodes)
    # Grow each half-extent by the padding fraction about the bbox center.
    half_scale = (1.0 + padding) * 0.5
    mid_x = 0.5 * (left + right)
    mid_y = 0.5 * (bottom + top)
    half_width = half_scale * (right - left)
    half_height = half_scale * (top - bottom)
    ax.set_xlim(mid_x - half_width, mid_x + half_width)
    ax.set_ylim(mid_y - half_height, mid_y + half_height)
def add_patch(ax, color, pts_per_edge, *edges):
    """Add a polygonal surface patch to a plot.

    Plots the polygonal boundary as a line first, then fills the enclosed
    region with a translucent ``PathPatch`` of the same color.

    Args:
        ax (matplotlib.artist.Artist): A matplotlib axis.
        color (Tuple[float, float, float]): Color as RGB profile.
        pts_per_edge (int): Number of points to use in polygonal
            approximation of edge.
        *edges (Tuple[~bezier.curve.Curve, ...]): Curved edges defining
            a boundary.
    """
    # pylint: disable=import-outside-toplevel
    from matplotlib import patches
    from matplotlib import path as _path_mod

    # pylint: enable=import-outside-toplevel
    s_vals = np.linspace(0.0, 1.0, pts_per_edge)
    # Evaluate points on each edge.
    all_points = []
    for edge in edges:
        points = edge.evaluate_multi(s_vals)
        # We assume the edges overlap and leave out the first point
        # in each.
        all_points.append(points[:, 1:])
    # Add first point as last point (polygon is closed).
    first_edge = all_points[0]
    all_points.append(first_edge[:, [0]])
    # Add boundary first.
    polygon = np.asfortranarray(np.hstack(all_points))
    (line,) = ax.plot(polygon[0, :], polygon[1, :], color=color)
    # Reset ``color`` in case it was ``None`` and set from color wheel.
    color = line.get_color()
    # ``polygon`` is stored Fortran-contiguous with ``x-y`` points in each
    # column but ``Path()`` wants ``x-y`` points in each row.
    path = _path_mod.Path(polygon.T)
    patch = patches.PathPatch(
        path, facecolor=color, edgecolor=color, alpha=0.625
    )
    ax.add_patch(patch)
|
import pygame
import os
import random
import sys
import math
from pygame.locals import *
from gamestate import *
from battle import battle
from repair import repair
from events import events
from shop import shop
from gameover import game_over, game_win
from escape import Escape
LAST = -1
FIRST = 1
def get_rand():
    """Draw a node count from a skewed distribution favoring 2 and 3."""
    # Duplicated entries weight the draw: 2 and 3 are three times as
    # likely as 5 or 6.
    weighted_choices = [2, 2, 2, 3, 3, 3, 4, 4, 5, 6]
    return random.choice(weighted_choices)
class Icon(pygame.sprite.Sprite):
    """A single map node sprite (boss/minion/shop/unknown/repair)."""

    def __init__(self, image, shadow_image, type=None, x=0, y=0):
        # ``type`` is the map-icon key ('boss', 'minion', 'shop', 'unknown',
        # 'repair'); note it shadows the builtin of the same name.
        super().__init__()
        self.x = x
        self.y = y
        self.type = type
        self.image = image
        # Highlighted variant, drawn when the player is on this node.
        self.shadow_image = shadow_image
        # NOTE(review): __init__ anchors the rect at ``topleft`` while
        # update() re-anchors at ``center``; position() calls update()
        # right after setting x/y, so confirm the asymmetry is intended.
        self.rect = self.image.get_rect(topleft=(self.x, self.y))
        self.bounding_rect = self.image.get_bounding_rect()
        self.bounding_rect.center = self.rect.center
        # Nodes on the next level reachable from this one.
        self.children = []

    def connect(self, screen):
        # draw connections between nodes
        [pygame.draw.line(screen, ORCHIRD, self.rect.midtop, child.rect.midbottom) for child in self.children]

    def draw(self, screen, position, access=False):
        # Skip nodes scrolled well off the top/bottom of the screen.
        if self.y < -50 or self.y > SCREEN_HEIGHT + 50:
            return
        # Use the highlighted image for the node the player occupies.
        if self == position:
            screen.blit(self.shadow_image, self.rect)
        else:
            screen.blit(self.image, self.rect)
        self.connect(screen)

    def up(self):
        # Scroll helper: move this node down the screen by one map step.
        self.y += MAP_DELTA
        self.update()

    def down(self):
        # Scroll helper: move this node up the screen by one map step.
        self.y -= MAP_DELTA
        self.update()

    def collide(self, position):
        # True when the given (x, y) point hits this icon's visible pixels box.
        return self.bounding_rect.collidepoint(position)

    def copy(self):
        # Shallow copy sharing the same image surfaces; children are NOT copied.
        return Icon(self.image, self.shadow_image, self.type, self.x, self.y)

    def update(self):
        # Recompute the rects after x/y changed (centered on (x, y)).
        self.rect = self.image.get_rect(center=(self.x, self.y))
        self.bounding_rect = self.image.get_bounding_rect()
        self.bounding_rect.center = self.rect.center

    def is_child(self, parent):
        # True when this node is directly reachable from ``parent``.
        return self in parent.children
class IconTree(pygame.sprite.Group):
def __init__(self, images):
super().__init__()
# placeholder surface until actual images are filled in
image = pygame.surface.Surface((1, 1))
# start with 3-5 nodes for the player to start at
starting_nodes = random.randint(3, 5)
last_level = [Icon(image, image) for i in range(1, starting_nodes + 1)]
self.root = Icon(image, image)
self.root.children = last_level
# holds node list for each level of the tree
self.levels = [[self.root], last_level]
while len(self.levels) < 14:
new_level = [] # nodes that are being newly added
'''
Generate between 2 and 6 nodes for the next level
Each node can have at most half of the previous nodes connecting to it.
Only the last node connected
'''
new_level = [Icon(image, image) for _ in range(get_rand())] # random.randint(2, 6))]
has_parent = [False for _ in range(len(new_level))]
max_conn = math.ceil(len(last_level) / 2)
start_idx = 0
for parent in last_level:
if max_conn <= 1:
new_conn = 1
else:
new_conn = random.randint(1, max_conn)
end = min(start_idx + new_conn, len(new_level))
for i in range(start_idx, end):
parent.children.append(new_level[i])
has_parent[i] = True
start_idx = min(start_idx + new_conn - 1, len(new_level) - 1)
# Hook up oprhaned nodes
for chld, parent in zip(new_level, has_parent):
if not parent:
last_level[-1].children.append(chld)
# Get ready to move onto next level
self.levels.append(new_level)
last_level = new_level
'''
Add final repair nodes.
Before the boss players will have the option to repair.
'''
repair = []
repair_nodes = min(4, len(self.levels[LAST]))
last = 0
if repair_nodes == len(self.levels[LAST]):
nodes_per_repair = 1
else:
nodes_per_repair = len(self.levels[LAST]) // repair_nodes
for i in range(repair_nodes):
temp = Icon(*images['repair'])
temp.type = 'repair'
repair.append(temp)
for parent in self.levels[LAST][i * nodes_per_repair : (i + 1) * nodes_per_repair]:
parent.children.append(temp)
for parent in self.levels[LAST][i * nodes_per_repair:]:
parent.children.append(temp)
self.levels.append(repair)
''' Add final boss node. '''
boss = Icon(*images['boss'])
boss.type ='boss'
for parent in self.levels[LAST]:
parent.children.append(boss)
self.levels.append([boss])
'''
Generate actual icons at each node positoin in each map
Rest, Shop, Unknown, Minion
'''
total_encounters = sum([len(lvl) for lvl in self.levels[1:-2]])
selections = ['unknown'] * int(total_encounters * 0.30) + \
['minion'] * int(total_encounters * 0.40)
random.shuffle(selections)
for nodes in self.levels[1:3]:
for node in nodes:
node.type = selections.pop()
node.image, node.shadow_image = images[node.type]
node.update()
selections += ['repair'] * int(total_encounters * .15) + \
['shop'] * int(total_encounters * .15)
rem_nodes = sum(len(lvl) for lvl in self.levels[3:-2])
while len(selections) < rem_nodes:
selections.append('minion')
random.shuffle(selections)
for nodes in self.levels[3:-2]:
for node in nodes:
node.type = selections.pop()
node.image, node.shadow_image = images[node.type]
node.update()
# Add all newly created nodes to the group
for l in self.levels:
self.add(*l)
# generate positions for each node based on the number of nodes per level
self.position()
def draw(self, screen, position):
for icon in self.sprites():
if icon != self.root:
icon.draw(screen, position)
def position(self):
'''
Establishes positions of procedurally generated map icons.
Only needs to be called directly after new map initialization.
'''
delta_y = 150
y_coord = SCREEN_HEIGHT - 60
# establish positions of current level, skip root level
for current in self.levels[1:]:
n = len(current)
field = int(SCREEN_WIDTH * .9)
delta_x = (field - n * 40) / (n + 1)
x_coord = delta_x + int(SCREEN_WIDTH *.1)
for n in current:
n.x = x_coord
n.y = y_coord
x_coord += delta_x + 40
y_coord -= delta_y
self.update()
def scroll(self, screen, player_loc, bg, legend, up, down, up_rect, down_rect):
'''
Enables scrolling of map with arrow icons at bottom right hand side.
Will stop when the last row of icons display is going off the screen.
'''
while pygame.mouse.get_pressed()[MOUSE_ONE]:
pygame.time.Clock().tick(40)
screen.blit(bg, (0, 0))
screen.blit(up, up_rect)
screen.blit(down, down_rect)
pos = pygame.mouse.get_pos()
if down_rect.collidepoint(pos):
self.down()
elif up_rect.collidepoint(pos):
self.up()
self.draw(screen, player_loc)
screen.blit(legend, (580, 20))
pygame.display.update()
pygame.event.pump()
    def up(self):
        """Scroll the map one step via each sprite's up(), clamped at the last level."""
        # Stop scrolling when last level is about to go off the bottom
        if self.levels[LAST][0].y >= SCREEN_HEIGHT:
            return
        for sp in self.sprites():
            sp.up()
    def down(self):
        """Scroll the map one step via each sprite's down(), clamped at the first level."""
        # Stop scrolling when first level is about to go off the top
        if self.levels[FIRST][0].y <= 0:
            return
        for sp in self.sprites():
            sp.down()
class Map:
    """Sector-map screen: loads all icon artwork once, then runs the map
    event loop where the player picks nodes and triggers encounters."""
    def __init__(self):
        # node type -> (icon image, highlighted "player is here" image)
        self.images = {}
        #Boss
        bi = pygame.transform.scale(pygame.image.load('assets/map_icons/battle-mech.png').convert_alpha(), (70, 70))
        bi_shadow = pygame.transform.scale(pygame.image.load('assets/map_icons/battle-mech-here.png').convert_alpha(), (70, 70))
        self.images['boss'] = (bi, bi_shadow)
        #Minions
        minion = pygame.transform.scale(pygame.image.load('assets/map_icons/spider-bot.png').convert_alpha(), (40, 40))
        minion_shadow = pygame.transform.scale(pygame.image.load('assets/map_icons/spider-bot-here.png').convert_alpha(), (40, 40))
        self.images['minion'] = (minion, minion_shadow)
        #Stores
        store = pygame.transform.scale(pygame.image.load('assets/map_icons/energy-tank.png').convert_alpha(), (40, 40))
        store_shadow = pygame.transform.scale(pygame.image.load('assets/map_icons/energy-tank-here.png').convert_alpha(), (40, 40))
        self.images['shop'] = (store, store_shadow)
        #Unknown
        unk = pygame.transform.scale(pygame.image.load('assets/map_icons/uncertainty.png').convert_alpha(), (40, 40))
        unk_shadow = pygame.transform.scale(pygame.image.load('assets/map_icons/uncertainty-here.png').convert_alpha(), (40, 40))
        self.images['unknown'] = (unk, unk_shadow)
        # Repair
        rep = pygame.transform.scale(pygame.image.load('assets/map_icons/auto-repair.png').convert_alpha(), (40, 40))
        rep_shadow = pygame.transform.scale(pygame.image.load('assets/map_icons/auto-repair-here.png').convert_alpha(), (40, 40))
        self.images['repair'] = (rep, rep_shadow)
        # Background
        self.bg = pygame.transform.scale(pygame.image.load(os.path.join(BACKGROUND_PATH, "nebula/nebula09.png")), (SCREEN_WIDTH, SCREEN_HEIGHT))
        #Legend
        self.legend = pygame.transform.scale(pygame.image.load('assets/map_icons/Legend.png'), (200, 50))
        # Up/Down buttons
        self.up = pygame.transform.scale(pygame.image.load(os.path.join(ICON_PATH, "upgrade.png")), (ICON_SIZE, ICON_SIZE))
        self.down = pygame.transform.scale(pygame.image.load(os.path.join(ICON_PATH, "downgrade.png")), (ICON_SIZE, ICON_SIZE))
        # Anchor scroll buttons in the bottom-right corner, stacked vertically
        self.down_rect = self.down.get_rect(bottomright=(SCREEN_WIDTH, SCREEN_HEIGHT))
        self.up_rect = self.up.get_rect(topright=self.down_rect.topleft)
    def main_map(self, screen, player, assets):
        """Run the sector-map loop until the player dies or the boss fight ends.

        Shows the game-over screen on death/loss and the win screen on victory.
        """
        escape_call = Escape()
        sector_map = IconTree(self.images)
        sector_map.update()
        player_loc = sector_map.root
        alive = True
        # win stays None until the boss is fought; then it holds the outcome
        win = None
        while alive and win is None:
            screen.blit(self.bg, (0, 0))
            screen.blit(self.legend, (580, 20))
            screen.blit(self.up, self.up_rect)
            screen.blit(self.down, self.down_rect)
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                    break
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        escape_call.escape_menu(screen)
                        break
                elif event.type == MOUSEBUTTONDOWN:
                    position = pygame.mouse.get_pos()
                    # Scroll arrows take priority; scroll() loops until release
                    if self.up_rect.collidepoint(position) or self.down_rect.collidepoint(position):
                        sector_map.scroll(screen, player_loc, self.bg,
                                          self.legend, self.up, self.down,
                                          self.up_rect, self.down_rect)
                    for sp in sector_map.sprites():
                        # Only nodes reachable from the current location respond
                        if sp.is_child(player_loc) and sp.collide(position):
                            player_loc = sp
                            if sp.type == 'minion':
                                alive = battle(screen, player, assets, escape_call)
                            elif sp.type == 'boss':
                                win = battle(screen, player, assets, escape_call, boss=True)
                            elif sp.type == 'unknown':
                                alive = events(screen, player, assets, escape_call)
                            elif sp.type == 'repair':
                                repair(screen, player, assets, escape_call)
                            elif sp.type == 'shop':
                                shop(screen, player, assets, escape_call)
            if alive:
                sector_map.draw(screen, player_loc)
                pygame.display.update()
            if player.current_health <= 0:
                break
        if not win or player.current_health <= 0:
            game_over(screen)
        if(win):
            game_win(screen)
|
<reponame>czhang475/DEL_analysis
import copy
import argparse
import numpy as np
import pickle
def best_stereoisomer(sim_mat, stereo_groups, ref):
    '''
    Pick, for each compound, the stereoisomer with the highest average
    3D similarity score across all compounds on the other axis.

    Inputs
    ------
    sim_mat : numpy array
        matrix of 3D similarity scores (reference rows x test columns)
    stereo_groups : dictionary
        dictionary of compound index as key and indices of all its enumerated stereoisomers as value
    ref: bool
        True -> indices refer to rows (reference set); False -> columns (test set)

    Returns
    -----
    keep_dict : dictionary
        dictionary of best scoring stereoisomer for each compound
    '''
    keep_dict = {}
    for key, members in stereo_groups.items():
        # Fix: initializing to 0 silently dropped compounds whose scores were
        # all non-positive, producing missing keys (KeyError downstream).
        best_score = -np.inf
        for value in members:
            # row-wise mean for reference compounds, column-wise for test compounds
            score = np.mean(sim_mat[value, :]) if ref else np.mean(sim_mat[:, value])
            # keep only the stereoisomer with highest average similarity score
            if score > best_score:
                keep_dict[key] = value
                best_score = score
    return keep_dict
def keep_indices(stereo_groups, keep_groups, N, ref):
    '''
    Compute the axis indices to retain after discarding all stereoisomers
    except the best-scoring one for each compound.

    Inputs
    ------
    stereo_groups : dictionary
        dictionary of compound index as key and indices of all its enumerated stereoisomers as value
        (no longer mutated; previously this function removed entries in place)
    keep_groups : dictionary
        output of `best_stereoisomer`; dictionary of best scoring stereoisomer for each compound
    N : tuple
        shape of the matrix of 3D similarity scores (rows, columns)
    ref : bool
        True -> indices apply to rows (N[0]); False -> columns (N[1])

    Returns
    -------
    keep_ind : list
        sorted list of all indices to keep; sorting guarantees the matrix
        rows/columns retain their original order when sliced (the old
        `list(set(...))` relied on unspecified set ordering)
    '''
    # Collect indices of every stereoisomer that is NOT the chosen one
    remove = set()
    for key, members in stereo_groups.items():
        remove.update(v for v in members if v != keep_groups[key])
    axis_len = N[0] if ref else N[1]
    return sorted(set(range(axis_len)) - remove)
def modify_3D_sim_mat(sim_mat, ref_groups, test_groups):
    '''
    Inputs
    ------
    sim_mat : numpy array or str
        matrix of 3D similarity scores, or a path to an .npy file holding it
    ref_groups : str
        path to pickled dictionary of reference compound index as key and
        indices of all its enumerated stereoisomers as value
    test_groups : str
        path to pickled dictionary of test compound index as key and
        indices of all its enumerated stereoisomers as value

    Returns
    -------
    new_sim_mat_3D: numpy array
        matrix of 3D similarity scores with only best scoring stereoisomer for each compound
    '''
    # Fix: the CLI entry point passes a file path, but the np.load call was
    # commented out, so the raw path string reached the slicing code below.
    if isinstance(sim_mat, str):
        sim_mat = np.load(sim_mat)
    # Load group dictionaries, closing the file handles deterministically
    with open(ref_groups, 'rb') as fh:
        ref_groups = pickle.load(fh)
    with open(test_groups, 'rb') as fh:
        test_groups = pickle.load(fh)
    # Make deep copy of dictionaries so the original is not modified
    ref_ind = copy.deepcopy(ref_groups)
    test_ind = copy.deepcopy(test_groups)
    keep_groups_ref = best_stereoisomer(sim_mat, ref_ind, ref=True)
    keep_groups_test = best_stereoisomer(sim_mat, test_ind, ref=False)
    keep_ind_ref = keep_indices(ref_ind, keep_groups_ref, np.shape(sim_mat), ref=True)
    keep_ind_test = keep_indices(test_ind, keep_groups_test, np.shape(sim_mat), ref=False)
    # Slice the rows and columns to keep only one stereoisomer per compound
    new_sim_mat_3D = sim_mat[keep_ind_ref, :][:, keep_ind_test]
    return new_sim_mat_3D
if __name__ == "__main__":
my_parser = argparse.ArgumentParser(
description="Remove additional compound stereoisomers from 3D similarity score matrix",
allow_abbrev=False)
my_parser.add_argument('--matrix',
action='store',
type=str,
help='path to 3D similarity matrix',
required=True)
my_parser.add_argument('--ref_group',
action='store',
type=str,
help='path to dictionary of reference groups',
required=True)
my_parser.add_argument('--test_group',
action='store',
type=str,
help='path to dictionary of test groups',
required=True)
args = my_parser.parse_args()
new_mat = modify_3D_sim_mat(args.matrix, args.ref_group, args.test_group)
np.save('{}_mod.npy'.format(args.matrix[:-4]), new_mat)
|
# -*- coding: utf-8 -*-
'''
@Date: 2020/1/10
@Author: fanyibin
@Description: 央视网新闻爬虫
'''
from core.genius import Genius
from frame_library.common_library import timestr_to_timestamp, get_content_from_html, check_image
import re
class SpiderCctvNews(Genius):
    """Crawler for news.cctv.com.

    Seeds article lists from two APIs (a newer JSONP list API and a legacy
    per-subdomain JSON feed), then scrapes each article page for its text,
    images and source, saving the result to MongoDB.
    """
    name = 'cctv_news'
    # Categories served by the newer JSONP list API
    news_type = ('china', 'world', 'society', 'law', 'ent', 'tech', 'life')
    api_url = 'http://news.cctv.com/2019/07/gaiban/cmsdatainterface/page/{}_{}.jsonp?cb=t&cb={}'
    # Legacy API: subdomain -> classify label
    type_map = {'jingji': 'finance', 'military': 'mil'}
    old_api_url = 'http://{}.cctv.com/data/index.json'
    def start_requests(self):
        # Pages 1-7 of every category on the new API...
        for _type in self.news_type:
            for page in range(1, 8):
                url = self.api_url.format(_type, page, _type)
                yield self.seeding(url, self.parse_api_list, meta=_type)
        # ...plus one request per legacy subdomain feed
        for t, t_ in self.type_map.items():
            yield self.seeding(self.old_api_url.format(t), self.parse_old_api_list, meta=t_)
    def parse_api_list(self, response):
        """Parse the JSONP list response and seed one article request per entry."""
        # Strip the JSONP wrapper and extract the "list" payload, then split
        # it into individual {...} records
        listnews = re.search(r'"list":(.*?)}}\)', response.text).group(1)
        allnews = re.findall(r'({.*?})', listnews)
        for news in allnews:
            # SECURITY NOTE(review): eval() on scraped text executes arbitrary
            # code if the site is compromised; prefer json.loads or
            # ast.literal_eval — confirm the payload format before changing.
            _news = eval(news)
            url = _news['url']
            # filter_item() deduplicates already-seen articles
            if self.filter_item(url):
                item = {}
                item['url'] = url
                item['title'] = _news['title']
                item['classify'] = response.meta
                item['publish_timestr'] = _news['focus_date']
                item['publish_timestamp'] = timestr_to_timestamp(item['publish_timestr'])
                item['cover'] = _news['image']
                item['keywords'] = _news['keywords'].split(' ')
                item['web_source'] = '央视网'
                yield self.seeding(url, self.parse_article, meta=item)
    def parse_old_api_list(self, response):
        """Parse the legacy JSON feed and seed one article request per entry."""
        resp = response.json
        for news in resp['rollData']:
            url = news['url']
            if self.filter_item(url):
                item = {}
                item['url'] = url
                item['title'] = news['title']
                item['classify'] = response.meta
                item['publish_timestr'] = news['dateTime']
                item['publish_timestamp'] = timestr_to_timestamp(item['publish_timestr'])
                item['cover'] = news['image']
                # Legacy feed has no keywords field; split the content instead
                item['keywords'] = news['content'].split(' ')
                item['web_source'] = '央视网'
                yield self.seeding(url, self.parse_article, meta=item)
    def parse_article(self, response):
        """Extract body text/html and images from an article page, then save."""
        item = response.meta
        resp = response.etree_html
        # Two page templates exist; the spm-id meta tag marks the newer one
        if resp.xpath("//meta[@name='spm-id']"):
            content_ = get_content_from_html(resp, "//div[@class='content_area']", 'a', 'strong')
            if content_ is None:
                return self.log.info('html未提取到内容,故放弃本次请求。')
            item['content_text'] = content_[1]
        else:
            content_ = get_content_from_html(resp, "//div[@class='cnt_bd']", 'a', 'strong')
            if content_ is None:
                return self.log.info('html未提取到内容,故放弃本次请求。')
            # Old template: drop the first paragraph (page boilerplate)
            item['content_text'] = content_[1][1:]
            if '威虎堂' in item['content_text'][-1]:
                return self.log.info('此新闻无用,抛弃。')
        item['content_html'] = content_[0]
        item['images'] = []
        images_ = content_[2]
        for img in images_:
            # Skip QR-code images and unreachable/invalid ones
            if 'erweima' in img:
                continue
            if 'http' not in img:
                img = 'http:' + img
            if not check_image(img):
                continue
            item['images'].append(img)
        # article_type 2 = illustrated article, 1 = text only
        item['article_type'] = 2 if item['images'] else 1
        source = resp.xpath("//meta[@name='source']/@content")
        item['source'] = source[0] if source else '央视网'
        self.save_to_mongo(item)
if __name__ == '__main__':
    # Run the spider directly, seeding its own start requests.
    cctv = SpiderCctvNews()
    cctv.run(start_seeds=True)
|
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import pytest
import os
import copy
import numpy as np
from numpy.testing import assert_allclose
from hyperion.utils import Utt2Info
from hyperion.io import H5DataWriter
from hyperion.generators.sequence_batch_generator_v2 import SequenceBatchGeneratorV2 as SBG
# Where the synthetic test dataset is written.
output_dir = './tests/data_out/generators'
# exist_ok avoids the race between the old exists() check and makedirs()
os.makedirs(output_dir, exist_ok=True)
h5_file = output_dir + '/seqbgv2.h5'
key_file = output_dir + '/seqbgv2.scp'
# Dataset geometry: `num_seqs` sequences of dimension `dim` whose lengths
# increase linearly from `min_seq_length` in steps of `delta`.
num_seqs = 10
dim = 2
min_seq_length = 100
delta = 10
max_seq_length = min_seq_length + (num_seqs-1)*delta
# use min_seq_length instead of the literal 100 so the constants stay in sync
seq_lengths = np.arange(min_seq_length, max_seq_length+1, delta)
def create_dataset():
    """Write a deterministic random dataset to h5_file/key_file and return its Utt2Info.

    Produces `num_seqs` utterances named '0'..'9' whose class keys are
    '0','1','1','2','2','2','3','3','3','3' (class j appears j+1 times),
    matching the expectations hard-coded in the tests below.
    """
    file_path = [str(k) for k in xrange(num_seqs)]
    key=[]
    i = 0
    j = 0
    while i < num_seqs:
        # (j+1)*str(j) is the string str(j) repeated j+1 times; `key += key_i`
        # extends the list with its individual characters
        key_i = (j+1)*str(j)
        i += (i+1)
        j += 1
        key += key_i
    key = key[:num_seqs]
    u2c = Utt2Info.create(file_path, key)
    # Dataset already on disk from a previous test run: reuse it
    if os.path.exists(h5_file):
        return u2c
    u2c.save(key_file, sep=' ')
    h = H5DataWriter(h5_file)
    # Fixed seed so every test sees identical feature matrices
    rng = np.random.RandomState(seed=0)
    for i in xrange(num_seqs):
        x_i = rng.randn(seq_lengths[i], dim)
        h.write(file_path[i], x_i)
    return u2c
def test_num_seqs():
    # Generator must report one sequence per utterance in the dataset.
    create_dataset()
    sr = SBG(h5_file, key_file)
    assert sr.num_seqs==num_seqs
def test_seq_lengths():
    # With shuffling disabled, lengths must come back in creation order.
    create_dataset()
    sr = SBG(h5_file, key_file, shuffle_seqs=False)
    assert np.all(sr.seq_lengths==seq_lengths)
    assert sr.total_length==np.sum(seq_lengths)
    assert sr.min_seq_length == min_seq_length
    assert sr.max_seq_length == max_seq_length
def test_num_total_subseqs():
    # Verify the generator reports one sub-sequence per input sequence.
    create_dataset()
    sr = SBG(h5_file, key_file, batch_size=5)
    # Bug fix: `assert` was missing, so the comparison was a silent no-op
    assert sr.num_total_subseqs == num_seqs
def test_prune_min_length():
    # Pruning at min_seq_length+5 removes exactly the shortest sequence.
    create_dataset()
    sr = SBG(h5_file, key_file, batch_size=5, shuffle_seqs=False,
             prune_min_length=min_seq_length+5)
    assert sr.num_seqs==num_seqs - 1
    assert np.all(sr.seq_lengths==seq_lengths[1:])
    assert sr.total_length==np.sum(seq_lengths[1:])
    assert sr.min_seq_length == np.min(seq_lengths[1:])
    assert sr.max_seq_length == max_seq_length
def test_class_info():
    # Class bookkeeping must match the dataset built by create_dataset():
    # class j owns j+1 consecutive utterances ('0'; '1','2'; '3'-'5'; '6'-'9').
    create_dataset()
    sr = SBG(h5_file, key_file, batch_size=5, shuffle_seqs=False)
    assert sr.num_classes == 4
    print(sr.u2c.info)
    print(sr.u2c.key)
    print(sr.class2utt)
    print(sr.class2num_utt)
    class2utt = {0: ['0'], 1: ['1','2'], 2: ['3','4','5'], 3: ['6','7','8','9']}
    class2utt_idx = {0: np.array([0], dtype=int),
                     1: np.array([1,2], dtype=int),
                     2: np.array([3,4,5], dtype=int),
                     3: np.array([6,7,8,9], dtype=int)}
    #class2num_utt = {0: 1, 1: 2, 2: 3, 3: 4}
    class2num_utt = np.array([1,2,3,4], dtype=int)
    assert sr.class2utt == class2utt
    assert np.all(sr.class2num_utt == class2num_utt)
    for k in sr.class2utt.keys():
        assert_allclose(sr.class2utt_idx[k], class2utt_idx[k])
def test_compute_iters_auto():
    # Capping max_seq_length at min_seq_length doubles the iterations needed
    # to cover the data, so iters_per_epoch should go from 1 to 2.
    create_dataset()
    sr = SBG(h5_file, key_file, batch_size=5)
    assert sr.iters_per_epoch == 1
    sr = SBG(h5_file, key_file, batch_size=5,
             max_seq_length=min_seq_length)
    assert sr.iters_per_epoch == 2
def test_reset():
    # With reset_rng=True, reset() must reproduce the same random stream...
    create_dataset()
    sr = SBG(h5_file, key_file, batch_size=5, reset_rng=True,
             min_seq_length=5, max_seq_length=17)
    x1 = sr.rng.randn(3)
    sr.reset()
    assert sr.cur_step == 0
    assert_allclose(x1, sr.rng.randn(3))
    # ...and with reset_rng=False the stream depends on the epoch counter.
    sr = SBG(h5_file, key_file, batch_size=5, reset_rng=False,
             min_seq_length=5, max_seq_length=17)
    sr.cur_epoch = 100
    sr.reset()
    assert sr.cur_step == 0
    assert np.mean(x1) != np.mean(sr.rng.rand(3))
def read_func(batch_size, nepc, nepu):
    """Drive the generator for two epochs and check batch composition.

    nepc/nepu = examples per class / per utterance requested per batch.
    With reset_rng=True the second epoch must reproduce the first exactly.
    """
    u2c = create_dataset()
    sr = SBG(h5_file, key_file,
             reset_rng=True, iters_per_epoch=2,
             num_egs_per_class=nepc,
             num_egs_per_utt=nepu,
             min_seq_length=10, max_seq_length=20,
             batch_size=batch_size)
    x_e = []
    for epoch in xrange(2):
        # keep previous epoch's features to compare against after regeneration
        x0 = x_e
        key_e = []
        c_e = []
        x_e = []
        sw_e = []
        for i in xrange(sr.steps_per_epoch):
            key_i, x_i, sw_i, y_i = sr.read()
            # class id of each example from its one-hot label row
            c_i =[ i for i in np.argmax(y_i, axis=-1)]
            assert len(x_i) == batch_size
            key_e += key_i
            c_e += c_i
            x_e.append(x_i)
            sw_e.append(sw_i)
            # each utterance must appear between nepu and batch_size times
            skey, key_ids = np.unique(key_i, return_inverse=True)
            counts_key = np.zeros((len(skey),), dtype=int)
            for k in xrange(len(skey)):
                counts_key[k] = np.sum(key_ids==k)
            assert np.all(counts_key>=nepu) and np.all(counts_key<=batch_size)
            # each class must appear between nepc*nepu and batch_size times
            sc, c_ids = np.unique(c_i, return_inverse=True)
            counts_c = np.zeros((len(sc),), dtype=int)
            for k in xrange(len(sc)):
                counts_c[k] = np.sum(c_ids==k)
            assert np.all(counts_c>=nepc*nepu) and np.all(counts_c<=batch_size)
        x_e = np.vstack(tuple(x_e))
        sw_e = np.vstack(tuple(sw_e))
        # effective sub-sequence lengths from the sample-weight masks
        sl_e = np.sum(sw_e, axis=-1).astype(int)
        if epoch > 0:
            assert_allclose(x0, x_e)
        assert np.all(np.logical_and(sl_e>=10, sl_e<=20))
        print(c_e)
        print(key_e)
    # assert 0
def test_read_1epc_1epu():
    # batch 5, one example per class, one per utterance
    read_func(5,1,1)
def test_read_2epc_1epu():
    # batch 6, two examples per class, one per utterance
    read_func(6,2,1)
def test_read_2epc_2epu():
    # batch 16, two examples per class, two per utterance
    read_func(16,2,2)
if __name__ == '__main__':
    # Allow running this test module directly via pytest.
    pytest.main([__file__])
|
import csv
import datetime
import hashlib
import numpy
import json
import logging
import random
from itertools import chain
from functools import partial
import postgres_copy
import sqlalchemy
from retrying import retry
from sqlalchemy.orm import sessionmaker
from ohio import PipeTextIO
from triage.component.results_schema import (
Experiment,
Matrix,
Model,
ExperimentMatrix,
ExperimentModel,
)
def filename_friendly_hash(inputs):
    """Return a deterministic md5 hex digest of *inputs*.

    The value is serialized to canonical JSON (sorted keys) before hashing,
    so logically-equal dicts produce identical hashes. datetime/date values
    are serialized via their ISO-8601 representation; any other
    non-JSON-serializable type raises TypeError.
    """
    def _serialize(value):
        # json cannot encode datetimes natively; render them as ISO strings
        if isinstance(value, (datetime.datetime, datetime.date)):
            return value.isoformat()
        raise TypeError("Unknown type")

    canonical = json.dumps(inputs, default=_serialize, sort_keys=True)
    return hashlib.md5(canonical.encode("utf-8")).hexdigest()
def get_subset_table_name(subset_config):
    """Build the table name for a subset: subset_<name>_<config hash>."""
    subset_name = subset_config.get("name", "default")
    config_hash = filename_friendly_hash(subset_config)
    return "subset_{}_{}".format(subset_name, config_hash)
def retry_if_db_error(exception):
    # Predicate for `retrying`: only transient operational DB errors retry.
    return isinstance(exception, sqlalchemy.exc.OperationalError)
# Shared retry policy for all database-touching functions in this module.
DEFAULT_RETRY_KWARGS = {
    "retry_on_exception": retry_if_db_error,
    "wait_exponential_multiplier": 1000,  # wait 2^x*1000ms between each retry
    "stop_max_attempt_number": 14,
    # with this configuration, last wait will be ~2 hours
    # for a total of ~4.5 hours waiting
}
# Decorator form, applied to every DB helper below.
db_retry = retry(**DEFAULT_RETRY_KWARGS)
@db_retry
def save_experiment_and_get_hash(config, db_engine):
    """Upsert the experiment row keyed by the hash of *config*.

    Args:
        config (dict): JSON-serializable experiment configuration
        db_engine (sqlalchemy.engine): database engine

    Returns: (str) the filename-friendly hash identifying this experiment
    """
    experiment_hash = filename_friendly_hash(config)
    session = sessionmaker(bind=db_engine)()
    try:
        session.merge(Experiment(experiment_hash=experiment_hash, config=config))
        session.commit()
    finally:
        # close even if merge/commit raises, so @db_retry attempts don't leak sessions
        session.close()
    return experiment_hash
@db_retry
def associate_matrices_with_experiment(experiment_hash, matrix_uuids, db_engine):
    """Record that each matrix uuid belongs to the given experiment.

    Args:
        experiment_hash (str): hash identifying the experiment
        matrix_uuids (iterable): uuids of matrices to associate
        db_engine (sqlalchemy.engine): database engine
    """
    session = sessionmaker(bind=db_engine)()
    try:
        for matrix_uuid in matrix_uuids:
            session.merge(ExperimentMatrix(experiment_hash=experiment_hash, matrix_uuid=matrix_uuid))
        session.commit()
    finally:
        # guarantee cleanup on failure; @db_retry may re-enter this function
        session.close()
    logging.info("Associated matrices with experiment in database")
@db_retry
def associate_models_with_experiment(experiment_hash, model_hashes, db_engine):
    """Record that each model hash belongs to the given experiment.

    Args:
        experiment_hash (str): hash identifying the experiment
        model_hashes (iterable): hashes of models to associate
        db_engine (sqlalchemy.engine): database engine
    """
    session = sessionmaker(bind=db_engine)()
    try:
        for model_hash in model_hashes:
            session.merge(ExperimentModel(experiment_hash=experiment_hash, model_hash=model_hash))
        session.commit()
    finally:
        # guarantee cleanup on failure; @db_retry may re-enter this function
        session.close()
    logging.info("Associated models with experiment in database")
@db_retry
def missing_matrix_uuids(experiment_hash, db_engine):
    """Compare the contents of the experiment_matrices table with that of the
    matrices table to produce a list of matrix_uuids that the experiment wants
    but are not available.
    """
    # Table names come from the ORM metadata (trusted); experiment_hash is
    # passed as a bound %s parameter, not interpolated into the SQL string.
    query = f"""
        select experiment_matrices.matrix_uuid
        from {ExperimentMatrix.__table__.fullname} experiment_matrices
        left join {Matrix.__table__.fullname} matrices
        on (experiment_matrices.matrix_uuid = matrices.matrix_uuid)
        where experiment_hash = %s
        and matrices.matrix_uuid is null
    """
    return [row[0] for row in db_engine.execute(query, experiment_hash)]
@db_retry
def missing_model_hashes(experiment_hash, db_engine):
    """Compare the contents of the experiment_models table with that of the
    models table to produce a list of model hashes the experiment wants
    but are not available.
    """
    # Table names come from the ORM metadata (trusted); experiment_hash is
    # passed as a bound %s parameter, not interpolated into the SQL string.
    query = f"""
        select experiment_models.model_hash
        from {ExperimentModel.__table__.fullname} experiment_models
        left join {Model.__table__.fullname} models
        on (experiment_models.model_hash = models.model_hash)
        where experiment_hash = %s
        and models.model_hash is null
    """
    return [row[0] for row in db_engine.execute(query, experiment_hash)]
class Batch:
    """Lazily split an iterable into batches of at most `limit` items.

    Iterating a Batch yields generators; each must be fully consumed before
    requesting the next, since all groups share one underlying iterator.
    """
    # modified from
    # http://codereview.stackexchange.com/questions/118883/split-up-an-iterable-into-batches
    def __init__(self, iterable, limit=None):
        self.iterator = iter(iterable)
        self.limit = limit
        # Prime the first item so we know whether iteration should start at all
        try:
            self.current = next(self.iterator)
        except StopIteration:
            self.on_going = False
        else:
            self.on_going = True
    def group(self):
        """Yield one batch: the saved item plus up to limit-1 further items."""
        yield self.current
        # start enumerate at 1 because we already yielded the last saved item
        for num, item in enumerate(self.iterator, 1):
            self.current = item
            if num == self.limit:
                # item at the limit is saved for the next group, not yielded
                break
            yield item
        else:
            # underlying iterator exhausted without hitting the limit
            self.on_going = False
    def __iter__(self):
        while self.on_going:
            yield self.group()
AVAILABLE_TIEBREAKERS = {'random', 'best', 'worst'}

def sort_predictions_and_labels(predictions_proba, labels, tiebreaker='random', sort_seed=None, parallel_arrays=()):
    """Sort predictions and labels (descending by score) with a configured tiebreaking rule

    Args:
        predictions_proba (numpy.array) The predicted scores
        labels (numpy.array) The numeric labels (1/0, not True/False)
        tiebreaker (string) The tiebreaking method ('best', 'worst', 'random')
        sort_seed (signed int) The sort seed. Needed if 'random' tiebreaking is picked.
        parallel_arrays (tuple of numpy.array) Any other arrays, understood to be the same size
            as the predictions and labels, that should be sorted alongside them.

    Returns:
        (tuple) (predictions_proba, labels), sorted

    Raises:
        ValueError: for an unknown tiebreaker, or 'random' without a seed.
    """
    if len(labels) == 0:
        logging.debug("No labels present, skipping sorting.")
        if parallel_arrays:
            return (predictions_proba, labels, parallel_arrays)
        else:
            return (predictions_proba, labels)
    mask = None
    if tiebreaker == 'random':
        # Bug fix: use `is None`, not truthiness — 0 is a legitimate seed value
        if sort_seed is None:
            raise ValueError("If random tiebreaker is used, a sort seed must be given")
        random.seed(sort_seed)
        numpy.random.seed(sort_seed)
        # lexsort's LAST key is primary: sort by score, break ties randomly
        random_arr = numpy.random.rand(*predictions_proba.shape)
        mask = numpy.lexsort((random_arr, predictions_proba))
    elif tiebreaker == 'worst':
        # ties resolved by placing positive labels last (pessimistic ranking)
        mask = numpy.lexsort((-labels, predictions_proba))
    elif tiebreaker == 'best':
        # ties resolved by placing positive labels first (optimistic ranking)
        mask = numpy.lexsort((labels, predictions_proba))
    else:
        raise ValueError("Unknown tiebreaker")
    # lexsort is ascending; flip for highest-score-first ordering
    return_value = [
        numpy.flip(predictions_proba[mask]),
        numpy.flip(labels[mask]),
    ]
    if parallel_arrays:
        return_value.append(tuple(numpy.flip(arr[mask]) for arr in parallel_arrays))
    return return_value
@db_retry
def retrieve_model_id_from_hash(db_engine, model_hash):
    """Retrieves a model id from the database that matches the given hash

    Args:
        db_engine (sqlalchemy.engine) A database engine
        model_hash (str) The model hash to lookup

    Returns: (int) The model id (if found in DB), None (if not)
    """
    db_session = sessionmaker(bind=db_engine)()
    try:
        match = db_session.query(Model).filter_by(model_hash=model_hash).one_or_none()
        if match is None:
            return None
        return match.model_id
    finally:
        db_session.close()
@db_retry
def retrieve_model_hash_from_id(db_engine, model_id):
    """Retrieves the model hash associated with a given model id

    Args:
        db_engine (sqlalchemy.engine) A database engine
        model_id (int) The id of a given model in the database

    Returns: (str) the stored hash of the model
    """
    db_session = sessionmaker(bind=db_engine)()
    try:
        model = db_session.query(Model).get(model_id)
        return model.model_hash
    finally:
        db_session.close()
def _write_csv(file_like, db_objects, type_of_object):
writer = csv.writer(file_like, quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
for db_object in db_objects:
if type(db_object) != type_of_object:
raise TypeError("Cannot copy collection of objects to db as they are not all "
f"of the same type. First object was {type_of_object} "
f"and later encountered a {type(db_object)}")
writer.writerow(
[getattr(db_object, col.name) for col in db_object.__table__.columns]
)
@db_retry
def save_db_objects(db_engine, db_objects):
    """Saves a collection of SQLAlchemy model objects to the database using a COPY command

    Args:
        db_engine (sqlalchemy.engine)
        db_objects (iterable) SQLAlchemy model objects, corresponding to a valid table

    An empty collection is a no-op (previously this raised StopIteration).
    """
    db_objects = iter(db_objects)
    # Peek at the first object to learn the target table's type
    try:
        first_object = next(db_objects)
    except StopIteration:
        return
    type_of_object = type(first_object)
    # Stream the CSV through a pipe so rows are never all held in memory;
    # chain() puts the peeked object back at the front of the stream
    with PipeTextIO(partial(
        _write_csv,
        db_objects=chain((first_object,), db_objects),
        type_of_object=type_of_object
    )) as pipe:
        postgres_copy.copy_from(pipe, type_of_object, db_engine, format="csv")
|
<reponame>Mandy-77/MTCNN_Tucker2
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import numpy as np
from numpy import linalg as la
import torch.nn as nn
class Tkd2Conv(nn.Module):
    """Tucker-2 decomposition of a Conv2d into three smaller convolutions.

    The original weight tensor is factored (via two SVDs) into:
      conv1: 1x1 channel-reduction  C_in -> rc
      conv2: kh x kw core           rc   -> rf  (carries stride/padding)
      conv3: 1x1 channel-expansion  rf   -> C_out (carries the bias)

    Args:
        conv_nn_module (nn.Conv2d): the convolution to approximate
        rc (int): rank kept on the input-channel mode
        rf (int): rank kept on the output-channel mode
    """
    def __init__(self, conv_nn_module, rc, rf):
        def tucker2decomposition(conv_nn_module, rc, rf):
            """Return the three factor weights plus bias/stride/padding."""
            bias = conv_nn_module.bias
            stride = conv_nn_module.stride
            padding = conv_nn_module.padding
            # work in (kh, kw, C_in, C_out) layout
            conv = conv_nn_module.weight.detach().data.numpy()
            conv = conv.transpose((2, 3, 1, 0))
            dim_tensor = conv.shape
            # mode-3 unfolding (input channels) -> truncated left singular vectors
            mode3_matrix = np.transpose(conv, (2, 0, 1, 3)).reshape([dim_tensor[2], -1])
            u3, sigma3, vt3 = la.svd(mode3_matrix)
            conv1_matrix = u3[:, 0:rc]
            # mode-4 unfolding (output channels) -> truncated left singular vectors
            mode4_matrix = np.transpose(conv, (3, 0, 1, 2)).reshape([dim_tensor[3], -1])
            u4, sigma4, vt4 = la.svd(mode4_matrix)
            conv3_matrix = u4[:, 0:rf]
            # core tensor: project the weight onto both factor bases
            conv2 = np.dot(conv1_matrix.transpose(), mode3_matrix).reshape(rc, dim_tensor[0],
                    dim_tensor[1], dim_tensor[3]).transpose([1, 2, 0, 3])
            conv2 = np.transpose(conv2, (3, 0, 1, 2)).reshape([dim_tensor[3], -1])
            conv2 = np.dot(conv3_matrix.transpose(), conv2).reshape(rf, dim_tensor[0],
                    dim_tensor[1], rc).transpose([1, 2, 3, 0])
            conv1 = conv1_matrix.reshape([1, 1, dim_tensor[2], rc])
            conv3 = conv3_matrix.transpose().reshape([1, 1, rf, dim_tensor[3]])
            return conv1, conv2, conv3, bias, stride, padding

        super(Tkd2Conv, self).__init__()
        conv1, conv2, conv3, bias, stride, padding = tucker2decomposition(conv_nn_module, rc, rf)
        size1 = conv1.shape
        size2 = conv2.shape
        size3 = conv3.shape
        # back to torch's (C_out, C_in, kh, kw) weight layout
        conv1_weight = torch.from_numpy(conv1).permute(3, 2, 0, 1).float()
        conv2_weight = torch.from_numpy(conv2).permute(3, 2, 0, 1).float()
        conv3_weight = torch.from_numpy(conv3).permute(3, 2, 0, 1).float()
        self.conv1 = nn.Conv2d(size1[2], size1[3], size1[0], bias=False)
        self.conv2 = nn.Conv2d(size2[2], size2[3], size2[0], stride=stride,
                               padding=padding, bias=False)
        # Bug fix: the source conv's bias was captured but never applied, so
        # the decomposed output differed from the original by the bias term.
        self.conv3 = nn.Conv2d(size3[2], size3[3], size3[0], bias=bias is not None)
        self.conv1.weight = nn.Parameter(data=conv1_weight, requires_grad=True)
        self.conv2.weight = nn.Parameter(data=conv2_weight, requires_grad=True)
        self.conv3.weight = nn.Parameter(data=conv3_weight, requires_grad=True)
        if bias is not None:
            self.conv3.bias = nn.Parameter(data=bias.detach().clone(), requires_grad=True)

    def forward(self, x):
        """Apply the three factor convolutions in sequence."""
        x = self.conv1(x)
        x = self.conv2(x)
        out = self.conv3(x)
        return out
# test
def main():
    """Sanity-check Tkd2Conv against the convolution it approximates.

    Returns:
        (decomposed output, reference output, relative L2 error,
         mean elementwise relative error)
    """
    conv_nn_module = torch.nn.Conv2d(16, 32, 5, 5)
    rc = 4
    rf = 4
    decomposed_conv = Tkd2Conv(conv_nn_module, rc, rf)
    x = torch.rand([1, 16, 32, 32])
    out = decomposed_conv(x)
    out2 = conv_nn_module(x)
    error = out - out2
    # global relative error: ||out - out2|| / ||out2||
    norm_n = np.linalg.norm(error.data.numpy().reshape(-1), ord=2)
    norm_d = np.linalg.norm(out2.data.numpy().reshape(-1), ord=2)
    result = norm_n / norm_d
    relative = error / out2
    # Bug fix: normalize by the actual element count; the hard-coded
    # 1*32*28*28 did not match the real output shape for this kernel/stride.
    result2 = np.linalg.norm(relative.data.numpy().reshape(-1), ord=1) / relative.numel()
    return out, out2, result, result2
if __name__ == "__main__":
out, out2, result, result2 = main()
print(out, out2, result, result2)
|
import random
import unittest
from simulator.helper.environment import get_mortalty_rate, get_hospitalization_rate, get_symptom_rate
from simulator.helper.simulation import get_infection_parameters
from simulator.helper.utils import invert_map_list, invert_map, flatten, reduce_multiply_by_key, choose_weight_order, \
rec_get_manhattan_walk, get_random_choice_list
class TestHelpers(unittest.TestCase):
    """Unit tests for the simulator helper utilities.

    random.seed(12) before every test makes the randomized helpers
    (get_random_choice_list, get_infection_parameters, ...) deterministic,
    which is why exact values are asserted below.
    """
    @classmethod
    def setUp(cls):
        # NOTE(review): setUp is declared as a classmethod; unittest normally
        # expects an instance method. It still runs — confirm this is intended.
        random.seed(12)
    def test_invert_map(self):
        input_dic = {0: 1, 1: 1, 2: 2}
        result = invert_map(input_dic)
        self.assertEqual(list(result.keys()), [1, 2])
        self.assertEqual(result[1], [0, 1])
        self.assertEqual(result[2], [2])
    def test_invert_map_list(self):
        input_dic = {0: [(1, 2), (2, 1)], 1: [(2, 1)], 2: [(1, 3), (2, 1)]}
        result = invert_map_list(input_dic)
        self.assertEqual(list(result.keys()), [(1, 2), (2, 1), (1, 3)])
        self.assertEqual(result[(1, 2)], [0])
        self.assertEqual(result[(2, 1)], [0, 1, 2])
        self.assertEqual(result[(1, 3)], [2])
    def test_flatten(self):
        input_list = [[1, 2], [0], [1, 3]]
        result = flatten(input_list)
        self.assertEqual(result, [1, 2, 0, 1, 3])
    def test_get_random_choice_list(self):
        # one choice per non-empty sub-list; exact picks fixed by the seed
        input_list_list = [[1, 2], [0], [1, 3], []]
        result = list(get_random_choice_list(input_list_list))
        self.assertEqual(len(result), 3)
        self.assertTrue(0 in result)
        self.assertEqual(result, [1, 0, 3])
    def test_get_infection_parameters(self):
        # exact draws from the given ranges, fixed by the seed
        result = get_infection_parameters(2, 7, 7, 21, 21, 39, 30, 60)
        self.assertEqual(result[0], 4)
        self.assertEqual(result[1], 16)
        self.assertEqual(result[2], 32)
        self.assertEqual(result[3], 34)
    def test_get_mortalty_rate(self):
        # rates are age-banded lookups
        self.assertEqual(get_mortalty_rate(62), 0.036)
        self.assertEqual(get_mortalty_rate(31), 0.02)
        self.assertEqual(get_hospitalization_rate(44), 0.025)
        self.assertEqual(get_hospitalization_rate(19), 0.01)
    def test_get_symptom_rate(self):
        self.assertEqual(get_symptom_rate(44), 0.06108)
        self.assertEqual(get_symptom_rate(19), 0.009045000000000001)
    def test_rec_get_manhattan_walk(self):
        # L-shaped path: walk one axis fully, then the other
        result = rec_get_manhattan_walk([], (1, 1), (3, 3))
        self.assertEqual(result, [(1, 1), (1, 2), (1, 3), (2, 3), (3, 3)])
    def test_rec_get_manhattan_walk_backward(self):
        result = rec_get_manhattan_walk([], (3, 3), (1, 1))
        self.assertEqual(result, [(3, 3), (3, 2), (3, 1), (2, 1), (1, 1)])
    def test_rec_get_manhattan_walk_same_block(self):
        result = rec_get_manhattan_walk([], (1, 1), (1, 1))
        self.assertEqual(result, [(1, 1)])
    def test_reduce_multiply_by_key(self):
        # values sharing a key are multiplied together
        result = reduce_multiply_by_key([(0, 2), (0, 1.5), (1, 2), ('a', 5), (99, 0), (99, 12)])
        self.assertEqual(result, {
            0: 3,
            1: 2,
            'a': 5,
            99: 0
        })
    def test_choose_weight_order(self):
        # small draw -> late elements favored; draw >= 1 -> first element
        self.assertEqual(choose_weight_order(list(range(100)), 0.001), 99)
        self.assertEqual(choose_weight_order(list(range(100)), 0.999), 0)
        self.assertEqual(choose_weight_order(list(range(100)), 0.021), 45)
        self.assertEqual(choose_weight_order(list(range(100)), 200), 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
<reponame>adammacudzinski/libsbp
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Utilities for running YAML-defined unit tests.
"""
from sbp.msg import SBP
from sbp.table import dispatch, _SBP_TABLE
import base64
import json
import yaml
def _encoded_string(s):
"""Encode the string-like argument as bytes if suitable"""
return s.encode('ascii') if hasattr(s, 'encode') else s
def _assert_sbp(sbp, test_case):
    """
    Assert that a proper SBP parsing from a raw package of data.

    Parameters
    ----------
    sbp : :class: `SBP`
      SBP message parsed from unit test's raw packet.
    test_case : dict
      Unit test case parsed from YAML.
    """
    # numeric fields in the YAML are strings (possibly hex); int(x, 0)
    # auto-detects the base
    assert sbp.crc == int(test_case['crc'], 0), "Invalid crc."
    assert sbp.msg_type == int(test_case['msg_type'], 0), "Invalid msg_type."
    assert sbp.sender == int(test_case['sender'], 0), "Invalid sender."
    assert sbp.length == test_case['length'], "Invalid length."
    # payload is compared in its base64 form, as stored in the YAML fixture
    assert base64.standard_b64encode(sbp.payload) == _encoded_string(test_case['payload']), \
        "Invalid payload."
def field_eq(p, e):
    """
    Recursively compare a parsed message field against an expected value.

    Parameters
    ----------
    p : object with dict-like attributed access
      Parsed field contents.
    e : object with dict-like attributed access
      Expected field contents.

    Returns
    ----------
    True if fields are equal, else False.
    """
    if isinstance(e, dict):
        pairs = iter(e.items())
    elif isinstance(e, list):
        pairs = enumerate(e)
    else:
        # leaf value: direct comparison
        return p == e
    return all(field_eq(p[key], expected) for key, expected in pairs)
def _assert_msg(msg, test_case):
    """
    Asserts that the parsed payload of an SBP message has the expected
    field values.

    Parameters
    ----------
    msg : Parsed SBP message.
      Parsed SBP message.
    test_case : dict
      Unit test case for this message.
    """
    assert msg.__class__.__name__ == test_case['name']
    if test_case['fields']:
        for field_name, field_value in test_case['fields'].items():
            # expected values from YAML may be str where the message holds
            # bytes; _encoded_string normalizes before comparing
            assert field_eq(getattr(msg, field_name), _encoded_string(field_value)), \
                "Unequal field values (name: %s): got %r, but expected %r!" \
                % (field_name, getattr(msg, field_name), field_value)
def _assert_msg_roundtrip(msg, raw_packet):
    """
    Asserts that a msg serializes back into the binary packet it was
    parsed from.

    Parameters
    ----------
    msg : Parsed SBP message.
      Parsed SBP message.
    raw_packet : str
      base64-encoded raw packet from the unit test fixture.
    """
    reencoded = base64.standard_b64encode(msg.to_binary())
    expected = _encoded_string(raw_packet)
    assert reencoded == expected
def _assert_msg_roundtrip_json(msg, raw_json):
"""
Asserts that a msg gets serialized back into JSON with the
expected value, as well as gets serialized from JSON into
an expected object.
"""
to_json = json.loads(msg.to_json())
from_json = json.loads(raw_json)
assert sorted(to_json.items()) == sorted(from_json.items())
assert msg == msg.from_json(raw_json)
def _assert_materialization(msg, sbp, raw_json):
    """Asserts that a message materialized will get serialized into the
    right JSON object.
    """
    kwargs = msg['fields'] or dict()
    # Materialize the concrete message class for this msg_type.
    materialized = _SBP_TABLE[sbp.msg_type](sbp, **kwargs)
    assert isinstance(materialized.to_json_dict(), dict)
    assert materialized.to_json_dict() == json.loads(raw_json)
def _assert_sane_package(pkg_name, pkg):
"""
Sanity check the package collection of tests before actually
running the tests.
Parameters
----------
pkg_name : str
Name of package to test
pkg : dict
Parsed contents of YAML file.
"""
assert len(pkg['tests']) > 0, "Package has no tests!"
def assert_package(test_filename, pkg_name):
    """
    Runs unit tests for message bindings by reading a YAML unit test
    specification, parsing a raw packet for each test, and then
    asserting that SBP messages and parsed payloads have their intended
    values.

    Parameters
    ----------
    test_filename : str
        Filepath to unit test specifications
    pkg_name : str
        Name of package to test
    """
    with open(test_filename, 'r') as f:
        pkg = yaml.load(f.read(), Loader=yaml.FullLoader)
    _assert_sane_package(pkg_name, pkg)
    for case in pkg['tests']:
        raw = base64.standard_b64decode(case['raw_packet'])
        sbp = SBP.unpack(raw)
        _assert_sbp(sbp, case['sbp'])
        # dispatch() is invoked per check so each assertion works on a
        # freshly parsed message, as the original tests did.
        _assert_msg(dispatch(sbp), case['msg'])
        _assert_msg_roundtrip(dispatch(sbp), case['raw_packet'])
        _assert_msg_roundtrip_json(dispatch(sbp), case['raw_json'])
        _assert_materialization(case['msg'], sbp, case['raw_json'])
|
<filename>loopy/transform/pack_and_unpack_args.py
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2018 <NAME>, <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from loopy.diagnostic import LoopyError
from loopy.kernel.instruction import CallInstruction
from loopy.program import Program
from loopy.kernel import LoopKernel
from loopy.kernel.function_interface import CallableKernel, ScalarCallable
from loopy.symbolic import SubArrayRef
__doc__ = """
.. currentmodule:: loopy
.. autofunction:: pack_and_unpack_args_for_call
"""
def pack_and_unpack_args_for_call_for_single_kernel(kernel,
        callables_table, call_name, args_to_pack=None,
        args_to_unpack=None):
    """
    Returns a copy of *kernel* with instructions appended to copy the
    arguments in *args* to match the alignment expected by the *call_name* in
    the kernel. The arguments are copied back to *args* with the appropriate
    data layout.

    :arg call_name: An instance of :class:`str` denoting the function call in
        the *kernel*.

    :arg args_to_pack: A list of the arguments as instances of :class:`str` which
        must be packed. If set *None*, it is interpreted that all the array
        arguments would be packed.

    :arg args_to_unpack: A list of the arguments as instances of :class:`str`
        which must be unpacked. If set *None*, it is interpreted that
        all the array arguments should be unpacked.
    """
    assert isinstance(kernel, LoopKernel)
    new_domains = []
    new_tmps = kernel.temporary_variables.copy()
    # Maps the id of each transformed call instruction to the list of
    # instructions (pack insns + new call + unpack insns) that replace it.
    old_insn_to_new_insns = {}

    for insn in kernel.instructions:
        if not isinstance(insn, CallInstruction):
            # packing and unpacking can only be done for CallInstructions.
            continue
        if insn.expression.function.name not in callables_table:
            continue

        in_knl_callable = callables_table[
                insn.expression.function.name]

        if in_knl_callable.name != call_name:
            # not the function we're looking for.
            continue
        in_knl_callable = in_knl_callable.with_packing_for_args()

        vng = kernel.get_var_name_generator()
        ing = kernel.get_instruction_id_generator()

        parameters = insn.expression.parameters
        # NOTE(review): when *args_to_pack*/*args_to_unpack* are None they are
        # populated from the FIRST matching call instruction and then reused
        # for subsequent ones -- verify this is intended when a kernel
        # contains several calls to *call_name*.
        if args_to_pack is None:
            args_to_pack = [par.subscript.aggregate.name for par in
                parameters+insn.assignees if isinstance(par, SubArrayRef)
                and (par.swept_inames)]
        if args_to_unpack is None:
            args_to_unpack = [par.subscript.aggregate.name for par in
                parameters+insn.assignees if isinstance(par, SubArrayRef)
                and (par.swept_inames)]

        # {{{ sanity checks for args

        assert isinstance(args_to_pack, list)
        assert isinstance(args_to_unpack, list)

        for arg in args_to_pack:
            found_sub_array_ref = False
            for par in parameters + insn.assignees:
                # checking that the given args is a sub array ref
                if isinstance(par, SubArrayRef) and (
                        par.subscript.aggregate.name == arg):
                    found_sub_array_ref = True
                    break
            if not found_sub_array_ref:
                raise LoopyError("No match found for packing arg '%s' of call '%s' "
                        "at insn '%s'." % (arg, call_name, insn.id))
        for arg in args_to_unpack:
            if arg not in args_to_pack:
                raise LoopyError("Argument %s should be packed in order to be "
                        "unpacked." % arg)

        # }}}

        packing_insns = []
        unpacking_insns = []

        # {{{ handling ilp tags

        from loopy.kernel.data import IlpBaseTag, VectorizeTag
        import islpy as isl
        from pymbolic import var

        dim_type = isl.dim_type.set
        # ILP/vectorized inames get duplicated ("*_ilp") so the packing
        # temporaries stay private per ILP lane.
        ilp_inames = set(iname for iname in insn.within_inames
                if all(isinstance(tag, (IlpBaseTag, VectorizeTag))
                    for tag in kernel.iname_to_tags.get(iname, [])))
        new_ilp_inames = set()
        ilp_inames_map = {}
        for iname in ilp_inames:
            new_iname_name = vng(iname + "_ilp")
            ilp_inames_map[var(iname)] = var(new_iname_name)
            new_ilp_inames.add(new_iname_name)
        for iname in ilp_inames:
            new_domain = kernel.get_inames_domain(iname).copy()
            for i in range(new_domain.n_dim()):
                old_iname = new_domain.get_dim_name(dim_type, i)
                if old_iname in ilp_inames:
                    new_domain = new_domain.set_dim_name(
                        dim_type, i, ilp_inames_map[var(old_iname)].name)
            new_domains.append(new_domain)

        # }}}

        from pymbolic.mapper.substitutor import make_subst_func
        from loopy.symbolic import SubstitutionMapper

        # dict to store the new assignees and parameters, the mapping pattern
        # from arg_id to parameters is identical to InKernelCallable.arg_id_to_dtype
        id_to_parameters = tuple(enumerate(parameters)) + tuple(
                (-i-1, assignee) for i, assignee in enumerate(insn.assignees))
        new_id_to_parameters = {}

        for arg_id, p in id_to_parameters:
            if isinstance(p, SubArrayRef) and (p.subscript.aggregate.name in
                    args_to_pack):
                new_pack_inames = ilp_inames_map.copy()  # packing-specific inames
                new_unpack_inames = ilp_inames_map.copy()  # unpacking-specific iname

                # NOTE(review): the two assignments below overwrite the
                # ilp_inames_map copies made just above -- the copies appear
                # to be dead; confirm before cleaning up.
                new_pack_inames = dict((iname, var(vng(iname.name +
                    "_pack"))) for iname in p.swept_inames)
                new_unpack_inames = dict((iname, var(vng(iname.name +
                    "_unpack"))) for iname in p.swept_inames)

                # Updating the domains corresponding to the new inames.
                for iname in p.swept_inames:
                    new_domain_pack = kernel.get_inames_domain(iname.name).copy()
                    new_domain_unpack = kernel.get_inames_domain(iname.name).copy()
                    for i in range(new_domain_pack.n_dim()):
                        old_iname = new_domain_pack.get_dim_name(dim_type, i)
                        if var(old_iname) in new_pack_inames:
                            new_domain_pack = new_domain_pack.set_dim_name(
                                dim_type, i, new_pack_inames[var(old_iname)].name)
                            new_domain_unpack = new_domain_unpack.set_dim_name(
                                dim_type, i, new_unpack_inames[var(old_iname)].name)
                    new_domains.append(new_domain_pack)
                    new_domains.append(new_domain_unpack)

                arg = p.subscript.aggregate.name
                pack_name = vng(arg + "_pack")

                from loopy.kernel.data import (TemporaryVariable,
                        temp_var_scope)

                if arg in kernel.arg_dict:
                    arg_in_caller = kernel.arg_dict[arg]
                else:
                    arg_in_caller = kernel.temporary_variables[arg]

                # Private temporary laid out exactly as the callee expects.
                pack_tmp = TemporaryVariable(
                    name=pack_name,
                    dtype=arg_in_caller.dtype,
                    dim_tags=in_knl_callable.arg_id_to_descr[arg_id].dim_tags,
                    shape=in_knl_callable.arg_id_to_descr[arg_id].shape,
                    scope=temp_var_scope.PRIVATE,
                )

                new_tmps[pack_name] = pack_tmp

                from loopy import Assignment
                pack_subst_mapper = SubstitutionMapper(make_subst_func(
                    new_pack_inames))
                unpack_subst_mapper = SubstitutionMapper(make_subst_func(
                    new_unpack_inames))

                # {{{ getting the lhs for packing and rhs for unpacking

                from loopy.isl_helpers import simplify_via_aff, make_slab

                # Linearize the caller-side index, then re-split it along the
                # callee's strides to index the packing temporary.
                flatten_index = simplify_via_aff(
                        sum(dim_tag.stride*idx for dim_tag, idx in
                            zip(arg_in_caller.dim_tags, p.subscript.index_tuple)))

                new_indices = []
                for dim_tag in in_knl_callable.arg_id_to_descr[arg_id].dim_tags:
                    ind = flatten_index // dim_tag.stride
                    flatten_index -= (dim_tag.stride * ind)
                    new_indices.append(ind)

                new_indices = tuple(simplify_via_aff(i) for i in new_indices)

                pack_lhs_assignee = pack_subst_mapper(
                        var(pack_name).index(new_indices))
                unpack_rhs = unpack_subst_mapper(
                        var(pack_name).index(new_indices))

                # }}}

                packing_insns.append(Assignment(
                    assignee=pack_lhs_assignee,
                    expression=pack_subst_mapper.map_subscript(p.subscript),
                    within_inames=insn.within_inames - ilp_inames | set(
                        new_pack_inames[i].name for i in p.swept_inames) | (
                        new_ilp_inames),
                    depends_on=insn.depends_on,
                    id=ing(insn.id+"_pack"),
                    depends_on_is_final=True
                ))
                if p.subscript.aggregate.name in args_to_unpack:
                    unpacking_insns.append(Assignment(
                        expression=unpack_rhs,
                        assignee=unpack_subst_mapper.map_subscript(p.subscript),
                        within_inames=insn.within_inames - ilp_inames | set(
                            new_unpack_inames[i].name for i in p.swept_inames) | (
                            new_ilp_inames),
                        id=ing(insn.id+"_unpack"),
                        depends_on=frozenset([insn.id]),
                        depends_on_is_final=True
                    ))

                # {{{ creating the sweep inames for the new sub array refs

                updated_swept_inames = []

                for i, _ in enumerate(
                        in_knl_callable.arg_id_to_descr[arg_id].shape):
                    updated_swept_inames.append(var(vng("i_packsweep_"+arg)))

                ctx = kernel.isl_context
                space = isl.Space.create_from_names(ctx,
                        set=[iname.name for iname in updated_swept_inames])
                iname_set = isl.BasicSet.universe(space)
                for iname, axis_length in zip(updated_swept_inames,
                        in_knl_callable.arg_id_to_descr[arg_id].shape):
                    iname_set = iname_set & make_slab(space, iname.name, 0,
                            axis_length)
                new_domains = new_domains + [iname_set]

                # }}}

                # The call now receives the packed temporary instead of the
                # original sub-array reference.
                new_id_to_parameters[arg_id] = SubArrayRef(
                        tuple(updated_swept_inames),
                        (var(pack_name).index(tuple(updated_swept_inames))))
            else:
                new_id_to_parameters[arg_id] = p

        if packing_insns:
            subst_mapper = SubstitutionMapper(make_subst_func(ilp_inames_map))
            new_call_insn = insn.with_transformed_expressions(subst_mapper)
            new_params = tuple(subst_mapper(new_id_to_parameters[i]) for i, _ in
                    enumerate(parameters))
            new_assignees = tuple(subst_mapper(new_id_to_parameters[-i-1])
                    for i, _ in enumerate(insn.assignees))
            new_call_insn = new_call_insn.copy(
                    depends_on=new_call_insn.depends_on | set(
                        pack.id for pack in packing_insns),
                    within_inames=new_call_insn.within_inames - ilp_inames | (
                        new_ilp_inames),
                    expression=new_call_insn.expression.function(*new_params),
                    assignees=new_assignees)
            old_insn_to_new_insns[insn.id] = (packing_insns + [new_call_insn] +
                    unpacking_insns)

    if old_insn_to_new_insns:
        new_instructions = []
        for insn in kernel.instructions:
            if insn.id in old_insn_to_new_insns:
                # Replacing the current instruction with the group of
                # instructions including the packing and unpacking instructions
                new_instructions.extend(old_insn_to_new_insns[insn.id])
            else:
                # for the instructions that depend on the call instruction that
                # are to be packed and unpacked, we need to add the complete
                # instruction block as a dependency for them.
                new_depends_on = insn.depends_on
                if insn.depends_on & set(old_insn_to_new_insns):
                    # need to add the unpack instructions on dependencies.
                    for old_insn_id in insn.depends_on & set(old_insn_to_new_insns):
                        new_depends_on |= frozenset(i.id for i
                                in old_insn_to_new_insns[old_insn_id])
                new_instructions.append(insn.copy(depends_on=new_depends_on))
        kernel = kernel.copy(
                domains=kernel.domains + new_domains,
                instructions=new_instructions,
                temporary_variables=new_tmps
        )

    return kernel
def pack_and_unpack_args_for_call(program, *args, **kwargs):
    """Program-level entry point: applies
    :func:`pack_and_unpack_args_for_call_for_single_kernel` to every
    callable kernel in *program* and returns the updated program.
    """
    assert isinstance(program, Program)
    new_resolved_functions = {}
    for func_id, clbl in program.callables_table.items():
        if isinstance(clbl, CallableKernel):
            # Transform the subkernel and rebuild the callable around it.
            packed_subkernel = pack_and_unpack_args_for_call_for_single_kernel(
                    clbl.subkernel, program.callables_table,
                    *args, **kwargs)
            clbl = clbl.copy(subkernel=packed_subkernel)
        elif isinstance(clbl, ScalarCallable):
            # Scalar callables carry no array arguments to pack.
            pass
        else:
            raise NotImplementedError("Unknown type of callable %s." % (
                type(clbl).__name__))
        new_resolved_functions[func_id] = clbl

    new_callables_table = program.callables_table.copy(
            resolved_functions=new_resolved_functions)
    return program.copy(callables_table=new_callables_table)
# vim: foldmethod=marker
|
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the JouleScope device
This test requires that JouleScope hardware be attached to this PC!
"""
import unittest
from joulescope.driver import scan, UsbdRequest, LOOPBACK_BUFFER_SIZE, Device
from joulescope.usb import hw_tests
from joulescope.pattern_buffer import PatternBuffer
class TestPattern(unittest.TestCase):
    """Datapath pattern tests; requires attached JouleScope hardware."""

    def setUp(self):
        self.device = None
        self.devices = scan(name='joulescope')
        if len(self.devices) == 0:
            raise unittest.SkipTest("no devices found")
        if len(self.devices) > 1:
            print("multiple devices found")
        self.device = self.devices[0]
        self.device.open()

    def tearDown(self):
        if self.device is not None:
            self.device.close()

    def test_control_loopback_wvalue(self):
        hw_tests.control_loopback_wvalue(
            self.device.usb_device, UsbdRequest.LOOPBACK_WVALUE, 17)

    def test_control_loopback_buffer(self):
        hw_tests.control_loopback_buffer(
            self.device.usb_device, UsbdRequest.LOOPBACK_BUFFER,
            LOOPBACK_BUFFER_SIZE, 4)

    def _pattern(self, duration=None):
        """Stream the test pattern for *duration* seconds and validate it."""
        run_duration = 1.0 if duration is None else int(duration)
        pattern_buffer = PatternBuffer()
        self.device.stream_buffer = pattern_buffer
        self.device.read(duration=run_duration, out_format='raw')
        status = pattern_buffer.status()
        self.assertGreater(status['sample_id'], 1000000)
        self.assertEqual(status['header_error'], 0)
        self.assertLessEqual(status['pkt_index_error'], 1)
        self.assertLessEqual(status['pattern_error'], 1)

    def test_datapath_usb(self):
        self.device.parameter_set('control_test_mode', 'usb')
        self.device.parameter_set('source', 'pattern_usb')
        self._pattern(1.0)

    def test_datapath_sensor(self):
        self.device.parameter_set('control_test_mode', 'normal')
        self.device.parameter_set('source', 'pattern_sensor')
        self._pattern(1.0)

    def test_read(self):
        self.device.parameter_set('source', 'raw')
        for _ in range(10):
            samples = self.device.read(contiguous_duration=0.125,
                                       out_format='raw')
            self.assertEqual((self.device.sampling_frequency // 8, 2),
                             samples.shape)
class TestAttributes(unittest.TestCase):
    """Device attribute tests that run without hardware attached."""

    def test_defaults(self):
        device = Device(None)
        self.assertEqual(2000000, device.sampling_frequency)
        self.assertEqual('30 seconds', device.parameter_get('buffer_duration'))
        self.assertEqual(30, device.parameter_get('buffer_duration', dtype='actual'))
        self.assertEqual('2 Hz', device.parameter_get('reduction_frequency'))
        self.assertIsNone(device.stream_buffer)
        self.assertIsNone(device.calibration)
        self.assertEqual('i_range', device.parameters(name='i_range').name)
        self.assertEqual('off', device.parameter_get('i_range'))
        # Accessors that need an open device must raise when disconnected.
        with self.assertRaises(Exception):
            device.serial_number
        with self.assertRaises(Exception):
            device.info()
        self.assertFalse(device.is_streaming)
        with self.assertRaises(Exception):
            device.status()
        with self.assertRaises(Exception):
            device.extio_status()

    def test_parameter_arbitrary(self):
        device = Device(None)
        device.parameter_set('buffer_duration', 2)

    def test_reduction_frequency(self):
        device = Device(None)
        for frequency in [1, 2, 4, 10, 20, 50, 100]:
            with self.subTest(frequency=frequency):
                device.reduction_frequency = frequency
                self.assertEqual(frequency, device.reduction_frequency)

    def test_info(self):
        device = Device(None)
        self.assertEqual('JS110', device.parameter_get('model'))
        self.assertEqual(None, device.parameter_get('device_serial_number'))
        self.assertEqual(None, device.parameter_get('hardware_serial_number'))
|
# coding: utf-8
"""
.. _l-estim-sird-theory:
Estimation des paramètres d'un modèle SIRD
==========================================
On part d'un modèle :class:`CovidSIRD <aftercovid.models.CovidSIRD>`
qu'on utilise pour simuler des données. On regarde s'il est possible
de réestimer les paramètres du modèle à partir des observations.
.. contents::
:local:
Simulation des données
++++++++++++++++++++++
"""
import warnings
from pprint import pprint
import numpy
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
import matplotlib.pyplot as plt
import pandas
from aftercovid.models import EpidemicRegressor, CovidSIRD
model = CovidSIRD()
model

###########################################
# Update the coefficients.

model['beta'] = 0.4
model["mu"] = 0.06
model["nu"] = 0.04
pprint(model.P)

###################################
# Starting point.

pprint(model.Q)

###################################
# Simulation.

X, y = model.iterate2array(50, derivatives=True)
# One column per model state (from model.Q), plus its derivative
# prefixed with 'd'.
data = {_[0]: x for _, x in zip(model.Q, X.T)}
data.update({('d' + _[0]): c for _, c in zip(model.Q, y.T)})
df = pandas.DataFrame(data)
df.tail()

######################################
# Visualization.

df.plot(title="Simulation SIRD")

###########################################
# Estimation
# ++++++++++
#
# The module implements the class :class:`EpidemicRegressor
# <aftercovid.models.EpidemicRegressor>` which replicates
# the :epkg:`scikit-learn` API.

m = EpidemicRegressor('SIRD', verbose=True, learning_rate_init=1e-3,
                      max_iter=10, early_th=1)
m.fit(X, y)
pprint(m.model_.P)

###############################################
# The population's reaction is not constant over the
# course of the epidemic: its behavior may change as
# the disease propagates. The model coefficients are
# therefore estimated over a sliding window.
def find_best_model(Xt, yt, lrs, th):
    """Fits one SIRD :class:`EpidemicRegressor` per learning rate in *lrs*
    and returns ``(best_estimator, best_loss)``, stopping early as soon as
    the loss falls below threshold *th*. Candidates whose loss is NaN are
    discarded; if every candidate diverges, ``(None, None)`` is returned.
    """
    best_est = None
    best_loss = None
    for lr in lrs:
        with warnings.catch_warnings():
            # The optimizer may emit RuntimeWarning while fitting.
            warnings.simplefilter("ignore", RuntimeWarning)
            candidate = EpidemicRegressor(
                'SIRD',
                learning_rate_init=lr,
                max_iter=500,
                early_th=1)
            candidate.fit(Xt, yt)
            loss = candidate.score(Xt, yt)
        if numpy.isnan(loss):
            continue
        if best_est is None or loss < best_loss:
            best_est, best_loss = candidate, loss
        if best_loss < th:
            # Good enough: stop trying further learning rates.
            return best_est, best_loss
    return best_est, best_loss
###############################################
# The model coefficients are estimated every 2 days
# over the last 10 days.

coefs = []
for k in range(0, X.shape[0] - 9, 2):
    end = min(k + 10, X.shape[0])
    Xt, yt = X[k:end], y[k:end]
    m, loss = find_best_model(Xt, yt, [1e-2, 1e-3], 10)
    loss = m.score(Xt, yt)
    print("k={} iter={} loss={:1.3f} coef={}".format(
        k, m.iter_, loss, m.model_._val_p))
    obs = dict(k=k, loss=loss, it=m.iter_, R0=m.model_.R0())
    obs.update({k: v for k, v in zip(m.model_.param_names, m.model_._val_p)})
    coefs.append(obs)

#######################################
# Summary.

dfcoef = pandas.DataFrame(coefs).set_index('k')
dfcoef

#############################################################
# Visualization.

with warnings.catch_warnings():
    warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
    fig, ax = plt.subplots(2, 3, figsize=(14, 6))
    dfcoef[["mu", "nu"]].plot(ax=ax[0, 0], logy=True)
    dfcoef[["beta"]].plot(ax=ax[0, 1], logy=True)
    dfcoef[["loss"]].plot(ax=ax[1, 0], logy=True)
    dfcoef[["R0"]].plot(ax=ax[0, 2])
    df.plot(ax=ax[1, 1], logy=True)
    fig.suptitle('Estimation de R0 tout au long de la simulation', fontsize=12)

#################################
# Estimating the coefficients is harder at the beginning
# and at the end of the experiment. The strategy should
# probably change there.

####################################################
# Different estimation window sizes
# ++++++++++++++++++++++++++++++++
#
# The `beta` parameter was estimated over a 10-day window.
# Does it change over a shorter or a longer period?
# On perfect (noise-free) data this should not change much.

coefs = []
for delay in [4, 5, 6, 7, 8, 9, 10]:
    print('delay', delay)
    for k in range(0, X.shape[0] - delay, 4):
        end = min(k + delay, X.shape[0])
        Xt, yt = X[k:end], y[k:end]
        m, loss = find_best_model(Xt, yt, [1e-3, 1e-4], 10)
        loss = m.score(Xt, yt)
        if k == 0:
            print(
                "k={} iter={} loss={:1.3f} coef={}".format(
                    k, m.iter_, loss, m.model_._val_p))
        obs = dict(k=k, loss=loss, it=m.iter_, R0=m.model_.R0(), delay=delay)
        obs.update({k: v for k, v in zip(
            m.model_.param_names, m.model_._val_p)})
        coefs.append(obs)
#############################################
# Summary.

dfcoef = pandas.DataFrame(coefs)
dfcoef

################################################
# Plots.

with warnings.catch_warnings():
    warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
    fig, ax = plt.subplots(2, 3, figsize=(14, 6))
    # NOTE(review): the pivots below do not depend on *delay*, so each
    # iteration redraws the same curves -- confirm whether a per-delay
    # filter was intended here.
    for delay in sorted(set(dfcoef['delay'])):
        dfcoef.pivot('k', 'delay', 'mu').plot(
            ax=ax[0, 0], logy=True, legend=False).set_title('mu')
        dfcoef.pivot('k', 'delay', 'nu').plot(
            ax=ax[0, 1], logy=True, legend=False).set_title('nu')
        dfcoef.pivot('k', 'delay', 'beta').plot(
            ax=ax[0, 2], logy=True, legend=False).set_title('beta')
        dfcoef.pivot('k', 'delay', 'R0').plot(
            ax=ax[1, 2], logy=True, legend=False).set_title('R0')
    ax[1, 2].plot([dfcoef.index[0], dfcoef.index[-1]], [1, 1], '--',
                  label="R0=1")
    ax[1, 2].set_ylim(0, 5)
    dfcoef.pivot('k', 'delay', 'loss').plot(
        ax=ax[1, 0], logy=True, legend=False).set_title('loss')
    df.plot(ax=ax[1, 1], logy=True)
    fig.suptitle('Estimation de R0 tout au long de la simulation '
                 'avec différentes tailles de fenêtre', fontsize=12)

###################################
# The chart lacks a legend.
# That will come later.

######################################################
# Noisy data
# ++++++++++++++++
#
# The idea is to check whether the estimation behaves
# just as well on noisy data.

Xeps = CovidSIRD.add_noise(X, epsilon=1.)
yeps = numpy.vstack([Xeps[1:] - Xeps[:-1], y[-1:]])

###########################################
# Run the estimation again.

coefs = []
for k in range(0, X.shape[0] - 9, 2):
    end = min(k + 10, X.shape[0])
    Xt, yt = Xeps[k:end], yeps[k:end]
    m, loss = find_best_model(Xt, yt, [1e-2, 1e-3, 1e-4], 10)
    loss = m.score(Xt, yt)
    print(
        "k={} iter={} loss={:1.3f} coef={}".format(
            k, m.iter_, loss, m.model_._val_p))
    obs = dict(k=k, loss=loss, it=m.iter_, R0=m.model_.R0())
    obs.update({k: v for k, v in zip(
        m.model_.param_names, m.model_._val_p)})
    coefs.append(obs)

dfcoef = pandas.DataFrame(coefs).set_index('k')
dfcoef

##########################
# Plots.

dfeps = pandas.DataFrame({_[0]: x for _, x in zip(model.Q, Xeps.T)})
with warnings.catch_warnings():
    warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
    fig, ax = plt.subplots(2, 3, figsize=(14, 6))
    dfcoef[["mu", "nu"]].plot(ax=ax[0, 0], logy=True)
    dfcoef[["beta"]].plot(ax=ax[0, 1], logy=True)
    dfcoef[["loss"]].plot(ax=ax[1, 0], logy=True)
    dfcoef[["R0"]].plot(ax=ax[0, 2])
    dfeps.plot(ax=ax[1, 1])
    fig.suptitle(
        'Estimation de R0 tout au long de la simulation sur '
        'des données bruitées', fontsize=12)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class PolicyAttachment(pulumi.CustomResource):
    # NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    # only comments are added here so a regeneration diff stays reviewable.
    groups: pulumi.Output[list]
    """
    The group(s) the policy should be applied to
    """
    name: pulumi.Output[str]
    """
    The name of the attachment. This cannot be an empty string.
    """
    policy_arn: pulumi.Output[str]
    """
    The ARN of the policy you want to apply
    """
    roles: pulumi.Output[list]
    """
    The role(s) the policy should be applied to
    """
    users: pulumi.Output[list]
    """
    The user(s) the policy should be applied to
    """
    def __init__(__self__, resource_name, opts=None, groups=None, name=None, policy_arn=None, roles=None, users=None, __name__=None, __opts__=None):
        """
        Attaches a Managed IAM Policy to user(s), role(s), and/or group(s)

        !> **WARNING:** The aws_iam_policy_attachment resource creates **exclusive** attachments of IAM policies. Across the entire AWS account, all of the users/roles/groups to which a single policy is attached must be declared by a single aws_iam_policy_attachment resource. This means that even any users/roles/groups that have the attached policy via any other mechanism (including other Terraform resources) will have that attached policy revoked by this resource. Consider `aws_iam_role_policy_attachment`, `aws_iam_user_policy_attachment`, or `aws_iam_group_policy_attachment` instead. These resources do not enforce exclusive attachment of an IAM policy.

        > **NOTE:** The usage of this resource conflicts with the `aws_iam_group_policy_attachment`, `aws_iam_role_policy_attachment`, and `aws_iam_user_policy_attachment` resources and will permanently show a difference if both are defined.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[list] groups: The group(s) the policy should be applied to
        :param pulumi.Input[str] name: The name of the attachment. This cannot be an empty string.
        :param pulumi.Input[str] policy_arn: The ARN of the policy you want to apply
        :param pulumi.Input[list] roles: The role(s) the policy should be applied to
        :param pulumi.Input[list] users: The user(s) the policy should be applied to
        """
        # Deprecated __name__/__opts__ aliases are honored for backwards
        # compatibility, with a warning.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        __props__['groups'] = groups
        __props__['name'] = name
        # policy_arn is the only required input property.
        if policy_arn is None:
            raise TypeError("Missing required property 'policy_arn'")
        __props__['policy_arn'] = policy_arn
        __props__['roles'] = roles
        __props__['users'] = users
        super(PolicyAttachment, __self__).__init__(
            'aws:iam/policyAttachment:PolicyAttachment',
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
<gh_stars>1-10
"""Vanilla Policy Gradient (REINFORCE)."""
import collections
import copy
from dowel import tabular
import numpy as np
import torch
import torch.nn.functional as F
from garage import log_performance
from garage.np import discount_cumsum
from garage.np.algos import RLAlgorithm
from garage.torch import compute_advantages, filter_valids
from garage.torch.optimizers import OptimizerWrapper
from garage.torch.algorithm import garageVPG
class VPG(garageVPG):
# Fix the boundary issue due to timeout by bootstrapping.
"""Vanilla Policy Gradient (REINFORCE).
VPG, also known as Reinforce, trains stochastic policy in an on-policy way.
Args:
env_spec (EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
sampler (garage.sampler.Sampler): Sampler.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
        append_terminal_val (bool): Whether to append the value function
            evaluated at the timeout observation (the fix that bootstraps
            returns across timeout-truncated episode boundaries).
"""
    def __init__(
        self,
        env_spec,
        policy,
        value_function,
        sampler,
        policy_optimizer=None,
        vf_optimizer=None,
        num_train_per_epoch=1,
        discount=0.99,
        gae_lambda=1,
        center_adv=True,
        positive_adv=False,
        policy_ent_coeff=0.0,
        use_softplus_entropy=False,
        stop_entropy_gradient=False,
        entropy_method='no_entropy',
        append_terminal_val=True,
    ):
        # NOTE(review): this constructor deliberately does NOT call
        # garageVPG.__init__; it re-initializes all parent state by hand
        # (see the HACK marker below). Any state added to the upstream
        # garage constructor will be missing here -- verify when upgrading.
        # HACK
        # Whether to bootstrap returns with the value function at
        # timeout-truncated episode boundaries.
        self._append_terminal_val = append_terminal_val
        self._discount = discount
        self.policy = policy
        self.max_episode_length = env_spec.max_episode_length

        self._value_function = value_function
        self._gae_lambda = gae_lambda
        self._center_adv = center_adv
        self._positive_adv = positive_adv
        self._policy_ent_coeff = policy_ent_coeff
        self._use_softplus_entropy = use_softplus_entropy
        self._stop_entropy_gradient = stop_entropy_gradient
        self._entropy_method = entropy_method
        self._n_samples = num_train_per_epoch
        self._env_spec = env_spec

        self._maximum_entropy = (entropy_method == 'max')
        self._entropy_regularzied = (entropy_method == 'regularized')
        self._check_entropy_configuration(entropy_method, center_adv,
                                          stop_entropy_gradient,
                                          policy_ent_coeff)
        # Rolling window of recent episode returns for logging.
        self._episode_reward_mean = collections.deque(maxlen=100)
        self._sampler = sampler

        # Fall back to Adam-based optimizers when none are supplied.
        if policy_optimizer:
            self._policy_optimizer = policy_optimizer
        else:
            self._policy_optimizer = OptimizerWrapper(torch.optim.Adam, policy)
        if vf_optimizer:
            self._vf_optimizer = vf_optimizer
        else:
            self._vf_optimizer = OptimizerWrapper(torch.optim.Adam,
                                                  value_function)

        self._old_policy = copy.deepcopy(self.policy)
    def _train_once(self, itr, eps):
        """Train the algorithm once.

        Args:
            itr (int): Iteration number.
            eps (EpisodeBatch): A batch of collected paths.

        Returns:
            numpy.float64: Calculated mean value of undiscounted returns.
        """
        obs = torch.Tensor(eps.padded_observations)
        rewards = torch.Tensor(eps.padded_rewards)
        # HACK Fix the boundary issue
        # When an episode stopped because of a time limit (not a true
        # terminal state), bootstrap its return with the value estimate of
        # the last observation so the discounted return is not biased low.
        rewards_ = eps.padded_rewards
        if self._append_terminal_val:
            # Per-episode flag: truthy when cut off by the time limit.
            tt_ = eps.padded_env_infos['GymEnv.TimeLimitTerminated'][:,-1]
            vlast_ = self._value_function(torch.Tensor(eps.last_observations)).detach().numpy()
            # Zero the bootstrap value for episodes that ended naturally.
            vlast_ = tt_*vlast_
            rewards_ = np.concatenate((rewards_, vlast_[...,np.newaxis]), axis=1)
            # NOTE(review): this reads `self.discount`, but __init__ stores
            # `self._discount` — confirm a `discount` property exists on a
            # base class, otherwise this raises AttributeError.
            returns = torch.Tensor(np.stack([discount_cumsum(reward, self.discount) for reward in rewards_]))
            # Drop the appended bootstrap column so shapes match the episodes.
            returns = returns[:,:-1]
        else:
            returns = torch.Tensor(np.stack([discount_cumsum(reward, self.discount) for reward in rewards_]))
        # END OF HACK
        valids = eps.lengths
        with torch.no_grad():
            baselines = self._value_function(obs)
        if self._maximum_entropy:
            # 'max' entropy method: add dense policy entropy to the rewards.
            policy_entropies = self._compute_policy_entropy(obs)
            rewards += self._policy_ent_coeff * policy_entropies
        obs_flat = torch.Tensor(eps.observations)
        actions_flat = torch.Tensor(eps.actions)
        rewards_flat = torch.Tensor(eps.rewards)
        returns_flat = torch.cat(filter_valids(returns, valids))
        advs_flat = self._compute_advantage(rewards, valids, baselines)
        # Losses/KL before the update, recorded for logging the improvement.
        with torch.no_grad():
            policy_loss_before = self._compute_loss_with_adv(
                obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_before = self._value_function.compute_loss(
                obs_flat, returns_flat)
            kl_before = self._compute_kl_constraint(obs)
        self._train(obs_flat, actions_flat, rewards_flat, returns_flat,
                    advs_flat)
        # Same quantities after the update.
        with torch.no_grad():
            policy_loss_after = self._compute_loss_with_adv(
                obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_after = self._value_function.compute_loss(
                obs_flat, returns_flat)
            kl_after = self._compute_kl_constraint(obs)
            policy_entropy = self._compute_policy_entropy(obs)
        with tabular.prefix(self.policy.name):
            tabular.record('/LossBefore', policy_loss_before.item())
            tabular.record('/LossAfter', policy_loss_after.item())
            tabular.record('/dLoss',
                           (policy_loss_before - policy_loss_after).item())
            tabular.record('/KLBefore', kl_before.item())
            tabular.record('/KL', kl_after.item())
            tabular.record('/Entropy', policy_entropy.mean().item())
        with tabular.prefix(self._value_function.name):
            tabular.record('/LossBefore', vf_loss_before.item())
            tabular.record('/LossAfter', vf_loss_after.item())
            tabular.record('/dLoss',
                           vf_loss_before.item() - vf_loss_after.item())
        # Keep the pre-update policy for the next iteration's KL bookkeeping.
        self._old_policy.load_state_dict(self.policy.state_dict())
        undiscounted_returns = log_performance(itr,
                                               eps,
                                               discount=self._discount)
        return np.mean(undiscounted_returns)
# --------------------------------------------------------------- file boundary
# Repository: masschallenge/impact-api
from datetime import (
datetime,
timedelta,
)
from pytz import utc
import calendar
from django.db import connection
from django.test.utils import CaptureQueriesContext
from django.urls import reverse
from accelerator_abstract.models.base_clearance import (
CLEARANCE_LEVEL_EXEC_MD,
CLEARANCE_LEVEL_GLOBAL_MANAGER,
CLEARANCE_LEVEL_POM,
CLEARANCE_LEVEL_STAFF
)
from accelerator.models import UserRole
from accelerator.tests.factories import (
LocationFactory,
MentorProgramOfficeHourFactory,
ProgramFactory,
ProgramFamilyFactory,
ProgramFamilyLocationFactory,
ProgramRoleGrantFactory,
StartupTeamMemberFactory,
UserFactory
)
from accelerator.tests.contexts import UserRoleContext
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from accelerator.tests.utils import days_from_now
from .api_test_case import APITestCase
from ..v1.views import (
ISO_8601_DATE_FORMAT,
OfficeHoursCalendarView,
)
from ..v1.views.office_hours_calendar_view import FINALIST, MENTOR, STAFF
from ..permissions.v1_api_permissions import DEFAULT_PERMISSION_DENIED_DETAIL
from .factories import UserFactory
from .utils import nonexistent_object_id
class TestOfficeHoursCalendarView(APITestCase):
    """Tests for the office-hours calendar API endpoint.

    Covers date-range selection (weekly/monthly span around a focal date),
    visibility rules for mentors, finalists and staff, response payload
    contents (timezones, locations, startups, program families), error
    handling, and query-count regressions.
    """
    view = OfficeHoursCalendarView

    # --- date-range behavior -------------------------------------------

    def test_no_focal_date_specified_sees_current_week(self):
        office_hour = self.create_office_hour()
        response = self.get_response(user=office_hour.mentor)
        self.assert_hour_in_response(response, office_hour)

    def test_no_focal_date_specified_does_not_see_last_week(self):
        office_hour = self.create_office_hour(
            start_date_time=days_from_now(-9))
        response = self.get_response(user=office_hour.mentor)
        self.assert_hour_not_in_response(response, office_hour)

    def test_focal_date_specified_sees_sessions_in_range(self):
        two_weeks_ago = days_from_now(-14)
        focal_date = two_weeks_ago.strftime(ISO_8601_DATE_FORMAT)
        office_hour = self.create_office_hour(
            start_date_time=two_weeks_ago)
        response = self.get_response(user=office_hour.mentor,
                                     focal_date=focal_date)
        self.assert_hour_in_response(response, office_hour)

    def test_calendar_data_for_month_span_last_day_of_the_month(self):
        end_of_month = datetime(2020,1,31,5,0,0)
        focal_day = datetime(2020,1,3,5,0,0)
        office_hour = self.create_office_hour(start_date_time=end_of_month)
        focal_date = focal_day.strftime(ISO_8601_DATE_FORMAT)
        response = self.get_response(user=office_hour.mentor,
                                     focal_date=focal_date,
                                     calendar_span="month")
        self.assert_hour_in_response(response, office_hour)

    def test_calendar_data_for_month_span_mid_month(self):
        end_of_month = datetime(2020,1,31,5,0,0)
        mid_month = datetime(2020,1,15,5,0,0)
        office_hour = self.create_office_hour(start_date_time=mid_month)
        focal_date = end_of_month.strftime(ISO_8601_DATE_FORMAT)
        response = self.get_response(user=office_hour.mentor,
                                     focal_date=focal_date,
                                     calendar_span="month")
        self.assert_hour_in_response(response, office_hour)

    def test_focal_date_specified_does_not_see_sessions_not_in_range(self):
        two_weeks_ago = days_from_now(-14)
        focal_date = two_weeks_ago.strftime(ISO_8601_DATE_FORMAT)
        office_hour = self.create_office_hour()
        response = self.get_response(user=office_hour.mentor,
                                     focal_date=focal_date)
        self.assert_hour_not_in_response(response, office_hour)

    def test_hours_returned_in_date_sorted_order(self):
        one_day = timedelta(1)
        wednesday = utc.localize(datetime(2020, 1, 31))
        focal_date = wednesday.strftime(ISO_8601_DATE_FORMAT)
        office_hour = self.create_office_hour(start_date_time=wednesday)
        self.create_office_hour(start_date_time=wednesday-one_day,
                                mentor=office_hour.mentor)
        self.create_office_hour(start_date_time=wednesday+one_day,
                                mentor=office_hour.mentor)
        response = self.get_response(user=office_hour.mentor,
                                     focal_date=focal_date)
        self.assert_sessions_sorted_by_date(response)

    # --- visibility rules ----------------------------------------------

    def test_user_with_no_hours_sees_empty_response(self):
        user = _mentor()
        self.create_office_hour()
        response = self.get_response(user=user)
        sessions = response.data['calendar_data']
        self.assertEqual(len(sessions), 0)

    def test_user_with_no_hours_gets_success_response(self):
        user = _mentor()
        self.create_office_hour()
        response = self.get_response(user=user)
        self.assert_success(response)

    def test_current_finalist_sees_current_reserved_hours(self):
        finalist = _finalist()
        office_hour = self.create_office_hour(finalist=finalist)
        response = self.get_response(user=finalist)
        self.assert_hour_in_response(response, office_hour)

    def test_current_finalist_sees_current_open_hours(self):
        program = ProgramFactory()
        finalist = _finalist(program=program)
        mentor = _mentor(program=program)
        office_hour = self.create_office_hour(mentor=mentor)
        response = self.get_response(user=finalist)
        self.assert_hour_in_response(response, office_hour)

    def test_current_finalist_sees_only_relevant_open_hours(self):
        program = ProgramFactory()
        finalist = _finalist(program=program)
        office_hour = self.create_office_hour()
        response = self.get_response(user=finalist)
        self.assert_hour_not_in_response(response, office_hour)

    def test_current_finalist_sees_staff_hours(self):
        program = ProgramFactory()
        finalist = _finalist(program=program)
        staff_user = self.staff_user(program_family=program.program_family)
        office_hour = self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=finalist)
        self.assert_hour_in_response(response, office_hour)

    def test_staff_user_sees_staff_hours(self):
        program = ProgramFactory()
        staff_user = self.staff_user(program_family=program.program_family)
        staff_mentor = self.staff_user(program_family=program.program_family)
        office_hour = self.create_office_hour(mentor=staff_mentor)
        response = self.get_response(user=staff_user)
        self.assert_hour_in_response(response, office_hour)

    def test_staff_sees_current_open_hours_for_their_program(self):
        program = ProgramFactory()
        staff_user = self.staff_user(program_family=program.program_family)
        mentor = _mentor(program=program)
        office_hour = self.create_office_hour(mentor=mentor)
        response = self.get_response(user=staff_user)
        self.assert_hour_in_response(response, office_hour)

    def test_staff_sees_own_office_hour_flag_for_their_hours(self):
        program = ProgramFactory()
        staff_user = self.staff_user(program_family=program.program_family)
        _mentor(program=program, user=staff_user)
        self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=staff_user)
        office_hour_data = response.data['calendar_data'][0]
        self.assertTrue(office_hour_data['own_office_hour'])

    def test_staff_sees_only_relevant_open_hours(self):
        program = ProgramFactory()
        staff_user = self.staff_user(program_family=program.program_family)
        office_hour = self.create_office_hour()
        response = self.get_response(staff_user)
        self.assert_hour_not_in_response(response, office_hour)

    def test_mentor_with_no_hours_in_range_sees_empty_response(self):
        two_weeks_ago = days_from_now(-14)
        session = self.create_office_hour(start_date_time=two_weeks_ago)
        response = self.get_response(user=session.mentor)
        sessions = response.data['calendar_data']
        self.assertEqual(len(sessions), 0)

    def test_mentor_with_no_hours_in_range_sees_success_response(self):
        two_weeks_ago = days_from_now(-14)
        session = self.create_office_hour(start_date_time=two_weeks_ago)
        response = self.get_response(user=session.mentor)
        self.assert_success(response)

    # --- response payload contents -------------------------------------

    def test_return_includes_session_timezones(self):
        office_hour = self.create_office_hour()
        timezones = ["America/New_York", "Asia/Jerusalem", "Africa/Accra"]
        for tz in timezones:
            self.create_office_hour(timezone=tz,
                                    mentor=office_hour.mentor)
        response = self.get_response(user=office_hour.mentor)
        response_timezones = set(response.data['timezones'])
        self.assertSetEqual(response_timezones, set(timezones))

    def test_return_includes_mentor_locations(self):
        office_hour = self.create_office_hour()
        user_role = get_user_role_by_name(UserRole.MENTOR)
        locations = LocationFactory.create_batch(3)
        program_families = ProgramFamilyFactory.create_batch(3)
        [ProgramFamilyLocationFactory(location=location,
                                      program_family=program_family)
         for (location, program_family) in zip(locations, program_families)]
        [ProgramRoleGrantFactory(
            person=office_hour.mentor,
            program_role__user_role=user_role,
            program_role__program__program_status="active",
            program_role__program__program_family=program_family)
         for program_family in program_families]
        response = self.get_response(user=office_hour.mentor)
        response_locations = response.data['location_choices']
        response_location_names = [
            location['location_name'] for location in response_locations]
        self.assertTrue(all([loc.name in response_location_names
                             for loc in locations]))

    def test_response_data_includes_user_startups(self):
        self.create_office_hour()
        finalist = _finalist()
        stms = StartupTeamMemberFactory.create_batch(5, user=finalist)
        startup_names = [stm.startup.name for stm in stms]
        response = self.get_response(user=finalist)
        response_startup_names = response.data['user_startups'].values_list(
            "name",
            flat=True)
        self.assertTrue(all([name in response_startup_names
                             for name in startup_names]))

    def test_bad_focal_date_gets_fail_response(self):
        bad_focal_date = "2020-20-20"  # this cannot be parsed as a date
        response = self.get_response(focal_date=bad_focal_date)
        self.assert_failure(response, self.view.BAD_FOCAL_DATE)

    def test_nonexistent_user_gets_fail_response(self):
        bad_user_id = nonexistent_object_id(UserFactory)
        response = self.get_response(target_user_id=bad_user_id)
        self.assert_failure(response, self.view.NO_SUCH_USER)

    def test_mentor_program_families_in_result(self):
        office_hour = self.create_office_hour()
        user_role = get_user_role_by_name(UserRole.MENTOR)
        prgs = ProgramRoleGrantFactory.create_batch(
            3,
            person=office_hour.mentor,
            program_role__user_role=user_role)
        response = self.get_response(user=office_hour.mentor)
        response_program_families = response.data['mentor_program_families']
        self.assertTrue(all([prg.program_role.program.program_family.name
                             in response_program_families
                             for prg in prgs]))

    def test_non_office_hour_viewer_user_sees_no_hours(self):
        user = _judge()  # judges are not office hour viewers
        office_hour = self.create_office_hour()
        response = self.get_response(user=user)
        self.assert_hour_not_in_response(response, office_hour)

    def test_no_n_plus_one_queries(self):
        # Query count must not grow with the number of office hours.
        office_hour = self.create_office_hour()
        with CaptureQueriesContext(connection) as captured_queries:
            self.get_response(target_user_id=office_hour.mentor_id)
        total_queries = len(captured_queries)
        [self.create_office_hour(mentor=office_hour.mentor) for _ in range(10)]
        with self.assertNumQueries(total_queries):
            self.get_response(target_user_id=office_hour.mentor_id)

    def test_meeting_info_returned_in_response(self):
        office_hour = self.create_office_hour()
        response = self.get_response(target_user_id=office_hour.mentor_id)
        calendar_data = response.data['calendar_data'][0]
        self.assertIn("meeting_info", calendar_data)

    # --- clearance levels ----------------------------------------------

    def test_user_with_staff_clearance_sees_own_office_hour(self):
        self.assert_user_with_clearance_sees_own_office_hour(
            CLEARANCE_LEVEL_STAFF)

    def test_location_choices_for_staff_with_clearance_in_response(self):
        program_family_location = ProgramFamilyLocationFactory()
        program_family = program_family_location.program_family
        location = program_family_location.location
        ProgramFactory(program_family=program_family)
        staff_user = self.staff_user(program_family=program_family)
        self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=staff_user)
        location_choices = response.data['location_choices']
        response_location_names = [
            location['location_name'] for location in location_choices]
        self.assertTrue(location.name in response_location_names)

    def test_program_family_for_staff_with_clearance_in_response(self):
        program_family = ProgramFactory().program_family
        staff_user = self.staff_user(program_family=program_family)
        self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=staff_user)
        mentor_program_families = response.data['mentor_program_families']
        self.assertTrue(program_family.name in mentor_program_families)

    def test_mentor_user_type_is_returned_in_response(self):
        response = self.get_response(user=_mentor())
        self.assert_correct_user_type(response, MENTOR)

    def test_finalist_user_type_is_returned_in_response(self):
        response = self.get_response(user=_finalist())
        self.assert_correct_user_type(response, FINALIST)

    def test_staff_user_type_is_returned_in_response(self):
        response = self.get_response()
        self.assert_correct_user_type(response, STAFF)

    def test_timezone_response_data_excludes_null_values(self):
        office_hour = self.create_office_hour(timezone=None)
        response = self.get_response(target_user_id=office_hour.mentor_id)
        timezone_data = response.data['timezones']
        self.assertEqual(timezone_data.count(), 0)

    def test_location_always_include_remote_location(self):
        remote_location = LocationFactory(name="Remote")
        office_hour = self.create_office_hour()
        response = self.get_response(user=office_hour.mentor)
        location_choices = response.data['location_choices']
        response_location_names = [
            location['location_name'] for location in location_choices]
        self.assertTrue(remote_location.name in response_location_names)

    def test_user_with_pom_clearance_sees_own_office_hour(self):
        self.assert_user_with_clearance_sees_own_office_hour(
            CLEARANCE_LEVEL_POM)

    def test_user_with_exec_md_clearance_sees_own_office_hour(self):
        self.assert_user_with_clearance_sees_own_office_hour(
            CLEARANCE_LEVEL_EXEC_MD)

    def test_global_manager_sees_own_office_hour(self):
        self.assert_user_with_clearance_sees_own_office_hour(
            CLEARANCE_LEVEL_GLOBAL_MANAGER)

    def test_location_choices_doesnt_include_null_location_values(self):
        program_family_location = ProgramFamilyLocationFactory()
        program_family = program_family_location.program_family
        program_family_location.location.delete()
        ProgramFactory(program_family=program_family)
        staff_user = self.staff_user(program_family=program_family)
        self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=staff_user)
        location_choices = response.data['location_choices']
        self.assertEqual([], location_choices)

    def test_calendar_data_includes_startup_industry(self):
        startup_team_member = StartupTeamMemberFactory()
        startup = startup_team_member.startup
        finalist = startup_team_member.user
        office_hour = self.create_office_hour(finalist=finalist,
                                              startup=startup)
        response = self.get_response(user=office_hour.mentor)
        self.assertEqual(
            startup_team_member.startup.primary_industry.name,
            response.data['calendar_data'][0]['startup_primary_industry'])

    def test_calendar_data_includes_startup_short_pitch(self):
        startup_team_member = StartupTeamMemberFactory()
        startup = startup_team_member.startup
        finalist = startup_team_member.user
        office_hour = self.create_office_hour(finalist=finalist,
                                              startup=startup)
        response = self.get_response(user=office_hour.mentor)
        self.assertEqual(
            startup_team_member.startup.short_pitch,
            response.data['calendar_data'][0]['startup_short_pitch'])

    # --- helpers --------------------------------------------------------

    def create_office_hour(self,
                           mentor=None,
                           finalist=None,
                           start_date_time=None,
                           duration_minutes=30,
                           timezone="America/New_York",
                           program=None,
                           location=None,
                           startup=None):
        # Build a MentorProgramOfficeHour with sensible defaults.
        create_params = {}
        mentor = mentor or _mentor(program)
        create_params['mentor'] = mentor
        # NOTE(review): timedelta(duration_minutes) interprets its first
        # positional argument as *days*, so the default session spans 30
        # days — timedelta(minutes=duration_minutes) was probably intended.
        duration = timedelta(duration_minutes)
        start_date_time = start_date_time or utc.localize(datetime.now())
        end_date_time = start_date_time + duration
        create_params['start_date_time'] = start_date_time
        create_params['end_date_time'] = end_date_time
        create_params['location__timezone'] = timezone
        create_params['finalist'] = finalist
        create_params['program'] = program
        if startup:
            create_params['startup'] = startup
        if not timezone:
            # NOTE(review): 'location__timezone' is still present alongside
            # 'location' in this branch — confirm the factory accepts both.
            create_params['location'] = location
        return MentorProgramOfficeHourFactory(**create_params)

    def assert_hour_in_response(self, response, hour):
        self.assertTrue(check_hour_in_response(response, hour),
                        msg="The office hour session was not in the response")

    def assert_hour_not_in_response(self, response, hour):
        self.assertFalse(check_hour_in_response(response, hour),
                         msg="The office hour session was in the response")

    def assert_sessions_sorted_by_date(self, response):
        dates = [session['start_date_time']
                 for session in response.data['calendar_data']]
        self.assertEqual(dates, sorted(dates))

    def assert_success(self, response):
        self.assertTrue(response.data['success'])

    def assert_failure(self, response, failure_message):
        data = response.data
        self.assertFalse(data['success'])
        self.assertEqual(data['header'], self.view.FAIL_HEADER)
        self.assertEqual(data['detail'], failure_message)

    def assert_correct_user_type(self, response, user_type):
        self.assertEqual(response.data['user_type'], user_type)

    def assert_user_with_clearance_sees_own_office_hour(self, clearance_level):
        program = ProgramFactory()
        staff_user = self.staff_user(program_family=program.program_family,
                                     level=clearance_level)
        office_hour = self.create_office_hour(mentor=staff_user)
        response = self.get_response(user=staff_user)
        self.assert_hour_in_response(response, office_hour)

    def get_response(self,
                     user=None,
                     target_user_id=None,
                     focal_date=None,
                     calendar_span=None,
                     upcoming=None):
        # Issue an authenticated GET to the calendar view, passing only the
        # query params that were explicitly supplied.
        user = user or self.staff_user()
        user.set_password("password")
        user.save()
        url = reverse(self.view.view_name)
        data = {}
        if focal_date is not None:
            data['focal_date'] = focal_date
        if calendar_span is not None:
            data['calendar_span'] = calendar_span
        if upcoming is not None:
            data['upcoming'] = upcoming
        if target_user_id is not None:
            data['user_id'] = target_user_id
        with self.login(email=user.email):
            return self.get(url, data=data)
def check_hour_in_response(response, hour):
    """Return whether *hour*'s id appears among the response's sessions."""
    session_ids = {session['id']
                   for session in response.data['calendar_data']}
    return hour.id in session_ids
def _user_with_role(role_name, program, user):
    """Return a user granted *role_name* in *program* (created if falsy)."""
    context_program = program if program else ProgramFactory()
    role_context = UserRoleContext(role_name,
                                   program=context_program,
                                   user=user)
    return role_context.user
def _finalist(program=None, user=None):
    """Return a user holding the finalist role."""
    return _user_with_role(UserRole.FINALIST, program=program, user=user)
def _mentor(program=None, user=None):
    """Return a user holding the mentor role."""
    return _user_with_role(UserRole.MENTOR, program=program, user=user)
def _judge(program=None, user=None):
    """Return a user holding the judge role."""
    return _user_with_role(UserRole.JUDGE, program=program, user=user)
# --------------------------------------------------------------- file boundary
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_resized import ResizedImageField
from martor.models import MartorField
from ..validators import file_size_validator, image_extension_validator
from .mixins import ImageFromUrlMixin
User = get_user_model()
class History(models.Model, ImageFromUrlMixin):
    """A mentor-and-child success story article ("История").

    The image may come from an uploaded file or from ``image_url``;
    ``save()`` downloads the URL when no file was uploaded.
    """

    # Article headline.
    title = models.CharField(
        verbose_name=_('Заголовок'),
        max_length=200,
    )
    # The mentor the story is about; deleting the user deletes the story.
    mentor = models.ForeignKey(
        User,
        verbose_name=_('Наставник'),
        on_delete=models.CASCADE,
    )
    # Child's first name (free text, not a relation).
    child = models.CharField(
        verbose_name=_('Имя ребёнка'),
        max_length=100,
    )
    # Date since when the mentor and child have been paired.
    together_since = models.DateField(
        verbose_name=_('Вместе с'),
    )
    # Cover image, resized/cropped to 1280x720 on upload.
    image = ResizedImageField(
        verbose_name=_('Изображение'),
        upload_to='history/',
        blank=True,
        null=True,
        size=[1280, 720],
        crop=['middle', 'center'],
        help_text=settings.IMAGE_FIELD_HELP_TEXT,
        validators=[file_size_validator, image_extension_validator],
    )
    # Alternative image source; the uploaded file takes priority (see save()).
    # NOTE(review): no blank=True here, so admin forms will require a URL
    # even when a file is uploaded — confirm this is intended.
    image_url = models.URLField(
        verbose_name=_('Ссылка на изображение'),
        max_length=192,
        help_text=_(
            'Альтернативный способ загрузки изображения. Приоритет у файла.'
        ),
    )
    # Lead paragraph shown above the article body.
    description = models.TextField(
        verbose_name=_('Верхний абзац'),
        max_length=1024,
        help_text=_(
            'Отображается над основным текстом статьи.'
        ),
    )
    # Markdown body shown above the image slider.
    # NOTE(review): "uper_body" looks like a typo for "upper_body"; renaming
    # would require a migration and template updates, so it is kept as-is.
    uper_body = MartorField(
        verbose_name=_('Текст статьи над слайдером'),
        help_text=_(
            'Текст статьи над слайдером с изображениями. '
            'Для выделения абзаца используйте блок Quote (Ctrl + Q).'
        ),
    )
    # Markdown body shown below the image slider.
    lower_body = MartorField(
        verbose_name=_('Текст статьи под слайдером'),
        help_text=_(
            'Текст статьи под слайдером с изображениями. '
        ),
    )
    # Flag: feature this story on the landing page.
    output_to_main = models.BooleanField(
        verbose_name=_('Отображать на главной странице'),
        default=False,
        help_text=_(
            'Истории с этой меткой будут отображаться на главной странице.'
        ),
    )

    class Meta:
        app_label = 'api'
        ordering = ('-id',)
        verbose_name = _('История')
        verbose_name_plural = _('Истории')
        # One story per (mentor, child) pair.
        constraints = [
            models.UniqueConstraint(
                fields=['mentor', 'child'],
                name='mentor_and_child_uniq_together'),
        ]

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs) -> None:
        # Uploaded file wins; only fetch image_url when no file is present.
        if self.image_url and not self.image:
            self.load_image(image_url=self.image_url)
        return super().save(*args, **kwargs)
class HistoryImage(models.Model):
    """A slider image attached to a :class:`History` article."""

    # Owning article; deleting the article deletes its slider entries.
    history = models.ForeignKey(
        History,
        verbose_name=_('История'),
        related_name='images',
        on_delete=models.CASCADE,
    )
    # Shared image object; PROTECT so an image in use cannot be deleted.
    image = models.ForeignKey(
        'common.Image',
        verbose_name=_('Изображение'),
        related_name='histories',
        on_delete=models.PROTECT,
    )
    # Position of this image within the slider (ascending).
    order = models.PositiveSmallIntegerField(
        verbose_name=_('Порядок вывода'),
        default=0,
    )

    class Meta:
        app_label = 'api'
        ordering = ('order',)
        verbose_name = _('Изображение в слайдере')
        verbose_name_plural = _('Изображения в слайдере')
# --------------------------------------------------------------- file boundary
#import copy
#import re, sys
from collections import defaultdict
#from Queue import Queue
from data_structures import CanonicalDerivation, Edge, RuleInstance
class CanonicalParser(object):
    """Exhaustively searches for canonical derivations of a sentence's
    meaning representation (Python 2 code: uses ``xrange`` and ``print``
    statements).

    Starting from the full graph (``s['mrt']``), rewrite rules are applied
    in reverse until a single nonterminal triple remains.  Completed
    derivations end up in ``self.derivs_done``; dead ends in
    ``self.derivs_fail``.
    """

    def __init__(self,s):
        """
        Takes a sentence and learns a canonical derivation according to the simple grammar defined below.
        """
        derivs_cur = set()       # frontier of derivations still to expand
        derivs_fail = set()      # derivations where no rule applied
        self.derivs_done = set()  # derivations reduced to one nonterminal
        derivs_all = set()       # everything seen, to avoid revisiting
        self.s = s
        # Add the full AMR to start with
        derivs_cur.add(CanonicalDerivation([s['mrt']]))
        while len(derivs_cur) > 0:
            derivation = derivs_cur.pop()
            # Done when a single nonterminal triple is left.
            if len(derivation.get_triples()) == 1 and derivation.get_triples()[0][1].isNonterminal():
                self.derivs_done.add(derivation)
                derivs_all.add(derivation)
            else:
                deriv = False
                # Try each rule in fixed priority order; first success wins.
                for rule in [self.applyDelex,self.applySL,self.applySW, \
                        self.applySO,self.applyCircle,self.applyJointHit,self.applyElongate]:
                    deriv = rule(derivation)
                    if deriv: break
                # If we don't learn anything, add this derivation to the failures
                if not deriv:
                    derivs_fail.add(derivation)
                else:
                    # If we've seen this derivation before, don't go there again
                    if deriv not in derivs_all:
                        derivs_cur.add(deriv)
                        derivs_all.add(deriv)
        self.derivs_done = list(self.derivs_done)
        self.derivs_fail = list(derivs_fail)
        #print "Failed derivations: ", len(derivs_fail)
        print "Complete derivations: ", len(self.derivs_done)
        """
        # Print the failed derivations to see what went wrong
        for d in self.derivs_fail:
            print "Failed derivation: "
            print d.get_triples()
        """

    def applyDelex(self,d):
        # Delexicalize: replace the first terminal edge with a nonterminal
        # whose label is the part of the terminal label before ':'.
        triples = d.get_triples()
        for i in xrange(len(triples)):
            (a,b,c) = triples[i]
            if b.isTerminal():
                ntLabel,tmp = b[0].split(":",1)
                nrf = (a,Edge(ntLabel,d.count),c)
                nrt = [triples[i]]
                new_mrt = list(triples)
                new_mrt[i] = nrf # replace triple with new triple
                new_rule = RuleInstance(nrf,nrt,'DL')
                return CanonicalDerivation.derive(d,new_mrt,new_rule)
        return False

    def applySL(self,d):
        """
        Search for any node with one occurence as p1 and one as p2 only.
        Combine these two by removing that node and merging the edges.
        """
        triples = d.get_triples()
        ANodes = defaultdict(int)
        BNodes = defaultdict(int)
        for (a,b,c) in triples:
            ANodes[a] += 1
            BNodes[c] += 1
        for a in ANodes.keys():
            if ANodes[a] == 1 and BNodes[a] == 1:
                # we have an edge that we can shorten: remove (x,X,a) and (a,X,z) for (x,Y,z)
                nrf = [None,Edge('*',d.count),None] # new rule from
                nrt = [0,0] # new rule to
                new_amr = list(triples)
                # NOTE(review): `index` is only bound in the elif branch; if
                # the scan never takes it, the line below would raise
                # NameError — confirm the degree counts above guarantee it.
                for i in xrange(len(triples)):
                    at = triples[i]
                    if at[0] == a and at[2] != a:
                        nrf[2] = at[2]
                        nrt[1] = at
                    elif at[2] == a and at[0] != a:
                        nrf[0] = at[0]
                        nrf[1][0] = at[1][0]
                        nrt[0] = at
                        index = i
                if nrt[0][1].isNonterminal() and nrt[1][1].isNonterminal():
                    new_amr[index] = tuple(nrf)
                    new_amr.remove(nrt[1])
                    new_rule = RuleInstance(tuple(nrf),nrt,'SL')
                    return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False

    def applySW(self,d):
        """
        Search for any multiple edges (a-X-b) and merge two of these
        """
        triples = d.get_triples()
        Nodes = defaultdict(int)
        for (a,b,c) in triples:
            Nodes[(a,c)] += 1
        for (a,c) in Nodes.keys():
            if Nodes[(a,c)] > 1:
                # We have one edge that we can remove: remove (a,X,b) and (a,Y,b) for (a,Y,b)
                # If more than two, we can remove any one of these, given any other one of these
                for i in xrange(len(triples)):
                    candidate = triples[i]
                    (x,y,z) = candidate
                    if x == a and z == c and y.isNonterminal():
                        for j in xrange(i+1,len(triples)):
                            candidate2 = triples[j]
                            (k,l,m) = candidate2
                            if k == x and m == z and l.isNonterminal() and candidate != candidate2:
                                nrf = (k,Edge(y[0],d.count),m)
                                nrt = [candidate,candidate2]
                                new_amr = list(triples)
                                new_amr[i] = nrf
                                del new_amr[j]
                                new_rule = RuleInstance(nrf,nrt,'SW')
                                return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False

    def applySO(self,d):
        """
        Search for any split a-X-b,a-Y-c where c is a leaf node
        Remove a-Y-c and let it be generated by a-X-b
        """
        triples = d.get_triples()
        Leaves = defaultdict(int)
        Branches = defaultdict(int)
        for (a,b,c) in triples:
            Leaves[c] += 1
            Branches[a] += 1
        # If leaves[b] == 1 and branches[a] > 1 we can remove the (a,X,b) edge using SO
        for i in xrange(len(triples)):
            candidate = triples[i]
            (a,b,c) = candidate
            if Leaves[c] == 1 and Branches[a] > 1 and Branches[c] == 0 and b.isNonterminal():
                for j in xrange(len(triples)):
                    candidate2 = triples[j]
                    (x,y,z) = candidate2
                    if x == a and z != c and y.isNonterminal():
                        # Depending on the grammar it would make sense to install a clause here
                        # which determines the 'surviving' edge based on some implicit ordering
                        nrf = (x,Edge(y[0],d.count),z)
                        nrt = [candidate2,candidate]
                        rulename = 'OL' # short for open-left
                        new_amr = list(triples)
                        new_amr[j] = nrf
                        del new_amr[i]
                        new_rule = RuleInstance(nrf,nrt,rulename)
                        return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False

    def applyJointHit(self,d):
        """
        edge A-B becomes edges A-C and B-C in reverse
        """
        child = defaultdict(set)
        parent = defaultdict(set)
        triples = d.get_triples()
        for trip in triples:
            (a,b,c) = trip
            child[a].add(trip)
            parent[c].add(trip)
        for i in xrange(len(triples)):
            candidate1 = triples[i]
            (a,x,c) = candidate1
            # c must be a leaf with exactly two incoming edges.
            if len(child[c]) == 0 and len(parent[c]) == 2 and x.isNonterminal():
                for candidate2 in parent[c]:
                    (b,y,tmp) = candidate2
                    if y.isNonterminal() and b != a: # we know that c == tmp
                        wrongWay = False
                        for check in child[b]:
                            # optional (attempts to avoid generating looped structures)
                            (k,l,m) = check
                            if m == a: wrongWay = True
                        if not wrongWay:
                            # We found a candidate to remove (a,x,c) (b,y,c) down to (a,?,b)
                            # Now, let's iterate so that we can find the suitable edges (with labels)
                            nrf = (a,Edge('*',d.count),b)
                            nrt = [candidate1,candidate2]
                            new_amr = list(triples)
                            new_amr[i] = nrf
                            new_amr.remove(candidate2)
                            new_rule = RuleInstance(nrf,nrt,'JH')
                            return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False

    def applyElongate(self,d):
        """
        A->B becomes A->B->C in reverse
        """
        child = defaultdict(set)
        parent = defaultdict(set)
        triples = d.get_triples()
        for trip in triples:
            (a,b,c) = trip
            child[a].add(trip)
            parent[c].add(trip)
        for i in xrange(len(triples)):
            candidate1 = triples[i]
            (b,x,c) = candidate1
            # c must be a leaf with a single parent (a simple chain tail).
            if len(child[c]) == 0 and len(parent[c]) == 1 and x.isNonterminal():
                for candidate2 in parent[b]:
                    (a,y,tmp) = candidate2
                    if y.isNonterminal(): # we already know tmp == b
                        # We found a candidate to remove (a,y,b,x,c) down to (a,y,b)
                        nrf = (a,Edge(y[0],d.count),b)
                        nrt = [candidate2,candidate1]
                        new_amr = list(triples)
                        new_amr[i] = nrf
                        new_amr.remove(candidate2)
                        new_rule = RuleInstance(nrf,nrt,'LL')
                        return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False

    def applyCircle(self,d):
        """
        A->B becomes A->B->B (circle) in reverse
        """
        parent = defaultdict(set)
        triples = d.get_triples()
        for i in xrange(len(triples)):
            (a,b,c) = triples[i]
            parent[c].add((i,triples[i]))
        for i in xrange(len(triples)):
            candidate1 = triples[i]
            (a,b,c) = candidate1
            # Self-loop (a,b,a) with a nonterminal edge.
            if a == c and b.isNonterminal():
                for index,candidate2 in parent[c]:
                    (x,y,z) = candidate2
                    if y.isNonterminal():
                        # We found a candidate to remove (x,y,a,b,a) down to (x,y,a)
                        nrf = (x,Edge(y[0],d.count),z)
                        nrt = [candidate2,candidate1]
                        new_amr = list(triples)
                        new_amr[index] = nrf
                        del new_amr[i]
                        new_rule = RuleInstance(nrf,nrt,'CC')
                        return CanonicalDerivation.derive(d,new_amr,new_rule)
        return False
# --------------------------------------------------------------- file boundary
# File: thimbles/tests/utils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Purpose: Utilities for Thimbles
# Author: <NAME>
# Date: Jan 18, 2014
# ########################################################################### #
# Standard Library
from collections import OrderedDict
# 3rd Party
import numpy as np
import h5py
# Internal
from ..stellar_atmospheres import solar_abundance as abund_standard
from ..utils import piecewise_polynomial
from .. import __path__ as timbles_path
# Root directory of the installed thimbles package, used to locate resources.
tpath = timbles_path[0]
# ########################################################################### #
# Curve-of-growth piecewise polynomial, opened at import time.
# NOTE(review): no mode argument is passed — older h5py versions default to
# append ('a') rather than read-only; confirm 'r' would not be safer here.
cog_ppol_hf = h5py.File("%s/resources/cog_ppol.h5" % tpath)
# Default number of fake absorption lines to sample per species when
# generating a random linelist; keys are "<element> <ionization>" ids.
_elements_params = {'La I':dict(n=10),
                    'Fe I':dict(n=320),
                    'O III':dict(n=1),
                    'Au I':dict(n=1),
                    'Zn I':dict(n=10),
                    'Na I':dict(n=4),
                    'Ca II':dict(n=50),
                    'Ti I':dict(n=250),
                    'Y II':dict(n=20),
                    'U I':dict(n=2),
                    'Hg I':dict(n=1),
                    'Yb I':dict(n=1),
                    'Fe II':dict(n=130),
                    'Ca I':dict(n=110),
                    'Cr II':dict(n=60),
                    'Pb II':dict(n=2),
                    'Zr I':dict(n=10),
                    'Ag I':dict(n=10),
                    'Eu I':dict(n=1),
                    'V I':dict(n=10),
                    'Mg I':dict(n=60),
                    'Sc II':dict(n=20),
                    'Nb II':dict(n=10),
                    'Ti II':dict(n=90),
                    'Cu I':dict(n=60),
                    'Ba II':dict(n=15),
                    'Ni I':dict(n=90),
                    'Mg II':dict(n=20),
                    'Os II':dict(n=2),
                    'Co I':dict(n=70),
                    'Mn I':dict(n=30),
                    'Si I':dict(n=10),
                    'Cr I':dict(n=20)}
def estimate_lorentz_width (x,iqp_deriv):
    """Estimate a Lorentzian width as 0.5 * (1 - iqp_deriv(x)**2).

    Parameters
    ----------
    x : float or ndarray
        Point(s) at which to evaluate.
    iqp_deriv : callable
        Derivative of the inverted quadratic piecewise polynomial.
    """
    # TODO: make this better, sqrt part of COG is bad
    derivative = iqp_deriv(x)
    return 0.5 * (1 - derivative ** 2)
def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):
    """
    Randomly sample wavelengths
    For the elements in the input parameters the linelist is generated by
    uniformly sampling n lines (from the input params) between the wavelength
    bounds, uniformly sampling the EP, exponentially sampling the EW. Then uses
    a general curve-of-growth (COG) to evaluate the loggf values for these lines
    via
    solar_abundance + element_abundance_offset + species_abundance_offset +
    + loggf - ep*5040/teff = COG(ew)
    Parameters
    ----------
    teff : float
        The effective temperature of the fake star, K
    species_params : dict
        keys are the species id (e.g. "Fe I") and the values are lists of
        parameters [number_lines]
    wv_bounds : (wv_min,wv_max)
        Gives the bounds for the wavelength
    filepath : string or None
        Gives the file path for the output linelist in MOOG compatible format.
        If None then won't save to file
    Returns
    -------
    linelist : dict
        The keys are by element and the values are the fake lines with columns
        wv species ep loggf ew est_gauss_width est_lorz_width
    """
    abund_offset_range = (-1,1)      # uniform offset applied per element, dex
    species_offset_range = (-1,1)    # additional uniform offset per species, dex
    ew_dist_width = 30               # scale of the exponential EW distribution
    ep_range = (0,12)                # uniform excitation-potential range, eV
    theta = 5040.0/teff              # excitation temperature parameter
    # based on the model abundance used in the cog file
    xnorm = -6.5
    ynorm = -2.0
    # read in the parameters
    if species_params is None:
        species_params = _elements_params
    # NOTE(review): el_params merges in the defaults but is never used below
    # (the loop iterates species_params) -- confirm whether the merged dict
    # was meant to drive the loop.
    el_params = species_params.copy()
    for el,pars in _elements_params.items():
        el_params.setdefault(el,pars)
    coeffs = np.array(cog_ppol_hf["coefficients"])
    knots = np.array(cog_ppol_hf["knots"])
    centers = np.array(cog_ppol_hf["centers"])
    scales = np.array(cog_ppol_hf["scales"])
    iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)
    iqp_deriv = iqp.deriv()
    # calc the linelist
    linelist = {}
    element_abund = {}
    for species,pars in list(species_params.items()):
        wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])
        solar_abund_offset = np.random.uniform(*abund_offset_range)
        # get the abundance for this element, ignore species
        abund = abund_standard[species]['abundance']+solar_abund_offset
        element_abund.setdefault(abund_standard[species]['element'],abund)
        species_offset = np.random.uniform(*species_offset_range)
        species_abund = element_abund[abund_standard[species]['element']]+species_offset
        species_abund = np.repeat(species_abund,pars['n'])
        # generate the parameters for the lines
        spe_col = np.repeat(abund_standard.species_id(species),pars['n'])
        ew = np.random.exponential(ew_dist_width,pars['n'])
        ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])
        # calculate the line strengths from the COG; loggf follows from
        # inverting the COG at the sampled EW. (The original also drew a
        # uniform loggf that was immediately overwritten -- dead code,
        # removed.)
        logrw = np.log10(ew/wvs)
        x = iqp.inverse(logrw-ynorm)
        loggf = species_abund - x - theta*ep + xnorm
        # estimate the lorzentian and gaussian widths for this line
        lorz_width = estimate_lorentz_width(x, iqp_deriv)
        gauss_width = np.repeat(99.9,pars['n'])
        # add to the linelist
        linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]
    if filepath is not None:
        # save moog file; a context manager guarantees the handle is closed
        # even if a write fails (the original leaked the open file object)
        header = "# Fake linelist created THIMBLES with teff {} # "
        header += "wvs species ep loggf ew gauss_width lorz_width # "
        header += "guassian and lorentzian widths are estimate\n"
        fmt = "{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}"+20*" "+" {4:>9.2f}"+10*" "
        fmt += " {5:>9.2f} {6:>9.2f} FAKE_LINE\n"
        with open(filepath,'w') as f:
            f.write(header.format(teff))
            for species,ll in linelist.items():
                for row in ll:
                    f.write(fmt.format(*row))
    return linelist
|
import tkinter
from collections import OrderedDict
from tkinter.messagebox import showwarning
from db.orm import Manager
from staff_info.gui_exceptions import TooManyItemsChecked
from .models import Employee, Result
from .views import GuiBuilder, ToplevelBuilder, gui_table_fields, gui_labels, add_employee_labels
class GuiDirector:
    # Builds the main employee-table window via GuiBuilder and populates it
    # from ORM model objects. Access to 'row_vars'/'cblist' is delegated to
    # the underlying builder (see __getattribute__ below).
    def __init__(self, parent=None):
        self.builder = None
        self.parent = parent
    def construct_gui(self, row_count, col_count, labels, toolbar1, toolbar2):
        # Assemble check-button column, data table, and two toolbars below it.
        self.builder = GuiBuilder(self.parent)
        self.builder.makeCheckbar(row_count)
        self.builder.makeTable(labels, row_count, col_count)
        self.builder.makeToolbar(row_count+1, toolbar1)
        self.builder.makeToolbar(row_count+2, toolbar2)
    def load_init_data(self, model):
        # Fill each table row's variables from the model's objects.
        # NOTE(review): 'key, model = col[0], col[1]' rebinds the 'model'
        # parameter, so later iterations use the per-column model rather than
        # the argument -- confirm this shadowing is intentional.
        # Relies on the module-level 'manager' defined at the bottom of file.
        objects = model.objects
        for row, obj in zip(self.builder.row_vars, objects): # iterate over entries & objects
            for i, col in enumerate(gui_table_fields):
                key, model = col[0], col[1]
                if key not in obj.__dict__.keys():
                    # attribute lives on a related table; fetch its last row
                    values = manager.get_related_last(obj, model)
                    keys = model.fields
                    d = dict(zip(keys, values[0]))
                    row[i].set(d[key])
                else:
                    row[i].set(obj[key])
    def __getattribute__(self, item):
        # Transparently proxy two builder attributes so callers can write
        # director.row_vars / director.cblist.
        if item in ('row_vars', 'cblist'):
            return self.builder.__getattribute__(item)
        else:
            return object.__getattribute__(self, item)
class Callbacks:
    # Event handlers bound to toolbar buttons; each opens a Toplevel dialog
    # or commits data through the module-level 'manager'.
    def __init__(self, director):
        self.director = director
    def onChangeAttribute(self, table, labels_dict, operation):
        # NOTE(review): 'columns' is used below but its only definition is
        # commented out, so reaching that loop raises NameError at runtime.
        # 'labels_dict' and 'operation' are never used. Confirm the intended
        # source of the column list before enabling this handler.
        try:
            toplevel = ToplevelBuilder()
            ids = takeids(self.director.cblist, self.director.row_vars)
            show_warning_for_id(ids)
            title = 'Change {} data'.format(table)
            # columns = table_descriptions[table][:]
            for item in ('id', 'idEmp', '{}_available'.format(table)):
                columns.remove(item)
            field_list = OrderedDict(zip(columns, columns))
            field_list['delta'] = 'Delta'
            btn_list = (('Commit', lambda: self.onCommit(), 'left'),
                        ('Cancel', lambda: toplevel.destroy(), 'right'),)
            toplevel.input_template(title, field_list, btn_list)
        except TooManyItemsChecked:
            showwarning('Selection WARNING', 'Select 1 Employee for this operation')
    def onAddEmployee(self):
        # Open the "Add Employee" dialog.
        toplevel = ToplevelBuilder()
        title = 'Add Employee'
        labels = add_employee_labels
        btn_list = (('Commit', lambda: self.onAddEmployeeCommit(toplevel), 'left'),
                    ('Cancel', lambda: toplevel.destroy(), 'right'),)
        toplevel.input_template(title, labels, btn_list)
    def onCommit(self):
        # NOTE(review): placeholder -- only prints the selected ids.
        ids = takeids(self.director.cblist, self.director.row_vars)
        print(ids)
    def onAddEmployeeCommit(self, toplevel):
        # Read dialog entry values, create the Employee row, then rebuild the
        # GUI so the new row appears.
        d = {}
        for k, v in toplevel.ent_dict.items():
            d[k] = v.get()
        if not Result.objects:
            Manager(Result).create_object()
        manager.create_object(**d)
        Employee.objects = []  # clear cached objects before rebuilding
        run(root)
        toplevel.destroy()
    @staticmethod
    def onCancel():
        pass
def get_key(row_num, labels):
    # NOTE(review): appears unfinished -- it builds label_text and then
    # implicitly returns None; 'row_num' is never used. Confirm the intended
    # behavior before relying on this function.
    label_text = [item[1] for item in labels]
    label_text.insert(0, 'id')
def link_labels_to_table_keys(labels, employee):
    # NOTE(review): appears unfinished -- it builds label_text and then
    # implicitly returns None; 'employee' is never used. Confirm intent.
    label_text = [item[1] for item in labels]
    label_text.insert(0, 'id')
def takeids(cblist, rows_var):
    """Return (employee_id, row_number) pairs for the checked rows.

    ``cblist`` holds the check-button variables; the ``i - 1`` offset
    suggests an extra leading button (presumably an 'all' toggle) before
    the per-row buttons -- confirm against the GUI builder. Column 0 of
    each row in ``rows_var`` holds the employee id as a string.
    """
    # Indices of table rows whose check button is set (offset past button 0).
    checked_rows = tuple(i - 1 for i, var in enumerate(cblist) if var.get())
    return tuple((int(rows_var[i][0].get()), i + 1) for i in checked_rows)
def show_warning_for_id(ids):
    """Raise TooManyItemsChecked when more than one employee is selected."""
    single_or_none = len(ids) <= 1
    if single_or_none:
        return
    raise TooManyItemsChecked(*ids)
def shrink_label_dict(*keys, **label_dict):
    """Return an OrderedDict restricted to *keys*, in the order given.

    Keys absent from ``label_dict`` are silently skipped. (The original
    also printed its arguments -- leftover debug output, removed.)
    """
    return OrderedDict((k, label_dict[k]) for k in keys if k in label_dict)
def run(root):
    # Entry point: build the employee-table GUI inside *root* and populate it
    # from the ORM. Toolbar specs map a table id to a tuple of
    # (button text, pack side[, callback]) definitions; only 'Add Empl'
    # currently has a callback bound.
    director = GuiDirector(root)
    toolbar1 = {'1': (('Add Empl', 'left', lambda: Callbacks(director).onAddEmployee()), ('Del Empl', 'right'),),
                '4': (('Change', 'left'),),
                '5': (('Add', 'left'), ('Rem', 'right')),
                '6': (('Add', 'left'), ('Rem', 'right')),
                '7': (('Add', 'left'), ('Rem', 'right'))}
    toolbar2 = {'1': (('Salary', 'left'), ('Commission', 'right')),
                '4': (('History', 'left'),),
                '5': (('History', 'left'),),
                '6': (('History', 'left'),),
                '7': (('History', 'left'),)}
    objects = manager.get_all()  # one table row per Employee record
    row_count = len(objects)
    col_count = len(gui_table_fields)
    director.construct_gui(row_count, col_count, gui_labels, toolbar1, toolbar2)
    director.load_init_data(Employee)
manager = Manager(Employee)  # shared ORM manager used throughout this module
root = tkinter.Tk()  # NOTE(review): Tk root created at import time (side effect)
if __name__ == '__main__':
    run(root)
    root.mainloop()
|
<filename>env/lib/python2.7/site-packages/MySQLdb/constants/ER.py<gh_stars>0
"""MySQL ER Constants
These constants are error codes for the bulk of the error conditions
that may occur.
"""
HASHCHK = 1000
NISAMCHK = 1001
NO = 1002
YES = 1003
CANT_CREATE_FILE = 1004
CANT_CREATE_TABLE = 1005
CANT_CREATE_DB = 1006
DB_CREATE_EXISTS = 1007
DB_DROP_EXISTS = 1008
DB_DROP_DELETE = 1009
DB_DROP_RMDIR = 1010
CANT_DELETE_FILE = 1011
CANT_FIND_SYSTEM_REC = 1012
CANT_GET_STAT = 1013
CANT_GET_WD = 1014
CANT_LOCK = 1015
CANT_OPEN_FILE = 1016
FILE_NOT_FOUND = 1017
CANT_READ_DIR = 1018
CANT_SET_WD = 1019
CHECKREAD = 1020
DISK_FULL = 1021
DUP_KEY = 1022
ERROR_ON_CLOSE = 1023
ERROR_ON_READ = 1024
ERROR_ON_RENAME = 1025
ERROR_ON_WRITE = 1026
FILE_USED = 1027
FILSORT_ABORT = 1028
FORM_NOT_FOUND = 1029
GET_ERRNO = 1030
ILLEGAL_HA = 1031
KEY_NOT_FOUND = 1032
NOT_FORM_FILE = 1033
NOT_KEYFILE = 1034
OLD_KEYFILE = 1035
OPEN_AS_READONLY = 1036
OUTOFMEMORY = 1037
OUT_OF_SORTMEMORY = 1038
UNEXPECTED_EOF = 1039
CON_COUNT_ERROR = 1040
OUT_OF_RESOURCES = 1041
BAD_HOST_ERROR = 1042
HANDSHAKE_ERROR = 1043
DBACCESS_DENIED_ERROR = 1044
ACCESS_DENIED_ERROR = 1045
NO_DB_ERROR = 1046
UNKNOWN_COM_ERROR = 1047
BAD_NULL_ERROR = 1048
BAD_DB_ERROR = 1049
TABLE_EXISTS_ERROR = 1050
BAD_TABLE_ERROR = 1051
NON_UNIQ_ERROR = 1052
SERVER_SHUTDOWN = 1053
BAD_FIELD_ERROR = 1054
WRONG_FIELD_WITH_GROUP = 1055
WRONG_GROUP_FIELD = 1056
WRONG_SUM_SELECT = 1057
WRONG_VALUE_COUNT = 1058
TOO_LONG_IDENT = 1059
DUP_FIELDNAME = 1060
DUP_KEYNAME = 1061
DUP_ENTRY = 1062
WRONG_FIELD_SPEC = 1063
PARSE_ERROR = 1064
EMPTY_QUERY = 1065
NONUNIQ_TABLE = 1066
INVALID_DEFAULT = 1067
MULTIPLE_PRI_KEY = 1068
TOO_MANY_KEYS = 1069
TOO_MANY_KEY_PARTS = 1070
TOO_LONG_KEY = 1071
KEY_COLUMN_DOES_NOT_EXITS = 1072
BLOB_USED_AS_KEY = 1073
TOO_BIG_FIELDLENGTH = 1074
WRONG_AUTO_KEY = 1075
READY = 1076
NORMAL_SHUTDOWN = 1077
GOT_SIGNAL = 1078
SHUTDOWN_COMPLETE = 1079
FORCING_CLOSE = 1080
IPSOCK_ERROR = 1081
NO_SUCH_INDEX = 1082
WRONG_FIELD_TERMINATORS = 1083
BLOBS_AND_NO_TERMINATED = 1084
TEXTFILE_NOT_READABLE = 1085
FILE_EXISTS_ERROR = 1086
LOAD_INFO = 1087
ALTER_INFO = 1088
WRONG_SUB_KEY = 1089
CANT_REMOVE_ALL_FIELDS = 1090
CANT_DROP_FIELD_OR_KEY = 1091
INSERT_INFO = 1092
INSERT_TABLE_USED = 1093
NO_SUCH_THREAD = 1094
KILL_DENIED_ERROR = 1095
NO_TABLES_USED = 1096
TOO_BIG_SET = 1097
NO_UNIQUE_LOGFILE = 1098
TABLE_NOT_LOCKED_FOR_WRITE = 1099
TABLE_NOT_LOCKED = 1100
BLOB_CANT_HAVE_DEFAULT = 1101
WRONG_DB_NAME = 1102
WRONG_TABLE_NAME = 1103
TOO_BIG_SELECT = 1104
UNKNOWN_ERROR = 1105
UNKNOWN_PROCEDURE = 1106
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
WRONG_PARAMETERS_TO_PROCEDURE = 1108
UNKNOWN_TABLE = 1109
FIELD_SPECIFIED_TWICE = 1110
INVALID_GROUP_FUNC_USE = 1111
UNSUPPORTED_EXTENSION = 1112
TABLE_MUST_HAVE_COLUMNS = 1113
RECORD_FILE_FULL = 1114
UNKNOWN_CHARACTER_SET = 1115
TOO_MANY_TABLES = 1116
TOO_MANY_FIELDS = 1117
TOO_BIG_ROWSIZE = 1118
STACK_OVERRUN = 1119
WRONG_OUTER_JOIN = 1120
NULL_COLUMN_IN_INDEX = 1121
CANT_FIND_UDF = 1122
CANT_INITIALIZE_UDF = 1123
UDF_NO_PATHS = 1124
UDF_EXISTS = 1125
CANT_OPEN_LIBRARY = 1126
CANT_FIND_DL_ENTRY = 1127
FUNCTION_NOT_DEFINED = 1128
HOST_IS_BLOCKED = 1129
HOST_NOT_PRIVILEGED = 1130
# Values restored from the MySQL server error-code reference; the source had
# been corrupted by a "<PASSWORD>" redaction artifact. The surrounding codes
# (HOST_NOT_PRIVILEGED = 1130, UPDATE_INFO = 1134) confirm the sequence.
PASSWORD_ANONYMOUS_USER = 1131
PASSWORD_NOT_ALLOWED = 1132
PASSWORD_NO_MATCH = 1133
UPDATE_INFO = 1134
CANT_CREATE_THREAD = 1135
WRONG_VALUE_COUNT_ON_ROW = 1136
CANT_REOPEN_TABLE = 1137
INVALID_USE_OF_NULL = 1138
REGEXP_ERROR = 1139
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
NONEXISTING_GRANT = 1141
TABLEACCESS_DENIED_ERROR = 1142
COLUMNACCESS_DENIED_ERROR = 1143
ILLEGAL_GRANT_FOR_TABLE = 1144
GRANT_WRONG_HOST_OR_USER = 1145
NO_SUCH_TABLE = 1146
NONEXISTING_TABLE_GRANT = 1147
NOT_ALLOWED_COMMAND = 1148
SYNTAX_ERROR = 1149
DELAYED_CANT_CHANGE_LOCK = 1150
TOO_MANY_DELAYED_THREADS = 1151
ABORTING_CONNECTION = 1152
NET_PACKET_TOO_LARGE = 1153
NET_READ_ERROR_FROM_PIPE = 1154
NET_FCNTL_ERROR = 1155
NET_PACKETS_OUT_OF_ORDER = 1156
NET_UNCOMPRESS_ERROR = 1157
NET_READ_ERROR = 1158
NET_READ_INTERRUPTED = 1159
NET_ERROR_ON_WRITE = 1160
NET_WRITE_INTERRUPTED = 1161
TOO_LONG_STRING = 1162
TABLE_CANT_HANDLE_BLOB = 1163
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
DELAYED_INSERT_TABLE_LOCKED = 1165
WRONG_COLUMN_NAME = 1166
WRONG_KEY_COLUMN = 1167
WRONG_MRG_TABLE = 1168
DUP_UNIQUE = 1169
BLOB_KEY_WITHOUT_LENGTH = 1170
PRIMARY_CANT_HAVE_NULL = 1171
TOO_MANY_ROWS = 1172
REQUIRES_PRIMARY_KEY = 1173
NO_RAID_COMPILED = 1174
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
KEY_DOES_NOT_EXITS = 1176
CHECK_NO_SUCH_TABLE = 1177
CHECK_NOT_IMPLEMENTED = 1178
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ERROR_DURING_COMMIT = 1180
ERROR_DURING_ROLLBACK = 1181
ERROR_DURING_FLUSH_LOGS = 1182
ERROR_DURING_CHECKPOINT = 1183
NEW_ABORTING_CONNECTION = 1184
DUMP_NOT_IMPLEMENTED = 1185
FLUSH_MASTER_BINLOG_CLOSED = 1186
INDEX_REBUILD = 1187
MASTER = 1188
MASTER_NET_READ = 1189
MASTER_NET_WRITE = 1190
FT_MATCHING_KEY_NOT_FOUND = 1191
LOCK_OR_ACTIVE_TRANSACTION = 1192
UNKNOWN_SYSTEM_VARIABLE = 1193
CRASHED_ON_USAGE = 1194
CRASHED_ON_REPAIR = 1195
WARNING_NOT_COMPLETE_ROLLBACK = 1196
TRANS_CACHE_FULL = 1197
SLAVE_MUST_STOP = 1198
SLAVE_NOT_RUNNING = 1199
BAD_SLAVE = 1200
MASTER_INFO = 1201
SLAVE_THREAD = 1202
TOO_MANY_USER_CONNECTIONS = 1203
SET_CONSTANTS_ONLY = 1204
LOCK_WAIT_TIMEOUT = 1205
LOCK_TABLE_FULL = 1206
READ_ONLY_TRANSACTION = 1207
DROP_DB_WITH_READ_LOCK = 1208
CREATE_DB_WITH_READ_LOCK = 1209
WRONG_ARGUMENTS = 1210
NO_PERMISSION_TO_CREATE_USER = 1211
UNION_TABLES_IN_DIFFERENT_DIR = 1212
LOCK_DEADLOCK = 1213
TABLE_CANT_HANDLE_FT = 1214
CANNOT_ADD_FOREIGN = 1215
NO_REFERENCED_ROW = 1216
ROW_IS_REFERENCED = 1217
CONNECT_TO_MASTER = 1218
QUERY_ON_MASTER = 1219
ERROR_WHEN_EXECUTING_COMMAND = 1220
WRONG_USAGE = 1221
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
CANT_UPDATE_WITH_READLOCK = 1223
MIXING_NOT_ALLOWED = 1224
DUP_ARGUMENT = 1225
USER_LIMIT_REACHED = 1226
SPECIFIC_ACCESS_DENIED_ERROR = 1227
LOCAL_VARIABLE = 1228
GLOBAL_VARIABLE = 1229
NO_DEFAULT = 1230
WRONG_VALUE_FOR_VAR = 1231
WRONG_TYPE_FOR_VAR = 1232
VAR_CANT_BE_READ = 1233
CANT_USE_OPTION_HERE = 1234
NOT_SUPPORTED_YET = 1235
MASTER_FATAL_ERROR_READING_BINLOG = 1236
SLAVE_IGNORED_TABLE = 1237
INCORRECT_GLOBAL_LOCAL_VAR = 1238
WRONG_FK_DEF = 1239
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
OPERAND_COLUMNS = 1241
SUBQUERY_NO_1_ROW = 1242
UNKNOWN_STMT_HANDLER = 1243
CORRUPT_HELP_DB = 1244
CYCLIC_REFERENCE = 1245
AUTO_CONVERT = 1246
ILLEGAL_REFERENCE = 1247
DERIVED_MUST_HAVE_ALIAS = 1248
SELECT_REDUCED = 1249
TABLENAME_NOT_ALLOWED_HERE = 1250
NOT_SUPPORTED_AUTH_MODE = 1251
SPATIAL_CANT_HAVE_NULL = 1252
COLLATION_CHARSET_MISMATCH = 1253
SLAVE_WAS_RUNNING = 1254
SLAVE_WAS_NOT_RUNNING = 1255
TOO_BIG_FOR_UNCOMPRESS = 1256
ZLIB_Z_MEM_ERROR = 1257
ZLIB_Z_BUF_ERROR = 1258
ZLIB_Z_DATA_ERROR = 1259
CUT_VALUE_GROUP_CONCAT = 1260
WARN_TOO_FEW_RECORDS = 1261
WARN_TOO_MANY_RECORDS = 1262
WARN_NULL_TO_NOTNULL = 1263
WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
WARN_USING_OTHER_HANDLER = 1266
CANT_AGGREGATE_2COLLATIONS = 1267
DROP_USER = 1268
REVOKE_GRANTS = 1269
CANT_AGGREGATE_3COLLATIONS = 1270
CANT_AGGREGATE_NCOLLATIONS = 1271
VARIABLE_IS_NOT_STRUCT = 1272
UNKNOWN_COLLATION = 1273
SLAVE_IGNORED_SSL_PARAMS = 1274
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
WARN_FIELD_RESOLVED = 1276
BAD_SLAVE_UNTIL_COND = 1277
MISSING_SKIP_SLAVE = 1278
UNTIL_COND_IGNORED = 1279
WRONG_NAME_FOR_INDEX = 1280
WRONG_NAME_FOR_CATALOG = 1281
WARN_QC_RESIZE = 1282
BAD_FT_COLUMN = 1283
UNKNOWN_KEY_CACHE = 1284
WARN_HOSTNAME_WONT_WORK = 1285
UNKNOWN_STORAGE_ENGINE = 1286
WARN_DEPRECATED_SYNTAX = 1287
NON_UPDATABLE_TABLE = 1288
FEATURE_DISABLED = 1289
OPTION_PREVENTS_STATEMENT = 1290
DUPLICATED_VALUE_IN_TYPE = 1291
TRUNCATED_WRONG_VALUE = 1292
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
INVALID_ON_UPDATE = 1294
UNSUPPORTED_PS = 1295
GET_ERRMSG = 1296
GET_TEMPORARY_ERRMSG = 1297
UNKNOWN_TIME_ZONE = 1298
WARN_INVALID_TIMESTAMP = 1299
INVALID_CHARACTER_STRING = 1300
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
CONFLICTING_DECLARATIONS = 1302
SP_NO_RECURSIVE_CREATE = 1303
SP_ALREADY_EXISTS = 1304
SP_DOES_NOT_EXIST = 1305
SP_DROP_FAILED = 1306
SP_STORE_FAILED = 1307
SP_LILABEL_MISMATCH = 1308
SP_LABEL_REDEFINE = 1309
SP_LABEL_MISMATCH = 1310
SP_UNINIT_VAR = 1311
SP_BADSELECT = 1312
SP_BADRETURN = 1313
SP_BADSTATEMENT = 1314
UPDATE_LOG_DEPRECATED_IGNORED = 1315
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
QUERY_INTERRUPTED = 1317
SP_WRONG_NO_OF_ARGS = 1318
SP_COND_MISMATCH = 1319
SP_NORETURN = 1320
SP_NORETURNEND = 1321
SP_BAD_CURSOR_QUERY = 1322
SP_BAD_CURSOR_SELECT = 1323
SP_CURSOR_MISMATCH = 1324
SP_CURSOR_ALREADY_OPEN = 1325
SP_CURSOR_NOT_OPEN = 1326
SP_UNDECLARED_VAR = 1327
SP_WRONG_NO_OF_FETCH_ARGS = 1328
SP_FETCH_NO_DATA = 1329
SP_DUP_PARAM = 1330
SP_DUP_VAR = 1331
SP_DUP_COND = 1332
SP_DUP_CURS = 1333
SP_CANT_ALTER = 1334
SP_SUBSELECT_NYI = 1335
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
SP_VARCOND_AFTER_CURSHNDLR = 1337
SP_CURSOR_AFTER_HANDLER = 1338
SP_CASE_NOT_FOUND = 1339
FPARSER_TOO_BIG_FILE = 1340
FPARSER_BAD_HEADER = 1341
FPARSER_EOF_IN_COMMENT = 1342
FPARSER_ERROR_IN_PARAMETER = 1343
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
VIEW_NO_EXPLAIN = 1345
FRM_UNKNOWN_TYPE = 1346
WRONG_OBJECT = 1347
NONUPDATEABLE_COLUMN = 1348
VIEW_SELECT_DERIVED = 1349
VIEW_SELECT_CLAUSE = 1350
VIEW_SELECT_VARIABLE = 1351
VIEW_SELECT_TMPTABLE = 1352
VIEW_WRONG_LIST = 1353
WARN_VIEW_MERGE = 1354
WARN_VIEW_WITHOUT_KEY = 1355
VIEW_INVALID = 1356
SP_NO_DROP_SP = 1357
SP_GOTO_IN_HNDLR = 1358
TRG_ALREADY_EXISTS = 1359
TRG_DOES_NOT_EXIST = 1360
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
TRG_CANT_CHANGE_ROW = 1362
TRG_NO_SUCH_ROW_IN_TRG = 1363
NO_DEFAULT_FOR_FIELD = 1364
DIVISION_BY_ZERO = 1365
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ILLEGAL_VALUE_FOR_TYPE = 1367
VIEW_NONUPD_CHECK = 1368
VIEW_CHECK_FAILED = 1369
PROCACCESS_DENIED_ERROR = 1370
RELAY_LOG_FAIL = 1371
PASSWD_LENGTH = 1372
UNKNOWN_TARGET_BINLOG = 1373
IO_ERR_LOG_INDEX_READ = 1374
BINLOG_PURGE_PROHIBITED = 1375
FSEEK_FAIL = 1376
BINLOG_PURGE_FATAL_ERR = 1377
LOG_IN_USE = 1378
LOG_PURGE_UNKNOWN_ERR = 1379
RELAY_LOG_INIT = 1380
NO_BINARY_LOGGING = 1381
RESERVED_SYNTAX = 1382
WSAS_FAILED = 1383
DIFF_GROUPS_PROC = 1384
NO_GROUP_FOR_PROC = 1385
ORDER_WITH_PROC = 1386
LOGGING_PROHIBIT_CHANGING_OF = 1387
NO_FILE_MAPPING = 1388
WRONG_MAGIC = 1389
PS_MANY_PARAM = 1390
KEY_PART_0 = 1391
VIEW_CHECKSUM = 1392
VIEW_MULTIUPDATE = 1393
VIEW_NO_INSERT_FIELD_LIST = 1394
VIEW_DELETE_MERGE_VIEW = 1395
CANNOT_USER = 1396
XAER_NOTA = 1397
XAER_INVAL = 1398
XAER_RMFAIL = 1399
XAER_OUTSIDE = 1400
XAER_RMERR = 1401
XA_RBROLLBACK = 1402
NONEXISTING_PROC_GRANT = 1403
PROC_AUTO_GRANT_FAIL = 1404
PROC_AUTO_REVOKE_FAIL = 1405
DATA_TOO_LONG = 1406
SP_BAD_SQLSTATE = 1407
STARTUP = 1408
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
CANT_CREATE_USER_WITH_GRANT = 1410
WRONG_VALUE_FOR_TYPE = 1411
TABLE_DEF_CHANGED = 1412
SP_DUP_HANDLER = 1413
SP_NOT_VAR_ARG = 1414
SP_NO_RETSET = 1415
CANT_CREATE_GEOMETRY_OBJECT = 1416
FAILED_ROUTINE_BREAK_BINLOG = 1417
BINLOG_UNSAFE_ROUTINE = 1418
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
EXEC_STMT_WITH_OPEN_CURSOR = 1420
STMT_HAS_NO_OPEN_CURSOR = 1421
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
NO_DEFAULT_FOR_VIEW_FIELD = 1423
SP_NO_RECURSION = 1424
TOO_BIG_SCALE = 1425
TOO_BIG_PRECISION = 1426
M_BIGGER_THAN_D = 1427
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
FOREIGN_DATA_STRING_INVALID = 1433
CANT_CREATE_FEDERATED_TABLE = 1434
TRG_IN_WRONG_SCHEMA = 1435
STACK_OVERRUN_NEED_MORE = 1436
TOO_LONG_BODY = 1437
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
TOO_BIG_DISPLAYWIDTH = 1439
XAER_DUPID = 1440
DATETIME_FUNCTION_OVERFLOW = 1441
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
VIEW_PREVENT_UPDATE = 1443
PS_NO_RECURSION = 1444
SP_CANT_SET_AUTOCOMMIT = 1445
MALFORMED_DEFINER = 1446
VIEW_FRM_NO_USER = 1447
VIEW_OTHER_USER = 1448
NO_SUCH_USER = 1449
FORBID_SCHEMA_CHANGE = 1450
ROW_IS_REFERENCED_2 = 1451
NO_REFERENCED_ROW_2 = 1452
SP_BAD_VAR_SHADOW = 1453
TRG_NO_DEFINER = 1454
OLD_FILE_FORMAT = 1455
SP_RECURSION_LIMIT = 1456
SP_PROC_TABLE_CORRUPT = 1457
ERROR_LAST = 1457
|
<reponame>saulshanabrook/CrossHair
import collections
import copy
import dataclasses
import re
import sys
import unittest
from typing import *
from crosshair.core import make_fake_object
from crosshair.core_and_libs import *
from crosshair.test_util import check_ok
from crosshair.test_util import check_exec_err
from crosshair.test_util import check_post_err
from crosshair.test_util import check_fail
from crosshair.test_util import check_unknown
from crosshair.test_util import check_messages
from crosshair.util import set_debug
from crosshair.statespace import SimpleStateSpace
#
# Begin fixed line number area.
# Tests depend on the line number of the following section.
#
class Pokeable:
    '''
    inv: self.x >= 0
    '''
    x: int = 1  # counter; the invariant above requires it stays non-negative
    def poke(self) -> None:  # always safe: only increments
        '''
        post[self]: True
        '''
        self.x += 1
    def wild_pokeby(self, amount: int) -> None:  # unsafe: a negative amount can break the invariant (expected POST_FAIL)
        '''
        post[self]: True
        '''
        self.x += amount
    def safe_pokeby(self, amount: int) -> None:  # safe: precondition forbids negative amounts
        '''
        pre: amount >= 0
        post[self]: True
        '''
        self.x += amount
    def __repr__(self) -> str:
        return 'Pokeable(' + repr(self.x) + ')'
    def __init__(self, x: int) -> None:  # tests call e.g. Pokeable(0)
        '''
        pre: x >= 0
        '''
        self.x = x
#
# End fixed line number area.
#
class Cat:
    # Base fixture; a subclass overrides size() so the analyzer can find
    # counterexamples through subclass instances.
    def size(self) -> int:
        return 1
class BiggerCat(Cat):
    # Override returning a different value; used as the counterexample
    # source in test_use_subclasses_of_arguments.
    def size(self) -> int:
        return 2
class PersonTuple(NamedTuple):
    # Immutable NamedTuple fixture (contrast with PersonWithoutAttributes).
    name: str
    age: int
class PersonWithoutAttributes:
    # Plain class with no class-level annotations; attributes exist only
    # after __init__ runs.
    def __init__(self, name: str, age: int):
        self.name = name
        self.age = age
NOW = 1000  # fixed reference time used by Person's age property
@dataclasses.dataclass(
    repr=False # make checking faster (repr has an infinite search tree)
)
class Person:
    '''
    Contains various features that we expect to be successfully checkable.
    inv: True # TODO: test that NameError in invariant does the right thing
    '''
    name: str
    birth: int
    # "age" is derived from the module-level NOW constant; the setter and
    # deleter mutate/remove the underlying 'birth' field.
    def _getage(self):
        return NOW - self.birth
    def _setage(self, newage):
        self.birth = NOW - newage
    def _delage(self):
        del self.birth
    age = property(_getage, _setage, _delage, 'Age of person')
    def abstract_operation(self):
        '''
        post: False # doesn't error because the method is "abstract"
        '''
        raise NotImplementedError
class SmokeDetector:
    ''' inv: not (self._is_plugged_in and self._in_original_packaging) '''
    # Fixture with a class invariant over two boolean attributes; subclasses
    # in the tests override signaling_alarm to probe contract inheritance.
    _in_original_packaging: bool
    _is_plugged_in: bool
    def signaling_alarm(self, air_samples: List[str]) -> bool:
        '''
        pre: self._is_plugged_in
        post: implies('smoke' in air_samples, _ == True)
        '''
        return 'smoke' in air_samples
class Measurer:
    # The postcondition calls the method itself; used by
    # test_recursive_postcondition_enforcement_suspension (expects POST_FAIL,
    # since measure(x) != measure(-x) around the threshold).
    def measure(self, x: int) -> str:
        '''
        post: _ == self.measure(-x)
        '''
        return 'small' if x <= 10 else 'large'
A_REFERENCED_THING = 42  # referenced by ReferenceHoldingClass's invariant (name-resolution test)
@dataclasses.dataclass(repr=False)
class ReferenceHoldingClass:
    '''
    inv: self.item != A_REFERENCED_THING
    '''
    # The invariant references a module-level name; exercised by
    # test_expr_name_resolution to check dataclass-generated methods
    # resolve names in the right namespace.
    item: str
def fibb(x: int) -> int:
    '''
    pre: x>=0
    post: _ < 10
    '''
    # Base case: the first two Fibonacci numbers are 1.
    if x <= 2:
        return 1
    # Recurse directly instead of staging through temporaries.
    return fibb(x - 1) + fibb(x - 2)
def recursive_example(x: int) -> bool:
    '''
    pre: x >= 0
    post[]:
        __old__.x >= 0 # just to confirm __old__ works in recursive cases
        _ == True
    '''
    # Count down recursively to zero; every step preserves the contract.
    return True if x == 0 else recursive_example(x - 1)
class ProxiedObjectTest(unittest.TestCase):
    # Verifies that make_fake_object produces symbolic stand-ins that behave
    # like real instances: correct type identity, copy semantics, and
    # independent symbolic members after mutation.
    def test_proxy_type(self) -> None:
        poke = make_fake_object(SimpleStateSpace(), Pokeable, 'ppoke')
        self.assertIs(type(poke), Pokeable)
    def test_copy(self) -> None:
        poke1 = make_fake_object(SimpleStateSpace(), Pokeable, 'ppoke')
        poke1.poke()
        poke2 = copy.copy(poke1)
        self.assertIsNot(poke1, poke2)
        self.assertEqual(type(poke1), type(poke2))
        # shallow copy shares the symbolic member until one side mutates
        self.assertIs(poke1.x, poke2.x)
        poke1.poke()
        self.assertIsNot(poke1.x, poke2.x)
        self.assertNotEqual(str(poke1.x.var), str(poke2.x.var))
    def test_proxy_alone(self) -> None:
        def f(pokeable: Pokeable) -> None:
            '''
            post[pokeable]: pokeable.x > 0
            '''
            pokeable.poke()
        self.assertEqual(*check_ok(f))
    def test_proxy_in_list(self) -> None:
        def f(pokeables: List[Pokeable]) -> None:
            '''
            pre: len(pokeables) == 1
            post: all(p.x > 0 for p in pokeables)
            '''
            for pokeable in pokeables:
                pokeable.poke()
        self.assertEqual(*check_ok(f))
class ObjectsTest(unittest.TestCase):
    # Exercises analysis of user-defined classes: attribute-mutation tracking
    # via post[...], properties, NamedTuples, TypeVars, and inheritance of
    # pre/postconditions.
    def test_obj_member_fail(self) -> None:
        def f(foo: Pokeable) -> int:
            '''
            pre: 0 <= foo.x <= 4
            post[foo]: _ < 5
            '''
            foo.poke()
            foo.poke()
            return foo.x
        self.assertEqual(*check_fail(f))
    def test_obj_member_nochange_ok(self) -> None:
        def f(foo: Pokeable) -> int:
            ''' post: _ == foo.x '''
            return foo.x
        self.assertEqual(*check_ok(f))
    def test_obj_member_change_ok(self) -> None:
        def f(foo: Pokeable) -> int:
            '''
            pre: foo.x >= 0
            post[foo]: foo.x >= 2
            '''
            foo.poke()
            foo.poke()
            return foo.x
        self.assertEqual(*check_ok(f))
    def test_obj_member_change_detect(self) -> None:
        # Mutation without declaring foo in post[...] must be reported.
        def f(foo: Pokeable) -> int:
            '''
            pre: foo.x > 0
            post[]: True
            '''
            foo.poke()
            return foo.x
        self.assertEqual(*check_post_err(f))
    def test_example_second_largest(self) -> None:
        def second_largest(items: List[int]) -> int:
            '''
            pre: 2 <= len(items) <= 3 # (max is to cap runtime)
            post: _ == sorted(items)[-2]
            '''
            next_largest, largest = items[:2]
            if largest < next_largest:
                next_largest, largest = largest, next_largest
            for item in items[2:]:
                if item > largest:
                    largest, next_largest = (item, largest)
                elif item > next_largest:
                    next_largest = item
            return next_largest
        self.assertEqual(*check_ok(second_largest))
    def test_pokeable_class(self) -> None:
        # Depends on the "fixed line number area" at the top of this file.
        messages = analyze_class(Pokeable)
        self.assertEqual(*check_messages(messages,
                                         state=MessageType.POST_FAIL,
                                         line=50,
                                         column=0))
    def test_person_class(self) -> None:
        messages = analyze_class(Person)
        self.assertEqual(*check_messages(messages, state=MessageType.CONFIRMED))
    def test_extend_namedtuple(self) -> None:
        def f(p: PersonTuple) -> PersonTuple:
            '''
            post: _.age != 222
            '''
            return PersonTuple(p.name, p.age + 1)
        self.assertEqual(*check_fail(f))
    def test_without_typed_attributes(self) -> None:
        # NOTE(review): returns PersonTuple although annotated to return
        # PersonWithoutAttributes -- looks copy-pasted; confirm intent.
        def f(p: PersonWithoutAttributes) -> PersonWithoutAttributes:
            '''
            post: _.age != 222
            '''
            return PersonTuple(p.name, p.age + 1)
        self.assertEqual(*check_fail(f))
    def test_property(self) -> None:
        def f(p: Person) -> None:
            '''
            pre: 0 <= p.age < 100
            post[p]: p.birth + p.age == NOW
            '''
            assert p.age == NOW - p.birth
            oldbirth = p.birth
            p.age = p.age + 1
            assert oldbirth == p.birth + 1
        self.assertEqual(*check_ok(f))
    def test_typevar(self) -> None:
        T = TypeVar('T')
        class MaybePair(Generic[T]):
            '''
            inv: (self.left is None) == (self.right is None)
            '''
            left: Optional[T]
            right: Optional[T]
            def setpair(self, left: Optional[T], right: Optional[T]):
                '''post[self]: True'''
                if (left is None) ^ (right is None):
                    raise ValueError('Populate both values or neither value in the pair')
                self.left, self.right = left, right
        messages = analyze_class(MaybePair)
        self.assertEqual(*check_messages(messages, state=MessageType.EXEC_ERR))
    def test_bad_invariant(self):
        # No __init__ can satisfy this invariant, so preconditions are unsat.
        class Foo:
            '''
            inv: self.item == 7
            '''
            def do_a_thing(self) -> None:
                pass
        self.assertEqual(*check_messages(analyze_class(Foo),
                                         state=MessageType.PRE_UNSAT))
    def test_expr_name_resolution(self):
        '''
        dataclass() generates several methods. It can be tricky to ensure
        that invariants for these methods can resolve names in the
        correct namespace.
        '''
        self.assertEqual(*check_messages(analyze_class(ReferenceHoldingClass), state=MessageType.CONFIRMED))
    def test_inheritance_base_class_ok(self):
        self.assertEqual(*check_messages(analyze_class(SmokeDetector), state=MessageType.CONFIRMED))
    def test_super(self):
        class FooDetector(SmokeDetector):
            def signaling_alarm(self, air_samples: List[str]):
                return super().signaling_alarm(air_samples)
        self.assertEqual(*check_messages(analyze_class(FooDetector), state=MessageType.CONFIRMED))
    def test_use_inherited_postconditions(self):
        class CarbonMonoxideDetector(SmokeDetector):
            def signaling_alarm(self, air_samples: List[str]) -> bool:
                '''
                post: implies('carbon_monoxide' in air_samples, _ == True)
                '''
                return 'carbon_monoxide' in air_samples # fails: does not detect smoke
        self.assertEqual(*check_messages(analyze_class(CarbonMonoxideDetector),
                                         state=MessageType.POST_FAIL))
    def test_inherited_preconditions_overridable(self):
        class SmokeDetectorWithBattery(SmokeDetector):
            _battery_power: int
            def signaling_alarm(self, air_samples: List[str]) -> bool:
                '''
                pre: self._battery_power > 0 or self._is_plugged_in
                '''
                return 'smoke' in air_samples
        self.assertEqual(*check_messages(analyze_class(SmokeDetectorWithBattery),
                                         state=MessageType.CONFIRMED))
    def test_use_subclasses_of_arguments(self):
        # Even though the argument below is typed as the base class, the fact
        # that a faulty implementation exists is enough to produce a
        # counterexample:
        def f(foo: Cat) -> int:
            ''' post: _ == 1 '''
            return foo.size()
        self.assertEqual(*check_fail(f))
    def test_check_parent_conditions(self):
        # Ensure that conditions of parent classes are checked in children
        # even when not overridden.
        class Parent:
            def size(self) -> int:
                return 1
            def amount_smaller(self, other_size: int) -> int:
                '''
                pre: other_size >= 1
                post: _ >= 0
                '''
                return other_size - self.size()
        class Child(Parent):
            def size(self) -> int:
                return 2
        messages = analyze_class(Child)
        self.assertEqual(*check_messages(messages, state=MessageType.POST_FAIL))
    # TODO: precondition strengthening check
    def TODO_test_cannot_strengthen_inherited_preconditions(self):
        class PowerHungrySmokeDetector(SmokeDetector):
            _battery_power: int
            def signaling_alarm(self, air_samples: List[str]) -> bool:
                '''
                pre: self._is_plugged_in
                pre: self._battery_power > 0
                '''
                return 'smoke' in air_samples
        self.assertEqual(*check_messages(analyze_class(PowerHungrySmokeDetector),
                                         state=MessageType.PRE_INVALID))
    def test_container_typevar(self) -> None:
        T = TypeVar('T')
        def f(s: Sequence[T]) -> Dict[T, T]:
            ''' post: len(_) == len(s) '''
            return dict(zip(s, s))
        # (sequence could contain duplicate items)
        self.assertEqual(*check_fail(f))
    def test_typevar_bounds_fail(self) -> None:
        T = TypeVar('T')
        def f(x: T) -> int:
            ''' post:True '''
            return x + 1 # type: ignore
        self.assertEqual(*check_exec_err(f))
    def test_typevar_bounds_ok(self) -> None:
        B = TypeVar('B', bound=int)
        def f(x: B) -> int:
            ''' post:True '''
            return x + 1
        self.assertEqual(*check_ok(f))
    def test_any(self) -> None:
        def f(x: Any) -> bool:
            ''' post: True '''
            return x is None
        self.assertEqual(*check_ok(f))
    def test_meeting_class_preconditions(self) -> None:
        # NOTE(review): 'result' is computed but never asserted on -- this
        # test only checks that analysis completes without raising.
        def f() -> int:
            '''
            post: _ == -1
            '''
            pokeable = Pokeable(0)
            pokeable.safe_pokeby(-1)
            return pokeable.x
        result = analyze_function(f)
    def test_enforced_fn_preconditions(self) -> None:
        def f(x: int) -> bool:
            ''' post: _ == True '''
            return bool(fibb(x)) or True
        self.assertEqual(*check_exec_err(f))
    def test_generic_object(self) -> None:
        def f(thing: object):
            ''' post: True '''
            if isinstance(thing, SmokeDetector):
                return thing._is_plugged_in
            return False
        self.assertEqual(*check_ok(f))
class BehaviorsTest(unittest.TestCase):
    """Analyzer behavior tests: contract syntax errors, raises-clauses,
    recursion handling, counterexample reporting, nondeterminism detection,
    and fallback to unknown-verdicts for values that cannot be modeled."""
    def test_syntax_error(self) -> None:
        # `x && x` is not valid Python, so the `pre:` contract cannot parse.
        def f(x: int) -> int:
            ''' pre: x && x '''
        self.assertEqual(*check_messages(analyze_function(f),
                                         state=MessageType.SYNTAX_ERR))
    def test_invalid_raises(self) -> None:
        # An unknown exception name in `raises:` is reported as a syntax error.
        def f(x: int) -> int:
            ''' raises: NotExistingError '''
            return x
        self.assertEqual(*check_messages(analyze_function(f),
                                         state=MessageType.SYNTAX_ERR))
    def test_raises_ok(self) -> None:
        # Raising an exception listed in `raises:` is acceptable even though
        # the return statement is never reached.
        def f() -> bool:
            '''
            raises: IndexError, NameError
            post: __return__
            '''
            raise IndexError()
            return True
        self.assertEqual(*check_ok(f))
    def test_optional_can_be_none_fail(self) -> None:
        # n may be None, so `isinstance(n, Pokeable)` is not always true.
        def f(n: Optional[Pokeable]) -> bool:
            ''' post: _ '''
            return isinstance(n, Pokeable)
        self.assertEqual(*check_fail(f))
    def test_implicit_heapref_conversions(self) -> None:
        def f(foo: List[List]) -> None:
            '''
            pre: len(foo) > 0
            post: True
            '''
            foo[0].append(42)
        self.assertEqual(*check_ok(f))
    def test_nonuniform_list_types_1(self) -> None:
        # b[0] is dropped by the slice, but an equal value may still be
        # present in `a`, hence the expected failure.
        def f(a: List[object], b: List[int]) -> List[object]:
            '''
            pre: len(b) > 0
            post: b[0] not in _
            '''
            ret = (a + b[1:])
            return ret
        self.assertEqual(*check_fail(f))
    def test_nonuniform_list_types_2(self) -> None:
        # Mirror image of the test above, dropping the last element instead.
        def f(a: List[object], b: List[int]) -> List[object]:
            '''
            pre: len(b) > 0
            post: b[-1] not in _
            '''
            return (a + b[:-1])
        self.assertEqual(*check_fail(f))
    def test_varargs_fail(self) -> None:
        # With no varargs and no kwargs the result equals x, violating `_ > x`.
        def f(x: int, *a: str, **kw: bool) -> int:
            ''' post: _ > x '''
            return x + len(a) + (42 if kw else 0)
        self.assertEqual(*check_fail(f))
    def test_varargs_ok(self) -> None:
        def f(x: int, *a: str, **kw: bool) -> int:
            ''' post: _ >= x '''
            return x + len(a) + (42 if kw else 0)
        self.assertEqual(*check_unknown(f))
    def test_recursive_fn_fail(self) -> None:
        self.assertEqual(*check_fail(fibb))
    def test_recursive_fn_ok(self) -> None:
        self.assertEqual(*check_ok(recursive_example))
    def test_recursive_postcondition_ok(self) -> None:
        # The postcondition calls f itself; x*x == (-x)*(-x) always holds.
        def f(x: int) -> int:
            ''' post: _ == f(-x) '''
            return x * x
        self.assertEqual(*check_ok(f))
    def test_recursive_postcondition_enforcement_suspension(self) -> None:
        messages = analyze_class(Measurer)
        self.assertEqual(*check_messages(messages,
                                         state=MessageType.POST_FAIL))
    def test_error_message_has_unmodified_args(self) -> None:
        # The counterexample message must show foo's *pre-call* value
        # (Pokeable(10)), not its state after poke() mutated it.
        def f(foo: List[Pokeable]) -> None:
            '''
            pre: len(foo) == 1
            pre: foo[0].x == 10
            post[foo]: foo[0].x == 12
            '''
            foo[0].poke()
        self.assertEqual(*check_messages(
            analyze_function(f),
            state=MessageType.POST_FAIL,
            message='false when calling f(foo = [Pokeable(10)])'))
    # TODO: List[List] involves no HeapRefs
    def TODO_test_potential_circular_references(self) -> None:
        # TODO?: potential aliasing of input argument data?
        def f(foo: List[List], thing: object) -> None:
            '''
            pre: len(foo) == 2
            pre: len(foo[0]) == 1
            pre: len(foo[1]) == 1
            post: len(foo[1]) == 1
            '''
            foo[0].append(object()) # TODO: using 42 yields a z3 sort error
        self.assertEqual(*check_ok(f))
    def test_nonatomic_comparison(self) -> None:
        # A list never compares equal to an int, so `not _` always holds.
        def f(x: int, l: List[str]) -> bool:
            ''' post: not _ '''
            return l == x
        self.assertEqual(*check_ok(f))
    def test_difficult_equality(self) -> None:
        def f(x: Dict[FrozenSet[float], int]) -> bool:
            ''' post: not _ '''
            return x == {frozenset({10.0}): 1}
        self.assertEqual(*check_fail(f))
    def test_nondeterminisim_detected(self) -> None:
        # NOTE(review): both branches below are intentionally identical; the
        # point is that f mutates state captured from the enclosing scope,
        # which makes repeated symbolic executions disagree.
        _GLOBAL_THING = [True]
        def f(i: int) -> int:
            ''' post: True '''
            if i > 0:
                _GLOBAL_THING[0] = not _GLOBAL_THING[0]
            else:
                _GLOBAL_THING[0] = not _GLOBAL_THING[0]
            if _GLOBAL_THING[0]:
                return -i if i < 0 else i
            else:
                return -i if i < 0 else i
        self.assertEqual(*check_exec_err(f, 'NotDeterministic'))
    def test_old_works_in_invariants(self) -> None:
        # `__old__` lets the invariant refer to the pre-call state.
        class FrozenApples:
            ''' inv: self.count == __old__.self.count '''
            count: int
            def add_one(self):
                self.count += 1
        messages = analyze_class(FrozenApples)
        self.assertEqual(*check_messages(messages, state=MessageType.POST_FAIL))
    def test_fallback_when_smt_values_out_themselves(self) -> None:
        # str.join is expected to defeat symbolic modeling -> unknown verdict.
        def f(items: List[str]) -> str:
            ''' post: True '''
            return ','.join(items)
        self.assertEqual(*check_unknown(f))
    def test_fallback_when_regex_is_used(self) -> None:
        # Same fallback behavior for regular expressions.
        def f(s: str) -> bool:
            ''' post: True '''
            return bool(re.match('(\d+)', s))
        self.assertEqual(*check_unknown(f))
def profile():
    # This is a scratch area to run quick profiles.
    # The test body is elided (`...`), so presumably only the surrounding
    # analyzer machinery of check_fail() is being profiled here.
    class ProfileTest(unittest.TestCase):
        def test_nonuniform_list_types_2(self) -> None:
            def f(a: List[object], b: List[int]) -> List[object]:
                ...
            self.assertEqual(*check_fail(f))
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(ProfileTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
    # -v/--verbose enables analyzer debug output; -p runs the profiling
    # scratch area instead of the full test suite.
    if ('-v' in sys.argv) or ('--verbose' in sys.argv):
        set_debug(True)
    if ('-p' in sys.argv):
        profile()
    else:
        unittest.main()
|
<gh_stars>10-100
#############################
#
# copyright 2016-2021 Open Interconnect Consortium, Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE OPEN INTERCONNECT CONSORTIUM, INC. "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE OR
# WARRANTIES OF NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE OPEN INTERCONNECT CONSORTIUM, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#############################
import argparse
import os
import jsonschema
import json
import sys
import traceback
try:
    from deepdiff import DeepDiff
except ImportError:
    # Best-effort auto-install, then retry the import.
    print("missing DeepDiff:")
    print("Trying to Install required module: DeepDiff ")
    os.system('python3 -m pip install deepdiff')
    # BUG FIX: the original did `import deepdiff` here, which never bound the
    # `DeepDiff` name used by compare_json() -> NameError after the install.
    from deepdiff import DeepDiff
if sys.version_info < (3, 5):
    raise Exception("ERROR: Python 3.5 or more is required, you are currently running Python %d.%d!" %
                    (sys.version_info[0], sys.version_info[1]))
def json_print(data):
    """
    Pretty-print *data* as indented JSON; fall back to printing the raw
    value when it is not JSON-serializable.
    :param data: json to be printed
    """
    try:
        json_string = json.dumps(data, indent=2)
    except (TypeError, ValueError):
        # BUG FIX: was a bare `except:` -- only serialization errors
        # (non-serializable objects, circular refs) should trigger the
        # raw-print fallback.
        print(data)
    else:
        print(json_string)
def load_json(filename, my_dir=None):
    """
    Load a JSON file and return its parsed contents.
    :param filename: filename (with extension)
    :param my_dir: optional directory joined in front of *filename*
    :return: parsed JSON (dict/list/...)
    :raises FileNotFoundError: if the file does not exist
    :raises json.JSONDecodeError: if the file content is not valid JSON
    """
    full_path = filename if my_dir is None else os.path.join(my_dir, filename)
    if not os.path.isfile(full_path):
        # Keep the original diagnostic; open() below still raises, as before.
        print("json file does not exist:", full_path)
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(full_path, 'r') as fp:
        return json.load(fp)
def compare_json(file1, file2):
    """
    Load two JSON files and report whether they are equal, ignoring
    ordering differences; the diff is printed when they differ.
    :param file1: first filename (with extension)
    :param file2: second filename (with extension)
    """
    diff = DeepDiff(load_json(file1), load_json(file2), ignore_order=True)
    if not diff:
        print(" == EQUAL ==")
        return
    print(" == NOT EQUAL == ")
    json_print(diff)
#
# main of script
#
if __name__ == '__main__':
    print("***************************")
    print("*** compareJson (v1) ***")
    print("***************************")
    # Both file arguments are mandatory; --verbose is accepted but nothing in
    # this script reads args.verbose.
    parser = argparse.ArgumentParser()
    parser.add_argument("-ver", "--verbose", help="Execute in verbose mode", action='store_true')
    parser.add_argument("-file1", "--file1", default=None, help="swagger file name 1", nargs='?', const="", required=True)
    parser.add_argument("-file2", "--file2", default=None, help="swagger file name 2", nargs='?', const="", required=True)
    args = parser.parse_args()
    print("file1 : " + str(args.file1))
    print("file2 : " + str(args.file2))
    compare_json(args.file1, args.file2)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import sys
import collections
import decimal
from itertools import permutations
import singledispatch as functools
from singledispatch.helpers import Support
import typing
import six
try:
from collections import ChainMap
except ImportError:
from singledispatch.helpers import ChainMap
collections.ChainMap = ChainMap
try:
from collections import OrderedDict
except ImportError:
from singledispatch.helpers import OrderedDict
collections.OrderedDict = OrderedDict
try:
import unittest2 as unittest
except ImportError:
import unittest
# Python 2/3 compat: the ABCs live in collections.abc on py3, directly in
# collections on py2.
coll_abc = getattr(collections, 'abc', collections)
support = Support()
# Work out how ABC classes render in repr() ('collections.abc' on py3,
# '_abcoll' on py2) so error-message assertions can be version-agnostic.
for _prefix in ('collections.abc', '_abcoll'):
    if _prefix in repr(coll_abc.Container):
        abcoll_prefix = _prefix
        break
else:
    abcoll_prefix = '?'
del _prefix
# Rebind `str` to the text type on both major versions.
str = type("")
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
    def test_mro(self):
        # Dispatch walks the argument class's MRO: C has no registration of
        # its own so it falls back to A's; D's MRO is (D, C, B, A), so the B
        # registration is found before A's for D instances.
        @functools.singledispatch
        def g(obj):
            return "base"
        class A(object):
            pass
        class C(A):
            pass
        class B(A):
            pass
        class D(C, B):
            pass
        def g_A(a):
            return "A"
        def g_B(b):
            return "B"
        g.register(A, g_A)
        g.register(B, g_B)
        self.assertEqual(g(A()), "A")
        self.assertEqual(g(B()), "B")
        self.assertEqual(g(C()), "A")
        self.assertEqual(g(D()), "B")
    def test_register_decorator(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        # register() used as a decorator returns the undecorated function.
        @g.register(int)
        def g_int(i):
            return "int %s" % (i,)
        self.assertEqual(g(""), "base")
        self.assertEqual(g(12), "int 12")
        # dispatch() exposes the implementation that would be selected.
        self.assertIs(g.dispatch(int), g_int)
        self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above this is not g.
        # @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = coll_abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
expected = _mro_compat([
dict, c.MutableMapping, c.Mapping, c.Sized,
c.Iterable, c.Container, object])
self.assertEqual(m, expected)
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
expected = _mro_compat([
collections.ChainMap, c.MutableMapping, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
self.assertEqual(m, expected)
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# preceeds MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
expected = _mro_compat([
D, c.MutableSequence, c.Sequence,
collections.defaultdict, dict, c.MutableMapping,
c.Mapping, c.Sized, c.Iterable, c.Container,
object])
self.assertEqual(m, expected)
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
expected = _mro_compat([
C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
self.assertEqual(m, expected)
def test_register_abc(self):
c = coll_abc
d = {"a": "b"}
l = [1, 2, 3]
s = set([object(), None])
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = coll_abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
<EMAIL>
class C(object):
pass
c.Container.register(C)
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
if sys.version_info < (3,):
class A(object):
__metaclass__ = MetaA
else:
"""
class A(metaclass=MetaA):
pass
"""
A = MetaA('A', (), {})
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = coll_abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P(object):
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class '{prefix}.Container'> "
"or <class '{prefix}.Iterable'>").format(prefix=abcoll_prefix),
("Ambiguous dispatch: <class '{prefix}.Iterable'> "
"or <class '{prefix}.Container'>").format(prefix=abcoll_prefix)),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class '{prefix}.Container'> "
"or <class '{prefix}.Sized'>").format(prefix=abcoll_prefix),
("Ambiguous dispatch: <class '{prefix}.Sized'> "
"or <class '{prefix}.Container'>").format(prefix=abcoll_prefix)),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S(object):
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U(object):
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class '{prefix}.Container'> "
"or <class '{prefix}.Sized'>").format(prefix=abcoll_prefix),
("Ambiguous dispatch: <class '{prefix}.Sized'> "
"or <class '{prefix}.Container'>").format(prefix=abcoll_prefix)),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
UserDict.__init__(self, *args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = coll_abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X(object):
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
#@i.register
#def _(arg: collections.abc.Mapping)
def _(arg):
return "mapping"
_.__annotations__ = dict(arg=coll_abc.Mapping)
i.register(_)
<EMAIL>
#def _(arg: "collections.abc.Sequence"):
def _(arg):
return "sequence"
_.__annotations__ = dict(arg=coll_abc.Sequence)
i.register(_)
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
if sys.version_info < (3,):
# the rest of this test fails on Python 2
return
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A(object):
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A(object):
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A(object):
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A(object):
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A(object):
@functools.singledispatchmethod
def t(self, arg):
return "base"
# @t.register
# def _(self, arg: int):
def _(self, arg):
return "int"
_.__annotations__ = dict(arg=int)
t.register(_)
# @t.register
# def _(self, arg: str):
def _(self, arg):
return "str"
_.__annotations__ = dict(arg=str)
t.register(_)
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
scope = "TestSingleDispatch.test_invalid_registrations.<locals>." * six.PY3
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function " + scope + "_"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
# @i.register
# def _(arg: typing.Iterable[str]):
def _(arg):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
_.__annotations__ = dict(arg=typing.Iterable[str])
i.register(_)
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[' + str.__name__ + '] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
def _mro_compat(classes):
    """Adjust an expected-MRO list for Python >= 3.6, which added the
    collections.abc.Collection and Reversible ABCs to the hierarchy.

    Mutates *classes* in place and returns it.  Assumes coll_abc.Mapping is
    present in *classes*; Sequence is optional.
    """
    # Hoisted to the top of the function (the original imported contextlib
    # between two logic statements mid-function).
    import contextlib
    if sys.version_info < (3, 6):
        return classes
    # Collection is inserted right after Mapping.
    coll_idx = classes.index(coll_abc.Mapping) + 1
    classes[coll_idx:coll_idx] = [coll_abc.Collection]
    # Reversible is inserted right after Sequence; not every expected MRO
    # contains Sequence, hence the suppressed ValueError.
    with contextlib.suppress(ValueError):
        rev_idx = classes.index(coll_abc.Sequence) + 1
        classes[rev_idx:rev_idx] = [coll_abc.Reversible]
    return classes
# Run the full test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""Load transformations from URDF files.
See :doc:`transform_manager` for more information.
"""
import os
import numpy as np
from bs4 import BeautifulSoup
from .transform_manager import TransformManager
from .transformations import transform_from, concat
from .rotations import active_matrix_from_extrinsic_roll_pitch_yaw, matrix_from_axis_angle, norm_vector
from .plot_utils import make_3d_axis, plot_mesh, plot_cylinder, plot_sphere, plot_box
class UrdfTransformManager(TransformManager):
"""Transformation manager that can load URDF files.
URDF is the `Unified Robot Description Format <http://wiki.ros.org/urdf>`_.
URDF allows to define joints between links that can be rotated about one
axis. This transformation manager allows to set the joint angles after
joints have been added or loaded from an URDF.
.. warning::
Note that this module requires the Python package beautifulsoup4.
.. note::
Joint angles must be given in radians.
Parameters
----------
strict_check : bool, optional (default: True)
Raise a ValueError if the transformation matrix is not numerically
close enough to a real transformation matrix. Otherwise we print a
warning.
check : bool, optional (default: True)
Check if transformation matrices are valid and requested nodes exist,
which might significantly slow down some operations.
"""
def __init__(self, strict_check=True, check=True):
super(UrdfTransformManager, self).__init__(strict_check, check)
self._joints = {}
self.collision_objects = []
self.visuals = []
self.mesh_path = None
self.package_dir = None
def add_joint(self, joint_name, from_frame, to_frame, child2parent, axis,
limits=(float("-inf"), float("inf")), joint_type="revolute"):
"""Add joint.
Parameters
----------
joint_name : string
Name of the joint
from_frame : string
Child link of the joint
to_frame : string
Parent link of the joint
child2parent : array-like, shape (4, 4)
Transformation from child to parent
axis : array-like, shape (3,)
Rotation axis of the joint (defined in the child frame)
limits : pair of float, optional (default: (-inf, inf))
Lower and upper joint angle limit
joint_type : str, optional (default: 'revolute')
Joint type: revolute or prismatic (continuous is the same as
revolute)
"""
self.add_transform(from_frame, to_frame, child2parent)
self._joints[joint_name] = (from_frame, to_frame, child2parent, norm_vector(axis),
limits, joint_type)
def set_joint(self, joint_name, value):
"""Set joint position.
Note that joint values are clipped to their limits.
Parameters
----------
joint_name : string
Name of the joint
value : float
Joint angle in radians in case of revolute joints or position
in case of prismatic joint.
"""
if joint_name not in self._joints:
raise KeyError("Joint '%s' is not known" % joint_name)
from_frame, to_frame, child2parent, axis, limits, joint_type = self._joints[joint_name]
# this is way faster than np.clip:
value = min(max(value, limits[0]), limits[1])
if joint_type == "revolute":
joint_rotation = matrix_from_axis_angle(
np.hstack((axis, (value,))))
joint2A = transform_from(
joint_rotation, np.zeros(3), strict_check=self.strict_check)
else:
assert joint_type == "prismatic"
joint_offset = value * axis
joint2A = transform_from(
np.eye(3), joint_offset, strict_check=self.strict_check)
self.add_transform(from_frame, to_frame, concat(
joint2A, child2parent, strict_check=self.strict_check,
check=self.check))
def get_joint_limits(self, joint_name):
"""Get limits of a joint.
Parameters
----------
joint_name : string
Name of the joint
Returns
-------
limits : pair of float
Lower and upper joint angle limit
"""
if joint_name not in self._joints:
raise KeyError("Joint '%s' is not known" % joint_name)
return self._joints[joint_name][4]
def load_urdf(self, urdf_xml, mesh_path=None, package_dir=None):
    """Load URDF file into transformation manager.

    Parameters
    ----------
    urdf_xml : str
        Robot definition in URDF

    mesh_path : str, optional (default: None)
        Path in which we search for meshes that are defined in the URDF.
        Meshes will be ignored if it is set to None and no 'package_dir'
        is given.

    package_dir : str, optional (default: None)
        Some URDFs start file names with 'package://' to refer to the ROS
        package in which these files (textures, meshes) are located. This
        variable defines to which path this prefix will be resolved.

    Raises
    ------
    UrdfException
        If the robot tag or its 'name' attribute is missing.
    """
    self.mesh_path = mesh_path
    self.package_dir = package_dir
    urdf = BeautifulSoup(urdf_xml, "xml")
    # URDF XML schema:
    # https://github.com/ros/urdfdom/blob/master/xsd/urdf.xsd
    robot = urdf.find("robot")
    if robot is None:
        raise UrdfException("Robot tag is missing.")
    if not robot.has_attr("name"):
        raise UrdfException("Attribute 'name' is missing in robot tag.")
    robot_name = robot["name"]
    # Materials are parsed first so that link visuals can resolve
    # named color references.
    materials = dict([
        self._parse_material(material)
        for material in robot.findAll("material", recursive=False)])
    links = [self._parse_link(link, materials)
             for link in robot.findAll("link", recursive=False)]
    joints = [self._parse_joint(joint, links)
              for joint in robot.findAll("joint", recursive=False)]
    # NOTE(review): assumes the first link in the document is the robot's
    # base/root link — TODO confirm for URDFs that list links out of order.
    self.add_transform(links[0], robot_name, np.eye(4))
    for joint in joints:
        # 'continuous' is treated as an unlimited revolute joint.
        if joint.joint_type in ["revolute", "continuous"]:
            self.add_joint(
                joint.joint_name, joint.child, joint.parent,
                joint.child2parent, joint.joint_axis, joint.limits,
                "revolute")
        elif joint.joint_type == "prismatic":
            self.add_joint(
                joint.joint_name, joint.child, joint.parent,
                joint.child2parent, joint.joint_axis, joint.limits,
                "prismatic")
        else:
            # _parse_joint already rejected planar/floating/unknown types.
            assert joint.joint_type == "fixed"
            self.add_transform(
                joint.child, joint.parent, joint.child2parent)
def _parse_material(self, material):
    """Parse a material tag into a (name, color) pair.

    Raises UrdfException when the name is missing or more than one
    color tag is present. The color is None when no color is given.
    """
    if not material.has_attr("name"):
        raise UrdfException("Material name is missing.")
    colors = material.findAll("color")
    if len(colors) > 1:
        raise UrdfException("More than one color is not allowed.")
    color = self._parse_color(colors[0]) if colors else None
    # TODO texture is currently ignored
    return material["name"], color
def _parse_color(self, color):
"""Parse color."""
if not color.has_attr("rgba"):
raise UrdfException("Attribute 'rgba' of color tag is missing.")
return np.fromstring(color["rgba"], sep=" ")
def _parse_link(self, link, materials):
    """Register a link's visuals and collision objects, return its name.

    Raises UrdfException when the link has no name.
    """
    if not link.has_attr("name"):
        raise UrdfException("Link name is missing.")
    visuals = self._parse_link_children(link, "visual", materials)
    self.visuals.extend(visuals)
    # Collision objects never carry materials, hence the empty dict.
    collisions = self._parse_link_children(link, "collision", dict())
    self.collision_objects.extend(collisions)
    return link["name"]
def _parse_link_children(self, link, child_type, materials):
    """Parse collision objects or visuals.

    Parameters
    ----------
    link : tag object
        The <link> tag to scan.
    child_type : str
        Either 'visual' or 'collision'.
    materials : dict
        Maps material names to colors; used to resolve named material
        references on visuals. Pass an empty dict for collisions.

    Returns
    -------
    shape_objects : list
        Parsed shape objects (Box/Sphere/Cylinder/Mesh instances).
    """
    children = link.findAll(child_type)
    shape_objects = []
    for i, child in enumerate(children):
        # Build a unique frame name; fall back to the child's index
        # when it has no explicit name.
        if child.has_attr("name"):
            name = "%s:%s/%s" % (child_type, link["name"], child["name"])
        else:
            name = "%s:%s/%s" % (child_type, link["name"], i)
        color = None
        if child_type == "visual":
            material = child.find("material")
            if material is not None:
                material_name, color = self._parse_material(material)
                # An inline color overrides a named reference; only
                # look up the registry when no inline color was given.
                if color is None and material_name in materials:
                    color = materials[material_name]
        # Each visual/collision gets its own frame relative to the link.
        child2link = self._parse_origin(child)
        self.add_transform(name, link["name"], child2link)
        shape_objects.extend(self._parse_geometry(child, name, color))
    return shape_objects
def _parse_geometry(self, child, name, color):
    """Parse geometric primitives (box, cylinder, sphere) or meshes.

    Returns a list of shape objects; raises UrdfException when the
    geometry tag is missing entirely.
    """
    geometry = child.find("geometry")
    if geometry is None:
        raise UrdfException("Missing geometry tag in link '%s'" % name)
    shapes = []
    for shape_type in ("box", "cylinder", "sphere", "mesh"):
        cls = shape_classes[shape_type]
        for tag in geometry.findAll(shape_type):
            shape_object = cls(
                name, mesh_path=self.mesh_path,
                package_dir=self.package_dir, color=color)
            shape_object.parse(tag)
            shapes.append(shape_object)
    return shapes
def _parse_joint(self, joint, links):
    """Create joint object.

    Parameters
    ----------
    joint : tag object
        The <joint> tag.
    links : list of str
        Names of all parsed links; parent/child references must be
        contained in this list.

    Returns
    -------
    j : Joint
        Temporary joint description.

    Raises
    ------
    UrdfException
        On a missing name/type/parent/child, an undefined link
        reference, or an unsupported joint type.
    """
    j = Joint()
    if not joint.has_attr("name"):
        raise UrdfException("Joint name is missing.")
    j.joint_name = joint["name"]
    if not joint.has_attr("type"):
        raise UrdfException("Joint type is missing in joint '%s'."
                            % j.joint_name)
    parent = joint.find("parent")
    if parent is None:
        raise UrdfException("No parent specified in joint '%s'"
                            % j.joint_name)
    if not parent.has_attr("link"):
        raise UrdfException("No parent link name given in joint '%s'."
                            % j.joint_name)
    j.parent = parent["link"]
    if j.parent not in links:
        raise UrdfException("Parent link '%s' of joint '%s' is not "
                            "defined." % (j.parent, j.joint_name))
    child = joint.find("child")
    if child is None:
        raise UrdfException("No child specified in joint '%s'"
                            % j.joint_name)
    if not child.has_attr("link"):
        raise UrdfException("No child link name given in joint '%s'."
                            % j.joint_name)
    j.child = child["link"]
    if j.child not in links:
        raise UrdfException("Child link '%s' of joint '%s' is not "
                            "defined." % (j.child, j.joint_name))
    j.joint_type = joint["type"]
    # planar/floating are valid URDF but not supported by this manager.
    if j.joint_type in ["planar", "floating"]:
        raise UrdfException("Unsupported joint type '%s'" % j.joint_type)
    elif j.joint_type not in ["revolute", "continuous", "prismatic", "fixed"]:
        raise UrdfException("Joint type '%s' is not allowed in a URDF "
                            "document." % j.joint_type)
    j.child2parent = self._parse_origin(joint)
    # Default axis when no <axis> tag is present.
    j.joint_axis = np.array([1, 0, 0])
    if j.joint_type in ["revolute", "continuous", "prismatic"]:
        axis = joint.find("axis")
        if axis is not None and axis.has_attr("xyz"):
            j.joint_axis = np.fromstring(axis["xyz"], sep=" ")
    j.limits = self._parse_limits(joint)
    return j
def _parse_origin(self, entry):
    """Parse an origin tag into a homogeneous transformation.

    Parameters
    ----------
    entry : tag object
        Tag that may contain an <origin> child with 'xyz' (translation)
        and/or 'rpy' (roll-pitch-yaw) attributes.

    Returns
    -------
    origin2frame : array, shape (4, 4)
        Transformation built from the parsed rotation and translation;
        the identity when no origin tag is present.
    """
    origin = entry.find("origin")
    translation = np.zeros(3)
    rotation = np.eye(3)
    if origin is not None:
        if origin.has_attr("xyz"):
            # np.fromstring(text, sep=...) is deprecated since NumPy
            # 1.14; split-and-convert is the supported replacement.
            translation = np.array(origin["xyz"].split(), dtype=float)
        if origin.has_attr("rpy"):
            roll_pitch_yaw = np.array(origin["rpy"].split(), dtype=float)
            # URDF and KDL use the active convention for rotation matrices.
            # For more details on how the URDF parser handles the conversion
            # from Euler angles, see this blog post:
            # https://orbitalstation.wordpress.com/tag/quaternion/
            rotation = active_matrix_from_extrinsic_roll_pitch_yaw(
                roll_pitch_yaw)
    return transform_from(
        rotation, translation, strict_check=self.strict_check)
def _parse_limits(self, joint):
"""Parse joint limits."""
limit = joint.find("limit")
lower, upper = float("-inf"), float("inf")
if limit is not None:
if limit.has_attr("lower"):
lower = float(limit["lower"])
if limit.has_attr("upper"):
upper = float(limit["upper"])
return lower, upper
def plot_visuals(self, frame, ax=None, ax_s=1, wireframe=False, convex_hull_of_mesh=True, alpha=0.3):
    """Plot all visuals in a given reference frame.

    Visuals can be boxes, spheres, cylinders, or meshes. Note that
    visuals that cannot be connected to the reference frame are omitted.

    Parameters
    ----------
    frame : string
        Reference frame
    ax : Matplotlib 3d axis, optional (default: None)
        If the axis is None, a new 3d axis will be created
    ax_s : float, optional (default: 1)
        Scaling of the new matplotlib 3d axis
    wireframe : bool, optional (default: False)
        Plot wireframe (surface otherwise)
    convex_hull_of_mesh : bool, optional (default: True)
        Displays convex hull of meshes instead of the original mesh. This
        makes plotting a lot faster with complex meshes.
    alpha : float, optional (default: 0.3)
        Alpha value of the surface / wireframe that will be plotted

    Returns
    -------
    ax : Matplotlib 3d axis
        New or old axis
    """
    # Delegates to the shared plotting helper with the visuals list.
    return self._plot_objects(
        self.visuals, frame, ax=ax, ax_s=ax_s, wireframe=wireframe,
        convex_hull_of_mesh=convex_hull_of_mesh, alpha=alpha)
def plot_collision_objects(self, frame, ax=None, ax_s=1, wireframe=True, convex_hull_of_mesh=True, alpha=1.0):
    """Plot all collision objects in a given reference frame.

    Collision objects can be boxes, spheres, cylinders, or meshes. Note
    that collision objects that cannot be connected to the reference
    frame are omitted.

    Parameters
    ----------
    frame : string
        Reference frame
    ax : Matplotlib 3d axis, optional (default: None)
        If the axis is None, a new 3d axis will be created
    ax_s : float, optional (default: 1)
        Scaling of the new matplotlib 3d axis
    wireframe : bool, optional (default: True)
        Plot wireframe (surface otherwise)
    convex_hull_of_mesh : bool, optional (default: True)
        Displays convex hull of meshes instead of the original mesh. This
        makes plotting a lot faster with complex meshes.
    alpha : float, optional (default: 1)
        Alpha value of the surface / wireframe that will be plotted

    Returns
    -------
    ax : Matplotlib 3d axis
        New or old axis
    """
    # Delegates to the shared plotting helper with the collision list.
    return self._plot_objects(
        self.collision_objects, frame, ax=ax, ax_s=ax_s,
        wireframe=wireframe, convex_hull_of_mesh=convex_hull_of_mesh,
        alpha=alpha)
def _plot_objects(self, objects, frame, ax=None, ax_s=1, wireframe=True, convex_hull_of_mesh=True, alpha=1.0):
    """Plot a collection of shape objects in a given reference frame.

    Parameters
    ----------
    objects : list
        Shape objects (each must provide a plot() method)
    frame : string
        Reference frame
    ax : Matplotlib 3d axis, optional (default: None)
        If the axis is None, a new 3d axis will be created
    ax_s : float, optional (default: 1)
        Scaling of the new matplotlib 3d axis
    wireframe : bool, optional (default: True)
        Plot wireframe (surface otherwise)
    convex_hull_of_mesh : bool, optional (default: True)
        Displays convex hull of meshes instead of the original mesh. This
        makes plotting a lot faster with complex meshes.
    alpha : float, optional (default: 1)
        Alpha value of the surface / wireframe that will be plotted

    Returns
    -------
    ax : Matplotlib 3d axis
        New or old axis
    """
    axis = ax if ax is not None else make_3d_axis(ax_s)
    for shape in objects:
        axis = shape.plot(
            self, frame, axis, wireframe=wireframe,
            convex_hull=convex_hull_of_mesh, alpha=alpha)
    return axis
class Joint(object):
    """Joint from URDF file.

    Temporary container that only exists while a URDF document is
    parsed.

    Attributes
    ----------
    child : string
        Name of the child
    parent : string
        Name of the parent frame
    child2parent : array-like, shape (4, 4)
        Transformation from child to parent
    joint_name : string
        Name of the joint that defines the transformation
    joint_axis : array-like, shape (3,)
        Rotation axis of the joint (defined in the child frame)
    joint_type : string
        Either 'fixed' or 'revolute'
    limits : pair of float
        Lower and upper joint angle limit
    """
    def __init__(self):
        self.joint_name = None
        self.joint_type = "fixed"
        self.joint_axis = None
        self.child = None
        self.parent = None
        self.child2parent = np.eye(4)
        self.limits = (float("-inf"), float("inf"))
class Box(object):
    """Box shape parsed from URDF.

    Parameters
    ----------
    frame : string
        Name of the frame this shape is attached to.
    mesh_path, package_dir : string or None
        Unused for boxes; accepted to keep all shape classes'
        constructors interchangeable (see shape_classes).
    color : color specification or None
        Color used when plotting; black when None.
    """
    def __init__(self, frame, mesh_path, package_dir, color):
        self.frame = frame
        self.color = color
        self.size = np.zeros(3)

    def parse(self, box):
        """Read the optional 'size' attribute (three floats)."""
        if box.has_attr("size"):
            # np.fromstring(text, sep=...) is deprecated since NumPy
            # 1.14; split-and-convert yields the same values.
            self.size[:] = [float(v) for v in box["size"].split()]

    def plot(self, tm, frame, ax=None, alpha=0.3, wireframe=True, convex_hull=True):
        """Plot the box in the given reference frame."""
        A2B = tm.get_transform(self.frame, frame)
        color = self.color if self.color is not None else "k"
        return plot_box(
            ax, self.size, A2B, wireframe=wireframe, alpha=alpha, color=color)
class Sphere(object):
    """Sphere shape parsed from URDF.

    mesh_path and package_dir are unused for spheres; they are accepted
    to keep all shape classes' constructors interchangeable.
    """
    def __init__(self, frame, mesh_path, package_dir, color):
        self.frame = frame
        self.color = color
        self.radius = 0.0

    def parse(self, sphere):
        """Read the mandatory 'radius' attribute."""
        if not sphere.has_attr("radius"):
            raise UrdfException("Sphere has no radius.")
        self.radius = float(sphere["radius"])

    def plot(self, tm, frame, ax=None, alpha=0.3, wireframe=True, convex_hull=True):
        """Plot the sphere in the given reference frame."""
        center = tm.get_transform(self.frame, frame)[:3, 3]
        plot_color = "k" if self.color is None else self.color
        return plot_sphere(
            ax, self.radius, center, wireframe=wireframe, alpha=alpha,
            color=plot_color)
class Cylinder(object):
    """Cylinder shape parsed from URDF.

    mesh_path and package_dir are unused for cylinders; they are
    accepted to keep all shape classes' constructors interchangeable.
    """
    def __init__(self, frame, mesh_path, package_dir, color):
        self.frame = frame
        self.color = color
        self.radius = 0.0
        self.length = 0.0

    def parse(self, cylinder):
        """Read the mandatory 'radius' and 'length' attributes."""
        if not cylinder.has_attr("radius"):
            raise UrdfException("Cylinder has no radius.")
        self.radius = float(cylinder["radius"])
        if not cylinder.has_attr("length"):
            raise UrdfException("Cylinder has no length.")
        self.length = float(cylinder["length"])

    def plot(self, tm, frame, ax=None, alpha=0.3, wireframe=True, convex_hull=True):
        """Plot the cylinder in the given reference frame."""
        A2B = tm.get_transform(self.frame, frame)
        plot_color = "k" if self.color is None else self.color
        return plot_cylinder(
            ax, self.length, self.radius, 0.0, A2B, wireframe=wireframe,
            alpha=alpha, color=plot_color)
class Mesh(object):
    """Mesh shape parsed from URDF.

    Parameters
    ----------
    frame : string
        Name of the frame this shape is attached to.
    mesh_path : string or None
        Directory prepended to the mesh filename.
    package_dir : string or None
        Replacement for the 'package://' prefix in mesh filenames.
    color : color specification or None
        Color used when plotting; black when None.
    """
    def __init__(self, frame, mesh_path, package_dir, color):
        self.frame = frame
        self.mesh_path = mesh_path
        self.package_dir = package_dir
        self.color = color
        self.filename = None
        self.scale = np.ones(3)

    def parse(self, mesh):
        """Resolve the mesh filename and optional per-axis scale.

        The filename is only resolved when either mesh_path or
        package_dir is set; otherwise the mesh is ignored
        (filename stays None).
        """
        if self.mesh_path is None and self.package_dir is None:
            self.filename = None
        else:
            if not mesh.has_attr("filename"):
                raise UrdfException("Mesh has no filename.")
            if self.mesh_path is not None:
                self.filename = os.path.join(self.mesh_path, mesh["filename"])
            else:
                assert self.package_dir is not None
                self.filename = mesh["filename"].replace(
                    "package://", self.package_dir)
            if mesh.has_attr("scale"):
                # np.fromstring(text, sep=...) is deprecated since
                # NumPy 1.14; split-and-convert is the replacement.
                self.scale = np.array(mesh["scale"].split(), dtype=float)

    def plot(self, tm, frame, ax=None, alpha=0.3, wireframe=True, convex_hull=True):
        """Plot the mesh in the given reference frame."""
        A2B = tm.get_transform(self.frame, frame)
        color = self.color if self.color is not None else "k"
        return plot_mesh(
            ax, self.filename, A2B, self.scale, wireframe=wireframe,
            convex_hull=convex_hull, alpha=alpha, color=color)
# Dispatch table mapping URDF geometry tag names to their shape classes;
# used by _parse_geometry to instantiate the right object per geometry child.
shape_classes = {"box": Box,
                 "sphere": Sphere,
                 "cylinder": Cylinder,
                 "mesh": Mesh}
class UrdfException(Exception):
    """Raised when parsing a URDF document fails."""
|
import time
#modes = {
# "VACANT", #There is no train in this block
# "ENTERING", #I'm still considered empty, but I should start moving to help train in previous block depart
# "APPROACHING", #Previous block is now empty, I own the train and am waiting to sense it
# "HOLDING", #I can sense the train and the next block is occupied
# "DEPARTING", #I can see the train and the next block is free
# "UNKNOWN" #Initial state. Defaults to holding behavior
# }
#Accepts a list of blocks (optimally in reverse order)
def autoRun(coaster):
    # Main control loop: never returns. Runs in two phases per tick so every
    # block sees a consistent snapshot of its neighbor's status before any
    # block acts. Callers rely on the E-stop exceptions raised inside
    # setStatus/setAction to break out of the loop.
    while True:
        # Phase 1: each block updates its status from its sensor and heir.
        for block in coaster:
            block.setStatus()
        # NOTE(review): 'updatestring' only feeds the commented-out debug
        # print below; it is otherwise unused.
        updatestring = str(time.time()) + ":"
        # Phase 2: each block starts/stops its drive according to status.
        for block in coaster:
            block.setAction()
            #updatestring += " " + block.name + ": " + block.status
            #block.moveTrain()
        #print(updatestring)
class AbstractBlock:
    """One block section of a block-signalled coaster.

    Status values (see the mode table comment above): VACANT, ENTERING,
    APPROACHING, HOLDING, DEPARTING, UNKNOWN. Subclasses must override
    sense(), stopTrain() and moveTrain(); the base implementations
    trigger an emergency stop and raise NotImplementedError.

    BUGFIX: all string status comparisons previously used 'is', which
    only worked because CPython interns short string literals (and emits
    a SyntaxWarning on Python 3.8+); they now use '=='.
    """
    def __init__(self, blockname = "Unnamed Block"):
        self.name = blockname          # human-readable block name for messages
        self.status = "UNKNOWN"        # resolved on the first setStatus() call
        self.EStopFlag = False         # True once this block heard an E-stop

    #Public Methods
    #Call this method to link this block to the next one along the coaster. All blocks have to be linked in order for the coaster to run
    def isFollowedBy(self, nextBlock):
        self.heir = nextBlock

    #Emergency stop procedure. Stop any motion, mark that I've heard the order, and relay the order to my neighbor
    #Continues until all blocks have heard, then an exception should be raised right after this method
    def E_STOP(self):
        if not self.EStopFlag:
            self.stopTrain()
            self.EStopFlag = True
            self.heir.E_STOP()

    #The block that I am an heir to uses this method to tell me that it's handing me a train, or that it has finished doing so
    def handoff(self, active):
        if active:
            if self.status in ("VACANT", "ENTERING"):
                self.status = "ENTERING"
            else:
                self.E_STOP()
                raise RuntimeError(self.name + ": E-STOPPED because I was asked to accept a train but I am in " + self.status)
        else:
            if self.status == "ENTERING":
                self.status = "APPROACHING"

    def setStatus(self):
        """Update my status from my sensor and my heir's status."""
        try:
            self.heir.status
        # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt
        # and masked unrelated errors raised by a property.
        except AttributeError:
            raise RuntimeError(self.name + ": Failed while trying to get the status. Check if an heir has been assigned to this block.")
        if self.sense():  #Look for the train. If we see something...
            if self.status == "UNKNOWN":  #When script starts, make sure all trains are seen by the sensor in their respective blocks
                self.status = "HOLDING"
            elif self.status == "VACANT":
                self.E_STOP()
                raise RuntimeError(self.name + ": E-STOPPED because I saw a train when I was supposed to be VACANT")
            else:
                if self.heir.status in ("VACANT", "ENTERING"):
                    self.status = "DEPARTING"
                    self.heir.handoff(True)
                else:
                    if self.status == "DEPARTING":
                        #Ensure that if a train is sensed by one block, it can't be sensed by the next. Otherwise it could lock up here.
                        self.E_STOP()
                        raise Warning(self.name + ": I'm stuck because the next block saw the train too early as I was trying to depart")
                    self.status = "HOLDING"
        else:  #If we don't see a train anywhere...
            if self.status == "UNKNOWN":
                self.status = "VACANT"
            elif self.status == "HOLDING":
                self.E_STOP()
                raise RuntimeError(self.name + ":E-STOPPED because I lost sight of the train that I was supposed to be HOLDING")
            elif self.status == "DEPARTING":
                self.status = "VACANT"
                self.heir.handoff(False)

    def setAction(self):
        """Start or stop the train drive according to my current status."""
        if self.status == "UNKNOWN":
            #This really should never happen
            self.E_STOP()
            raise Warning(self.name + ": I was told to act but I am in an UNKNOWN state for some reason. Defaulting to holding behavior...")
        if self.status in ("HOLDING", "VACANT"):
            self.stopTrain()
        else:
            self.moveTrain()

    #Virtual/Private:
    # NOTE(review): calling these base implementations triggers E_STOP, which
    # itself calls stopTrain — on an unmodified base instance that recurses;
    # subclasses are expected to override all three before use.
    def sense(self):
        self.E_STOP()
        raise NotImplementedError(self.name + ": sense method should be overriden in a derived block class")

    def stopTrain(self):
        self.E_STOP()
        raise NotImplementedError(self.name + ": stopTrain method should be overriden in a derived block class")

    def moveTrain(self):
        self.E_STOP()
        raise NotImplementedError(self.name + ": moveTrain method should be overriden in a derived block class")
|
import re
from atavism.http11.content import Content
from atavism.http11.headers import Headers
from atavism.http11.range import Range
class BaseHttp(object):
    """ Base class for other HTTP transactional classes. This class tries
    to provide the core functionality for various classes.

    Wraps a Headers object and a Content object and coordinates header
    emission, range parsing, content-length/encoding detection and
    incremental reading/writing of the message body.
    """
    # Matches one "start-end" element of a byte-range spec (either bound
    # optional), e.g. "0-499", "500-", "-200"; used by parse_ranges.
    RANGE_re = re.compile(r"([0-9]+)?-([0-9]+)?,?")

    def __init__(self):
        self.path = ''                 # request path (filled in by subclasses)
        self.http = 'HTTP/1.1'         # protocol version string
        self.header = Headers()        # message headers
        self._content = Content()      # message body (possibly chunked/compressed)
        self.headers_sent = False      # True once next_output() emitted headers
        self.headers_only = False      # True for header-only messages (e.g. HEAD)
        self.ranges = []               # parsed Range objects from a Range header

    def __len__(self):
        # Length of the body only; headers are not counted.
        return len(self._content)

    def get(self, key, default=None):
        """Return a header value, or *default* when absent."""
        return self.header.get(key, default)

    def add_header(self, key, val):
        self.header.add_header(key, val)

    def add_headers(self, hdr_dict):
        self.header.add_headers(hdr_dict)

    ### Connection Information
    @property
    def is_keepalive(self):
        # Keep-alive is the HTTP/1.1 default; only an explicit
        # 'Connection: close' disables it.
        return False if self.header.get('connection', '').lower() == 'close' else True

    def send_complete(self):
        """Has everything (headers and all chained content parts) been sent?"""
        ct = self._content
        # Walk to the last part of the content chain; only its
        # send_complete flag matters.
        while ct._next is not None:
            ct = ct._next
        if self.headers_sent and ct.send_complete:
            return True
        return False

    def next_output(self):
        """Return the next chunk of bytes to write to the socket.

        Emits the serialized headers first (once); afterwards drains the
        content object incrementally.
        """
        data = b''
        if not self.headers_sent:
            data += str(self.header).encode()
            self.headers_sent = True
            # NOTE(review): for header-only messages the body is marked
            # finished and only the headers are returned — assumed to be
            # nested under the headers branch; confirm against upstream.
            if self.headers_only:
                self._content.finished = True
                return data
        data += self._content.next(len(data))
        return data

    ### Ranges
    def parse_ranges(self, key):
        """ Parse a range request into individual byte ranges. """
        self.ranges = []
        poss = self.get(key)
        if poss is None or poss.lower() == 'none':
            return
        # Only byte ranges are supported; anything else is ignored.
        if not poss.startswith("bytes="):
            return
        matches = self.RANGE_re.findall(poss[6:])
        for m in matches:
            self.ranges.append(Range(m))

    def add_range(self, start=None, end=None):
        """Add a single (start, end) byte range; no-op when both are None."""
        if start is None and end is None:
            return
        self.ranges.append(Range((start, end)))

    def has_ranges(self):
        return len(self.ranges) > 0

    def set_ranges(self, ranges):
        self.ranges = ranges

    ### Content
    @property
    def content(self):
        # Raw (possibly still encoded) body.
        return self._content.content

    def decoded_content(self):
        # Body after reversing any content-encoding.
        return self._content.decoded_content()

    def set_content(self, cntnt_obj):
        # Replaces the whole Content object, not just its payload.
        self._content = cntnt_obj

    def set_content_type(self, ct):
        self._content.content_type = ct

    def read_content(self, cntnt):
        """Feed raw bytes from the wire into headers, then body.

        Returns the number of bytes consumed by the header parser; the
        remainder of *cntnt* is forwarded to the content object.
        """
        r = 0
        if not self.header.finished:
            r = self.header.read_content(cntnt)
            # Once the headers complete, configure the content object
            # (length detection, ranges, compression) before body bytes
            # are processed.
            if self.header.finished:
                self._update_content()
        r += self._content.read_content(cntnt[r:])
        return r

    def _update_content(self):
        """Configure the content object from the now-complete headers."""
        self._content.content_type = self.header.get('content-type')
        te = self.get('transfer-encoding')
        cl = self.get('content-length')
        # Set how we will detect the length of content...
        if te is not None and te.lower() == 'chunked':
            self._content.content_sz = 'chunked'
        elif cl is not None:
            self._content.content_sz = cl
        else:
            # Neither header present: length is unknown (e.g. read until
            # the connection closes).
            self._content.content_sz = None
        rngs = self.get('range')
        if rngs is not None:
            self.parse_ranges('range')
        ce = self.get('content-encoding')
        if ce is not None and ce.lower() != 'identity':
            self._content.set_compression(ce)

    def set_compression(self, method):
        self._content.set_compression(method)

    def _complete(self):
        """ Record that the creation of a response/request is complete. """
        self._content.finished = True
        self._content.compress()
        # Content-derived headers (length, encoding, ...) are appended last.
        self.header.add_headers(self._content.header_lines())

    def is_complete(self):
        """ Has the entire input stream been seen?
        :return: True or False
        """
        if self.header.finished is False:
            return False
        elif not self.header.needs_content:
            # Messages without a body are complete as soon as the
            # headers are.
            return True
        return self._content.finished

    def add_content(self, cntnt):
        self._content.add_content(cntnt)
|
<reponame>sfpd/rlreloaded
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import os.path as osp
import sys,traceback
from StringIO import StringIO
import codecs
# Shared globals namespace: code run by the 'exec' directive defines names
# here and the :eval: role evaluates expressions against it, so state
# persists between directives within one documentation build.
NAMESPACE = {}
def eval_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Evaluate *text* as a Python expression in the shared NAMESPACE and
    return its text representation as a docutils Text node.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    # SECURITY: eval() executes arbitrary code from the document source;
    # only build trusted documentation with this extension.
    # NOTE: the mutable defaults (options={}, content=[]) follow the
    # docutils role signature convention and are not mutated here.
    result = eval(text,NAMESPACE)
    # unicode() — this module targets Python 2 (see the StringIO import).
    return [nodes.Text(unicode(result))],[]
class ExecDirective(Directive):
    """Execute the specified python code and insert the output into the document"""
    # http://stackoverflow.com/questions/7250659/python-code-to-generate-part-of-sphinx-documentation-is-it-possible
    has_content = True
    def run(self):
        # Whatever the executed snippet writes to NAMESPACE["DOC"] is
        # captured here and rendered as a literal block.
        NAMESPACE["DOC"] = sio = StringIO()
        try:
            # SECURITY: executes arbitrary code from the document source.
            # Python 2 exec-statement syntax; shares NAMESPACE with eval_role.
            exec '\n'.join(self.content) in NAMESPACE
            s = unicode(sio.getvalue().decode('UTF-8'))
            if len(s) > 0:
                return [nodes.literal_block(text = s)]
            else:
                # Nothing was printed: contribute no nodes at all.
                return []
        except Exception, e:
            # Render the failure inline in the document instead of
            # aborting the whole build; full traceback goes to stderr.
            traceback.print_exc()
            return [nodes.error(None, nodes.paragraph(text = "Unable to execute python code at %s:%d:" % (osp.basename(self.src), self.srcline)), nodes.paragraph(text = str(e)))]
# See subl /Library/Python/2.7/site-packages/sphinx/directives/code.py
class LiteralInclude2(Directive):
    """
    Like ``.. include:: :literal:``, but only warns if the include file is
    not found, and does not raise errors. Also has several options for
    selecting what to include.

    Differences from Sphinx's stock ``literalinclude``: environment
    variables in the file path are expanded, and the marker options are
    ``start-before`` / ``end-after`` (the included span *includes* both
    marker lines) instead of ``start-after`` / ``end-before``.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'linenos': directives.flag,
        'tab-width': int,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'pyobject': directives.unchanged_required,
        'lines': directives.unchanged_required,
        'start-before': directives.unchanged_required,
        'end-after': directives.unchanged_required,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
        'emphasize-lines': directives.unchanged_required,
    }
    def run(self):
        document = self.state.document
        if not document.settings.file_insertion_enabled:
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        # Expand $VARS in the path — this is the extension's main addition.
        filename = osp.expandvars(self.arguments[0])
        env = document.settings.env
        if 'pyobject' in self.options and 'lines' in self.options:
            return [document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)]
        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        f = None
        try:
            f = codecs.StreamReaderWriter(open(filename, 'rb'),
                    codec_info[2], codec_info[3], 'strict')
            lines = f.readlines()
        except (IOError, OSError):
            # By design this directive warns instead of raising.
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))]
        finally:
            if f is not None:
                f.close()
        # :pyobject: — narrow the lines to one Python class/function using
        # Sphinx's source analyzer.
        objectname = self.options.get('pyobject')
        if objectname is not None:
            from sphinx.pycode import ModuleAnalyzer
            analyzer = ModuleAnalyzer.for_file(filename, '')
            tags = analyzer.find_tags()
            if objectname not in tags:
                return [document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename), line=self.lineno)]
            else:
                lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]
        # :lines: — explicit line selection.
        # NOTE(review): parselinenos is not imported in this file's visible
        # header — presumably sphinx.util.parselinenos; confirm the import.
        linespec = self.options.get('lines')
        if linespec is not None:
            try:
                linelist = parselinenos(linespec, len(lines))
            # Python 2 'except E, name' syntax.
            except ValueError, err:
                return [document.reporter.warning(str(err), line=self.lineno)]
            # just ignore nonexisting lines
            nlines = len(lines)
            lines = [lines[i] for i in linelist if i < nlines]
            if not lines:
                return [document.reporter.warning(
                    'Line spec %r: no lines pulled from include file %r' %
                    (linespec, filename), line=self.lineno)]
        linespec = self.options.get('emphasize-lines')
        if linespec:
            try:
                # Highlight positions are relative to the selected lines.
                hl_lines = [x+1 for x in parselinenos(linespec, len(lines))]
            except ValueError, err:
                return [document.reporter.warning(str(err), line=self.lineno)]
        else:
            hl_lines = None
        # NOTE(review): variable names are inherited from the stock
        # directive (start-after/end-before) although the options here are
        # start-before/end-after — both marker lines are kept in the output.
        startafter = self.options.get('start-before')
        endbefore = self.options.get('end-after')
        prepend = self.options.get('prepend')
        append = self.options.get('append')
        if startafter is not None or endbefore is not None:
            # With no start marker we include from the first line.
            use = not startafter
            res = []
            for line in lines:
                if not use and startafter and startafter in line:
                    use = True
                elif use and endbefore and endbefore in line:
                    use = False
                    # The end marker line itself is included before stopping.
                    res.append(line)
                    break
                if use:
                    res.append(line)
            lines = res
        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')
        text = ''.join(lines)
        if self.options.get('tab-width'):
            text = text.expandtabs(self.options['tab-width'])
        retnode = nodes.literal_block(text, text, source=filename)
        # set_source_info(self, retnode)
        if self.options.get('language', ''):
            retnode['language'] = self.options['language']
        if 'linenos' in self.options:
            retnode['linenos'] = True
        if hl_lines is not None:
            retnode['highlight_args'] = {'hl_lines': hl_lines}
        # env.note_dependency(rel_filename)
        return [retnode]
def setup(app):
    """Install the plugin: register the custom role and directives.

    :param app: Sphinx application context.
    """
    app.add_role('eval', eval_role)
    for directive_name, directive_cls in (('exec', ExecDirective),
                                          ('literalinclude2', LiteralInclude2)):
        app.add_directive(directive_name, directive_cls)
|
<filename>PCN/PCN_main_train.py<gh_stars>0
################################################################################
# Copyright 2021 <NAME>
# See the LICENSE file for details.
# SPDX-License-Identifier: MIT
################################################################################
import os
import sys
sys.stdout.flush()
sys.path.insert(0, "../common")
import argparse
import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.optim import Adam, RMSprop, lr_scheduler
from torch.utils.data import Dataset, DataLoader, Subset
from model_pcn import Model_PCN, Model_Transform, strip_prefix_if_present
from data_reader import Dataset_MLHDF
from file_util import *
# program arguments
parser = argparse.ArgumentParser()
# --- device selection ---
parser.add_argument("--device-name", default="cuda:0", help="use cpu or cuda:0, cuda:1 ...")
# --- dataset locations and format ---
parser.add_argument("--data-dir", default="/Users/kim63/Desktop/temp_pdbbind2019/pccnn/data", help="dataset directory")
parser.add_argument("--dataset-type", type=float, default=1, help="ml-hdf version, (1: for fusion, 1.5: for cfusion 2: ml-hdf v2)")
parser.add_argument("--mlhdf-fn", default="pdbbind2019_core_docking_ml.hdf", help="training docking ml-hdf path")
parser.add_argument("--cmlhdf-fn", default="pdbbind2019_core_crystal_ml.hdf", help="training crystal ml-hdf path")
parser.add_argument("--csv-fn", default="pdbbind2019_core_rmsd.csv", help="training csv file path")
parser.add_argument("--model-path", default="/Users/kim63/Desktop/temp_pdbbind2019/pccnn/data/pdbbind2019_core_model_20210510.pth", help="model checkpoint file path")
# --- model/input options ---
parser.add_argument("--affine-trans", default=False, action="store_true", help="use affine transformation or not")
parser.add_argument("--use-feat", default=False, action="store_true", help="use ligand and interaction feature or not")
parser.add_argument("--max-atoms", type=int, default=1000, help="maximum number of atoms")
# --- optimization hyperparameters ---
parser.add_argument("--epoch-count", type=int, default=50, help="number of training epochs")
parser.add_argument("--batch-size", type=int, default=50, help="mini-batch size")
parser.add_argument("--learning-rate", type=float, default=0.0007, help="initial learning rate")
parser.add_argument("--decay-rate", type=float, default=0.97, help="learning rate decay")
parser.add_argument("--decay-iter", type=int, default=1000, help="learning rate decay")
# --- checkpointing / hardware / logging ---
parser.add_argument("--checkpoint-iter", type=int, default=50, help="checkpoint save rate, if zero, then save only when loss decreases")
parser.add_argument("--multi-gpus", default=False, action="store_true", help="whether to use multi-gpus")
parser.add_argument("--verbose", type=int, default=0, help="print all input/output shapes or not")
args = parser.parse_args()
# set CUDA for PyTorch
use_cuda = torch.cuda.is_available()
cuda_count = torch.cuda.device_count()
if use_cuda:
    device = torch.device(args.device_name)
    # NOTE(review): assumes --device-name has the form 'cuda:N'; a plain
    # 'cuda' (no colon/index) would raise IndexError here — TODO confirm.
    torch.cuda.set_device(int(args.device_name.split(':')[1]))
else:
    device = torch.device("cpu")
print(use_cuda, cuda_count, device)
def worker_init_fn(worker_id):
    """Seed NumPy's RNG for one DataLoader worker process.

    BUGFIX: the previous implementation seeded every worker with the
    constant 0, so all workers produced identical random streams (e.g.
    identical augmentations). Seeding with the worker id keeps runs
    reproducible while giving each worker a distinct stream.
    """
    np.random.seed(worker_id)
def train():
# load dataset
csv_path = os.path.join(args.data_dir, args.csv_fn)
mlhdf_path = os.path.join(args.data_dir, args.mlhdf_fn)
cmlhdf_path = os.path.join(args.data_dir, args.cmlhdf_fn)
print(csv_path, mlhdf_path, cmlhdf_path, args.use_feat, args.affine_trans)
dataset = Dataset_MLHDF(csv_path, mlhdf_path, cmlhdf_path, feat_int=args.use_feat, affine_trans=args.affine_trans, max_atoms=args.max_atoms)
# check multi-gpus
num_workers = 0
if args.multi_gpus and cuda_count > 1:
num_workers = cuda_count
# initialize data loader
batch_count = len(dataset) // args.batch_size
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=num_workers, worker_init_fn=None)
# if validation set is available
#val_dataloader = None
#if val_dataset:
# val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=num_workers, worker_init_fn=None)
# define model
model = Model_PCN(input_dim=[args.max_atoms,22], use_feat=args.use_feat, verbose=args.verbose)
model_t = Model_Transform(verbose=args.verbose)
#if use_cuda:
# model = model.cuda()
if args.multi_gpus and cuda_count > 1:
model = nn.DataParallel(model)
model.to(device)
if isinstance(model, (DistributedDataParallel, DataParallel)):
model_to_save = model.module
else:
model_to_save = model
# set loss, optimizer, decay, other parameters
loss_fn = nn.BCELoss().float()
optimizer = Adam(model.parameters(), lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08)
#optimizer = RMSprop(model.parameters(), lr=args.learning_rate)
scheduler = lr_scheduler.StepLR(optimizer, step_size=args.decay_iter, gamma=args.decay_rate)
# load model
epoch_start = 0
if valid_file(args.model_path):
checkpoint = torch.load(args.model_path)
model_state_dict = checkpoint.pop("model_state_dict")
strip_prefix_if_present(model_state_dict, "module.")
model_to_save.load_state_dict(model_state_dict, strict=False)
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
epoch_start = checkpoint["epoch"]
loss = checkpoint["loss"]
print("checkpoint loaded: %s" % args.model_path)
if not os.path.exists(os.path.dirname(args.model_path)):
os.makedirs(os.path.dirname(args.model_path))
output_dir = os.path.dirname(args.model_path)
step = 0
epoch_losses = []
for epoch_ind in range(epoch_start, args.epoch_count):
model.train()
batch_losses = []
for batch_ind, batch in enumerate(dataloader):
# transfer to GPU
x_cpu, xf_cpu, y_cpu = batch
x = x_cpu.to(device)
y = y_cpu.to(device)
xf = xf_cpu.to(device)
#x1 = x[:,:,:,:3]
#x2 = x[:,:,:,3:]
#print(x1.shape)
#print(x2.shape)
# forward training
#trans = model_t(x1)
#x1t = torch.matmul(torch.squeeze(x1), trans)
#x1t = torch.unsqueeze(x1t, dim=1)
#x1t2 = torch.cat([x1t, x2], 3)
#yp, _ = model(x1t2)
# forward model
yp, _ = model(x, xf)
# compute loss
loss = loss_fn(yp.cpu().float(), y_cpu.float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
batch_loss = loss.cpu().data.item()
batch_losses.append(batch_loss)
print("[%d/%d-%d/%d] training, loss: %.3f, lr: %.7f" % (epoch_ind+1, args.epoch_count, batch_ind+1, batch_count, batch_loss, optimizer.param_groups[0]['lr']))
if args.checkpoint_iter > 0 and step % args.checkpoint_iter == 0:
checkpoint_dict = {
"model_state_dict": model_to_save.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": loss,
"step": step,
"epoch": epoch_ind
}
torch.save(checkpoint_dict, args.model_path)
print("checkpoint saved: %s" % args.model_path)
step += 1
epoch_loss = np.mean(batch_losses)
epoch_losses.append(epoch_loss)
print("[%d/%d] training, epoch loss: %.3f" % (epoch_ind+1, args.epoch_count, epoch_loss))
if args.checkpoint_iter == 0 and (epoch_ind == 0 or epoch_loss < epoch_losses[-1]):
checkpoint_dict = {
"model_state_dict": model_to_save.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": loss,
"step": step,
"epoch": epoch_ind
}
torch.save(checkpoint_dict, args.model_path)
print("checkpoint saved: %s" % args.model_path)
'''if val_dataset:
val_losses = []
model.eval()
with torch.no_grad():
for batch_ind, batch in enumerate(val_dataloader):
x_cpu, y_cpu = batch
x = x_cpu.to(device)
y = y_cpu.to(device)
yp, _ = model(x)
loss = loss_fn(yp.cpu().float(), y.float())
val_losses.append(loss.cpu().data.item())
print("[%d/%d-%d/%d] validation, loss: %.3f" % (epoch_ind+1, args.epoch_count, batch_ind+1, batch_count, loss.cpu().data.item()))
print("[%d/%d] validation, epoch loss: %.3f" % (epoch_ind+1, args.epoch_count, np.mean(val_losses)))'''
# close dataset
dataset.close()
#if val_dataset != None:
# val_dataset.close()
def main():
    """Entry point: run the full training procedure defined above."""
    train()
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
import itertools
import json
import os
import re
import string
import sys
# A set of type definitions: first element is regex to match, second is type converter
TYPES = {
    #'float': (r'(?:\-)?[0-9]*(?:\.[0-9]*)?(?:[eE][\-\+]?[0-9]+)?|[0-9]+', float),
    'float': (r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?|[-+]?NaN|[-+]?Infinity|[-+]?Inf', float),
    'integer': (r'[0-9]+', int),
    'word': (r'[^\s]+', str),  # one whitespace-delimited token
    'string': (r'[^\n]+', lambda s: s.strip()),  # rest of line, at least one char
    'optstring': (r'[^\n]*', lambda s: s.strip()),  # rest of line, possibly empty
    'ws': (r'\s+', str)  # raw whitespace run
}
# Single characters that should be collapsed from literal to regex. If 2 or more of one of these characters
# is seen in the literal input, it will be replaced with a regex that matches 1 or more. So for example:
# "******* Heading *******" -> "\*+ Heading \*+"
# This adds some basic robustness to layout variations.
COLLAPSE = {
    '-',
    '*',
    ' ',
    '\n'
}
# Converts a 2D table represented as a lod (list of dicts) to a dol (dict of lists). For example:
# [{x: 1, y: 2}, {x: 3, y: 6}] -> {x: [1, 3], y: [2, 6]}
def lod2dol(lod):
    """Transpose a list of row-dicts into a dict of columns.

    Column keys appear in order of first occurrence; rows that lack a key
    contribute None to that column.
    """
    columns = dict()
    # First pass: register every key so all columns get a full-length list.
    for row in lod:
        for key in row:
            columns.setdefault(key, [])
    # Second pass: fill each column row by row, padding missing keys.
    for row in lod:
        for key, column in columns.items():
            column.append(row.get(key))
    return columns
# Normalises a flat(ish) pyjson structure where hierarchy is embedded in keys using periods. For example:
# {a.b.c: 3, a.b.d: 4, a.e: 5} -> {a: {b: {c: 3, d: 4}, e: 5}}
# Doesn't handle this case - considered improper
# {a.b : 1, a: 2} -> {a: {b: 1, _default: 2}} - or something like this
def normalise_pyjson(d):
    """Expand dotted keys into nested dicts, recursively.

    Mixed inputs such as {'a.b': 1, 'a': 2} are considered improper and
    are not handled.
    """
    grouped = dict()
    for key, value in d.items():
        if '.' in key:
            # Split off the first segment; the remainder stays dotted and
            # is resolved by the recursive call below.
            head, _, rest = key.partition('.')
            grouped.setdefault(head, dict())[rest] = value
        else:
            grouped[key] = value
    result = dict()
    for key, value in grouped.items():
        result[key] = normalise_pyjson(value) if isinstance(value, dict) else value
    return result
# Takes a literal string and (i) collapses strings of length >= 2 of COLLAPSE chars
# into a regex that detects 1 or more and (ii) escapes everything else for regex
def escape_collapse(pre):
    """Turn literal text *pre* into a regex fragment.

    Runs of two or more identical COLLAPSE characters become
    `<escaped char>+` (match one or more), giving templates robustness to
    layout variations; every other character is regex-escaped verbatim.
    """
    parts = []
    # itertools.groupby yields each maximal run of identical characters,
    # replacing the hand-rolled run-tracking state machine (which had to
    # flush pending state both mid-loop and after the loop).
    for char, run in itertools.groupby(pre):
        length = sum(1 for _ in run)
        if char in COLLAPSE and length > 1:
            parts.append(re.escape(char) + "+")
        else:
            # Non-collapse chars (or a single collapse char) are emitted
            # escaped, once per occurrence — identical to the original.
            parts.append(re.escape(char) * length)
    return "".join(parts)
# Exception raised if a text does not parse according to the template given
class ParseError(ValueError):
    """Raised when an input text does not match the template's generated regex."""
    pass
# A node (also the root object) in a kind of template parse tree.
class Varspec(object):
    """One node in the template parse tree.

    vartype is one of: 'root', 'array', 'beginor'/'or'/'endor', a key of
    TYPES (leaf variable), or None for literal-only nodes.
    """
    def __init__(self, parent, pre, varname, vartype):
        self.parent = parent # reference to parent node
        self.pre = pre or '' # the text that comes immediately before this variable node
        self.varname = varname # the name of this variable
        self.vartype = vartype # the expected type of this variable
        self.children = [] # children of this node
    # Generate and return the regex for this template
    def gen_regex(self):
        """Recursively build the regex string for this subtree.

        Array nodes add one outer capture group around a non-capturing
        repeated group; leaf variables capture via their TYPES regex.
        """
        regex = escape_collapse(self.pre)
        #print("LINE", regex)
        if self.vartype == 'root':
            for c in self.children:
                regex += c.gen_regex()
        elif self.vartype == 'array':
            regex += "((?:"
            for c in self.children:
                regex += c.gen_regex()
            regex += ")+)"
        elif self.vartype == 'beginor':
            regex += "(?:(?:"
        elif self.vartype == 'or':
            regex += ")|(?:"
        elif self.vartype == 'endor':
            regex += "))"
        elif self.vartype:
            # Leaf variable: capture using the regex registered in TYPES.
            regex += "(" + TYPES[self.vartype][0] + ")"
        return regex
    # Fills the given dictionary with variables extracted in groups, the result
    # of applying regex to a matching text.
    def build(self, parent_dict, groups):
        """Consume captures from *groups* (in order) into *parent_dict*.

        *groups* is mutated: each node pops exactly the captures its regex
        produced, so sibling order must match gen_regex order.
        """
        #print(self.varname, self.vartype, groups[0] if len(groups) > 0 else 'nil')
        obj = None
        if self.vartype == 'root':
            for c in self.children:
                c.build(parent_dict, groups)
        elif self.vartype == 'array':
            obj = list()
            g_children = groups.pop(0)
            # The outer match captured the whole repeated region once, but the
            # children's groups also appear in *groups*; drop those here and
            # re-match per repetition below instead.
            groups[:] = groups[len(self.children)-1:] # swallow the child captures
            if g_children is None: # if inside a not-activated or block
                return
            regex = ""
            for c in self.children:
                regex += c.gen_regex()
            m = re.match(regex, g_children)
            while g_children and m:
                d = dict()
                c_groups = list(m.groups())
                g_children = g_children[len(m.group()):]
                for c in self.children:
                    c.build(d, c_groups)
                obj.append(d)
                m = re.match(regex, g_children)
            if self.varname:
                parent_dict[self.varname] = obj
            else: # flatten arrays
                dol = lod2dol(obj)
                parent_dict.update(dol)
        elif self.vartype in ('beginor', 'or', 'endor'):
            pass
        elif self.vartype:
            #print(self.vartype, groups)
            group = groups.pop(0)
            if self.varname and group is not None: # if inside a not-activated or block
                try:
                    # Convert the raw capture with the TYPES converter.
                    obj = TYPES[self.vartype][1](group)
                    parent_dict[self.varname] = obj
                except ValueError as e:
                    print(group)
                    raise e
def build_parser(template_filename, prefix='', loose=False):
    """Parse a template file into a Varspec tree and return its root.

    Template fields follow string.Formatter syntax: {name|type}, where type
    is a TYPES key, 'beginarray'/'endarray', or 'include <file>' to splice
    in a sub-template. *prefix* namespaces variable names (dotted) for
    included templates; *loose* strips surrounding whitespace first.
    """
    working_template_path, _ = os.path.split(template_filename)
    with open(template_filename) as f:
        template = f.read()
    if loose:
        template = template.strip()
        print("Loose whitespace")
    insertions = string.Formatter().parse(template)
    parent = Varspec(None,None,None,'root')
    for pre, spec, _, _ in insertions:
        if spec:
            varname, vartype = spec.split('|')
            if prefix:
                if varname:
                    varname = prefix + "." + varname
            if vartype == 'beginarray':
                # Descend: subsequent fields become children of the array node.
                child = Varspec(parent, pre, varname, 'array')
                parent.children.append(child)
                parent = child
            elif vartype == 'endarray':
                # Literal-only child carries the trailing text, then ascend.
                child = Varspec(parent, pre, None, None)
                parent.children.append(child)
                parent = parent.parent
            # elif vartype == 'beginor':
            #     child = Varspec(parent, pre, None, 'optionset')
            #     grandchild = Varspec(child, None, varname, 'option')
            #     parent.children.append(child)
            #     child.children.append(grandchild)
            #     parent = grandchild
            # elif vartype == 'or':
            #     parent = parent.parent
            #     grandchild = Varspec(parent, pre, None, None)
            #     parent.children.append(child)
            #     parent.parent.children.append(next_option_child)
            #     next_option_child =
            # elif vartype == 'endor':
            #     child = Varspec(parent, pre, None, None)
            #     parent.children.append(child)
            #     parent = parent.parent.parent
            elif vartype.startswith('include '):
                _, include_file = vartype.split()
                # Included templates are resolved relative to this template.
                include_file = os.path.join(working_template_path, include_file)
                if prefix:
                    newprefix = prefix + "." + varname
                else:
                    newprefix = varname
                subtree = build_parser(include_file, newprefix)
                subtree.pre = pre
                parent.children.append(subtree)
            else:
                child = Varspec(parent, pre, varname, vartype)
                parent.children.append(child)
        else:
            # Field-less segment: pure literal text node.
            child = Varspec(parent, pre, None, None)
            parent.children.append(child)
    return parent
def parse_text(parser, text, loose):
    """Apply *parser* (a Varspec root) to *text*; return the extracted dict.

    Raises ParseError when the text does not match. Always dumps the
    generated regex to "debug.regex" to aid debugging failed matches.
    """
    regex = parser.gen_regex()
    with open("debug.regex", "w") as fout:
        fout.write(regex)
    if loose:
        text = text.strip()
    match = re.match(regex, text)
    if not match:
        #print(repr(regex), repr(text))
        raise ParseError()
    result = dict()
    parser.build(result, list(match.groups()))
    return normalise_pyjson(result)
# Given a template_filename (or list of possible templates) and a text_filename, applies the template to
# the text and return the pyjson structure of variables extracted. Raises ParseError if no template
# matched.
def untemplate(template_filename, text_filename):
    """Read *text_filename* and parse it with the given template(s)."""
    with open(text_filename) as f:
        text = f.read()
    return untemplate_string(template_filename, text)
def untemplate_string(template_filename, text):
    """Try each candidate template against *text*; return the first parse.

    *template_filename* may be a single path or a list of paths. Raises
    ParseError if no template matches.
    """
    if isinstance(template_filename, str):
        candidates = [template_filename]
    else:
        candidates = template_filename
    for candidate in candidates:
        parser = build_parser(candidate, loose=False)
        try:
            return parse_text(parser, text, loose=False)
        except ParseError:
            continue
    raise ParseError()
if __name__ == "__main__":
    # Usage: <script> <text file> <template file> [<template file> ...]
    text_filename = sys.argv[1]
    template_filename = sys.argv[2:]
    obj = untemplate(template_filename, text_filename)
    print(json.dumps(obj, indent=4))
|
<filename>Google/benchmarks/unet3d/implementations/unet3d-preview-JAX-tpu-v4-128/models/test_util.py
"""Test utilities."""
from absl import flags
import numpy as np
import torch
from REDACTED.mlperf.submissions.training.v1_0.models.unet3d.models import layers # pylint: disable=unused-import
FLAGS = flags.FLAGS
def jax_param_to_torch(x):
    """Convert an array-like (e.g. a JAX array) into a torch.nn.Parameter."""
    host_array = np.array(x)
    return torch.nn.Parameter(torch.from_numpy(host_array))
def convert_down_sample_weight_to_torch(jax_down_sample_params,
                                        torch_down_params):
    """Converts down sample jax weights to torch.

    Conv kernels are transposed with [4, 3, 0, 1, 2]; per the layout
    comments elsewhere in this file that maps the JAX layout
    (D, H, W, in, out) to the torch layout (out, in, D, H, W).
    Instance-norm scale/bias copy over directly.
    """
    conv0_weight = np.transpose(
        jax_down_sample_params['ConvBlockFactory_0']['Conv_0']['kernel'],
        [4, 3, 0, 1, 2])
    torch_down_params.conv1[0].weight = jax_param_to_torch(conv0_weight)
    conv1_weight = np.transpose(
        jax_down_sample_params['ConvBlockFactory_1']['Conv_0']['kernel'],
        [4, 3, 0, 1, 2])
    torch_down_params.conv2[0].weight = jax_param_to_torch(conv1_weight)
    torch_down_params.conv1[1].weight = jax_param_to_torch(
        jax_down_sample_params['ConvBlockFactory_0']['InstanceNorm_1']['scale'])
    torch_down_params.conv1[1].bias = jax_param_to_torch(
        jax_down_sample_params['ConvBlockFactory_0']['InstanceNorm_1']['bias'])
    torch_down_params.conv2[1].weight = jax_param_to_torch(
        jax_down_sample_params['ConvBlockFactory_1']['InstanceNorm_1']['scale'])
    torch_down_params.conv2[1].bias = jax_param_to_torch(
        jax_down_sample_params['ConvBlockFactory_1']['InstanceNorm_1']['bias'])
    return torch_down_params
def convert_input_block_to_torch(jax_params, torch_params):
    """Converts input block jax weights to torch.

    Copies the two conv + instance-norm pairs of the input block, transposing
    conv kernels with [4, 3, 0, 1, 2] (JAX (D, H, W, in, out) -> torch
    (out, in, D, H, W), per the layout comments elsewhere in this file).
    """
    # Renamed from the misleading `conv2_weight`: this is conv1's kernel
    # (consistent with the naming in the sibling converter functions).
    conv1_weight = np.transpose(
        jax_params['ConvBlockFactory_0']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv1[0].weight = jax_param_to_torch(conv1_weight)
    torch_params.conv1[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactory_0']['InstanceNorm_1']['scale'])
    torch_params.conv1[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_0']['InstanceNorm_1']['bias'])
    conv2_weight = np.transpose(
        jax_params['ConvBlockFactory_1']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv2[0].weight = jax_param_to_torch(conv2_weight)
    torch_params.conv2[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactory_1']['InstanceNorm_1']['scale'])
    torch_params.conv2[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_1']['InstanceNorm_1']['bias'])
    return torch_params
def convert_upsample_block_to_torch_with_concat(jax_params, torch_params):
    """Converts upsample jax weights to torch (concat-upsample variant)."""
    kernel = jax_params['ConvBlockFactory_0']['ConvTranspose_0']['kernel']
    # Flax ConvTranspose3D does not do the flip, for numerical equivalance,
    # flip kernel.
    kernel = np.flip(kernel, [0, 1, 2])
    # (2, 2, 2, 2, 8) -> ([2, 8, 2, 2, 2])
    conv0_weight = np.transpose(kernel, [3, 4, 0, 1, 2])
    torch_params.upsample_conv[0].weight = jax_param_to_torch(conv0_weight)
    torch_params.upsample_conv[0].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_0']['ConvTranspose_0']['bias'])
    # (3, 3, 3, 16, 8) -> ([8, 16, 3, 3, 3])
    conv1_weight = np.transpose(
        jax_params['ConvBlockFactory_1']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv1[0].weight = jax_param_to_torch(conv1_weight)
    torch_params.conv1[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactory_1']['InstanceNorm_1']['scale'])
    torch_params.conv1[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_1']['InstanceNorm_1']['bias'])
    # (3, 3, 3, 8, 8) -> ([8, 8, 3, 3, 3])
    conv2_weight = np.transpose(
        jax_params['ConvBlockFactory_2']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv2[0].weight = jax_param_to_torch(conv2_weight)
    torch_params.conv2[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactory_2']['InstanceNorm_1']['scale'])
    torch_params.conv2[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_2']['InstanceNorm_1']['bias'])
    return torch_params
def convert_upsample_block_to_torch_without_concat(jax_params, torch_params):
    """Converts upsample jax weights to torch (no-concat variant).

    The JAX side holds two separate conv kernels (Conv_0 / Conv_1) that are
    concatenated along the input-channel axis to form torch's single conv1.
    """
    kernel = jax_params['ConvBlockFactory_0']['ConvTranspose_0']['kernel']
    # Flax ConvTranspose3D does not do the flip, for numerical equivalance,
    # flip kernel.
    kernel = np.flip(kernel, [0, 1, 2])
    # (2, 2, 2, 2, 8) -> ([2, 8, 2, 2, 2])
    conv0_weight = np.transpose(kernel, [3, 4, 0, 1, 2])
    torch_params.upsample_conv[0].weight = jax_param_to_torch(conv0_weight)
    torch_params.upsample_conv[0].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_0']['ConvTranspose_0']['bias'])
    # (3, 3, 3, 8, 8) -> ([8, 8, 3, 3, 3])
    conv1_weight_1 = np.transpose(
        jax_params['ConvBlockFactoryWithoutConcat_1']['Conv_0']['kernel'],
        [4, 3, 0, 1, 2])
    # (3, 3, 3, 8, 8) -> ([8, 8, 3, 3, 3])
    conv1_weight_2 = np.transpose(
        jax_params['ConvBlockFactoryWithoutConcat_1']['Conv_1']['kernel'],
        [4, 3, 0, 1, 2])
    # Join along axis 1 (input channels) to match torch's concatenated input.
    conv1_weight = np.concatenate([conv1_weight_1, conv1_weight_2], axis=1)
    torch_params.conv1[0].weight = jax_param_to_torch(conv1_weight)
    torch_params.conv1[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactoryWithoutConcat_1']['InstanceNorm_2']['scale'])
    torch_params.conv1[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactoryWithoutConcat_1']['InstanceNorm_2']['bias'])
    # (3, 3, 3, 8, 8) -> ([8, 8, 3, 3, 3])
    conv2_weight = np.transpose(
        jax_params['ConvBlockFactory_2']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv2[0].weight = jax_param_to_torch(conv2_weight)
    torch_params.conv2[1].weight = jax_param_to_torch(
        jax_params['ConvBlockFactory_2']['InstanceNorm_1']['scale'])
    torch_params.conv2[1].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_2']['InstanceNorm_1']['bias'])
    return torch_params
def convert_upsample_block_to_torch(jax_params, torch_params):
    """Dispatch to the concat / no-concat upsample converter based on flags."""
    if not FLAGS.enable_concat_upsample:
        return convert_upsample_block_to_torch_without_concat(jax_params,
                                                              torch_params)
    return convert_upsample_block_to_torch_with_concat(jax_params, torch_params)
def convert_output_block_to_torch(jax_params, torch_params):
    """Converts output block jax weights to torch.

    The output block has a single conv with bias; the kernel is transposed
    with [4, 3, 0, 1, 2] like the other conv kernels in this file.
    """
    # Renamed from the misleading `conv2_weight`: there is only one conv here.
    conv_weight = np.transpose(
        jax_params['ConvBlockFactory_0']['Conv_0']['kernel'], [4, 3, 0, 1, 2])
    torch_params.conv[0].weight = jax_param_to_torch(conv_weight)
    torch_params.conv[0].bias = jax_param_to_torch(
        jax_params['ConvBlockFactory_0']['Conv_0']['bias'])
    return torch_params
def convert_jax_unet3d_weights_to_torch(jax_params, pytorch_model):
    """Converts JAX unet3d weights to torch.

    Downsample blocks are sorted by their numeric suffix; the deepest one is
    treated as the bottleneck. Upsample blocks map index-to-index.
    """
    downblocknames = [
        k for k in jax_params.keys() if k.startswith('DownsampleBlock_')
    ]
    downblocknames = sorted(
        downblocknames, key=lambda x: int(x.split('DownsampleBlock_')[-1]))
    # The last (deepest) downsample block doubles as the bottleneck.
    bottleneck_layer = downblocknames[-1]
    for i, downblock in enumerate(downblocknames[:-1]):
        pytorch_model.downsample[i] = convert_down_sample_weight_to_torch(
            jax_params[downblock], pytorch_model.downsample[i])
    pytorch_model.bottleneck = convert_down_sample_weight_to_torch(
        jax_params[bottleneck_layer], pytorch_model.bottleneck)
    convert_input_block_to_torch(jax_params['InputBlock_0'],
                                 pytorch_model.input_block)
    upblocknames = [
        k for k in jax_params.keys() if k.startswith('UpsampleBlock_')
    ]
    upblocknames = sorted(
        upblocknames, key=lambda x: int(x.split('UpsampleBlock_')[-1]))
    for i, upblockname in enumerate(upblocknames):
        pytorch_model.upsample[i] = convert_upsample_block_to_torch(
            jax_params[upblockname], pytorch_model.upsample[i])
    pytorch_model.output = convert_output_block_to_torch(
        jax_params['OutputLayer_11'], pytorch_model.output)
    return pytorch_model
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import shutil
import sys
import time
import traceback
from typing import List, Optional
from . import buck, commands, log, statistics
from .commands import CommandParser, ExitCode, IncrementalStyle
from .exceptions import EnvironmentException
from .find_directories import find_project_root
from .version import __version__
LOG: logging.Logger = logging.getLogger(__name__)
def _set_default_command(arguments: argparse.Namespace) -> None:
    """Choose the default command: incremental if watchman is installed,
    otherwise a plain check."""
    if not shutil.which("watchman"):
        watchman_link = "https://facebook.github.io/watchman/docs/install.html"
        LOG.warning(
            "No watchman binary found. \n"
            "To enable pyre incremental, "
            "you can install watchman: {}".format(watchman_link)
        )
        LOG.warning("Defaulting to non-incremental check.")
        arguments.command = commands.Check.from_arguments
        return
    arguments.command = commands.Incremental.from_arguments
    arguments.nonblocking = False
    arguments.incremental_style = IncrementalStyle.FINE_GRAINED
    arguments.no_start = False
def main(argv: List[str]) -> int:
    """Run the pyre client for *argv* and return the process exit code.

    Parses the subcommand, runs it, and logs usage statistics in `finally`
    regardless of how the command exits.
    """
    start = time.time()
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        # pyre-fixme[6]: Expected `_FormatterClass` for 2nd param but got
        # `Type[argparse.RawTextHelpFormatter]`.
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="environment variables:"
        "\n `PYRE_BINARY` overrides the pyre binary used."
        "\n `PYRE_VERSION_HASH` overrides the pyre version set in the "
        "configuration files.",
    )
    commands.Command.add_arguments(parser)
    # Subcommands.
    subcommand_names = ", ".join(
        [command.NAME for command in commands.COMMANDS if not command.HIDDEN]
    )
    parsed_commands = parser.add_subparsers(
        metavar="{}".format(subcommand_names),
        help="""
        The pyre command to run; defaults to `incremental`.
        Run `pyre command --help` for documentation on a specific command.
        """,
    )
    for command in commands.COMMANDS:
        command.add_subparser(parsed_commands)
    arguments = parser.parse_args(argv)
    log.initialize(arguments.noninteractive)
    # No subcommand given: fall back to incremental (or check, if no watchman).
    if not hasattr(arguments, "command"):
        _set_default_command(arguments)
    command: Optional[CommandParser] = None
    client_exception_message = ""
    # Having this as a fails-by-default helps flag unexpected exit
    # from exception flows.
    exit_code = ExitCode.FAILURE
    try:
        original_directory = os.getcwd()
        # TODO(T57959968): Stop changing the directory in the client
        os.chdir(find_project_root(original_directory))
        if arguments.version:
            try:
                # TODO(T64512953): Decouple configuration creation with command creation
                configuration = arguments.command(
                    arguments, original_directory
                ).configuration
                if configuration:
                    binary_version = configuration.get_binary_version()
                    if binary_version:
                        log.stdout.write(f"Binary version: {binary_version}\n")
            except Exception:
                pass
            log.stdout.write(f"Client version: {__version__}\n")
            exit_code = ExitCode.SUCCESS
        else:
            command = arguments.command(arguments, original_directory)
            log.start_logging_to_directory(
                arguments.noninteractive, command.log_directory
            )
            exit_code = command.run().exit_code()
    except (buck.BuckException, EnvironmentException) as error:
        # Persistent mode must keep an LSP server alive even when startup
        # fails, so fall back to a null server instead of exiting.
        if arguments.command == commands.Persistent.from_arguments:
            try:
                commands.Persistent.run_null_server(timeout=3600 * 12)
                exit_code = ExitCode.SUCCESS
            except Exception as error:
                client_exception_message = str(error)
                exit_code = ExitCode.FAILURE
            except KeyboardInterrupt:
                LOG.warning("Interrupted by user")
                exit_code = ExitCode.SUCCESS
        else:
            client_exception_message = str(error)
            exit_code = (
                ExitCode.BUCK_ERROR
                if isinstance(error, buck.BuckException)
                else ExitCode.FAILURE
            )
    except commands.ClientException as error:
        client_exception_message = str(error)
        exit_code = ExitCode.FAILURE
    except Exception:
        client_exception_message = traceback.format_exc()
        exit_code = ExitCode.FAILURE
    except KeyboardInterrupt:
        LOG.warning("Interrupted by user")
        LOG.debug(traceback.format_exc())
        exit_code = ExitCode.SUCCESS
    finally:
        if len(client_exception_message) > 0:
            LOG.error(client_exception_message)
        log.cleanup()
        if command:
            result = command.result()
            error_message = result.error if result else None
            command.cleanup()
            configuration = command.configuration
            # Report usage telemetry when the configuration has a logger set.
            if configuration and configuration.logger:
                statistics.log(
                    category=statistics.LoggerCategory.USAGE,
                    arguments=arguments,
                    configuration=configuration,
                    integers={
                        "exit_code": exit_code,
                        "runtime": int((time.time() - start) * 1000),
                    },
                    normals={
                        "root": configuration.local_configuration_root,
                        "cwd": os.getcwd(),
                        "client_version": __version__,
                        "command": command.NAME,
                        "client_exception": client_exception_message,
                        "error_message": error_message,
                    },
                )
    return exit_code
if __name__ == "__main__":
    # Fail fast if the working directory was deleted out from under us —
    # os.getcwd() raises FileNotFoundError in that case.
    try:
        os.getcwd()
    except FileNotFoundError:
        LOG.error(
            "Pyre could not determine the current working directory. "
            "Has it been removed?\nExiting."
        )
        sys.exit(ExitCode.FAILURE)
    sys.exit(main(sys.argv[1:]))
|
from .mixins import ExtendCreateModelMixin, ExtendUpdateModelMixin
from .mixins import UserCreateModelMixin, UserUpdateModelMixin
from rest_framework import viewsets, mixins, filters
from django.shortcuts import get_object_or_404
class FilterViewSet(viewsets.GenericViewSet):
    """Base viewset wiring up the filter, search and ordering backends."""
    filter_backends = (filters.DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
class ModelFilterViewSet(mixins.ListModelMixin, ExtendCreateModelMixin,
        mixins.RetrieveModelMixin, ExtendUpdateModelMixin, mixins.DestroyModelMixin,
        FilterViewSet):
    """Full CRUD viewset using the extended create/update mixins."""
    pass
resource_required_user_fields = [('creator', 'username', True), ('updater', 'username', False)]
class ResourceListViewSet(mixins.ListModelMixin, UserCreateModelMixin,
        FilterViewSet):
    """List + create for resources that track creator/updater users."""
    required_user_fields = resource_required_user_fields
class ResourceDetailViewSet(mixins.RetrieveModelMixin, UserUpdateModelMixin, mixins.DestroyModelMixin,
        FilterViewSet):
    """Retrieve/update/delete for resources that track creator/updater users."""
    required_user_fields = resource_required_user_fields
class ResourceViewSet(mixins.ListModelMixin, UserCreateModelMixin,
        mixins.RetrieveModelMixin, UserUpdateModelMixin, mixins.DestroyModelMixin,
        FilterViewSet):
    """Full CRUD for resources that track creator/updater users."""
    required_user_fields = resource_required_user_fields
class NestedMixin(object):
    """Mixin for viewsets nested under a parent resource in the URL."""
    parent_queryset = None  # queryset of the parent model
    parent_lookup = None  # name of the view kwarg that identifies the parent
    parent_related_name = None  # attribute on the parent referring to children
    parent_pk_field = None  # primary-key field name of the parent
    def get_parent(self, kwargs):
        """Return the parent object named by *kwargs*, or None; 404 if the
        lookup value exists but matches no parent."""
        if self.parent_lookup is not None:
            lookup_value = kwargs.get(self.parent_lookup)
            if lookup_value is not None:
                parent = get_object_or_404(self.parent_queryset, **{self.parent_pk_field: lookup_value})
                return parent
        return None
    def set_list_queryset(self, kwargs):
        """Restrict self.queryset to children of the parent, when one is given."""
        parent = self.get_parent(kwargs)
        if parent is not None:
            self.queryset = self.queryset.filter(**{self.parent_related_name: parent})
class NestedListViewSet(mixins.ListModelMixin, ExtendCreateModelMixin, NestedMixin,
        FilterViewSet):
    """List/create viewset for objects nested under a parent."""
    def list(self, request, *args, **kwargs):
        self.set_list_queryset(kwargs)
        return super().list(request, *args, **kwargs)
    def create(self, request, *args, **kwargs):
        # Link the new object to the parent resolved from the URL kwargs.
        parent = self.get_parent(kwargs)
        extra_data = self.extra_data
        extra_data[self.parent_related_name] = parent
        return super().create(request, *args, **kwargs)
class NestedListOnlyViewSet(mixins.ListModelMixin, NestedMixin, FilterViewSet):
    """List-only viewset scoped to a parent."""
    def list(self, request, *args, **kwargs):
        self.set_list_queryset(kwargs)
        return super().list(request, *args, **kwargs)
class NestedCreateOnlyViewSet(ExtendCreateModelMixin, NestedMixin, FilterViewSet):
    """Create-only viewset that links new objects to their URL parent."""
    def create(self, request, *args, **kwargs):
        parent = self.get_parent(kwargs)
        extra_data = self.extra_data
        extra_data[self.parent_related_name] = parent
        return super().create(request, *args, **kwargs)
class NestedDetailViewSet(mixins.RetrieveModelMixin, UserUpdateModelMixin, NestedMixin,
        FilterViewSet):
    """Retrieve/update a single nested object."""
    pass
class NestedReadOnlyViewSet(mixins.ListModelMixin, NestedMixin, mixins.RetrieveModelMixin,
        FilterViewSet):
    """List + retrieve for nested objects."""
    def list(self, request, *args, **kwargs):
        self.set_list_queryset(kwargs)
        return super().list(request, *args, **kwargs)
class NestedResourceListViewSet(mixins.ListModelMixin, UserCreateModelMixin, NestedMixin, FilterViewSet):
    """Nested list/create that also records creator/updater users."""
    required_user_fields = resource_required_user_fields
    def list(self, request, *args, **kwargs):
        self.set_list_queryset(kwargs)
        return super().list(request, *args, **kwargs)
    def create(self, request, *args, **kwargs):
        # Link the new object to the parent resolved from the URL kwargs.
        parent = self.get_parent(kwargs)
        extra_data = self.extra_data
        extra_data[self.parent_related_name] = parent
        return super().create(request, *args, **kwargs)
class NestedResourceListOnlyViewSet(mixins.ListModelMixin, NestedMixin, FilterViewSet):
    """Nested list-only viewset for user-tracked resources."""
    required_user_fields = resource_required_user_fields
    def list(self, request, *args, **kwargs):
        self.set_list_queryset(kwargs)
        return super().list(request, *args, **kwargs)
class NestedResourceCreateOnlyViewSet(UserCreateModelMixin, NestedMixin, FilterViewSet):
    """Nested create-only viewset for user-tracked resources."""
    required_user_fields = resource_required_user_fields
    def create(self, request, *args, **kwargs):
        parent = self.get_parent(kwargs)
        extra_data = self.extra_data
        extra_data[self.parent_related_name] = parent
        return super().create(request, *args, **kwargs)
class NestedResourceDetailViewSet(mixins.RetrieveModelMixin, UserUpdateModelMixin, mixins.DestroyModelMixin,
        NestedMixin,
        FilterViewSet):
    """Retrieve/update/delete for a nested, user-tracked resource."""
    required_user_fields = resource_required_user_fields
class NestedResourceReadOnlyViewSet(NestedReadOnlyViewSet):
    """Alias of NestedReadOnlyViewSet kept for naming symmetry."""
    pass
import base64
import os.path
from django.conf import settings
from django.core import mail
import mock
from nose import SkipTest
from nose.tools import eq_
import amo
from amo.tests import app_factory, TestCase
from mkt.comm.models import CommunicationThread, CommunicationThreadToken
from mkt.comm.tests.test_views import CommTestMixin
from mkt.comm.utils import create_comm_note
from mkt.comm.utils_mail import (CommEmailParser, get_recipients,
save_from_email_reply)
from mkt.constants import comm
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
sample_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests',
'email.txt')
multi_email = os.path.join(settings.ROOT, 'mkt', 'comm', 'tests',
'email_multipart.txt')
class TestSendMailComm(TestCase, CommTestMixin):
def setUp(self):
self.create_switch('comm-dashboard')
self.developer = amo.tests.user_factory()
self.mozilla_contact = amo.tests.user_factory()
self.reviewer = amo.tests.user_factory()
self.senior_reviewer = amo.tests.user_factory()
self.grant_permission(self.senior_reviewer, '*:*',
'Senior App Reviewers')
self.app = amo.tests.app_factory()
self.app.addonuser_set.create(user=self.developer)
self.app.update(mozilla_contact=self.mozilla_contact.email)
def _create(self, note_type, author=None):
author = author or self.reviewer
return create_comm_note(self.app, self.app.current_version, author,
'Test Comment', note_type=note_type)
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
return recipients
def _check_template(self, call, template):
eq_(call[0][1], 'comm/emails/%s.html' % template)
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_approval(self, email):
self._create(comm.APPROVAL)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.mozilla_contact.email in recipients
self._check_template(email.call_args, 'approval')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation(self, email):
self._create(comm.ESCALATION)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.developer.email in recipients
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args_list[0],
'escalation_senior_reviewer')
self._check_template(email.call_args_list[1],
'escalation_developer')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_vip_app(self, email):
self._create(comm.ESCALATION_VIP_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_vip')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_escalation_prerelease_app(self, email):
self._create(comm.ESCALATION_PRERELEASE_APP)
eq_(email.call_count, 1)
recipients = self._recipients(email)
assert self.senior_reviewer.email in recipients
self._check_template(email.call_args,
'escalation_prerelease_app')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_reviewer_comment(self, email):
another_reviewer = amo.tests.user_factory()
self._create(comm.REVIEWER_COMMENT, author=self.reviewer)
self._create(comm.REVIEWER_COMMENT, author=another_reviewer)
eq_(email.call_count, 3)
recipients = self._recipients(email)
assert self.reviewer.email in recipients
assert self.mozilla_contact.email in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_developer_comment(self, email):
self._create(comm.REVIEWER_COMMENT)
self._create(comm.DEVELOPER_COMMENT, author=self.developer)
eq_(email.call_count, 3)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.reviewer.email in recipients
assert self.developer.email not in recipients
self._check_template(email.call_args, 'generic')
@mock.patch('mkt.comm.utils_mail.send_mail_jinja')
def test_additional_review(self, email):
self._create(comm.ADDITIONAL_REVIEW_PASSED)
eq_(email.call_count, 2)
recipients = self._recipients(email)
assert self.mozilla_contact.email in recipients
assert self.developer.email in recipients
self._check_template(email.call_args, 'tarako')
def test_mail_templates_exist(self):
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
for note_type in comm.EMAIL_SENIOR_REVIEWERS_AND_DEV:
self._create(note_type)
self._create(comm.NO_ACTION)
def test_email_formatting(self):
"""
Manually run test in case you want to spot-check if every email is
formatted nicely and consistently. Prints out each note type email
once.
"""
raise SkipTest
for note_type in comm.COMM_MAIL_MAP:
self._create(note_type)
email_subjects = []
for email in mail.outbox:
if email.subject in email_subjects:
continue
email_subjects.append(email_subjects)
print '##### %s #####' % email.subject
print email.body
class TestEmailReplySaving(TestCase):
    """Tests turning inbound email replies into comm notes via thread tokens."""
    fixtures = fixture('user_999')
    def setUp(self):
        self.app = app_factory(name='Antelope', status=amo.STATUS_PENDING)
        self.profile = UserProfile.objects.get(pk=999)
        t = CommunicationThread.objects.create(
            addon=self.app, version=self.app.current_version,
            read_permission_reviewer=True)
        self.create_switch('comm-dashboard')
        self.token = CommunicationThreadToken.objects.create(
            thread=t, user=self.profile)
        self.token.update(uuid='<KEY>')
        # NOTE(review): name suggests the fixture file already contains
        # base64-encoded content (test_multipart below encodes explicitly) —
        # TODO confirm against email.txt.
        self.email_base64 = open(sample_email).read()
        self.grant_permission(self.profile, 'Apps:Review')
    def test_successful_save(self):
        note = save_from_email_reply(self.email_base64)
        eq_(note.body, 'test note 5\n')
    def test_developer_comment(self):
        # Being an addon author makes the reply a developer comment.
        self.profile.addonuser_set.create(addon=self.app)
        note = save_from_email_reply(self.email_base64)
        eq_(note.note_type, comm.DEVELOPER_COMMENT)
    def test_reviewer_comment(self):
        self.grant_permission(self.profile, 'Apps:Review')
        note = save_from_email_reply(self.email_base64)
        eq_(note.note_type, comm.REVIEWER_COMMENT)
    def test_with_max_count_token(self):
        # Test with an invalid token.
        self.token.update(use_count=comm.MAX_TOKEN_USE_COUNT + 1)
        assert not save_from_email_reply(self.email_base64)
    def test_with_unpermitted_token(self):
        """Test when the token's user does not have a permission on thread."""
        self.profile.groupuser_set.filter(
            group__rules__contains='Apps:Review').delete()
        assert not save_from_email_reply(self.email_base64)
    def test_non_existent_token(self):
        self.token.update(uuid='youtube?v=wn4RP57Y7bw')
        assert not save_from_email_reply(self.email_base64)
    def test_with_invalid_msg(self):
        assert not save_from_email_reply('youtube?v=WwJjts9FzxE')
class TestEmailParser(TestCase):
    """Unit tests for CommEmailParser against stored email fixtures."""

    def setUp(self):
        email_text = open(sample_email).read()
        self.parser = CommEmailParser(email_text)

    def test_uuid(self):
        eq_(self.parser.get_uuid(), '5a0b8a83d501412589cc5d562334b46b')

    def test_body(self):
        eq_(self.parser.get_body(), 'test note 5\n')

    def test_multipart(self):
        # Multipart messages arrive base64-encoded; the parser must still
        # locate the plain-text part and the thread uuid.
        multipart_email = open(multi_email).read()
        payload = base64.standard_b64encode(multipart_email)
        parser = CommEmailParser(payload)
        eq_(parser.get_body(), 'this is the body text\n')
        eq_(parser.get_uuid(), 'abc123')
|
<filename>project_reporter/replicon.py
from pathlib import Path
import pandas as pd
from xml.sax import ContentHandler, parse
import project_reporter.utilities as ut
class ExcelHandler(ContentHandler):
    """SAX handler that collects cell text from an Excel XML workbook.

    After parsing, ``tables`` holds one entry per ``Table`` element; each
    entry is a list of rows, each row a list of cell strings.
    Reference https://stackoverflow.com/questions/33470130/read-excel-xml-xls-file-with-pandas
    """

    # Element name -> accumulator attribute to reset when the element opens.
    _RESET_ON_OPEN = {"Cell": "chars", "Row": "cells", "Table": "rows"}

    def __init__(self):
        self.chars = []
        self.cells = []
        self.rows = []
        self.tables = []

    def characters(self, content):
        # Text nodes may arrive in several chunks; buffer them all.
        self.chars.append(content)

    def startElement(self, name, atts):
        attr = self._RESET_ON_OPEN.get(name)
        if attr is not None:
            setattr(self, attr, [])

    def endElement(self, name):
        if name == "Cell":
            self.cells.append("".join(self.chars))
        elif name == "Row":
            self.rows.append(self.cells)
        elif name == "Table":
            self.tables.append(self.rows)
def read_replicon(path, header_row=11, start_row=13, end_row=None,
  start_column=1, end_column=None):
    """
    Read a Replicon timesheet (Excel XML file) located at the given path (string or Path object).
    Assume that:
    - the header row starts at row ``header_row`` and extends from column ``start_column`` to column ``end_column``
    - the data is rectangular in shape, starts at row ``start_row`` and column ``start_column``, and ends at row ``end_row`` and column ``end_column``
    Here row and column numbers start at zero and setting the end row or end column to ``None`` means reading to the last row or last column, respectively, of the file.
    Return a Pandas data frame representing the timesheet data and header.
    """
    handler = ExcelHandler()
    parse(str(Path(path)), handler)
    # Only the first table of the workbook is of interest.
    table = handler.tables[0]
    header = [cell.strip() for cell in table[header_row][start_column:end_column]]
    body = [
        [cell.strip() for cell in row[start_column:end_column]]
        for row in table[start_row:end_row]
    ]
    return pd.DataFrame(body, columns=header)
def reformat_replicon(replicon_df):
    """
    Given a Replicon data frame (in the form output by :func:`read_replicon`) that contains at least the columns
    - ``'Entry Date'``
    - ``'Task Name'``
    - ``'User Name'``
    - ``'Billable Hrs'``
    convert it into a standard timesheet data frame, that is, one with at the columns
    - ``'date'``: datetime object
    - ``'task'``
    - ``'worker'``
    - ``'duration'``.
    Return the resulting data frame.
    """
    frame = replicon_df.copy()
    # Drop columns we never use; ignore the ones that are absent.
    frame = frame.drop(
        ['Client Name', 'Non-Billable Hrs', 'Total Hrs', 'Employee Id'],
        axis=1, errors='ignore')
    # Normalise headers to snake_case, then map them to the standard names.
    frame.columns = [c.lower().strip().replace(' ', '_') for c in frame.columns]
    frame = frame.rename(columns={
        'task_name': 'task',
        'user_name': 'worker',
        'billable_hrs': 'duration',
        'entry_date': 'date',
    })
    # Fix dtypes.
    frame['date'] = pd.to_datetime(frame['date'])
    frame['duration'] = frame['duration'].astype(float)

    def flip_name(name):
        # 'Last, First' -> 'First Last'; single tokens pass through.
        parts = name.split(', ')
        if len(parts) >= 2:
            parts = parts[::-1]
        return ' '.join(parts).strip()

    frame['worker'] = frame['worker'].map(flip_name)
    # Restrict to the standard column set.
    return frame[['date', 'task', 'worker', 'duration']].copy()
|
<gh_stars>100-1000
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import PCA
PI = float(np.pi)
def fuv2img(fuv, coorW=1024, floorW=1024, floorH=512):
    '''
    Project 1d signal in uv space to 2d floor plane image
    '''
    # Pixel grids of the floor plane; origin moved to the plane centre.
    col, row = np.meshgrid(range(floorW), range(floorH))
    px = -(row - floorH / 2)
    py = col - floorW / 2
    # Angle of each floor pixel mapped to a (fractional) column of `fuv`.
    angle_idx = (np.arctan2(py, px) / (2 * PI) + 0.5) * coorW - 0.5
    img = map_coordinates(fuv, angle_idx.reshape(1, -1), order=1, mode='wrap')
    return img.reshape(floorH, floorW)
def np_coorx2u(coorx, coorW=1024):
    """Map an image column (pixel centre) to longitude u in radians."""
    return ((coorx + 0.5) / coorW - 0.5) * 2 * PI
def np_coory2v(coory, coorH=512):
    """Map an image row (pixel centre) to latitude v in radians."""
    return -((coory + 0.5) / coorH - 0.5) * PI
def np_coor2xy(coor, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    coor: N x 2, index of array in (col, row) format
    '''
    coor = np.array(coor)
    u = np_coorx2u(coor[:, 0], coorW)
    v = np_coory2v(coor[:, 1], coorH)
    # Horizontal distance from the camera for a plane at height z.
    dist = z / np.tan(v)
    xs = dist * np.sin(u) + floorW / 2 - 0.5
    ys = -dist * np.cos(u) + floorH / 2 - 0.5
    return np.hstack([xs[:, None], ys[:, None]])
def np_x_u_solve_y(x, u, floorW=1024, floorH=512):
    """Given floor-plane x and ray angle u, solve for the matching y."""
    dist = (x - floorW / 2 + 0.5) / np.sin(u)
    return -dist * np.cos(u) + floorH / 2 - 0.5
def np_y_u_solve_x(y, u, floorW=1024, floorH=512):
    """Given floor-plane y and ray angle u, solve for the matching x."""
    dist = -(y - floorH / 2 + 0.5) / np.cos(u)
    return dist * np.sin(u) + floorW / 2 - 0.5
def np_xy2coor(xy, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    xy: N x 2
    '''
    # Offsets from the floor-plane centre (pixel-centre convention).
    dx = xy[:, 0] - floorW / 2 + 0.5
    dy = xy[:, 1] - floorH / 2 + 0.5
    u = np.arctan2(dx, -dy)
    v = np.arctan(z / np.sqrt(dx**2 + dy**2))
    coorx = (u / (2 * PI) + 0.5) * coorW - 0.5
    coory = (-v / PI + 0.5) * coorH - 0.5
    return np.hstack([coorx[:, None], coory[:, None]])
def mean_percentile(vec, p1=25, p2=75):
    """Mean of the values of *vec* between its p1-th and p2-th percentiles."""
    lo, hi = np.percentile(vec, [p1, p2])
    return vec[(lo <= vec) & (vec <= hi)].mean()
def vote(vec, tol):
    """Pick the dominant value of *vec* by voting over contiguous sorted runs.

    Returns ``(best_fit, p_score, l1_score)`` where p_score is the fraction
    of samples in the winning run and l1_score the mean absolute deviation
    of all samples from best_fit.
    """
    vec = np.sort(vec)
    # n[i, j] = number of samples in the sorted run vec[i..j] (reverse-indexed).
    n = np.arange(len(vec))[::-1]
    n = n[:, None] - n[None, :] + 1.0
    # l[i, j] = spread of the run; tiny epsilon avoids exact zeros.
    l = squareform(pdist(vec[:, None], 'minkowski', p=1) + 1e-9)
    # A run is valid only if it covers >= 40% of samples and spans <= tol.
    invalid = (n < len(vec) * 0.4) | (l > tol)
    if (~invalid).sum() == 0 or len(vec) < tol:
        # No valid run: fall back to the median with zero confidence.
        best_fit = np.median(vec)
        p_score = 0
    else:
        l[invalid] = 1e5
        n[invalid] = -1
        score = n
        # argmax over the flattened score matrix -> (row, col) of best run.
        max_idx = score.argmax()
        max_row = max_idx // len(vec)
        max_col = max_idx % len(vec)
        assert max_col > max_row
        best_fit = vec[max_row:max_col+1].mean()
        p_score = (max_col - max_row + 1) / len(vec)
    l1_score = np.abs(vec - best_fit).mean()
    return best_fit, p_score, l1_score
def get_z1(coory0, coory1, z0=50, coorH=512):
    """Infer plane height z1 from two boundary rows sharing the same rays.

    coory0 is assumed to lie on the plane at height z0.
    """
    v0 = np_coory2v(coory0, coorH)
    v1 = np_coory2v(coory1, coorH)
    dist = z0 / np.tan(v0)
    return dist * np.tan(v1)
def np_refine_by_fix_z(coory0, coory1, z0=50, coorH=512):
    '''
    Refine coory1 by coory0
    coory0 are assumed on given plane z
    '''
    v0 = np_coory2v(coory0, coorH)
    v1 = np_coory2v(coory1, coorH)
    dist = z0 / np.tan(v0)
    # Per-column height implied by coory1, then a robust (percentile) mean.
    z1 = dist * np.tan(v1)
    z1_mean = mean_percentile(z1)
    # Re-project the averaged height back to image rows.
    v1_refine = np.arctan2(z1_mean, dist)
    coory1_refine = (-v1_refine / PI + 0.5) * coorH - 0.5
    return coory1_refine, z1_mean
def infer_coory(coory0, h, z0=50, coorH=512):
    """Infer the image row of a plane h above the z0 plane along each ray."""
    v0 = np_coory2v(coory0, coorH)
    dist = z0 / np.tan(v0)
    v1 = np.arctan2(z0 + h, dist)
    return (-v1 / PI + 0.5) * coorH - 0.5
def get_gpid(coorx, coorW):
    """Group id per column: boundaries at (rounded) coorx split [0, coorW)."""
    marks = np.zeros(coorW)
    marks[np.round(coorx).astype(int)] = 1
    gpid = np.cumsum(marks).astype(int)
    # Wrap around: columns past the last boundary join the first group.
    gpid[gpid == gpid[-1]] = 0
    return gpid
def get_gpid_idx(gpid, j):
    """Column indices of group j, rolled so a wrapped-around run is contiguous."""
    idx = np.where(gpid == j)[0]
    if idx[0] == 0 and idx[-1] != len(idx) - 1:
        # The run wraps past column 0: rotate so it starts at the tail part.
        shift = -np.where(idx != np.arange(len(idx)))[0][0]
        idx = np.roll(idx, shift)
    return idx
def gpid_two_split(xy, tpid_a, tpid_b):
    """Split *xy* into a prefix fitting column tpid_a and a suffix fitting
    column tpid_b, and return the mean value of each side.

    The split point maximises a cumulative-mean score over both directions.
    """
    m = np.arange(len(xy)) + 1
    # Prefix sums of column a, and suffix sums (via reversal) of column b.
    cum_a = np.cumsum(xy[:, tpid_a])
    cum_b = np.cumsum(xy[::-1, tpid_b])
    l1_a = cum_a / m - cum_a / (m * m)
    l1_b = cum_b / m - cum_b / (m * m)
    l1_b = l1_b[::-1]
    # Score each candidate split (prefix ends at i, suffix starts at i+1).
    score = l1_a[:-1] + l1_b[1:]
    best_split = score.argmax() + 1
    va = xy[:best_split, tpid_a].mean()
    vb = xy[best_split:, tpid_b].mean()
    return va, vb
def _get_rot_rad(px, py):
if px < 0:
px, py = -px, -py
rad = np.arctan2(py, px) * 180 / np.pi
if rad > 45:
return 90 - rad
if rad < -45:
return -90 - rad
return -rad
def get_rot_rad(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=5):
    """Estimate the global rotation that axis-aligns the walls.

    Returns ``(dx, rot_rad)`` where dx is the equivalent horizontal pixel
    shift for a 1024-wide panorama and rot_rad the rotation in degrees.
    """
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    xy_cor = []  # NOTE(review): never used in this function.
    rot_rad_suggestions = []
    for j in range(len(init_coorx)):
        # Principal direction of each wall segment suggests a rotation.
        pca = PCA(n_components=1)
        pca.fit(xy[gpid == j])
        rot_rad_suggestions.append(_get_rot_rad(*pca.components_[0]))
    # Sentinel 1e9 guarantees the final cluster of the scan below terminates.
    rot_rad_suggestions = np.sort(rot_rad_suggestions + [1e9])
    rot_rad = np.mean(rot_rad_suggestions[:-1])
    best_rot_rad_sz = -1
    last_j = 0
    # Find the largest cluster of mutually-close suggestions (within tol).
    for j in range(1, len(rot_rad_suggestions)):
        if rot_rad_suggestions[j] - rot_rad_suggestions[j-1] > tol:
            last_j = j
        elif j - last_j > best_rot_rad_sz:
            rot_rad = rot_rad_suggestions[last_j:j+1].mean()
            best_rot_rad_sz = j - last_j
    dx = int(round(rot_rad * 1024 / 360))
    return dx, rot_rad
def gen_ww_cuboid(xy, gpid, tol):
    """Fit four axis-aligned walls (a cuboid room) to grouped floor points.

    Each wall is a dict with ``type`` (0 = vertical x=val, 1 = horizontal
    y=val), ``val`` and ``score``. Wall types are forced to alternate.
    """
    xy_cor = []
    assert len(np.unique(gpid)) == 4
    # For each part seperated by wall-wall peak, voting for a wall
    for j in range(4):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        # Pick the better-supported orientation (higher score, lower L1).
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({'type': 0, 'val': new_x, 'score': x_score})
        else:
            xy_cor.append({'type': 1, 'val': new_y, 'score': y_score})
    # Sanity fallback
    # A cuboid must alternate wall types; choose the alternation pattern
    # (starting with type 0 or type 1) with the stronger combined score.
    scores = [0, 0]
    for j in range(4):
        if xy_cor[j]['type'] == 0:
            scores[j % 2] += xy_cor[j]['score']
        else:
            scores[j % 2] -= xy_cor[j]['score']
    if scores[0] > scores[1]:
        xy_cor[0]['type'] = 0
        xy_cor[1]['type'] = 1
        xy_cor[2]['type'] = 0
        xy_cor[3]['type'] = 1
    else:
        xy_cor[0]['type'] = 1
        xy_cor[1]['type'] = 0
        xy_cor[2]['type'] = 1
        xy_cor[3]['type'] = 0
    return xy_cor
def gen_ww_general(init_coorx, xy, gpid, tol):
    """Fit an arbitrary (non-cuboid) axis-aligned wall layout.

    Walls are fixed greedily from highest vote score to lowest; adjacent
    walls of the same type are repaired by re-typing or by inserting an
    inferred wall so that types always alternate around the loop.
    """
    xy_cor = []
    assert len(init_coorx) == len(np.unique(gpid))
    # Candidate for each part seperated by wall-wall boundary
    for j in range(len(init_coorx)):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        # Boundary ray angles of this segment (previous peak -> this peak).
        u0 = np_coorx2u(init_coorx[(j - 1 + len(init_coorx)) % len(init_coorx)])
        u1 = np_coorx2u(init_coorx[j])
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({'type': 0, 'val': new_x, 'score': x_score, 'action': 'ori', 'gpid': j, 'u0': u0, 'u1': u1, 'tbd': True})
        else:
            xy_cor.append({'type': 1, 'val': new_y, 'score': y_score, 'action': 'ori', 'gpid': j, 'u0': u0, 'u1': u1, 'tbd': True})
    # Construct wall from highest score to lowest
    while True:
        # Finding undetermined wall with highest score
        tbd = -1
        for i in range(len(xy_cor)):
            if xy_cor[i]['tbd'] and (tbd == -1 or xy_cor[i]['score'] > xy_cor[tbd]['score']):
                tbd = i
        if tbd == -1:
            break
        # This wall is determined
        xy_cor[tbd]['tbd'] = False
        p_idx = (tbd - 1 + len(xy_cor)) % len(xy_cor)
        n_idx = (tbd + 1) % len(xy_cor)
        num_tbd_neighbor = xy_cor[p_idx]['tbd'] + xy_cor[n_idx]['tbd']
        # Two adjacency walls are not determined yet => not special case
        if num_tbd_neighbor == 2:
            continue
        # Only one of adjacency two walls is determine => add now or later special case
        if num_tbd_neighbor == 1:
            if (not xy_cor[p_idx]['tbd'] and xy_cor[p_idx]['type'] == xy_cor[tbd]['type']) or\
                    (not xy_cor[n_idx]['tbd'] and xy_cor[n_idx]['type'] == xy_cor[tbd]['type']):
                # Current wall is different from one determined adjacency wall
                if xy_cor[tbd]['score'] >= -1:
                    # Later special case, add current to tbd
                    # (score -= 100 also guarantees eventual fallback below).
                    xy_cor[tbd]['tbd'] = True
                    xy_cor[tbd]['score'] -= 100
                else:
                    # Fallback: forced change the current wall or infinite loop
                    if not xy_cor[p_idx]['tbd']:
                        insert_at = tbd
                        if xy_cor[p_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 0
                    else:
                        insert_at = n_idx
                        if xy_cor[n_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 0
                    new_add = {'type': new_type, 'val': new_val, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False}
                    xy_cor.insert(insert_at, new_add)
            continue
        # Below checking special case
        if xy_cor[p_idx]['type'] == xy_cor[n_idx]['type']:
            # Two adjacency walls are same type, current wall should be differen type
            if xy_cor[tbd]['type'] == xy_cor[p_idx]['type']:
                # Fallback: three walls with same type => forced change the middle wall
                xy_cor[tbd]['type'] = (xy_cor[tbd]['type'] + 1) % 2
                xy_cor[tbd]['action'] = 'forced change'
                xy_cor[tbd]['val'] = xy[gpid == xy_cor[tbd]['gpid'], xy_cor[tbd]['type']].mean()
        else:
            # Two adjacency walls are different type => add one
            tp0 = xy_cor[n_idx]['type']
            tp1 = xy_cor[p_idx]['type']
            if xy_cor[p_idx]['type'] == 0:
                val0 = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            else:
                val0 = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            new_add = [
                {'type': tp0, 'val': val0, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False},
                {'type': tp1, 'val': val1, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False},
            ]
            # Replace the middle wall by the two inferred walls.
            xy_cor = xy_cor[:tbd] + new_add + xy_cor[tbd+1:]
    return xy_cor
def gen_ww(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=3, force_cuboid=True):
    """Generate wall-wall corner coordinates from ceiling boundary estimates.

    Returns ``(cor, xy_cor)``: panorama corner coordinates (N x 2) and the
    fitted wall list produced by the cuboid / general fitter.
    """
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    # Generate wall-wall
    if force_cuboid:
        xy_cor = gen_ww_cuboid(xy, gpid, tol)
    else:
        xy_cor = gen_ww_general(init_coorx, xy, gpid, tol)
    # Ceiling view to normal view
    cor = []
    for j in range(len(xy_cor)):
        next_j = (j + 1) % len(xy_cor)
        # Each corner is the intersection of wall j and wall j+1.
        if xy_cor[j]['type'] == 1:
            cor.append((xy_cor[next_j]['val'], xy_cor[j]['val']))
        else:
            cor.append((xy_cor[j]['val'], xy_cor[next_j]['val']))
    cor = np_xy2coor(np.array(cor), z, coorW, coorH, floorW, floorH)
    # Roll so the leftmost corner (smallest x at even rows) comes first.
    cor = np.roll(cor, -2 * cor[::2, 0].argmin(), axis=0)
    return cor, xy_cor
|
<gh_stars>1-10
import pytest
from hexastore.ast import IRI, Variable
from hexastore.blank_node_factory import BlankNodeFactory
from hexastore.default_forward_reasoner import make_default_forward_reasoner
from hexastore.memory import InMemoryHexastore
# Generic subject/object IRIs used across the tests.
A = IRI("http://example.com/A")
B = IRI("http://example.com/B")
C = IRI("http://example.com/C")
D = IRI("http://example.com/D")
# schema.org relationship and class terms.
CHILDREN = IRI("https://schema.org/children")
PARENT = IRI("https://schema.org/parent")
SIBLING = IRI("https://schema.org/sibling")
SPOUSE = IRI("https://schema.org/spouse")
OWL_THING = IRI("http://www.w3.org/2002/07/owl#Thing")
THING = IRI("https://schema.org/Thing")
PERSON = IRI("https://schema.org/Person")
ORGANISATION = IRI("https://schema.org/Organisation")
RELATED_TO = IRI("http://example.com/relatedTo")
BAG = IRI("http://www.w3.org/1999/02/22-rdf-syntax-ns#Bag")
INFERRED_FROM = IRI("https://example.com/inferred_from")
# RDF / RDFS / OWL vocabulary used by the reasoner rules.
TYPE = IRI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
SYMMETRIC_PROPERTY = IRI("http://www.w3.org/2002/07/owl#SymmetricProperty")
INVERSE_OF = IRI("http://www.w3.org/2002/07/owl#inverseOf")
DOMAIN = IRI("http://www.w3.org/2000/01/rdf-schema#domain")
RANGE = IRI("http://www.w3.org/2000/01/rdf-schema#range")
SUBCLASS_OF = IRI("http://www.w3.org/2000/01/rdf-schema#subclassOf")
SUBPROPERTY_OF = IRI("http://www.w3.org/2000/01/rdf-schema#subpropertyOf")
TRANSITIVE_PROPERTY = IRI("http://www.w3.org/2002/07/owl#TransitiveProperty")
MEMBER = IRI("http://www.w3.org/1999/02/22-rdf-syntax-ns#member")
@pytest.fixture
def store():
    """Fresh in-memory hexastore for each test."""
    blank_node_factory = BlankNodeFactory()
    return InMemoryHexastore(blank_node_factory)
@pytest.fixture
def reasoner(store):
    """Default forward reasoner wrapping the per-test store."""
    return make_default_forward_reasoner(store)
def parent_sibling_rule(store, s, p, o):
    """Custom rule: two distinct subjects sharing a parent are siblings."""
    inferred_from = (s, p, o)
    # All other subjects s_ with the same (p, o), i.e. same parent.
    for s_ in store.ops[o][p].iter():
        if s == s_:
            continue
        # Record both sibling directions with their provenance triples.
        store.insert((s, SIBLING, s_), [inferred_from, (s_, p, o)])
        store.insert((s_, SIBLING, s), [inferred_from, (s_, p, o)])
@pytest.mark.default_forward_reasoner
def test_default_forward_reasoner_symmetric_property(store, reasoner):
    """A symmetric property infers the mirrored triple."""
    reasoner.insert(SPOUSE, TYPE, SYMMETRIC_PROPERTY)
    reasoner.insert(A, SPOUSE, B)
    assert (B, SPOUSE, A) in store

@pytest.mark.default_forward_reasoner
def test_default_forward_reasoner_with_delete(store, reasoner):
    """Deleting the base triple retracts its symmetric inference too."""
    reasoner.insert(SPOUSE, TYPE, SYMMETRIC_PROPERTY)
    reasoner.insert(A, SPOUSE, B)
    assert (B, SPOUSE, A) in store
    reasoner.delete(A, SPOUSE, B)
    assert list(store.triples()) == [(SPOUSE, TYPE, SYMMETRIC_PROPERTY)]
@pytest.mark.default_forward_reasoner
def test_forward_reasoner_transitive(store, reasoner):
    """A transitive property chains: PERSON < THING < OWL_THING."""
    # (Leftover debug print(reasoner) calls removed.)
    reasoner.insert(SUBCLASS_OF, TYPE, TRANSITIVE_PROPERTY)
    reasoner.insert(PERSON, SUBCLASS_OF, THING)
    reasoner.insert(THING, SUBCLASS_OF, OWL_THING)
    assert (PERSON, SUBCLASS_OF, OWL_THING) in store
@pytest.mark.default_forward_reasoner
def test_forward_reasoner_transitive_reverse(store, reasoner):
    """Transitive closure also fires when triples arrive in reverse order."""
    reasoner.insert(SUBCLASS_OF, TYPE, TRANSITIVE_PROPERTY)
    reasoner.insert(THING, SUBCLASS_OF, OWL_THING)
    reasoner.insert(PERSON, SUBCLASS_OF, THING)
    assert (PERSON, SUBCLASS_OF, OWL_THING) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_transitive_with_delete(store, reasoner):
    """Deleting the transitivity axiom retracts inferences and stops new ones."""
    reasoner.insert(SUBCLASS_OF, TYPE, TRANSITIVE_PROPERTY)
    reasoner.insert(PERSON, SUBCLASS_OF, THING)
    reasoner.insert(THING, SUBCLASS_OF, OWL_THING)
    reasoner.delete(SUBCLASS_OF, TYPE, TRANSITIVE_PROPERTY)
    assert list(store.triples()) == [(PERSON, SUBCLASS_OF, THING), (THING, SUBCLASS_OF, OWL_THING)]
    reasoner.insert(ORGANISATION, SUBCLASS_OF, THING)
    assert list(store.triples()) == [
        (ORGANISATION, SUBCLASS_OF, THING),
        (PERSON, SUBCLASS_OF, THING),
        (THING, SUBCLASS_OF, OWL_THING),
    ]
@pytest.mark.default_forward_reasoner
def test_forward_reasoner_subclass_of(store, reasoner):
    """rdfs:subclassOf propagates rdf:type to the superclass."""
    reasoner.insert(PERSON, SUBCLASS_OF, THING)
    reasoner.insert(A, TYPE, PERSON)
    assert (A, TYPE, THING) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_subproperty_property(store, reasoner):
    """rdfs:subpropertyOf lifts triples to the superproperty."""
    reasoner.insert(SPOUSE, SUBPROPERTY_OF, RELATED_TO)
    reasoner.insert(A, SPOUSE, B)
    assert (A, RELATED_TO, B) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_domain_property(store, reasoner):
    """rdfs:domain types the subject of a triple."""
    reasoner.insert(SPOUSE, DOMAIN, PERSON)
    reasoner.insert(A, SPOUSE, B)
    assert (A, TYPE, PERSON) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_range_property(store, reasoner):
    """rdfs:range types the object of a triple."""
    reasoner.insert(SPOUSE, RANGE, PERSON)
    reasoner.insert(A, SPOUSE, B)
    assert (B, TYPE, PERSON) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_domain_range_property(store, reasoner):
    """Domain and range axioms together type both ends of a triple."""
    reasoner.insert(SPOUSE, DOMAIN, PERSON)
    reasoner.insert(SPOUSE, RANGE, PERSON)
    reasoner.insert(A, SPOUSE, B)
    assert (A, TYPE, PERSON) in store
    assert (B, TYPE, PERSON) in store
@pytest.mark.default_forward_reasoner
def test_forward_reasoner_with_child(store, reasoner):
    """inverseOf plus a symmetric property yield the full family closure."""
    reasoner.insert(SPOUSE, TYPE, SYMMETRIC_PROPERTY)
    reasoner.insert(CHILDREN, INVERSE_OF, PARENT)
    reasoner.insert(A, SPOUSE, B)
    reasoner.insert(C, PARENT, A)
    reasoner.insert(C, PARENT, B)
    # The reversed inverseOf axiom is itself stored with provenance.
    assert ((PARENT, INVERSE_OF, CHILDREN), INFERRED_FROM, (CHILDREN, INVERSE_OF, PARENT)) in store
    assert (A, CHILDREN, C) in store
    assert (A, SPOUSE, B) in store
    assert (B, CHILDREN, C) in store
    assert (B, SPOUSE, A) in store
    assert (C, PARENT, A) in store
    assert (C, PARENT, B) in store
    assert (CHILDREN, INVERSE_OF, PARENT) in store
    assert (PARENT, INVERSE_OF, CHILDREN) in store
    assert (SPOUSE, TYPE, SYMMETRIC_PROPERTY) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_with_children_1(store, reasoner):
    """A registered custom rule fires on triples inferred via inverseOf."""
    reasoner.insert(CHILDREN, INVERSE_OF, PARENT)
    reasoner.register_rule((Variable("s"), PARENT, Variable("o")), parent_sibling_rule)
    reasoner.insert(A, CHILDREN, C)
    reasoner.insert(A, CHILDREN, D)
    assert ((PARENT, INVERSE_OF, CHILDREN), INFERRED_FROM, (CHILDREN, INVERSE_OF, PARENT)) in store
    assert (A, CHILDREN, C) in store
    assert (A, CHILDREN, D) in store
    assert (C, PARENT, A) in store
    assert (CHILDREN, INVERSE_OF, PARENT) in store
    assert (PARENT, INVERSE_OF, CHILDREN) in store

@pytest.mark.default_forward_reasoner
def test_forward_reasoner_with_children(store, reasoner):
    """Full scenario: spouses with shared children become siblings."""
    reasoner.insert(SPOUSE, TYPE, SYMMETRIC_PROPERTY)
    reasoner.insert(CHILDREN, INVERSE_OF, PARENT)
    reasoner.register_rule((Variable("s"), PARENT, Variable("o")), parent_sibling_rule)
    reasoner.insert(A, SPOUSE, B)
    reasoner.insert(C, PARENT, A)
    reasoner.insert(C, PARENT, B)
    reasoner.insert(D, PARENT, A)
    reasoner.insert(D, PARENT, B)
    assert (SPOUSE, TYPE, SYMMETRIC_PROPERTY) in store
    assert (CHILDREN, INVERSE_OF, PARENT) in store
    assert (PARENT, INVERSE_OF, CHILDREN) in store
    assert ((PARENT, INVERSE_OF, CHILDREN), INFERRED_FROM, (CHILDREN, INVERSE_OF, PARENT)) in store
    assert (A, SPOUSE, B) in store
    assert (B, SPOUSE, A) in store
    assert (C, PARENT, A) in store
    assert (A, CHILDREN, C) in store
    assert (C, PARENT, B) in store
    assert (B, CHILDREN, C) in store
    assert (D, PARENT, A) in store
    assert (A, CHILDREN, D) in store
    assert (C, SIBLING, D) in store
    assert (D, SIBLING, C) in store
|
##########################################################################
##file: naturalLanguageProcessing.py
##Author: <NAME>
##Project: Blocks World and Agency
##
##Dependencies: nltk
##
##This file processes input for use in the AgentJBase class.
##
###########################################################################
import nltk
from nltk.corpus import treebank
from copy import copy
from copy import deepcopy
def delimiteByAnd(tokenList):
    """Split a token list at the first 'and' into two sublists.

    Returns [left, right] when 'and' is present (only the first occurrence
    is honoured), otherwise [tokenList] unchanged.
    """
    # Renamed from `list`, which shadowed the builtin.
    parts = []
    if 'and' in tokenList:
        i = tokenList.index('and')
        parts.append(tokenList[:i])
        parts.append(tokenList[(i+1):])
    else:
        parts.append(tokenList)
    return parts
def testForCurrentLocation(x):
if (('location' in x) or (('where' in x)and('are' in x)and('you' in x))):
return True
else:
return False
def testForMakeMove(x):
if ((('go' in x)and('to' in x)) or (('move' in x)and('to' in x )) or
(('continue' in x)and('to' in x))):
return True
else:
return False
def findConfig(x):
    """Return the '[...]' configuration embedded in token list *x* as one string.

    The span runs from the first '[' to the last ']' (inclusive); raises
    ValueError when either bracket is missing.
    """
    startIndex = x.index('[')
    # Last ']' = len - 1 - (position of the first ']' in the reversed list);
    # the old deepcopy/reverse dance is unnecessary.
    endIndex = len(x) - 1 - x[::-1].index(']')
    # Concatenate tokens (was a quadratic `+=` loop that also shadowed x).
    return ''.join(x[startIndex:endIndex + 1])
def testForSearchNode(x):
if ((('search' in x) or (('look' in x)and('around' in x)) or (('look' in x)and('for' in x))
or (('examine' in x)and('node' in x))or (('examine' in x)and('configuration' in x))
or (('examine' in x)and('config' in x))or (('examine' in x)and('cfg' in x)))):
return True
else:
return False
def testForPickUp(x):
if ((('pick' in x)and('up' in x)) or (('grab' in x))or
(('pick' in x)and('object' in x))):
return True
else:
return False
def testForPutDown(x):
if ((('put' in x)and('down' in x)) or (('drop' in x))or
(('place' in x)and('object' in x))):
return True
else:
return False
def testAskAboutHolding(x):
if ((('what' in x)and('holding' in x)) or (('what' in x)and('carrying' in x))or
(('you' in x)and('carrying' in x))or (('what' in x)and('holding' in x))):
return True
else:
return False
def testSearchAllBwForAnything(x):
if ((('search' in x)and('all' in x)) or (('search' in x)and('entire' in x))or
(('look' in x)and('all' in x))or (('look' in x)and('entire' in x))):
return True
else:
return False
def testHelp(x):
if ('help' in x):
return True
else:
return False
def convertSpeechToFunction(input):
    """Translate a free-text command into a list of Agent call strings.

    The input is lower-cased, tokenised with nltk, split on the first 'and',
    and each clause is matched against the intent predicates above in
    priority order. Unrecognised clauses map to "Agent.error()".
    """
    outputList = []
    #store as all lowercase
    input = input.lower()
    #convert to tokens
    token = (nltk.word_tokenize(input))
    #test for 'and', and separate to process each function
    delimited = delimiteByAnd(token)
    for x in delimited:
        # NOTE: order matters — the broad "search all" test must run before
        # the narrower search-node test.
        if testForCurrentLocation(x):
            outputList.append("Agent.currentLocation()")
        elif testForMakeMove(x):
            config = findConfig(x)
            outputList.append("Agent.goToNode("+config+")")
        elif testSearchAllBwForAnything(x):
            outputList.append("Agent.searchBwForAnyObject()")
        elif testForSearchNode(x):
            outputList.append("Agent.examineLocation()")
        elif testForPickUp(x):
            outputList.append("Agent.pickItUp()")
        elif testForPutDown(x):
            outputList.append("Agent.putItDown()")
        elif testAskAboutHolding(x):
            outputList.append("Agent.currentlyCarrying()")
        elif testHelp(x):
            outputList.append("Agent.help()")
        else:
            outputList.append("Agent.error()")
    return outputList
|
import argparse
import numpy as np
# Checkpoint location for each (model architecture, framework) pair.
MODEL_PATH_DICT = {
    "cnn": {
        "tf": "saved_models/BreastDensity_BaselineBreastModel/model.ckpt",
        "torch": "saved_models/BreastDensity_BaselineBreastModel/model.p",
    },
    "histogram": {
        "tf": "saved_models/BreastDensity_BaselineHistogramModel/model.ckpt",
        "torch": "saved_models/BreastDensity_BaselineHistogramModel/model.p",
    },
}
def get_result(library, device_type, model_type):
    """Run one density-model inference and return its output.

    library: "tf" or "torch" (anything else raises RuntimeError);
    device_type: "cpu" or "gpu"; model_type: "cnn" or "histogram".
    """
    # Import lazily so only the framework under test must be installed.
    if library == "tf":
        import density_model_tf
        inference_func = density_model_tf.inference
    elif library == "torch":
        import density_model_torch
        inference_func = density_model_torch.inference
    else:
        raise RuntimeError(library)
    return inference_func({
        "model_type": model_type,
        "model_path": MODEL_PATH_DICT[model_type][library],
        "device_type": device_type,
        "gpu_number": 0,
        "image_path": "images/",
        "input_size": (2600, 2000),
        "bins_histogram": 50,
    }, verbose=False)
# Reference 4-class probabilities, captured from a trusted CPU run.
GOLDEN_RESULT = {
    "histogram": (0.0819444, 0.78304, 0.133503, 0.00151265),
    "cnn": (0.209689, 0.765076, 0.024949, 0.000285853),
}
# CPU-GOLDEN Consistency
def test_tf_golden_equal_cnn():
    """TF CNN on CPU must reproduce the golden probabilities."""
    assert np.allclose(get_result("tf", "cpu", "cnn"), GOLDEN_RESULT["cnn"])

def test_torch_golden_equal_cnn():
    """Torch CNN on CPU must reproduce the golden probabilities."""
    assert np.allclose(get_result("torch", "cpu", "cnn"), GOLDEN_RESULT["cnn"])

def test_tf_golden_equal_histogram():
    """TF histogram model on CPU must reproduce the golden probabilities."""
    assert np.allclose(get_result("tf", "cpu", "histogram"), GOLDEN_RESULT["histogram"])

def test_torch_golden_equal_histogram():
    """Torch histogram model on CPU must reproduce the golden probabilities."""
    assert np.allclose(get_result("torch", "cpu", "histogram"), GOLDEN_RESULT["histogram"])

# CPU-GPU Consistency
def test_tf_cpu_gpu_equal_cnn():
    """TF CNN must give the same result on CPU and GPU."""
    assert np.allclose(get_result("tf", "cpu", "cnn"), get_result("tf", "gpu", "cnn"))

def test_torch_cpu_gpu_equal_cnn():
    """Torch CNN must give the same result on CPU and GPU."""
    assert np.allclose(get_result("torch", "cpu", "cnn"), get_result("torch", "gpu", "cnn"))

def test_tf_cpu_gpu_equal_histogram():
    """TF histogram model must give the same result on CPU and GPU."""
    assert np.allclose(get_result("tf", "cpu", "histogram"), get_result("tf", "gpu", "histogram"))

def test_torch_cpu_gpu_equal_histogram():
    """Torch histogram model must give the same result on CPU and GPU."""
    assert np.allclose(get_result("torch", "cpu", "histogram"), get_result("torch", "gpu", "histogram"))
if __name__ == "__main__":
    # CLI driver: choose the framework under test and, optionally, the
    # CPU/GPU consistency checks.
    parser = argparse.ArgumentParser(description='Run Tests')
    parser.add_argument('--using')
    parser.add_argument('--with-gpu', action="store_true")
    args = parser.parse_args()
    test_list = []
    if args.using == "tf":
        test_list.append(test_tf_golden_equal_cnn)
        test_list.append(test_tf_golden_equal_histogram)
        if args.with_gpu:
            test_list.append(test_tf_cpu_gpu_equal_cnn)
            test_list.append(test_tf_cpu_gpu_equal_histogram)
    elif args.using == "torch":
        test_list.append(test_torch_golden_equal_cnn)
        test_list.append(test_torch_golden_equal_histogram)
        if args.with_gpu:
            test_list.append(test_torch_cpu_gpu_equal_cnn)
            test_list.append(test_torch_cpu_gpu_equal_histogram)
    else:
        raise RuntimeError("Provide --using 'tf' or 'torch'")
    # Run sequentially; re-raise on the first failure after reporting it.
    for test_func in test_list:
        try:
            test_func()
            print("{}: PASSED".format(test_func.__name__))
        except Exception as e:
            print("{}: FAILED".format(test_func.__name__))
            raise
    print("All {} test(s) passed.".format(len(test_list)))
|
<reponame>NinaWie/NeurIPS2021-traffic4cast<filename>data/dataset/dataset.py
# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.
# IARAI licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Optional
from typing import Tuple
import os
import time
import numpy as np
from scipy.ndimage import rotate
import torch
from torch.utils.data import Dataset
from competition.competition_constants import MAX_TEST_SLOT_INDEX
from competition.prepare_test_data.prepare_test_data import prepare_test
from util.h5_util import load_h5_file
class T4CDataset(Dataset):
    """Random-slot torch dataset over Traffic4cast 8-channel h5 files."""

    def __init__(
        self,
        root_dir: str,
        auto_filter: str = "train",
        file_filter: str = None,
        test_city="ANTWERP",
        limit: Optional[int] = None,
        transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
        use_npy: bool = False,
        **kwargs,
    ):
        """torch dataset from training data.
        Parameters
        ----------
        root_dir
            data root folder, by convention should be `data/raw`, see `data/README.md`. All `**/training/*8ch.h5` will be added to the dataset.
        auto_filter
            "train" selects 2019 files, "test" selects 2020 files; only used
            when file_filter is None (any other value leaves the filter unset).
        file_filter: str
            filter files under `root_dir`, defaults to `"**/training/*ch8.h5`
        test_city
            city excluded from the training split (used for validation).
        limit
            truncate dataset size
        transform
            transform applied to both the input and label
        """
        self.root_dir = root_dir
        self.limit = limit
        self.files = []
        self.use_npy = use_npy
        self.transform = transform
        if file_filter is not None:
            self.file_filter = file_filter
        else:
            if auto_filter == "train":
                self.file_filter = "**/training/*2019*8ch.h5"
            elif auto_filter == "test":
                # (stray f-string prefix removed; there are no placeholders)
                self.file_filter = "**/training/*2020*8ch.h5"
        print(self.file_filter)
        self._load_dataset()
        print("nr files in ds", len(self.files))
        # Explicitely delete the validation city from the training data
        if auto_filter == "train" and (file_filter is None):
            self.files = [f for f in self.files if not (test_city in str(f))]

    def _load_dataset(self):
        # Collect all h5/npy paths matching the glob under root_dir.
        self.files = list(Path(self.root_dir).rglob(self.file_filter))

    def _load_h5_file(self, fn, sl: Optional[slice]):
        if self.use_npy:
            return np.load(fn)
        else:
            return load_h5_file(fn, sl=sl)

    def __len__(self):
        # Each file contributes MAX_TEST_SLOT_INDEX candidate start slots.
        size_240_slots_a_day = len(self.files) * MAX_TEST_SLOT_INDEX
        if self.limit is not None:
            return min(size_240_slots_a_day, self.limit)
        return size_240_slots_a_day

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        # Valid indices are 0 .. len-1 (the old `>` let idx == len through).
        if idx >= self.__len__():
            raise IndexError("Index out of bounds")
        # Sampling is random: idx only bounds the epoch length, it does not
        # address a specific (file, slot) pair.
        file_idx = int(np.random.rand() * len(self.files))
        start_hour = int(np.random.rand() * MAX_TEST_SLOT_INDEX)
        if idx == 0:
            print("Idx 0", file_idx, start_hour)
        # 25 frames = 12 input steps + 12 prediction steps + 1.
        two_hours = self._load_h5_file(self.files[file_idx], sl=slice(start_hour, start_hour + 12 * 2 + 1))
        input_data, output_data = prepare_test(two_hours)
        input_data = self._to_torch(input_data)
        output_data = self._to_torch(output_data)
        if self.transform is not None:
            input_data = self.transform(input_data)
            output_data = self.transform(output_data)
        return input_data, output_data

    def _to_torch(self, data):
        # Convert numpy arrays to float32 torch tensors.
        data = torch.from_numpy(data)
        data = data.to(dtype=torch.float)
        return data
class PatchT4CDataset(T4CDataset):
    """T4CDataset variant that serves pre-sampled square patches from memory.

    On construction (and periodically afterwards) `_cache_data` draws
    `limit` random files, cuts `use_per_file` random (2*radius)^2 patches
    from each and keeps them as tensors; `__getitem__` then only indexes the
    cache and re-samples it every `resample_every_x_epoch` passes.
    """

    def __init__(
        self,
        root_dir: str,
        file_filter: str = None,
        limit: Optional[int] = 100,
        transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
        use_npy: bool = False,
        use_per_file=10,
        radius=50,
        auto_filter: str = "train",
        use_static_map=False,
        augment=False,
        **kwargs,
    ):
        super().__init__(root_dir, file_filter=file_filter, auto_filter=auto_filter, limit=limit, transform=transform, use_npy=use_npy)
        # load static maps
        self.use_static_map = use_static_map
        if self.use_static_map:
            cities = np.unique([self.get_city_for_file(f) for f in self.files])
            print("Using static map! - cities in dataset:", cities)
            self.static_maps = self.get_static_maps(cities)
        self.n_load_files = limit  # The number of loaded files depends on whether we have train or test
        self.use_per_file = use_per_file  # use per file is fixed, we have to see how much it falsifies the val acc
        # We always run 2 epochs if we are at train time
        if auto_filter == "train":
            self.resample_every_x_epoch = 2
        else:
            self.resample_every_x_epoch = 1
        self.augment = augment
        print("Augment data?", self.augment)
        # counts served samples; drives periodic cache refresh in __getitem__
        self.internal_counter = 0
        self.auto_filter = auto_filter
        self.radius = radius
        self._cache_data()

    def get_city_for_file(self, file):
        # assumes file names of the form ..._<CITY>_<suffix> — TODO confirm
        # the naming scheme against the actual data layout
        return str(file).split("_")[-2]

    def get_static_maps(self, cities):
        """Load ``<city>_static.h5`` for each city into a dict keyed by city name."""
        static_dict = {}
        for city in cities:
            path_to_static = os.path.join(self.root_dir, city, f"{city}_static.h5")
            static_dict[city] = load_h5_file(path_to_static)
        return static_dict

    def _cache_data(self):
        """
        Load multiple random files, extract random patches and refresh the
        in-memory dataset (sets ``self.data_x`` / ``self.data_y``).
        Returns
        -------
        data_x: Torch tensor with input data
        data_y: Torch tensor with ground truth
        """
        print("\n ---------- ", self.internal_counter, self.auto_filter, "MAKE NEW DATASET -------------")
        use_files = np.random.choice(self.files, size=self.n_load_files, replace=False)
        nr_samples = self.n_load_files * self.use_per_file
        data_x = np.zeros((nr_samples, 12, 2 * self.radius, 2 * self.radius, 8))
        data_y = np.zeros((nr_samples, 6, 2 * self.radius, 2 * self.radius, 8))
        data_static = np.zeros((nr_samples, 9, 2 * self.radius, 2 * self.radius))
        # print("allocated:", data_x.shape, data_y.shape)
        # the two spatial axes of a (time, H, W, channel) patch
        img_plane = (1, 2)
        counter = 0
        for file in use_files:
            loaded_file = load_h5_file(file)
            # random_times = (np.random.rand(self.use_per_file) * 264).astype(int)
            # start slots from a clipped normal centered on mid-range slot 132
            random_times = np.clip(np.random.normal(scale=1.2, size=self.use_per_file) * 132 / 3 + 132, 0, 264).astype(int)
            rand_x = (np.random.rand(self.use_per_file) * (495 - 2 * self.radius)).astype(int) + self.radius
            rand_y = (np.random.rand(self.use_per_file) * (436 - 2 * self.radius)).astype(int) + self.radius
            # print("loaded file ", file, loaded_file.shape, random_times)
            for i in range(self.use_per_file):
                start_hour = random_times[i]
                end_hour = start_hour + 24
                s_x, e_x = (rand_x[i] - self.radius, rand_x[i] + self.radius)  # start and end x of patch
                s_y, e_y = (rand_y[i] - self.radius, rand_y[i] + self.radius)  # start and end y of patch
                two_hours = loaded_file[start_hour:end_hour, s_x:e_x, s_y:e_y]
                if self.augment:
                    # flip horizontally
                    if np.random.rand() < 0.5:
                        two_hours = np.flip(two_hours, axis=img_plane[0])
                        # print("flipped along axis 1", two_hours.shape)
                    # flip vertically
                    if np.random.rand() < 0.5:
                        two_hours = np.flip(two_hours, axis=img_plane[1])
                        # print("flipped along axis 2", two_hours.shape)
                    # rotate
                    rot_angle = np.random.choice([0, 90, 180, 270])
                    two_hours = rotate(two_hours, rot_angle, axes=img_plane)
                    # print("rotated by", rot_angle, two_hours.shape)
                # print("two hours", start_hour, s_x, s_y, two_hours.shape)
                # print("two hours", two_hours.shape)
                # self._load_h5_file(self.files[file_idx], sl=slice(start_hour, start_hour + 12 * 2 + 1))
                input_data, output_data = prepare_test(two_hours)
                data_x[counter] = input_data
                data_y[counter] = output_data
                # add static data
                if self.use_static_map:
                    city_of_file = self.get_city_for_file(file)
                    data_static[counter] = self.static_maps[city_of_file][:, s_x:e_x, s_y:e_y]
                counter += 1
        # torch and transform
        data_x = self._to_torch(data_x)
        data_y = self._to_torch(data_y)
        if self.transform is not None:
            data_x = self.transform(data_x)
            data_y = self.transform(data_y)
        # update dataset
        self.data_x = data_x
        self.data_y = data_y
        # concatenate static data to data_x after transform
        if self.use_static_map:
            data_static = self._to_torch(data_static)
            if self.transform is not None:
                data_static = self.transform(torch.unsqueeze(data_static, dim=-1))
            self.data_x = torch.cat((self.data_x, data_static), dim=1)

    def one_img_cache_data(self):
        """
        Test function for a single big file to create the patches
        """
        print("\n ---------- SPECIAL DATASET -------------")
        use_file = np.random.choice(self.files, 1, replace=False)[0]
        some_hour = int(np.random.rand() * 240)
        test_arr = load_h5_file(use_file)
        x_hour = test_arr[some_hour : some_hour + 12]
        # ground-truth offsets relative to the end of the 12-frame input window
        # (presumably the 5/10/15/30/45/60-minute horizons — verify)
        test_out_gt_inds = np.add([1, 2, 3, 6, 9, 12], 11 + some_hour)
        y_hour = test_arr[test_out_gt_inds]
        data_x, _, _ = create_patches(x_hour)
        data_y, _, _ = create_patches(y_hour)
        print(data_x.shape)
        print(data_y.shape)
        data_x = self._to_torch(data_x)
        data_y = self._to_torch(data_y)
        if self.transform is not None:
            data_x = self.transform(data_x)
            data_y = self.transform(data_y)
        # print("inp and outp after transform", data_x.size(), data_y.size())
        return data_x, data_y

    def __len__(self):
        # fixed size: the cache always holds n_load_files * use_per_file patches
        return self.n_load_files * self.use_per_file

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        # count served samples and re-sample the whole cache every
        # `resample_every_x_epoch` full passes over the dataset
        self.internal_counter += 1
        # print(self.internal_counter, idx)
        if self.internal_counter % (len(self) * self.resample_every_x_epoch) == 0:
            self._cache_data()
        return self.data_x[idx], self.data_y[idx]
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    # Smoke test: tiny patch dataset, a few epochs of plain iteration.
    dataset = PatchT4CDataset("data/raw", auto_filter="train", limit=3)
    train_loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)
    for _epoch in range(10):
        print("NEW EPOCH")
        for d_x, d_y in train_loader:
            print(d_x.shape, d_y.shape)
|
from typing import Any, Dict, List, Tuple, Type
from dbnd._core.configuration.config_value import ConfigValue
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.utils.basics.text_banner import safe_string
class TaskParameters(object):
    """Read-only view over a task's parameter definitions and current values."""

    def __init__(self, task):
        self.task = task
        self.task_meta = self.task.task_meta
        self._params = [pv.parameter for pv in self.task_meta.task_params]
        self._param_obj_map = {p.name: p for p in self._params}
        self._param_meta_map = {
            pv.name: pv for pv in self.task_meta.task_params
        }

    def get_param(self, param_name):
        """Return the ParameterDefinition for `param_name`, or None."""
        return self._param_obj_map.get(param_name, None)

    def get_value(self, param_name):
        """Return the current value of `param_name` on the task.

        We don't want to trigger auto-read while running this function (it is
        mostly called from "banner" printing), so when an auto-read is in
        progress we return the original values instead.
        """
        original_values = self.task._task_auto_read_original
        if original_values is not None:
            return original_values[param_name]
        return getattr(self.task, param_name)

    def get_param_meta(self, param_name):  # type: (str)->ConfigValue
        """Return the ConfigValue metadata for `param_name`, or None."""
        return self._param_meta_map.get(param_name, None)

    def get_params(
        self,
        param_cls=ParameterDefinition,
        significant_only=False,
        input_only=False,
        output_only=False,
        user_only=False,
    ):
        # type: (...)-> List[ParameterDefinition]
        """Return parameter definitions, narrowed by the given filter flags."""
        selected = list(self._params)
        if param_cls is not None:
            selected = [p for p in selected if isinstance(p, param_cls)]
        if significant_only:
            selected = [p for p in selected if p.significant]
        if input_only:
            selected = [p for p in selected if not p.is_output()]
        if output_only:
            selected = [p for p in selected if p.is_output()]
        if user_only:
            selected = [p for p in selected if not p.system]
        return selected

    def get_param_values(
        self,
        param_cls=ParameterDefinition,
        significant_only=False,
        input_only=False,
        output_only=False,
        user_only=False,
    ):
        # type: (Type[ParameterDefinition], bool, bool, bool, bool) -> List[ Tuple[ParameterDefinition, Any]]
        """Like get_params, but pairs each definition with its current value."""
        selected = self.get_params(
            param_cls=param_cls,
            significant_only=significant_only,
            input_only=input_only,
            output_only=output_only,
            user_only=user_only,
        )
        return [(p, self.get_value(p.name)) for p in selected]

    # TODO: change name to "to_string"
    def get_params_serialized(
        self, param_cls=ParameterDefinition, significant_only=False, input_only=False
    ):
        """Return (name, signature-of-value) pairs for the selected parameters."""
        pairs = self.get_param_values(
            param_cls=param_cls,
            significant_only=significant_only,
            input_only=input_only,
        )
        return [(p.name, p.signature(value)) for p, value in pairs]

    def get_param_value_origin(self, param_name):
        # type: (str) -> str
        """Return where the param's value was created: ctor/CLI argument/env var/config."""
        from dbnd._core.task_ctrl.task_visualiser import _MAX_VALUE_SIZE

        param_meta = self.get_param_meta(param_name)
        value_source = param_meta.source if param_meta else ""
        return safe_string(value_source, _MAX_VALUE_SIZE)

    def to_env_map(self, *param_names):
        # type: (List[str]) -> Dict[str, str]
        """Render non-None parameter values as an env-var map, optionally filtered by name."""
        env_map = {}
        for p, value in self.get_param_values():
            if value is None:
                continue
            if param_names and p.name not in param_names:
                continue
            env_map[p.get_env_key(self.task.task_name)] = p.to_str(value)
        return env_map

    def get_param_env_key(self, param_name):
        """Return the env-var key for a single parameter."""
        return self.get_param(param_name).get_env_key(self.task.task_name)
|
#IN NEW VERSIONS of qtcreator:
# add this file in Options -> Debugger -> GDB -> Extra Dumper Files
#IN OLD VERSIONS, this is loaded via .gdbinit and you need the python and end flags!
#python
#import sys
#sys.path.insert(0, '/home/mtoussai/opt/qtcreator-4.2.0-rc1/share/qtcreator/debugger')
from dumper import *
import math
def qdump__rai__String(d, value):
    """Qt Creator dumper for rai::String: displays "'<chars>' [<len>]"."""
    p = value["p"]
    N = int(value["N"])
    s = "'"
    # read N raw chars through the data pointer
    for i in xrange(0, N):
        s += "%c" % int(p.dereference())
        p += 1
    s += "' [%i]" % N
    d.putValue(s)
    d.putNumChild(2)
    if d.isExpanded():
        with Children(d):
            d.putSubItem("N", value["N"])
            d.putSubItem("p", value["p"])
def qdump__rai__Enum(d, value):
    """Dumper for rai::Enum: display only the wrapped member `x`."""
    d.putItem(value["x"])
def qdump__LIST(d, value):
    """Generic dumper for rai list types: "<N>" summary, expands to at most
    10 elements.

    Fix: removed a no-op `i += 1` at the end of the loop body — the for
    statement reassigns `i` each iteration, so the increment had no effect.
    """
    p = value["p"]
    N = int(value["N"])
    s = "<%i>" % N
    d.putValue(s)
    # cap the expansion at 10 entries
    m = min(N, 10)
    d.putNumChild(m + 1)
    if d.isExpanded():
        with Children(d):
            for i in xrange(0, m):
                s = "(%i)" % i
                d.putSubItem(s, p.dereference())
                p += 1
            # d.putSubItem("p", p)
def qdump__rai__Array(d, value):
    """Dumper for rai::Array: shows the shape ("<d0 d1 d2>" per rank) and
    expands to at most 10 elements plus the bookkeeping members.

    Fixes: `s` now has a fallback initialization (for nd > 3 it was unbound,
    raising NameError), and a no-op `i += 1` inside the for loop was removed.
    """
    p = value["p"]
    N = int(value["N"])
    nd = int(value["nd"])
    d0 = int(value["d0"])
    d1 = int(value["d1"])
    d2 = int(value["d2"])
    # fallback label for unexpected ranks (previously: NameError)
    s = "<nd=%i>" % nd
    if nd == 0:
        s = "<>"
    if nd == 1:
        s = "<%i>" % d0
    if nd == 2:
        s = "<%i %i>" % (d0, d1)
    if nd == 3:
        s = "<%i %i %i>" % (d0, d1, d2)
    d.putValue(s)
    # cap the expansion at 10 entries
    m = min(N, 10)
    d.putNumChild(m + 4)
    if d.isExpanded():
        with Children(d):
            d.putSubItem("N", value["N"])
            for i in xrange(0, m):
                # multi-index label; python2 integer division is intentional
                if nd == 1:
                    s = "(%i)" % (i)
                if nd == 2:
                    s = "(%i,%i)" % (i / d1, i % d1)
                if nd == 3:
                    s = "(%i,%i,%i)" % (i / (d1 * d2), (i / d2) % d1, i % d2)
                d.putSubItem(s, p.dereference())
                p += 1
            d.putSubItem("p", value["p"])
            d.putSubItem("reference", value["reference"])
            d.putSubItem("special", value["special"])
def qdump__Node_typed(d, value):
    """Dumper for Node_typed: renders "key1 key2 (parent1 parent2 ...)".

    Keys are rai::String values; for each parent only its LAST key is
    printed (or "(index)" when the parent has no keys).
    """
    keys_N = int(value["keys"]["N"])
    keys_p = value["keys"]["p"]
    pars_N = int(value["parents"]["N"])
    pars_p = value["parents"]["p"]
    s = ""
    # concatenate all of this node's own keys
    for i in xrange(0, keys_N):
        string = keys_p.dereference()
        string_N = int(string["N"])
        string_p = string["p"]
        for j in xrange(0, string_N):
            s += "%c" % int(string_p.dereference())
            string_p += 1
        keys_p += 1
        # NOTE(review): this condition is always true inside the loop
        # (i ranges over 0..keys_N-1), so every key gets a trailing space;
        # possibly `i < keys_N - 1` was intended, as in the parents loop.
        if(i<keys_N):
            s += " "
    s += "("
    # append the last key of each parent (or its index when it has none)
    for i in xrange(0, pars_N):
        par = pars_p.dereference()
        parkeys_N = int(par["keys"]["N"])
        parkeys_p = par["keys"]["p"]
        if(parkeys_N>0):
            string = (parkeys_p+(parkeys_N-1)).dereference()
            string_N = int(string["N"])
            string_p = string["p"]
            for j in xrange(0, string_N):
                s += "%c" % int(string_p.dereference())
                string_p += 1
        else:
            s += "(%i)" % int(par["index"]);
        pars_p += 1
        if(i<pars_N-1):
            s += " "
    s += ")"
    d.putValue(s)
    d.putNumChild(4)
    if d.isExpanded():
        with Children(d):
            d.putSubItem("value", value["value"])
            d.putSubItem("keys", value["keys"])
            d.putSubItem("parents", value["parents"])
            d.putSubItem("numChildren", value["numChildren"])
            d.putSubItem("parentOf", value["parentOf"])
            d.putSubItem("index", value["index"])
            d.putSubItem("container", value["container"])
def qdump__NodeL(d, value):
    """Node lists render like generic LISTs."""
    qdump__LIST(d, value)
def qdump__Graph(d, value):
    """Dumper for Graph: "<N>" summary, expands to at most 10 nodes plus flags."""
    p = value["p"]
    N = int(value["N"])
    s = "<%i>" % N
    d.putValue(s)
    # cap the expansion at 10 entries
    m = N
    if m > 10:
        m = 10
    d.putNumChild(m + 1)
    if d.isExpanded():
        with Children(d):
            for i in xrange(0, m):
                s = "(%i)" % i
                d.putSubItem(s, p.dereference())
                p += 1
            d.putSubItem("isNodeOfGraph", value["isNodeOfGraph"])
            d.putSubItem("isIndexed", value["isIndexed"])
            # d.putSubItem("p", value["p"])
def qdump__BodyL(d, value):
    """Body lists render like generic LISTs."""
    qdump__LIST(d, value)

def qdump__ShapeL(d, value):
    """Shape lists render like generic LISTs."""
    qdump__LIST(d, value)

def qdump__JointL(d, value):
    """Joint lists render like generic LISTs."""
    qdump__LIST(d, value)

def qdump__ProxyL(d, value):
    """Proxy lists render like generic LISTs."""
    qdump__LIST(d, value)
#def qdump__rai__Vector(d, value):
# x=value["x"]
# y=value["y"]
# z=value["z"]
# s = "[%g %g %g]" % (x,y,z)
# d.putValue(s)
# d.putNumChild(1)
# if d.isExpanded():
# with Children(d):
# d.putSubItem("isZero", value["isZero"])
# with SubItem(d, "length"):
# d.putValue(math.sqrt(x*x+y*y+z*z))
# d.putType("float")
# d.putNumChild(0)
#def qdump__rai__Quaternion(d, value):
# w=value["w"]
# x=value["x"]
# y=value["y"]
# z=value["z"]
# s = "[%d %d %d %d]" % (w,x,y,z)
# d.putValue(s)
# d.putNumChild(1)
# if d.isExpanded():
# with Children(d):
# d.putSubItem("isZero", value["isZero"])
# with SubItem(d, "degrees"):
# d.putValue(360.0/math.pi*math.acos(w))
# d.putType("float")
# d.putNumChild(0)
def qdump__rai__Frame(d, value):
    """Dumper for rai::Frame: displays "(<ID>) <name>"."""
    ID = int(value["ID"])
    name = value["name"]
    s = "(%i) " % ID
    # append the frame name (a rai::String) char by char
    string_N = int(name["N"])
    string_p = name["p"]
    for j in xrange(0, string_N):
        s += "%c" % int(string_p.dereference())
        string_p += 1
    d.putValue(s)
    d.putNumChild(1)
    if d.isExpanded():
        with Children(d):
            d.putSubItem("ID", value["ID"])
            d.putSubItem("name", value["name"])
            d.putSubItem("parent", value["parent"])
            d.putSubItem("outLinks", value["outLinks"])
            d.putSubItem("joint", value["joint"])
            d.putSubItem("shape", value["shape"])
            d.putSubItem("inertia", value["inertia"])
            d.putSubItem("Q", value["Q"])
            d.putSubItem("X", value["X"])
            d.putSubItem("ats", value["ats"])
            d.putSubItem("flags", value["flags"])
            d.putSubItem("active", value["active"])
            d.putSubItem("K", value["K"])
#end
|
import torch.nn as nn
import torch
from . import config
from DLBio.pytorch_helpers import get_device
# Project-wide defaults from the shared config module: which normalization
# the depth-wise branches use and which 1x1-conv flavour to build.
DEFAULT_BN = config.DEFAULT_BN
DEFAULT_1X1 = config.DEFAULT_1X1
class ResidualAdapter(nn.Module):
    """Wrap a block with a residual (shortcut) connection.

    If the block halves the spatial size (stride > 1), the shortcut is
    average-pooled to match; if the block changes the channel count, the
    shortcut is either projected with a 1x1 conv (`use_1x1`) or zero-padded.

    Fixes:
    - the channel-padding tensor is now created directly on the output's
      device/dtype; the old `padding.cuda()` call put it on the default
      CUDA device, breaking multi-GPU (e.g. cuda:1) setups;
    - `block.stride` may be an int or a tuple (nn.Conv2d stores a tuple);
      the comparison previously crashed with a TypeError for tuples.
    """

    def __init__(self, block, relu_after_shortcut=False, use_1x1=False, in_dim=-1, out_dim=-1):
        super(ResidualAdapter, self).__init__()
        if use_1x1:
            # project the shortcut when the block shrinks the channel count
            self.conv_for_res = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        else:
            self.conv_for_res = None
        # normalize int vs. tuple strides before comparing
        stride = block.stride
        if isinstance(stride, (tuple, list)):
            stride = stride[0]
        if stride > 1:
            self.downsample = nn.AvgPool2d(
                (2, 2), stride=(2, 2), ceil_mode=True)
        else:
            self.downsample = None
        self.block = block
        self.relu = nn.ReLU()
        self.relu_after_shortcut = relu_after_shortcut

    def forward(self, x):
        out = self.block(x)
        if self.downsample is not None:
            shortcut = self.downsample(x)
            featuremap_size = shortcut.size()[2:4]
        else:
            shortcut = x
            featuremap_size = out.size()[2:4]
        if self.conv_for_res is not None:
            # in case residual_channel < shortcut channel
            shortcut = self.conv_for_res(shortcut)
        batch_size = out.size()[0]
        residual_channel = out.size()[1]
        shortcut_channel = shortcut.size()[1]
        if residual_channel != shortcut_channel:
            # zero-pad the missing channels, on the same device/dtype as `out`
            padding = torch.zeros(
                batch_size,
                residual_channel - shortcut_channel,
                featuremap_size[0],
                featuremap_size[1],
                device=out.device,
                dtype=out.dtype,
            )
            out = out + torch.cat((shortcut, padding), 1)
        else:
            out = out + shortcut
        # old ResNet Basic Block uses ReLU after shortcut
        if self.relu_after_shortcut:
            return self.relu(out)
        else:
            return out
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
class IBase(nn.Module):
    """Base for the multiplicative 'FP' blocks: optional upper 1x1 ->
    two parallel depth-wise conv branches -> element-wise product ->
    optional lower 1x1.

    Subclasses replace `upper`/`lower` (identity by default) and may
    override `forward` to change how the two branches are combined.
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        # NOTE(review): in_dim/out_dim/k are accepted for a uniform subclass
        # interface but not used here — subclasses build the actual layers.
        super(IBase, self).__init__()
        self.upper = nn.Identity()
        self.lower = nn.Identity()
        self.left_dw = None
        self.right_dw = None
        self.stride = stride
        self.which_batch_norm = kwargs.get('which_batch_norm', DEFAULT_BN)
        self.mult = Multiplication()

    def _init_dw(self, dim, k, stride, use_relu, padding=None):
        # build the two parallel depth-wise branches
        self.left_dw = self._get_dw(dim, k, stride, use_relu, padding=padding)
        self.right_dw = self._get_dw(dim, k, stride, use_relu, padding=padding)

    def _get_dw(self, dim, k, stride, use_relu, padding=None):
        """Build one depth-wise branch: conv -> (normalization) -> (relu)."""
        if padding is None:
            # 'same' padding for odd kernel sizes
            padding = (k - 1) // 2
        else:
            print(f'Use new padding value: {padding}')
        tmp = nn.Sequential()
        if k != 3:
            print(f'using conv with kernel size: {k}')
        tmp.add_module('dw_conv', nn.Conv2d(
            dim, dim, k, groups=dim, padding=padding,
            stride=stride, bias=False)
        )
        # normalization layer selected by `which_batch_norm`
        if self.which_batch_norm == 'batch':
            print('using batchnorm')
            tmp.add_module('bn', nn.BatchNorm2d(dim))
        elif self.which_batch_norm == 'group':
            print('using group norm')
            tmp.add_module('bn', nn.GroupNorm(dim // 4, dim))
        elif self.which_batch_norm == 'instance':
            print('using instance norm')
            tmp.add_module('bn', nn.InstanceNorm2d(dim))
        elif self.which_batch_norm == 'none':
            print('no normalization')
        elif self.which_batch_norm == 'gdn':
            # third-party dependency; imported lazily so other options
            # work without pytorch_gdn installed
            from pytorch_gdn import GDN
            print('using generalized divisive norm')
            tmp.add_module('gdn', GDN(dim, get_device()))
        else:
            raise ValueError(
                f'unknown normalization type: {self.which_batch_norm}'
            )
        if use_relu:
            tmp.add_module('relu', nn.ReLU())
        return tmp

    def forward(self, x):
        x = self.upper(x)
        x_left = self.left_dw(x)
        x_right = self.right_dw(x)
        # element-wise product of the two branches
        x = self.mult(x_left, x_right)
        x = self.lower(x)
        return x
class ILogBase(IBase):
    """IBase variant that multiplies the branches in the log2 domain:
    log2(left) + log2(right), re-normalized, then mapped back via 2**x.

    Fix: `_init_dw` now accepts and forwards the `padding` argument so its
    signature stays consistent with `IBase._init_dw` (it was silently
    dropped before).
    """

    def _init_dw(self, dim, k, stride, use_relu, padding=None):
        super(ILogBase, self)._init_dw(dim, k, stride, use_relu, padding=padding)
        self.log = torch.log2
        self.exp = Exp2Layer()
        self.relu = nn.ReLU6()
        # affine=False: pure re-normalization of the summed logs
        self.bn = nn.BatchNorm2d(dim, affine=False)

    def forward(self, x):
        x = self.upper(x)
        x_left = self.left_dw(x)
        x_right = self.right_dw(x)
        # clamp to [0, 6] and shift by 0.5 so the log2 input is strictly positive
        x_left = self.relu(x_left) + .5
        x_right = self.relu(x_right) + .5
        x = self.log(x_left) + self.log(x_right)
        x = self.bn(x)
        x = self.exp(x)
        x = self.lower(x)
        return x
class IPoolBase(IBase):
    """IBase variant that multiplies the left branch with the NEGATED
    max-pool of the right branch.

    Fix: `_init_dw` now accepts and forwards the `padding` argument so its
    signature stays consistent with `IBase._init_dw` (it was silently
    dropped before).
    """

    def _init_dw(self, dim, k, stride, use_relu, padding=None):
        super(IPoolBase, self)._init_dw(dim, k, stride, use_relu, padding=padding)
        # 3x3 max-pool that preserves the spatial size
        self.pool = nn.MaxPool2d(3, stride=1, padding=1)

    def forward(self, x):
        x = self.upper(x)
        x_left = self.left_dw(x)
        x_right = self.right_dw(x)
        x_right = -1. * self.pool(x_right)
        x = x_left * x_right
        x = self.lower(x)
        return x
class IBPoolBase(IPoolBase):
    """Variant of IPoolBase that max-pools BOTH branches (un-negated)
    before multiplying them."""

    def forward(self, x):
        x = self.upper(x)
        left = self.pool(self.left_dw(x))
        right = self.pool(self.right_dw(x))
        return self.lower(left * right)
class IStructBase(IPoolBase.__mro__[1]):
    """Combines the branches as relu(fx)*relu(fy) - relu(fx*fy)."""

    def forward(self, x):
        x = self.upper(x)
        fx = self.left_dw(x)
        fy = self.right_dw(x)
        product_of_rectified = torch.relu(fx) * torch.relu(fy)
        rectified_product = torch.relu(fx * fy)
        return self.lower(product_of_rectified - rectified_product)
class BilinearFPLayer(nn.Module):
    """Bilinear block: two depth-wise branches expand the channels q-fold,
    their product is folded back by a grouped 1x1 conv, then projected to
    `out_dim` by a ConvBNReLU."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        super().__init__()
        use_relu = kwargs.get('use_relu', False)
        q = kwargs.get('q', 2)
        # grouped 1x1 that collapses the q-fold expansion back to in_dim
        self.combine = nn.Conv2d(
            q * in_dim, in_dim, 1, groups=in_dim, bias=False)
        self.lower = ConvBNReLU(in_dim, out_dim, 1)
        self.left_dw = None
        self.right_dw = None
        self.stride = stride
        self._init_dw(in_dim, k, stride, use_relu, q)

    def _init_dw(self, dim, k, stride, use_relu, q):
        # the two parallel expanding depth-wise branches
        self.left_dw = self._get_dw(dim, k, stride, use_relu, q=q)
        self.right_dw = self._get_dw(dim, k, stride, use_relu, q=q)

    def _get_dw(self, dim, k, stride, use_relu, q=2):
        """One branch: depth-wise conv expanding to dim*q -> BN -> (relu)."""
        branch = nn.Sequential()
        branch.add_module('dw_conv', nn.Conv2d(
            dim, dim * q, k, groups=dim, padding=k // 2, stride=stride, bias=False)
        )
        branch.add_module('bn', nn.BatchNorm2d(dim * q))
        if use_relu:
            branch.add_module('relu', nn.ReLU())
        return branch

    def forward(self, x):
        left = self.left_dw(x)
        right = self.right_dw(x)
        return self.lower(self.combine(left * right))
class BilinearFPLayerReLU(BilinearFPLayer):
    """BilinearFPLayer with ReLU forced on in the depth-wise branches.

    Fix: extra keyword arguments are now forwarded to the base class
    (previously anything besides `q` was silently dropped).
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        # force ReLU regardless of what the caller passed
        kwargs['use_relu'] = True
        super(BilinearFPLayerReLU, self).__init__(
            in_dim, out_dim, k=k, stride=stride, **kwargs
        )
class AbsReLUBlock(IBase):
    """FP111-shaped block whose branch combination is AbsCombination
    (an approximation of abs(a+b) - abs(a-b)) instead of a plain product."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        super().__init__(in_dim, out_dim, k=k, stride=stride, **kwargs)
        self.mult = AbsCombination()
        # based on the FP111 layer
        q = kwargs.get('q', 2)
        c1x1_type = kwargs.get('c1x1_type', DEFAULT_1X1)
        width = int(q * out_dim)  # expanded channel count between the 1x1 convs
        self.upper = get_upper(in_dim, width, type_=c1x1_type)
        self._init_dw(width, k, stride, use_relu=True)
        self.lower = get_lower(width, out_dim, type_=c1x1_type)
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
class MyInvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual whose expanded hidden channels
    are split into a linear path and a quadratic path (product of two
    depth-wise convs); both are concatenated and projected to `out_dim`.
    """

    def __init__(self, in_dim, out_dim, stride=1, q=2, k=3, **kwargs):
        super(MyInvertedResidual, self).__init__()
        self.stride = stride
        hidden_dim = int(round(in_dim * q))
        # one third of the hidden width goes to the quadratic path
        hdim_quad = hidden_dim // 3
        hdim_lin = hidden_dim - hdim_quad
        hidden_dim = hdim_quad + hdim_lin
        self.lin_1x1 = ConvBNReLU(in_dim, hdim_lin, kernel_size=1)
        self.lin_dwc = nn.Sequential(
            ConvBNReLU(
                hdim_lin, hdim_lin, kernel_size=k,
                stride=stride, groups=hdim_lin),
        )
        self.quad_1x1 = ConvBNReLU(in_dim, hdim_quad, kernel_size=1)
        self.quad_dwc_1 = nn.Sequential(
            ConvBNReLU(
                hdim_quad, hdim_quad, kernel_size=k,
                stride=stride, groups=hdim_quad),
        )
        self.quad_dwc_2 = nn.Sequential(
            ConvBNReLU(
                hdim_quad, hdim_quad, kernel_size=k,
                stride=stride, groups=hdim_quad),
        )
        # NOTE(review): `bn` and `use_res_connect` are currently unused in
        # forward (see the commented-out code below); kept so existing
        # checkpoints / state_dicts continue to load.
        self.bn = nn.BatchNorm2d(hdim_quad)
        self.use_res_connect = self.stride == 1 and in_dim == out_dim
        self.out = nn.Sequential(
            nn.Conv2d(hidden_dim, out_dim, 1, 1, 0, bias=False),
            nn.BatchNorm2d(out_dim)
        )

    def forward(self, x):
        x_lin = self.lin_1x1(x)
        x_lin = self.lin_dwc(x_lin)
        # x = x_lin # test!
        x_quad = self.quad_1x1(x)
        x_q1 = self.quad_dwc_1(x_quad)
        x_q2 = self.quad_dwc_2(x_quad)
        # quadratic interaction: element-wise product of the two branches
        x_quad = x_q1 * x_q2
        # x_quad = self.bn(x_quad)
        # x_quad = torch.relu(x_quad)
        x = torch.cat([x_lin, x_quad], dim=1)
        return self.out(x)
        # if self.use_res_connect:
        #     return x + self.out(x_lin)
        # else:
class ConvBNReLU(nn.Sequential):
    """Standard Conv2d (bias-free) -> BatchNorm2d -> ReLU6 stack with
    'same' padding for odd kernel sizes."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        pad = (kernel_size - 1) // 2
        layers = [
            nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                      pad, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        ]
        super().__init__(*layers)
class Exp2Layer(nn.Module):
    """Element-wise base-2 exponential: 2**x.

    Fix: implemented as ``forward`` instead of overriding ``__call__`` —
    the old override bypassed nn.Module's ``__call__`` and therefore all
    hook machinery; the computed result is unchanged.
    """

    def forward(self, x):
        return 2.**x
# ----------------------------------------------------------------------------
# Lower, Depth-Wise ReLU, Upper
# at least one of lower/upper needs to be active
# 001
class FPL0DWR0U1(IBase):
    """FP variant 001: no upper 1x1, no ReLU after the depth-wise pair,
    lower 1x1 present."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        self._init_dw(in_dim, k, stride, use_relu=False)
        self.lower = get_lower(in_dim, out_dim, type_=c1x1_type)
# 011
class FPL0DWR1U1(IBase):
    """FP variant 011: no upper 1x1, ReLU after the depth-wise pair,
    lower 1x1 present."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        self._init_dw(in_dim, k, stride, use_relu=True)
        self.lower = get_lower(in_dim, out_dim, type_=c1x1_type)
# 100
class FPL1DWR0U0(IBase):
    """FP variant 100: upper 1x1 present, no ReLU after the depth-wise pair,
    no lower 1x1."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        self.upper = get_upper(in_dim, out_dim, type_=c1x1_type)
        self._init_dw(out_dim, k, stride, use_relu=False)
# 101
class FPL1DWR0U1(IBase):
    """FP variant 101: upper and lower 1x1 present (with q-fold expansion),
    no ReLU after the depth-wise pair."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, q=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        print(f'using q: {q}')
        width = int(q * out_dim)  # expanded channel count between the 1x1 convs
        self.upper = get_upper(in_dim, width, type_=c1x1_type)
        self._init_dw(width, k, stride, use_relu=False)
        self.lower = get_lower(width, out_dim, type_=c1x1_type)
# 110
class FPL1DWR1U0(IBase):
    """FP variant 110: upper 1x1 present, ReLU after the depth-wise pair,
    no lower 1x1."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        self.upper = get_upper(in_dim, out_dim, type_=c1x1_type)
        self._init_dw(out_dim, k, stride, use_relu=True)
# 111
class FPL1DWR1U1(IBase):
    """FP variant 111: upper and lower 1x1 present (with q-fold expansion),
    ReLU after the depth-wise pair."""

    def __init__(self, in_dim, out_dim, k=3, stride=1, q=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        print(f'using q: {q}')
        width = int(q * out_dim)  # expanded channel count between the 1x1 convs
        self.upper = get_upper(in_dim, width, type_=c1x1_type)
        self._init_dw(width, k, stride, use_relu=True)
        self.lower = get_lower(width, out_dim, type_=c1x1_type)
class LogFPL1DWR0U1(ILogBase):
    """Log-domain FP101: upper/lower 1x1 with q-fold expansion, branches
    combined via ILogBase's log2-sum.

    Fixes: `k` is now forwarded to the base class (it was hard-coded to 3)
    and `which_batch_norm` is passed through (it was silently dropped, so
    the normalization choice never took effect).
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, q=1, which_batch_norm=DEFAULT_BN, c1x1_type=DEFAULT_1X1):
        super().__init__(in_dim, out_dim, k=k, stride=stride, which_batch_norm=which_batch_norm)
        self.upper = get_upper(in_dim, int(q * out_dim), type_=c1x1_type)
        self._init_dw(int(q * out_dim), k, stride, use_relu=False)
        self.lower = get_lower(int(q * out_dim), out_dim, type_=c1x1_type)
class FPPoolL0DWR0U1(IPoolBase):
    """Pooling FP variant 001: no upper 1x1, no branch ReLU, lower 1x1.

    Fixes: `c1x1_type` is now read from kwargs — it was referenced without
    ever being defined, so construction always raised NameError — and `k`
    is forwarded to the base class instead of being hard-coded to 3.
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        super().__init__(in_dim, out_dim, k=k, stride=stride)
        c1x1_type = kwargs.get('c1x1_type', DEFAULT_1X1)
        self._init_dw(in_dim, k, stride, use_relu=False)
        self.lower = get_lower(in_dim, out_dim, type_=c1x1_type)
class BPoolL0DWR0U1(IBPoolBase):
    """Both-branch pooling FP variant 001: no upper 1x1, no branch ReLU,
    lower 1x1.

    Fix: `c1x1_type` is now read from kwargs — it was referenced without
    ever being defined, so construction always raised NameError.
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        super().__init__(in_dim, out_dim, k=k, stride=stride)
        c1x1_type = kwargs.get('c1x1_type', DEFAULT_1X1)
        self._init_dw(in_dim, k, stride, use_relu=False)
        self.lower = get_lower(in_dim, out_dim, type_=c1x1_type)
class StructL0DWR0U1(IStructBase):
    """Struct-combination FP variant 001: no upper 1x1, no branch ReLU,
    lower 1x1.

    Fix: `c1x1_type` is now read from kwargs — it was referenced without
    ever being defined, so construction always raised NameError.
    """

    def __init__(self, in_dim, out_dim, k=3, stride=1, **kwargs):
        super().__init__(in_dim, out_dim, k=k, stride=stride)
        c1x1_type = kwargs.get('c1x1_type', DEFAULT_1X1)
        self._init_dw(in_dim, k, stride, use_relu=False)
        self.lower = get_lower(in_dim, out_dim, type_=c1x1_type)
# DEFAULT VALUE 101
def get_block(block_type, in_dim, out_dim, k=3, stride=1, q=1, add_res=False, **kwargs):
    """Factory: look up `block_type` in the registry, build the block, and
    optionally wrap it in a ResidualAdapter (with a 1x1 shortcut projection
    when the block shrinks the channel count)."""
    registry = {
        'FP001': FPL0DWR0U1,  # no q
        'FP011': FPL0DWR1U1,  # no q
        'FP100': FPL1DWR0U0,  # no q
        'FP101': FPL1DWR0U1,
        'FP110': FPL1DWR1U0,  # no q
        'FP111': FPL1DWR1U1,
        'L_FP101': LogFPL1DWR0U1,
        'Pool001': FPPoolL0DWR0U1,
        'BPool001': BPoolL0DWR0U1,
        'Struct001': StructL0DWR0U1,
        'InvRes': MyInvertedResidual,
        'BilinearFP': BilinearFPLayer,
        'BilinearFPReLU': BilinearFPLayerReLU,
    }
    assert block_type in registry
    block = registry[block_type](
        in_dim, out_dim, k=k, stride=stride, q=q, **kwargs
    )
    if add_res:
        if out_dim < in_dim:
            # channel count shrinks: project the shortcut with a 1x1 conv
            block = ResidualAdapter(
                block, use_1x1=True, in_dim=in_dim, out_dim=out_dim
            )
        else:
            block = ResidualAdapter(block)
    return block
def get_upper(in_dim, out_dim, **kwargs):
    """Build the 1x1 'upper' projection placed before the depth-wise pair."""
    return get_1x1_function(in_dim, out_dim, type_=kwargs.get('type_', DEFAULT_1X1))
def get_lower(in_dim, out_dim, **kwargs):
    """Build the 1x1 'lower' projection placed after the branch combination."""
    return get_1x1_function(in_dim, out_dim, type_=kwargs.get('type_', DEFAULT_1X1))
def get_1x1_function(in_dim, out_dim, type_=None):
    """Build the 1x1 projection used by get_upper/get_lower.

    `type_` defaults to the config-level DEFAULT_1X1 (resolved lazily at
    call time rather than at def time).

    Fix: an unknown `type_` now raises ValueError — previously the function
    silently fell through and returned None, producing a confusing crash
    later when the "layer" was called.
    """
    if type_ is None:
        type_ = DEFAULT_1X1
    if type_ == 'convbnrelu':
        return nn.Sequential(
            nn.Conv2d(in_dim, out_dim, 1, bias=False),
            nn.BatchNorm2d(out_dim),
            nn.ReLU()
        )
    elif type_ == 'convrelu':
        print('using conv relu')
        return nn.Sequential(
            nn.Conv2d(in_dim, out_dim, 1, bias=False),
            nn.ReLU()
        )
    elif type_ == 'conv':
        print('using conv')
        return nn.Sequential(
            nn.Conv2d(in_dim, out_dim, 1, bias=False)
        )
    raise ValueError(f'unknown 1x1 type: {type_}')
class Multiplication(nn.Module):
    """Element-wise product of two tensors, as a module so it can be swapped
    out (see AbsReLUBlock)."""

    def forward(self, x, y):
        return torch.mul(x, y)
class AbsCombination(nn.Module):
    """Approximates abs(a+b) - abs(a-b); both inputs are assumed >= 0,
    in which case this equals 2*min(a, b)."""

    def forward(self, a, b):
        rectified_sum = torch.relu(a + b)
        abs_diff = torch.relu(a - b) + torch.relu(b - a)  # == |a - b|
        return rectified_sum - abs_diff
|
import warnings
warnings.filterwarnings("ignore")
import random
from AdaFairEQOP import AdaFairEQOP
from multiprocessing import Process, Lock
import pickle
import os
import matplotlib
from sklearn.model_selection import StratifiedKFold, ShuffleSplit, StratifiedShuffleSplit
from Competitors.SMOTEBoost import SMOTEBoost
matplotlib.use('Agg')
import sys
sys.path.insert(0, 'DataPreprocessing')
import time
from Competitors.AdaCost import AdaCostClassifier
from load_dutch_data import load_dutch_data
from load_compas_data import load_compas
from load_adult import load_adult
from load_diabetes import load_diabetes
from load_credit import load_credit
from load_kdd import load_kdd
from load_bank import load_bank
from my_useful_functions import calculate_performance_SP, calculate_performanceEQOP, plot_my_results
class serialazible_list(object):
    """Pickle-able container accumulating per-iteration performance results.

    NOTE(review): the class name is a typo for "serializable", but pickled
    result files reference it by this exact name, so renaming would break
    loading existing files.
    """

    def __init__(self):
        # list of performance records appended by the worker processes
        self.performance = []
def create_temp_files(dataset, suffixes):
    """Create one pickled, empty result container per classifier suffix and
    make sure the output image directory exists.

    Fix: files are opened with a context manager so handles are closed even
    if pickling fails (they were left open before).
    """
    for suffix in suffixes:
        with open(dataset + suffix, 'wb') as outfile:
            pickle.dump(serialazible_list(), outfile)
    if not os.path.exists("Images/"):
        os.makedirs("Images/")
def delete_temp_files(dataset, suffixes):
    """Remove the per-classifier temporary result files."""
    for suffix in suffixes:
        target = dataset + suffix
        os.remove(target)
def predict(clf, X_test, y_test, sa_index, p_Group):
    """Score `clf` on the test split and return the statistical-parity
    performance record."""
    positive_probs = clf.predict_proba(X_test)[:, 1]
    predicted_labels = clf.predict(X_test)
    return calculate_performance_SP(X_test, y_test, predicted_labels, positive_probs, sa_index, p_Group)
def run_eval(dataset, iterations):
    """Run `iterations` stratified 50/50 shuffle splits on `dataset`,
    training classifiers in parallel processes, then plot the pooled
    results and clean up the temp files.
    """
    suffixes = [ 'Adaboost', 'AdaFair', 'SMOTEBoost' ]
    # dataset name -> loader; each returns (X, y, sa_index, p_Group, x_control)
    if dataset == "compass-gender":
        X, y, sa_index, p_Group, x_control = load_compas("sex")
    elif dataset == "compass-race":
        X, y, sa_index, p_Group, x_control = load_compas("race")
    elif dataset == "adult-gender":
        X, y, sa_index, p_Group, x_control = load_adult("sex")
    elif dataset == "adult-race":
        X, y, sa_index, p_Group, x_control = load_adult("race")
    elif dataset == "dutch":
        X, y, sa_index, p_Group, x_control = load_dutch_data()
    elif dataset == "bank":
        X, y, sa_index, p_Group, x_control = load_bank()
    elif dataset == "credit":
        X, y, sa_index, p_Group, x_control = load_credit()
    elif dataset == "diabetes":
        X, y, sa_index, p_Group, x_control = load_diabetes()
    elif dataset == "kdd":
        X, y, sa_index, p_Group, x_control = load_kdd()
    else:
        exit(1)
    create_temp_files(dataset, suffixes)
    # NOTE(review): despite the name, `threads` holds multiprocessing
    # Processes; one lock per potential worker slot, only index 1 is used.
    threads = []
    mutex = []
    for lock in range(0, 8):
        mutex.append(Lock())
    print (dataset)
    random.seed(int(time.time()))
    for iter in range(0, iterations):
        # fresh 50/50 stratified split per iteration (random_state=iter)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=.5, random_state=iter)
        for train_index, test_index in sss.split(X, y):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # for proc in range(0, 3):
            #     threads.append(Process(target=train_classifier, args=( X_train, X_test, y_train, y_test, sa_index, p_Group, dataset + suffixes[proc], mutex[proc],proc, 500, 1, dataset)))
            # only the AdaFair variant (suffixes[1], mode 1) is spawned here
            threads.append(Process(target=train_classifier, args=( X_train, X_test, y_train, y_test, sa_index, p_Group, dataset + suffixes[1], mutex[1],1, 500, 1, dataset)))
            break
    for process in threads:
        process.start()
    for process in threads:
        process.join()
    # collect the pickled per-classifier performance lists
    results = []
    for suffix in suffixes:
        infile = open(dataset + suffix, 'rb')
        temp_buffer = pickle.load(infile)
        results.append(temp_buffer.performance)
        infile.close()
    plot_my_results(results, suffixes, "Images/EqualOpportunity/" + dataset, dataset)
    delete_temp_files(dataset, suffixes)
def train_classifier(X_train, X_test, y_train, y_test, sa_index, p_Group, dataset, mutex, mode, base_learners, c, dataset_name):
    """Train one fairness-aware classifier and append its equal-opportunity
    performance to the pickled temp file at path `dataset`, guarded by `mutex`.

    mode: 0 = AdaCost, 1 = AdaFairEQOP, 2 = SMOTEBoost.

    Fixes:
    - the SMOTEBoost branch compared `dataset` (the temp-file PATH) against
      'bank' instead of `dataset_name`, so that case could never trigger;
    - an unknown `mode` now raises ValueError instead of a confusing
      NameError on `classifier`;
    - pickle files are opened with context managers and the lock is
      released in a finally block, so a failure can no longer leak the
      file handle or deadlock the other workers.
    """
    if mode == 0:
        classifier = AdaCostClassifier(saIndex=sa_index, saValue=p_Group, n_estimators=base_learners, CSB="CSB1")
    elif mode == 1:
        classifier = AdaFairEQOP(n_estimators=base_learners, saIndex=sa_index, saValue=p_Group, CSB="CSB1", c=c)
    elif mode == 2:
        if dataset_name == 'adult-gender' or dataset_name == 'bank':
            samples = 100
        elif dataset_name == 'compass-gender':
            samples = 2
        else:
            samples = 500
        classifier = SMOTEBoost(n_estimators=base_learners, saIndex=sa_index, n_samples=samples, saValue=p_Group, CSB="CSB1")
    else:
        raise ValueError("unknown mode: %s" % mode)
    classifier.fit(X_train, y_train)
    y_pred_labels = classifier.predict(X_test)
    mutex.acquire()
    try:
        with open(dataset, 'rb') as infile:
            dict_to_ram = pickle.load(infile)
        dict_to_ram.performance.append(
            calculate_performanceEQOP(X_test, y_test, y_pred_labels, sa_index, p_Group))
        with open(dataset, 'wb') as outfile:
            pickle.dump(dict_to_ram, outfile)
    finally:
        mutex.release()
if __name__ == '__main__':
    # Full evaluation sweep: 10 stratified shuffle-split iterations per dataset.
    run_eval("compass-gender", 10)
    run_eval("adult-gender", 10)
    run_eval("bank", 10)
    run_eval("kdd", 10)
|
# repo: jarryliu/queue-sim
#!/usr/local/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, floor, ceil
import scipy as sp
import scipy.stats
import scipy as sp
def mean_confidence_interval(a, k=1, confidence=0.99):
    """Return (mean, lower, upper): a two-sided `confidence` CI for the mean of `a`.

    :param a: sample values
    :param k: divisor applied to the sample count when computing the
        t-distribution's degrees of freedom (n = len(a) / k), e.g. for
        k-fold correlated samples
    :param confidence: confidence level of the interval
    """
    n = len(a) / k
    m, se = np.mean(a), sp.stats.sem(a)
    # Use the public `t.ppf`; the private `t._ppf` used before is an internal
    # SciPy API that newer releases remove.  Values are identical.
    h = se * sp.stats.t.ppf((1 + confidence) / 2, n - 1)
    return m, m - h, m + h
# get the data of a single log file and process the latencies
def processFile(path, f, newFile = False):
    """Load one latency log and return (mean_arrival, var_arrival, mean_stats, var_latency).

    Log columns (values divided by 1e6 — presumably ns -> ms; confirm with the
    writer): 0 = end-to-end latency, 1 = arrival interval, 3.. = per-stage
    latencies.  The first fifth of the rows is discarded as warm-up.
    mean_stats is [mean, ci_low, ci_high, p5, p99] + per-stage means.
    """
    print(path+f)
    data = np.loadtxt(path+f)
    (x,y)= data.shape
    # 1. get arrival rate
    # arrival = data[1:, 1]/1000/1000
    arrival = data[floor(x/5):, 1]/1000/1000
    #histogram(arrival, "arrival interval distribution")
    mean_a = np.mean(arrival)
    var_a = np.var(arrival)
    # print("Mean Arrival interval is", mean_a, "variance is", var_a)
    # 2. get end-to-end latency distribution
    # latency = data[1:, 0]/1000/1000
    latency = data[floor(x/5):, 0]/1000/1000
    # print(f,latency)
    #histogram(latency, "end-to-end latency distribution")
    m, m_l, m_h = mean_confidence_interval(latency)
    mList = np.mean(data[floor(x/5):, 3:8]/1000/1000, 0)
    if newFile:
        # New-format logs appear to split each stage across two columns
        # (3:11 = 8 columns); adjacent pairs are folded back into the
        # five-stage layout used below — TODO confirm against the log writer.
        temp = np.mean(data[floor(x/5):, 3:11]/1000/1000, 0)
        mList[0] = temp[0]+temp[1]
        mList[1] = temp[2] + temp[3]
        mList[2] = temp[4] + temp[5]
        mList[3:] = temp[6:]
    # print(f, m, m_l, m_h)
    mean_s = [m, m_l, m_h, np.percentile(latency,5), np.percentile(latency, 99)]+list(mList)
    var_s = np.var(latency)
    # print("Average Latency is", mean_s, "variance is", var_s, "98 percentile", np.percentile(latency, 95))
    return mean_a, var_a, mean_s, var_s
def readFileList(path, fList, newFile = False):
    """Process every log file in fList and stack the per-file statistics into arrays.

    Returns (mean_arrivals, var_arrivals, mean_stats, var_latencies), each a
    numpy array with one entry per file.
    """
    stats = [processFile(path, f, newFile=newFile) for f in fList]
    mean_arrivals = np.array([s[0] for s in stats])
    var_arrivals = np.array([s[1] for s in stats])
    mean_stats = np.array([s[2] for s in stats])
    var_latencies = np.array([s[3] for s in stats])
    return mean_arrivals, var_arrivals, mean_stats, var_latencies
def plotStage(bList, mean_s):
    """Plot and print the five per-stage mean latencies (columns 5-9 of mean_s)."""
    for stage in range(1, 6):
        plt.plot(bList, mean_s[:, stage + 4], "*-", label="stage %d" % stage)
    print("latency ", mean_s[:, 0])
    for stage in range(1, 6):
        print("stage %d " % stage, mean_s[:, stage + 4])
    print(mean_s[:, 9])
def showLatency(path, bList, fList, directory, label = "", showStage = True):
    """Plot mean end-to-end latency with its confidence band for one run directory.

    :param bList: x-axis values (one per log file)
    :param fList: log file names found under path/directory/
    :param directory: run directory, also used as the curve's legend label
    :param label: x-axis caption
    :param showStage: also overlay the per-stage latency curves
    """
    mean_a, var_a, mean_s, var_s = readFileList(path + directory+"/", fList)
    # Shaded band between the lower/upper confidence bounds (columns 1 and 2).
    plt.fill_between(bList, mean_s[:, 1], mean_s[:, 2], alpha=.5)
    plt.plot(bList, mean_s[:, 0], "*-", label=directory)
    if showStage:
        plotStage(bList, mean_s)
    plt.ylabel("Latency (ms)")
    plt.xlabel(label)
    plt.legend()
    plt.show()
def plotPoiRate(pubList, rate):
    """Plot latency vs. aggregate message rate for Poisson runs at a fixed per-publisher rate."""
    directory = "poiRate_" + str(rate)
    # Batch size is fixed at 1 for the rate sweep.
    fList = [
        "latency_{:s}_{:d}_{:d}_{:d}".format("poi", pub, 1, rate)
        for pub in pubList
    ]
    showLatency("./", np.array(pubList) * rate / 1000, fList, directory,
                label="Message Rate (kmessages/s)")
def plotPoiBatch(batchList, pub):
    """Plot latency vs. batch size for Poisson runs with a fixed publisher count."""
    directory = "poiBatch_" + str(pub)
    # Per-publisher rate is fixed at 100 for the batch sweep.
    fList = [
        "latency_{:s}_{:d}_{:d}_{:d}".format("poi", pub, batch, 100)
        for batch in batchList
    ]
    showLatency("./", batchList, fList, directory, label="Batch Size (messages)")
if __name__ == "__main__":
    # Rate sweeps: same publisher counts at two per-publisher rates.
    pubList = [10, 20, 50, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500]
    for rate in (100, 500):
        plotPoiRate(pubList, rate)
    # Batch-size sweep at a fixed publisher count.
    plotPoiBatch([1, 2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 100)
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This code implements the feature-vectors algorithm.
For a given dataset and a given tree-based model, it extracts a 2-D embedding vector for each feature and visualizes
the interaction among features.
- ``FeatureVec`` implements the feature-vectors algorithm and its output visualization
Copyright 2021
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://opensource.org/licenses/MIT
"""
import os
import warnings
import pandas as pd
import plotly.express as px
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.preprocessing import OneHotEncoder
from fvecs.fv_rulefit import RuleFit
from fvecs.utils import *
# Constants :
MAX_SENTENCES = 20000  # Default value for maximum rules extracted from the tree
MAX_DEPTH = 3  # Default value of maximum (average) depth of trees
EPSILON = 1e-12  # Default value for minimum norm of embeddings (for numerical stability)
AXIS_SIZE = 1.1  # Used for plotting a large enough figure
NUM_SEMICIRCLES = 10  # Number of semi-circles to draw in the final figure
class PlotColumns:
    """Column names for the DataFrame behind the feature-vector plot."""
    x = 'x'
    y = 'y'
    # Fixed typo: the value was 'angels'; it is only ever referenced through
    # this attribute, so correcting it just fixes the displayed column name.
    angles = 'angles'
    names = 'names'
class FeatureVec(object):
    """Feature-vectors explainer.

    Fits a tree ensemble (built internally or supplied), extracts decision
    rules via RuleFit, builds a feature co-occurrence matrix, and reduces it
    to a 2-D embedding per feature.  Vector norm acts as an importance score
    and vector angle groups interacting features; `plot` renders the result.
    """

    def __init__(self, mode: str,
                 feature_names: list = None,
                 max_depth: int = MAX_DEPTH,
                 max_sentences: int = MAX_SENTENCES,
                 exp_rand_tree_size: bool = False,
                 tree_generator=None):
        """
        :param mode: 'classify' for classification tasks or 'regress' for regression tasks
        :param max_depth: integer with the maximum (average) depth of the trained trees within the ensemble
        :param feature_names: list with names of the features within the data
        :param max_sentences: integer with the maximum number of extracted sentences
        :param exp_rand_tree_size: If True, it allows to have trees with different sizes. Otherwise False.
        :param tree_generator: Tree generator model (overwrites above features), one of the RandomForestClassifier or RandomForestRegressor
        """
        if tree_generator is None:
            assert (mode == 'classify') or (mode == 'regress'), \
                f'Mode should either be "classify" or "regress", but {mode} was given instead'
            assert (max_sentences > 2 ** max_depth), 'Number of sentences should be larger than the tree decision routes.'
            assert isinstance(exp_rand_tree_size, bool), 'exp_rand_tree_size should be True or False.'
            # Each tree of depth d contributes up to 2**d decision routes.
            num_trees = max_sentences // (2 ** max_depth)
            if mode == 'classify':
                tree_generator = RandomForestClassifier(n_estimators=num_trees, max_depth=max_depth)
            else:
                tree_generator = RandomForestRegressor(n_estimators=num_trees, max_depth=max_depth)
        else:
            assert isinstance(tree_generator, RandomForestRegressor) or isinstance(tree_generator, RandomForestClassifier), \
                'tree_generator should be a scikit-learn random forest instance.'
            exp_rand_tree_size = False  # We don't want to touch the given tree_generator
        # BUGFIX: the original unconditionally did list(feature_names), which
        # raises TypeError for the documented default of None.  Keep None so
        # fit() can derive the names from the data instead.
        self._feature_names = list(feature_names) if feature_names is not None else None
        self._rf = RuleFit(
            tree_generator, max_rules=max_sentences, exp_rand_tree_size=exp_rand_tree_size)
        # initiate attributes:
        self._init_attributes()

    def fit(self, X: np.ndarray, y: np.ndarray, categorical_feats: list = None,
            restart: bool = False, bagging: int = 0, window_size: int = 1):
        """
        Fit the tree model.
        :param categorical_feats: List of the column names that are categorical
        :param X: The input of the dataset to be explained (a numpy array or a Pandas dataframe)
        :param y: outputs (encoded integer class label for classification or real value for regression)
        :param restart: If True, it will train the tree generator model from scratch.
        :param bagging: If >0, it is the number of iterations of baggings performed to extract confidence intervals.
        :param window_size: The neighborhood window size for two features to be considered adjacent.
        """
        assert isinstance(X, np.ndarray) or isinstance(X, pd.DataFrame), \
            'X has to be a numpy array of a DataFrame'
        assert isinstance(y, np.ndarray) or isinstance(y, list), 'y should be a numpy array or a list'
        assert len(np.array(y).shape) == 1, 'y should be 1-d'
        assert (len(X) == len(y)), 'X and y must be the same length'
        assert isinstance(bagging, int), 'bagging must be an integer'
        assert isinstance(restart, bool), 'restart should be True or False'
        if self._feature_names is None:
            if isinstance(X, pd.DataFrame):
                self._feature_names = list(X.columns)
            else:  # If X is a numpy array, name features by index numbers
                log_feats = int(np.ceil(np.log10(X.shape[-1])))
                self._feature_names = [str(x).zfill(log_feats) for x in range(0, X.shape[-1])]
        else:
            assert (len(self._feature_names) == X.shape[-1]), \
                'X must have the same number of features as feature_names'
        X = pd.DataFrame(X, columns=self._feature_names)
        if restart or self._rf.not_trained:
            if categorical_feats is None:
                warnings.warn('categorical_feats is not provided. All non-string features will be treated as numerical.')
                # A column counts as numerical iff every entry survives pd.to_numeric.
                is_numerical = pd.DataFrame(X).apply(lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
                categorical_feats = X.columns[~is_numerical]
            _X, encoded_original_mapping = self._prepare_input(X, categorical_feats)
        else:
            _X, encoded_original_mapping = X.values, None
        # Re-initiate instance attributes
        self._init_attributes()
        self._rf.fit(_X, np.array(y), restart=restart)
        rules = self._rf.get_rules()
        cm = cooccurance_matrix(rules, X.shape[-1], window_size, encoded_original_mapping)
        dimred = TruncatedSVD(2)  # 2-d dimensionality reduction
        vectors = normalize_angles(dimred.fit_transform(cm))
        norms = np.linalg.norm(vectors, axis=-1)
        # Scale so the largest vector has unit norm; EPSILON guards the all-zero case.
        vectors /= max(EPSILON, np.max(norms))
        self._vectors = vectors
        self._importance = np.linalg.norm(self._vectors, axis=-1)
        self._stds = np.zeros(self._vectors.shape)
        if bagging:
            # Re-fit on bootstrap resamples to estimate per-coordinate stds.
            all_vectors = []
            for _ in range(bagging):
                self._rf.bag_trees(_X, y)
                rules_bag = self._rf.get_rules()
                cm_bag = cooccurance_matrix(rules_bag, X.shape[-1], window_size, encoded_original_mapping)
                vectors_bag = dimred.fit_transform(cm_bag)
                vectors_bag = normalize_angles(vectors_bag)
                norms_bag = np.linalg.norm(vectors_bag, axis=-1)
                all_vectors.append(vectors_bag / max(EPSILON, np.max(norms_bag)))
            self._stds = np.std(all_vectors, 0)

    @staticmethod
    def _prepare_input(X: pd.DataFrame, categorical_feats: list):
        """
        Transforms the dataset input into numerical format by encoding categorical variables.
        :param X: a DataFrame where each row is a data point (column access below requires a DataFrame, not a raw array)
        :param categorical_feats: List of the feature names that are categorical
        :return: (1) numerical_X: Transformed array where categorical variables are one-hot encoded
                 (2) encoded_original_mapping: Maps each feature in the transformed array to its original feature
        """
        encoder = OneHotEncoder(drop='if_binary')  # one-hot encoder for categorical variables
        encoded_X = []
        encoded_original_mapping = {}  # Mapping from encoded feature number to original feature number
        encoded_idx = 0
        for feature_idx, feature in enumerate(X):  # iterating a DataFrame yields column names
            column = X[feature].values.reshape((-1, 1))
            if feature in categorical_feats:
                encoded_column = encoder.fit_transform(column).toarray()
            else:  # if the feature is not categorical
                encoded_column = column.astype(float)
            for _ in range(encoded_column.shape[-1]):  # update mapping
                encoded_original_mapping[encoded_idx] = feature_idx
                encoded_idx += 1
            encoded_X.append(encoded_column)
        return np.concatenate(encoded_X, -1), encoded_original_mapping

    @property
    def importance(self):
        # Per-feature importance = norm of each 2-D vector (None before fit).
        return self._importance

    @property
    def vectors(self):
        # The (num_features, 2) embedding array (None before fit).
        return self._vectors

    @property
    def confidence_bounds(self):
        # 3-sigma bounds from bagging (zeros unless fit(..., bagging>0) ran).
        if self._stds is None:
            return None
        return 3 * self._stds

    @property
    def angles(self):
        # Angle (radians) of each feature vector, computed lazily and cached.
        if self._angles is None:
            self._angles = np.arctan2(self._vectors[:, 1], self._vectors[:, 0])
        return self._angles

    def _init_attributes(self):
        """
        Initiate user visible attributes each time fitting a model
        """
        self._angles = None  # renamed from the misspelled internal `_angels`
        self._vectors = None
        self._importance = None
        self._stds = None

    def plot(self, dynamic: bool = True, confidence: bool = True, fpath: str = None,
             font_size: float = 15., marker_size: float = 10.,
             confidence_line_width: float = 1., confidence_line_opacity: float = 0.5,
             confidence_line_color: str = 'gray', confidence_line_style: str = 'solid'):
        """
        Creates and show a plot with the feature-vectors
        :param dynamic: If True (default) the output is a dynamic html plot. Otherwise, it will be an image.
        :param confidence: If True it will show the confidence interval. Otherwise, intervals are not shown.
        :param fpath: Path to save the image. For a dynamic figure, the path should be a .html file.
        :param font_size: font size of the plot texts
        :param marker_size: marker size of the feature vectors
        :param confidence_line_width: width of the feature vector confidence lines
        :param confidence_line_opacity: opacity of the confidence lines
        :param confidence_line_color: color of the confidence lines
        :param confidence_line_style: line style of the confidence lines
        """
        assert self._vectors is not None, "You should first fit the tree-based model."
        angles = np.arctan2(self._vectors[:, 1], self._vectors[:, 0])
        max_angle = np.max(np.abs(angles))
        # Two dummy points pin the origin and symmetrize the colour range.
        feature_names = self._feature_names + ['origin', '']
        plot_vectors = np.concatenate([self._vectors, [[0, 0], [0, 0]]])
        plot_angles = np.concatenate([angles, [-max_angle, max_angle]])
        plot_data = np.stack([plot_vectors[:, 1], plot_vectors[:, 0], plot_angles, feature_names], axis=-1)
        plot_df = pd.DataFrame(
            data=plot_data,
            columns=[PlotColumns.x, PlotColumns.y, PlotColumns.angles, PlotColumns.names]
        )
        # np.stack with the names column produced strings; restore numeric dtypes.
        plot_df[[PlotColumns.x, PlotColumns.y, PlotColumns.angles]] = \
            plot_df[[PlotColumns.x, PlotColumns.y, PlotColumns.angles]].apply(pd.to_numeric)
        fig = px.scatter(
            plot_df, x=PlotColumns.x, y=PlotColumns.y, color=PlotColumns.angles, width=1000, height=500,
            hover_name=feature_names,
            hover_data={PlotColumns.x: False, PlotColumns.y: False, PlotColumns.angles: False, PlotColumns.names: False},
            color_continuous_scale=px.colors.sequential.Rainbow)
        fig.update_yaxes(visible=False, showticklabels=False, range=[0, AXIS_SIZE])
        fig.update_xaxes(visible=False, showticklabels=False, range=[-AXIS_SIZE, AXIS_SIZE])
        if not dynamic:
            # Static images need text annotations (skip the two dummy points).
            for i in range(len(plot_vectors) - 2):
                fig.add_annotation(
                    x=plot_vectors[:, 1][i],
                    y=plot_vectors[:, 0][i],
                    text=feature_names[i],
                    font=dict(size=15),
                    axref=PlotColumns.x,
                    ayref=PlotColumns.y,
                    ax=plot_vectors[:, 1][i],
                    ay=plot_vectors[:, 0][i],
                    arrowhead=2,
                )
        fig.update_traces(marker=dict(size=marker_size), textfont_size=font_size)
        fig.update(layout_coloraxis_showscale=False)
        fig.update_layout(showlegend=False)
        for i in range(NUM_SEMICIRCLES):  # Draws semi-circles with same origins for better visualization of importance
            fig.add_shape(
                type='circle',
                x0=(i + 1) / 10 * AXIS_SIZE,
                y0=(i + 1) / 10 * AXIS_SIZE,
                x1=-(i + 1) / 10 * AXIS_SIZE,
                y1=-(i + 1) / 10 * AXIS_SIZE,
                line_color="red", opacity=0.5, line=dict(dash='dot', width=3))
        if confidence:
            # One 3-sigma ellipse (as a circle shape) per feature vector.
            for vector, std, angle in zip(self._vectors, self._stds, angles):
                fig.add_shape(
                    type='circle',
                    x0=vector[1] + 3 * std[1],
                    y0=vector[0] + 3 * std[0],
                    x1=vector[1] - 3 * std[1],
                    y1=vector[0] - 3 * std[0],
                    line_color=confidence_line_color,
                    opacity=confidence_line_opacity,
                    line=dict(dash=confidence_line_style, width=confidence_line_width))
        fig.show()
        if fpath:
            assert os.path.exists(os.path.split(fpath)[0]), 'The folder containing the save path does not exist!'
            if os.path.exists(fpath):
                warnings.warn('Figure already exists. Overwriting!')
            if dynamic:
                pre, ext = os.path.splitext(fpath)
                if ext != '.html':
                    print('For a dynamic figure, path extension should be an html file > changing suffix to .html')
                    fpath = pre + '.html'
                fig.write_html(fpath)
            else:
                fig.write_image(fpath)
|
<reponame>nap-lab/apollo
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Data Collector
"""
import os
import sys
import time
import rospy
from std_msgs.msg import String
from modules.canbus.proto import chassis_pb2
from modules.control.proto import control_cmd_pb2
from modules.localization.proto import localization_pb2
class DataCollector(object):
    """
    DataCollector Class

    Replays a command script against /apollo/control while logging chassis
    and control data to a CSV file.  Script lines have the form
    "<type> <arg>" where type is 'a' (assign a ControlCommand field),
    'c' (wait until a condition on self holds) or 't' (wait N seconds).
    """

    def __init__(self, file):
        # One script command per line; strip trailing newlines only.
        self.proc = [line.rstrip('\n') for line in open(file)]
        self.index = 0
        # NOTE(review): this first assignment is dead — overwritten just below.
        outfile = file + '_recorded.csv'
        i = 0
        outfile = file + str(i) + '_recorded.csv'
        # Pick the first numbered output file that does not already exist.
        while os.path.exists(outfile):
            i += 1
            outfile = file + str(i) + '_recorded.csv'
        self.file = open(outfile, 'w')
        self.file.write(
            "time,io,ctlmode,ctlbrake,ctlthrottle,ctlgear_location,vehicle_speed,"
            +
            "engine_rpm,driving_mode,throttle_percentage,brake_percentage,gear_location, imu\n"
        )
        self.sequence_num = 0
        self.control_pub = rospy.Publisher(
            '/apollo/control', control_cmd_pb2.ControlCommand, queue_size=1)
        rospy.sleep(0.3)
        self.controlcmd = control_cmd_pb2.ControlCommand()
        # Send First Reset Message (pad_msg.action = 2 — presumably RESET;
        # confirm against the pad_msg proto definition)
        print "Send Reset Command"
        self.controlcmd.header.module_name = "control"
        self.controlcmd.header.sequence_num = self.sequence_num
        self.sequence_num = self.sequence_num + 1
        self.controlcmd.header.timestamp_sec = rospy.get_time()
        self.controlcmd.pad_msg.action = 2
        self.control_pub.publish(self.controlcmd)
        rospy.sleep(0.3)
        # Set Default Message (pad_msg.action = 1): neutral throttle/brake,
        # straight steering, drive gear.
        print "Send Default Command"
        self.controlcmd.pad_msg.action = 1
        self.controlcmd.throttle = 0
        self.controlcmd.brake = 0
        self.controlcmd.steering_rate = 100
        self.controlcmd.steering_target = 0
        self.controlcmd.gear_location = chassis_pb2.Chassis.GEAR_DRIVE
        # Interpreter state flags.
        self.printedcondition = False
        self.runtimer = False
        self.canmsg_received = False
        self.localization_received = False

    def callback_localization(self, data):
        """
        New Localization

        Caches the y-axis acceleration from the vehicle reference frame (vrf).
        """
        self.acceleration = data.pose.linear_acceleration_vrf.y
        self.localization_received = True

    def callback_canbus(self, data):
        """
        New CANBUS

        Caches the chassis state and logs it (io=0).  Ignored until the first
        localization message arrives, since write_file reads self.acceleration.
        """
        if not self.localization_received:
            print "No Localization Message Yet"
            return
        timenow = data.header.timestamp_sec
        self.vehicle_speed = data.speed_mps
        self.engine_rpm = data.engine_rpm
        self.throttle_percentage = data.throttle_percentage
        self.brake_percentage = data.brake_percentage
        self.gear_location = data.gear_location
        self.driving_mode = data.driving_mode
        self.write_file(timenow, 0)
        self.canmsg_received = True

    def publish_control(self):
        """
        New Control Command

        Executes script commands until one blocks (unmet 'c' condition,
        pending 't' timer, or end of script), then publishes the current
        ControlCommand and logs it (io=1).
        """
        if not self.canmsg_received:
            print "No CAN Message Yet"
            return
        self.controlcmd.header.sequence_num = self.sequence_num
        self.sequence_num = self.sequence_num + 1
        while self.index < len(self.proc):
            cmdtype = self.proc[self.index][0]
            proc = self.proc[self.index][2:].lstrip()
            if cmdtype == 'a':
                # 'a <stmt>': exec "self.controlcmd.<stmt>" — script text is
                # executed verbatim, so only run trusted command scripts.
                command = 'self.controlcmd.' + proc
                exec (command)
                self.index = self.index + 1
                self.printedcondition = False
                print proc
            elif cmdtype == 'c':
                # 'c <expr>': block (re-checked every call) until eval("self.<expr>") is truthy.
                condition = 'self.' + proc
                if eval(condition):
                    self.index = self.index + 1
                    self.printedcondition = False
                    print proc
                else:
                    if not self.printedcondition:
                        # Print the wait notice only once per condition.
                        print "Waiting for condition: ", proc
                        self.printedcondition = True
                    break
            elif cmdtype == 't':
                # 't <seconds>': start a timer on first sight, advance once elapsed.
                delaytime = float(proc)
                if not self.runtimer:
                    self.starttime = rospy.get_time()
                    self.runtimer = True
                    print "Waiting for time: ", delaytime
                    break
                elif rospy.get_time() > (self.starttime + delaytime):
                    self.index = self.index + 1
                    self.runtimer = False
                    print "Delayed for: ", delaytime
                else:
                    break
            else:
                # Unknown command type: close the log and stop the node.
                print "Invalid Command, What are you doing?"
                print "Exiting"
                self.file.close()
                rospy.signal_shutdown("Shutting down")
        self.controlcmd.header.timestamp_sec = rospy.get_time()
        #self.control_pub.publish(self.controlcmd.SerializeToString())
        self.control_pub.publish(self.controlcmd)
        self.write_file(self.controlcmd.header.timestamp_sec, 1)
        if self.index >= len(self.proc):
            print "Reached end of commands, shutting down"
            self.file.close()
            rospy.signal_shutdown("Shutting down")

    def write_file(self, time, io):
        """
        Write Message to File

        io: 0 for a received chassis message, 1 for a published control command.
        """
        self.file.write(
            "%.4f,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" %
            (time, io, 1, self.controlcmd.brake, self.controlcmd.throttle,
             self.controlcmd.gear_location, self.vehicle_speed, self.engine_rpm,
             self.driving_mode, self.throttle_percentage, self.brake_percentage,
             self.gear_location, self.acceleration))
def main():
    """
    Main function

    Usage: data_collector.py <command_script>.  Subscribes to the localization
    and chassis topics and replays the script at 100 Hz.
    """
    if len(sys.argv) <= 1:
        print "Require Command Script"
        return
    elif len(sys.argv) > 2:
        print "Too many inputs"
        return
    file = sys.argv[1]
    rospy.init_node('data_collector', anonymous=True)
    data_collector = DataCollector(file)
    localizationsub = rospy.Subscriber('/apollo/localization/pose',
                                       localization_pb2.LocalizationEstimate,
                                       data_collector.callback_localization)
    canbussub = rospy.Subscriber('/apollo/canbus/chassis', chassis_pb2.Chassis,
                                 data_collector.callback_canbus)
    # Drive the script interpreter at 100 Hz until ROS shuts down.
    rate = rospy.Rate(100)
    while not rospy.is_shutdown():
        data_collector.publish_control()
        rate.sleep()
if __name__ == '__main__':
    main()
|
from casexml.apps.phone.dbaccessors.sync_logs_by_user import get_synclogs_for_user
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed, KafkaCheckpointEventHandler
from corehq.apps.receiverwrapper.util import get_version_and_app_from_build_id
from corehq.apps.users.models import CouchUser, CommCareUser, WebUser, DeviceAppMeta
from corehq.apps.users.util import update_latest_builds, update_last_sync, update_device_meta
from corehq.util.doc_processor.interface import BaseDocProcessor, DocumentProcessorController
from corehq.util.doc_processor.couch import CouchDocumentProvider
from dimagi.utils.parsing import string_to_utc_datetime
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.interface import PillowProcessor
from pillowtop.feed.interface import Change
from pillowtop.checkpoints.manager import KafkaPillowCheckpoint
from pillowtop.reindexer.reindexer import Reindexer, ReindexerFactory
# Kafka client id used by the synclog user-sync pillow's change feed.
SYNCLOG_SQL_USER_SYNC_GROUP_ID = "synclog_sql_user_sync"
def get_user_sync_history_pillow(
        pillow_id='UpdateUserSyncHistoryPillow', num_processes=1, process_num=0, **kwargs):
    """Build the pillow that consumes synclog changes and updates user sync history."""
    checkpoint = KafkaPillowCheckpoint(pillow_id, [topics.SYNCLOG_SQL])
    feed = KafkaChangeFeed(
        topics=[topics.SYNCLOG_SQL],
        client_id=SYNCLOG_SQL_USER_SYNC_GROUP_ID,
        num_processes=num_processes,
        process_num=process_num,
    )
    event_handler = KafkaCheckpointEventHandler(
        checkpoint=checkpoint, checkpoint_frequency=100, change_feed=feed)
    return ConstructedPillow(
        name=pillow_id,
        checkpoint=checkpoint,
        change_feed=feed,
        processor=UserSyncHistoryProcessor(),
        change_processed_event_handler=event_handler,
    )
class UserSyncHistoryProcessor(PillowProcessor):
    """Updates a CouchUser's sync history (last sync, builds, device meta)
    from one synclog change, saving only when something actually changed."""

    def process_change(self, change):
        synclog = change.get_document()
        if not synclog:
            return
        version = None
        app_id = None
        try:
            sync_date = string_to_utc_datetime(synclog.get('date'))
        except (ValueError, AttributeError):
            # Missing or unparseable date: nothing useful to record.
            return
        build_id = synclog.get('build_id')
        if build_id:
            version, app_id = get_version_and_app_from_build_id(synclog.get('domain'), build_id)
        user_id = synclog.get('user_id')
        if user_id:
            user = CouchUser.get_by_user_id(user_id)
            # Each update_* helper's return is OR-ed into `save` so the user
            # doc is written at most once per change.
            save = update_last_sync(user, app_id, sync_date, version)
            if version:
                save |= update_latest_builds(user, app_id, sync_date, version)
            app_meta = None
            device_id = synclog.get('device_id')
            if device_id:
                if app_id:
                    app_meta = DeviceAppMeta(app_id=app_id, build_id=build_id, last_sync=sync_date)
                save |= update_device_meta(user, device_id, device_app_meta=app_meta, save=False)
            if save:
                # fire_signals=False — presumably to avoid re-triggering
                # downstream change feeds for this bookkeeping write; confirm.
                user.save(fire_signals=False)
class UserSyncHistoryReindexerDocProcessor(BaseDocProcessor):
    """Feeds a user's recent synclogs through the sync-history pillow processor."""

    def __init__(self, pillow_processor):
        self.pillow_processor = pillow_processor

    def process_doc(self, doc):
        # Any failure marks the whole user doc as unprocessed.
        for change in self._doc_to_changes(doc):
            try:
                self.pillow_processor.process_change(change)
            except Exception:
                return False
        return True

    def handle_skip(self, doc):
        print('Unable to process user {}'.format(doc['_id']))
        return True

    def _doc_to_changes(self, doc):
        # Only the user's 10 most recent synclogs are replayed, so a reindex
        # does not have to walk every synclog ever recorded.
        recent = get_synclogs_for_user(doc['_id'], limit=10)
        return [
            Change(id=row['doc']['_id'], sequence_id=None, document=row['doc'])
            for row in recent
        ]
class UserSyncHistoryReindexer(Reindexer):
    """Rebuilds user sync history by running each user doc through the pillow processor."""

    def __init__(self, doc_provider, chunk_size=1000, reset=False):
        self.doc_provider = doc_provider
        self.chunk_size = chunk_size
        self.reset = reset
        self.doc_processor = UserSyncHistoryReindexerDocProcessor(
            UserSyncHistoryProcessor())

    def reindex(self):
        controller = DocumentProcessorController(
            self.doc_provider,
            self.doc_processor,
            reset=self.reset,
            chunk_size=self.chunk_size,
        )
        controller.run()
class UpdateUserSyncHistoryReindexerFactory(ReindexerFactory):
    """Builds the `user-sync-history` reindexer over CommCareUser and WebUser docs."""
    slug = 'user-sync-history'
    arg_contributors = [
        # Contributes the resumable-reindexer CLI options consumed via self.options.
        ReindexerFactory.resumable_reindexer_args,
    ]

    def build(self):
        # Fixed key identifying this iteration so it can be resumed.
        iteration_key = "UpdateUserSyncHistoryPillow_reindexer"
        doc_provider = CouchDocumentProvider(iteration_key, doc_type_tuples=[
            CommCareUser,
            WebUser
        ])
        return UserSyncHistoryReindexer(doc_provider, **self.options)
|
<filename>src/test/shell/bazel/testdata/bazel_toolchain_test_data/tools/arm_compiler/cc_toolchain_config.bzl
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests compiling using an external Linaro toolchain on a Linux machine
#
"""Implementation of a rule that configures a Linaro toolchain."""
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"feature",
"flag_group",
"flag_set",
"tool",
"tool_path",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
def _impl(ctx):
if (ctx.attr.cpu == "armeabi-v7a"):
toolchain_identifier = "armeabi-v7a"
elif (ctx.attr.cpu == "k8"):
toolchain_identifier = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
host_system_name = "armeabi-v7a"
elif (ctx.attr.cpu == "k8"):
host_system_name = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_system_name = "arm_a15"
elif (ctx.attr.cpu == "k8"):
target_system_name = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_cpu = "armeabi-v7a"
elif (ctx.attr.cpu == "k8"):
target_cpu = "k8"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_libc = "glibc_2.19"
elif (ctx.attr.cpu == "k8"):
target_libc = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "k8"):
compiler = "compiler"
elif (ctx.attr.cpu == "armeabi-v7a"):
compiler = "gcc"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
abi_version = "gcc"
elif (ctx.attr.cpu == "k8"):
abi_version = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
abi_libc_version = "glibc_2.19"
elif (ctx.attr.cpu == "k8"):
abi_libc_version = "local"
else:
fail("Unreachable")
cc_target_os = None
builtin_sysroot = None
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
if (ctx.attr.cpu == "armeabi-v7a"):
objcopy_embed_data_action = action_config(
action_name = "objcopy_embed_data",
enabled = True,
tools = [
tool(path = "linaro_linux_gcc/arm-linux-gnueabihf-objcopy"),
],
)
elif (ctx.attr.cpu == "k8"):
objcopy_embed_data_action = action_config(
action_name = "objcopy_embed_data",
enabled = True,
tools = [tool(path = "/usr/bin/objcopy")],
)
else:
objcopy_embed_data_action = None
action_configs = [objcopy_embed_data_action]
if (ctx.attr.cpu == "k8"):
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-fno-canonical-system-headers",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
elif (ctx.attr.cpu == "armeabi-v7a"):
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-no-canonical-prefixes",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
else:
unfiltered_compile_flags_feature = None
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
if (ctx.attr.cpu == "armeabi-v7a"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"--sysroot=external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc",
"-mfloat-abi=hard",
"-nostdinc",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/lib/gcc/arm-linux-gnueabihf/5.3.1/include",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/include",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/include",
"-U_FORTIFY_SOURCE",
"-fstack-protector",
"-fPIE",
"-fdiagnostics-color=always",
"-Wall",
"-Wunused-but-set-parameter",
"-Wno-free-nonheap-object",
"-fno-omit-frame-pointer",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/include/c++/5.3.1/arm-linux-gnueabihf",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/include/c++/5.3.1",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/include/c++/5.3.1/arm-linux-gnueabihf",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/include/c++/5.3.1",
],
),
],
),
],
)
elif (ctx.attr.cpu == "k8"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-U_FORTIFY_SOURCE",
"-D_FORTIFY_SOURCE=2",
"-fstack-protector",
"-Wall",
"-Wl,-z,-relro,-z,now",
"-Wunused-but-set-parameter",
"-Wno-free-nonheap-object",
"-fno-omit-frame-pointer",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-std=c++0x"])],
),
],
)
else:
default_compile_flags_feature = None
supports_pic_feature = feature(name = "supports_pic", enabled = True)
opt_feature = feature(name = "opt")
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
if (ctx.attr.cpu == "armeabi-v7a"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
"--sysroot=external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc",
"-lstdc++",
"-latomic",
"-lm",
"-lpthread",
"-Ltools/arm_compiler/linaro_linux_gcc/clang_more_libs",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/lib",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/lib",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/lib",
"-Bexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/bin",
"-Wl,--dynamic-linker=/lib/ld-linux-armhf.so.3",
"-no-canonical-prefixes",
"-pie",
"-Wl,-z,relro,-z,now",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
elif (ctx.attr.cpu == "k8"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
"-lstdc++",
"-lm",
"-Wl,-no-as-needed",
"-pass-exit-codes",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
else:
default_link_flags_feature = None
objcopy_embed_flags_feature = feature(
name = "objcopy_embed_flags",
enabled = True,
flag_sets = [
flag_set(
actions = ["objcopy_embed_data"],
flag_groups = [flag_group(flags = ["-I", "binary"])],
),
],
)
dbg_feature = feature(name = "dbg")
if (ctx.attr.cpu == "k8"):
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
elif (ctx.attr.cpu == "armeabi-v7a"):
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
cxx_builtin_include_directories = [
"%package(@org_linaro_components_toolchain_gcc_5_3_1//include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/usr/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/usr/lib/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//include)%/c++/5.3.1",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//lib/gcc/arm-linux-gnueabihf/5.3.1/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/include)%/c++/5.3.1",
]
elif (ctx.attr.cpu == "k8"):
cxx_builtin_include_directories = [
"/usr/include/c++/4.8",
"/usr/include/x86_64-linux-gnu/c++/4.8",
"/usr/include/c++/4.8/backward",
"/usr/lib/gcc/x86_64-linux-gnu/4.8/include",
"/usr/local/include",
"/usr/lib/gcc/x86_64-linux-gnu/4.8/include-fixed",
"/usr/include/x86_64-linux-gnu",
"/usr/include",
]
else:
fail("Unreachable")
artifact_name_patterns = []
make_variables = []
if (ctx.attr.cpu == "armeabi-v7a"):
tool_paths = [
tool_path(
name = "ar",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ar",
),
tool_path(
name = "compat-ld",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "cpp",
path = "linaro_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "dwp",
path = "linaro_linux_gcc/arm-linux-gnueabihf-dwp",
),
tool_path(
name = "gcc",
path = "linaro_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "gcov",
path = "arm-frc-linux-gnueabi/arm-frc-linux-gnueabi-gcov-4.9",
),
tool_path(
name = "ld",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "nm",
path = "linaro_linux_gcc/arm-linux-gnueabihf-nm",
),
tool_path(
name = "objcopy",
path = "linaro_linux_gcc/arm-linux-gnueabihf-objcopy",
),
tool_path(
name = "objdump",
path = "linaro_linux_gcc/arm-linux-gnueabihf-objdump",
),
tool_path(
name = "strip",
path = "linaro_linux_gcc/arm-linux-gnueabihf-strip",
),
]
elif (ctx.attr.cpu == "k8"):
tool_paths = [
tool_path(name = "ar", path = "/usr/bin/ar"),
tool_path(name = "cpp", path = "/usr/bin/cpp"),
tool_path(name = "dwp", path = "/usr/bin/dwp"),
tool_path(name = "gcc", path = "/usr/bin/gcc"),
tool_path(name = "gcov", path = "/usr/bin/gcov"),
tool_path(name = "ld", path = "/usr/bin/ld"),
tool_path(name = "nm", path = "/usr/bin/nm"),
tool_path(name = "objcopy", path = "/usr/bin/objcopy"),
tool_path(name = "objdump", path = "/usr/bin/objdump"),
tool_path(name = "strip", path = "/usr/bin/strip"),
]
else:
fail("Unreachable")
out = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(out, "Fake executable")
return [
cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
),
DefaultInfo(
executable = out,
),
]
# Bazel rule exposing the cross-compilation CC toolchain configuration
# built by _impl (Linaro ARM gcc 5.3.1 cross toolchain or local x86_64).
cc_toolchain_config = rule(
    implementation = _impl,
    attrs = {
        # Target CPU; _impl fails "Unreachable" for any other value.
        "cpu": attr.string(mandatory = True, values = ["armeabi-v7a", "k8"]),
    },
    provides = [CcToolchainConfigInfo],
    # Declared executable because _impl writes a fake executable output file.
    executable = True,
)
|
<gh_stars>1-10
#!/usr/bin/env python
"""
PGInteraction
"""
import csv
import psycopg2
import psycopg2.extras
import simplejson as json
from jsonschema import validate, ValidationError
from psycopg2 import errorcodes
from datacoco_db.helper.deprecate import deprecated
class InvalidJsonResult(Exception):
    """Raised when a query result fails JSON-schema validation."""
def _result_iter(cursor, arraysize):
"An iterator that uses fetchmany to keep memory usage down"
while True:
results = cursor.fetchmany(arraysize)
if not results:
break
for result in results:
yield result
class PGInteraction:
    """
    Simple Class for interaction with Postgres.

    Wraps a psycopg2 connection/cursor pair and exposes helpers for batch
    execution, lazy fetching, and CSV/JSON/S3 export. Typical usage:
    ``conn()`` then ``batch_open()`` before any execute/fetch call.
    """

    def __init__(self, dbname, host, user, password, port, schema="public"):
        # NOTE(review): ``schema`` is accepted but never stored or used
        # in this class — confirm whether callers rely on it.
        if not dbname or not host or not user or not port or password is None:
            raise RuntimeError("%s request all __init__ arguments" % __name__)
        self.host = host
        self.user = user
        self.password = password
        self.dbname = dbname
        self.port = port
        self.con = None  # psycopg2 connection, created by conn()
        self.cur = None  # cursor, created by batch_open()
        self.unload_stmt = None  # last Redshift UNLOAD statement issued

    def conn(self, dict_cursor=False):
        """
        Open a connection, should be done right before time of insert
        """
        # NOTE(review): the DSN is built by string concatenation, so a
        # password containing spaces or quotes would break parsing —
        # consider psycopg2.connect(**kwargs); confirm before changing.
        self.con = psycopg2.connect(
            "dbname="
            + self.dbname
            + " host="
            + self.host
            + " user="
            + self.user
            + " password="
            + self.password
            + " port="
            + str(self.port)
        )
        self.dict_cursor = dict_cursor

    @deprecated("Use batch_open() instead")
    def batchOpen(self):
        self.batch_open()

    def batch_open(self):
        """Create a cursor; uses RealDictCursor when conn(dict_cursor=True)."""
        if self.dict_cursor:
            self.cur = self.con.cursor(
                cursor_factory=psycopg2.extras.RealDictCursor
            )
        else:
            self.cur = self.con.cursor()

    @deprecated("Use batch_commit() instead")
    def batchCommit(self):
        self.batch_commit()

    def batch_commit(self):
        """Commit the current transaction, re-raising failures as RuntimeError."""
        try:
            self.con.commit()
        except Exception as e:
            # NOTE(review): assumes the exception is a psycopg2 error
            # carrying a pgcode attribute; any other exception type would
            # raise AttributeError here instead — confirm.
            pg_error = errorcodes.lookup(e.pgcode)
            raise RuntimeError(pg_error)

    def fetch_sql_all(self, sql):
        """
        Execute *sql* and return all result rows at once.

        :param sql: SQL statement to execute on the open cursor
        :return: list of all rows from cursor.fetchall()
        """
        try:
            self.cur.execute(sql)
            results = self.cur.fetchall()
        except Exception as err:
            print(err)
            raise
        return results

    def fetch_sql(self, sql, blocksize=1000):
        """Execute *sql* and return a lazy row iterator (see _result_iter),
        fetching *blocksize* rows per round-trip."""
        try:
            self.cur.execute(sql)
            results = _result_iter(self.cur, arraysize=blocksize)
        except Exception as e:
            # NOTE(review): same pgcode assumption as batch_commit.
            pgError = errorcodes.lookup(e.pgcode)
            raise RuntimeError(pgError)
        return results

    def export_sql_to_csv(
        self, sql, csv_filename, delimiter=",", headers=True
    ):
        """Run *sql* and stream the result set to a CSV file.

        Tabs and newlines inside values are flattened to spaces so each
        record stays on one line.
        """
        result = self.fetch_sql(sql)
        f = open(csv_filename, "w", newline="")
        print("exporting to file:" + f.name)
        writer = csv.writer(f, delimiter=delimiter)
        if headers:
            writer.writerow(
                [i[0] for i in self.cur.description]
            )  # write headers
        for row in result:
            writer.writerow(
                [str(s).replace("\t", " ").replace("\n", " ") for s in row]
            )
        f.flush()
        f.close()

    def json_serialize(self, dict_obj):
        """For conversion of python value to json value during encoding.

        Maps the strings "true"/"false" to booleans and "null"/"none"
        (case-insensitive) to None, mutating and returning *dict_obj*.
        """
        for key, val in dict_obj.items():
            if isinstance(val, str) and val == "true":
                val = True
            if isinstance(val, str) and val == "false":
                val = False
            if isinstance(val, str) and val.lower() in ("null", "none"):
                val = None
            dict_obj[key] = val
        return dict_obj

    def validate_json_scheme(self, json_string, json_schema):
        """Validate *json_string* against *json_schema*.

        :raises InvalidJsonResult: if validation fails
        """
        try:
            validate(json.loads(json_string), json_schema)
        except ValidationError as e:
            raise InvalidJsonResult("Invalid JSON found {}".format(e.message))

    def export_sql_to_json(self, sql, filename, json_schema=None):
        """Run *sql* and dump the rows to *filename* as a JSON array,
        optionally validating the serialized output against *json_schema*."""
        results = self.fetch_sql(sql)
        with open(filename, "w") as f:
            # iterable_as_array (simplejson) serializes the generator
            # as a JSON array without materializing it first.
            json_results = json.dumps(
                (self.json_serialize(record) for record in results),
                iterable_as_array=True,
            )
            if json_schema:
                self.validate_json_scheme(
                    json_string=json_results, json_schema=json_schema
                )
            f.write(json_results)

    def export_sql_to_s3(
        self, sql, s3path, aws_access_key, aws_secret_key, options=None
    ):
        """
        Unload the results of *sql* to S3 via Redshift UNLOAD.

        where option is an array of:
        { MANIFEST
        | DELIMITER [ AS ] 'delimiter-char'
        | FIXEDWIDTH [ AS ] 'fixedwidth-spec' }
        | ENCRYPTED
        | BZIP2
        | GZIP
        | ADDQUOTES
        | NULL [ AS ] 'null-string'
        | ESCAPE
        | ALLOWOVERWRITE
        | PARALLEL [ { ON | TRUE } | { OFF | FALSE } ]
        [ MAXFILESIZE [AS] max-size [ MB | GB ] ]
        """
        default_options = ["delimiter '|'", "ALLOWOVERWRITE", "PARALLEL false"]
        if options is None:
            options = default_options
        self.unload_stmt = (
            ("unload ('%s') to '%s' " % (sql, s3path))
            + ("credentials 'aws_access_key_id=%s" % (aws_access_key))
            + (
                ";aws_secret_access_key=%s' %s"
                % (aws_secret_key, " ".join(options))
            )
        )
        # NOTE(review): this print emits the statement including the AWS
        # secret key — consider redacting before it reaches shared logs.
        print("unload command %s " % self.unload_stmt)
        self.exec_sql(self.unload_stmt)
        print("Unload complete")

    def exec_sql(self, sql):
        """Execute *sql* on the open cursor.

        Returns cursor.execute()'s result (None for psycopg2); errors are
        re-raised as RuntimeError with the looked-up pg error name.
        """
        try:
            results = self.cur.execute(sql)
        except Exception as e:
            print(e)
            # NOTE(review): same pgcode assumption as batch_commit.
            pgError = errorcodes.lookup(e.pgcode)
            raise RuntimeError(pgError)
        return results

    @deprecated("Use bulk_dictionary_insert() instead")
    def bulkDictionaryInsert(self, table_name, col_dict):
        self.bulk_dictionary_insert(table_name, col_dict)

    def bulk_dictionary_insert(self, table_name, col_dict):
        """Insert one row into *table_name* from a column->value dict.

        Values are passed as bind parameters; the table and column names
        are interpolated directly (hence the nosec annotation) — callers
        must not pass untrusted identifiers.
        """
        if len(col_dict) == 0:
            return
        placeholders = ", ".join(["%s"] * len(col_dict))
        columns = ", ".join(col_dict.keys())
        sql = "INSERT into %s ( %s ) VALUES ( %s )" % (  # nosec
            table_name,
            columns,
            placeholders,
        )
        try:
            self.cur.execute(sql, list(col_dict.values()))
        except Exception as e:
            pgError = errorcodes.lookup(e.pgcode)
            raise RuntimeError(pgError)

    @deprecated
    def bulkPostCleanup(self, table_name):
        self.bulk_post_cleanup(table_name)

    def bulk_post_cleanup(self, table_name):
        """Finish an upsert pass: delete rows superseded by freshly-updated
        ones (matched on nk), then clear the etl_updated flag."""
        sql = """
        delete from {0}
        where etl_updated=0
        and nk in (select nk from {0} where etl_updated = 1);
        update {0} set etl_updated = 0
        where etl_updated = 1;""".format(  # nosec
            table_name
        )
        try:
            self.cur.execute(sql)
        except Exception as e:
            pgError = errorcodes.lookup(e.pgcode)
            raise RuntimeError(pgError)

    def table_exists(self, table_name):
        """
        Checks for the existence of a table in specified redshift environment
        :param table_name: The table name, optionally schema-qualified
            ("schema.table"); defaults to the public schema
        :return: the first result row — a one-element row whose value
            indicates the table's existence
        """
        schema_name = "public"
        if "." in table_name:
            schema_name, table = table_name.split(".")
        else:
            table = table_name
        sql = """select
        case when count(1) = 1 then true else false end
        from pg_class c
        join pg_namespace n on n.oid = c.relnamespace
        where trim(n.nspname) = '%s' and trim(c.relname) = '%s'
        """ % (  # nosec
            schema_name.lower(),
            table.lower(),
        )
        return self.fetch_sql_all(sql)[0]

    @deprecated("Use batch_close instead")
    def batchClose(self):
        self.batch_close()

    def batch_close(self):
        """Close the underlying connection (the cursor is left as-is)."""
        self.con.close()
|
<reponame>cy-Ajeesh-Anil/testproject
"""empty message
Revision ID: a09758231d3f
Revises:
Create Date: 2020-08-24 15:05:09.520261
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a09758231d3f'
down_revision = None  # None: this is the base (first) migration
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial application schema.

    NOTE(review): several tables declare foreign keys to ``student.id``,
    but no ``student`` table is created in this migration — presumably it
    exists from elsewhere; confirm before running against a fresh DB.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # --- Lookup / reference tables (no foreign keys) ---
    op.create_table('country',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('country_name', sa.String(), nullable=True),
    sa.Column('country_code', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('education_level',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('exam',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('exam_name', sa.String(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('grading_scheme',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('scheme', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('institution_type',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('language',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('language', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('language_test',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('test_name', sa.String(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # --- Per-section score tables (one float score each) ---
    op.create_table('listening',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('quantitative',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('rank',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('rank_percentage', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('reading',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('speaking',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('total',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('verbal',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('writing',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('score', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # --- Dependent tables (reference the tables above and student) ---
    op.create_table('emergency_contact',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('emy_name', sa.String(), nullable=True),
    sa.Column('emy_phone', sa.String(), nullable=True),
    sa.Column('emy_email', sa.String(), nullable=True),
    sa.Column('emy_relationship', sa.String(), nullable=True),
    sa.Column('emy_address', sa.String(), nullable=True),
    sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
    sa.ForeignKeyConstraint(['student_id'], ['student.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('exam_test_score',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('exam_id', sa.Integer(), nullable=True),
    sa.Column('exam_date', sa.Date(), nullable=True),
    sa.Column('verbal_id', sa.Integer(), nullable=True),
    sa.Column('verbal_rank_id', sa.Integer(), nullable=True),
    sa.Column('quantitative_id', sa.Integer(), nullable=True),
    sa.Column('quantitative_rank_id', sa.Integer(), nullable=True),
    sa.Column('writing_id', sa.Integer(), nullable=True),
    sa.Column('writing_rank_id', sa.Integer(), nullable=True),
    sa.Column('total_id', sa.Integer(), nullable=True),
    sa.Column('total_rank_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['exam_id'], ['exam.id'], ),
    sa.ForeignKeyConstraint(['quantitative_id'], ['quantitative.id'], ),
    sa.ForeignKeyConstraint(['quantitative_rank_id'], ['rank.id'], ),
    sa.ForeignKeyConstraint(['total_id'], ['total.id'], ),
    sa.ForeignKeyConstraint(['total_rank_id'], ['rank.id'], ),
    sa.ForeignKeyConstraint(['verbal_id'], ['verbal.id'], ),
    sa.ForeignKeyConstraint(['verbal_rank_id'], ['rank.id'], ),
    sa.ForeignKeyConstraint(['writing_id'], ['writing.id'], ),
    sa.ForeignKeyConstraint(['writing_rank_id'], ['rank.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('grade',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('grade', sa.String(), nullable=True),
    sa.Column('grade_scheme_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['grade_scheme_id'], ['grading_scheme.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('institution',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('country_id', sa.Integer(), nullable=True),
    sa.Column('institution_type_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['country_id'], ['country.id'], ),
    sa.ForeignKeyConstraint(['institution_type_id'], ['institution_type.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('language_test_score',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('test_id', sa.Integer(), nullable=True),
    sa.Column('exam_date', sa.Date(), nullable=True),
    sa.Column('listening_id', sa.Integer(), nullable=True),
    sa.Column('reading_id', sa.Integer(), nullable=True),
    sa.Column('writing_id', sa.Integer(), nullable=True),
    sa.Column('speaking_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['listening_id'], ['listening.id'], ),
    sa.ForeignKeyConstraint(['reading_id'], ['reading.id'], ),
    sa.ForeignKeyConstraint(['speaking_id'], ['speaking.id'], ),
    sa.ForeignKeyConstraint(['test_id'], ['language_test.id'], ),
    sa.ForeignKeyConstraint(['writing_id'], ['writing.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('states',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('state_name', sa.String(), nullable=True),
    sa.Column('country_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['country_id'], ['country.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('city',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('city_name', sa.String(), nullable=True),
    sa.Column('state_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['state_id'], ['states.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('education',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('country_id', sa.Integer(), nullable=True),
    sa.Column('education_level_id', sa.Integer(), nullable=True),
    sa.Column('institution_id', sa.Integer(), nullable=True),
    sa.Column('institution_type_id', sa.Integer(), nullable=True),
    sa.Column('grading_scheme_id', sa.Integer(), nullable=True),
    sa.Column('grade_id', sa.Integer(), nullable=True),
    sa.Column('start_date', sa.Date(), nullable=True),
    sa.Column('end_date', sa.Date(), nullable=True),
    sa.Column('school_name', sa.String(), nullable=True),
    sa.Column('school_address', sa.String(), nullable=True),
    sa.Column('student_number', sa.String(), nullable=True),
    sa.Column('most_recent', sa.Boolean(), nullable=True),
    sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
    sa.ForeignKeyConstraint(['country_id'], ['country.id'], ),
    sa.ForeignKeyConstraint(['education_level_id'], ['education_level.id'], ),
    sa.ForeignKeyConstraint(['grade_id'], ['grade.id'], ),
    sa.ForeignKeyConstraint(['grading_scheme_id'], ['grading_scheme.id'], ),
    sa.ForeignKeyConstraint(['institution_id'], ['institution.id'], ),
    sa.ForeignKeyConstraint(['institution_type_id'], ['institution_type.id'], ),
    sa.ForeignKeyConstraint(['student_id'], ['student.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('profile',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('photo', sa.String(), nullable=True),
    sa.Column('first_name', sa.String(), nullable=True),
    sa.Column('last_name', sa.String(), nullable=True),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('gender', sa.String(), nullable=True),
    sa.Column('address', sa.String(), nullable=True),
    sa.Column('citizenship_id', sa.Integer(), nullable=True),
    sa.Column('city_id', sa.Integer(), nullable=True),
    sa.Column('state_id', sa.Integer(), nullable=True),
    sa.Column('country_id', sa.Integer(), nullable=True),
    sa.Column('postal_code', sa.String(), nullable=True),
    sa.Column('passport_no', sa.String(), nullable=True),
    sa.Column('date_of_birth', sa.Date(), nullable=True),
    sa.Column('language_id', sa.Integer(), nullable=True),
    sa.Column('marital_status', sa.String(), nullable=True),
    sa.Column('email', sa.String(), nullable=True),
    sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
    sa.ForeignKeyConstraint(['citizenship_id'], ['country.id'], ),
    sa.ForeignKeyConstraint(['city_id'], ['city.id'], ),
    sa.ForeignKeyConstraint(['country_id'], ['country.id'], ),
    sa.ForeignKeyConstraint(['language_id'], ['language.id'], ),
    sa.ForeignKeyConstraint(['state_id'], ['states.id'], ),
    sa.ForeignKeyConstraint(['student_id'], ['student.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are removed child-first (reverse of creation order) so no
    # foreign-key constraint is ever violated.
    for table_name in (
        'profile',
        'education',
        'city',
        'states',
        'language_test_score',
        'institution',
        'grade',
        'exam_test_score',
        'emergency_contact',
        'writing',
        'verbal',
        'total',
        'speaking',
        'reading',
        'rank',
        'quantitative',
        'listening',
        'language_test',
        'language',
        'institution_type',
        'grading_scheme',
        'exam',
        'education_level',
        'country',
    ):
        op.drop_table(table_name)
    # ### end Alembic commands ###
|
<gh_stars>1-10
import json
import os
import logging
import sys
from datetime import datetime
from dateutil.tz import gettz
from common import (common_const, line, utils, flex_message)
from validation.smart_register_param_check import SmartRegisterParamCheck
from common.channel_access_token import ChannelAccessToken
from smart_register.smart_register_order_info import SmartRegisterOrderInfo
import paypayopa
import polling
# Environment variables
LIFF_CHANNEL_ID = int(os.environ.get("LIFF_CHANNEL_ID"))
LIFF_URL = os.environ.get("LIFF_URL")
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")
CHANNEL_ACCESS_TOKEN_DB = os.environ.get("CHANNEL_ACCESS_TOKEN_DB")
DETAILS_PASS = os.environ.get("DETAILS_PASS")
# PayPay API credentials
PAY_PAY_API_KEY = os.environ.get("PAY_PAY_API_KEY")
PAY_PAY_API_SECRET = os.environ.get("PAY_PAY_API_SECRET")
PAY_PAY_API_MERCHANT_ID = os.environ.get("PAY_PAY_API_MERCHANT_ID")
# Treat PAY_PAY_IS_PROD as a boolean flag: only the exact strings
# 'True' / 'true' enable production mode; anything else is sandbox.
if (os.environ.get("PAY_PAY_IS_PROD") == 'True'
        or os.environ.get("PAY_PAY_IS_PROD") == 'true'):
    PAY_PAY_IS_PROD = True
else:
    PAY_PAY_IS_PROD = False
client = paypayopa.Client(auth=(PAY_PAY_API_KEY, PAY_PAY_API_SECRET),
                          production_mode=PAY_PAY_IS_PROD)
client.set_assume_merchant(PAY_PAY_API_MERCHANT_ID)
# Logging configuration
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# LINE resource declarations
OA_CHANNEL_ID = os.getenv('OA_CHANNEL_ID', None)
if OA_CHANNEL_ID is None:
    logger.error('Specify CHANNEL_ID as environment variable.')
    sys.exit(1)
# AWS resource construction
order_info_table = SmartRegisterOrderInfo()
accesstoken_table = ChannelAccessToken()
def send_messages(order_info, datetime_now):
    """
    Send a receipt push message to the user's LINE Official Account.

    Parameters
    ----------
    order_info : dict
        Order information for the target user.
    datetime_now : string
        Payment timestamp (already formatted for display).

    Returns
    -------
    None
    """
    # Fetch the short-lived channel access token from the DB
    channel_access_token = accesstoken_table.get_item(OA_CHANNEL_ID)
    if channel_access_token is None:
        logger.error(
            'CHANNEL_ACCESS_TOKEN in Specified CHANNEL_ID: %s is not exist.',
            OA_CHANNEL_ID)
    else:
        order_id = order_info['orderId']
        # Receipt links back to the order-details LIFF page
        details_url = LIFF_URL + DETAILS_PASS + '?orderId=' + order_id
        flex_obj = flex_message.create_receipt(
            order_info, datetime_now, details_url)
        line.send_push_message(
            channel_access_token['channelAccessToken'],
            flex_obj, order_info['userId'])
def fetch_payment_details(merchant_payment_id):
    """
    Fetch the payment status for a PayPay transaction.

    Parameters
    ----------
    merchant_payment_id
        Merchant-provided unique payment transaction ID.

    Returns
    -------
    str or dict
        The payment status string from the PayPay API (e.g. 'COMPLETED'
        or 'FAILED'), or ``{'error': 'true'}`` when no payment data is
        available.
    """
    resp = client.Code.get_payment_details(merchant_payment_id)
    # Check for None directly instead of comparing the stringified value
    # (``str(resp['data']) == 'None'``), which would also — incorrectly —
    # match a payload that is the literal string "None".
    if resp['data'] is None:
        return {
            'error': 'true'
        }
    return resp['data']['status']
def is_correct_response(resp):
    """polling check_success hook: log each poll result and accept it as-is."""
    logger.info(resp)
    return resp
def lambda_handler(event, context):
    """
    Return the result of the PayPay confirm API call.

    Polls the PayPay API until the payment reaches a terminal state
    (COMPLETED or FAILED), updates the order record, and pushes a
    receipt message to the user.

    Parameters
    ----------
    event : dict
        Parameters passed with the POST request.
    context : dict
        Lambda context.

    Returns
    -------
    response : dict
        Result of the PayPay API call.
    """
    # Log the incoming parameters
    logger.info(event)
    body = json.loads(event['body'])
    if body is None:
        error_msg_display = common_const.const.MSG_ERROR_NOPARAM
        return utils.create_error_response(error_msg_display, 400)
    # Parameter validation
    param_checker = SmartRegisterParamCheck(body)
    if error_msg := param_checker.check_api_put_paypay_confirm():
        error_msg_disp = ('\n').join(error_msg)
        logger.error(error_msg_disp)
        return utils.create_error_response(error_msg_disp, status=400)  # noqa: E501
    order_id = body['orderId']
    # Get the payment amount from the order history
    # NOTE(review): ``amount`` and ``currency`` are computed but never
    # used below — confirm whether they were meant to be validated
    # against the PayPay response.
    order_info = order_info_table.get_item(order_id)
    amount = float(order_info['amount'])
    transaction_id = 999999  # NOTE(review): hard-coded placeholder id — confirm
    currency = 'JPY'
    datetime_now = datetime.now(gettz('Asia/Tokyo'))
    try:
        # Poll every 2s (up to 100s) until the payment is terminal
        polling.poll(
            lambda: fetch_payment_details(order_id) == 'COMPLETED' or fetch_payment_details(order_id) == 'FAILED',
            check_success=is_correct_response,
            step=2,
            timeout=100)
        api_response = client.Code.get_payment_details(order_id)
        # Update the order record (DB)
        order_info_table.update_transaction(
            order_id, transaction_id, utils.get_ttl_time(datetime_now))
        # Send the receipt message
        send_messages(order_info,
                      datetime_now.strftime('%Y/%m/%d %H:%M:%S'))
    except Exception as e:
        logger.exception('Occur Exception: %s', e)
        return utils.create_error_response('Error')
    response = utils.create_success_response(
        json.dumps(api_response))
    logger.info('response %s', response)
    return response
|
<reponame>misokg/Cornell-MOE
# -*- coding: utf-8 -*-
"""Tools to compute LCB and optimize the next best point(s) to sample using LCB through C++ calls.
This file contains a class to compute + derivatives and a functions to solve the q,p-KG optimization problem.
The :class:`moe.optimal_learning.python.cpp_wrappers.knowledge_gradient.KnowledgeGradient`
The optimization functions are convenient wrappers around the matching C++ calls.
See gpp_knowledge_gradient_optimization.hpp/cpp for further details on knowledge gradient.
"""
from builtins import range
import numpy
from moe.optimal_learning.python.data_containers import SamplePoint
def lower_confidence_bound_optimization(
        gaussian_process,
        candidate_pts,
        num_to_sample,
):
    """Greedily select ``num_to_sample`` points from ``candidate_pts`` using an LCB criterion.

    The first point minimizes the lower confidence bound ``mu - sigma`` over the
    candidate set.  Each subsequent point is chosen as the candidate with maximum
    posterior standard deviation, restricted to candidates whose LCB does not
    exceed the best upper confidence bound ``min(mu + sigma)``; before each such
    choice the previously selected point is "hallucinated" into the GP with a
    zero observation and noise 0.25 so the posterior variance shrinks around it.

    .. NOTE:: Side effect -- ``gaussian_process`` is mutated via
       ``add_sampled_points`` whenever ``num_to_sample > 1``.

    :param gaussian_process: GP model exposing ``compute_mean_of_points``,
        ``compute_cholesky_variance_of_points``, ``add_sampled_points``,
        ``dim`` and ``num_derivatives``
    :param candidate_pts: discrete candidate points, shape (num_candidates, dim)
    :param num_to_sample: how many points to select (the q in q,p-LCB); int >= 1
    :return: tuple of (selected points with shape (num_to_sample, dim), 0.0)
    :rtype: (numpy.ndarray, float)
    """
    # Posterior mean and per-point standard deviation over all candidates.
    mean_surface = gaussian_process.compute_mean_of_points(candidate_pts)
    standard_deviation = numpy.zeros(candidate_pts.shape[0])
    for pt in range(candidate_pts.shape[0]):
        standard_deviation[pt] = gaussian_process.compute_cholesky_variance_of_points(candidate_pts[[pt],:])[0,0]
    # Lower confidence bound; its minimizer is the first selected point.
    target = mean_surface - standard_deviation
    index = numpy.argmin(target)
    # Keep only candidates whose LCB is no worse than the best UCB.
    ucb = mean_surface + standard_deviation
    upper_bound = numpy.min(ucb)
    condition = target <= upper_bound
    satisfied_candidate_pts = candidate_pts[condition,:]
    satisfied_standard_deviation = numpy.zeros(satisfied_candidate_pts.shape[0])
    results = numpy.zeros((num_to_sample, gaussian_process.dim))
    results[0] = candidate_pts[index]
    for i in range(1, num_to_sample):
        # Hallucinate the previous pick into the GP (observation 0, noise 0.25).
        # NOTE(review): the zeros vector of length num_derivatives+1 assumes the
        # GP wrapper's (value + derivatives) observation layout -- confirm.
        sample_point = [SamplePoint(results[i-1],
                                    numpy.zeros(gaussian_process.num_derivatives+1),
                                    0.25)]
        gaussian_process.add_sampled_points(sample_point)
        # Recompute posterior std devs on the restricted set and pick the widest.
        for pt in range(satisfied_standard_deviation.shape[0]):
            satisfied_standard_deviation[pt] = gaussian_process.compute_cholesky_variance_of_points(satisfied_candidate_pts[[pt],:])[0,0]
        index = numpy.argmax(satisfied_standard_deviation)
        results[i] = satisfied_candidate_pts[index]
    return results, 0.0
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 13:39:00 2019
@author: isaaclera
"""
import simpy
import osmnx as ox
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import random
from matplotlib import colors
from shapely.ops import transform
from functools import partial
import pyproj
import scipy.spatial
from collections import OrderedDict
random.seed(0)
# =============================================================================
# Triggered actions when a mobile agent is under the coverage of a IoT device (edge/sensor)
# =============================================================================
class generic_action(object):
    """Base class for actions fired when a mobile agent is under IoT coverage.

    ``service_coverage`` maps street-network node ids to the id of the SW
    module (fog device) covering that node; ``env`` is the simpy environment.
    """

    def __init__(self, service_coverage, env):
        self.env = env
        self.service_coverage = service_coverage

    def action(self, mobile_agent):
        # Default hook: subclasses override this; the base does nothing.
        pass
class my_custom_action(generic_action):
    """Records licence plates seen under sensor coverage.

    ``plates`` maps plate -> simulation time of first sighting;
    ``fees`` maps plate -> {"arrive": first sighting, "end": later sighting}.
    """

    def __init__(self, *args, **kwargs):
        super(my_custom_action, self).__init__(*args, **kwargs)
        self.plates = {}
        self.fees = {}

    # mandatory function, called by the simulator on every movement step
    def action(self, ma):  # ma: mobile entity (e.g. car_agent)
        # BUG FIX: the original tested membership against the module-level
        # global ``service_coverage`` instead of the coverage map passed to the
        # constructor (it only worked because the script passed that same
        # global).  Use the instance's own map.
        if ma.get_current_position() in self.service_coverage:
            if ma.plate in self.plates:
                # Second sighting: close the interval for this plate.
                self.fees[ma.plate] = {"arrive": self.plates[ma.plate],
                                       "end": self.env.now}
            else:
                self.plates[ma.plate] = self.env.now
# =============================================================================
# Generic definition of a mobile agent
# =============================================================================
# In this example, I thought in vehicles (cars)
#More constructs are necessaries: random paths,
# A speed is necessary? or/and use of mov. distributions?
class generic_mobile_entity(object):  # GME
    """A mobile agent that travels along a precomputed path of street nodes."""

    def __init__(self, _id, path, speed, action=None, start=0):
        self.__default_speed = 10.0
        self._id = _id
        self.path = path
        self.next_time = None
        self.do = action
        self.start = start
        self.current_position = 0  # index into self.path
        # A zero speed would stall the agent forever; use the default instead.
        self.speed = self.__default_speed if speed == 0.0 else speed

    def __str__(self):
        node = self.path[self.current_position]
        return "Agent (%i) in node: %i[%i/%i]" % (
            self._id, node, self.current_position, len(self.path) - 1)

    def get_current_position(self):
        """Return the street-network node the agent currently occupies."""
        return self.path[self.current_position]
class car_agent(generic_mobile_entity):
    """A mobile entity representing a car, identified by a licence plate."""

    def __init__(self, *args, **kwargs):
        super(car_agent, self).__init__(*args, **kwargs)
        # Plate is derived from the numeric agent id, e.g. id 3 -> "EU3".
        self.plate = "EU" + str(self._id)

    def __str__(self):
        base = super(car_agent, self).__str__()
        return "Car: %s {%s}" % (self.plate, base)
#def actionFunction(mobileAgent, nextTime):
# # print "%i \t Arrived! %s \t NEXT TIME: %i"%(env.now,mobileAgent,nextTime)
# nodeid = mobileAgent.path[mobileAgent.current_position]
# x,y = G.nodes[nodeid]['x'],G.nodes[nodeid]['y']
# if (x,y) in service_coverage.keys():
# print (x,y)
# if ma.idx in plates:
# print "REGISTRADO"
#
# else:
# plates.append(ma.idx)
# print plates
#
# =============================================================================
# UTILS FUNCTIONS
# =============================================================================
def create_pos(G, scale):
    """Return ``{node: np.array([x*scale, y*scale])}`` for plotting.

    Built from the graph's 'x'/'y' node attributes (longitude/latitude).
    """
    xs = nx.get_node_attributes(G, 'x')
    ys = nx.get_node_attributes(G, 'y')
    return {node: np.array([xs[node] * scale, ys[node] * scale]) for node in xs}
def create_points(G):
    """Return an ``OrderedDict`` ``{node: [x, y]}`` from the node attributes.

    Ordering matters: callers index KDTree query results by position in
    this dict, so insertion order must stay stable.
    """
    xs = nx.get_node_attributes(G, 'x')
    ys = nx.get_node_attributes(G, 'y')
    points = OrderedDict()
    for node in xs.keys():
        points[node] = [xs[node], ys[node]]
    return points
def toMeters(geometry):
    """Return the length in metres of a shapely *geometry* given in lon/lat.

    Projects WGS84 (EPSG:4326) coordinates to UTM (EPSG:32633) before
    measuring ``.length``.
    NOTE(review): EPSG:32633 is UTM zone 33N; the study area (~2.6E, 39.6N,
    Mallorca) lies in zone 31N (EPSG:32631) -- confirm the intended zone.
    """
    project = partial(
        pyproj.transform,
        pyproj.Proj(init='EPSG:4326'),
        pyproj.Proj(init='EPSG:32633'))
    return transform(project,geometry).length
def get_random_node(G):
    """Pick a uniformly random node id from graph ``G``."""
    nodes = list(G.nodes())
    return nodes[random.randint(0, len(nodes) - 1)]
# =============================================================================
# INTERNAL FUNCTION OF YAFS-CORE
# =============================================================================
def __add_mobile_agent(idDES,gme,G):
    # Simpy process: moves a mobile entity (gme) edge by edge along its path,
    # firing its action callback at every node it reaches.
    # Relies on module-level ``env``; idDES is currently unused in the body.
    yield env.timeout(gme.start)  # wait until the agent's scheduled start time
    while (len(gme.path)-1 > gme.current_position):
        # Edge between the current node and the next node on the path.
        e = (gme.path[gme.current_position], gme.path[ gme.current_position+1])
        data = G.get_edge_data(*e)
        try:
            # Travel time = edge geometry length (metres) / agent speed.
            nextTime = int(toMeters(data[0]["geometry"])/gme.speed)
        except KeyError:
            nextTime = 1 #default time by roundabout or other Spatial THINGS
        #take an action?
        gme.next_time = nextTime
        gme.do.action(gme)
        gme.current_position += 1
        yield env.timeout(nextTime)
    #Last movement
    gme.do.action(gme)
    print "Mobile agent: %s ends "%gme.plate
# =============================================================================
# ## Street network
# =============================================================================
# =============================================================================
# ## Street network
# =============================================================================
G = ox.graph_from_point((39.637759, 2.646532), distance=750, network_type='drive')
# =============================================================================
# ## Fog topology (based on the current YAFS version)
# =============================================================================
topology_json = {}
topology_json["entity"] = []
topology_json["link"] = []
# BUG FIX: the original IPT values used "10 ^ 6", which is bitwise XOR in
# Python (e.g. 5000 * 10 ^ 6 == 50006), not a power.  Use 10 ** 6 as intended.
cloud_dev = {"id": 0, "model": "cloud", "mytag": "cloud", "IPT": 5000 * 10 ** 6, "RAM": 40000, "COST": 3, "WATT": 20.0, 'x': 2.6484887, 'y': 39.6580786}
sensor_dev = {"id": 1, "model": "radar-device1", "IPT": 100 * 10 ** 6, "RAM": 4000, "COST": 3, "WATT": 40.0, 'x': 2.645623, 'y': 39.6426471}
sensor_dev2 = {"id": 2, "model": "radar-device2", "IPT": 100 * 10 ** 6, "RAM": 4000, "COST": 3, "WATT": 40.0, 'x': 2.6507741, 'y': 39.6362394}
# Links: cloud <-> each radar device.
link1 = {"s": 0, "d": 1, "BW": 1, "PR": 10}
link2 = {"s": 0, "d": 2, "BW": 1, "PR": 1}
topology_json["entity"].append(cloud_dev)
topology_json["entity"].append(sensor_dev)
topology_json["entity"].append(sensor_dev2)
topology_json["link"].append(link1)
topology_json["link"].append(link2)
# Build the fog-topology graph and attach entity attributes to its nodes.
G2 = nx.Graph()
for edge in topology_json["link"]:
    G2.add_edge(edge["s"], edge["d"])
attNodes = {}
for entity in topology_json["entity"]:
    attNodes[entity['id']] = entity
nx.set_node_attributes(G2, values=attNodes)
# =============================================================================
# # Plot both structures
# =============================================================================
posG = create_pos(G, 100)
posG2 = create_pos(G2, 100)
nx.draw(G, posG, node_size=50)
nx.draw(G2, posG2, node_size=20, node_color="yellow", edge_color='pink', width=2)
# =============================================================================
# #Creating links among g1:nodes - g2:nodes
# The coverage of fog entities and street structures
# YAFS INTERNAL FUNCTION
# =============================================================================
tolerance = 0.0001
pG = create_points(G)
pG2 = create_points(G2)
tree = scipy.spatial.KDTree(pG.values())
points_within_tolerance = tree.query_ball_point(pG2.values(),tolerance)
# key = node network
# value = id - module SW
service_coverage = {}
for idx,pt in enumerate(points_within_tolerance):
## MODULE SW
key2 = pG2.keys()[idx]
nG2 = G2.nodes[key2]
print "%s is close to "%nG2["model"]
## Street coverage
for p in pt:
key = <KEY>
print G.nodes[key]
# service_coverage[(G.nodes[key]['x'],G.nodes[key]['y'])]=nG2["model"]
service_coverage[key] = nG2["id"]
print "SERVICE COVERAGE"
print service_coverage
# =============================================================================
# Simulation execution
# =============================================================================
env = simpy.Environment()
counter =0
action = my_custom_action(service_coverage,env)
for i in range(10000):
try:
src = get_random_node(G)
dst = get_random_node(G)
path = nx.shortest_path(G,src,dst)
paths = list(nx.all_simple_paths(G, source=src, target=dst,cutoff=5))
# if len(path)==0:
# continue
speed = random.randint(2,20)
start = random.randint(0,2000)
ma = car_agent(i,path,speed,action,start)
env.process(__add_mobile_agent(i, ma, G))
except nx.NetworkXNoPath:
counter+=1 #oneway edges by random choice
env.run(until=1000000)
print "COCHES REGISTRADOS EN ESE MOVIMIENTO: %i"%len(action.fees) |
<reponame>Impavidity/SearchEngine
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nltk.tokenize import word_tokenize
import os
from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from gensim import corpora, models, similarities
import logging
import pprint
import json
from nltk.corpus import wordnet
from revised import *
from BasicProcess import *
import math
def PivotTrain(DocNum):
    """Build pivoted-length-normalized tf-idf vectors for every document.

    Reads per-document term frequencies from ``tmp/Frequent`` and the posting
    lists from ``tmp/PostingList``, then writes the sparse vectors (sorted by
    term id) as JSON to ``tmp/corpus_tfidf2``.

    :param DocNum: total number of documents in the collection (for the idf).
    """
    import operator  # NOTE(review): unused here; kept to preserve behavior
    with open("tmp/Frequent") as frequent_file:
        Frequent = json.load(frequent_file)
    with open("tmp/PostingList") as postinglist_file:
        PostingList = json.load(postinglist_file)
    print "open file finished"
    Count = 0
    corpus_tfidf2 = []
    for document in Frequent:
        # Document length norm: sum over terms of (1 + log10(tf)).
        sumdf = 0
        for key,value in document.iteritems():
            sumdf = math.log10(float(value)) + sumdf + 1
        for key,value in document.iteritems():
            # Pivoted normalization: (1 + log tf) / norm, rescaled by document
            # length against the pivot (slope 0.0118), then times idf.
            t = (math.log10((value))+1)
            t = t/sumdf
            t = t*len(document)
            t = t/(1+0.0118*len(document))
            #print (DocNum)/len(PostingList[str(key)])
            t = t*math.log10((DocNum)/len(PostingList[str(key)]))
            if len(corpus_tfidf2)<Count+1:
                corpus_tfidf2.append([(int(key),t)])
            else:
                corpus_tfidf2[Count].append((int(key),t))
        if len(corpus_tfidf2)<Count+1:
            # Empty document: keep an empty vector to preserve index alignment.
            corpus_tfidf2.append([])
        # Sort by term id so retrieval can merge-join query and doc vectors.
        corpus_tfidf2[Count].sort(key=lambda x:x[0])
        Count+=1
    corpus_tfidf2_file = open("tmp/corpus_tfidf2","w")
    corpus_tfidf2_file.write(json.dumps(corpus_tfidf2 , indent = 4))
def PivotCosine(q, doc):
    """Dot product of two sparse vectors given as (term_id, weight) pairs.

    Both ``q`` and ``doc`` must be sorted by term id; the score is computed
    with a linear merge-join over the two lists.
    """
    score = 0
    qi = 0
    di = 0
    while qi < len(q) and di < len(doc):
        q_id, q_weight = q[qi]
        d_id, d_weight = doc[di]
        if q_id == d_id:
            score += q_weight * d_weight
            qi += 1
            di += 1
        elif q_id < d_id:
            qi += 1
        else:
            di += 1
    return score
def PivotCosineSimilarity(q,dlist, d):
    """Score each candidate document against the query vector.

    :param q: query as (term_id, weight) pairs sorted by term id.
    :param dlist: iterable of candidate document ids (from posting lists).
    :param d: corpus of document vectors, indexable by document id.
    :return: list of (doc_id, score) pairs sorted by score, descending.
    """
    Rate = {}
    n = 0
    for i in dlist:
        doc = d[i]
        cos = PivotCosine(q,doc)
        #print "doc:",n,"cos:",cos
        #print cos
        Rate[i] = cos
        n += 1
    # NOTE(review): despite the name this is a plain dot product -- document
    # length normalization is folded into the vectors during PivotTrain.
    anslist = sorted(Rate.iteritems(), key = lambda d:d[1], reverse = True)
    print anslist[:20]
    return anslist
def PivotCosineQuery(query,corpus_tfidf2,PostingList,dictionary,word2id):
    """Process a raw query string and rank documents with the pivoted model.

    Tokenizes and lemmatizes the query, applies spelling revision for short
    queries, gathers candidate documents from the posting lists, and scores
    them with PivotCosineSimilarity.
    """
    query_tokens = Tokenize([query])
    query_filtered = Filter(query_tokens)  # NOTE(review): result unused below
    #query_without_pun = RemovePunctuation(query_filtered)
    query_Lemmatized = Lemmatizer(query_tokens)
    print "basic process finished"
    q = query_Lemmatized[0]
    #revised
    i = 0
    if len(q)<10:
        # Only attempt spelling revision for short queries (cost control).
        while i<len(q):
            q[i] = Revised(q[i],word2id)
            i+=1
    # Union of posting lists of all known query terms = candidate documents.
    dlist=set([])
    for w in q:
        if w in word2id:
            if str(word2id[w]) in PostingList:
                dlist |= set(PostingList[str(word2id[w])])
    q_bow = dictionary.doc2bow(q)
    #q_bow_dfidf = GetQDfidf(q_bow,PostingList)
    #print "q:",q_bow_dfidf
    #print q_bow
    return PivotCosineSimilarity(q_bow, dlist,corpus_tfidf2)
#!/usr/bin/env python
# ephem_updater_sunrise.py
# Modules to import:
import datetime
import time
import ephem
import socket
import struct
import os
# CREATE RAM-DISK SUB-DIRECTORY IF DOES NOT EXIST
try:
    os.makedirs ('/rex/data/ramdisk/sunrise')
except OSError:
    # makedirs raises OSError both for "already exists" and for real failures
    # (e.g. permissions); re-raise unless the directory is actually there.
    if not os.path.isdir ('/rex/data/ramdisk/sunrise'):
        raise
# Calculate what time it is now:
# NOTE(review): none of these "now" values are used later in this script --
# presumably kept for parity with sibling ephem_updater_* scripts; confirm.
nowtd = datetime.datetime.now()
nowtd_hours = time.localtime(time.time())[3]
nowtd_minutes = time.localtime(time.time())[4]
nowtd_time = datetime.datetime.now().strftime('%H:%M')
nowtd_time_h = datetime.datetime.now().strftime('%H')
nowtd_time_m = datetime.datetime.now().strftime('%M')
nowtd_time_s = datetime.datetime.now().strftime('%S')
# READ LATITIDE FILE FROM REX CORE
file_LAT = open('/rex/data/static/LAT.txt', 'r')
LAT = file_LAT.read()
file_LAT.close()
# READ LONGITUDE FILE FROM REX CORE
file_LON = open('/rex/data/static/LON.txt', 'r')
LON = file_LON.read()
file_LON.close()
# OBSERVER SETTINGS:
eos=ephem.Observer()
eos.pressure = 0        # disable refraction modelling via pressure
eos.horizon = '-0:34'   # standard rise/set horizon correction instead
eos_is_rise = True      # this script computes the next RISING event
eos_is_sun = True       # ... of the Sun (False would select the Moon)
eos.lat = LAT
eos.lon = LON
eos.date = datetime.date.today()
eos_next_event = eos.next_rising if eos_is_rise else eos.next_setting
eos_sun_moon = ephem.Sun() if eos_is_sun else ephem.Moon()
# SUN OR MOON PyEphem SELECTION:
# NOTE(review): sun/moon below are never used; eos_sun_moon is what is queried.
sun = ephem.Sun()
moon = ephem.Moon()
# OBSERVER:
eos_time = (eos_next_event(eos_sun_moon, use_center=False))
# UTC STRUCTURED DATE & TIME
# NOTE(review): every line re-runs ephem.date.datetime(eos_time); converting
# once and reusing the datetime would be cheaper.  Left as-is here.
utc_hour = ephem.date.datetime(eos_time).strftime('%H')
utc_minute = ephem.date.datetime(eos_time).strftime('%M')
utc_second = ephem.date.datetime(eos_time).strftime('%S')
utc_month = ephem.date.datetime(eos_time).strftime('%m')
utc_day = ephem.date.datetime(eos_time).strftime('%d')
utc_year = ephem.date.datetime(eos_time).strftime('%Y')
utc_weekday = ephem.date.datetime(eos_time).strftime('%w')
utc_dayyear = ephem.date.datetime(eos_time).strftime('%j')
utc_weekyear = ephem.date.datetime(eos_time).strftime('%W')
utc_lapd = ephem.date.datetime(eos_time).strftime('%x')
utc_lapt = ephem.date.datetime(eos_time).strftime('%X')
# LOCAL STRUCTURED DATE & TIME (same components, machine-local time zone)
loc_hour = ephem.localtime(eos_time).strftime('%H')
loc_minute = ephem.localtime(eos_time).strftime('%M')
loc_second = ephem.localtime(eos_time).strftime('%S')
loc_month = ephem.localtime(eos_time).strftime('%m')
loc_day = ephem.localtime(eos_time).strftime('%d')
loc_year = ephem.localtime(eos_time).strftime('%Y')
loc_weekday = ephem.localtime(eos_time).strftime('%w')
loc_dayyear = ephem.localtime(eos_time).strftime('%j')
loc_weekyear = ephem.localtime(eos_time).strftime('%W')
loc_lapd = ephem.localtime(eos_time).strftime('%x')
loc_lapt = ephem.localtime(eos_time).strftime('%X')
# WRITE UTC DATE/TIME COMPONENT FILES
# BUG FIX: the original wrote the local (loc_*) values into every utc_*.txt
# file, leaving all computed utc_* variables unused.  Write the UTC values.
_utc_outputs = {
    'utc_hour': utc_hour,
    'utc_minute': utc_minute,
    'utc_second': utc_second,
    'utc_month': utc_month,
    'utc_day': utc_day,
    'utc_year': utc_year,
    'utc_weekday': utc_weekday,
    'utc_dayyear': utc_dayyear,
    'utc_weekyear': utc_weekyear,
    'utc_lapd': utc_lapd,  # locale's appropriate date representation
    'utc_lapt': utc_lapt,  # locale's appropriate time representation
}
for _name, _value in _utc_outputs.items():
    # 'with' guarantees the handle is closed even if the write fails.
    with open('/rex/data/ramdisk/sunrise/%s.txt' % _name, 'w') as _f:
        _f.write(_value)
# WRITE LOCAL DATE/TIME COMPONENT FILES
# Refactor: the eleven identical open/write/close sections are replaced with a
# data-driven loop; same files, same contents, and handles are now closed even
# if a write raises.
_loc_outputs = {
    'loc_hour': loc_hour,
    'loc_minute': loc_minute,
    'loc_second': loc_second,
    'loc_month': loc_month,
    'loc_day': loc_day,
    'loc_year': loc_year,
    'loc_weekday': loc_weekday,
    'loc_dayyear': loc_dayyear,
    'loc_weekyear': loc_weekyear,
    'loc_lapd': loc_lapd,  # locale's appropriate date representation
    'loc_lapt': loc_lapt,  # locale's appropriate time representation
}
for _name, _value in _loc_outputs.items():
    with open('/rex/data/ramdisk/sunrise/%s.txt' % _name, 'w') as _f:
        _f.write(_value)
|
<gh_stars>1-10
import json
import attr
import falcon
import pytest
from ebl.bibliography.application.reference_schema import ReferenceSchema
from ebl.corpus.domain.manuscript import (
ManuscriptType,
Period,
PeriodModifier,
Provenance,
)
from ebl.fragmentarium.domain.museum_number import MuseumNumber
from ebl.tests.factories.bibliography import ReferenceFactory
from ebl.tests.factories.corpus import ChapterFactory
from ebl.corpus.domain.stage import Stage
from ebl.tests.corpus.support import (
allow_references,
allow_signs,
create_chapter_dto,
create_chapter_url,
)
def test_get(client, bibliography, text_repository):
    """GET .../manuscripts returns the chapter's manuscripts with CORS header."""
    chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    expected = create_chapter_dto(chapter)["manuscripts"]
    result = client.simulate_get(create_chapter_url(chapter, "/manuscripts"))
    assert result.status == falcon.HTTP_OK
    assert result.headers["Access-Control-Allow-Origin"] == "*"
    assert result.json == expected
def test_updating(client, bibliography, sign_repository, signs, text_repository):
    """POSTing new manuscripts and uncertain fragments updates the chapter.

    Checks both the POST response body and that a subsequent GET reflects
    the persisted update.
    """
    uncertain_fragment = MuseumNumber.of("K.1")
    allow_signs(signs, sign_repository)
    chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    # Expected post-update state: first manuscript renumbered, plus one
    # uncertain fragment attached to the chapter.
    updated_chapter = attr.evolve(
        chapter,
        manuscripts=(
            attr.evolve(
                chapter.manuscripts[0], museum_number="new.number", accession=""
            ),
        ),
        uncertain_fragments=(uncertain_fragment,),
    )
    post_result = client.simulate_post(
        create_chapter_url(chapter, "/manuscripts"),
        body=json.dumps(
            {
                "manuscripts": create_chapter_dto(updated_chapter)["manuscripts"],
                "uncertainFragments": [str(uncertain_fragment)],
            }
        ),
    )
    assert post_result.status == falcon.HTTP_OK
    assert post_result.headers["Access-Control-Allow-Origin"] == "*"
    assert post_result.json == create_chapter_dto(updated_chapter)
    # The update must be persisted, not only echoed back by the POST.
    get_result = client.simulate_get(create_chapter_url(chapter))
    assert get_result.status == falcon.HTTP_OK
    assert get_result.headers["Access-Control-Allow-Origin"] == "*"
    assert get_result.json == create_chapter_dto(updated_chapter)
def test_updating_text_not_found(client, bibliography):
    """Updating manuscripts of a chapter whose text does not exist yields 404."""
    url = f"/texts/1/1/chapters/{Stage.STANDARD_BABYLONIAN.value}/unknown/manuscripts"
    payload = {"manuscripts": [], "uncertainFragments": []}
    result = client.simulate_post(url, body=json.dumps(payload))
    assert result.status == falcon.HTTP_NOT_FOUND
def test_updating_invalid_reference(
    client, bibliography, sign_repository, signs, text_repository
):
    """A manuscript citing a reference unknown to the bibliography yields 422."""
    allow_signs(signs, sign_repository)
    chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    # Valid manuscript shape, but the reference below was never registered
    # with the bibliography fixture -- validation must reject it.
    manuscript = {
        "id": chapter.manuscripts[0].id,
        "siglumDisambiguator": "1c",
        "museumNumber": "X.1",
        "accession": "",
        "periodModifier": PeriodModifier.NONE.value,
        "period": Period.OLD_ASSYRIAN.long_name,
        "provenance": Provenance.BABYLON.long_name,
        "type": ManuscriptType.SCHOOL.long_name,
        "notes": "",
        "colophon": "",
        "unplacedLines": "",
        "references": [ReferenceSchema().dump(ReferenceFactory.build())],
    }
    post_result = client.simulate_post(
        create_chapter_url(chapter, "/manuscripts"),
        body=json.dumps({"manuscripts": [manuscript], "uncertainFragments": []}),
    )
    assert post_result.status == falcon.HTTP_UNPROCESSABLE_ENTITY
def test_updating_text_category(client):
    """An invalid text category in the URL yields 404."""
    url = (
        f"/texts/invalid/1/chapters/{Stage.STANDARD_BABYLONIAN.value}"
        "/unknown/manuscripts"
    )
    payload = {"manuscripts": [], "uncertainFragments": []}
    result = client.simulate_post(url, body=json.dumps(payload))
    assert result.status == falcon.HTTP_NOT_FOUND
def test_updating_invalid_id(client):
    """An invalid text index in the URL yields 404."""
    url = (
        f"/texts/1/invalid/chapters/{Stage.STANDARD_BABYLONIAN.value}"
        "/unknown/manuscripts"
    )
    payload = {"manuscripts": [], "uncertainFragments": []}
    result = client.simulate_post(url, body=json.dumps(payload))
    assert result.status == falcon.HTTP_NOT_FOUND
def test_updating_invalid_stage(client):
    """An unknown stage segment in the URL yields 404."""
    url = "/texts/1/1/chapters/invalid/unknown/manuscripts"
    payload = {"manuscripts": [], "uncertainFragments": []}
    result = client.simulate_post(url, body=json.dumps(payload))
    assert result.status == falcon.HTTP_NOT_FOUND
# Two manuscripts sharing the same siglum disambiguator, period, provenance
# and type -- the API must reject the ambiguity.
AMBIGUOUS_MANUSCRIPTS = {
    "manuscripts": [
        {
            "id": 1,
            "siglumDisambiguator": "1c",
            "museumNumber": "X.1",
            "accession": "",
            "periodModifier": PeriodModifier.NONE.value,
            "period": Period.OLD_ASSYRIAN.long_name,
            "provenance": Provenance.BABYLON.long_name,
            "type": ManuscriptType.SCHOOL.long_name,
            "notes": "",
            "colophon": "",
            "unplacedLines": "",
            "references": [],
        },
        {
            "id": 2,
            "siglumDisambiguator": "1c",
            "museumNumber": "X.2",
            "accession": "",
            "periodModifier": PeriodModifier.NONE.value,
            "period": Period.OLD_ASSYRIAN.long_name,
            "provenance": Provenance.BABYLON.long_name,
            "type": ManuscriptType.SCHOOL.long_name,
            "notes": "",
            "colophon": "",
            "unplacedLines": "",
            "references": [],
        },
    ],
    "uncertainFragments": [],
}
# "invalid" is not parseable as a museum number.
INVALID_MUSEUM_NUMBER = {
    "manuscripts": [
        {
            "id": 1,
            "siglumDisambiguator": "1c",
            "museumNumber": "invalid",
            "accession": "",
            "periodModifier": PeriodModifier.NONE.value,
            "period": Period.OLD_ASSYRIAN.long_name,
            "provenance": Provenance.BABYLON.long_name,
            "type": ManuscriptType.SCHOOL.long_name,
            "notes": "",
            "colophon": "",
            "unplacedLines": "",
            "references": [],
        }
    ],
    "uncertainFragments": [],
}
# STANDARD_TEXT provenance is not allowed with this period/type combination.
INVALID_PROVENANCE = {
    "manuscripts": [
        {
            "id": 1,
            "siglumDisambiguator": "1c",
            "museumNumber": "invalid",
            "accession": "",
            "periodModifier": PeriodModifier.NONE.value,
            "period": Period.OLD_ASSYRIAN.long_name,
            "provenance": Provenance.STANDARD_TEXT.long_name,
            "type": ManuscriptType.NONE.long_name,
            "notes": "",
            "colophon": "",
            "unplacedLines": "",
            "references": [],
        }
    ],
    "uncertainFragments": [],
}
# Each case: [malformed payload (or manuscripts list), expected HTTP status].
@pytest.mark.parametrize(
    "manuscripts,expected_status",
    [
        [[{}], falcon.HTTP_BAD_REQUEST],
        [[], falcon.HTTP_UNPROCESSABLE_ENTITY],
        [AMBIGUOUS_MANUSCRIPTS, falcon.HTTP_BAD_REQUEST],
        [INVALID_MUSEUM_NUMBER, falcon.HTTP_BAD_REQUEST],
        [INVALID_PROVENANCE, falcon.HTTP_BAD_REQUEST],
        [
            {"manuscripts": [], "uncertainFragments": ["invalid"]},
            falcon.HTTP_BAD_REQUEST,
        ],
    ],
)
def test_update_invalid_entity(
    client,
    bibliography,
    manuscripts,
    expected_status,
    sign_repository,
    signs,
    text_repository,
):
    """Malformed manuscript payloads are rejected with 400/422."""
    allow_signs(signs, sign_repository)
    chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    post_result = client.simulate_post(
        create_chapter_url(chapter, "/manuscripts"),
        body=json.dumps({"manuscripts": manuscripts, "uncertainFragments": []}),
    )
    assert post_result.status == expected_status
|
<reponame>mglantz/insights-core<gh_stars>1-10
"""
SELinux
=======
Combiner for more complex handling of SELinux being disabled by any means
available to the users. It uses results of ``SEStatus``, ``Grub1Config``,
``Grub2Config``, ``Grub2EFIConfig`` and ``SelinuxConfig`` parsers.
It contains a dictionary ``problems`` in which it stores detected problems with
keys as follows and values are parsed lines with detected problem:
* ``sestatus_disabled`` - SELinux is disabled on runtime.
* ``sestatus_not_enforcing`` - SELinux is not in enforcing mode.
* ``grub_disabled`` - SELinux is set in Grub to be disabled.
* ``grub_not_enforcing`` - SELinux is set in Grub to not be in enforcing mode.
* ``selinux_conf_disabled`` - SELinux is set in configuration file to be disabled.
* ``selinux_conf_not_enforcing`` - SELinux is set in configuration file to not be in enforcing mode.
Examples:
>>> selinux = shared[SELinux]
>>> selinux.ok()
False
>>> selinux.problems
{'grub_disabled': ['/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 ro root= ...'],
'selinux_conf_disabled': 'disabled',
'sestatus_not_enforcing': 'permissive'}
"""
from ..core.plugins import combiner
from ..parsers.sestatus import SEStatus
from ..parsers.grub_conf import Grub1Config, Grub1EFIConfig, Grub2Config, Grub2EFIConfig
from ..parsers.selinux_config import SelinuxConfig
# Keys used in SELinux.problems to identify where SELinux was weakened.
GRUB_DISABLED = 'grub_disabled'            # disabled via kernel cmdline
GRUB_NOT_ENFORCING = 'grub_not_enforcing'  # permissive via kernel cmdline
RUNTIME_DISABLED = 'sestatus_disabled'     # disabled at runtime (sestatus)
RUNTIME_NOT_ENFORCING = 'sestatus_not_enforcing'  # not enforcing at runtime
BOOT_DISABLED = 'selinux_conf_disabled'    # disabled in /etc/sysconfig/selinux
BOOT_NOT_ENFORCING = 'selinux_conf_not_enforcing'  # not enforcing in config
@combiner(SEStatus, SelinuxConfig,
          optional=[Grub1Config, Grub1EFIConfig, Grub2Config, Grub2EFIConfig])
class SELinux(object):
    """
    A combiner for detecting that SELinux is enabled and running and also enabled at boot time.
    """

    def __init__(self, se_status, selinux_config, grub1, grub1_efi, grub2, grub2_efi):
        self.problems = {}
        self.sestatus = se_status
        self.selinux_config = selinux_config
        # The first grub parser that produced a result wins.
        self.grub_config = grub1 or grub1_efi or grub2 or grub2_efi
        self._check_sestatus()
        self._check_boot_config()
        self._check_grub_config()

    def _check_sestatus(self):
        """
        Check runtime SELinux configuration from sestatus output.
        Values of output from sestatus command are always lowercase.
        """
        status = self.sestatus.data['selinux_status']
        if status != 'enabled':
            self.problems[RUNTIME_DISABLED] = status
            return
        mode = self.sestatus.data['current_mode']
        if mode != 'enforcing':
            self.problems[RUNTIME_NOT_ENFORCING] = mode

    def _check_boot_config(self):
        """
        Check that SELinux is not disabled in /etc/sysconfig/selinux.
        This file determines the boot configuration for SELinux.
        """
        mode = self.selinux_config.data.get('SELINUX')
        if mode is None:
            self.problems[BOOT_NOT_ENFORCING] = 'Missing in config (Permissive by default)'
        elif mode == 'disabled':
            self.problems[BOOT_DISABLED] = mode
        elif mode != 'enforcing':
            self.problems[BOOT_NOT_ENFORCING] = mode

    def _check_grub_config(self):
        """
        Check grub and grub 2 kernel boot options for SELinux overrides
        (``selinux=0`` / ``enforcing=0``).  Experiments confirmed that only
        lowercase is accepted in grub configuration.
        """
        entries = self.grub_config.boot_entries if self.grub_config is not None else []
        for problem_key, token in ((GRUB_DISABLED, 'selinux=0'),
                                   (GRUB_NOT_ENFORCING, 'enforcing=0')):
            matches = [e.cmdline for e in entries if token in e.cmdline]
            if matches:
                self.problems[problem_key] = matches

    def ok(self):
        """
        Checks if there are any problems with SELinux configuration.
        Returns
            bool: True if SELinux is enabled and functional, false otherwise.
        """
        return not self.problems
|
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Third-party
import astropy.units as u
from astropy.coordinates.angles import rotation_matrix
import gary.coordinates as gc
import gary.integrate as gi
import gary.dynamics as gd
import numpy as np
from scipy.signal import argrelmin, argrelmax
from superfreq import SuperFreq
from ..util import _validate_nd_array, estimate_dt_nsteps
__all__ = ['create_ensemble', 'nearest_pericenter', 'nearest_apocenter',
'align_ensemble', 'prepare_parent_orbit', 'compute_align_matrix',
'compute_all_freqs', 'create_ensemble_isoenergy']
def create_ensemble(w0, potential, n=1000, m_scale=1E4):
    """
    Generate an ensemble of test-particle orbits around the specified initial
    conditions in the specified potential. The position and velocity scales of
    the ensemble are set by the mass scale (`m_scale`).

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    n : int (optional)
        Number of orbits in the ensemble.
    m_scale : numeric (optional)
        Mass scale of the ensemble.

    Returns
    -------
    ensemble_w0 : :class:`numpy.ndarray`
        The initial conditions for the ensemble. Will have shape (n+1,6),
        where the first (index 0) initial conditions are the parent orbit
        (e.g., specified when calling the function).
    """
    w0 = _validate_nd_array(w0, expected_ndim=1)

    # compute enclosed mass and position, velocity scales
    menc = potential.mass_enclosed(w0)
    # (m_scale / M_enc)^(1/3) scales the parent's |r| and |v| to set the
    # spatial and velocity dispersion of the ensemble
    rscale = (m_scale / menc)**(1/3.) * np.sqrt(np.sum(w0[:3]**2))
    vscale = (m_scale / menc)**(1/3.) * np.sqrt(np.sum(w0[3:]**2))

    ensemble_w0 = np.zeros((n,6))
    # positions: Gaussian scatter around the parent; rscale/sqrt(3) per axis
    # so the 3D dispersion is rscale
    ensemble_w0[:,:3] = np.random.normal(w0[:3], rscale / np.sqrt(3), size=(n,3))
    # ensemble_w0[:,3:] = np.random.normal(w0[3:], vscale / np.sqrt(3), size=(n,3))
    # ensemble_w0[:,3:] = w0[None, 3:]
    # ensemble_w0[:,3:] = np.random.normal(w0[3:], 0.002/np.sqrt(3), size=(n,3))

    # _r = np.random.normal(0, rscale, size=n)
    # _phi = np.random.uniform(0, 2*np.pi, size=n)
    # _theta = np.arccos(2*np.random.uniform(size=n) - 1)
    # ensemble_w0[:,:3] = np.array([_r*np.cos(_phi)*np.sin(_theta),
    #                               _r*np.sin(_phi)*np.sin(_theta),
    #                               _r*np.cos(_theta)]).T + w0[None,:3]

    # velocities: perturb only the radial spherical component; the two
    # tangential components are copied unchanged from the parent orbit.
    # NOTE(review): assumes positions are in kpc and velocities in kpc/Myr --
    # confirm against callers.
    vsph = gc.cartesian_to_spherical(w0[:3]*u.kpc, w0[3:]*u.kpc/u.Myr).value
    n_vsph = np.zeros((n,3))
    n_vsph[:,0] = np.random.normal(vsph[0], vscale, size=n)
    n_vsph[:,1] = np.zeros(n) + vsph[1]
    n_vsph[:,2] = np.zeros(n) + vsph[2]
    ensemble_w0[:,3:] = gc.spherical_to_cartesian(ensemble_w0[:,:3].T*u.kpc, n_vsph.T*u.kpc/u.Myr).value.T

    # prepend the parent orbit so index 0 is always the parent
    return np.vstack((w0,ensemble_w0))
def create_ensemble_isoenergy(w0, potential, n=1000, m_scale=1E4):
    """
    Generate an ensemble of test-particle orbits around the specified initial
    conditions in the specified potential. The position and velocity scales of
    the ensemble are set by the mass scale (`m_scale`).

    All ensemble members share the parent orbit's position and total energy;
    only the direction/partition of the velocity is randomized.

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    n : int (optional)
        Number of orbits in the ensemble.
    m_scale : numeric (optional)
        Mass scale of the ensemble.

    Returns
    -------
    ensemble_w0 : :class:`numpy.ndarray`
        The initial conditions for the ensemble. Will have shape (n+1,6),
        where the first (index 0) initial conditions are the parent orbit
        (e.g., specified when calling the function).
    """
    w0 = _validate_nd_array(w0, expected_ndim=1)
    # total (kinetic + potential) energy of the parent orbit; conserved
    # across the ensemble by construction below
    E = potential.total_energy(w0[:3], w0[3:])[0]

    # compute enclosed mass and position, velocity scales
    menc = potential.mass_enclosed(w0)
    rscale = (m_scale / menc)**(1/3.) * np.sqrt(np.sum(w0[:3]**2))
    vscale = (m_scale / menc)**(1/3.) * np.sqrt(np.sum(w0[3:]**2))

    ensemble_w0 = np.zeros((n,6))
    # positions: every member starts exactly at the parent position
    ensemble_w0[:,:3] = w0[None,:3]
    # ensemble_w0[:,:3] = np.random.normal(w0[:3], rscale / np.sqrt(3), size=(n,3))
    # ensemble_w0[:,3:] = np.random.normal(w0[3:], vscale / np.sqrt(3), size=(n,3))
    # ensemble_w0[:,3:] = w0[None, 3:]
    # ensemble_w0[:,3:] = np.random.normal(w0[3:], 0.002/np.sqrt(3), size=(n,3))

    # _r = np.random.normal(0, rscale, size=n)
    # _phi = np.random.uniform(0, 2*np.pi, size=n)
    # _theta = np.arccos(2*np.random.uniform(size=n) - 1)
    # ensemble_w0[:,:3] = np.array([_r*np.cos(_phi)*np.sin(_theta),
    #                               _r*np.sin(_phi)*np.sin(_theta),
    #                               _r*np.cos(_theta)]).T + w0[None,:3]

    # velocities: draw a radial component, then choose the tangential speed
    # so the total energy matches the parent's E exactly.
    # NOTE(review): assumes kpc and kpc/Myr units -- confirm against callers.
    vsph = gc.cartesian_to_spherical(w0[:3]*u.kpc, w0[3:]*u.kpc/u.Myr).value
    n_vsph = np.zeros((n,3))
    n_vsph[:,0] = np.random.normal(vsph[0], vscale, size=n)
    # NOTE(review): if the drawn radial speed is too large the argument of
    # sqrt goes negative and magv is NaN -- confirm callers tolerate this
    magv = np.sqrt(2*(E-potential.value(ensemble_w0[:,:3])) - (n_vsph[:,0]**2 + vsph[2]**2))
    # random angle splitting the tangential speed between the two components
    POO = np.random.uniform(0,2*np.pi,size=n)
    n_vsph[:,1] = magv*np.cos(POO)
    n_vsph[:,2] = magv*np.sin(POO)
    ensemble_w0[:,3:] = gc.spherical_to_cartesian(ensemble_w0[:,:3].T*u.kpc, n_vsph.T*u.kpc/u.Myr).value.T

    # prepend the parent orbit so index 0 is always the parent
    return np.vstack((w0,ensemble_w0))
def nearest_pericenter(w0, potential, forward=True, period=None):
    """
    Find the nearest pericenter to the initial conditions.

    By default, this looks for the nearest pericenter *forward* in time,
    but this can be changed by setting the `forward` argument to `False`.

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    forward : bool (optional)
        Find the nearest pericenter either forward (True) in time
        or backward (False) in time.
    period : numeric (optional)
        The period of the orbit. If not specified, will estimate
        it internally. Used to figure out how long to integrate
        for when searching for the nearest pericenter.

    Returns
    -------
    peri_w0 : :class:`numpy.ndarray`
        The 6D phase-space position of the nearest pericenter.
    """
    w0 = _validate_nd_array(w0, expected_ndim=1)

    if period is not None:
        dt = period / 256.  # 256 steps per orbital period
        nsteps = int(10.*period / dt)
    else:
        dt, nsteps = estimate_dt_nsteps(w0, potential,
                                        nperiods=10, nsteps_per_period=256)

    # a negative timestep integrates backward in time
    if not forward:
        dt = -dt

    t, orbit = potential.integrate_orbit(w0, dt=dt, nsteps=nsteps,
                                         Integrator=gi.DOPRI853Integrator)

    # the first local minimum of the radius is the nearest pericenter
    radii = np.sqrt(np.sum(orbit[:, 0, :3]**2, axis=-1))
    minima, = argrelmin(radii)
    return orbit[minima[0], 0]
def nearest_min_pericenter(w0, potential, forward=True, period=None):
    """
    Find the nearest *minimum* pericenter to the initial conditions.

    Unlike `nearest_pericenter`, which uses the first local minimum of the
    radius, this returns the step at which the radius is smallest over the
    whole integration window. By default the search runs forward in time;
    set `forward` to `False` to search backward.

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    forward : bool (optional)
        Find the nearest pericenter either forward (True) in time
        or backward (False) in time.
    period : numeric (optional)
        The period of the orbit. If not specified, will estimate
        it internally. Used to figure out how long to integrate
        for when searching for the nearest pericenter.

    Returns
    -------
    peri_w0 : :class:`numpy.ndarray`
        The 6D phase-space position of the nearest pericenter.
    """
    w0 = _validate_nd_array(w0, expected_ndim=1)

    if period is not None:
        dt = period / 256.  # 256 steps per orbital period
        nsteps = int(10.*period / dt)
    else:
        dt, nsteps = estimate_dt_nsteps(w0, potential,
                                        nperiods=10, nsteps_per_period=256)

    # integrate backward when requested
    dt = dt if forward else -dt

    t, orbit = potential.integrate_orbit(w0, dt=dt, nsteps=nsteps,
                                         Integrator=gi.DOPRI853Integrator)

    # global minimum of the radius over the integration window
    radii = np.sqrt(np.sum(orbit[:, 0, :3]**2, axis=-1))
    return orbit[radii.argmin(), 0]
def nearest_apocenter(w0, potential, forward=True, period=None):
    """
    Find the nearest apocenter to the initial conditions.

    By default, this looks for the nearest apocenter *forward* in time,
    but this can be changed by setting the `forward` argument to `False`.

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    forward : bool (optional)
        Find the nearest apocenter either forward (True) in time
        or backward (False) in time.
    period : numeric (optional)
        The period of the orbit. If not specified, will estimate
        it internally. Used to figure out how long to integrate
        for when searching for the nearest apocenter.

    Returns
    -------
    apo_w0 : :class:`numpy.ndarray`
        The 6D phase-space position of the nearest apocenter.
    """
    w0 = _validate_nd_array(w0, expected_ndim=1)

    if period is not None:
        dt = period / 256.  # 256 steps per orbital period
        nsteps = int(10.*period / dt)
    else:
        dt, nsteps = estimate_dt_nsteps(w0, potential,
                                        nperiods=10, nsteps_per_period=256)

    # a negative timestep integrates backward in time
    if not forward:
        dt = -dt

    t, orbit = potential.integrate_orbit(w0, dt=dt, nsteps=nsteps,
                                         Integrator=gi.DOPRI853Integrator)

    # the first local maximum of the radius is the nearest apocenter
    radii = np.sqrt(np.sum(orbit[:, 0, :3]**2, axis=-1))
    maxima, = argrelmax(radii)
    return orbit[maxima[0], 0]
def compute_align_matrix(w):
    """
    Given a single phase-space position, compute the rotation matrix that
    orients the angular momentum with the z axis and places the point
    along the x axis.

    Parameters
    ----------
    w : array_like
        The point to transform (1D, position followed by velocity).

    Returns
    -------
    R : :class:`numpy.ndarray`
        A 2D numpy array (rotation matrix).
    """
    w = _validate_nd_array(w, expected_ndim=1)

    x = w[:3].copy()
    v = w[3:].copy()

    # first rotate about z to put on x-z plane
    theta = np.arctan2(x[1], x[0]) * u.radian
    R1 = rotation_matrix(theta, 'z')
    x = np.asarray(R1.dot(x))[0]
    v = np.asarray(R1.dot(v))[0]

    # now rotate about y to put on x axis
    theta = np.arctan2(x[2], x[0]) * u.radian
    R2 = rotation_matrix(-theta, 'y')
    x = np.asarray(R2.dot(x))[0]
    v = np.asarray(R2.dot(v))[0]

    # now align L with z axis
    # theta = np.arccos(L[2] / np.sqrt(np.sum(L**2))) * u.radian
    L = np.cross(x, v)
    theta = np.arctan2(L[2], L[1]) * u.radian
    R3 = rotation_matrix(theta - 90*u.deg, 'x')
    x = np.asarray(R3.dot(x))[0]
    v = np.asarray(R3.dot(v))[0]

    # NOTE(review): '*' must act as matrix multiplication here. This relies
    # on astropy's rotation_matrix returning np.matrix; newer astropy
    # versions return a plain ndarray where '*' is elementwise -- confirm
    # the pinned astropy version.
    return R3*R2*R1
def align_ensemble(ws):
    """
    Given a collection of orbits (e.g., ensemble orbits), rotate the
    ensemble so that the 0th orbit is along the x-axis with angular
    momentum vector aligned with the z-axis.

    Parameters
    ----------
    ws : array_like
        A 3D array of orbits with shape (ntimes, norbits, 6).

    Returns
    -------
    new_ws : :class:`numpy.ndarray`
        The transformed orbits.
    """
    # NOTE(review): only the final timestep ws[-1] is rotated, so the return
    # value has shape (norbits, 6), not (ntimes, norbits, 6) as the
    # docstring implies -- confirm this is intentional.
    R = compute_align_matrix(ws[-1,0])
    # rotate positions and velocities of every ensemble member at the final
    # timestep, then re-stack into (norbits, 6)
    new_x = np.array(R.dot(ws[-1,:,:3].T).T)
    new_v = np.array(R.dot(ws[-1,:,3:].T).T)
    new_w = np.vstack((new_x.T, new_v.T)).T
    return new_w
def prepare_parent_orbit(w0, potential, nperiods, nsteps_per_period, min_pericenter=True):
    """
    Prepare integration parameters for a parent orbit starting from its
    nearest pericenter.

    Parameters
    ----------
    w0 : array_like
        The parent orbit initial conditions as a 1D numpy array.
    potential : `gary.potential.PotentialBase`
        The gravitational potential.
    nperiods : int
        Number of (max) periods to integrate.
    nsteps_per_period : int
        Number of steps to take per (max) orbital period.
    min_pericenter : bool (optional)
        Find the nearest *minimum* pericenter.

    Returns
    -------
    peri_w0 : :class:`numpy.ndarray`
        Phase-space position of the pericenter used as the starting point.
    dt : float
        Timestep to use when integrating from `peri_w0`.
    final_apo_ix : int
        Index (into the integrated orbit) of the last apocenter.
    """
    dt,nsteps,T = estimate_dt_nsteps(w0, potential, nperiods, nsteps_per_period,
                                     return_periods=True)
    T = T.max()

    # get position of nearest pericenter
    if min_pericenter:
        peri_w0 = nearest_min_pericenter(w0, potential, period=T)
    else:
        peri_w0 = nearest_pericenter(w0, potential, period=T)

    # integration parameters set by input
    dt = T / nsteps_per_period
    nsteps = int((nperiods+1) * nsteps_per_period)

    t,w = potential.integrate_orbit(peri_w0, dt=dt, nsteps=nsteps,
                                    Integrator=gi.DOPRI853Integrator)

    r = np.sqrt(np.sum(w[:,0,:3]**2, axis=-1))
    apo_ix, = argrelmax(r)

    # Bug fix: this was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit. Only an IndexError (fewer apocenters
    # found than requested periods) is expected here.
    try:
        final_apo_ix = apo_ix[nperiods-1]
    except IndexError:
        final_apo_ix = apo_ix[nperiods-2]

    return peri_w0, dt, final_apo_ix
def compute_all_freqs(t, ws, hamming_p=1, nintvec=10, force_cartesian=False):
    """
    Compute the fundamental frequencies and amplitudes for all
    specified orbits.

    This assumes that all orbits have the same geometry as the first
    orbit in the orbit array. That is, (if ``force_cartesian`` is
    ``False``) if the first orbit is a tube orbit, it assumes all orbits
    are tubes.

    Parameters
    ----------
    t : array_like
        Times shared by all orbits.
    ws : array_like
        A 3D array of orbits with shape (ntimes, norbits, ndim).
    hamming_p : int (optional)
        Hamming window exponent passed to `SuperFreq`.
    nintvec : int (optional)
        Maximum number of integer vectors passed to
        `SuperFreq.find_fundamental_frequencies`.
    force_cartesian : bool (optional)
        Always analyze in Cartesian coordinates, even for tube orbits.

    Returns
    -------
    freqs : :class:`numpy.ndarray`
        Fundamental frequencies, one row per orbit; rows of NaN mark
        orbits for which the frequency solve failed.
    amps : :class:`numpy.ndarray`
        Corresponding amplitudes, same layout as `freqs`.
    """
    # classify parent orbit; tube orbits are analyzed in Poincare polar coords
    circ = gd.classify_orbit(ws[:,0])
    is_tube = np.any(circ)

    allfreqs = []
    allamps = []
    for i in range(ws.shape[1]):
        ww = ws[:,i]
        if is_tube and not force_cartesian:
            # need to flip coordinates until circulation is around z axis
            new_ws = gd.align_circulation_with_z(ww, circ)
            new_ws = gc.cartesian_to_poincare_polar(new_ws)
        else:
            new_ws = ww

        # build complex series q_j + i*p_j pairing each coordinate with its
        # conjugate momentum (second half of the phase-space dimensions)
        fs = [(new_ws[:,j] + 1j*new_ws[:,j+ws.shape[-1]//2]) for j in range(ws.shape[-1]//2)]
        sf = SuperFreq(t, p=hamming_p)

        # Bug fix: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Catch Exception so a failed solve
        # records NaNs without masking interpreter-level signals.
        try:
            freqs,d,ixs = sf.find_fundamental_frequencies(fs, nintvec=nintvec)
        except Exception:
            allfreqs.append([np.nan,np.nan,np.nan])
            allamps.append([np.nan,np.nan,np.nan])
            continue

        allfreqs.append(freqs.tolist())
        allamps.append(d['|A|'][ixs].tolist())

    allfreqs = np.array(allfreqs)
    allamps = np.array(allamps)
    return allfreqs, allamps
|
# utils/eval_lex.py
#! /usr/bin/env python
# Copyright 2008, 2009, 2016, 2017, 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compares predicted pronunciations against a golden pronunciation lexicon.
Reports several evaluation metrics, including Phoneme Error Rate and
Mean Reciprocal Rank.
"""
from __future__ import unicode_literals
__author__ = '<EMAIL> (<NAME>)'
import io
import math
import optparse
import sys
import edist
_stdin = io.open(0, mode='rt', encoding='utf-8', closefd=False)
_stdout = io.open(1, mode='wt', encoding='utf-8', closefd=False)
_stderr = io.open(2, mode='wt', encoding='utf-8', closefd=False)
_INF = 1e300 * 1e300
_NAN = _INF - _INF
class FLAGS(object):
  """Module-level option holder, populated from command-line flags in main."""
  # Output statistics about individual edit operations.
  output_edit_stats = False
  # Output per-word phone error and reciprocal rank lines.
  output_word_stats = False
  # Output aggregate summary statistics at the end of the comparison.
  output_summary_stats = True
  # How to pick among multiple predicted pronunciations for a word:
  # 'best', 'worst', or 'median'.
  predicted_oracle_type = 'best'
def fdiv(x, y):  # pylint: disable=invalid-name
  """Floating point division without exceptions.

  By default, Python turns SIGFPE into an exception on floating point
  division-by-zero. Work around it.

  Args:
    x: dividend
    y: divisor

  Returns:
    x / y as close as possible to IEEE floating point semantics
  """
  # Improvement: use float('inf')/float('nan') directly instead of the
  # module-level _INF/_NAN constants built via overflow arithmetic; the
  # values are identical IEEE specials and the function is self-contained.
  if x != x or y != y:
    # Either operand is NaN (only NaN compares unequal to itself).
    return float('nan')
  if y == 0:
    if x == 0:
      return float('nan')  # 0/0 is undefined.
    elif x > 0:  # This is wrong when y == -0.0.
      return float('inf')
    else:
      return -float('inf')
  else:
    return (x * 1.0) / y
def ReadLexicon(reader):
  """Reads a pronunciation lexicon from 'reader'.

  The lexicon file format is line-oriented, where each line is of the form

  word TAB pronunciation [TAB cost [TAB field]...]

  and pronunciations are space-separated. The third column gives an
  optional rank or cost (lower is better) that is used when multiple
  pronunciations per word are provided.

  Args:
    reader: source of the lexicon

  Returns:
    lexicon read, as a dict mapping word -> list of (pron_tuple, cost,
    other_fields) sorted by increasing cost
  """
  d = {}
  for line in reader:
    line = line.rstrip('\r\n')
    fields = line.split('\t')
    # A usable entry needs at least word and pronunciation columns.
    if len(fields) < 2:
      _stderr.write('Warning: Ignoring line with less than two '
                    + 'tab-separated fields: %s\n' % line)
      continue
    word = fields[0]
    pron = fields[1].strip()
    if pron != fields[1]:
      _stderr.write('Info: Removed outer whitespace around pronunciation '
                    + '"%s" on line: %s\n' % (pron, line))
    # Adjacent spaces would produce empty phones after the split below.
    if '  ' in pron:
      _stderr.write('Warning: Two adjacent spaces found in pronunciation '
                    + '"%s" on line: %s\n' % (pron, line))
    pron = tuple(pron.split(' '))
    if len(fields) >= 3:
      try:
        cost = float(fields[2])
        other = tuple(fields[3:])
      except ValueError:
        _stderr.write('Warning: Ignoring line with ill-formed cost "%s": %s\n'
                      % (fields[2], line))
        continue
    else:
      assert len(fields) == 2
      other = ()
      # Special hack to assign increasing cost to multiple pronunciations
      # for a given word when no explicit cost is given.
      if word in d:
        # New cost = 1 + max cost seen so far for this word.
        max_cost = -_INF
        for _, cost, _ in d[word]:
          if cost > max_cost:
            max_cost = cost
        cost = max_cost + 1
        _stderr.write(('Info: Multiple pronunciations for word "%s" found,'
                       ' but cost was not specified for "%s"; using %g\n')
                      % (word, fields[1], cost))
      else:
        _stderr.write('Info: No cost specified, using 0: %s\n' % line)
        cost = 0
    if word in d:
      d[word].append((pron, cost, other))
    else:
      d[word] = [(pron, cost, other)]
  # Sort prons by increasing cost.
  for prons in d.values():
    prons.sort(key=lambda x: x[1])
  return d
class Stats(object):
  """Accumulator for lexicon-comparison statistics (see CompareLexica)."""

  def __init__(self):
    self.not_found = 0            # words missing from the golden lexicon
    self.words_total = 0          # words that have a golden reference
    self.prons = 0                # total predicted pronunciations seen
    self.word_error = 0           # words whose chosen pron needed >= 1 edit
    self.phone_edits = 0          # total phone edit operations
    self.reference_length = 0     # total golden pronunciation length
    self.phone_error = 0          # sum of per-word phone error rates
    self.squared_phone_error = 0  # sum of squared per-word phone errors
    self.max_phone_error = -_INF  # largest per-word phone error observed
    self.reciprocal_rank = 0      # sum of per-word reciprocal ranks
    self.edit_stats = {}          # (input, output) edit pair -> count
    return
def CompareLexica(golden_lex, predicted_lex, writer,
                  golden_cutoff=None, predicted_cutoff=None):
  """Compares predicted and golden lexica.

  Compares the predicted pronunciations against the golden
  reference dictionary. Outputs statistics to 'writer' in
  tab-separated value format with headers, to facility further
  analysis with R.

  Args:
    golden_lex: golden lexicon to compare against
    predicted_lex: lexicon containing predicted pronunciations
    writer: writer to which output will be sent
    golden_cutoff: only consider top n golden pronunciations
    predicted_cutoff: only consider top n predicted pronunciations
  """
  stats = Stats()
  for word, prons in predicted_lex.items():
    # Skip words without a golden reference; count them separately.
    if word not in golden_lex:
      _stderr.write("Warning: Word '%s' not found in golden lexicon\n" % word)
      stats.not_found += 1
      continue
    else:
      stats.words_total += 1
    assert prons
    stats.prons += len(prons)
    assert len(golden_lex[word]) >= 1
    # Compute stats for phone error rate and word error rate:
    argvals = []
    for predicted in prons[:predicted_cutoff]:
      predicted_pron = predicted[0]
      # Inner oracle: Find the best among the golden pronunciations.
      inner_argmin = None
      inner_valmin = _INF
      for golden in golden_lex[word][:golden_cutoff]:
        golden_pron = golden[0]
        ed = edist.EditDistance(golden_pron, predicted_pron,
                                edist.LevenshteinCost)
        val = fdiv(ed.Valmin(), len(golden_pron))  # Per-word phone error.
        if val < inner_valmin:
          inner_argmin = (golden, predicted, ed)
          inner_valmin = val
      assert inner_argmin is not None
      if FLAGS.output_word_stats:
        # Output phone error per predicted pronunciation.
        writer.write('#PRON_PhE\t%s\t%s\t%s\t%f\t%s\n' %
                     (word,
                      ' '.join(inner_argmin[0][0]),
                      ' '.join(predicted_pron),
                      inner_valmin,
                      '\t'.join(predicted[2])))
      argvals.append((inner_argmin, inner_valmin))
    assert argvals
    argvals.sort(key=lambda x: x[1])  # Sort by increasing values.
    # Outer oracle: Find the best/etc. among the predicted pronunciations.
    if FLAGS.predicted_oracle_type == 'best':
      index = 0
    elif FLAGS.predicted_oracle_type == 'worst':
      index = -1
    elif FLAGS.predicted_oracle_type == 'median':
      index = len(argvals) >> 1
    else:
      assert False, 'This cannot happen.'
    ((golden, predicted, ed), pherr) = argvals[index]
    golden_pron = golden[0]
    edits = ed.Valmin()
    assert fdiv(edits, len(golden_pron)) == pherr
    if FLAGS.output_word_stats:
      # Output phone error per word.
      writer.write('#WORD_PhE\t%s\t%s\t%s\t%f\t%s\n' %
                   (word,
                    ' '.join(golden_pron),
                    ' '.join(predicted[0]),
                    pherr,
                    '\t'.join(predicted[2])))
    # Accumulate word/phone error statistics for the chosen pronunciation.
    if edits != 0:
      stats.word_error += 1
    stats.phone_edits += edits
    stats.reference_length += len(golden_pron)
    stats.phone_error += pherr
    stats.squared_phone_error += pherr * pherr
    if pherr > stats.max_phone_error:
      stats.max_phone_error = pherr
    if FLAGS.output_edit_stats:
      # Tally individual (input, output) edit operations in the alignment.
      alignment = ed.Argmin('<eps>', '<eps>')
      for edit in alignment:
        if edit not in stats.edit_stats:
          stats.edit_stats[edit] = 0
        stats.edit_stats[edit] += 1
    # Compute stats for mean reciprocal rank:
    rr = 0
    found = False
    for i, predicted in enumerate(prons[:predicted_cutoff]):
      predicted_pron = predicted[0]
      # Inner oracle: Find the best among the golden pronunciations.
      for golden in golden_lex[word][:golden_cutoff]:
        golden_pron = golden[0]
        if predicted_pron == golden_pron:
          rr = fdiv(1, 1 + i)  # Reciprocal rank, one-based.
          found = True
          break
      if found:
        break
    assert found == (rr != 0)
    stats.reciprocal_rank += rr
    if FLAGS.output_word_stats:
      writer.write('#WORD_RR\t%s\t%f\n' % (word, rr))
  if FLAGS.output_edit_stats:
    for (i, o), c in stats.edit_stats.items():
      writer.write('#EDIT\t%s\t%s\t%d\n' % (i, o, c))
  if FLAGS.output_summary_stats:
    # Aggregate summary lines, each prefixed with '#' for easy filtering.
    writer.write('#MISSING: %d words without reference pronunciation\n'
                 % stats.not_found)
    writer.write('#TOTAL: %d words with reference pronunciation\n'
                 % stats.words_total)
    writer.write('#PRONS: %d pronunciations\n' % stats.prons)
    writer.write('#PPW: %.6f pronunciations per word (average)\n'
                 % fdiv(stats.prons, stats.words_total))
    writer.write('#ORACLE: %s\n' % FLAGS.predicted_oracle_type)
    writer.write('#WER: %.4f %% word error rate\n'
                 % (100 * fdiv(stats.word_error, stats.words_total)))
    writer.write('#PhER: %.4f %% phone error rate\n'
                 % (100 * fdiv(stats.phone_edits, stats.reference_length)))
    writer.write('#AVG_PhE: %.4f %% mean phone error per word\n'
                 % (100 * fdiv(stats.phone_error, stats.words_total)))
    writer.write('#RMS_PhE: %.4f %% root mean square phone error per word\n'
                 % (100 * math.sqrt(fdiv(stats.squared_phone_error,
                                         stats.words_total))))
    writer.write('#MAX_PhE: %.4f %% maximum phone error per word\n'
                 % (100.0 * stats.max_phone_error))
    writer.write('#MRR: %.6f mean reciprocal rank\n'
                 % fdiv(stats.reciprocal_rank, stats.words_total))
  return
def main(argv):
  """Entry point: argv is [prog, golden_lexicon_path[, predictions_path]]."""
  if len(argv) < 2:
    _stderr.write('Not enough arguments. Use --help for usage information.\n')
    sys.exit(1)
  # The golden lexicon is always read from the first positional argument.
  with io.open(argv[1], mode='rt', encoding='utf-8') as golden_reader:
    golden = ReadLexicon(golden_reader)
  # Predictions come from the optional second argument, or from stdin.
  if len(argv) == 3:
    with io.open(argv[2], mode='rt', encoding='utf-8') as predicted_reader:
      predicted = ReadLexicon(predicted_reader)
  else:
    predicted = ReadLexicon(_stdin)
  CompareLexica(golden, predicted, _stdout)  # Uses oracle.
  # Alternatively, CompareLexica(golden, predicted, _stdout, 1, 1)
  # would compute word/phone error based on the top golden and top
  # predicted pronunciation.
  return
if __name__ == '__main__':
  # Command-line driver: parse options into the module-level FLAGS holder,
  # then delegate to main() with the positional arguments.
  usage = 'Usage: %prog [options] GOLDEN_LEXICON [PREDICTIONS]'
  parser = optparse.OptionParser(usage)
  parser.add_option('--output_edit_stats', action='store_true',
                    dest='output_edit_stats',
                    help='Output statistics about edit operations.')
  parser.add_option('--output_word_stats', action='store_true',
                    dest='output_word_stats',
                    help='Output phone error and MRR for each word.')
  parser.add_option('--predicted_oracle_type', action='store',
                    dest='predicted_oracle_type',
                    help='What type of oracle to use to choose among multiple'
                    ' predicted pronunciations for a word; valid values are'
                    ' "best" to choose the best pronunciation;'
                    ' "worst" to choose the worst pronunciation; and'
                    ' "median" to choose a pronunciation half-way between'
                    ' best and worst.')
  options, args = parser.parse_args()
  if options.output_edit_stats:
    FLAGS.output_edit_stats = True
  if options.output_word_stats:
    FLAGS.output_word_stats = True
  # Only accept the three known oracle types; reject anything else.
  if options.predicted_oracle_type is None:
    # Don't change the default value.
    pass
  elif options.predicted_oracle_type in ('best', 'worst', 'median'):
    FLAGS.predicted_oracle_type = options.predicted_oracle_type
  else:
    _stderr.write('Illegal option value --predicted_oracle_type=%s.\n'
                  % options.predicted_oracle_type)
    sys.exit(1)
  main([parser.prog] + args)
|
# coding=utf-8
import logging
import numpy as np
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)
def _classification(embedding, labels_np, split_ratio=0.7):
    """Run one shuffled train/test split of one-vs-rest logistic regression.

    labels_np is a 2-column array of (node id, label); embedding is indexed
    by node id. Returns [accuracy, micro-F1, macro-F1] on the held-out part.
    """
    shuffled = shuffle(labels_np)
    node_ids = shuffled[:, 0]
    raw_labels = shuffled[:, 1]

    binarizer = LabelBinarizer()
    onehot = binarizer.fit_transform(raw_labels)

    n_train = int(shuffled.shape[0] * split_ratio)
    feats = embedding[node_ids]
    train_x, test_x = feats[:n_train, :], feats[n_train:, :]
    train_y, test_y = onehot[:n_train, :], onehot[n_train:, :]

    model = OneVsRestClassifier(
        LogisticRegression(class_weight='balanced', solver='liblinear', max_iter=3000))
    model.fit(train_x, train_y)

    # harden predicted probabilities into one-hot rows via the binarizer
    probs = model.predict_proba(test_x)
    pred_onehot = binarizer.transform(np.argmax(probs, 1))

    pred_cls = np.argmax(pred_onehot, 1)
    true_cls = np.argmax(test_y, 1)
    accuracy = np.sum(pred_cls == true_cls) / len(pred_onehot)

    # order matches the original eval_dict: acc, f1-micro, f1-macro
    return [accuracy,
            metrics.f1_score(true_cls, pred_cls, average='micro'),
            metrics.f1_score(true_cls, pred_cls, average='macro')]
def classification(embedding, labels_np, split_ratio=0.7, loop=100):
    """Average node-classification metrics over `loop` shuffled splits.

    Returns a dict with mean accuracy, mean micro/macro F1 and the
    standard deviations of the two F1 scores, all rounded to 4 decimals.
    """
    runs = np.array([_classification(embedding, labels_np, split_ratio)
                     for _ in range(loop)])
    eval_dict = {
        'acc': np.round(np.mean(runs[:, 0]), 4),
        'f1-micro': np.round(np.mean(runs[:, 1]), 4),
        'f1-macro': np.round(np.mean(runs[:, 2]), 4),
        'std-micro': np.round(np.std(runs[:, 1]), 4),
        'std-macro': np.round(np.std(runs[:, 2]), 4),
    }
    print(eval_dict)
    return eval_dict
def _clustering(embedding, labels_np):
    """Run one KMeans clustering of the embedded nodes.

    labels_np is a 2-column array of (node id, label). Returns a dict with
    homogeneity, completeness, silhouette and NMI scores.
    """
    shuffled = shuffle(labels_np)
    node_ids = shuffled[:, 0]
    raw_labels = shuffled[:, 1]
    feats = embedding[node_ids]

    # one cluster per distinct label value (labels assumed 0..k-1)
    model = KMeans(n_clusters=max(raw_labels) + 1)
    # model = BayesianGaussianMixture(n_components=max(raw_labels) + 1)
    onehot = LabelBinarizer().fit_transform(raw_labels)
    model.fit(feats)
    # c_pred = model.labels_
    pred = model.predict(feats)
    true_cls = np.argmax(onehot, 1)
    return {
        'homogeneity': metrics.homogeneity_score(true_cls, pred),
        'completeness': metrics.completeness_score(true_cls, pred),
        'silhouette': metrics.silhouette_score(feats, pred),
        'NMI': metrics.normalized_mutual_info_score(true_cls, pred),
    }
def clustering(embedding, labels_np, loop=10):
    """Average clustering metrics over `loop` runs of _clustering.

    Parameters
    ----------
    embedding : array indexed by node id
    labels_np : 2-column array of (node id, label)
    loop : int
        Number of clustering repetitions to average over.

    Returns
    -------
    dict with homogeneity, completeness, silhouette and NMI, each the mean
    over `loop` runs rounded to 4 decimals (also printed).
    """
    eval_dict = {
        'homogeneity': 0.0,
        'completeness': 0.0,
        'silhouette': 0.0,
        'NMI': 0
    }
    for _ in range(loop):
        tmp_dict = _clustering(embedding, labels_np)
        for key in tmp_dict.keys():
            eval_dict[key] += tmp_dict[key]
    # Bug fix: the averaging loop previously iterated `tmp_dict`, a variable
    # leaked from the accumulation loop (NameError when loop == 0); iterate
    # the accumulator itself instead.
    for key in eval_dict.keys():
        eval_dict[key] = round((1.0 * eval_dict[key]) / loop, 4)
    print(eval_dict)
    return eval_dict
|
import matplotlib
matplotlib.use('Agg')
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
import glob
import numpy as np
import ipdb as pb
from pathlib import Path
#import explorE_delete as ed
#figure(num=None, figsize=(15, 15), dpi=60, facecolor='w', edgecolor='k')
#IPW https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/bar_stacked.html
SMALL_SIZE = 10
MEDIUM_SIZE = 10
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def plot_hist_and_table(df_for_num_steps, df_for_num_steps_ts, df_for_num_steps_unif, num_steps):
    """Plot histograms of the arm-1 allocation proportion for three algorithms.

    Draws overlaid histograms of sample_size_1 / num_steps for Epsilon
    Greedy, Uniform Random and Thompson Sampling, attaches a small summary
    table (mean, variance, tail proportions), and saves the figure to
    histograms/condition_prop_n={num_steps}.png.

    Parameters
    ----------
    df_for_num_steps : pandas.DataFrame
        Epsilon-greedy simulation results for this horizon
        (must contain a 'sample_size_1' column).
    df_for_num_steps_ts : pandas.DataFrame
        Thompson-sampling simulation results for this horizon.
    df_for_num_steps_unif : pandas.DataFrame
        Uniform-random simulation results for this horizon.
    num_steps : int
        Number of participants; used for normalization and the filename.
    """
    fig_h, ax_h = plt.subplots()
    proportions_unif = df_for_num_steps_unif['sample_size_1'] / num_steps
    proportions = df_for_num_steps['sample_size_1'] / num_steps
    proportions_ts = df_for_num_steps_ts['sample_size_1'] / num_steps

    ax_h.hist(proportions, alpha = 0.5, label = "Epsilon Greedy")
    ax_h.hist(proportions_unif, alpha = 0.5, label = "Uniform Random")
    ax_h.hist(proportions_ts, alpha = 0.5, label = "Thompson Sampling")
    ax_h.legend()
    fig_h.suptitle("Prop > 0.75 or ")

    # Summary statistics for the table. (Bug fix: removed a dead
    # `data = np.random.uniform(...)` assignment that was immediately
    # overwritten below.)
    mean_ts = np.mean(proportions_ts)
    var_ts = np.var(proportions_ts)
    mean_eg = np.mean(proportions)
    var_eg = np.var(proportions)
    prop_lt_25_eg = np.sum(proportions < 0.25) / len(proportions)
    prop_lt_25_ts = np.sum(proportions_ts < 0.25) / len(proportions_ts)

    # NOTE(review): the trailing 3 and 7 look like placeholder values for the
    # '0.25 < prop < 0.5' column -- confirm intent.
    data = [[mean_ts, var_ts, prop_lt_25_ts, 3], [mean_eg, var_eg, prop_lt_25_eg,7]]
    final_data = [['%.3f' % j for j in i] for i in data] #<0.25, 0.25< & <0.5, <0.5 & <0.75, <0.75 & <1.0

    table = ax_h.table(cellText=final_data, colLabels=['Mean', 'Variance', 'prop < 0.25', '0.25 < prop < 0.5'], rowLabels = ["Thompson Sampling", "EG"], loc='bottom', cellLoc='center', colColours=['#FFFFFF', '#F3CC32', '#2769BD', '#DC3735'], bbox=[0.25, -0.5, 0.5, 0.3])
    # Bug fix: these three formatting calls were duplicated verbatim in the
    # original; applying them once has the same effect.
    table.auto_set_font_size(False)
    table.set_fontsize(7)
    table.auto_set_column_width((-1, 0, 1, 2, 3))

    fig_h.savefig("histograms/condition_prop_n={}.png".format(num_steps), bbox_inches = 'tight')
    fig_h.clf()
def stacked_bar_plot_with_cutoff(df = None, to_check = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
                                 title = None, percentile_dict_left = None, \
                                 percentile_dict_right = None, bs_prop = 0.0,\
                                 ax = None, ax_idx = None, to_check_ts = None):
    """Bar-plot chi-squared rejection proportions for Epsilon Greedy vs Uniform.

    For each step size present in the data, computes the proportion of
    simulations with p-value < 0.05 for epsilon greedy, uniform random and
    Thompson sampling, draws the first two as side-by-side bars with 95% CIs
    on `ax`, and calls plot_hist_and_table per step size.

    Parameters
    ----------
    df : pandas.DataFrame, optional
        Epsilon-greedy results; used directly when load_df is False.
    to_check, to_check_unif, to_check_ts : str, optional
        Paths to pickled result DataFrames (epsilon greedy, uniform, TS).
    to_check_ipw : str, optional
        Path to a .npy of IPW type-1 errors (loaded but currently unused).
    n, num_sims : int
        Sample-size label and number of simulations (sets CI width).
    load_df : bool
        When True, load the DataFrames from the to_check* paths.
    title, percentile_dict_left, percentile_dict_right, bs_prop :
        Unused here; kept for call-site compatibility.
    ax : matplotlib Axes to draw on.
    ax_idx : int, position in the subplot grid (legend only when == 2).

    Returns
    -------
    list of three arrays/lists of rejection rates per step size:
    [uniform, epsilon greedy, thompson sampling].
    """
    if load_df:  # bug fix: was `load_df == True`
        with open(to_check, 'rb') as f:
            df = pickle.load(f)
        with open(to_check_unif, 'rb') as f:
            df_unif = pickle.load(f)
        if to_check_ipw is not None:  # bug fix: was `!= None`
            ipw_t1_list = np.load(to_check_ipw)  # NOTE: loaded but not used below
        if to_check_ts is not None:  # bug fix: was `!= None`
            with open(to_check_ts, 'rb') as t:
                df_ts = pickle.load(t)

    step_sizes = df['num_steps'].unique()

    # (cleanup: removed many accumulator lists that were initialized but
    # never appended to or read)
    t1_list = []
    t1_list_unif = []
    t1_list_ts = []
    var_list = []
    for num_steps in step_sizes:
        df_for_num_steps = df[df['num_steps'] == num_steps]
        df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
        df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]

        plot_hist_and_table(df_for_num_steps, df_for_num_steps_ts, df_for_num_steps_unif, num_steps)

        num_replications = len(df_for_num_steps)
        num_rejected = np.sum(df_for_num_steps['pvalue'] < .05)
        num_rejected_ts = np.sum(df_for_num_steps_ts['pvalue'] < .05)
        num_rejected_unif = np.sum(df_for_num_steps_unif['pvalue'] < .05)
        var = np.var(df_for_num_steps_unif['pvalue'] < .05)

        t1_list.append(num_rejected / num_replications)
        t1_list_ts.append(num_rejected_ts / num_replications)
        t1_list_unif.append(num_rejected_unif / num_replications)
        var_list.append(var)

    t1_list_ts = np.array(t1_list_ts)

    ind = np.arange(len(step_sizes))
    ax.set_xticks(ind)
    ax.set_xticklabels(step_sizes)
    print("var", var_list)

    width = 0.23
    capsize = width*8

    t1_list = np.array(t1_list)
    t1_list_unif = np.array(t1_list_unif)
    # 95% CI for a proportion
    t1_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list*(1-t1_list)/num_sims)
    t1_se_unif = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_unif*(1-t1_list_unif)/num_sims)
    print(t1_se_unif) #note that power goes to 1.0 for unif, thus error bars

    p1 = ax.bar(ind, t1_list, width = width, yerr = t1_se, \
                ecolor='black', capsize=capsize, color = 'blue', edgecolor='black')
    p2 = ax.bar(ind-width, t1_list_unif, width = width,\
                yerr = t1_se_unif, ecolor='black', \
                capsize=capsize, color = 'red', \
                edgecolor='black')
    if ax_idx == 2:
        leg1 = ax.legend((p1[0], p2[0]), ('Epsilon Greedy Chi Squared', "Uniform Chi Squared"), bbox_to_anchor=(1.0, 1.6))
        ax.add_artist(leg1)

    ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
    ax.set_ylim(0, 0.3)
    ax.axhline(y=0.05, linestyle='--')
    return [t1_list_unif, t1_list, t1_list_ts] #returns [UR Eps_Greedy, TS], in this case, need to return for each step size, but only plotting for one bs, so save step size by model (4x2)
def compute_epsilon_and_save(actions_for_all_sims, num_steps, num_sims, es = 0):
    """Return (mean, std) of the IsExploring flag at sample ``num_steps``.

    Results are cached on disk; when both cache files already exist they are
    loaded instead of recomputed.

    :param actions_for_all_sims: action-log csv paths, one per simulation.
    :param num_steps: sample number at which the flag is read.
    :param num_sims: divisor used for the mean (number of simulations).
    :param es: effect size; nonzero selects the "Effect" cache directory.
    :returns: (phi, phi_std) where phi is a 1-element ndarray.
    """
    if es != 0:
        print("ES non zero")
        cache_dir = "../simulation_analysis_saves/epsilon_plots/cached_data/num_sims={}/Effect/".format(num_sims)
        mean_file = cache_dir + "num_steps={}es={}.npy".format(num_steps, es)
        std_file = cache_dir + "std_num_steps={}es={}.npy".format(num_steps, es)
    else:
        print("ES 0")
        cache_dir = "../simulation_analysis_saves/epsilon_plots/cached_data/num_sims={}/NoEffect/".format(num_sims)
        mean_file = cache_dir + "num_steps={}.npy".format(num_steps)
        std_file = cache_dir + "std_num_steps={}.npy".format(num_steps)

    if os.path.isfile(mean_file) and os.path.isfile(std_file):
        print("cached data found, loading...")
        return np.load(mean_file), np.load(std_file)

    print("no cached data found, computing...")
    # One IsExploring value per simulation, read at sample num_steps.
    flags = []
    for csv_path in actions_for_all_sims:
        sim_actions = pd.read_csv(csv_path, skiprows=1)
        flags.append(sim_actions[sim_actions["SampleNumber"] == num_steps]["IsExploring"].iloc[0])
    phi = np.array([sum(flags) / num_sims])
    phi_std = np.std(flags)
    Path(cache_dir).mkdir(parents=True, exist_ok=True)
    np.save(mean_file, phi)
    np.save(std_file, phi_std)
    return phi, phi_std
def compute_phi_and_save(actions_for_all_sims, num_steps, num_sims, c, es = 0):
    """Return (mean, std) of the IsExploring flag at sample ``num_steps`` for
    one posterior-difference threshold ``c``, caching the result on disk.

    :param actions_for_all_sims: action-log csv paths, one per simulation.
    :param num_steps: sample number at which the flag is read.
    :param num_sims: divisor used for the mean (number of simulations).
    :param c: threshold value, used only to key the cache files.
    :param es: effect size; nonzero selects the "Effect" cache directory.
    :returns: (phi, phi_std) where phi is a 1-element ndarray.
    """
    if es != 0:
        print("ES non zero")
        cache_dir = "../simulation_analysis_saves/phi_plots/cached_data/num_sims={}/Effect/".format(num_sims)
        mean_file = cache_dir + "num_steps={}es={}c={}.npy".format(num_steps, es, c)
        std_file = cache_dir + "std_num_steps={}es={}c={}.npy".format(num_steps, es, c)
    else:
        print("ES 0")
        cache_dir = "../simulation_analysis_saves/phi_plots/cached_data/num_sims={}/NoEffect/".format(num_sims)
        mean_file = cache_dir + "num_steps={}c={}.npy".format(num_steps, c)
        std_file = cache_dir + "std_num_steps={}c={}.npy".format(num_steps, c)

    if os.path.isfile(mean_file) and os.path.isfile(std_file):
        print("cached data found, loading {}...".format(mean_file))
        return np.load(mean_file), np.load(std_file)

    print("no cached data found, computing...")
    # One IsExploring value per simulation, read at sample num_steps.
    flags = []
    for csv_path in actions_for_all_sims:
        sim_actions = pd.read_csv(csv_path, skiprows=1)
        flags.append(sim_actions[sim_actions["SampleNumber"] == num_steps]["IsExploring"].iloc[0])
    phi = np.array([sum(flags) / num_sims])
    phi_std = np.std(flags)
    Path(cache_dir).mkdir(parents=True, exist_ok=True)
    np.save(mean_file, phi)
    np.save(std_file, phi_std)
    return phi, phi_std
def compute_propcm_and_save(actions_for_all_sims, num_steps, num_sims, alg_name, c=0.1, es = 0, epsilon=0.1):
    """Compute (and cache) the per-simulation proportion of exploit samples.

    ``IsExploring == 0`` marks an exploit (pure Thompson Sampling) step; the
    proportion is taken over the non-forced samples only.

    :param actions_for_all_sims: action-log csv paths, one per simulation.
    :param num_steps: total samples in each simulation.
    :param num_sims: number of simulations (used only to key the cache dir).
    :param alg_name: algorithm label used in the cache path.
    :param c, es, epsilon: parameters used only to key the cache files.
    :returns: ndarray with one proportion per simulation that had at least
        one exploit sample.
    """
    num_forced = 2  # the first two samples are forced; excluded from the denominator
    if es != 0:
        print("ES non zero")
        save_dir = "../simulation_analysis_saves/split_histograms/cached_data/prop_cm/num_sims={}/Effect/{}/".format(num_sims, alg_name)
        save_file = save_dir + "num_steps={}es={}c={}epsilon={}.npy".format(num_steps, es, c, epsilon)
    else:
        print("ES 0")
        save_dir = "../simulation_analysis_saves/split_histograms/cached_data/prop_cm/num_sims={}/NoEffect/{}/".format(num_sims, alg_name)
        save_file = save_dir + "num_steps={}c={}epsilon={}.npy".format(num_steps, c, epsilon)
    if os.path.isfile(save_file):
        print("cached data found, loading {}...".format(save_file))
        prop_list = np.load(save_file)
    else:
        print("no cached data found, computing...")
        prop_list = []  # one exploit proportion per simulation
        for action_file in actions_for_all_sims:
            actions_df = pd.read_csv(action_file, skiprows=1)
            # "IsExploring" == 0 means the sampler exploited on that step.
            exploit_df = actions_df[actions_df["IsExploring"] == 0]
            num_exp = len(exploit_df)
            # Simulations with no exploit samples are skipped entirely.
            if num_exp != 0:
                prop_list.append(num_exp / (num_steps - num_forced))
        prop_list = np.array(prop_list)
        Path(save_dir).mkdir(parents=True, exist_ok=True)
        print("saving to ", save_file)
        np.save(save_file, prop_list)
    return prop_list
def plot_phi(actions_dir, num_sims, n, c, ax, es = 0):
    """
    Plot the mean IsExploring flag ("phi hat") against the number of
    participants for one threshold c, on the provided axis.

    :param actions_dir: directory containing tbb_actions_<steps>_*.csv files.
    :param num_sims: number of simulations per step size.
    :param n: base sample size; step sizes plotted are n/2, n, 2n, 4n.
    :param c: posterior-difference threshold (cache key and legend label).
    :param ax: matplotlib axis to draw on.
    :param es: effect size forwarded to the cache lookup.
    """
    # pb.set_trace()
    # NOTE(review): both branches are identical; the n == 32 special case is vestigial.
    if n == 32:
        step_sizes = [int(np.ceil(n/2)), int(n), int(np.ceil(2*n)), int(np.ceil(4*n))]
    else:
        step_sizes = [int(np.ceil(n/2)), int(n), int(np.ceil(2*n)), int(np.ceil(4*n))]
    extra_steps = [3,4,5]  # tiny pilot sizes that fall back to the n=16 action logs
    phi_list = [] #per step sizes
    phi_std_list = [] #per step sizes
    for num_steps in step_sizes:#loop over step sizes, make a hist for each
        print("num_steps", num_steps)
        actions_for_all_sims = glob.glob(actions_dir + "/tbb_actions_{}_*.csv".format(num_steps)) #assumes one step size tbb_actions_
        if num_steps in extra_steps:
            if num_steps < 16:
                actions_for_all_sims = glob.glob(actions_dir + "/tbb_actions_{}_*.csv".format(16)) #assumes one step size tbb_actions_
        assert(len(actions_for_all_sims) != 0)
        phi, phi_std = compute_phi_and_save(actions_for_all_sims, num_steps, num_sims, c, es)
        phi_list.append(phi)
        phi_std_list.append(phi_std)
    # pb.set_trace()
    phi_list = np.array(phi_list)
    se = np.sqrt(phi_list*(1-phi_list))/np.sqrt(num_sims)
    # NOTE(review): h is immediately overwritten below; this first computation is dead.
    h = se * stats.t.ppf((1 + 0.95) / 2., num_sims-1)
    h = stats.t.ppf(1-0.025, num_sims)*np.sqrt(phi_list*(1-phi_list)/num_sims) #95 CI for Proportion
    print(phi_std_list, num_sims)
    # NOTE(review): "and 0" makes the first branch dead, so error bars are never drawn.
    if c == 0.1 and 0:
        ax.errorbar(step_sizes, phi_list,yerr = h,fmt = ".-", label = "c = {}".format(c))
    else:
        ax.errorbar(step_sizes, phi_list,yerr = None,fmt = ".-", label = "c = {}".format(c))
    ind = np.arange(len(step_sizes))  # NOTE(review): overwritten on the next line
    ind = step_sizes
    ax.set_xticks(ind)
    print(step_sizes)
    ax.set_xticklabels(step_sizes)
    ax.tick_params(axis='x', rotation=45)
    ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
    ax.set_ylim(0.0, 1.0)
    ax.legend()
def plot_epsilon(actions_dir, num_sims, n, ax):
    """
    Plot the mean IsExploring flag against the number of participants for the
    epsilon-greedy runs, with 95% CI error bars, on the provided axis.

    :param actions_dir: directory containing tbb_actions_<steps>_*.csv files.
    :param num_sims: number of simulations per step size.
    :param n: base sample size; step sizes plotted are n/2, n, 2n, 4n.
    :param ax: matplotlib axis to draw on.
    """
    # pb.set_trace()
    step_sizes = [int(np.ceil(n/2)), int(n), int(np.ceil(2*n)), int(np.ceil(4*n))]
    epsilon_list = [] #per step sizes
    std_list = [] #per step sizes
    for num_steps in step_sizes:#loop over step sizes, make a hist for each
        print("num_steps", num_steps)
        actions_for_all_sims = glob.glob(actions_dir + "/tbb_actions_{}_*.csv".format(num_steps)) #assumes one step size tbb_actions_
        epsilon, epsilon_std = compute_epsilon_and_save(actions_for_all_sims, num_steps, num_sims)
        #print("len(actions_for_all_sims)", len(actions_for_all_sims))
        # prop_list = [] #collect over sims
        #
        # for action_file in actions_for_all_sims: #looping over num_sims files
        #     actions_df = pd.read_csv(action_file, skiprows=1)
        #
        #     phi_curr = actions_df[actions_df["SampleNumber"] == num_steps]["IsExploring"].iloc[0]
        ##     num_exploring = len()
        #     prop_list.append(phi_curr)
        # phi = sum(prop_list)/num_sims
        epsilon_list.append(epsilon)
        std_list.append(epsilon_std)
    # pb.set_trace()
    epsilon_list = np.array(epsilon_list)
    h = stats.t.ppf(1-0.025, num_sims)*np.sqrt(epsilon_list*(1-epsilon_list)/num_sims) #95 CI for Proportion
    ax.errorbar(step_sizes, epsilon_list, fmt=".-", yerr = h)
    ind = np.arange(len(step_sizes))  # NOTE(review): overwritten on the next line
    ind = step_sizes
    ax.set_xticks(ind)
    print(step_sizes)
    ax.set_xticklabels(step_sizes)
    ax.tick_params(axis='x', rotation=45)
    ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
    ax.set_ylim(0.0, 1.0)
def get_prop_explore(actions_dir, num_sims, n, epsilon, alg_name, explore = True, effect = "NoEffect"):
    """Save, per step size, a histogram of the proportion of participants
    assigned to condition 1, split by whether the algorithm was exploring.

    :param actions_dir: directory containing tbb_actions_<steps>_*.csv files.
    :param num_sims: number of simulations (used in plot titles).
    :param n: base sample size; step sizes are n/2, n, 2n, 4n.
    :param epsilon: exploration rate, used in the plot titles only.
    :param alg_name: algorithm label used in the save paths.
    :param explore: True -> exploring samples only; False -> exploiting only;
        "Both" -> all samples.
    :param effect: "NoEffect" or "Effect"; selects the save subdirectory.
    """
    step_sizes = [int(np.ceil(n/2)), int(n), int(np.ceil(2*n)), int(np.ceil(4*n))]
    for num_steps in step_sizes:  # one histogram per step size
        print("num_steps", num_steps)
        actions_for_all_sims = glob.glob(actions_dir + "/tbb_actions_{}_*.csv".format(num_steps)) #assumes one step size tbb_actions_
        prop_list = [] #collect over sims
        num_exp_list = [] #collect over sims
        for action_file in actions_for_all_sims: #looping over num_sims files
            actions_df = pd.read_csv(action_file, skiprows=1)
            if explore == True:
                actions_df_explore = actions_df[actions_df["IsExploring"] == 1]
            elif explore == False:
                actions_df_explore = actions_df[actions_df["IsExploring"] == 0]
            elif explore == "Both":
                actions_df_explore = actions_df
            actions_df_explore_action1 = np.sum(actions_df_explore["AlgorithmAction"] == 1)
            num_exp = len(actions_df_explore)
            # Simulations with no matching samples contribute no proportion.
            if len(actions_df_explore) != 0:
                actions_df_explore_action1_prop = actions_df_explore_action1 / num_exp
                prop_list.append(actions_df_explore_action1_prop)
            num_exp_list.append(num_exp)
        # Histogram of per-simulation proportions.
        print(len(prop_list))
        fig_h, ax_h = plt.subplots()
        ax_h.hist(prop_list)
        num_exp_mean = np.mean(np.array(num_exp_list))
        Path("../simulation_analysis_saves/split_histograms/{}/ExploreAndExploit/{}".format(effect, alg_name)).mkdir(parents=True, exist_ok=True)
        Path("../simulation_analysis_saves/split_histograms/{}/IsExploring/{}".format(effect, alg_name)).mkdir(parents=True, exist_ok=True)
        Path("../simulation_analysis_saves/split_histograms/{}/IsExploiting/{}".format(effect, alg_name)).mkdir(parents=True, exist_ok=True)
        if explore == True:
            fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 In Exploration Across {} Simulations \n Epsilon = {}".format(num_steps, num_sims, epsilon))
            fig_h.savefig("../simulation_analysis_saves/split_histograms/{}/IsExploring/{}/condition_prop_ntotal={}_nexplore={}.png".format(effect, alg_name, num_steps, num_exp_mean), bbox_inches = 'tight')
        elif explore == False:
            fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 In Exploitation Across {} Simulations \n Epsilon = {}".format(num_steps, num_sims, epsilon))
            fig_h.savefig("../simulation_analysis_saves/split_histograms/{}/IsExploiting/{}/condition_prop_ntotal={}_nexploit={}.png".format(effect, alg_name, num_steps, num_exp_mean), bbox_inches = 'tight')
        elif explore == "Both":
            fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 Across {} Simulations \n Epsilon = {}".format(num_steps,num_sims, epsilon))
            # BUGFIX: bbox_inches was previously passed to str.format (where it
            # was silently ignored) rather than savefig, and the path hard-coded
            # "NoEffect" instead of the effect argument used by mkdir above.
            fig_h.savefig("../simulation_analysis_saves/split_histograms/{}/ExploreAndExploit/{}/condition_prop_ntotal={}.png".format(effect, alg_name, num_steps), bbox_inches = 'tight')
        fig_h.clf()
def prop_exploit_cm_hist(actions_dir_list, alg_name_list, num_sims, n, effect = "NoEffect", es = 0, c = 0.1, epsilon = 0.1):
    """
    Save a 2x2 grid of histograms (one per step size) of the cumulative
    proportion of exploit (pure TS) samples, overlaying one histogram per
    algorithm in ``alg_name_list``.

    :param actions_dir_list: action-log directories, parallel to alg_name_list.
    :param alg_name_list: algorithm labels used for the legend and cache keys.
    :param num_sims: number of simulations (title and cache key).
    :param n: base sample size; step sizes are n/2, n, 2n, 4n.
    :param effect: "NoEffect" or "Effect"; selects the save subdirectory.
    :param es, c, epsilon: experiment parameters (titles and cache keys).
    """
    step_sizes = [int(np.ceil(n/2)), int(n), int(np.ceil(2*n)), int(np.ceil(4*n))]
    bw_dict = {32:0.01, 88:0.01, 785:0.01}  # histogram bin width keyed by base n
    binwidth = bw_dict[n]
    fig, ax = plt.subplots(2,2)
    ax = ax.ravel()
    i = 0
    for num_steps in step_sizes:  # one subplot per step size
        print("num_steps", num_steps)
        for actions_dir, alg_name in zip(actions_dir_list, alg_name_list):
            actions_for_all_sims = glob.glob(actions_dir + "/tbb_actions_{}_*.csv".format(num_steps)) #assumes one step size tbb_actions_
            # BUGFIX: epsilon was previously passed as epsilon=c, keying the
            # cache files on the wrong parameter.
            prop_list = compute_propcm_and_save(actions_for_all_sims, num_steps, num_sims, alg_name = alg_name, c=c, es = es, epsilon=epsilon)
            ax[i].hist(prop_list, alpha = 0.5, label = alg_name, bins=np.arange(0, 1 + binwidth, binwidth))
        ax[i].set_title("n = {}, es = {}".format(num_steps, es))
        ax[i].set_ylabel("Number of Simulations")
        ax[i].set_xlabel("Proportion of Samples Where Exploiting (Using TS)")
        ax[i].legend()
        i +=1
    save_dir = "../simulation_analysis_saves/split_histograms/{}/IsExploiting/Prop_cm/n={}/".format(effect, n)
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    p1 = 0.5 + es/2
    p2 = 0.5 - es/2
    title = "Proportion of Exploit Samples Across {} Simulations \n $p_1$ = {} $p_2$ = {}".format(num_sims, p1,p2)
    fig.tight_layout(rect=[0, 0.03, 1, 0.87])
    fig.suptitle(title)
    fig.savefig(save_dir + "/" + title +".png", bbox_inches = 'tight')
def parse_dir(root, root_cutoffs):
    """Walk the simulation save tree under ``root`` and produce the phi plots
    and exploit-proportion histograms for each (c, n) combination.

    NOTE(review): root_cutoffs is currently unused; callers pass root twice.
    """
    num_sims = 500
    num_sims = 5000  # overrides the line above; 5000 sims are analysed
    arm_prob= 0.5
    #arm_prob_list = [0.2, 0.5, 0.8]
    arm_prob_list = [0.2, 0.5, 0.8]
    #pb.set_trace()
    es_list = [0.5, 0.3, 0.1]
    n_list = [32, 88, 785]
    # n_list = [32]
    epsilon = 0.1
    c_list = [0.08, 0.1, 0.12]
    # c_list = [0.08]
    #EpsilonGreedyIsEffect/num_sims=5armProb=0.5/es=0.3epsilon=0.1/
    root_dir = root + "/num_sims={}armProb={}".format(num_sims, arm_prob)
    fig, ax = plt.subplots(1,3, sharey = True)
    fig_prop, ax_prop = plt.subplots(1,3, sharey = True)
    ax[0].set_ylabel("$\hat \phi$")
    #fig.set_size_inches(17.5, 13.5)
    ax = ax.ravel()
    ax_prop = ax_prop.ravel()
    # Epsilon-TS baseline runs live in a fixed location.
    root_ets = "../simulation_saves/EpsilonTSNoEffect/num_sims=5000armProb=0.5/"#hard code for now
    for c in c_list:
        i = 0  # subplot index, one per n
        for n in n_list:
            bs = 1  # batch size
            # es_dir = root_dir + "/N={}epsilon={}/".format(n, epsilon)
            es_dir = root_dir + "/N={}c={}/".format(n, c)
            ets_dir = root_ets + "/N={}epsilon={}".format(n, 0.1)
            # Sanity check: exactly one results pickle per configuration.
            to_check = glob.glob(es_dir + "/*Prior*{}*{}Df.pkl".format(bs,n))[0] #Has eg, 34 in 348!!
            assert(len(glob.glob(es_dir + "/*Prior*{}*{}Df.pkl".format(bs,n))) == 1)
            # to_check_unif = glob.glob(es_dir + "/*Uniform*{}*{}Df.pkl".format(bs, n))[0]
            # assert(len(glob.glob(es_dir + "/*Uniform*{}*{}Df.pkl".format(bs, n))) == 1)
            # to_check_ts = glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,n))[0] #Has eg, 34 in 348!!
            # assert(len(glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,n))) == 1)
            actions_dir_ets = ets_dir + "/bbEqualMeansEqualPriorburn_in_size-batch_size={}-{}".format(bs, bs)
            actions_dir_tsppd = es_dir + "bbEqualMeansEqualPriorburn_in_size-batch_size={}-{}".format(bs, bs)
            prop_explore = plot_phi(actions_dir_tsppd, num_sims, n, c, ax[i]) #averaged over sims
            actions_dir_list = [actions_dir_ets,actions_dir_tsppd]
            alg_name_list = ["Epsilon TS {}".format(epsilon), "TS PostDiff {}".format(c)]
            # Histograms are only produced for the middle threshold c = 0.1.
            if c == 0.1:
                prop_exploit_cm_hist(actions_dir_list, alg_name_list = alg_name_list, num_sims = num_sims, n = n, c=c, epsilon = epsilon)
            # NOTE(review): "and 0" makes this whole branch dead code.
            if c==0.1 and 0:
                #prop_explore = plot_epsilon(actions_dir_ets, num_sims, n, ax[i]) #averaged over sims
                #: prop_explore = get_prop_explore(actions_dir_tsppd, num_sims, n, epsilon) #averaged over sims
                alg_name = "EpsilonTS0.1"
                prop_exploit = get_prop_explore(actions_dir_ets, num_sims, n, epsilon, alg_name, False) #averaged over sims
                prop_exploit = get_prop_explore(actions_dir_ets, num_sims, n, epsilon, alg_name, True) #averaged over sims
                alg_name = "TSPPD0.1"
                prop_exploit = get_prop_explore(actions_dir_tsppd, num_sims, n, epsilon, alg_name, False) #averaged over sims
                prop_exploit = get_prop_explore(actions_dir_tsppd, num_sims, n, epsilon, alg_name, True) #averaged over sims
                #prop_exploit = get_prop_explore(actions_dir_tsppd, num_sims, n, epsilon, "Both") #averaged over sims
            ax[i].set_title("n = {}".format(n))
            i += 1
    fig.tight_layout(rect=[0, 0.03, 1, 0.87])
    save_dir = "../simulation_analysis_saves/phi_plots/NoEffect/"
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    title = "$\hat \phi$ Across {} Simulations $p_1 = p_2 = 0.5$ \n $\phi$ := p($|p_1 - p_2| < c$)".format(num_sims)
    title_prop = "$Proportion of Exploit Samples Across {} Simulations $p_1 = p_2 = 0.5$".format(num_sims)
    fig.suptitle(title)
    fig.savefig(save_dir + "/" + title +".png")
    #handles, labels = ax[i-1].get_legend_handles_labels()
    #fig.legend(handles, labels, loc='upper right', prop={'size': 50})
    #fig.tight_layout()
    # if not os.path.isdir("plots"):
    #     os.mkdir("plots")
    # print("saving to ", "plots/{}.png".format(title))
    # fig.tight_layout()
    # fig.subplots_adjust(top=.8)
    # fig.savefig("plots/{}.svg".format(title), bbox_inches = 'tight')
    # plt.show()
    # plt.clf()
    # plt.close()
if __name__ == "__main__":
    # Developer toggles: earlier assignments are overridden; only the last
    # root value is analysed.
    root = "EpsilonGreedyNoEffect"
    root = "../simulation_saves/TSPPDNoEffect_c=0pt1"
    root = "../simulation_saves/TSPPDNoEffect"
    #parse_dir(root, root_cutoffs)
    parse_dir(root, root)  # root_cutoffs variant disabled; root passed twice
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 19:50:38 2022
@author: afadaei
"""
import numpy as np
import pandas as pd
import os
def Load_Valid_Stations(VALID_PATH):
    """Load and return the pickled dict of valid stations from a .npy file."""
    return np.load(VALID_PATH, allow_pickle='TRUE').item()
def update(valid, Station, dataset, Feature_Lat, Feature_Long):
    """Increment Station counts for every trip endpoint that is a valid station.

    :param valid: collection of valid (lat, long) station tuples.
    :param Station: dict mapping (lat, long) -> count, mutated in place.
    :param dataset: DataFrame holding one trip per row.
    :param Feature_Lat: name of the latitude column to read.
    :param Feature_Long: name of the longitude column to read.
    :returns: the (mutated) Station dict.
    """
    # zip over the two columns instead of indexing by position; this is also
    # robust to DataFrames whose index is not the default RangeIndex.
    for coords in zip(dataset[Feature_Lat], dataset[Feature_Long]):
        if coords in valid:
            Station[coords] += 1
    return Station
def Count_Start_End(DIR_PATH, Valid_Stations):
    """Count trips starting and ending at each valid station.

    Walks DIR_PATH/<year>/<month>/<file>.csv (skipping "ZipFile" folders) and
    tallies, per station, how many trips start and end there. Handles the
    three column-naming schemes used across dataset years.

    :param DIR_PATH: root dataset directory (Windows-style separators).
    :param Valid_Stations: iterable of valid (lat, long) station tuples.
    :returns: (Start, End) count dicts keyed by (lat, long).
    """
    Start = dict.fromkeys(Valid_Stations, 0)
    End = dict.fromkeys(Valid_Stations, 0)
    for year in os.listdir(DIR_PATH):
        new_dir = DIR_PATH + "\\" + year  # NOTE: Windows-only path building
        #if year == "2020":
        #    break
        for month in os.listdir(new_dir):
            FolderPath = new_dir + "\\" + month
            if(month == "ZipFile"):
                continue
            for item in os.listdir(FolderPath):
                DataSetPath = FolderPath + "\\" + item
                data = pd.read_csv(DataSetPath)
                if "start station latitude" in data:
                    Start = update(Valid_Stations, Start, data, "start station latitude", "start station longitude")
                    End = update(Valid_Stations, End, data, "end station latitude", "end station longitude")
                elif "Start Station Latitude" in data:
                    Start = update(Valid_Stations, Start, data, "Start Station Latitude", "Start Station Longitude")
                    End = update(Valid_Stations, End, data, "End Station Latitude", "End Station Longitude")
                elif "start_lat" in data:
                    # BUGFIX: the longitude column was previously "start_lat",
                    # so start counts matched (lat, lat) pairs and never hit
                    # any valid station.
                    Start = update(Valid_Stations, Start, data, "start_lat", "start_lng")
                    End = update(Valid_Stations, End, data, "end_lat", "end_lng")
                else:
                    print("No Data this year")
                    print(year, month)
    return Start, End
def Make_StartEnd_DataFrame(Starting_Stations, Ending_Stations):
    """Build a DataFrame of per-station trip counts.

    Both dicts are keyed by (lat, long) in the same order, so the station
    coordinates are taken from Starting_Stations only.
    """
    latitudes = [coords[0] for coords in Starting_Stations]
    longitudes = [coords[1] for coords in Starting_Stations]
    return pd.DataFrame(data={
        "Start Latitude": latitudes,
        "Start Longitude": longitudes,
        "Number Out": Starting_Stations.values(),
        "Number In": Ending_Stations.values(),
    })
def Save_CSV(SAVE_OBJ, SAVE_PATH):
    """Write a DataFrame to CSV at SAVE_PATH (index column included)."""
    SAVE_OBJ.to_csv(SAVE_PATH)
def Save_OBJ(SAVE_OBJ, SAVE_PATH):
    """Pickle an object to a .npy file via numpy (pair of Load_OBJ)."""
    np.save(SAVE_PATH, SAVE_OBJ)
def Load_OBJ(LOAD_PATH):
    """Load and return an object previously saved with Save_OBJ."""
    return np.load(LOAD_PATH,allow_pickle='TRUE').item()
if __name__ == "__main__":
    # Hard-coded Windows paths to the raw dataset and cached output files.
    DIR_PATH = r"D:\Amin\Education\Course\Data Analytics and Visualization (Winter 2021-2022)\Project\DataSet";
    VALID_PATH = r"D:\Amin\Education\Course\Data Analytics and Visualization (Winter 2021-2022)\Project\Code\Valid_Stations3.npy"
    SAVE_PATH = r"D:\Amin\Education\Course\Data Analytics and Visualization (Winter 2021-2022)\Project\Code\Start_End.csv"
    START_PATH = r"D:\Amin\Education\Course\Data Analytics and Visualization (Winter 2021-2022)\Project\Code\Start.npy"
    END_PATH = r"D:\Amin\Education\Course\Data Analytics and Visualization (Winter 2021-2022)\Project\Code\End.npy"
    # Uncomment or Comment any one of the following functions to use them
    # Pipeline: count all trips, build the start/end table, persist results.
    Valid_Stations = Load_Valid_Stations(VALID_PATH)
    Start, End = Count_Start_End(DIR_PATH, Valid_Stations)
    DF = Make_StartEnd_DataFrame(Start, End)
    Save_OBJ(Start, START_PATH)
    Save_OBJ(End, END_PATH)
    Save_CSV(DF, SAVE_PATH)
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import datetime
import discord
import os
import re
from typing import Callable, Dict, List, Set, Tuple
from cache import PssCache
import emojis
import pss_assert
import pss_core as core
import pss_crew as crew
import pss_dropship as dropship
import pss_entity as entity
import pss_item as item
import pss_lookups as lookups
import pss_research as research
import pss_room as room
import pss_sprites as sprites
import settings
import utility as util
# ---------- Classes ----------
class LegacyPromotionDesignDetails(entity.LegacyEntityDetails):
    """Legacy details object for a single promotion design.

    Parses display fields, sprite ids, VIP bonus percentages and the validity
    window out of the raw promotion design dict returned by the PSS API.
    """

    def __init__(self, promotion_info: dict):
        """Extract all promotion fields from the raw API entity dict.

        NOTE(review): the raw dict also carries a 'RewardString' property
        which is not parsed here yet.
        """
        self.__name: str = promotion_info.get('Name', None)
        self.__description: str = promotion_info.get('Description', None)
        self.__flags: int = int(promotion_info.get('Flags', '0'))
        self.__requirements: List[PromoRequirement] = _convert_requirement_string(promotion_info.get('RequirementString', ''))
        self.__sprite_id_background: int = int(promotion_info.get('BackgroundSpriteId', '0'))
        self.__sprite_id_button: int = int(promotion_info.get('ButtonSpriteId', '0'))
        self.__sprite_id_close_button: int = int(promotion_info.get('CloseButtonSpriteId', '0'))
        self.__sprite_id_icon: int = int(promotion_info.get('IconSpriteId', '0'))
        self.__sprite_id_title: int = int(promotion_info.get('TitleSpriteId', '0'))
        self.__subtitle: str = promotion_info.get('Subtitle', None)
        # BUGFIX: the four assignments below previously *called* the dict
        # (promotion_info(...)), raising "TypeError: 'dict' object is not
        # callable"; they now use .get like every other field.
        self.__vip_extra_crew_draws: int = int(promotion_info.get('ExtraCrewDraws', '0'))
        self.__vip_resource_conversion_discount: int = int(promotion_info.get('ResourceConversionDiscountPercentage', '0'))
        self.__vip_reward_store_discount: int = int(promotion_info.get('RewardStoreDiscountPercentage', '0'))
        self.__vip_speed_up_discount: int = int(promotion_info.get('SpeedUpDiscountPercentage', '0'))
        self.__vip_starbux_bonus: int = int(promotion_info.get('StarbuxBonusPercentage', '0'))
        self.__vip_xp_bonus: int = int(promotion_info.get('XPBonusPercentage', '0'))
        self.__from_datetime: datetime.datetime = _get_datetime(promotion_info.get('FromDate', None), settings.API_DATETIME_FORMAT_CUSTOM)
        self.__to_datetime: datetime.datetime = _get_datetime(promotion_info.get('ToDate', None), settings.API_DATETIME_FORMAT_CUSTOM)
        self.__iap_options: str = core.convert_iap_options_mask(promotion_info.get('PurchaseMask', '0'))
        # No long/short detail lines are defined for promotions yet.
        details_long: List[Tuple[str, str]] = [
        ]
        details_short: List[Tuple[str, str, bool]] = [
        ]
        super().__init__(
            name=promotion_info[PROMOTION_DESIGN_DESCRIPTION_PROPERTY_NAME],
            description=promotion_info['Description'],
            details_long=details_long,
            details_short=details_short
        )

    @property
    def description(self) -> str:
        return self.__description

    @property
    def from_datetime(self) -> datetime.datetime:
        return self.__from_datetime

    @property
    def iap_options(self) -> str:
        return self.__iap_options

    @property
    def name(self) -> str:
        return self.__name

    @property
    def requirements(self) -> str:
        """Comma-joined, human-readable form of all parsed requirements."""
        pretty_requirements = [requirement.get_pretty_requirement_string() for requirement in self.__requirements]
        return ', '.join(pretty_requirements)

    @property
    def sprite_url_background(self) -> str:
        return sprites.get_sprite_download_url(self.__sprite_id_background)

    @property
    def sprite_url_button(self) -> str:
        return sprites.get_sprite_download_url(self.__sprite_id_button)

    @property
    def sprite_url_close_button(self) -> str:
        return sprites.get_sprite_download_url(self.__sprite_id_close_button)

    @property
    def sprite_url_icon(self) -> str:
        return sprites.get_sprite_download_url(self.__sprite_id_icon)

    @property
    def sprite_url_title(self) -> str:
        return sprites.get_sprite_download_url(self.__sprite_id_title)

    @property
    def subtitle(self) -> str:
        return self.__subtitle

    @property
    def to_datetime(self) -> datetime.datetime:
        return self.__to_datetime

    @property
    def vip_extra_crew_draws(self) -> int:
        return self.__vip_extra_crew_draws

    @property
    def vip_resource_conversion_discount(self) -> int:
        """Represents a percentage."""
        return self.__vip_resource_conversion_discount

    @property
    def vip_reward_store_discount(self) -> int:
        """Represents a percentage."""
        return self.__vip_reward_store_discount

    @property
    def vip_speed_up_discount(self) -> int:
        """Represents a percentage."""
        return self.__vip_speed_up_discount

    @property
    def vip_starbux_bonus(self) -> int:
        """Represents a percentage."""
        return self.__vip_starbux_bonus

    @property
    def vip_xp_bonus(self) -> int:
        """Represents a percentage."""
        return self.__vip_xp_bonus
class PromoRequirement():
    """One parsed requirement clause such as 'Level>=5' or 'Vip<3'.

    Strict comparisons are normalized to inclusive bounds: '>' adds 1 to the
    parsed value and '<' subtracts 1, so only Min/Max need to be displayed.
    """

    def __init__(self, requirement: str):
        self.__lower_than: bool = False
        self.__greater_than: bool = False
        self.__equal: bool = False
        self.__requirement_type: str = None
        self.__requirement_value: int = None
        requirement = requirement.strip()
        if '>' in requirement:
            self.__greater_than = True
            separator, bias = ('>=', 0) if '>=' in requirement else ('>', 1)
            self.__requirement_type, self.__requirement_value = _get_requirement_type_and_value(requirement, separator, bias)
        elif '<' in requirement:
            self.__lower_than = True
            separator, bias = ('<=', 0) if '<=' in requirement else ('<', -1)
            self.__requirement_type, self.__requirement_value = _get_requirement_type_and_value(requirement, separator, bias)
        elif '==' in requirement:
            self.__equal = True
            self.__requirement_type, self.__requirement_value = _get_requirement_type_and_value(requirement, '==')

    def get_pretty_requirement_string(self) -> str:
        """Return e.g. 'Min Level: 5'; no prefix for equality requirements."""
        if self.__greater_than:
            modifier = 'Min '
        elif self.__lower_than:
            modifier = 'Max '
        else:
            modifier = ''
        pretty_requirement_type = _get_pretty_requirement_type(self.__requirement_type)
        return f'{modifier}{pretty_requirement_type}: {self.__requirement_value}'
# ---------- Constants ----------
# API endpoint and the key/description property names for promotion designs.
PROMOTION_DESIGN_BASE_PATH = 'PromotionService/ListAllPromotionDesigns2?languageKey=en'
PROMOTION_DESIGN_KEY_NAME = 'PromotionDesignId'
PROMOTION_DESIGN_DESCRIPTION_PROPERTY_NAME = 'Name'

# Maps a reward type (as found in a RewardString) to the function fetching
# details for one entity id; 'starbux' rewards have no detail lookup.
REWARD_TYPE_GET_ENTITY_FUNCTIONS: Dict[str, Callable] = {
    'item': item.get_item_details_by_id,
    'character': crew.get_char_details_by_id,
    'research': research.get_research_details_by_id,
    'room': room.get_room_details_by_id,
    'starbux': None
}
# ---------- Promo info ----------
async def get_promotion_details_by_id(promotion_design_id: str, promotions_data: dict = None) -> LegacyPromotionDesignDetails:
    """Look up one promotion design by id.

    Fetches the full promotions dict from the retriever when none is given.
    Returns None for a falsy or unknown id.
    """
    if not promotion_design_id:
        return None
    if promotions_data is None:
        promotions_data = await promotion_designs_retriever.get_data_dict3()
    if promotion_design_id in promotions_data:
        return LegacyPromotionDesignDetails(promotions_data[promotion_design_id])
    return None
def get_promotions_details_by_name(promotion_name: str) -> entity.EntityDetailsCollection:
    """Validate the promotion name.

    NOTE(review): incomplete stub — only validation is performed and the
    function implicitly returns None despite the annotation.
    """
    pss_assert.valid_entity_name(promotion_name, 'promotion_name')
async def get_promotions_designs_info_by_name(promotion_name: str, as_embed: bool = settings.USE_EMBEDS):
    """Search promotions by name and render the FirstPurchase matches.

    Returns a tuple (output, success): output is either an embed or a list of
    text lines; success is False when nothing matched.
    """
    pss_assert.valid_entity_name(promotion_name, 'promotion_name')
    promotion_infos = await promotion_designs_retriever.get_entities_infos_by_name(promotion_name)
    promotions_details = [
        LegacyPromotionDesignDetails(info)
        for info in promotion_infos
        if info['PromotionType'] == 'FirstPurchase'
    ]
    if not promotions_details:
        return [f'Could not find a promotion named **{promotion_name}**.'], False
    if as_embed:
        return _get_promotions_details_as_embed(promotions_details), True
    return _get_promotions_details_as_text(promotion_name, promotions_details), True
def _get_promotions_details_as_embed(promotion_details: Dict[str, dict]) -> discord.Embed:
    """Placeholder: embed rendering is not implemented yet; returns None."""
    pass
def _get_promotions_details_as_text(promotion_name: str, promotion_details: Dict[str, dict]) -> List[str]:
promotion_details_count = len(promotion_details)
lines = [f'Promotion stats for **{promotion_name}**']
for i, promotion_details in enumerate(promotion_details):
if promotion_details_count > 2:
lines.extend(promotion_details.get_details_as_text_short())
else:
lines.extend(promotion_details.get_details_as_text_long())
if i < promotion_details_count - 1:
lines.append(settings.EMPTY_LINE)
return lines
# ---------- Create EntityDetails ----------
# ---------- Transformation functions ----------
# ---------- Helper functions ----------
def _convert_reward_string(reward_string: str) -> Dict[str, str]:
result = {}
if not reward_string:
return result
for reward in reward_string.split('|'):
reward_type, entity_id = reward.split(':')
result.setdefault(reward_type, []).append(entity_id)
return result
def _convert_requirement_string(requirement_string: str) -> List[PromoRequirement]:
    """Split a '&&'-joined requirement string into PromoRequirement objects.

    Returns an empty list for an empty or None input.
    """
    if not requirement_string:
        return []
    return [PromoRequirement(part) for part in requirement_string.split('&&')]
def _get_datetime(api_datetime: str, datetime_format: str) -> datetime.datetime:
if not api_datetime:
return None
result = datetime.datetime.strptime(api_datetime, datetime_format)
if result < settings.PSS_START_DATE:
return None
else:
return result
def _get_pretty_requirement_type(requirement_type: str, language_key: str = 'en') -> str:
if language_key and requirement_type:
result = lookups.PROMO_REQUIREMENT_TYPE_LOOKUP.get(language_key, {}).get(requirement_type, None)
return result
else:
return None
def _get_pretty_reward_string(rewards: Dict[str, List[str]]) -> str:
    """Render a parsed reward dict as a comma-joined, human-readable string.

    For reward types with a detail lookup, each entity id is rendered via its
    short details; other types (e.g. 'starbux') fall back to a summary line.
    """
    result = []
    for entity_type in [key for key in rewards.keys() if rewards[key]]:
        get_entity_details_function = REWARD_TYPE_GET_ENTITY_FUNCTIONS[entity_type.lower()]
        if get_entity_details_function:
            intermediate = []
            for entity_id in rewards[entity_type]:
                entity_details: entity.LegacyEntityDetails = get_entity_details_function(entity_id)
                intermediate.append(entity_details.get_details_as_text_short())
            result.append(', '.join(intermediate))
        else:
            # NOTE(review): rewards values are lists of *strings* (see
            # _convert_reward_string), so sum() here would raise TypeError —
            # presumably this should sum int-converted amounts; confirm.
            result.append(f'{entity_type}: {sum(rewards[entity_type])}')
    return ', '.join(result)
def _get_requirement_type_and_value(requirement_string: str, separator: str, add_to_value: int = 0) -> (str, int):
requirement_type, requirement_value = requirement_string.split(separator)
requirement_value = int(requirement_value) + add_to_value
return requirement_type, requirement_value
# ---------- Initilization ----------
# Module-level retriever used by all promotion lookups in this module.
promotion_designs_retriever = entity.EntityRetriever(
    PROMOTION_DESIGN_BASE_PATH,
    PROMOTION_DESIGN_KEY_NAME,
    PROMOTION_DESIGN_DESCRIPTION_PROPERTY_NAME,
    cache_name='PromotionDesigns'
)
|
# This file is part of JST.
#
# JST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# JST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JST. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ticket_counting.ticket_counters import UUID_TICKETS, LABEL_TICKETS, INT_REGISTER_TICKETS, FLOAT_REGISTER_TICKETS
from compiler.compiler_state import CompilerState
from loggers.logger import Logger
class TestAst(unittest.TestCase):
    def setUp(self):
        """Create a fresh CompilerState and reset all global ticket counters
        so generated ids are deterministic for every test."""
        self.compiler_state = CompilerState()
        self.enable_debug(False)
        UUID_TICKETS.next_value = 0
        LABEL_TICKETS.next_value = 0
        INT_REGISTER_TICKETS.next_value = 0
        FLOAT_REGISTER_TICKETS.next_value = 0
    def tearDown(self):
        """Tear down and drop the CompilerState between tests."""
        self.compiler_state.teardown()
        self.compiler_state = None
    def enable_debug(self, enable, productions=True, source=False):
        """Optionally enable parser logging (INFO plus the production and/or
        source switches) when debugging a failing test."""
        if enable:
            prod_logger = self.compiler_state.get_parser_logger()
            prod_logger.add_switch(Logger.INFO)
            if productions:
                prod_logger.add_switch(Logger.PRODUCTION)
            if source:
                prod_logger.add_switch(Logger.SOURCE)
    def test_empty_file(self):
        """Smoke test: an empty translation unit parses without raising."""
        data = ""
        ast = self.compiler_state.parse(data)
        print(ast)
    def test_plain_main(self):
        """Smoke test: a minimal main() parses and renders to GraphViz."""
        data = """
            int main()
            {
                return 0;
            }
            """
        ast = self.compiler_state.parse(data)
        print(ast.to_graph_viz_str())
    def test_simple_variable_declaration(self):
        """Smoke test: a single uninitialized int declaration parses."""
        data = """
            int main()
            {
                int i;
            }
            """
        ast = self.compiler_state.parse(data)
        print(ast.to_graph_viz_str())
    def test_simple_variable_initialization(self):
        """Smoke test: constant and variable initializers parse."""
        data = """
            int main()
            {
                int i = 5;
                int j = i;
            }
            """
        ast = self.compiler_state.parse(data)
        print(ast.to_graph_viz_str())
    def test_const_declaration(self):
        """Smoke test: a const-qualified declaration parses."""
        data = """
            int main()
            {
                const int i = 5;
            }
            """
        ast = self.compiler_state.parse(data)
        print(ast.to_graph_viz_str())
    def test_array_declaration(self):
        """Parse a 1-D array declaration and regex-match the GraphViz output.

        NOTE(review): the expected pattern matches "int[5][5] i" while the
        source declares "int i[5]" — confirm whether the AST label repeating
        the dimension is intentional.
        """
        data = """
            int main()
            {
                int i[5];
            }
            """
        ast = self.compiler_state.parse(data)
        result = ast.to_graph_viz_str()
        print(result)
        import re
        # Node labels embed 5-digit ticket ids, hence the regex match.
        expected_solution = \
            'digraph {\n' \
            '\t"FileAST\\\\n\d\d\d\d\d" -> {"FunctionDefinition\\\\nint main\\\\n\d\d\d\d\d"};\n' \
            '\t"FunctionDefinition\\\\nint main\\\\n\d\d\d\d\d" -> {"CompoundStatement\\\\n\d\d\d\d\d"};\n' \
            '\t"CompoundStatement\\\\n\d\d\d\d\d" -> {"ArrayDeclaration\\\\nint\[5\]\[5\] i\\\\n\d\d\d\d\d"};\n' \
            '\t"ArrayDeclaration\\\\nint\[5\]\[5\] i\\\\n\d\d\d\d\d" -> {};\n' \
            '}'
        m = re.match(expected_solution, result)
        print(m)
        self.assertTrue(True if m else False)
def test_2d_array_declaration(self):
data = """
int main()
{
int i[5][7];
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_post_plus_plus(self):
data = """
int main()
{
int i = 0;
int b = 0;
b = i++;
return 0;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_lone_if(self):
data = """
int main()
{
int i;
if (i == 5)
{
i = 6;
}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_if_else(self):
data = """
int main()
{
int i;
if (i == 5)
{
i = 6;
}
else
{
i = 5;
}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_if_elif_else(self):
data = """
int main()
{
int i;
if (i == 5)
{
i = 6;
}
else if(i == 6)
{
i = 7;
}
else
{
i = 5;
}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_simple_assign_const(self):
data = """
int main()
{
int g;
char a;
float p;
g = 5;
a = 2;
p = 1.2;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_simple_assign_var(self):
data = """
int main()
{
int g;
int G;
g = 5;
G = g;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_cast_in_binary_expression(self):
data = """
int main()
{
int i = 5;
float f = -4.5;
i = (int) ((float) i + f);
return 0;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_array_simple_assign(self):
data = """
int main()
{
int a[10];
a[1] = 4;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_array_simple_access(self):
data = """
int main()
{
int g;
int a[10];
a[1] = 4;
g = a[1];
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_array_access_const_expr(self):
data = """
int main()
{
int g;
int a[10];
a[6] = 4;
g = a[5 + 1];
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_array_access_var_expr(self):
data = """
int main()
{
int g;
int a[10];
a[1] = 4;
g = a[1];
g = a[g + 1];
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_array_twodim(self):
data = """
int main()
{
int b[10][10];
b[1][1] = 5;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_for_loop_1(self):
data = """
int main()
{
int i;
for(i = 0; ;) {}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_for_loop_2(self):
data = """
int main()
{
int i;
for(i = 0; ; i++) {}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_for_loop_3(self):
data = """
int main()
{
int i;
for(i = 0; i < 1; i++) {}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_while_loop(self):
data = """
int main()
{
int i;
while(i < 5){}
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_do_while_loop(self):
data = """
int main()
{
int i;
do {} while (i > 10);
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_function_decl_top_impl_bottom(self):
data = """
int do_stuff();
int do_stuff()
{
return 5;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_function_decl_top_impl_bottom_call_middle(self):
data = """
int do_stuff();
int main()
{
return do_stuff();
}
int do_stuff()
{
return 5;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_function_parameters(self):
# TODO: this one fails because we don't have the concept of a "dereference expression/operation", although
# TODO: we aren't super worried about pointers for now.
data = """
int do_stuff(int* ret, int x)
{
*ret = x + x;
return 5;
}
int main()
{
int* ptr;
int num;
return do_stuff(ptr, num);
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_function_def_on_top(self):
data = """
// Definition on top
int do_stuff()
{
return 5;
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_function_def_on_top_call(self):
data = """
int do_stuff()
{
return 5;
}
int main()
{
return do_stuff();
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
def test_declare_const_and_var_types(self):
data = """
int main(){
int x;
int * y = 0;
int z[10];
const int i;
float j;
char k = 'a';
}
"""
ast = self.compiler_state.parse(data)
print(ast.to_graph_viz_str())
# Source: ImadDabbura/ml-zoo -- multilabel sampling and sparse-interaction utilities.
def multilabel_sample(y, size=1000, min_count=5, seed=None):
    """Sample row indices from a binary label matrix `y`.

    Returns indices for a sample of `size` rows if ``size > 1``, or
    ``size * len(y)`` rows if ``size <= 1``.  The sample is guaranteed to
    contain at least `min_count` positive examples of every label
    (i.e. every column of `y`).

    Args:
        y: binary indicator matrix (ndarray or pandas DataFrame) of shape
            (n_samples, n_labels) containing only 0s and 1s.
        size: absolute sample size when > 1, else a fraction of ``len(y)``.
        min_count: minimum number of positives per label in the sample.
        seed: optional RNG seed for reproducible sampling.

    Raises:
        ValueError: if `y` is not a binary indicator matrix, or some label
            has fewer than `min_count` positive examples overall.
    """
    try:
        if (np.unique(y).astype(int) != np.array([0, 1])).any():
            raise ValueError()
    except (TypeError, ValueError):
        raise ValueError(
            'multilabel_sample only works with binary indicator matrices')

    if (y.sum(axis=0) < min_count).any():
        raise ValueError(
            'Some classes do not have enough examples. Change min_count if '
            'necessary.')

    if size <= 1:
        # Fractional size: convert to an absolute (integer) row count.
        size = int(np.floor(y.shape[0] * size))

    if y.shape[1] * min_count > size:
        warn(f'Size less than number of columns * min_count, returning '
             f'{y.shape[1] * min_count} items instead of {size}.')
        size = y.shape[1] * min_count

    # RandomState(None) seeds from OS entropy.  The previous fallback,
    # np.random.randint(1), always returned 0, so "unseeded" runs were
    # silently deterministic.
    rng = np.random.RandomState(seed)

    if isinstance(y, pd.DataFrame):
        choices = y.index
        y = y.values
    else:
        choices = np.arange(y.shape[0])

    sample_idxs = np.array([], dtype=choices.dtype)

    # First, guarantee at least min_count positives for each label.
    for j in range(y.shape[1]):
        label_choices = choices[y[:, j] == 1]
        label_idxs_sampled = rng.choice(label_choices, size=min_count,
                                        replace=False)
        sample_idxs = np.concatenate([label_idxs_sampled, sample_idxs])
    sample_idxs = np.unique(sample_idxs)

    # Now that the per-label quota is met, top up with a uniform sample
    # drawn from the remaining rows.
    sample_count = int(size - sample_idxs.shape[0])
    remaining_choices = np.setdiff1d(choices, sample_idxs)
    remaining_sampled = rng.choice(remaining_choices,
                                   size=sample_count,
                                   replace=False)

    return np.concatenate([sample_idxs, remaining_sampled])
def multilabel_sample_dataframe(df, labels, size, min_count=5, seed=None):
    """Return a sample of `df` with `size` rows in which every class in the
    binary label matrix `labels` is represented at least `min_count` times.

    Thin wrapper around :func:`multilabel_sample`; rows are selected by
    index label via ``df.loc``.
    """
    sampled_idxs = multilabel_sample(
        labels, size=size, min_count=min_count, seed=seed)
    return df.loc[sampled_idxs]
def multilabel_train_test_split(X, Y, size, min_count=5, seed=None):
    """Split features `X` and binary label matrix `Y` into train/test sets.

    Returns ``(X_train, X_test, Y_train, Y_test)`` where every class in `Y`
    appears at least `min_count` times in the test set (the test indices are
    chosen by :func:`multilabel_sample` with test size `size`).
    """
    index = Y.index if isinstance(Y, pd.DataFrame) else np.arange(Y.shape[0])
    test_set_idxs = multilabel_sample(Y, size=size, min_count=min_count,
                                      seed=seed)
    # np.isin works for both a pandas Index and a plain ndarray; the
    # previous `index.isin(...)` raised AttributeError when Y was an
    # ndarray (np.arange has no .isin method).
    test_set_mask = np.isin(index, test_set_idxs)
    train_set_mask = ~test_set_mask
    return (X[train_set_mask], X[test_set_mask],
            Y[train_set_mask], Y[test_set_mask])
class SparseInteractions(BaseEstimator, TransformerMixin):
    """Scikit-learn transformer that appends interaction features.

    For every combination of 2..`degree` input columns, the element-wise
    product of those columns is appended as a new sparse column.  Feature
    names for the products join the source names with
    `feature_name_separator` and are available via ``get_feature_names``.
    """

    def __init__(self, degree=2, feature_name_separator="_"):
        self.degree = degree
        self.feature_name_separator = feature_name_separator

    def fit(self, X, y=None):
        # Stateless: all work happens in transform().
        return self

    def transform(self, X):
        # Capture column names BEFORE converting to csc_matrix: a sparse
        # matrix has no `columns` attribute, so the original
        # post-conversion check always fell through to numeric names and
        # silently discarded DataFrame column labels.
        if hasattr(X, "columns"):
            self.orig_col_names = X.columns
        else:
            self.orig_col_names = np.array([str(i) for i in range(X.shape[1])])
        if not sparse.isspmatrix_csc(X):
            X = sparse.csc_matrix(X)
        return self._create_sparse_interactions(X)

    def get_feature_names(self):
        # Names populated by transform(); original columns first, then
        # the interaction products in combination order.
        return self.feature_names

    def _create_sparse_interactions(self, X):
        out_mat = []
        self.feature_names = self.orig_col_names.tolist()
        for sub_degree in range(2, self.degree + 1):
            for col_ixs in combinations(range(X.shape[1]), sub_degree):
                # Name the new column after its source columns.
                name = self.feature_name_separator.join(
                    self.orig_col_names[list(col_ixs)])
                self.feature_names.append(name)
                # Element-wise product of the selected columns.
                out = X[:, col_ixs[0]]
                for j in col_ixs[1:]:
                    out = out.multiply(X[:, j])
                out_mat.append(out)
        return sparse.hstack([X] + out_mat)
# NOTE: trailing non-code residue from the dataset viewer removed.